focal_method | test_case |
---|---|
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
try {
final FileEntity entity = new FilesApi(new BrickApiClient(session))
.download(StringUtils.removeStart(file.getAbsolute(), String.valueOf(Path.DELIMITER)),
null, null, null, null);
final HttpUriRequest request = new HttpGet(entity.getDownloadUri());
if(status.isAppend()) {
final HttpRange range = HttpRange.withStatus(status);
final String header;
if(TransferStatus.UNKNOWN_LENGTH == range.getEnd()) {
header = String.format("bytes=%d-", range.getStart());
}
else {
header = String.format("bytes=%d-%d", range.getStart(), range.getEnd());
}
if(log.isDebugEnabled()) {
log.debug(String.format("Add range header %s for file %s", header, file));
}
request.addHeader(new BasicHeader(HttpHeaders.RANGE, header));
// Disable compression
request.addHeader(new BasicHeader(HttpHeaders.ACCEPT_ENCODING, "identity"));
}
final HttpResponse response = session.getClient().execute(request);
switch(response.getStatusLine().getStatusCode()) {
case HttpStatus.SC_OK:
case HttpStatus.SC_PARTIAL_CONTENT:
return new HttpMethodReleaseInputStream(response, status);
default:
throw new DefaultHttpResponseExceptionMappingService().map("Download {0} failed", new HttpResponseException(
response.getStatusLine().getStatusCode(), response.getStatusLine().getReasonPhrase()), file);
}
}
catch(ApiException e) {
throw new BrickExceptionMappingService().map("Download {0} failed", e, file);
}
catch(IOException e) {
throw new DefaultIOExceptionMappingService().map("Download {0} failed", e, file);
}
}
|
@Test
public void testReadRange() throws Exception {
final Path room = new BrickDirectoryFeature(session).mkdir(
new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final Path test = new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new BrickTouchFeature(session).touch(test, new TransferStatus());
final Local local = new Local(System.getProperty("java.io.tmpdir"), new AlphanumericRandomStringService().random());
final byte[] content = RandomUtils.nextBytes(1023);
final OutputStream out = local.getOutputStream(false);
assertNotNull(out);
IOUtils.write(content, out);
out.close();
final TransferStatus upload = new TransferStatus().withLength(content.length);
upload.setExists(true);
new BrickUploadFeature(session, new BrickWriteFeature(session)).upload(
test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED), new DisabledStreamListener(), upload,
new DisabledConnectionCallback());
final TransferStatus status = new TransferStatus();
status.setLength(content.length);
status.setAppend(true);
status.setOffset(100L);
final InputStream in = new BrickReadFeature(session).read(test, status.withLength(content.length - 100), new DisabledConnectionCallback());
assertNotNull(in);
final ByteArrayOutputStream buffer = new ByteArrayOutputStream(content.length - 100);
new StreamCopier(status, status).transfer(in, buffer);
final byte[] reference = new byte[content.length - 100];
System.arraycopy(content, 100, reference, 0, content.length - 100);
assertArrayEquals(reference, buffer.toByteArray());
in.close();
new BrickDeleteFeature(session).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
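The append branch in read() above emits one of two Range header shapes: an open-ended "bytes=<start>-" when the transfer end is unknown, and a bounded "bytes=<start>-<end>" otherwise. A minimal standalone sketch of that formatting rule (plain Java; UNKNOWN_LENGTH and rangeHeader are illustrative stand-ins, not Cyberduck API):

public final class RangeHeaderSketch {
    static final long UNKNOWN_LENGTH = -1L; // stand-in for TransferStatus.UNKNOWN_LENGTH
    // Mirrors the header construction in BrickReadFeature#read.
    static String rangeHeader(long start, long end) {
        return end == UNKNOWN_LENGTH
            ? String.format("bytes=%d-", start)         // resume to end of file
            : String.format("bytes=%d-%d", start, end); // bounded range
    }
    public static void main(String[] args) {
        System.out.println(rangeHeader(100L, UNKNOWN_LENGTH)); // bytes=100-
        System.out.println(rangeHeader(100L, 1022L));          // bytes=100-1022
    }
}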
public static boolean testURLPassesExclude(String url, String exclude) {
// If the url doesn't decode as UTF-8, return false; it could be trying to get around our rules with nonstandard encoding.
// If the exclude rule includes a "?" character, the url must exactly match the exclude rule.
// If the exclude rule does not contain the "?" character, we chop off everything starting at the first "?"
// in the URL, and the resulting url must exactly match the exclude rule. If the exclude ends with a "*"
// (wildcard) character, and wildcards are allowed in excludes, then the URL is allowed if it exactly
// matches everything before the "*" and there are no ".." characters (even encoded ones) after the "*".
String decodedUrl = null;
try {
decodedUrl = URLDecoder.decode(url, "UTF-8");
} catch (Exception e) {
return false;
}
if (exclude.endsWith("*") && ALLOW_WILDCARDS_IN_EXCLUDES.getValue()) {
if (url.startsWith(exclude.substring(0, exclude.length()-1))) {
// Now make sure that there are no ".." characters in the rest of the URL.
if (!decodedUrl.contains("..")) {
return true;
}
}
}
else if (exclude.contains("?")) {
if (url.equals(exclude)) {
return true;
}
}
else {
int paramIndex = url.indexOf("?");
if (paramIndex != -1) {
url = url.substring(0, paramIndex);
}
if (url.equals(exclude)) {
return true;
}
}
return false;
}
|
@Test
public void wildcardInExcludeBlockedWhenWildcardsNotAllowed() throws Exception {
AuthCheckFilter.ALLOW_WILDCARDS_IN_EXCLUDES.setValue(false);
assertFalse(AuthCheckFilter.testURLPassesExclude("setup/setup-new.jsp","setup/setup-*"));
}
|
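Taken together, the three branches of testURLPassesExclude behave as follows; a sketch of further assertions against the same AuthCheckFilter API, assuming wildcards are enabled via the same property toggle used in the test above:

@Test
public void excludeRuleExamples() throws Exception {
    AuthCheckFilter.ALLOW_WILDCARDS_IN_EXCLUDES.setValue(true);
    // Wildcard branch: prefix must match and no ".." may follow the "*".
    assertTrue(AuthCheckFilter.testURLPassesExclude("setup/setup-new.jsp", "setup/setup-*"));
    assertFalse(AuthCheckFilter.testURLPassesExclude("setup/setup-../admin.jsp", "setup/setup-*"));
    // Exclude containing "?": the URL must match it exactly.
    assertTrue(AuthCheckFilter.testURLPassesExclude("page.jsp?param=1", "page.jsp?param=1"));
    // Exclude without "?": the query string is chopped off before comparing.
    assertTrue(AuthCheckFilter.testURLPassesExclude("page.jsp?param=1", "page.jsp"));
}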
public static KTableHolder<GenericKey> build(
final KGroupedTableHolder groupedTable,
final TableAggregate aggregate,
final RuntimeBuildContext buildContext,
final MaterializedFactory materializedFactory) {
return build(
groupedTable,
aggregate,
buildContext,
materializedFactory,
new AggregateParamsFactory()
);
}
|
@Test
public void shouldBuildMaterializedWithCorrectNameForAggregate() {
// When:
aggregate.build(planBuilder, planInfo);
// Then:
verify(materializedFactory).create(any(), any(), eq("agg-regate-Materialize"));
}
|
@Udf(description = "Returns the hyperbolic sine of an INT value")
public Double sinh(
@UdfParameter(
value = "value",
description = "The value in radians to get the hyperbolic sine of."
) final Integer value
) {
return sinh(value == null ? null : value.doubleValue());
}
|
@Test
public void shouldHandleLessThanNegative2Pi() {
assertThat(udf.sinh(-9.1), closeTo(-4477.64629590835, 0.000000000000001));
assertThat(udf.sinh(-6.3), closeTo(-272.28503691057597, 0.000000000000001));
assertThat(udf.sinh(-7), closeTo(-548.3161232732465, 0.000000000000001));
assertThat(udf.sinh(-7L), closeTo(-548.3161232732465, 0.000000000000001));
}
|
public static String getSuffixName(String dirPath, String filePath) {
if (!FeConstants.runningUnitTest) {
Preconditions.checkArgument(filePath.startsWith(dirPath),
"dirPath " + dirPath + " should be prefix of filePath " + filePath);
}
// startsWith has already been checked, so just take the substring
String name = filePath.substring(dirPath.length());
if (name.startsWith("/")) {
name = name.substring(1);
}
return name;
}
|
@Test
public void testGetSuffixName() {
Assert.assertEquals("file", getSuffixName("/path/", "/path/file"));
Assert.assertEquals("file", getSuffixName("/path", "/path/file"));
Assert.assertEquals("file", getSuffixName("/dt=(a)/", "/dt=(a)/file"));
}
|
public static <T> Response call(RestUtils.RestCallable<T> callable,
AlluxioConfiguration alluxioConf, @Nullable Map<String, Object> headers) {
try {
// TODO(cc): reconsider how to enable authentication
if (SecurityUtils.isSecurityEnabled(alluxioConf)
&& AuthenticatedClientUser.get(alluxioConf) == null) {
AuthenticatedClientUser.set(ServerUserState.global().getUser().getName());
}
} catch (IOException e) {
LOG.warn("Failed to set AuthenticatedClientUser in REST service handler: {}", e.toString());
return createErrorResponse(e, alluxioConf);
}
try {
return createResponse(callable.call(), alluxioConf, headers);
} catch (Exception e) {
LOG.warn("Unexpected error invoking rest endpoint: {}", e.toString());
return createErrorResponse(e, alluxioConf);
}
}
|
@Test
public void voidOkResponse() {
Response response = RestUtils.call(new RestUtils.RestCallable<Void>() {
@Override
public Void call() throws Exception {
return null;
}
}, Configuration.global());
Assert.assertEquals(Response.Status.OK.getStatusCode(), response.getStatus());
Assert.assertNull(response.getEntity());
}
|
@VisibleForTesting
void addQueues(String args, SchedConfUpdateInfo updateInfo) {
if (args == null) {
return;
}
ArrayList<QueueConfigInfo> queueConfigInfos = new ArrayList<>();
for (String arg : args.split(";")) {
queueConfigInfos.add(getQueueConfigInfo(arg));
}
updateInfo.setAddQueueInfo(queueConfigInfos);
}
|
@Test(timeout = 10000)
public void testAddQueuesWithCommaInValue() {
SchedConfUpdateInfo schedUpdateInfo = new SchedConfUpdateInfo();
cli.addQueues("root.a:a1=a1Val1\\,a1Val2 a1Val3,a2=a2Val1\\,a2Val2",
schedUpdateInfo);
List<QueueConfigInfo> addQueueInfo = schedUpdateInfo.getAddQueueInfo();
Map<String, String> params = new HashMap<>();
params.put("a1", "a1Val1,a1Val2 a1Val3");
params.put("a2", "a2Val1,a2Val2");
validateQueueConfigInfo(addQueueInfo, 0, "root.a", params);
}
|
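addQueues above only splits queue entries on ";"; the per-queue key=value parsing, including the escaped commas exercised by the test, happens in getQueueConfigInfo, which is not shown. A hypothetical sketch of splitting on unescaped commas only (not the actual YARN helper):

// Splits on commas NOT preceded by a backslash, then unescapes "\," to ",".
static List<String> splitOnUnescapedCommas(String s) {
    List<String> parts = new ArrayList<>();
    for (String p : s.split("(?<!\\\\),")) {
        parts.add(p.replace("\\,", ","));
    }
    return parts;
}
// splitOnUnescapedCommas("a1=a1Val1\\,a1Val2,a2=a2Val1")
//   -> ["a1=a1Val1,a1Val2", "a2=a2Val1"]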
public Map<String, Properties> getNameServerConfig(final List<String> nameServers, long timeoutMillis)
throws InterruptedException,
RemotingTimeoutException, RemotingSendRequestException, RemotingConnectException,
MQClientException, UnsupportedEncodingException {
List<String> invokeNameServers = (nameServers == null || nameServers.isEmpty()) ?
this.remotingClient.getNameServerAddressList() : nameServers;
if (invokeNameServers == null || invokeNameServers.isEmpty()) {
return null;
}
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_NAMESRV_CONFIG, null);
Map<String, Properties> configMap = new HashMap<>(4);
for (String nameServer : invokeNameServers) {
RemotingCommand response = this.remotingClient.invokeSync(nameServer, request, timeoutMillis);
assert response != null;
if (ResponseCode.SUCCESS == response.getCode()) {
configMap.put(nameServer, MixAll.string2Properties(new String(response.getBody(), MixAll.DEFAULT_CHARSET)));
} else {
throw new MQClientException(response.getCode(), response.getRemark());
}
}
return configMap;
}
|
@Test
public void assertGetNameServerConfig() throws RemotingException, InterruptedException, UnsupportedEncodingException, MQClientException {
mockInvokeSync();
setResponseBody("{\"key\":\"value\"}");
Map<String, Properties> actual = mqClientAPI.getNameServerConfig(Collections.singletonList(defaultNsAddr), defaultTimeout);
assertNotNull(actual);
assertEquals(1, actual.size());
assertTrue(actual.containsKey(defaultNsAddr));
}
|
@Deprecated
@RequestMapping("/meta")
public List<ServiceDTO> getMetaService() {
return Collections.emptyList();
}
|
@Test
public void testGetMetaService() {
assertTrue(serviceController.getMetaService().isEmpty());
}
|
public Heap<T> insert(T item) {
data.add(item);
bubbleUp();
return this;
}
|
@Test
public void insert() {
Heap<Integer> h = new Heap<>(data, MIN);
assertEquals("incorrect size", 10, h.size());
h.insert(3);
assertEquals("incorrect size", 11, h.size());
}
|
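insert above delegates restoring the heap property to bubbleUp, which is not shown. A minimal sketch of the standard sift-up for an ArrayList-backed binary heap (the data field matches the method above; the comparator field is an assumption):

// Assumed fields: List<T> data (index 0 is the root) and a Comparator<T>
// encoding the MIN or MAX ordering seen in the test.
private void bubbleUp() {
    int child = data.size() - 1; // the newly appended item
    while (child > 0) {
        int parent = (child - 1) / 2;
        if (comparator.compare(data.get(child), data.get(parent)) >= 0) {
            break; // heap property holds
        }
        java.util.Collections.swap(data, child, parent);
        child = parent;
    }
}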
public synchronized int sendFetches() {
final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
sendFetchesInternal(
fetchRequests,
(fetchTarget, data, clientResponse) -> {
synchronized (Fetcher.this) {
handleFetchSuccess(fetchTarget, data, clientResponse);
}
},
(fetchTarget, data, error) -> {
synchronized (Fetcher.this) {
handleFetchFailure(fetchTarget, data, error);
}
});
return fetchRequests.size();
}
|
@Test
public void testFetchOffsetOutOfRange() {
buildFetcher();
assignFromUser(singleton(tp0));
subscriptions.seek(tp0, 0);
assertEquals(1, sendFetches());
client.prepareResponse(fullFetchResponse(tidp0, records, Errors.OFFSET_OUT_OF_RANGE, 100L, 0));
consumerClient.poll(time.timer(0));
assertEmptyFetch("Should not return records or advance position on fetch error");
assertTrue(subscriptions.isOffsetResetNeeded(tp0));
assertNull(subscriptions.validPosition(tp0));
assertNull(subscriptions.position(tp0));
}
|
@Override
public boolean tryFence(HAServiceTarget target, String argsStr)
throws BadFencingConfigurationException {
Args args = new Args(argsStr);
InetSocketAddress serviceAddr = target.getAddress();
String host = serviceAddr.getHostName();
Session session;
try {
session = createSession(serviceAddr.getHostName(), args);
} catch (JSchException e) {
LOG.warn("Unable to create SSH session", e);
return false;
}
LOG.info("Connecting to " + host + "...");
try {
session.connect(getSshConnectTimeout());
} catch (JSchException e) {
LOG.warn("Unable to connect to " + host
+ " as user " + args.user, e);
return false;
}
LOG.info("Connected to " + host);
try {
return doFence(session, serviceAddr);
} catch (JSchException e) {
LOG.warn("Unable to achieve fencing on remote host", e);
return false;
} finally {
session.disconnect();
}
}
|
@Test(timeout=20000)
public void testConnectTimeout() throws BadFencingConfigurationException {
Configuration conf = new Configuration();
conf.setInt(SshFenceByTcpPort.CONF_CONNECT_TIMEOUT_KEY, 3000);
SshFenceByTcpPort fence = new SshFenceByTcpPort();
fence.setConf(conf);
assertFalse(fence.tryFence(UNFENCEABLE_TARGET, ""));
}
|
@VisibleForTesting
static Path resolveEntropy(Path path, EntropyInjectingFileSystem efs, boolean injectEntropy)
throws IOException {
final String entropyInjectionKey = efs.getEntropyInjectionKey();
if (entropyInjectionKey == null) {
return path;
} else {
final URI originalUri = path.toUri();
final String checkpointPath = originalUri.getPath();
final int indexOfKey = checkpointPath.indexOf(entropyInjectionKey);
if (indexOfKey == -1) {
return path;
} else {
final StringBuilder buffer = new StringBuilder(checkpointPath.length());
buffer.append(checkpointPath, 0, indexOfKey);
if (injectEntropy) {
buffer.append(efs.generateEntropy());
}
buffer.append(
checkpointPath,
indexOfKey + entropyInjectionKey.length(),
checkpointPath.length());
final String rewrittenPath = buffer.toString();
try {
return new Path(
new URI(
originalUri.getScheme(),
originalUri.getAuthority(),
rewrittenPath,
originalUri.getQuery(),
originalUri.getFragment())
.normalize());
} catch (URISyntaxException e) {
// this could only happen if the injected entropy string contains invalid
// characters
throw new IOException(
"URI format error while processing path for entropy injection", e);
}
}
}
}
|
@Test
void testPathOnlyMatching() throws Exception {
EntropyInjectingFileSystem efs = new TestEntropyInjectingFs("_entropy_key_", "xyzz");
Path path = new Path("/path/_entropy_key_/file");
assertThat(EntropyInjector.resolveEntropy(path, efs, true))
.isEqualTo(new Path("/path/xyzz/file"));
assertThat(EntropyInjector.resolveEntropy(path, efs, false))
.isEqualTo(new Path("/path/file"));
}
|
@Override
public Object handle(String targetService, List<Object> invokers, Object invocation, Map<String, String> queryMap,
String serviceInterface) {
if (!shouldHandle(invokers)) {
return invokers;
}
List<Object> result = getTargetInvokersByRules(invokers, targetService);
return super.handle(targetService, result, invocation, queryMap, serviceInterface);
}
|
@Test
public void testGetTargetInvokerByConsumerTagRules() {
// initialize the routing rule
RuleInitializationUtils.initConsumerTagRules();
List<Object> invokers = new ArrayList<>();
Map<String, String> parameters1 = new HashMap<>();
parameters1.put(RouterConstant.PARAMETERS_KEY_PREFIX + "group", "red");
ApacheInvoker<Object> invoker1 = new ApacheInvoker<>("1.0.0", parameters1);
invokers.add(invoker1);
Map<String, String> parameters2 = new HashMap<>();
parameters2.put(RouterConstant.PARAMETERS_KEY_PREFIX + "group", "green");
ApacheInvoker<Object> invoker2 = new ApacheInvoker<>("1.0.1", parameters2);
invokers.add(invoker2);
Invocation invocation = new ApacheInvocation();
invocation.setAttachment("bar", "bar1");
Map<String, String> queryMap = new HashMap<>();
queryMap.put("side", "consumer");
queryMap.put("group", "red");
queryMap.put("version", "0.0.1");
queryMap.put("interface", "io.sermant.foo.FooTest");
Map<String, String> parameters = new HashMap<>();
parameters.put(RouterConstant.PARAMETERS_KEY_PREFIX + "group", "red");
DubboCache.INSTANCE.setParameters(parameters);
DubboCache.INSTANCE.putApplication("io.sermant.foo.FooTest", "foo");
List<Object> targetInvokers = (List<Object>) tagRouteHandler.handle(DubboCache.INSTANCE.getApplication("io.sermant.foo.FooTest")
, invokers, invocation, queryMap, "io.sermant.foo.FooTest");
Assert.assertEquals(1, targetInvokers.size());
Assert.assertEquals(invoker1, targetInvokers.get(0));
ConfigCache.getLabel(RouterConstant.DUBBO_CACHE_NAME).resetRouteRule(Collections.emptyMap());
}
|
@Override
public void remove(NamedNode master) {
connection.sync(RedisCommands.SENTINEL_REMOVE, master.getName());
}
|
@Test
public void testRemove() {
Collection<RedisServer> masters = connection.masters();
connection.remove(masters.iterator().next());
}
|
@VisibleForTesting
public WeightedPolicyInfo getWeightedPolicyInfo() {
return weightedPolicyInfo;
}
|
@Test
public void testPolicyInfoSetCorrectly() throws Exception {
serializeAndDeserializePolicyManager(wfp, expectedPolicyManager,
expectedAMRMProxyPolicy, expectedRouterPolicy);
// check the policyInfo propagates through ser/der correctly
Assert.assertEquals(
((PriorityBroadcastPolicyManager) wfp).getWeightedPolicyInfo(),
policyInfo);
}
|
public static <T extends PipelineOptions> T validate(Class<T> klass, PipelineOptions options) {
return validate(klass, options, false);
}
|
@Test
public void testSuperInterfaceRequiredOptionsAlsoRequiredInSubInterface() {
SubOptions subOpts = PipelineOptionsFactory.as(SubOptions.class);
subOpts.setFoo("Bar");
subOpts.setRunner(CrashingRunner.class);
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage("otherSuper");
expectedException.expectMessage("Missing required value");
expectedException.expectMessage("getSuperclassObj");
PipelineOptionsValidator.validate(SubOptions.class, subOpts);
}
|
@Override
protected int rsv(WebSocketFrame msg) {
return msg.rsv() | WebSocketExtension.RSV1;
}
|
@Test
public void testAlreadyCompressedFrame() {
EmbeddedChannel encoderChannel = new EmbeddedChannel(new PerFrameDeflateEncoder(9, 15, false));
// initialize
byte[] payload = new byte[300];
random.nextBytes(payload);
BinaryWebSocketFrame frame = new BinaryWebSocketFrame(true,
WebSocketExtension.RSV3 | WebSocketExtension.RSV1, Unpooled.wrappedBuffer(payload));
// execute
assertTrue(encoderChannel.writeOutbound(frame));
BinaryWebSocketFrame newFrame = encoderChannel.readOutbound();
// test
assertNotNull(newFrame);
assertNotNull(newFrame.content());
assertEquals(WebSocketExtension.RSV3 | WebSocketExtension.RSV1, newFrame.rsv());
assertEquals(300, newFrame.content().readableBytes());
byte[] finalPayload = new byte[300];
newFrame.content().readBytes(finalPayload);
assertArrayEquals(finalPayload, payload);
newFrame.release();
}
|
public void createReport() throws Exception {
// Create a new report
//
report = new MasterReport();
// Define where which transformation and step to read from, explain it to the reporting engine
//
KettleFileTableModel transMetaTableModel = new KettleFileTableModel( parentObject, filenames );
TableDataFactory dataFactory = new TableDataFactory( "default", transMetaTableModel );
// Give the data to the report at runtime!
//
report.setDataFactory( dataFactory );
// Add a report header and footer
//
ReportHeader reportHeader = new ReportHeader();
report.setReportHeader( reportHeader );
ReportFooter reportFooter = new ReportFooter();
report.setReportFooter( reportFooter );
// Now we need to define an area on which we can draw report elements, called groups and bands...
//
RelationalGroup group = new RelationalGroup();
group.addField( "filename" );
GroupDataBody groupData = new GroupDataBody();
ItemBand itemBand = new ItemBand();
itemBand.setVisible( true );
itemBand.setLayout( BandStyleKeys.LAYOUT_AUTO );
groupData.setItemBand( itemBand );
group.setBody( groupData );
report.setRootGroup( group );
// Put a title at the top of the report
//
/*
* LabelElementFactory labelElementFactory = new LabelElementFactory();
* labelElementFactory.setText("Kettle documentation"); labelElementFactory.setMinimumWidth(500f);
* labelElementFactory.setMinimumHeight(20f); labelElementFactory.setUnderline(true);
* labelElementFactory.setBold(true); Element label = labelElementFactory.createElement();
*
* // Add the label to the header... // reportHeader.addElement(label);
*/
int pagePosition = 0;
// Set the header to bold...
//
reportHeader.getStyle().setStyleProperty( TextStyleKeys.BOLD, true );
// Now add the filename to the report
//
pagePosition = createTextField( itemBand, "Filename: ", "filename", pagePosition );
// The name of the transformation
//
if ( options.isIncludingName() ) {
pagePosition = createTextField( itemBand, "Name: ", "name", pagePosition );
}
// The description of the transformation...
//
if ( options.isIncludingDescription() ) {
pagePosition = createTextField( itemBand, "Description: ", "description", pagePosition );
}
// The extended description of the transformation...
//
if ( options.isIncludingExtendedDescription() ) {
pagePosition = createTextField( itemBand, "Long description: ", "extended_description", pagePosition );
}
// Include a line with logging information
//
if ( options.isIncludingLoggingConfiguration() ) {
pagePosition = createTextField( itemBand, "Logging: ", "logging", pagePosition );
}
// Include a line with the creation date and user
//
if ( options.isIncludingCreated() ) {
pagePosition = createTextField( itemBand, "Creation: ", "creation", pagePosition );
}
// Include a line with the modification date and user
//
if ( options.isIncludingModified() ) {
pagePosition = createTextField( itemBand, "Modification: ", "modification", pagePosition );
}
// The last execution result
//
if ( options.isIncludingLastExecutionResult() ) {
pagePosition = createTextField( itemBand, "Last execution result: : ", "last_exec_result", pagePosition );
}
// Optionally include an image of the transformation...
//
if ( options.isIncludingImage() ) {
// for this to work the reporting engine must be able to see our classes, we do this by changing the thread
// classloader to be the plugin's classloader. see #render()
String bshCode =
"Object getValue() { "
+ Const.CR + " return new " + TransJobDrawable.class.getName() + "(dataRow, "
+ ( options.getOutputType() == OutputType.PDF ? "true" : "false" ) + ");" + Const.CR + "}";
BSHExpression bshExpression = new BSHExpression();
bshExpression.setExpression( bshCode );
bshExpression.setName( "getImage" );
report.addExpression( bshExpression );
ContentElementFactory contentElementFactory = new ContentElementFactory();
contentElementFactory.setName( "image" );
contentElementFactory.setAbsolutePosition( new Point( 0, pagePosition ) );
contentElementFactory.setMinimumWidth( 750f );
contentElementFactory.setMaximumWidth( 750f );
contentElementFactory.setMinimumHeight( 400f );
contentElementFactory.setMaximumHeight( 750f );
contentElementFactory.setScale( true );
contentElementFactory.setDynamicHeight( true );
Element imageElement = contentElementFactory.createElement();
imageElement
.setAttributeExpression( AttributeNames.Core.NAMESPACE, AttributeNames.Core.VALUE, bshExpression );
imageElement.setAttribute( AttributeNames.Core.NAMESPACE, AttributeNames.Core.IMAGE_ENCODING_QUALITY, "9" );
imageElement.setAttribute( AttributeNames.Core.NAMESPACE, AttributeNames.Core.IMAGE_ENCODING_TYPE, "PNG" );
ItemBand imageBand = new ItemBand();
imageBand.setLayout( BandStyleKeys.LAYOUT_ROW );
imageBand.addElement( imageElement );
itemBand.addElement( imageBand );
}
Paper a4Paper = new Paper();
double paperWidth = 8.26;
double paperHeight = 11.69;
a4Paper.setSize( paperWidth * 72.0, paperHeight * 72.0 );
/*
* set the margins respectively the imageable area
*/
double leftMargin = 0.78; /* should be about 2cm */
double rightMargin = 0.78;
double topMargin = 0.08; // this is a very small topMargin
double bottomMargin = 0.78;
a4Paper.setImageableArea(
leftMargin * 72.0, topMargin * 72.0, ( paperWidth - leftMargin - rightMargin ) * 72.0, ( paperHeight
- topMargin - bottomMargin ) * 72.0 );
/*
* create a PageFormat and associate the Paper with it.
*/
PageFormat pageFormat = new PageFormat();
pageFormat.setOrientation( PageFormat.LANDSCAPE );
pageFormat.setPaper( a4Paper );
SimplePageDefinition pageDefinition = new SimplePageDefinition( pageFormat );
report.setPageDefinition( pageDefinition );
}
|
@Test
public void createReport() throws Exception {
LoggingObjectInterface log = mock( LoggingObjectInterface.class );
AutoDocOptionsInterface options = mock( AutoDocOptionsInterface.class );
when( options.isIncludingImage() ).thenReturn( Boolean.TRUE );
KettleReportBuilder builder = new KettleReportBuilder( log, Collections.<ReportSubjectLocation>emptyList(), "", options );
builder.createReport();
assertNotNull( builder.getReport() );
assertNotNull( builder.getReport().getDataFactory() );
assertNotNull( builder.getReport().getReportHeader() );
assertNotNull( builder.getReport().getReportFooter() );
assertNotNull( builder.getReport().getRootGroup() );
assertNotNull( builder.getReport().getPageDefinition() );
assertTrue( builder.getReport().getExpressions().size() > 0 );
}
|
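The page-geometry constants in createReport are inches converted to PostScript points (1 point = 1/72 inch), the unit java.awt.print.Paper expects; the arithmetic works out as follows:

// A4: 8.26 in * 72 ≈ 594.7 pt wide, 11.69 in * 72 ≈ 841.7 pt tall.
// Imageable width with 0.78 in margins on both sides:
// (8.26 - 0.78 - 0.78) * 72 = 6.70 * 72 = 482.4 pt.
double widthPt = 8.26 * 72.0;
double heightPt = 11.69 * 72.0;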
public Map<Integer, Long> queryCorrectionOffset(final String addr, final String topic, final String group,
Set<String> filterGroup,
long timeoutMillis) throws MQClientException, RemotingConnectException, RemotingSendRequestException, RemotingTimeoutException,
InterruptedException {
QueryCorrectionOffsetHeader requestHeader = new QueryCorrectionOffsetHeader();
requestHeader.setCompareGroup(group);
requestHeader.setTopic(topic);
if (filterGroup != null) {
StringBuilder sb = new StringBuilder();
String splitor = "";
for (String s : filterGroup) {
sb.append(splitor).append(s);
splitor = ",";
}
requestHeader.setFilterGroups(sb.toString());
}
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.QUERY_CORRECTION_OFFSET, requestHeader);
RemotingCommand response = this.remotingClient.invokeSync(MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr),
request, timeoutMillis);
assert response != null;
switch (response.getCode()) {
case ResponseCode.SUCCESS: {
if (response.getBody() != null) {
QueryCorrectionOffsetBody body = QueryCorrectionOffsetBody.decode(response.getBody(), QueryCorrectionOffsetBody.class);
return body.getCorrectionOffsets();
}
}
default:
break;
}
throw new MQClientException(response.getCode(), response.getRemark());
}
|
@Test
public void assertQueryCorrectionOffset() throws RemotingException, InterruptedException, MQClientException {
mockInvokeSync();
QueryCorrectionOffsetBody responseBody = new QueryCorrectionOffsetBody();
responseBody.getCorrectionOffsets().put(1, 1L);
setResponseBody(responseBody);
Map<Integer, Long> actual = mqClientAPI.queryCorrectionOffset(defaultBrokerAddr, topic, group, new HashSet<>(), defaultTimeout);
assertNotNull(actual);
assertEquals(1, actual.size());
assertTrue(actual.containsKey(1));
assertTrue(actual.containsValue(1L));
}
|
public static String format(double amount, boolean isUseTraditional) {
return format(amount, isUseTraditional, false);
}
|
@Test
public void formatThousandLongTest() {
String f = NumberChineseFormatter.format(0, false);
assertEquals("้ถ", f);
f = NumberChineseFormatter.format(1, false);
assertEquals("ไธ", f);
f = NumberChineseFormatter.format(10, false);
assertEquals("ไธๅ", f);
f = NumberChineseFormatter.format(12, false);
assertEquals("ไธๅไบ", f);
f = NumberChineseFormatter.format(100, false);
assertEquals("ไธ็พ", f);
f = NumberChineseFormatter.format(101, false);
assertEquals("ไธ็พ้ถไธ", f);
f = NumberChineseFormatter.format(110, false);
assertEquals("ไธ็พไธๅ", f);
f = NumberChineseFormatter.format(112, false);
assertEquals("ไธ็พไธๅไบ", f);
f = NumberChineseFormatter.format(1000, false);
assertEquals("ไธๅ", f);
f = NumberChineseFormatter.format(1001, false);
assertEquals("ไธๅ้ถไธ", f);
f = NumberChineseFormatter.format(1010, false);
assertEquals("ไธๅ้ถไธๅ", f);
f = NumberChineseFormatter.format(1100, false);
assertEquals("ไธๅไธ็พ", f);
f = NumberChineseFormatter.format(1101, false);
assertEquals("ไธๅไธ็พ้ถไธ", f);
f = NumberChineseFormatter.format(9999, false);
assertEquals("ไนๅไน็พไนๅไน", f);
}
|
public static boolean createFile(final Path filePath) {
try {
final Path parent = filePath.getParent();
if (parent == null) {
return false;
}
if (Files.notExists(parent)) {
Files.createDirectories(parent);
}
if (Files.notExists(filePath)) {
Files.createFile(filePath);
}
return true;
} catch (final Exception e) {
return false;
}
}
|
@Test
void testCreateFileSubDir() {
Path subDirHistoryFile = Paths.get(realFolder.toFile().getPath(), "subdir", "history.file");
CliUtils.createFile(subDirHistoryFile);
assertThat(Files.exists(subDirHistoryFile)).isTrue();
}
|
@Override
public void updateInstanceStatus(String status) {
client.updateInstanceStatus(status);
}
|
@Test
public void updateInstanceStatus() {
nacosRegister.updateInstanceStatus(status);
Mockito.verify(client, Mockito.times(1)).updateInstanceStatus(status);
}
|
public static boolean isEmpty(String s) {
return s == null || s.isEmpty();
}
|
@SuppressWarnings("ConstantConditions")
@Test
public void testEmptyString() {
assertTrue(isEmpty(null));
assertTrue(isEmpty(""));
assertFalse(isEmpty("hello world"));
}
|
public static Date jsToDate( Object value, String classType ) throws KettleValueException {
double dbl;
if ( !classType.equalsIgnoreCase( JS_UNDEFINED ) ) {
if ( classType.equalsIgnoreCase( "org.mozilla.javascript.NativeDate" ) ) {
dbl = Context.toNumber( value );
} else if ( classType.equalsIgnoreCase( JS_NATIVE_JAVA_OBJ )
|| classType.equalsIgnoreCase( "java.util.Date" ) ) {
// Is it a java.util.Date instance?
try {
Date dat = (Date) Context.jsToJava( value, java.util.Date.class );
dbl = dat.getTime();
} catch ( Exception e ) {
// Is it a Value?
//
return convertValueToDate( value );
}
} else if ( classType.equalsIgnoreCase( "java.lang.Double" ) ) {
dbl = (Double) value;
} else {
String string = (String) Context.jsToJava( value, String.class );
dbl = Double.parseDouble( string );
}
long lng = Math.round( dbl );
return new Date( lng );
}
return null;
}
|
@Test
public void jsToDate_Undefined() throws Exception {
assertNull( JavaScriptUtils.jsToDate( null, UNDEFINED ) );
}
|
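Beyond the undefined case covered by the test, jsToDate treats a java.lang.Double as epoch milliseconds rounded to the nearest long; for example (hypothetical value):

// A JS number arriving as java.lang.Double becomes a Date at that epoch millis.
Date d = JavaScriptUtils.jsToDate(Double.valueOf(1.7e12), "java.lang.Double");
// d.getTime() == 1700000000000L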
public static String pickBestEncoding(String acceptHeader, Set<String> customMimeTypesSupported)
{
return pickBestEncoding(acceptHeader, null, customMimeTypesSupported);
}
|
@Test(dataProvider = "invalidHeaders")
public void testPickBestEncodingWithInvalidHeaders(String header)
{
try
{
RestUtils.pickBestEncoding(header, Collections.emptySet());
Assert.fail();
}
catch (RestLiServiceException e)
{
Assert.assertEquals(e.getStatus(), HttpStatus.S_400_BAD_REQUEST);
Assert.assertTrue(e.getMessage().matches("Encountered invalid MIME type '\\w*' in accept header."));
}
}
|
public static FromEndOfWindow pastEndOfWindow() {
return new FromEndOfWindow();
}
|
@Test
public void testFromEndOfWindowToString() {
Trigger trigger = AfterWatermark.pastEndOfWindow();
assertEquals("AfterWatermark.pastEndOfWindow()", trigger.toString());
}
|
public String getStageName() {
if(isBuilding()) {
try {
return buildLocator.split("/")[2];
} catch (ArrayIndexOutOfBoundsException e) {
return null;
}
}
return null;
}
|
@Test
public void shouldReturnTheStageName() {
AgentBuildingInfo agentBuildingInfo = new AgentBuildingInfo("buildInfo", "foo/1/bar");
assertThat(agentBuildingInfo.getStageName(), is("bar"));
}
|
@Override
public void debug(String msg) {
logger.debug(msg);
}
|
@Test
void testMarkerDebugWithException() {
Exception exception = new Exception();
jobRunrDashboardLogger.debug(marker, "Debug", exception);
verify(slfLogger).debug(marker, "Debug", exception);
}
|
public <T extends AwsSyncClientBuilder> void applyHttpClientConfigurations(T builder) {
if (Strings.isNullOrEmpty(httpClientType)) {
httpClientType = CLIENT_TYPE_DEFAULT;
}
switch (httpClientType) {
case CLIENT_TYPE_URLCONNECTION:
UrlConnectionHttpClientConfigurations urlConnectionHttpClientConfigurations =
loadHttpClientConfigurations(UrlConnectionHttpClientConfigurations.class.getName());
urlConnectionHttpClientConfigurations.configureHttpClientBuilder(builder);
break;
case CLIENT_TYPE_APACHE:
ApacheHttpClientConfigurations apacheHttpClientConfigurations =
loadHttpClientConfigurations(ApacheHttpClientConfigurations.class.getName());
apacheHttpClientConfigurations.configureHttpClientBuilder(builder);
break;
default:
throw new IllegalArgumentException("Unrecognized HTTP client type " + httpClientType);
}
}
|
@Test
public void testInvalidHttpClientType() {
Map<String, String> properties = Maps.newHashMap();
properties.put(HttpClientProperties.CLIENT_TYPE, "test");
HttpClientProperties httpProperties = new HttpClientProperties(properties);
S3ClientBuilder s3ClientBuilder = S3Client.builder();
assertThatThrownBy(() -> httpProperties.applyHttpClientConfigurations(s3ClientBuilder))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Unrecognized HTTP client type test");
}
|
@Nullable
public static ZNRecord fetchTaskMetadata(HelixPropertyStore<ZNRecord> propertyStore, String taskType,
String tableNameWithType) {
String newPath = ZKMetadataProvider.constructPropertyStorePathForMinionTaskMetadata(tableNameWithType, taskType);
if (propertyStore.exists(newPath, AccessOption.PERSISTENT)) {
return fetchTaskMetadata(propertyStore, newPath);
} else {
return fetchTaskMetadata(propertyStore,
ZKMetadataProvider.constructPropertyStorePathForMinionTaskMetadataDeprecated(taskType, tableNameWithType));
}
}
|
@Test
public void testFetchTaskMetadata() {
// no metadata path exists
HelixPropertyStore<ZNRecord> propertyStore = new FakePropertyStore();
assertNull(MinionTaskMetadataUtils.fetchTaskMetadata(propertyStore, TASK_TYPE, TABLE_NAME_WITH_TYPE));
// only the old metadata path exists
propertyStore = new FakePropertyStore();
propertyStore.set(OLD_MINION_METADATA_PATH, OLD_TASK_METADATA.toZNRecord(), EXPECTED_VERSION, ACCESS_OPTION);
assertEquals(MinionTaskMetadataUtils.fetchTaskMetadata(propertyStore, TASK_TYPE, TABLE_NAME_WITH_TYPE),
OLD_TASK_METADATA.toZNRecord());
// only the new metadata path exists
propertyStore = new FakePropertyStore();
propertyStore.set(NEW_MINION_METADATA_PATH, NEW_TASK_METADATA.toZNRecord(), EXPECTED_VERSION, ACCESS_OPTION);
assertEquals(MinionTaskMetadataUtils.fetchTaskMetadata(propertyStore, TASK_TYPE, TABLE_NAME_WITH_TYPE),
NEW_TASK_METADATA.toZNRecord());
// if two metadata paths exist at the same time, the new one will be used.
propertyStore = new FakePropertyStore();
propertyStore.set(OLD_MINION_METADATA_PATH, OLD_TASK_METADATA.toZNRecord(), EXPECTED_VERSION, ACCESS_OPTION);
propertyStore.set(NEW_MINION_METADATA_PATH, NEW_TASK_METADATA.toZNRecord(), EXPECTED_VERSION, ACCESS_OPTION);
assertEquals(MinionTaskMetadataUtils.fetchTaskMetadata(propertyStore, TASK_TYPE, TABLE_NAME_WITH_TYPE),
NEW_TASK_METADATA.toZNRecord());
}
|
public static void initRequestFromEntity(HttpRequestBase requestBase, Map<String, String> body, String charset)
throws Exception {
if (body == null || body.isEmpty()) {
return;
}
List<NameValuePair> params = new ArrayList<>(body.size());
for (Map.Entry<String, String> entry : body.entrySet()) {
params.add(new BasicNameValuePair(entry.getKey(), entry.getValue()));
}
if (requestBase instanceof HttpEntityEnclosingRequest) {
HttpEntityEnclosingRequest request = (HttpEntityEnclosingRequest) requestBase;
HttpEntity entity = new UrlEncodedFormEntity(params, charset);
request.setEntity(entity);
}
}
|
@Test
void testInitRequestFromEntity3() throws Exception {
BaseHttpMethod.HttpGetWithEntity httpRequest = new BaseHttpMethod.HttpGetWithEntity("");
HttpUtils.initRequestFromEntity(httpRequest, Collections.emptyMap(), "UTF-8");
// nothing changed
assertEquals(new BaseHttpMethod.HttpGetWithEntity("").getEntity(), httpRequest.getEntity());
}
|
@Override
public boolean isIn(String ipAddress) {
// Has the cache expired?
// Uses double-checked locking on a volatile timestamp
if (cacheExpiryTimeStamp >= 0 && cacheExpiryTimeStamp < System.currentTimeMillis()) {
synchronized(this) {
//check if cache expired again
if (cacheExpiryTimeStamp < System.currentTimeMillis()) {
reset();
}
}
}
return ipList.isIn(ipAddress);
}
|
@Test
public void testRemovalWithSleepForCacheTimeout() throws IOException, InterruptedException {
String[] ips = {"10.119.103.112", "10.221.102.0/23",
"10.222.0.0/16", "10.113.221.221", "10.113.221.222"};
TestFileBasedIPList.createFileWithEntries ("ips.txt", ips);
CacheableIPList cipl = new CacheableIPList(
new FileBasedIPList("ips.txt"),100);
assertTrue("10.113.221.222 is not in the list",
cipl.isIn("10.113.221.222"));
assertTrue ("10.222.103.121 is not in the list",
cipl.isIn("10.222.103.121"));
TestFileBasedIPList.removeFile("ips.txt");
String[] ips2 = {"10.119.103.112", "10.221.102.0/23", "10.113.221.221"};
TestFileBasedIPList.createFileWithEntries ("ips.txt", ips2);
Thread.sleep(1005);
assertFalse("10.113.221.222 is in the list",
cipl.isIn("10.113.221.222"));
assertFalse ("10.222.103.121 is in the list",
cipl.isIn("10.222.103.121"));
TestFileBasedIPList.removeFile("ips.txt");
}
|
public static ShowResultSet execute(ShowStmt statement, ConnectContext context) {
return GlobalStateMgr.getCurrentState().getShowExecutor().showExecutorVisitor.visit(statement, context);
}
|
@Test
public void testShowCreateExternalCatalogNotExists() {
new MockUp<CatalogMgr>() {
@Mock
public Catalog getCatalogByName(String name) {
return null;
}
};
ShowCreateExternalCatalogStmt stmt = new ShowCreateExternalCatalogStmt("catalog_not_exist");
ExceptionChecker.expectThrowsWithMsg(SemanticException.class, "Unknown catalog 'catalog_not_exist'",
() -> ShowExecutor.execute(stmt, ctx));
}
|
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
for(Path f : files.keySet()) {
try {
new FilesApi(new BrickApiClient(session)).deleteFilesPath(
StringUtils.removeStart(f.getAbsolute(), String.valueOf(Path.DELIMITER)), f.isDirectory());
}
catch(ApiException e) {
throw new BrickExceptionMappingService().map("Cannot delete {0}", e, f);
}
}
}
|
@Test
public void testDeleteWithLock() throws Exception {
final Path test = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final Local local = new Local(System.getProperty("java.io.tmpdir"), test.getName());
final byte[] random = RandomUtils.nextBytes(2547);
IOUtils.write(random, local.getOutputStream(false));
final TransferStatus status = new TransferStatus().withLength(random.length);
new BrickUploadFeature(session, new BrickWriteFeature(session)).upload(test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED),
new DisabledStreamListener(), status, new DisabledLoginCallback());
local.delete();
final String lock = new BrickLockFeature(session).lock(test);
assertNotNull(lock);
new BrickDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
public CompletableFuture<Map<TopicIdPartition, PartitionData>> fetchMessages(
String groupId,
String memberId,
FetchParams fetchParams,
Map<TopicIdPartition, Integer> partitionMaxBytes
) {
log.trace("Fetch request for topicIdPartitions: {} with groupId: {} fetch params: {}",
partitionMaxBytes.keySet(), groupId, fetchParams);
CompletableFuture<Map<TopicIdPartition, PartitionData>> future = new CompletableFuture<>();
ShareFetchPartitionData shareFetchPartitionData = new ShareFetchPartitionData(fetchParams, groupId, memberId, future, partitionMaxBytes);
fetchQueue.add(shareFetchPartitionData);
maybeProcessFetchQueue();
return future;
}
|
@Test
public void testReplicaManagerFetchShouldProceed() {
String groupId = "grp";
Uuid memberId = Uuid.randomUuid();
FetchParams fetchParams = new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, 0,
1, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty());
Uuid fooId = Uuid.randomUuid();
TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0));
Map<TopicIdPartition, Integer> partitionMaxBytes = new HashMap<>();
partitionMaxBytes.put(tp0, PARTITION_MAX_BYTES);
ReplicaManager replicaManager = mock(ReplicaManager.class);
SharePartition sp0 = mock(SharePartition.class);
when(sp0.maybeAcquireFetchLock()).thenReturn(true);
when(sp0.canAcquireRecords()).thenReturn(true);
Map<SharePartitionManager.SharePartitionKey, SharePartition> partitionCacheMap = new HashMap<>();
partitionCacheMap.put(new SharePartitionManager.SharePartitionKey(groupId, tp0), sp0);
SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder()
.withPartitionCacheMap(partitionCacheMap).withReplicaManager(replicaManager).build();
sharePartitionManager.fetchMessages(groupId, memberId.toString(), fetchParams, partitionMaxBytes);
// Since the nextFetchOffset does not point to endOffset + 1, i.e. some of the records in the cachedState are AVAILABLE,
// even though the maxInFlightMessages limit is exceeded, replicaManager.fetchMessages should be called
Mockito.verify(replicaManager, times(1)).fetchMessages(
any(), any(), any(ReplicaQuota.class), any());
}
|
@Override
public synchronized ConsumerStats getStats() {
if (stats == null) {
return null;
}
stats.reset();
consumers.forEach((partition, consumer) -> stats.updateCumulativeStats(partition, consumer.getStats()));
return stats;
}
|
@Test
public void testGetStats() throws Exception {
String topicName = "test-stats";
ClientConfigurationData conf = new ClientConfigurationData();
// The IP and port are arbitrary: the test will attempt to make a connection,
// but a successful connection is not needed for the test to pass. This test should be improved.
conf.setServiceUrl("pulsar://127.0.0.99:23456");
conf.setStatsIntervalSeconds(100);
ThreadFactory threadFactory = new DefaultThreadFactory("client-test-stats", Thread.currentThread().isDaemon());
@Cleanup("shutdown")
EventLoopGroup eventLoopGroup = EventLoopUtil.newEventLoopGroup(conf.getNumIoThreads(), false, threadFactory);
@Cleanup("shutdownNow")
ExecutorProvider executorProvider = new ExecutorProvider(1, "client-test-stats");
@Cleanup
PulsarClientImpl clientImpl = new PulsarClientImpl(conf, eventLoopGroup);
ConsumerConfigurationData consumerConfData = new ConsumerConfigurationData();
consumerConfData.setTopicNames(Sets.newHashSet(topicName));
assertEquals(Long.parseLong("100"), clientImpl.getConfiguration().getStatsIntervalSeconds());
MultiTopicsConsumerImpl impl = new MultiTopicsConsumerImpl(
clientImpl, consumerConfData,
executorProvider, null, Schema.BYTES, null, true);
impl.getStats();
clientImpl.close();
executorProvider.shutdownNow();
eventLoopGroup.shutdownGracefully().get();
}
|
private void setOptions( JMSProducer producer ) {
String optionValue = meta.getDisableMessageId();
getLogChannel().logDebug( "Disable Message ID is set to " + optionValue );
if ( !StringUtil.isEmpty( optionValue ) ) {
producer.setDisableMessageID( BooleanUtils.toBoolean( optionValue ) );
}
optionValue = meta.getDisableMessageTimestamp();
getLogChannel().logDebug( "Disable Message Timestamp is set to " + optionValue );
if ( !StringUtil.isEmpty( optionValue ) ) {
producer.setDisableMessageTimestamp( BooleanUtils.toBoolean( optionValue ) );
}
optionValue = meta.getDeliveryMode();
getLogChannel().logDebug( "Delivery Mode is set to " + optionValue );
if ( !StringUtil.isEmpty( optionValue ) ) {
producer.setDeliveryMode( Integer.parseInt( optionValue ) );
}
optionValue = meta.getPriority();
getLogChannel().logDebug( "Priority is set to " + optionValue );
if ( !StringUtil.isEmpty( optionValue ) ) {
producer.setPriority( Integer.parseInt( optionValue ) );
}
optionValue = meta.getTimeToLive();
getLogChannel().logDebug( "Time to Live is set to " + optionValue );
if ( !StringUtil.isEmpty( optionValue ) ) {
producer.setTimeToLive( Long.parseLong( optionValue ) );
}
optionValue = meta.getDeliveryDelay();
getLogChannel().logDebug( "Delivery Delay is set to " + optionValue );
if ( !StringUtil.isEmpty( optionValue ) ) {
producer.setDeliveryDelay( Long.parseLong( optionValue ) );
}
optionValue = meta.getJmsCorrelationId();
getLogChannel().logDebug( "JMS Correlation ID is set to " + optionValue );
if ( !StringUtil.isEmpty( optionValue ) ) {
producer.setJMSCorrelationID( optionValue );
}
optionValue = meta.getJmsType();
getLogChannel().logDebug( "JMS Type is set to " + optionValue );
if ( !StringUtil.isEmpty( optionValue ) ) {
producer.setJMSType( optionValue );
}
}
|
@Test
public void testSetOptions() throws KettleException {
step.init( meta, data );
//Defaults
step.processRow( meta, data );
assertEquals( false, step.producer.getDisableMessageID() );
assertEquals( false, step.producer.getDisableMessageTimestamp() );
assertEquals( 2, step.producer.getDeliveryMode() );
assertEquals( 4, step.producer.getPriority() );
assertEquals( 0, step.producer.getTimeToLive() );
assertEquals( 0, step.producer.getDeliveryDelay() );
assertNull( step.producer.getJMSCorrelationID() );
assertNull( step.producer.getJMSType() );
}
|
@Subscribe
public void publishClusterEvent(Object event) {
if (event instanceof DeadEvent) {
LOG.debug("Skipping DeadEvent on cluster event bus");
return;
}
final String className = AutoValueUtils.getCanonicalName(event.getClass());
final ClusterEvent clusterEvent = ClusterEvent.create(nodeId.getNodeId(), className, Collections.singleton(nodeId.getNodeId()), event);
try {
final String id = dbCollection.save(clusterEvent, WriteConcern.JOURNALED).getSavedId();
// We are handling a locally generated event, so we can speed up processing by posting it to the local event
// bus immediately. Due to having added the local node id to its list of consumers, it will not be picked up
// by the db cursor again, avoiding double processing of the event. See #11263 for details.
serverEventBus.post(event);
LOG.debug("Published cluster event with ID <{}> and type <{}>", id, className);
} catch (MongoException e) {
LOG.error("Couldn't publish cluster event of type <" + className + ">", e);
}
}
|
@Test
public void publishClusterEventSkipsDeadEvent() throws Exception {
@SuppressWarnings("deprecation")
DBCollection collection = mongoConnection.getDatabase().getCollection(ClusterEventPeriodical.COLLECTION_NAME);
DeadEvent event = new DeadEvent(clusterEventBus, new SimpleEvent("test"));
assertThat(collection.count()).isEqualTo(0L);
clusterEventPeriodical.publishClusterEvent(event);
verify(clusterEventBus, never()).post(any());
assertThat(collection.count()).isEqualTo(0L);
}
|
public static List<String> notEmptyElements(List<String> list, String name) {
notNull(list, name);
for (int i = 0; i < list.size(); i++) {
notEmpty(list.get(i), MessageFormat.format("list [{0}] element [{1}]", name, i));
}
return list;
}
|
@Test(expected = IllegalArgumentException.class)
public void notEmptyElementsNullList() {
Check.notEmptyElements(null, "name");
}
|
@Override
public String render(String text) {
if (StringUtils.isBlank(text)) {
return "";
}
if (regex.isEmpty() || link.isEmpty()) {
Comment comment = new Comment();
comment.escapeAndAdd(text);
return comment.render();
}
try {
Matcher matcher = Pattern.compile(regex).matcher(text);
int start = 0;
Comment comment = new Comment();
while (hasMatch(matcher)) {
comment.escapeAndAdd(text.substring(start, matcher.start()));
comment.add(dynamicLink(matcher));
start = matcher.end();
}
comment.escapeAndAdd(text.substring(start));
return comment.render();
} catch (PatternSyntaxException e) {
LOGGER.warn("Illegal regular expression: {} - {}", regex, e.getMessage());
}
return text;
}
|
@Test
public void shouldRenderStringWithRegexThatHasSubSelect() throws Exception {
String link = "http://mingle05/projects/cce/cards/${ID}";
String regex = "evo-(\\d+)";
trackingTool = new DefaultCommentRenderer(link, regex);
String result = trackingTool.render("evo-111: checkin message");
assertThat(result,
is("<a href=\"" + "http://mingle05/projects/cce/cards/111\" "
+ "target=\"story_tracker\">evo-111</a>: checkin message"));
}
|
public static String removeLeadingAndEndingQuotes(final String s) {
if (ObjectHelper.isEmpty(s)) {
return s;
}
String copy = s.trim();
if (copy.length() < 2) {
return s;
}
if (copy.startsWith("'") && copy.endsWith("'")) {
return copy.substring(1, copy.length() - 1);
}
if (copy.startsWith("\"") && copy.endsWith("\"")) {
return copy.substring(1, copy.length() - 1);
}
// no quotes, so return as-is
return s;
}
|
@Test
public void testRemoveLeadingAndEndingQuotesWithSpaces() {
assertNull(StringHelper.removeLeadingAndEndingQuotes(null));
assertEquals(" ", StringHelper.removeLeadingAndEndingQuotes(" "));
assertEquals("Hello World", StringHelper.removeLeadingAndEndingQuotes("Hello World"));
assertEquals("Hello World", StringHelper.removeLeadingAndEndingQuotes("'Hello World'"));
assertEquals("Hello World", StringHelper.removeLeadingAndEndingQuotes("\"Hello World\""));
assertEquals("Hello 'Camel'", StringHelper.removeLeadingAndEndingQuotes("Hello 'Camel'"));
}
|
public static boolean isWebService(Optional<String> serviceName) {
return serviceName.isPresent()
&& IS_PLAIN_HTTP_BY_KNOWN_WEB_SERVICE_NAME.containsKey(
Ascii.toLowerCase(serviceName.get()));
}
|
@Test
public void isWebService_whenHttpsService_returnsTrue() {
assertThat(
NetworkServiceUtils.isWebService(
NetworkService.newBuilder().setServiceName("https").build()))
.isTrue();
}
|
@Override
public Optional<ErrorResponse> filter(DiscFilterRequest request) {
try {
Optional<ResourceNameAndAction> resourceMapping =
requestResourceMapper.getResourceNameAndAction(request);
log.log(Level.FINE, () -> String.format("Resource mapping for '%s': %s", request, resourceMapping));
if (resourceMapping.isEmpty()) {
incrementAcceptedMetrics(request, false, Optional.empty());
return Optional.empty();
}
Result result = checkAccessAllowed(request, resourceMapping.get());
AuthorizationResult.Type resultType = result.zpeResult.type();
setAttribute(request, RESULT_ATTRIBUTE, resultType.name());
if (resultType == AuthorizationResult.Type.ALLOW) {
populateRequestWithResult(request, result);
incrementAcceptedMetrics(request, true, Optional.of(result));
return Optional.empty();
}
log.log(Level.FINE, () -> String.format("Forbidden (403) for '%s': %s", request, resultType.name()));
incrementRejectedMetrics(request, FORBIDDEN, resultType.name(), Optional.of(result));
return Optional.of(new ErrorResponse(FORBIDDEN, "Access forbidden: " + resultType.getDescription()));
} catch (IllegalArgumentException e) {
log.log(Level.FINE, () -> String.format("Unauthorized (401) for '%s': %s", request, e.getMessage()));
incrementRejectedMetrics(request, UNAUTHORIZED, "Unauthorized", Optional.empty());
return Optional.of(new ErrorResponse(UNAUTHORIZED, e.getMessage()));
}
}
|
@Test
void returns_unauthorized_for_request_with_disabled_credential_type() {
AthenzAuthorizationFilter filter =
createFilter(new AllowingZpe(), List.of(EnabledCredentials.ROLE_CERTIFICATE, EnabledCredentials.ACCESS_TOKEN));
MockResponseHandler responseHandler = new MockResponseHandler();
DiscFilterRequest request = createRequest(ROLE_TOKEN, null, null);
filter.filter(request, responseHandler);
assertStatusCode(responseHandler, 401);
assertErrorMessage(responseHandler, "Not authorized - request did not contain any of the allowed credentials: " +
"[Athenz X.509 role certificate, Athenz access token with X.509 identity certificate]");
}
|
@Override
public Meter submit(MeterRequest request) {
checkNotNull(request, "request cannot be null.");
MeterCellId cellId;
if (request.index().isPresent()) {
checkArgument(userDefinedIndex, "Index cannot be provided when userDefinedIndex mode is disabled");
// User provides index
if (request.scope().isGlobal()) {
cellId = MeterId.meterId(request.index().get());
} else {
cellId = PiMeterCellId.ofIndirect(
PiMeterId.of(request.scope().id()), request.index().get());
}
} else {
checkArgument(!userDefinedIndex, "Index cannot be allocated when userDefinedIndex mode is enabled");
// Allocate an id
cellId = allocateMeterId(request.deviceId(), request.scope());
}
Meter.Builder mBuilder = DefaultMeter.builder()
.forDevice(request.deviceId())
.fromApp(request.appId())
.withBands(request.bands())
.withCellId(cellId)
.withUnit(request.unit());
if (request.isBurst()) {
mBuilder.burst();
}
if (request.annotations() != null && !request.annotations().keys().isEmpty()) {
mBuilder.withAnnotations(request.annotations());
}
DefaultMeter m = (DefaultMeter) mBuilder.build();
// Meter installation logic (happy path)
// PENDING -> stats -> ADDED -> future completes
m.setState(MeterState.PENDING_ADD);
store.addOrUpdateMeter(m).whenComplete((result, error) ->
onComplete.accept(request, result, error));
return m;
}
|
@Test(expected = IllegalArgumentException.class)
public void testWrongAdd() {
initMeterStore();
manager.submit(userDefinedRequest.add());
}
|
@Override
public void reset() {
resetCount++;
super.reset();
initEvaluatorMap();
initCollisionMaps();
root.recursiveReset();
resetTurboFilterList();
cancelScheduledTasks();
fireOnReset();
resetListenersExceptResetResistant();
resetStatusListenersExceptResetResistant();
}
|
@SuppressWarnings("unchecked")
@Test
public void collisionMapsPostReset() {
lc.reset();
Map<String, String> fileCollisions = (Map<String, String>) lc.getObject(FA_FILENAME_COLLISION_MAP);
assertNotNull(fileCollisions);
assertTrue(fileCollisions.isEmpty());
Map<String, FileNamePattern> filenamePatternCollisionMap = (Map<String, FileNamePattern>) lc.getObject(
CoreConstants.RFA_FILENAME_PATTERN_COLLISION_MAP);
assertNotNull(filenamePatternCollisionMap);
assertTrue(filenamePatternCollisionMap.isEmpty());
}
|
public static GoPluginBundleDescriptor parseXML(InputStream pluginXml,
BundleOrPluginFileDetails bundleOrPluginJarFile) throws IOException, JAXBException, XMLStreamException, SAXException {
return parseXML(pluginXml, bundleOrPluginJarFile.file().getAbsolutePath(), bundleOrPluginJarFile.extractionLocation(), bundleOrPluginJarFile.isBundledPlugin());
}
|
@Test
void shouldNotAllowPluginWithEmptyListOfExtensionsInABundle() throws Exception {
try (InputStream pluginXml = getClass().getClassLoader().getResourceAsStream("defaultFiles/gocd-bundle-with-no-extension-classes.xml")) {
final JAXBException e = assertThrows(JAXBException.class, () ->
parseXML(pluginXml, "/tmp/a.jar", new File("/tmp/"), true));
assertTrue(e.getCause().getMessage().contains("The content of element 'extensions' is not complete. One of '{extension}' is expected"), format("Message not correct: [%s]", e.getCause().getMessage()));
}
}
|
@Override
public ListenableFuture<HttpResponse> sendAsync(HttpRequest httpRequest) {
return sendAsync(httpRequest, null);
}
|
@Test
public void sendAsync_whenPostRequestWithEmptyHeaders_returnsExpectedHttpResponse()
throws IOException, ExecutionException, InterruptedException {
String responseBody = "{ \"test\": \"json\" }";
mockWebServer.enqueue(
new MockResponse()
.setResponseCode(HttpStatus.OK.code())
.setHeader(CONTENT_TYPE, MediaType.JSON_UTF_8.toString())
.setBody(responseBody));
mockWebServer.start();
String requestUrl = mockWebServer.url("/test/post").toString();
HttpResponse response = httpClient.sendAsync(post(requestUrl).withEmptyHeaders().build()).get();
assertThat(response)
.isEqualTo(
HttpResponse.builder()
.setStatus(HttpStatus.OK)
.setHeaders(
HttpHeaders.builder()
.addHeader(CONTENT_TYPE, MediaType.JSON_UTF_8.toString())
// MockWebServer always adds this response header.
.addHeader(CONTENT_LENGTH, String.valueOf(responseBody.length()))
.build())
.setBodyBytes(ByteString.copyFrom(responseBody, UTF_8))
.setResponseUrl(HttpUrl.parse(requestUrl))
.build());
}
|
public static SinkConfig validateUpdate(SinkConfig existingConfig, SinkConfig newConfig) {
SinkConfig mergedConfig = clone(existingConfig);
if (!existingConfig.getTenant().equals(newConfig.getTenant())) {
throw new IllegalArgumentException("Tenants differ");
}
if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) {
throw new IllegalArgumentException("Namespaces differ");
}
if (!existingConfig.getName().equals(newConfig.getName())) {
throw new IllegalArgumentException("Sink Names differ");
}
if (!StringUtils.isEmpty(newConfig.getClassName())) {
mergedConfig.setClassName(newConfig.getClassName());
}
if (!StringUtils.isEmpty(newConfig.getSourceSubscriptionName()) && !newConfig.getSourceSubscriptionName()
.equals(existingConfig.getSourceSubscriptionName())) {
throw new IllegalArgumentException("Subscription Name cannot be altered");
}
if (newConfig.getInputSpecs() == null) {
newConfig.setInputSpecs(new HashMap<>());
}
if (mergedConfig.getInputSpecs() == null) {
mergedConfig.setInputSpecs(new HashMap<>());
}
if (!StringUtils.isEmpty(newConfig.getLogTopic())) {
mergedConfig.setLogTopic(newConfig.getLogTopic());
}
if (newConfig.getInputs() != null) {
newConfig.getInputs().forEach((topicName -> {
newConfig.getInputSpecs().putIfAbsent(topicName,
ConsumerConfig.builder().isRegexPattern(false).build());
}));
}
if (newConfig.getTopicsPattern() != null && !newConfig.getTopicsPattern().isEmpty()) {
newConfig.getInputSpecs().put(newConfig.getTopicsPattern(),
ConsumerConfig.builder()
.isRegexPattern(true)
.build());
}
if (newConfig.getTopicToSerdeClassName() != null) {
newConfig.getTopicToSerdeClassName().forEach((topicName, serdeClassName) -> {
newConfig.getInputSpecs().put(topicName,
ConsumerConfig.builder()
.serdeClassName(serdeClassName)
.isRegexPattern(false)
.build());
});
}
if (newConfig.getTopicToSchemaType() != null) {
newConfig.getTopicToSchemaType().forEach((topicName, schemaClassname) -> {
newConfig.getInputSpecs().put(topicName,
ConsumerConfig.builder()
.schemaType(schemaClassname)
.isRegexPattern(false)
.build());
});
}
if (!newConfig.getInputSpecs().isEmpty()) {
SinkConfig finalMergedConfig = mergedConfig;
newConfig.getInputSpecs().forEach((topicName, consumerConfig) -> {
if (!existingConfig.getInputSpecs().containsKey(topicName)) {
throw new IllegalArgumentException("Input Topics cannot be altered");
}
if (consumerConfig.isRegexPattern() != existingConfig.getInputSpecs().get(topicName).isRegexPattern()) {
throw new IllegalArgumentException(
"isRegexPattern for input topic " + topicName + " cannot be altered");
}
finalMergedConfig.getInputSpecs().put(topicName, consumerConfig);
});
}
if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees()
.equals(existingConfig.getProcessingGuarantees())) {
throw new IllegalArgumentException("Processing Guarantees cannot be altered");
}
if (newConfig.getConfigs() != null) {
mergedConfig.setConfigs(newConfig.getConfigs());
}
if (newConfig.getSecrets() != null) {
mergedConfig.setSecrets(newConfig.getSecrets());
}
if (newConfig.getParallelism() != null) {
mergedConfig.setParallelism(newConfig.getParallelism());
}
if (newConfig.getRetainOrdering() != null && !newConfig.getRetainOrdering()
.equals(existingConfig.getRetainOrdering())) {
throw new IllegalArgumentException("Retain Ordering cannot be altered");
}
if (newConfig.getRetainKeyOrdering() != null && !newConfig.getRetainKeyOrdering()
.equals(existingConfig.getRetainKeyOrdering())) {
throw new IllegalArgumentException("Retain Key Ordering cannot be altered");
}
if (newConfig.getAutoAck() != null && !newConfig.getAutoAck().equals(existingConfig.getAutoAck())) {
throw new IllegalArgumentException("AutoAck cannot be altered");
}
if (newConfig.getResources() != null) {
mergedConfig
.setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources()));
}
if (newConfig.getTimeoutMs() != null) {
mergedConfig.setTimeoutMs(newConfig.getTimeoutMs());
}
if (newConfig.getCleanupSubscription() != null) {
mergedConfig.setCleanupSubscription(newConfig.getCleanupSubscription());
}
if (!StringUtils.isEmpty(newConfig.getArchive())) {
mergedConfig.setArchive(newConfig.getArchive());
}
if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) {
mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags());
}
if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) {
mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions());
}
if (newConfig.getTransformFunction() != null) {
mergedConfig.setTransformFunction(newConfig.getTransformFunction());
}
if (newConfig.getTransformFunctionClassName() != null) {
mergedConfig.setTransformFunctionClassName(newConfig.getTransformFunctionClassName());
}
if (newConfig.getTransformFunctionConfig() != null) {
mergedConfig.setTransformFunctionConfig(newConfig.getTransformFunctionConfig());
}
return mergedConfig;
}
|
@Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "Tenants differ")
public void testMergeDifferentTenant() {
SinkConfig sinkConfig = createSinkConfig();
SinkConfig newSinkConfig = createUpdatedSinkConfig("tenant", "Different");
SinkConfigUtils.validateUpdate(sinkConfig, newSinkConfig);
}
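A minimal hedged companion for the merge path: parallelism is one of the fields validateUpdate allows to change, so the same helpers should produce a successful merge (assuming the createUpdatedSinkConfig(field, value) helper accepts arbitrary field names, which is an assumption about this test class):
@Test
public void testMergeParallelism() {
SinkConfig sinkConfig = createSinkConfig();
SinkConfig newSinkConfig = createUpdatedSinkConfig("parallelism", 100);
SinkConfig mergedConfig = SinkConfigUtils.validateUpdate(sinkConfig, newSinkConfig);
assertEquals(mergedConfig.getParallelism().intValue(), 100);
}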
|
@Override
public Hotspots.HotspotPullQueryTimestamp generateTimestampMessage(long timestamp) {
Hotspots.HotspotPullQueryTimestamp.Builder responseBuilder = Hotspots.HotspotPullQueryTimestamp.newBuilder();
responseBuilder.setQueryTimestamp(timestamp);
return responseBuilder.build();
}
|
@Test
public void generateTimestampMessage_shouldMapTimestamp() {
long timestamp = System.currentTimeMillis();
HotspotPullQueryTimestamp result = underTest.generateTimestampMessage(timestamp);
assertThat(result.getQueryTimestamp()).isEqualTo(timestamp);
}
|
@Override
public Properties info(RedisClusterNode node) {
Map<String, String> info = execute(node, RedisCommands.INFO_ALL);
Properties result = new Properties();
for (Entry<String, String> entry : info.entrySet()) {
result.setProperty(entry.getKey(), entry.getValue());
}
return result;
}
|
@Test
public void testInfo() {
RedisClusterNode master = getFirstMaster();
Properties info = connection.info(master);
assertThat(info.size()).isGreaterThan(10);
}
|
@Override
public V get(K key) {
return cache.get(key);
}
|
@Test
public void testGet() {
cache.put(42, "foobar");
String result = adapter.get(42);
assertEquals("foobar", result);
}
|
public static void executeWithRetries(
final Function function,
final RetryBehaviour retryBehaviour
) throws Exception {
executeWithRetries(() -> {
function.call();
return null;
}, retryBehaviour);
}
|
@Test
public void shouldNotRetryOnCustomRetryableDenied() throws Exception {
// Given:
final AtomicBoolean firstCall = new AtomicBoolean(true);
final Callable<Object> throwsException = () -> {
if (firstCall.get()) {
firstCall.set(false);
// this is usually a retryable exception
throw new UnknownTopicOrPartitionException("First non-retry exception");
} else {
throw new RuntimeException("Test should not retry");
}
};
// When:
final RuntimeException e = assertThrows(
UnknownTopicOrPartitionException.class,
() -> ExecutorUtil.executeWithRetries(throwsException, e2 -> !(e2 instanceof UnknownTopicOrPartitionException))
);
// Then:
assertThat(e.getMessage(), containsString("First non-retry exception"));
}
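A hedged positive-path sketch: when the RetryBehaviour accepts the exception, the first retryable failure is swallowed and the second attempt's result is returned. The Callable and predicate shapes mirror the test above; the generic return type is an assumption about ExecutorUtil's API.
@Test
public void shouldRetryOnCustomRetryableAccepted() throws Exception {
// Given:
final AtomicBoolean firstCall = new AtomicBoolean(true);
final Callable<String> succeedsOnRetry = () -> {
if (firstCall.getAndSet(false)) {
// retryable on the first call, per the custom behaviour below
throw new UnknownTopicOrPartitionException("transient failure");
}
return "done";
};
// When:
final String result = ExecutorUtil.executeWithRetries(succeedsOnRetry, e -> e instanceof UnknownTopicOrPartitionException);
// Then:
assertThat(result, is("done"));
}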
|
public void appBecomeActive() {
synchronized (mTrackTimer) {
try {
for (Map.Entry<String, EventTimer> entry : mTrackTimer.entrySet()) {
if (entry != null) {
EventTimer eventTimer = entry.getValue();
if (eventTimer != null) {
eventTimer.setStartTime(SystemClock.elapsedRealtime());
}
}
}
} catch (Exception e) {
SALog.printStackTrace(e);
}
}
}
|
@Test
public void appBecomeActive() {
mInstance.addEventTimer("EventTimer", new EventTimer(TimeUnit.SECONDS, 10000L));
mInstance.appBecomeActive();
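// Assumption: under Robolectric, SystemClock.elapsedRealtime() starts at 100 ms, which is where the expected value below comes from.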
Assert.assertEquals(100, mInstance.getEventTimer("EventTimer").getStartTime());
}
|
public void shortenIdsIfAvailable(java.util.@Nullable List<CounterUpdate> counters) {
if (counters == null) {
return;
}
for (CounterUpdate update : counters) {
cache.shortenIdsIfAvailable(update);
}
}
|
@Test
public void testNullUpdates() {
CounterShortIdCache shortIdCache = new CounterShortIdCache();
shortIdCache.shortenIdsIfAvailable(null);
}
|
@Override
public boolean add(FilteredBlock block) throws VerificationException, PrunedException {
boolean success = super.add(block);
if (success) {
trackFilteredTransactions(block.getTransactionCount());
}
return success;
}
|
@Test
public void coinbaseTransactionAvailability() throws Exception {
Context.propagate(new Context(100, Coin.ZERO, false, true));
// Check that a coinbase transaction is only available to spend after NetworkParameters.getSpendableCoinbaseDepth() blocks.
// Create a second wallet to receive the coinbase spend.
Wallet wallet2 = Wallet.createDeterministic(BitcoinNetwork.TESTNET, ScriptType.P2PKH);
ECKey receiveKey = wallet2.freshReceiveKey();
int height = 1;
testNetChain.addWallet(wallet2);
Address addressToSendTo = receiveKey.toAddress(ScriptType.P2PKH, BitcoinNetwork.TESTNET);
// Create a block, sending the coinbase to the coinbaseTo address (which is in the wallet).
Block b1 = TESTNET.getGenesisBlock().createNextBlockWithCoinbase(Block.BLOCK_VERSION_GENESIS, testNetWallet.currentReceiveKey().getPubKey(), height++);
testNetChain.add(b1);
final Transaction coinbaseTransaction = b1.getTransactions().get(0);
// Check a transaction has been received.
assertNotNull(coinbaseTransaction);
// The coinbase tx is not yet available to spend.
assertEquals(Coin.ZERO, testNetWallet.getBalance());
assertEquals(FIFTY_COINS, testNetWallet.getBalance(BalanceType.ESTIMATED));
assertFalse(testNetWallet.isTransactionMature(coinbaseTransaction));
// Attempt to spend the coinbase - this should fail as the coinbase is not mature yet.
try {
testNetWallet.createSend(addressToSendTo, valueOf(49, 0));
fail();
} catch (InsufficientMoneyException e) {
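// Expected: the coinbase is not yet mature, so the spend must fail.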
}
// Check that the coinbase is unavailable to spend for the next spendableCoinbaseDepth - 2 blocks.
for (int i = 0; i < TESTNET.getSpendableCoinbaseDepth() - 2; i++) {
// Non-relevant tx - just for fake block creation.
Transaction tx2 = createFakeTx(TESTNET.network(), COIN, new ECKey().toAddress(ScriptType.P2PKH, TESTNET.network()));
Block b2 = createFakeBlock(testNetStore, height++, tx2).block;
testNetChain.add(b2);
// Wallet still does not have the coinbase transaction available for spend.
assertEquals(Coin.ZERO, testNetWallet.getBalance());
assertEquals(FIFTY_COINS, testNetWallet.getBalance(BalanceType.ESTIMATED));
// The coinbase transaction is still not mature.
assertFalse(testNetWallet.isTransactionMature(coinbaseTransaction));
// Attempt to spend the coinbase - this should fail.
try {
testNetWallet.createSend(addressToSendTo, valueOf(49, 0));
fail();
} catch (InsufficientMoneyException e) {
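// Expected: still immature at this depth.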
}
}
// Give it one more block - should now be able to spend the coinbase transaction. Non-relevant tx, just for fake block creation.
Transaction tx3 = createFakeTx(TESTNET.network(), COIN, new ECKey().toAddress(ScriptType.P2PKH, TESTNET.network()));
Block b3 = createFakeBlock(testNetStore, height++, tx3).block;
testNetChain.add(b3);
// Wallet now has the coinbase transaction available for spend.
assertEquals(FIFTY_COINS, testNetWallet.getBalance());
assertEquals(FIFTY_COINS, testNetWallet.getBalance(BalanceType.ESTIMATED));
assertTrue(testNetWallet.isTransactionMature(coinbaseTransaction));
// Create a spend with the coinbase BTC to the address in the second wallet - this should now succeed.
Transaction coinbaseSend2 = testNetWallet.createSend(addressToSendTo, valueOf(49, 0));
assertNotNull(coinbaseSend2);
// Commit the coinbaseSpend to the first wallet and check the balances decrement.
testNetWallet.commitTx(coinbaseSend2);
assertEquals(COIN, testNetWallet.getBalance(BalanceType.ESTIMATED));
// Available balance is zero as change has not been received from a block yet.
assertEquals(ZERO, testNetWallet.getBalance(BalanceType.AVAILABLE));
// Give it one more block - change from coinbaseSpend should now be available in the first wallet.
Block b4 = createFakeBlock(testNetStore, height++, coinbaseSend2).block;
testNetChain.add(b4);
assertEquals(COIN, testNetWallet.getBalance(BalanceType.AVAILABLE));
// Check the balances in the second wallet.
assertEquals(valueOf(49, 0), wallet2.getBalance(BalanceType.ESTIMATED));
assertEquals(valueOf(49, 0), wallet2.getBalance(BalanceType.AVAILABLE));
}
|
public Set<Cookie> decode(String header) {
Set<Cookie> cookies = new TreeSet<Cookie>();
decode(cookies, header);
return cookies;
}
|
@Test
public void testDecodingSingleCookie() {
String cookieString = "myCookie=myValue";
Set<Cookie> cookies = ServerCookieDecoder.STRICT.decode(cookieString);
assertEquals(1, cookies.size());
Cookie cookie = cookies.iterator().next();
assertNotNull(cookie);
assertEquals("myValue", cookie.value());
}
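Because decode(...) collects into a TreeSet, multiple cookies come back de-duplicated and ordered by the Cookie comparator. A hedged sketch of that property, assuming ordering is by cookie name:
@Test
public void testDecodingTwoCookiesSorted() {
Set<Cookie> cookies = ServerCookieDecoder.STRICT.decode("zeta=2; alpha=1");
assertEquals(2, cookies.size());
// TreeSet ordering is assumed to be by cookie name: "alpha" before "zeta"
assertEquals("alpha", cookies.iterator().next().name());
}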
|
@Override
public MaterialPollResult responseMessageForLatestRevision(String responseBody) {
Map responseBodyMap = getResponseMap(responseBody);
return new MaterialPollResult(toMaterialDataMap(responseBodyMap), toSCMRevision(responseBodyMap));
}
|
@Test
public void shouldBuildSCMRevisionFromLatestRevisionResponse() throws Exception {
String revisionJSON = "{\"revision\":\"r1\",\"timestamp\":\"2011-07-14T19:43:37.100Z\",\"user\":\"some-user\",\"revisionComment\":\"comment\",\"data\":{\"dataKeyTwo\":\"data-value-two\",\"dataKeyOne\":\"data-value-one\"}," +
"\"modifiedFiles\":[{\"fileName\":\"f1\",\"action\":\"added\"},{\"fileName\":\"f2\",\"action\":\"modified\"},{\"fileName\":\"f3\",\"action\":\"deleted\"}]}";
String responseBody = "{\"revision\": " + revisionJSON + "}";
MaterialPollResult pollResult = messageHandler.responseMessageForLatestRevision(responseBody);
assertThat(pollResult.getMaterialData(), is(nullValue()));
assertSCMRevision(pollResult.getLatestRevision(), "r1", "some-user", "2011-07-14T19:43:37.100Z", "comment", List.of(new ModifiedFile("f1", ModifiedAction.added), new ModifiedFile("f2", ModifiedAction.modified), new ModifiedFile("f3", ModifiedAction.deleted)));
}
|
public static List<File> loopFiles(String path, FileFilter fileFilter) {
return loopFiles(file(path), fileFilter);
}
|
@Test
@Disabled
public void loopFilesTest2() {
FileUtil.loopFiles("").forEach(Console::log);
}
|
private Main() {
// Utility Class.
}
|
@Test
public void runsRepeatedDatasetAgainstRelease() throws Exception {
final File pwd = temp.newFolder();
Main.main(
String.format("--%s=5.5.0", UserInput.DISTRIBUTION_VERSION_PARAM),
String.format("--workdir=%s", pwd.getAbsolutePath()),
String.format("--%s=%d", UserInput.REPEAT_PARAM, 2)
);
}
|
public static UriTemplate create(String template, Charset charset) {
return new UriTemplate(template, true, charset);
}
|
@Test
void literalTemplate() {
String template = "https://www.example.com/do/stuff";
UriTemplate uriTemplate = UriTemplate.create(template, Util.UTF_8);
String expandedTemplate = uriTemplate.expand(Collections.emptyMap());
assertThat(expandedTemplate).isEqualToIgnoringCase(template);
assertThat(URI.create(expandedTemplate)).isNotNull();
}
|
@Override
public CompletableFuture<Void> localCleanupAsync(JobID jobId, Executor unusedExecutor) {
if (isRegistered(jobId)) {
return unregister(jobId).closeAsync();
}
return FutureUtils.completedVoidFuture();
}
|
@Test
void testLocalCleanupAsyncOnUnknownJobId() {
assertThat(testInstance.localCleanupAsync(new JobID(), Executors.directExecutor()))
.isCompleted();
}
|
@Override
public boolean remove(Object o) {
return map.remove(o) != null;
}
|
@Test
public void testRemove() {
ExtendedSet<TestValue> set = new ExtendedSet<>(Maps.newConcurrentMap());
TestValue val = new TestValue("foo", 1);
assertTrue(set.add(val));
TestValue nextval = new TestValue("goo", 2);
assertTrue(set.add(nextval));
assertTrue(set.remove(val));
assertFalse(set.contains(val));
assertTrue(set.remove(nextval));
assertFalse(set.contains(nextval));
}
|
public <T> Map<String, Object> properties(Class<T> base, Class<? extends T> cls) {
return this.generate(cls, base);
}
|
@SuppressWarnings("unchecked")
@Test
void testEnum() {
Map<String, Object> generate = jsonSchemaGenerator.properties(Task.class, TaskWithEnum.class);
assertThat(generate, is(not(nullValue())));
assertThat(((Map<String, Map<String, Object>>) generate.get("properties")).size(), is(4));
assertThat(((Map<String, Map<String, Object>>) generate.get("properties")).get("stringWithDefault").get("default"), is("default"));
}
|
public static SerializableFunction<Row, Mutation> beamRowToMutationFn(
Mutation.Op operation, String table) {
return (row -> {
switch (operation) {
case INSERT:
return MutationUtils.createMutationFromBeamRows(Mutation.newInsertBuilder(table), row);
case DELETE:
return Mutation.delete(table, MutationUtils.createKeyFromBeamRow(row));
case UPDATE:
return MutationUtils.createMutationFromBeamRows(Mutation.newUpdateBuilder(table), row);
case REPLACE:
return MutationUtils.createMutationFromBeamRows(Mutation.newReplaceBuilder(table), row);
case INSERT_OR_UPDATE:
return MutationUtils.createMutationFromBeamRows(
Mutation.newInsertOrUpdateBuilder(table), row);
default:
throw new IllegalArgumentException(
String.format("Unknown mutation operation type: %s", operation));
}
});
}
|
@Test
public void testCreateReplaceMutationFromRow() {
Mutation expectedMutation = createMutation(Mutation.Op.REPLACE);
Mutation mutation = beamRowToMutationFn(Mutation.Op.REPLACE, TABLE).apply(WRITE_ROW);
assertEquals(expectedMutation, mutation);
}
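For contrast, the DELETE branch builds a key-only mutation rather than a write. A hedged sketch reusing the constants above; it assumes the createMutation(...) helper also covers Mutation.Op.DELETE:
@Test
public void testCreateDeleteMutationFromRow() {
Mutation expectedMutation = createMutation(Mutation.Op.DELETE);
Mutation mutation = beamRowToMutationFn(Mutation.Op.DELETE, TABLE).apply(WRITE_ROW);
assertEquals(expectedMutation, mutation);
}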
|
@Override
public <T> void write(T payload) {
if (payload == null) {
LOG.debug("Payload was null. Skipping.");
return;
}
String canonicalClassName = AutoValueUtils.getCanonicalName(payload.getClass());
write(canonicalClassName, payload);
}
|
@Test
public void writePostsClusterConfigChangedEvent() throws Exception {
CustomConfig customConfig = new CustomConfig();
customConfig.text = "TEST";
final ClusterConfigChangedEventHandler eventHandler = new ClusterConfigChangedEventHandler();
clusterEventBus.registerClusterEventSubscriber(eventHandler);
@SuppressWarnings("deprecation")
final DBCollection collection = mongoConnection.getDatabase().getCollection(COLLECTION_NAME);
assertThat(collection.count()).isEqualTo(0L);
clusterConfigService.write(customConfig);
assertThat(collection.count()).isEqualTo(1L);
assertThat(eventHandler.event).isNotNull();
assertThat(eventHandler.event.nodeId()).isEqualTo("ID");
assertThat(eventHandler.event.type()).isEqualTo(CustomConfig.class.getCanonicalName());
clusterEventBus.unregister(eventHandler);
}
|
@Override
public void writeInt(final int v) throws IOException {
ensureAvailable(INT_SIZE_IN_BYTES);
MEM.putInt(buffer, ARRAY_BYTE_BASE_OFFSET + pos, v);
pos += INT_SIZE_IN_BYTES;
}
|
@Test
public void testWriteIntForPositionV() throws Exception {
int expected = 100;
out.writeInt(1, expected);
int actual = Bits.readInt(out.buffer, 1, ByteOrder.nativeOrder() == ByteOrder.BIG_ENDIAN);
assertEquals(expected, actual);
}
|
public float getBulletPosition() {
return bullet.getPosition();
}
|
@Test
void testGetBulletPosition() {
assertEquals(controller.bullet.getPosition(), controller.getBulletPosition(), 0);
}
|
public static String encodeMap(Map<String, String> map) {
if (map == null) {
return null;
}
if (map.isEmpty()) {
return StringUtils.EMPTY;
}
StringBuilder sb = new StringBuilder();
for (Map.Entry<String, String> entry : map.entrySet()) {
sb.append(entry.getKey()).append(KV_SPLIT).append(entry.getValue()).append(PAIR_SPLIT);
}
return sb.substring(0, sb.length() - 1);
}
|
@Test
public void encodeMap() {
Map<String, String> map = null;
Assertions.assertNull(CollectionUtils.encodeMap(map));
map = new LinkedHashMap<>();
Assertions.assertEquals("", CollectionUtils.encodeMap(map));
map.put("x", "1");
Assertions.assertEquals("x=1", CollectionUtils.encodeMap(map));
map.put("y", "2");
Assertions.assertEquals("x=1&y=2", CollectionUtils.encodeMap(map));
}
|
public int getPrefixMatchLength(ElementPath p) {
if (p == null) {
return 0;
}
int lSize = this.partList.size();
int rSize = p.partList.size();
// no match possible for empty sets
if ((lSize == 0) || (rSize == 0)) {
return 0;
}
int minLen = (lSize <= rSize) ? lSize : rSize;
int match = 0;
for (int i = 0; i < minLen; i++) {
String l = this.partList.get(i);
String r = p.partList.get(i);
if (equalityCheck(l, r)) {
match++;
} else {
break;
}
}
return match;
}
|
@Test
public void testPrefixMatch() {
{
ElementPath p = new ElementPath("/a/b");
ElementSelector ruleElementSelector = new ElementSelector("/x/*");
assertEquals(0, ruleElementSelector.getPrefixMatchLength(p));
}
{
ElementPath p = new ElementPath("/a");
ElementSelector ruleElementSelector = new ElementSelector("/x/*");
assertEquals(0, ruleElementSelector.getPrefixMatchLength(p));
}
{
ElementPath p = new ElementPath("/a/b");
ElementSelector ruleElementSelector = new ElementSelector("/a/*");
assertEquals(1, ruleElementSelector.getPrefixMatchLength(p));
}
{
ElementPath p = new ElementPath("/a/b");
ElementSelector ruleElementSelector = new ElementSelector("/A/*");
assertEquals(1, ruleElementSelector.getPrefixMatchLength(p));
}
{
ElementPath p = new ElementPath("/A/b");
ElementSelector ruleElementSelector = new ElementSelector("/a/*");
assertEquals(1, ruleElementSelector.getPrefixMatchLength(p));
}
{
ElementPath p = new ElementPath("/a/b");
ElementSelector ruleElementSelector = new ElementSelector("/a/b/*");
assertEquals(2, ruleElementSelector.getPrefixMatchLength(p));
}
{
ElementPath p = new ElementPath("/a/b");
ElementSelector ruleElementSelector = new ElementSelector("/*");
assertEquals(0, ruleElementSelector.getPrefixMatchLength(p));
}
}
|
static void checkFormat(final String originalFilename) {
final List<String> fileNameSplit = Splitter.on(".").splitToList(originalFilename);
if (fileNameSplit.size() <= 1) {
throw new BadRequestException("The file format is invalid.");
}
for (String s : fileNameSplit) {
if (StringUtils.isEmpty(s)) {
throw new BadRequestException("The file format is invalid.");
}
}
}
|
@Test
public void checkFormat() {
ConfigFileUtils.checkFormat("1234+default+app.properties");
ConfigFileUtils.checkFormat("1234+default+app.yml");
ConfigFileUtils.checkFormat("1234+default+app.json");
}
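For contrast, a hedged negative sketch: names without an extension or with an empty segment should be rejected. assertThrows here assumes JUnit 4.13+ on the classpath:
@Test
public void checkFormatWithInvalidFileNames() {
// no "." at all
assertThrows(BadRequestException.class, () -> ConfigFileUtils.checkFormat("app"));
// empty segment before the "."
assertThrows(BadRequestException.class, () -> ConfigFileUtils.checkFormat(".properties"));
// empty segment after the "."
assertThrows(BadRequestException.class, () -> ConfigFileUtils.checkFormat("app."));
}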
|
protected boolean isConnectPoint(String field, FieldPresence presence) {
return isConnectPoint(object, field, presence);
}
|
@Test
public void isConnectPoint() {
assertTrue("is not proper connectPoint", cfg.isConnectPoint(CONNECT_POINT, MANDATORY));
assertTrue("is not proper connectPoint", cfg.isConnectPoint(CONNECT_POINT, OPTIONAL));
assertTrue("is not proper connectPoint", cfg.isConnectPoint("none", OPTIONAL));
assertTrue("did not detect missing connectPoint",
expectInvalidField(() -> cfg.isConnectPoint("none", MANDATORY)));
assertTrue("did not detect bad connectPoint",
expectInvalidField(() -> cfg.isConnectPoint(BAD_CONNECT_POINT, MANDATORY)));
}
|
public double sphericalDistance(LatLong other) {
return LatLongUtils.sphericalDistance(this, other);
}
|
@Test
public void sphericalDistance_nearOfSriLankaToIslaGenovesa_returnHalfOfEarthEquatorCircumference() {
// These coordinates are 1/4 Earth circumference east of zero on the equator
LatLong nearSriLanka = new LatLong(0d, 90d);
// These coordinates are 1/4 Earth circumference west of zero on the equator
LatLong islaGenovesa = new LatLong(0d, -90d);
// These points are as far apart as they could be, halfway around the earth
double spherical = LatLongUtils.sphericalDistance(nearSriLanka, islaGenovesa);
assertEquals(EARTH_EQUATOR_CIRCUMFERENCE / 2, spherical, 0d);
}
|
@VisibleForTesting
Object[] callHttpService( RowMetaInterface rowMeta, Object[] rowData ) throws KettleException {
HttpClientManager.HttpClientBuilderFacade clientBuilder = HttpClientManager.getInstance().createBuilder();
if ( data.realConnectionTimeout > -1 ) {
clientBuilder.setConnectionTimeout( data.realConnectionTimeout );
}
if ( data.realSocketTimeout > -1 ) {
clientBuilder.setSocketTimeout( data.realSocketTimeout );
}
if ( StringUtils.isNotBlank( data.realHttpLogin ) ) {
clientBuilder.setCredentials( data.realHttpLogin, data.realHttpPassword );
}
if ( StringUtils.isNotBlank( data.realProxyHost ) ) {
clientBuilder.setProxy( data.realProxyHost, data.realProxyPort );
}
CloseableHttpClient httpClient = clientBuilder.build();
// Prepare HTTP GET
URI uri = null;
try {
URIBuilder uriBuilder = constructUrlBuilder( rowMeta, rowData );
uri = uriBuilder.build();
HttpGet method = new HttpGet( uri );
// Add Custom HTTP headers
if ( data.useHeaderParameters ) {
for ( int i = 0; i < data.header_parameters_nrs.length; i++ ) {
method.addHeader( data.headerParameters[ i ].getName(), data.inputRowMeta.getString( rowData,
data.header_parameters_nrs[ i ] ) );
if ( isDebug() ) {
log.logDebug( BaseMessages.getString( PKG, "HTTPDialog.Log.HeaderValue",
data.headerParameters[ i ].getName(), data.inputRowMeta
.getString( rowData, data.header_parameters_nrs[ i ] ) ) );
}
}
}
Object[] newRow = null;
if ( rowData != null ) {
newRow = rowData.clone();
}
// Execute request
CloseableHttpResponse httpResponse = null;
try {
// used for calculating the responseTime
long startTime = System.currentTimeMillis();
HttpHost target = new HttpHost( uri.getHost(), uri.getPort(), uri.getScheme() );
// Create AuthCache instance
AuthCache authCache = new BasicAuthCache();
// Generate BASIC scheme object and add it to the local
// auth cache
BasicScheme basicAuth = new BasicScheme();
authCache.put( target, basicAuth );
// Add AuthCache to the execution context
HttpClientContext localContext = HttpClientContext.create();
localContext.setAuthCache( authCache );
// Preemptive authentication
if ( StringUtils.isNotBlank( data.realProxyHost ) ) {
httpResponse = httpClient.execute( target, method, localContext );
} else {
httpResponse = httpClient.execute( method, localContext );
}
// calculate the responseTime
long responseTime = System.currentTimeMillis() - startTime;
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "HTTP.Log.ResponseTime", responseTime, uri ) );
}
int statusCode = requestStatusCode( httpResponse );
// The status code
if ( isDebug() ) {
logDebug( BaseMessages.getString( PKG, "HTTP.Log.ResponseStatusCode", "" + statusCode ) );
}
String body;
switch ( statusCode ) {
case HttpURLConnection.HTTP_UNAUTHORIZED:
throw new KettleStepException( BaseMessages
.getString( PKG, "HTTP.Exception.Authentication", data.realUrl ) );
case -1:
throw new KettleStepException( BaseMessages
.getString( PKG, "HTTP.Exception.IllegalStatusCode", data.realUrl ) );
case HttpURLConnection.HTTP_NO_CONTENT:
body = "";
break;
default:
HttpEntity entity = httpResponse.getEntity();
if ( entity != null ) {
body = StringUtils.isEmpty( meta.getEncoding() ) ? EntityUtils.toString( entity ) : EntityUtils.toString( entity, meta.getEncoding() );
} else {
body = "";
}
break;
}
Header[] headers = searchForHeaders( httpResponse );
JSONObject json = new JSONObject();
for ( Header header : headers ) {
Object previousValue = json.get( header.getName() );
if ( previousValue == null ) {
json.put( header.getName(), header.getValue() );
} else if ( previousValue instanceof List ) {
List<String> list = (List<String>) previousValue;
list.add( header.getValue() );
} else {
ArrayList<String> list = new ArrayList<String>();
list.add( (String) previousValue );
list.add( header.getValue() );
json.put( header.getName(), list );
}
}
String headerString = json.toJSONString();
int returnFieldsOffset = rowMeta.size();
if ( !Utils.isEmpty( meta.getFieldName() ) ) {
newRow = RowDataUtil.addValueData( newRow, returnFieldsOffset, body );
returnFieldsOffset++;
}
if ( !Utils.isEmpty( meta.getResultCodeFieldName() ) ) {
newRow = RowDataUtil.addValueData( newRow, returnFieldsOffset, Long.valueOf( statusCode ) );
returnFieldsOffset++;
}
if ( !Utils.isEmpty( meta.getResponseTimeFieldName() ) ) {
newRow = RowDataUtil.addValueData( newRow, returnFieldsOffset, Long.valueOf( responseTime ) );
returnFieldsOffset++;
}
if ( !Utils.isEmpty( meta.getResponseHeaderFieldName() ) ) {
newRow = RowDataUtil.addValueData( newRow, returnFieldsOffset, headerString );
}
} finally {
if ( httpResponse != null ) {
httpResponse.close();
}
// Release current connection to the connection pool once you are done
method.releaseConnection();
}
return newRow;
} catch ( UnknownHostException uhe ) {
throw new KettleException( BaseMessages.getString( PKG, "HTTP.Error.UnknownHostException", uhe.getMessage() ) );
} catch ( Exception e ) {
throw new KettleException( BaseMessages.getString( PKG, "HTTP.Log.UnableGetResult", uri ), e );
}
}
|
@Test
public void callHttpServiceWithoutEncoding() throws Exception {
try ( MockedStatic<HttpClientManager> httpClientManagerMockedStatic = mockStatic( HttpClientManager.class ) ) {
httpClientManagerMockedStatic.when( HttpClientManager::getInstance ).thenReturn( manager );
doReturn( null ).when( meta ).getEncoding();
assertNotEquals( DATA, http.callHttpService( rmi, new Object[] { 0 } )[ 0 ] );
}
}
|
ClassicGroup getOrMaybeCreateClassicGroup(
String groupId,
boolean createIfNotExists
) throws GroupIdNotFoundException {
Group group = groups.get(groupId);
if (group == null && !createIfNotExists) {
throw new GroupIdNotFoundException(String.format("Classic group %s not found.", groupId));
}
if (group == null) {
ClassicGroup classicGroup = new ClassicGroup(logContext, groupId, ClassicGroupState.EMPTY, time, metrics);
groups.put(groupId, classicGroup);
metrics.onClassicGroupStateTransition(null, classicGroup.currentState());
return classicGroup;
} else {
if (group.type() == CLASSIC) {
return (ClassicGroup) group;
} else {
// We don't support upgrading/downgrading between protocols at the moment so
// we throw an exception if a group exists with the wrong type.
throw new GroupIdNotFoundException(String.format("Group %s is not a classic group.",
groupId));
}
}
}
|
@Test
public void testSyncGroupFollowerAfterLeader() throws Exception {
// To get a group of two members:
// 1. join and sync with a single member (because we can't immediately join with two members)
// 2. join and sync with the first member and a new member
GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
.build();
JoinGroupResponseData leaderJoinResponse = context.joinClassicGroupAsDynamicMemberAndCompleteRebalance("group-id");
ClassicGroup group = context.groupMetadataManager.getOrMaybeCreateClassicGroup("group-id", false);
JoinGroupRequestData joinRequest = new GroupMetadataManagerTestContext.JoinGroupRequestBuilder()
.withGroupId("group-id")
.withMemberId(UNKNOWN_MEMBER_ID)
.withDefaultProtocolTypeAndProtocols()
.withRebalanceTimeoutMs(10000)
.withSessionTimeoutMs(5000)
.build();
GroupMetadataManagerTestContext.JoinResult followerJoinResult = context.sendClassicGroupJoin(joinRequest.setMemberId(UNKNOWN_MEMBER_ID));
assertTrue(followerJoinResult.records.isEmpty());
assertFalse(followerJoinResult.joinFuture.isDone());
GroupMetadataManagerTestContext.JoinResult leaderJoinResult = context.sendClassicGroupJoin(joinRequest.setMemberId(leaderJoinResponse.memberId()));
assertTrue(leaderJoinResult.records.isEmpty());
assertTrue(leaderJoinResult.joinFuture.isDone());
assertTrue(followerJoinResult.joinFuture.isDone());
assertEquals(Errors.NONE.code(), leaderJoinResult.joinFuture.get().errorCode());
assertEquals(Errors.NONE.code(), followerJoinResult.joinFuture.get().errorCode());
assertEquals(leaderJoinResult.joinFuture.get().generationId(), followerJoinResult.joinFuture.get().generationId());
assertEquals(leaderJoinResponse.memberId(), leaderJoinResult.joinFuture.get().leader());
assertEquals(leaderJoinResponse.memberId(), followerJoinResult.joinFuture.get().leader());
assertTrue(group.isInState(COMPLETING_REBALANCE));
int nextGenerationId = leaderJoinResult.joinFuture.get().generationId();
String followerId = followerJoinResult.joinFuture.get().memberId();
byte[] leaderAssignment = new byte[]{0};
byte[] followerAssignment = new byte[]{1};
// Sync group with leader to get new assignment.
List<SyncGroupRequestAssignment> assignment = new ArrayList<>();
assignment.add(new SyncGroupRequestAssignment()
.setMemberId(leaderJoinResponse.memberId())
.setAssignment(leaderAssignment)
);
assignment.add(new SyncGroupRequestAssignment()
.setMemberId(followerId)
.setAssignment(followerAssignment)
);
SyncGroupRequestData syncRequest = new GroupMetadataManagerTestContext.SyncGroupRequestBuilder()
.withGroupId("group-id")
.withMemberId(leaderJoinResponse.memberId())
.withGenerationId(leaderJoinResponse.generationId())
.withAssignment(assignment)
.build();
GroupMetadataManagerTestContext.SyncResult syncResult = context.sendClassicGroupSync(
syncRequest.setGenerationId(nextGenerationId)
);
// Simulate a successful write to log. This will update the group's assignment with the new assignment.
syncResult.appendFuture.complete(null);
assertTrue(syncResult.syncFuture.isDone());
assertEquals(Errors.NONE.code(), syncResult.syncFuture.get().errorCode());
assertEquals(leaderAssignment, syncResult.syncFuture.get().assignment());
// Sync group with follower to get new assignment.
GroupMetadataManagerTestContext.SyncResult followerSyncResult = context.sendClassicGroupSync(
syncRequest
.setMemberId(followerId)
.setGenerationId(nextGenerationId)
);
assertTrue(followerSyncResult.records.isEmpty());
assertTrue(followerSyncResult.syncFuture.isDone());
assertEquals(Errors.NONE.code(), followerSyncResult.syncFuture.get().errorCode());
assertEquals(followerAssignment, followerSyncResult.syncFuture.get().assignment());
assertTrue(group.isInState(STABLE));
}
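A hedged sketch of the two lookup modes of getOrMaybeCreateClassicGroup, using the same context builder; the EMPTY state constant is assumed to be statically imported like COMPLETING_REBALANCE and STABLE above:
@Test
public void testGetOrMaybeCreateClassicGroupLookupModes() {
GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
.build();
// Unknown group id without createIfNotExists fails fast.
assertThrows(GroupIdNotFoundException.class,
() -> context.groupMetadataManager.getOrMaybeCreateClassicGroup("unknown-group", false));
// With createIfNotExists, a new classic group starts in the EMPTY state.
ClassicGroup created = context.groupMetadataManager.getOrMaybeCreateClassicGroup("unknown-group", true);
assertTrue(created.isInState(EMPTY));
}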
|
public void parseStepParameter(
Map<String, Map<String, Object>> allStepOutputData,
Map<String, Parameter> workflowParams,
Map<String, Parameter> stepParams,
Parameter param,
String stepId) {
parseStepParameter(
allStepOutputData, workflowParams, stepParams, param, stepId, new HashSet<>());
}
|
@Test
public void testParseStepParameterWithInvalidReference() {
AssertHelper.assertThrows(
"cannot find a step id or signal instance",
MaestroInvalidExpressionException.class,
"Expression evaluation throws an exception",
() ->
paramEvaluator.parseStepParameter(
Collections.singletonMap("step1", Collections.emptyMap()),
Collections.emptyMap(),
Collections.singletonMap("foo", StringParameter.builder().value("123").build()),
StringParameter.builder().name("bar").expression("step2__foo + '-1';").build(),
"step1"));
AssertHelper.assertThrows(
"step id is ambiguous",
MaestroInternalError.class,
"reference [step2___foo] cannot be parsed due to ambiguity (both step ids [step2] and [step2_] exist",
() ->
paramEvaluator.parseStepParameter(
twoItemMap("step2", Collections.emptyMap(), "step2_", Collections.emptyMap()),
Collections.emptyMap(),
Collections.emptyMap(),
StringParameter.builder().name("bar").expression("step2___foo + '-1';").build(),
"step1"));
AssertHelper.assertThrows(
"cannot find a step id",
MaestroInternalError.class,
"reference [step3___foo] cannot be parsed as cannot find either step id [step3] or [step3_]",
() ->
paramEvaluator.parseStepParameter(
Collections.singletonMap("step2", Collections.emptyMap()),
Collections.emptyMap(),
Collections.emptyMap(),
StringParameter.builder().name("bar").expression("step3___foo + '-1';").build(),
"step1"));
AssertHelper.assertThrows(
"cannot find a step id",
MaestroInternalError.class,
"reference [step3____foo] cannot be parsed as cannot find either step id [step3] or [step3_]",
() ->
paramEvaluator.parseStepParameter(
Collections.singletonMap("step2", Collections.emptyMap()),
Collections.emptyMap(),
Collections.emptyMap(),
StringParameter.builder().name("bar").expression("step3____foo + '-1';").build(),
"step1"));
}
|
int getMaxLevel(int maxLevel) {
return (maxLevel <= 0 || maxLevel > this.maxLevelAllowed) ? this.maxLevelAllowed : maxLevel;
}
|
@Test
public void givenMaxLevelZeroOrNegative_whenGetMaxLevel_thenReturnDefaultMaxLevel() {
assertThat(repo.getMaxLevel(0), equalTo(repo.getMaxLevelAllowed()));
assertThat(repo.getMaxLevel(-1), equalTo(repo.getMaxLevelAllowed()));
assertThat(repo.getMaxLevel(-2), equalTo(repo.getMaxLevelAllowed()));
assertThat(repo.getMaxLevel(Integer.MIN_VALUE), equalTo(repo.getMaxLevelAllowed()));
}
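A hedged complement covering the pass-through and capped cases, assuming getMaxLevelAllowed() is at least 1 and below Integer.MAX_VALUE:
@Test
public void givenMaxLevelInRangeOrAboveCap_whenGetMaxLevel_thenClampToAllowed() {
assertThat(repo.getMaxLevel(1), equalTo(1));
assertThat(repo.getMaxLevel(repo.getMaxLevelAllowed()), equalTo(repo.getMaxLevelAllowed()));
assertThat(repo.getMaxLevel(repo.getMaxLevelAllowed() + 1), equalTo(repo.getMaxLevelAllowed()));
}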
|
public static Optional<Path> getQualifiedRemoteProvidedUsrLib(
org.apache.flink.configuration.Configuration configuration,
YarnConfiguration yarnConfiguration)
throws IOException, IllegalArgumentException {
String usrlib = configuration.get(YarnConfigOptions.PROVIDED_USRLIB_DIR);
if (usrlib == null) {
return Optional.empty();
}
final Path qualifiedUsrLibPath =
FileSystem.get(yarnConfiguration).makeQualified(new Path(usrlib));
checkArgument(
isRemotePath(qualifiedUsrLibPath.toString()),
"The \"%s\" must point to a remote dir "
+ "which is accessible from all worker nodes.",
YarnConfigOptions.PROVIDED_USRLIB_DIR.key());
checkArgument(
isUsrLibDirectory(FileSystem.get(yarnConfiguration), qualifiedUsrLibPath),
"The \"%s\" should be named with \"%s\".",
YarnConfigOptions.PROVIDED_USRLIB_DIR.key(),
ConfigConstants.DEFAULT_FLINK_USR_LIB_DIR);
return Optional.of(qualifiedUsrLibPath);
}
|
@Test
void testInvalidRemoteUsrLib(@TempDir Path tempDir) throws IOException {
final String sharedLibPath = "hdfs:///flink/badlib";
final org.apache.hadoop.conf.Configuration hdConf =
new org.apache.hadoop.conf.Configuration();
hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tempDir.toAbsolutePath().toString());
try (final MiniDFSCluster hdfsCluster = new MiniDFSCluster.Builder(hdConf).build()) {
final org.apache.hadoop.fs.Path hdfsRootPath =
new org.apache.hadoop.fs.Path(hdfsCluster.getURI());
hdfsCluster.getFileSystem().mkdirs(new org.apache.hadoop.fs.Path(sharedLibPath));
final Configuration flinkConfig = new Configuration();
flinkConfig.set(YarnConfigOptions.PROVIDED_USRLIB_DIR, sharedLibPath);
final YarnConfiguration yarnConfig = new YarnConfiguration();
yarnConfig.set(
CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, hdfsRootPath.toString());
assertThatThrownBy(
() -> Utils.getQualifiedRemoteProvidedUsrLib(flinkConfig, yarnConfig))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage(
"The \"%s\" should be named with \"%s\".",
YarnConfigOptions.PROVIDED_USRLIB_DIR.key(),
ConfigConstants.DEFAULT_FLINK_USR_LIB_DIR);
}
}
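And the trivial branch: when PROVIDED_USRLIB_DIR is unset, the method returns Optional.empty() before touching the file system. A minimal hedged sketch:
@Test
void testUsrLibNotConfigured() throws IOException {
final Configuration flinkConfig = new Configuration();
final YarnConfiguration yarnConfig = new YarnConfiguration();
assertThat(Utils.getQualifiedRemoteProvidedUsrLib(flinkConfig, yarnConfig)).isEmpty();
}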
|
int run() {
final Map<String, String> configProps = options.getConfigFile()
.map(Ksql::loadProperties)
.orElseGet(Collections::emptyMap);
final Map<String, String> sessionVariables = options.getVariables();
try (KsqlRestClient restClient = buildClient(configProps)) {
try (Cli cli = cliBuilder.build(
options.getStreamedQueryRowLimit(),
options.getStreamedQueryTimeoutMs(),
options.getOutputFormat(),
restClient)
) {
// Add CLI variables if defined by parameters
cli.addSessionVariables(sessionVariables);
if (options.getExecute().isPresent()) {
return cli.runCommand(options.getExecute().get());
} else if (options.getScriptFile().isPresent()) {
final File scriptFile = new File(options.getScriptFile().get());
if (scriptFile.exists() && scriptFile.isFile()) {
return cli.runScript(scriptFile.getPath());
} else {
throw new KsqlException("No such script file: " + scriptFile.getPath());
}
} else {
return cli.runInteractively();
}
}
}
}
|
@Test
public void shouldRunNonInteractiveCommandWhenExecuteOptionIsUsed() {
// Given:
when(options.getExecute()).thenReturn(Optional.of("this is a command"));
// When:
ksql.run();
// Then:
verify(cli).runCommand("this is a command");
}
|
public ConsumerGroup consumerGroup(
String groupId,
long committedOffset
) throws GroupIdNotFoundException {
Group group = group(groupId, committedOffset);
if (group.type() == CONSUMER) {
return (ConsumerGroup) group;
} else {
// We don't support upgrading/downgrading between protocols at the moment so
// we throw an exception if a group exists with the wrong type.
throw new GroupIdNotFoundException(String.format("Group %s is not a consumer group.",
groupId));
}
}
|
@Test
public void testJoiningConsumerGroupReplacingExistingStaticMember() throws Exception {
String groupId = "group-id";
Uuid fooTopicId = Uuid.randomUuid();
String fooTopicName = "foo";
String memberId = Uuid.randomUuid().toString();
String instanceId = "instance-id";
GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
.withConsumerGroupAssignors(Collections.singletonList(new NoOpPartitionAssignor()))
.withMetadataImage(new MetadataImageBuilder()
.addTopic(fooTopicId, fooTopicName, 2)
.addRacks()
.build())
.withConsumerGroup(new ConsumerGroupBuilder(groupId, 10)
.withSubscriptionMetadata(new HashMap<String, TopicMetadata>() {
{
put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 2, mkMapOfPartitionRacks(2)));
}
})
.withMember(new ConsumerGroupMember.Builder(memberId)
.setInstanceId(instanceId)
.setState(MemberState.STABLE)
.setMemberEpoch(10)
.setPreviousMemberEpoch(10)
.setSubscribedTopicNames(Collections.singletonList(fooTopicName))
.setRebalanceTimeoutMs(500)
.setAssignedPartitions(mkAssignment(
mkTopicAssignment(fooTopicId, 0, 1)))
.build())
.withAssignment(memberId, mkAssignment(
mkTopicAssignment(fooTopicId, 0, 1)))
.withAssignmentEpoch(10))
.build();
context.groupMetadataManager.consumerGroup(groupId).setMetadataRefreshDeadline(Long.MAX_VALUE, 10);
JoinGroupRequestData request = new GroupMetadataManagerTestContext.JoinGroupRequestBuilder()
.withGroupId(groupId)
.withMemberId(UNKNOWN_MEMBER_ID)
.withGroupInstanceId(instanceId)
.withProtocols(GroupMetadataManagerTestContext.toConsumerProtocol(
Collections.singletonList(fooTopicName),
Collections.emptyList()))
.build();
// The static member joins with UNKNOWN_MEMBER_ID.
GroupMetadataManagerTestContext.JoinResult joinResult = context.sendClassicGroupJoin(
request,
true
);
// Simulate a successful write to log.
joinResult.appendFuture.complete(null);
String newMemberId = joinResult.joinFuture.get().memberId();
assertNotEquals("", newMemberId);
ConsumerGroupMember expectedCopiedMember = new ConsumerGroupMember.Builder(newMemberId)
.setMemberEpoch(0)
.setPreviousMemberEpoch(0)
.setInstanceId(instanceId)
.setState(MemberState.STABLE)
.setSubscribedTopicNames(Collections.singletonList(fooTopicName))
.setAssignedPartitions(mkAssignment(
mkTopicAssignment(fooTopicId, 0, 1)))
.setRebalanceTimeoutMs(500)
.build();
ConsumerGroupMember expectedMember = new ConsumerGroupMember.Builder(newMemberId)
.setMemberEpoch(10)
.setPreviousMemberEpoch(0)
.setInstanceId(instanceId)
.setState(MemberState.STABLE)
.setClientId(DEFAULT_CLIENT_ID)
.setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
.setSubscribedTopicNames(Collections.singletonList(fooTopicName))
.setAssignedPartitions(mkAssignment(
mkTopicAssignment(fooTopicId, 0, 1)))
.setRebalanceTimeoutMs(500)
.setClassicMemberMetadata(
new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata()
.setSessionTimeoutMs(request.sessionTimeoutMs())
.setSupportedProtocols(ConsumerGroupMember.classicProtocolListFromJoinRequestProtocolCollection(request.protocols())))
.build();
List<CoordinatorRecord> expectedRecords = Arrays.asList(
// Remove the old static member.
GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId),
GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId),
GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId),
// Replace the old static member with the new static member.
GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedCopiedMember),
GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, newMemberId, mkAssignment(mkTopicAssignment(fooTopicId, 0, 1))),
GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedCopiedMember),
// Update the new static member.
GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember),
GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedMember)
);
assertRecordsEquals(expectedRecords, joinResult.records);
assertEquals(
new JoinGroupResponseData()
.setMemberId(newMemberId)
.setGenerationId(10)
.setProtocolType(ConsumerProtocol.PROTOCOL_TYPE)
.setProtocolName("range"),
joinResult.joinFuture.get()
);
context.assertSessionTimeout(groupId, newMemberId, request.sessionTimeoutMs());
context.assertSyncTimeout(groupId, newMemberId, request.rebalanceTimeoutMs());
}
|
public void sendReportMail(Collector collector, boolean collectorServer,
List<JavaInformations> javaInformationsList, Period period) throws Exception { // NOPMD
final File tmpFile = new File(Parameters.TEMPORARY_DIRECTORY,
PdfReport.getFileName(collector.getApplication()));
try {
try (OutputStream output = new BufferedOutputStream(new FileOutputStream(tmpFile))) {
final PdfReport pdfReport = new PdfReport(collector, collectorServer,
javaInformationsList, period, output);
pdfReport.toPdf();
}
final String subject;
final String subjectPrefix = Parameter.MAIL_SUBJECT_PREFIX.getValue();
if (subjectPrefix != null) {
// escape single quotes, which are special characters for MessageFormat
subject = MessageFormat.format(subjectPrefix.replace("'", "''"),
collector.getApplication()) + " - " + period.getLabel();
} else {
subject = I18N.getFormattedString("Monitoring_sur", collector.getApplication())
+ " - " + period.getLabel();
}
final Mailer mailer = new Mailer(Parameter.MAIL_SESSION.getValue());
final String adminEmails = Parameter.ADMIN_EMAILS.getValue();
mailer.send(adminEmails, subject, "", Collections.singletonList(tmpFile), false);
} finally {
if (!tmpFile.delete()) {
tmpFile.deleteOnExit();
}
}
}
|
@Test
public void testSendReportMail() throws Exception {
final Counter counter = new Counter("http", null);
final Collector collector = new Collector("test", Collections.singletonList(counter));
final List<JavaInformations> javaInformationslist = Collections
.singletonList(new JavaInformations(null, true));
setProperty(Parameter.ADMIN_EMAILS, "evernat@free.fr");
setProperty(Parameter.MAIL_SESSION, "mail/Session");
try {
new MailReport().sendReportMail(collector, false, javaInformationslist, Period.SEMAINE);
} catch (final NoInitialContextException e) {
assertNotNull("ok", e);
}
setProperty(Parameter.MAIL_SUBJECT_PREFIX, "[javamelody] ");
try {
new MailReport().sendReportMail(collector, false, javaInformationslist, Period.SEMAINE);
} catch (final NoInitialContextException e) {
assertNotNull("ok", e);
}
// sendReportMailForLocalServer
final String path = "path";
final ServletContext context = createNiceMock(ServletContext.class);
expect(context.getMajorVersion()).andReturn(5).anyTimes();
expect(context.getMinorVersion()).andReturn(0).anyTimes();
expect(context.getContextPath()).andReturn(path).anyTimes();
replay(context);
Parameters.initialize(context);
try {
new MailReport().sendReportMailForLocalServer(collector, Period.SEMAINE);
} catch (final NoInitialContextException e) {
assertNotNull("ok", e);
}
verify(context);
}
|
public static DBSCAN<double[]> fit(double[][] data, int minPts, double radius) {
return fit(data, new KDTree<>(data, data), minPts, radius);
}
|
@Test
public void testGaussianMixture() throws Exception {
System.out.println("Gaussian Mixture");
double[][] x = GaussianMixture.x;
int[] y = GaussianMixture.y;
DBSCAN<double[]> model = DBSCAN.fit(x, 200, 0.8);
System.out.println(model);
double r = RandIndex.of(y, model.y);
double r2 = AdjustedRandIndex.of(y, model.y);
System.out.format("Training rand index = %.2f%%, adjusted rand index = %.2f%%%n", 100.0 * r, 100.0 * r2);
assertEquals(0.5424, r, 1E-4);
assertEquals(0.1215, r2, 1E-4);
System.out.format("MI = %.2f%n", MutualInformation.of(y, model.y));
System.out.format("NMI.joint = %.2f%%%n", 100 * NormalizedMutualInformation.joint(y, model.y));
System.out.format("NMI.max = %.2f%%%n", 100 * NormalizedMutualInformation.max(y, model.y));
System.out.format("NMI.min = %.2f%%%n", 100 * NormalizedMutualInformation.min(y, model.y));
System.out.format("NMI.sum = %.2f%%%n", 100 * NormalizedMutualInformation.sum(y, model.y));
System.out.format("NMI.sqrt = %.2f%%%n", 100 * NormalizedMutualInformation.sqrt(y, model.y));
java.nio.file.Path temp = Write.object(model);
Read.object(temp);
}
|
public byte[] toByteArray() {
ByteArrayOutputStream stream = new ByteArrayOutputStream();
try {
write(stream);
} catch (IOException e) {
// Should not happen as ByteArrayOutputStream does not throw IOException on write
throw new RuntimeException(e);
}
return stream.toByteArray();
}
|
@Test
public void testToByteArray_OP_PUSHDATA2() {
// OP_PUSHDATA2
byte[] bytes = new byte[0x0102];
RANDOM.nextBytes(bytes);
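// The two length bytes that follow OP_PUSHDATA2 are little-endian: 0x0102 -> 0x02, 0x01.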
byte[] expected = ByteUtils.concat(new byte[] { OP_PUSHDATA2, 0x02, 0x01 }, bytes);
byte[] actual = new ScriptChunk(OP_PUSHDATA2, bytes).toByteArray();
assertArrayEquals(expected, actual);
}
|
public static <T> void padRight(Collection<T> list, int minLen, T padObj) {
Objects.requireNonNull(list);
for (int i = list.size(); i < minLen; i++) {
list.add(padObj);
}
}
|
@Test
public void testPadRight() {
final List<String> srcList = CollUtil.newArrayList("a");
final List<String> answerList = CollUtil.newArrayList("a", "b", "b", "b", "b");
CollUtil.padRight(srcList, 5, "b");
assertEquals(srcList, answerList);
}
|
@InvokeOnHeader(Web3jConstants.ETH_BLOCK_NUMBER)
void ethBlockNumber(Message message) throws IOException {
Request<?, EthBlockNumber> request = web3j.ethBlockNumber();
setRequestId(message, request);
EthBlockNumber response = request.send();
boolean hasError = checkForError(message, response);
if (!hasError) {
message.setBody(response.getBlockNumber());
}
}
|
@Test
public void ethBlockNumberTest() throws Exception {
EthBlockNumber response = Mockito.mock(EthBlockNumber.class);
Mockito.when(mockWeb3j.ethBlockNumber()).thenReturn(request);
Mockito.when(request.send()).thenReturn(response);
Mockito.when(response.getBlockNumber()).thenReturn(BigInteger.ONE);
Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.ETH_BLOCK_NUMBER);
template.send(exchange);
BigInteger body = exchange.getIn().getBody(BigInteger.class);
assertEquals(BigInteger.ONE, body);
}
|
@Nonnull
@Override
public Future<?> submit(@Nonnull Runnable runnable) {
submitted.mark();
try {
return delegate.submit(new InstrumentedRunnable(runnable));
} catch (RejectedExecutionException e) {
rejected.mark();
throw e;
}
}
|
@Test
public void reportsTasksInformation() throws Exception {
this.executor = Executors.newCachedThreadPool();
final InstrumentedExecutorService instrumentedExecutorService = new InstrumentedExecutorService(executor, registry, "xs");
final Meter submitted = registry.meter("xs.submitted");
final Counter running = registry.counter("xs.running");
final Meter completed = registry.meter("xs.completed");
final Timer duration = registry.timer("xs.duration");
final Meter rejected = registry.meter("xs.rejected");
assertThat(submitted.getCount()).isEqualTo(0);
assertThat(running.getCount()).isEqualTo(0);
assertThat(completed.getCount()).isEqualTo(0);
assertThat(duration.getCount()).isEqualTo(0);
assertThat(rejected.getCount()).isEqualTo(0);
Future<?> theFuture = instrumentedExecutorService.submit(() -> {
assertThat(submitted.getCount()).isEqualTo(1);
assertThat(running.getCount()).isEqualTo(1);
assertThat(completed.getCount()).isEqualTo(0);
assertThat(duration.getCount()).isEqualTo(0);
assertThat(rejected.getCount()).isEqualTo(0);
});
theFuture.get();
assertThat(submitted.getCount()).isEqualTo(1);
assertThat(running.getCount()).isEqualTo(0);
assertThat(completed.getCount()).isEqualTo(1);
assertThat(duration.getCount()).isEqualTo(1);
assertThat(duration.getSnapshot().size()).isEqualTo(1);
assertThat(rejected.getCount()).isEqualTo(0);
}
|
static void unregisterCommand(PrintStream stream, Admin adminClient, int id) throws Exception {
try {
adminClient.unregisterBroker(id).all().get();
stream.println("Broker " + id + " is no longer registered.");
} catch (ExecutionException ee) {
Throwable cause = ee.getCause();
if (cause instanceof UnsupportedVersionException) {
stream.println("The target cluster does not support the broker unregistration API.");
} else {
throw ee;
}
}
}
|
@Test
public void testUnregisterBroker() throws Exception {
Admin adminClient = new MockAdminClient.Builder().numBrokers(3).
usingRaftController(true).
build();
ByteArrayOutputStream stream = new ByteArrayOutputStream();
ClusterTool.unregisterCommand(new PrintStream(stream), adminClient, 0);
assertEquals("Broker 0 is no longer registered.\n", stream.toString());
}
|
protected DataTranslator()
{
}
|
@Test
public void testDataTranslator() throws IOException
{
boolean debug = false;
String[][][] inputs =
{
// {
// {
// 1 string holding the Pegasus schema in JSON.
// The string may be marked with ##T_START and ##T_END markers. The markers are used for typeref testing.
// If the string contains these markers, then two schemas will be constructed and tested.
// The first schema replaces these markers with two empty strings.
// The second schema replaces these markers with a typeref enclosing the type between these markers.
// },
// {
// 1st string is the input DataMap; the JSON will be deserialized into a DataMap.
// 2nd string is the expected output after translating from DataMap to Avro GenericRecord:
// if translation is successful, this string should be the Avro GenericRecord serialized to JSON;
// otherwise the output should be a string providing diagnostic messages regarding the translation
// failure. In this case, the 2nd string provides a string that will be checked against the
// diagnostic messages. The diagnostic message should contain this string.
// }
// }
{
// record with int field
{
"{\n" +
" \"type\" : \"record\",\n" +
" \"name\" : \"Foo\",\n" +
" \"fields\" : [\n" +
" { \"name\" : \"intRequired\", \"type\" : ##T_START \"int\" ##T_END }\n" +
" ]\n" +
"}\n"
},
{
"{ \"intRequired\" : 42 }",
"{\"intRequired\":42}"
},
{
"{ }",
"Error processing /intRequired"
},
{
"{ \"intRequired\" : null }",
"Error processing /intRequired"
},
{
"{ \"intRequired\" : \"string\" }",
"Error processing /intRequired"
}
},
// record with long field
{
{
"{\n" +
" \"type\" : \"record\",\n" +
" \"name\" : \"Foo\",\n" +
" \"fields\" : [\n" +
" { \"name\" : \"longRequired\", \"type\" : ##T_START \"long\" ##T_END }\n" +
" ]\n" +
"}\n"
},
{
"{ \"longRequired\" : 42 }",
"{\"longRequired\":42}"
},
{
"{ }",
"Error processing /longRequired"
},
{
"{ \"longRequired\" : null }",
"Error processing /longRequired"
},
{
"{ \"longRequired\" : \"string\" }",
"Error processing /longRequired"
}
},
// record with float field
{
{
"{\n" +
" \"type\" : \"record\",\n" +
" \"name\" : \"Foo\",\n" +
" \"fields\" : [\n" +
" { \"name\" : \"floatRequired\", \"type\" : ##T_START \"float\" ##T_END }\n" +
" ]\n" +
"}\n"
},
{
"{ \"floatRequired\" : 42 }",
"{\"floatRequired\":42.0}"
},
{
"{ }",
"Error processing /floatRequired"
},
{
"{ \"floatRequired\" : null }",
"Error processing /floatRequired"
},
{
"{ \"floatRequired\" : \"string\" }",
"Error processing /floatRequired"
}
},
// record with double field
{
{
"{\n" +
" \"type\" : \"record\",\n" +
" \"name\" : \"Foo\",\n" +
" \"fields\" : [\n" +
" { \"name\" : \"doubleRequired\", \"type\" : ##T_START \"double\" ##T_END }\n" +
" ]\n" +
"}\n"
},
{
"{ \"doubleRequired\" : 42 }",
"{\"doubleRequired\":42.0}"
},
{
"{ }",
"Error processing /doubleRequired"
},
{
"{ \"doubleRequired\" : null }",
"Error processing /doubleRequired"
},
{
"{ \"doubleRequired\" : \"string\" }",
"Error processing /doubleRequired"
}
},
{
// record with boolean field
{
"{\n" +
" \"type\" : \"record\",\n" +
" \"name\" : \"Foo\",\n" +
" \"fields\" : [\n" +
" { \"name\" : \"booleanRequired\", \"type\" : ##T_START \"boolean\" ##T_END }\n" +
" ]\n" +
"}\n"
},
{
"{ \"booleanRequired\" : true }",
"{\"booleanRequired\":true}"
},
{
"{ \"booleanRequired\" : false }",
"{\"booleanRequired\":false}"
},
{
"{ }",
"Error processing /booleanRequired"
},
{
"{ \"booleanRequired\" : null }",
"Error processing /booleanRequired"
},
{
"{ \"booleanRequired\" : \"string\" }",
"Error processing /booleanRequired"
}
},
{
// record with string field
{
"{\n" +
" \"type\" : \"record\",\n" +
" \"name\" : \"Foo\",\n" +
" \"fields\" : [\n" +
" { \"name\" : \"stringRequired\", \"type\" : ##T_START \"string\" ##T_END }\n" +
" ]\n" +
"}\n"
},
{
"{ \"stringRequired\" : \"bar\" }",
"{\"stringRequired\":\"bar\"}"
},
{
"{ }",
"Error processing /stringRequired"
},
{
"{ \"stringRequired\" : null }",
"Error processing /stringRequired"
},
{
"{ \"stringRequired\" : false }",
"Error processing /stringRequired"
}
},
{
// record with bytes field
{
"{\n" +
" \"type\" : \"record\",\n" +
" \"name\" : \"Foo\",\n" +
" \"fields\" : [\n" +
" { \"name\" : \"bytesRequired\", \"type\" : ##T_START \"bytes\" ##T_END }\n" +
" ]\n" +
"}\n"
},
{
"{ \"bytesRequired\" : \"12345\\u0001\" }",
"{\"bytesRequired\":\"12345\\u0001\"}"
},
{
"{ }",
"Error processing /bytesRequired"
},
{
"{ \"bytesRequired\" : null }",
"Error processing /bytesRequired"
},
{
"{ \"bytesRequired\" : false }",
"Error processing /bytesRequired"
},
{
"{ \"bytesRequired\" : \"1234\\u0101\" }",
"Error processing /bytesRequired"
}
},
{
// record with fixed field
{
"{\n" +
" \"type\" : \"record\",\n" +
" \"name\" : \"Foo\",\n" +
" \"fields\" : [\n" +
" {\n" +
" \"name\" : \"fixedRequired\",\n" +
" \"type\" : ##T_START { \"type\" : \"fixed\", \"name\" : \"Fixed5\", \"size\" : 5 } ##T_END\n" +
" }\n" +
" ]\n" +
"}\n"
},
{
"{ \"fixedRequired\" : \"12345\" }",
"{\"fixedRequired\":\"12345\"}"
},
{
"{ }",
"Error processing /fixedRequired"
},
{
"{ \"fixedRequired\" : null }",
"Error processing /fixedRequired"
},
{
"{ \"fixedRequired\" : false }",
"Error processing /fixedRequired"
},
{
"{ \"fixedRequired\" : \"1234\" }",
"Error processing /fixedRequired"
},
{
"{ \"fixedRequired\" : \"123456\" }",
"Error processing /fixedRequired"
}
},
{
// record with enum field
{
"{\n" +
" \"type\" : \"record\",\n" +
" \"name\" : \"Foo\",\n" +
" \"fields\" : [\n" +
" {\n" +
" \"name\" : \"enumRequired\",\n" +
" \"type\" : ##T_START {\n" +
" \"name\" : \"Fruits\",\n" +
" \"type\" : \"enum\",\n" +
" \"symbols\" : [ \"APPLE\", \"ORANGE\" ]\n" +
" } ##T_END\n" +
" }\n" +
" ]\n" +
"}\n"
},
{
"{ \"enumRequired\" : \"APPLE\" }",
"{\"enumRequired\":\"APPLE\"}"
},
{
"{ \"enumRequired\" : \"ORANGE\" }",
"{\"enumRequired\":\"ORANGE\"}"
},
{
"{ }",
"Error processing /enumRequired"
},
{
"{ \"enumRequired\" : null }",
"Error processing /enumRequired"
},
{
"{ \"enumRequired\" : false }",
"Error processing /enumRequired"
}
},
{
// record with array field
{
"{\n" +
" \"type\" : \"record\",\n" +
" \"name\" : \"Foo\",\n" +
" \"fields\" : [\n" +
" {\n" +
" \"name\" : \"arrayRequired\",\n" +
" \"type\" : ##T_START {\n" +
" \"type\" : \"array\",\n" +
" \"items\" : {\n" +
" \"name\" : \"Fruits\",\n" +
" \"type\" : \"enum\",\n" +
" \"symbols\" : [ \"APPLE\", \"ORANGE\" ]\n" +
" }\n" +
" } ##T_END\n" +
" }\n" +
" ]\n" +
"}\n"
},
{
"{ \"arrayRequired\" : [] }",
"{\"arrayRequired\":[]}"
},
{
"{ \"arrayRequired\" : [ \"APPLE\", \"ORANGE\" ] }",
"{\"arrayRequired\":[\"APPLE\",\"ORANGE\"]}"
},
{
"{ }",
"Error processing /arrayRequired"
},
{
"{ \"arrayRequired\" : null }",
"Error processing /arrayRequired"
},
{
"{ \"arrayRequired\" : {} }",
"Error processing /arrayRequired"
},
{
"{ \"arrayRequired\" : [ null ] }",
"Error processing /arrayRequired/0"
},
{
"{ \"arrayRequired\" : [ \"APPLE\", \"PINEAPPLE\" ] }",
"Error processing /arrayRequired/1"
}
},
{
// record with map field
{
"{\n" +
" \"type\" : \"record\",\n" +
" \"name\" : \"Foo\",\n" +
" \"fields\" : [\n" +
" {\n" +
" \"name\" : \"mapRequired\",\n" +
" \"type\" : ##T_START {\n" +
" \"type\" : \"map\",\n" +
" \"values\" : \"int\" " +
" } ##T_END\n" +
" }\n" +
" ]\n" +
"}\n"
},
{
"{ \"mapRequired\" : {} }",
"{\"mapRequired\":{}}"
},
{
"{ \"mapRequired\" : { \"x\" : 1} }",
"{\"mapRequired\":{\"x\":1}}"
},
{
"{ }",
"Error processing /mapRequired"
},
{
"{ \"mapRequired\" : null }",
"Error processing /mapRequired"
},
{
"{ \"mapRequired\" : [] }",
"Error processing /mapRequired"
},
{
"{ \"mapRequired\" : { \"x\" : null } }",
"Error processing /mapRequired/x"
},
{
"{ \"mapRequired\" : { \"x\" : \"PINEAPPLE\" } }",
"Error processing /mapRequired/x"
}
},
{
// record with union field
{
"{\n" +
" \"type\" : \"record\",\n" +
" \"name\" : \"foo.Foo\",\n" +
" \"fields\" : [\n" +
" {\n" +
" \"name\" : \"unionRequired\",\n" +
" \"type\" : ##T_START [ \"int\", \"string\", \"foo.Foo\" ] ##T_END\n" +
" }\n" +
" ]\n" +
"}\n"
},
{
"{ \"unionRequired\" : { \"int\" : 5 } }",
"{\"unionRequired\":{\"int\":5}}"
},
{
"{ \"unionRequired\" : { \"string\" : \"s1\" } }",
"{\"unionRequired\":{\"string\":\"s1\"}}"
},
{
"{ \"unionRequired\" : { \"foo.Foo\" : { \"unionRequired\" : { \"int\" : 5 } } } }",
"{\"unionRequired\":{\"##NS(foo.)Foo\":{\"unionRequired\":{\"int\":5}}}}"
},
{
"{ }",
"Error processing /unionRequired"
},
{
"{ \"unionRequired\" : null }",
"Error processing /unionRequired"
},
{
"{ \"unionRequired\" : {} }",
"Error processing /unionRequired"
},
{
"{ \"unionRequired\" : { \"bad\" : 0 } }",
"Error processing /unionRequired"
}
},
{
// record with a required "union with aliases" field
{
"{\n" +
" \"type\" : \"record\",\n" +
" \"name\" : \"foo.Foo\",\n" +
" \"fields\" : [\n" +
" {\n" +
" \"name\" : \"uwaRequiredNoNull\",\n" +
" \"type\" : ##T_START [\n" +
" { \"alias\": \"success\", \"type\": \"string\" },\n" +
" { \"alias\": \"failure\", \"type\": \"string\" }\n" +
" ] ##T_END\n" +
" }\n" +
" ]\n" +
"}\n"
},
{
"{ \"uwaRequiredNoNull\" : { \"success\" : \"Union with aliases!\" } }",
"{\"uwaRequiredNoNull\":{\"success\":{\"string\":\"Union with aliases!\"},\"failure\":null,\"fieldDiscriminator\":\"success\"}}"
},
{
"{ \"uwaRequiredNoNull\" : { \"failure\" : \"Union with aliases!\" } }",
"{\"uwaRequiredNoNull\":{\"success\":null,\"failure\":{\"string\":\"Union with aliases!\"},\"fieldDiscriminator\":\"failure\"}}"
},
{
"{ \"uwaRequiredNoNull\" : null }",
"Error processing /uwaRequiredNoNull"
},
{
"{}",
"Error processing /uwaRequiredNoNull"
},
{
"{ \"uwaRequiredNoNull\" : {} }",
"Error processing /uwaRequiredNoNull"
},
{
"{ \"uwaRequiredNoNull\" : \"Union with aliases!\" }",
"Error processing /uwaRequiredNoNull"
},
{
"{ \"uwaRequiredNoNull\" : { \"string\" : \"Union with aliases!\" } }",
"Error processing /uwaRequiredNoNull"
},
{
"{ \"uwaRequiredNoNull\" : { \"success\" : 123 } }",
"Error processing /uwaRequiredNoNull/success"
}
},
{
// record with a required "union with aliases" field with null member
{
"{\n" +
" \"type\" : \"record\",\n" +
" \"name\" : \"foo.Foo\",\n" +
" \"fields\" : [\n" +
" {\n" +
" \"name\" : \"uwaRequiredWithNull\",\n" +
" \"type\" : ##T_START [\n" +
" \"null\",\n" +
" { \"alias\": \"success\", \"type\": \"string\" },\n" +
" { \"alias\": \"failure\", \"type\": \"string\" }\n" +
" ] ##T_END\n" +
" }\n" +
" ]\n" +
"}\n"
},
{
"{ \"uwaRequiredWithNull\" : { \"success\" : \"Union with aliases!\" } }",
"{\"uwaRequiredWithNull\":{\"success\":{\"string\":\"Union with aliases!\"},\"failure\":null,\"fieldDiscriminator\":\"success\"}}"
},
{
"{ \"uwaRequiredWithNull\" : { \"failure\" : \"Union with aliases!\" } }",
"{\"uwaRequiredWithNull\":{\"success\":null,\"failure\":{\"string\":\"Union with aliases!\"},\"fieldDiscriminator\":\"failure\"}}"
},
{
"{ \"uwaRequiredWithNull\" : null }",
"{\"uwaRequiredWithNull\":{\"success\":null,\"failure\":null,\"fieldDiscriminator\":\"null\"}}"
},
{
"{}",
"Error processing /uwaRequiredWithNull"
},
{
"{ \"uwaRequiredWithNull\" : {} }",
"Error processing /uwaRequiredWithNull"
},
{
"{ \"uwaRequiredWithNull\" : \"Union with aliases!\" }",
"Error processing /uwaRequiredWithNull"
},
{
"{ \"uwaRequiredWithNull\" : { \"string\" : \"Union with aliases!\" } }",
"Error processing /uwaRequiredWithNull"
},
{
"{ \"uwaRequiredWithNull\" : { \"success\" : 123 } }",
"Error processing /uwaRequiredWithNull/success"
}
},
{
// record with array of union with null field:
// this checks that a union containing null which is not converted to an optional field
// still translates, and that the null union member round-trips correctly between Data and Avro.
{
"{\n" +
" \"type\" : \"record\",\n" +
" \"name\" : \"foo.Foo\",\n" +
" \"fields\" : [\n" +
" {\n" +
" \"name\" : \"arrayOfUnionWitNull\",\n" +
" \"type\" : ##T_START { \"type\" : \"array\", \"items\" : [ \"int\", \"null\" ] } ##T_END\n" +
" }\n" +
" ]\n" +
"}\n"
},
{
"{ \"arrayOfUnionWitNull\" : [ { \"int\" : 5 } ] }",
"{\"arrayOfUnionWitNull\":[{\"int\":5}]}"
},
{
"{ \"arrayOfUnionWitNull\" : [ null ] }",
"{\"arrayOfUnionWitNull\":[null]}"
},
{
"{ }",
"Error processing /arrayOfUnionWitNull"
},
{
"{ \"arrayOfUnionWitNull\" : [ {} ] }",
"Error processing /arrayOfUnionWitNull/0"
},
{
"{ \"arrayOfUnionWitNull\" : [ { \"bad\" : 0 } ] }",
"Error processing /arrayOfUnionWitNull/0"
}
},
{
// record with record field.
{
"{ \n" +
" \"type\" : \"record\",\n" +
" \"name\" : \"Foo\",\n" +
" \"fields\" : [\n" +
" {\n" +
" \"name\" : \"bar\",\n" +
" \"type\" : ##T_START {\n" +
" \"name\" : \"Bar\",\n" +
" \"type\" : \"record\",\n" +
" \"fields\" : [\n" +
" {\n" +
" \"name\" : \"baz\",\n" +
" \"type\" : \"int\"\n" +
" }\n" +
" ]\n" +
" } ##T_END\n" +
" }\n" +
" ]\n" +
"}\n"
},
{
"{ \"bar\" : { \"baz\" : 1 } }",
"{\"bar\":{\"baz\":1}}"
},
{
"{ \"bar\" : { \"baz\" : null } }",
"Error processing /bar/baz"
},
},
//
// Optional
//
{
// record with optional non-union field
{
"{\n" +
" \"type\" : \"record\",\n" +
" \"name\" : \"Foo\",\n" +
" \"fields\" : [\n" +
" {\n" +
" \"name\" : \"intOptional\",\n" +
" \"type\" : ##T_START \"int\" ##T_END,\n" +
" \"optional\" : true\n" +
" }\n" +
" ]\n" +
"}\n"
},
{
"{ }",
"{\"intOptional\":null}"
},
{
"{ \"intOptional\" : 42 }",
"{\"intOptional\":{\"int\":42}}"
},
{
"{ \"intOptional\" : null }",
ONE_WAY,
"{\"intOptional\":null}"
},
{
"{ \"intOptional\" : \"s1\" }",
"Error processing /intOptional"
},
{
"{ \"intOptional\" : {} }",
"Error processing /intOptional"
},
},
{
// record with optional union field that does not include null
{
"{\n" +
" \"type\" : \"record\",\n" +
" \"name\" : \"Foo\",\n" +
" \"fields\" : [\n" +
" {\n" +
" \"name\" : \"unionOptional\",\n" +
" \"type\" : ##T_START [ \"int\", \"string\" ] ##T_END,\n" +
" \"optional\" : true\n" +
" }\n" +
" ]\n" +
"}\n"
},
{
"{ }",
"{\"unionOptional\":null}"
},
{
"{ \"unionOptional\" : { \"int\" : 42 } }",
"{\"unionOptional\":{\"int\":42}}"
},
{
"{ \"unionOptional\" : { \"string\" : \"s1\" } }",
"{\"unionOptional\":{\"string\":\"s1\"}}"
},
{
"{ \"unionOptional\" : null }",
ONE_WAY,
"{\"unionOptional\":null}"
},
{
"{ \"unionOptional\" : \"s1\" }",
"Error processing /unionOptional"
},
{
"{ \"unionOptional\" : {} }",
"Error processing /unionOptional"
},
},
{
// record with optional union field that includes null
{
"{\n" +
" \"type\" : \"record\",\n" +
" \"name\" : \"Foo\",\n" +
" \"fields\" : [\n" +
" {\n" +
" \"name\" : \"unionOptional\",\n" +
" \"type\" : ##T_START [ \"null\", \"string\" ] ##T_END,\n" +
" \"optional\" : true\n" +
" }\n" +
" ]\n" +
"}\n"
},
{
"{ }",
"{\"unionOptional\":null}"
},
{
"{ \"unionOptional\" : { \"string\" : \"s1\" } }",
"{\"unionOptional\":{\"string\":\"s1\"}}"
},
{
"{ \"unionOptional\" : null }",
// The round-trip result will drop the optional field.
// A null in the union is translated to an absent field.
ONE_WAY,
"{\"unionOptional\":null}"
},
{
"{ \"unionOptional\" : \"s1\" }",
"Error processing /unionOptional"
},
{
"{ \"unionOptional\" : {} }",
"Error processing /unionOptional"
},
},
{
// record with an optional "union with aliases" field
{
"{\n" +
" \"type\" : \"record\",\n" +
" \"name\" : \"foo.Foo\",\n" +
" \"fields\" : [\n" +
" {\n" +
" \"name\" : \"uwaOptionalNoNull\",\n" +
" \"type\" : ##T_START [\n" +
" { \"alias\": \"success\", \"type\": \"string\" },\n" +
" { \"alias\": \"failure\", \"type\": \"string\" }\n" +
" ] ##T_END,\n" +
" \"optional\": true\n" +
" }\n" +
" ]\n" +
"}\n"
},
{
"{ \"uwaOptionalNoNull\" : { \"success\" : \"Union with aliases!\" } }",
"{\"uwaOptionalNoNull\":{\"##NS(foo.)FooUwaOptionalNoNull\":{\"success\":{\"string\":\"Union with aliases!\"},\"failure\":null,\"fieldDiscriminator\":\"success\"}}}"
},
{
"{}",
"{\"uwaOptionalNoNull\":null}"
},
{
"{ \"uwaOptionalNoNull\" : null }",
ONE_WAY,
"{\"uwaOptionalNoNull\":null}"
},
{
"{ \"uwaOptionalNoNull\" : {} }",
"Error processing /uwaOptionalNoNull"
},
{
"{ \"uwaOptionalNoNull\" : \"Union with aliases!\" }",
"Error processing /uwaOptionalNoNull"
},
{
"{ \"uwaOptionalNoNull\" : { \"string\" : \"Union with aliases!\" } }",
"Error processing /uwaOptionalNoNull"
},
{
"{ \"uwaOptionalNoNull\" : { \"success\" : 123 } }",
"Error processing /uwaOptionalNoNull/success"
}
},
{
// record with an optional "union with aliases" field with null member
{
"{\n" +
" \"type\" : \"record\",\n" +
" \"name\" : \"foo.Foo\",\n" +
" \"fields\" : [\n" +
" {\n" +
" \"name\" : \"uwaOptionalWithNull\",\n" +
" \"type\" : ##T_START [\n" +
" \"null\",\n" +
" { \"alias\": \"success\", \"type\": \"string\" },\n" +
" { \"alias\": \"failure\", \"type\": \"string\" }\n" +
" ] ##T_END,\n" +
" \"optional\": true\n" +
" }\n" +
" ]\n" +
"}\n"
},
{
"{ \"uwaOptionalWithNull\" : { \"success\" : \"Union with aliases!\" } }",
"{\"uwaOptionalWithNull\":{\"##NS(foo.)FooUwaOptionalWithNull\":{\"success\":{\"string\":\"Union with aliases!\"},\"failure\":null,\"fieldDiscriminator\":\"success\"}}}"
},
{
"{}",
"{\"uwaOptionalWithNull\":null}"
},
{
"{ \"uwaOptionalWithNull\" : null }",
"{\"uwaOptionalWithNull\":null}"
},
{
"{ \"uwaOptionalWithNull\" : {} }",
"Error processing /uwaOptionalWithNull"
},
{
"{ \"uwaOptionalWithNull\" : \"Union with aliases!\" }",
"Error processing /uwaOptionalWithNull"
},
{
"{ \"uwaOptionalWithNull\" : { \"string\" : \"Union with aliases!\" } }",
"Error processing /uwaOptionalWithNull"
},
{
"{ \"uwaOptionalWithNull\" : { \"success\" : 123 } }",
"Error processing /uwaOptionalWithNull/success"
}
},
{
// record with optional enum field
{
"{\n" +
" \"type\" : \"record\",\n" +
" \"name\" : \"Foo\",\n" +
" \"fields\" : [\n" +
" {\n" +
" \"name\" : \"enumOptional\",\n" +
" \"type\" : ##T_START { \"type\" : \"enum\", \"name\" : \"foo.bar\", \"symbols\" : [ \"A\", \"B\" ] } ##T_END,\n" +
" \"optional\" : true\n" +
" }\n" +
" ]\n" +
"}\n"
},
{
"{ }",
"{\"enumOptional\":null}"
},
{
"{ \"enumOptional\" : \"A\" } }",
"{\"enumOptional\":{\"##NS(foo.)bar\":\"A\"}}"
},
{
"{ \"enumOptional\" : \"B\" } }",
"{\"enumOptional\":{\"##NS(foo.)bar\":\"B\"}}"
},
{
"{ \"enumOptional\" : {} }",
"Error processing /enumOptional"
},
},
{
// record with optional union field of records
{
"{\n" +
" \"type\" : \"record\",\n" +
" \"name\" : \"Foo\",\n" +
" \"fields\" : [\n" +
" {\n" +
" \"name\" : \"unionOptional\",\n" +
" \"type\" : ##T_START [\n" +
" { \"type\" : \"record\", \"name\" : \"R1\", \"fields\" : [ { \"name\" : \"r1\", \"type\" : \"string\" } ] },\n" +
" { \"type\" : \"record\", \"name\" : \"R2\", \"fields\" : [ { \"name\" : \"r2\", \"type\" : \"int\" } ] },\n" +
" \"int\",\n" +
" \"string\"\n" +
" ] ##T_END,\n" +
" \"optional\" : true\n" +
" }\n" +
" ]\n" +
"}\n"
},
{
"{ }",
"{\"unionOptional\":null}"
},
{
"{ \"unionOptional\" : { \"R1\" : { \"r1\" : \"value\" } } }",
"{\"unionOptional\":{\"R1\":{\"r1\":\"value\"}}}"
},
{
"{ \"unionOptional\" : { \"R2\" : { \"r2\" : 52 } } }",
"{\"unionOptional\":{\"R2\":{\"r2\":52}}}"
},
{
"{ \"unionOptional\" : { \"int\" : 52 } }",
"{\"unionOptional\":{\"int\":52}}"
},
{
"{ \"unionOptional\" : { \"string\" : \"value\" } }",
"{\"unionOptional\":{\"string\":\"value\"}}"
},
{
"{ \"unionOptional\" : {} }",
"Error processing /unionOptional"
},
},
{
// record with optional union field with alias, union types are RECORD
{
"{\n" +
" \"type\" : \"record\",\n" +
" \"name\" : \"Foo\",\n" +
" \"fields\" : [\n" +
" {\n" +
" \"name\" : \"unionOptionalAlias\",\n" +
" \"type\" : ##T_START [\n" +
" { " +
" \"type\" : { \"type\" : \"record\", \"name\" : \"R1\", \"fields\" : [ { \"name\" : \"r1\", \"type\" : \"string\" } ] }, " +
" \"alias\": \"success\"" +
" },\n" +
" { " +
" \"type\": { \"type\" : \"record\", \"name\" : \"R2\", \"fields\" : [ { \"name\" : \"r2\", \"type\" : \"int\" } ] }, " +
" \"alias\": \"failure\"" +
" }\n" +
" ] ##T_END,\n" +
" \"optional\" : true\n" +
" }\n" +
" ]\n" +
"}\n"
},
{
"{ \"unionOptionalAlias\" : { \"success\" : { \"r1\" : \"value\" } } }",
"{\"unionOptionalAlias\":{\"FooUnionOptionalAlias\":{\"success\":{\"R1\":{\"r1\":\"value\"}},\"failure\":null,\"fieldDiscriminator\":\"success\"}}}"
},
{
"{}",
"{\"unionOptionalAlias\":null}"
},
{
"{ \"unionOptionalAlias\" : {} }",
"Error processing /unionOptionalAlias"
},
{
"{ \"unionOptionalAlias\" : { \"success\" : { \"r1\" : 123 } } }",
"Error processing /unionOptionalAlias/success"
}
}
};
// test translation of Pegasus DataMap to Avro GenericRecord.
for (String[][] row : inputs)
{
String schemaText = row[0][0];
if (schemaText.contains("##T_START"))
{
assertTrue(schemaText.contains("##T_END"));
String noTyperefSchemaText = schemaText.replace("##T_START", "").replace("##T_END", "");
assertFalse(noTyperefSchemaText.contains("##T_"));
assertFalse(noTyperefSchemaText.contains("typeref"));
String typerefSchemaText = schemaText
.replace("##T_START", "{ \"type\" : \"typeref\", \"name\" : \"Ref\", \"ref\" : ")
.replace("##T_END", "}");
assertFalse(typerefSchemaText.contains("##T_"));
assertTrue(typerefSchemaText.contains("typeref"));
testDataTranslation(noTyperefSchemaText, row);
testDataTranslation(typerefSchemaText, row);
}
else
{
assertFalse(schemaText.contains("##"));
testDataTranslation(schemaText, row);
}
}
}
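The ##T_START/##T_END markers above drive a plain string substitution, mirrored here as a minimal self-contained sketch (the class name and the intRequired field are illustrative, not from the test):

public class TyperefVariants {
    public static void main(String[] args) {
        // One schema template, two variants: with and without a typeref wrapper.
        String template =
            "{ \"type\" : \"record\", \"name\" : \"Foo\", \"fields\" : [ "
                + "{ \"name\" : \"intRequired\", \"type\" : ##T_START \"int\" ##T_END } ] }";

        // Variant 1: markers removed, leaving the raw type.
        String plain = template.replace("##T_START", "").replace("##T_END", "");

        // Variant 2: the type wrapped in a typeref named Ref, as in the loop above.
        String typeref = template
            .replace("##T_START", "{ \"type\" : \"typeref\", \"name\" : \"Ref\", \"ref\" : ")
            .replace("##T_END", "}");

        System.out.println(plain);
        System.out.println(typeref);
    }
}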
|
public CoordinatorResult<OffsetCommitResponseData, CoordinatorRecord> commitOffset(
RequestContext context,
OffsetCommitRequestData request
) throws ApiException {
Group group = validateOffsetCommit(context, request);
// In the old consumer group protocol, the offset commits maintain the session if
// the group is in Stable or PreparingRebalance state.
if (group.type() == Group.GroupType.CLASSIC) {
ClassicGroup classicGroup = (ClassicGroup) group;
if (classicGroup.isInState(ClassicGroupState.STABLE) || classicGroup.isInState(ClassicGroupState.PREPARING_REBALANCE)) {
groupMetadataManager.rescheduleClassicGroupMemberHeartbeat(
classicGroup,
classicGroup.member(request.memberId())
);
}
}
final OffsetCommitResponseData response = new OffsetCommitResponseData();
final List<CoordinatorRecord> records = new ArrayList<>();
final long currentTimeMs = time.milliseconds();
final OptionalLong expireTimestampMs = expireTimestampMs(request.retentionTimeMs(), currentTimeMs);
request.topics().forEach(topic -> {
final OffsetCommitResponseTopic topicResponse = new OffsetCommitResponseTopic().setName(topic.name());
response.topics().add(topicResponse);
topic.partitions().forEach(partition -> {
if (isMetadataInvalid(partition.committedMetadata())) {
topicResponse.partitions().add(new OffsetCommitResponsePartition()
.setPartitionIndex(partition.partitionIndex())
.setErrorCode(Errors.OFFSET_METADATA_TOO_LARGE.code()));
} else {
log.debug("[GroupId {}] Committing offsets {} for partition {}-{} from member {} with leader epoch {}.",
request.groupId(), partition.committedOffset(), topic.name(), partition.partitionIndex(),
request.memberId(), partition.committedLeaderEpoch());
topicResponse.partitions().add(new OffsetCommitResponsePartition()
.setPartitionIndex(partition.partitionIndex())
.setErrorCode(Errors.NONE.code()));
final OffsetAndMetadata offsetAndMetadata = OffsetAndMetadata.fromRequest(
partition,
currentTimeMs,
expireTimestampMs
);
records.add(GroupCoordinatorRecordHelpers.newOffsetCommitRecord(
request.groupId(),
topic.name(),
partition.partitionIndex(),
offsetAndMetadata,
metadataImage.features().metadataVersion()
));
}
});
});
if (!records.isEmpty()) {
metrics.record(GroupCoordinatorMetrics.OFFSET_COMMITS_SENSOR_NAME, records.size());
}
return new CoordinatorResult<>(records, response);
}
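The isMetadataInvalid call above gates the OFFSET_METADATA_TOO_LARGE error path. A plausible sketch of that check, assuming a configured maximum size (the constant below is an assumption, not the coordinator's actual configuration):

// Hypothetical sketch: commit metadata is rejected only when it is present
// and longer than the configured maximum.
private static boolean isMetadataInvalidSketch(String metadata) {
    final int offsetMetadataMaxSize = 4096; // assumed config value
    return metadata != null && metadata.length() > offsetMetadataMaxSize;
}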
|
@Test
public void testGenericGroupOffsetCommitWithFencedInstanceId() {
OffsetMetadataManagerTestContext context = new OffsetMetadataManagerTestContext.Builder().build();
// Create an empty group.
ClassicGroup group = context.groupMetadataManager.getOrMaybeCreateClassicGroup(
"foo",
true
);
// Add member with static id.
group.add(mkGenericMember("member", Optional.of("new-instance-id")));
// Verify that the request is rejected with the correct exception.
assertThrows(UnknownMemberIdException.class, () -> context.commitOffset(
new OffsetCommitRequestData()
.setGroupId("foo")
.setMemberId("member")
.setGroupInstanceId("old-instance-id")
.setGenerationIdOrMemberEpoch(10)
.setTopics(Collections.singletonList(
new OffsetCommitRequestData.OffsetCommitRequestTopic()
.setName("bar")
.setPartitions(Collections.singletonList(
new OffsetCommitRequestData.OffsetCommitRequestPartition()
.setPartitionIndex(0)
.setCommittedOffset(100L)
))
))
)
);
}
|
public void wakeup() {
// wakeup should be safe without holding the client lock since it simply delegates to
// Selector's wakeup, which is thread-safe
log.debug("Received user wakeup");
this.wakeup.set(true);
this.client.wakeup();
}
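A minimal usage sketch of this contract, assuming the consumerClient and time fixtures from the test below: one thread blocks in poll while another calls wakeup, which surfaces as a WakeupException in the polling thread.

// Hypothetical sketch: wakeup() may be invoked from any thread because it
// only sets a flag and delegates to the thread-safe Selector wakeup.
Thread interrupter = new Thread(() -> consumerClient.wakeup());
interrupter.start();
try {
    consumerClient.poll(time.timer(Long.MAX_VALUE));
} catch (WakeupException e) {
    // expected: the pending wakeup flag was consumed by poll
}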
|
@Test
public void wakeup() {
RequestFuture<ClientResponse> future = consumerClient.send(node, heartbeat());
consumerClient.wakeup();
assertThrows(WakeupException.class, () -> consumerClient.poll(time.timer(0)));
client.respond(heartbeatResponse(Errors.NONE));
consumerClient.poll(future);
assertTrue(future.isDone());
}
|
List<DataflowPackage> stageClasspathElements(
Collection<StagedFile> classpathElements, String stagingPath, CreateOptions createOptions) {
return stageClasspathElements(classpathElements, stagingPath, DEFAULT_SLEEPER, createOptions);
}
|
@Test
public void testPackageUploadWithDirectorySucceeds() throws Exception {
Pipe pipe = Pipe.open();
File tmpDirectory = tmpFolder.newFolder("folder");
tmpFolder.newFolder("folder", "empty_directory");
tmpFolder.newFolder("folder", "directory");
makeFileWithContents("folder/file.txt", "This is a test!");
makeFileWithContents("folder/directory/file.txt", "This is also a test!");
when(mockGcsUtil.getObjects(anyListOf(GcsPath.class)))
.thenReturn(
ImmutableList.of(
StorageObjectOrIOException.create(new FileNotFoundException("some/path"))));
when(mockGcsUtil.create(any(GcsPath.class), any(GcsUtil.CreateOptions.class)))
.thenReturn(pipe.sink());
defaultPackageUtil.stageClasspathElements(
ImmutableList.of(makeStagedFile(tmpDirectory.getAbsolutePath())),
STAGING_PATH,
createOptions);
verify(mockGcsUtil).getObjects(anyListOf(GcsPath.class));
verify(mockGcsUtil).create(any(GcsPath.class), any(GcsUtil.CreateOptions.class));
verifyNoMoreInteractions(mockGcsUtil);
List<String> zipEntryNames = new ArrayList<>();
try (ZipInputStream inputStream = new ZipInputStream(Channels.newInputStream(pipe.source()))) {
for (ZipEntry entry = inputStream.getNextEntry();
entry != null;
entry = inputStream.getNextEntry()) {
zipEntryNames.add(entry.getName());
}
}
assertThat(
zipEntryNames, containsInAnyOrder("directory/file.txt", "empty_directory/", "file.txt"));
}
|
public TableView addRow(final Object... row) {
if (row.length > sizeOfColumnsInRow) {
throw new IllegalArgumentException(String.format("Expecting the size of row to be %d but was %d", sizeOfColumnsInRow, row.length));
}
String[] columns = Arrays.stream(row).map(Object::toString).map(String::trim).toArray(String[]::new);
for (int i = 0; i < columns.length; i++) {
maxContentLengthOfColumns[i] = Math.max(maxContentLengthOfColumns[i], columns[i].length());
}
table.add(columns);
return this;
}
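A short usage sketch, assuming a TableView(String... headers) constructor like the one in the test below: each addRow stringifies and trims the cells, then widens the tracked column widths as needed.

// Hypothetical usage: after these rows, the tracked width of column 0 is at
// least "alice".length() == 5. Shorter rows are accepted; only rows with more
// cells than there are headers are rejected.
TableView view = new TableView("name", "age");
view.addRow("alice", 30);
view.addRow("bob", 7);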
|
@Test
void testUnexpectedColumns() {
TableView tableView = new TableView("header 1", "header 2", "header 3", "header 4");
Assertions.assertThatExceptionOfType(IllegalArgumentException.class)
.isThrownBy(() -> tableView.addRow("column 1", "column 2", "column 3", "column 4", "column 5"))
.withMessage("Expecting the size of row to be 4 but was 5");
}
|
ConsumerRecord<Object, Object> deserialize(final ProcessorContext<?, ?> processorContext,
final ConsumerRecord<byte[], byte[]> rawRecord) {
try {
return new ConsumerRecord<>(
rawRecord.topic(),
rawRecord.partition(),
rawRecord.offset(),
rawRecord.timestamp(),
TimestampType.CREATE_TIME,
rawRecord.serializedKeySize(),
rawRecord.serializedValueSize(),
sourceNode.deserializeKey(rawRecord.topic(), rawRecord.headers(), rawRecord.key()),
sourceNode.deserializeValue(rawRecord.topic(), rawRecord.headers(), rawRecord.value()),
rawRecord.headers(),
Optional.empty()
);
} catch (final RuntimeException deserializationException) {
handleDeserializationFailure(deserializationExceptionHandler, processorContext, deserializationException, rawRecord, log, droppedRecordsSensor, sourceNode().name());
return null; // 'handleDeserializationFailure' would either throw or swallow -- if we swallow we need to skip the record by returning 'null'
}
}
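An illustrative sketch (not the actual Streams implementation) of the throw-or-swallow contract the catch block relies on, using the DeserializationHandlerResponse enum:

// If the handler says FAIL, the failure is rethrown wrapped in a
// StreamsException; if it says CONTINUE, the error is swallowed and the
// caller skips the record via the null return.
static ConsumerRecord<Object, Object> handleFailureSketch(
        final DeserializationExceptionHandler.DeserializationHandlerResponse response,
        final RuntimeException cause) {
    if (response == DeserializationExceptionHandler.DeserializationHandlerResponse.FAIL) {
        throw new StreamsException("Deserialization handler chose to fail", cause);
    }
    return null; // CONTINUE: swallow, skip the poison record
}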
|
@Test
public void shouldFailWhenDeserializationFailsAndExceptionHandlerReturnsNull() {
try (final Metrics metrics = new Metrics()) {
final RecordDeserializer recordDeserializer = new RecordDeserializer(
new TheSourceNode(
sourceNodeName,
true,
false,
"key",
"value"
),
new DeserializationExceptionHandlerMock(
Optional.empty(),
rawRecord,
sourceNodeName,
taskId
),
new LogContext(),
metrics.sensor("dropped-records")
);
final StreamsException exception = assertThrows(
StreamsException.class,
() -> recordDeserializer.deserialize(context, rawRecord)
);
assertEquals("Fatal user code error in deserialization error callback", exception.getMessage());
assertInstanceOf(NullPointerException.class, exception.getCause());
assertEquals("Invalid DeserializationExceptionHandler response.", exception.getCause().getMessage());
}
}
|
public boolean init(String uri, final SnapshotThrottle snapshotThrottle, final SnapshotCopierOptions opts) {
this.rpcService = opts.getRaftClientService();
this.timerManager = opts.getTimerManager();
this.raftOptions = opts.getRaftOptions();
this.snapshotThrottle = snapshotThrottle;
final int prefixSize = Snapshot.REMOTE_SNAPSHOT_URI_SCHEME.length();
if (uri == null || !uri.startsWith(Snapshot.REMOTE_SNAPSHOT_URI_SCHEME)) {
LOG.error("Invalid uri {}.", uri);
return false;
}
uri = uri.substring(prefixSize);
final int slashPos = uri.indexOf('/');
final String ipAndPort = uri.substring(0, slashPos);
uri = uri.substring(slashPos + 1);
try {
this.readId = Long.parseLong(uri);
final String[] ipAndPortStrs = ipAndPort.split(":");
this.endpoint = new Endpoint(ipAndPortStrs[0], Integer.parseInt(ipAndPortStrs[1]));
} catch (final Exception e) {
LOG.error("Fail to parse readerId or endpoint.", e);
return false;
}
if (!this.rpcService.connect(this.endpoint)) {
LOG.error("Fail to init channel to {}.", this.endpoint);
return false;
}
return true;
}
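A worked example of the parsing above, using the URI from the test below and assuming the scheme constant Snapshot.REMOTE_SNAPSHOT_URI_SCHEME is "remote://":

String uri = "remote://localhost:8081/999";
String rest = uri.substring("remote://".length());       // "localhost:8081/999"
int slashPos = rest.indexOf('/');
String ipAndPort = rest.substring(0, slashPos);          // "localhost:8081"
long readId = Long.parseLong(rest.substring(slashPos + 1)); // 999
// the endpoint becomes localhost:8081; init then tries to connect to it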
|
@Test
public void testInitFail() {
Mockito.when(rpcService.connect(new Endpoint("localhost", 8081))).thenReturn(false);
assertFalse(copier.init("remote://localhost:8081/999", null, new SnapshotCopierOptions(GROUP_ID, rpcService,
timerManager, new RaftOptions(), new NodeOptions())));
}
|
public ObjectArray() {
this(0);
}
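A minimal grow-on-add sketch consistent with the test below (the real ObjectArray may use a different growth policy; this shape is an assumption): the backing objects array keeps its identity until capacity is exceeded, then is replaced by a larger copy.

public class ObjectArraySketch {
    Object[] objects;
    private int size;

    public ObjectArraySketch(int capacity) {
        this.objects = new Object[capacity];
    }

    public void add(Object o) {
        if (size == objects.length) {
            // growth replaces the backing array, changing its identity
            objects = java.util.Arrays.copyOf(objects, Math.max(1, objects.length * 2));
        }
        objects[size++] = o;
    }
}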
|
@Test
public void testObjectArray() {
ObjectArray array = new ObjectArray(10);
for (int i = 0; i < 10; i++) {
array.add("abc");
}
assertEquals(array.objects, IntStream.range(0, 10).mapToObj(i -> "abc").toArray());
Object[] elementData = array.objects;
array.add(1);
assertNotSame(elementData, array.objects);
}
|
public static <T extends PipelineOptions> T as(Class<T> klass) {
return new Builder().as(klass);
}
|
@Test
public void testMultipleGettersWithInconsistentJsonIgnore() {
// Initial construction is valid.
MultiGetters options = PipelineOptionsFactory.as(MultiGetters.class);
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage("Property getters are inconsistently marked with @JsonIgnore:");
expectedException.expectMessage("property [object] to be marked on all");
expectedException.expectMessage(
"found only on [org.apache.beam.sdk.options." + "PipelineOptionsFactoryTest$MultiGetters]");
expectedException.expectMessage("property [other] to be marked on all");
expectedException.expectMessage(
"found only on [org.apache.beam.sdk.options."
+ "PipelineOptionsFactoryTest$MultipleGettersWithInconsistentJsonIgnore]");
expectedException.expectMessage(
anyOf(
containsString(
java.util.Arrays.toString(
new String[] {
"org.apache.beam.sdk.options."
+ "PipelineOptionsFactoryTest$MultipleGettersWithInconsistentJsonIgnore",
"org.apache.beam.sdk.options.PipelineOptionsFactoryTest$MultiGetters"
})),
containsString(
java.util.Arrays.toString(
new String[] {
"org.apache.beam.sdk.options.PipelineOptionsFactoryTest$MultiGetters",
"org.apache.beam.sdk.options."
+ "PipelineOptionsFactoryTest$MultipleGettersWithInconsistentJsonIgnore"
}))));
expectedException.expectMessage(not(containsString("property [consistent]")));
// When we attempt to convert, we should error immediately
options.as(MultipleGettersWithInconsistentJsonIgnore.class);
}
|
public void patchBoardById(
final Long boardId,
final Long memberId,
final BoardUpdateRequest request
) {
Board board = findBoardWithImages(boardId);
board.validateWriter(memberId);
BoardUpdateResult result = board.update(request.title(), request.content(), request.addedImages(), request.deletedImages(), imageConverter);
imageUploader.upload(result.addedImages(), request.addedImages());
imageUploader.delete(result.deletedImages());
}
|
@Test
void 게시글을_수정한다() {
    // given
    Board savedBoard = boardRepository.save(게시글_생성_사진없음());
    Long boardId = savedBoard.getId();
    Long memberId = savedBoard.getWriterId();
    MockMultipartFile file = new MockMultipartFile("name", "origin.jpg", "image", "content".getBytes());
    BoardUpdateRequest req = new BoardUpdateRequest("수정", "수정", new ArrayList<>(List.of(file)), new ArrayList<>());
    // when
    boardService.patchBoardById(boardId, memberId, req);
    // then
    assertSoftly(softly -> {
        softly.assertThat(savedBoard.getPost().getTitle()).isEqualTo("수정");
        softly.assertThat(savedBoard.getPost().getContent()).isEqualTo("수정");
});
}
|
@Override
public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWithSourceInfo,
List<String> partNames, boolean areAllPartsFound) throws MetaException {
checkStatisticsList(colStatsWithSourceInfo);
ColumnStatisticsObj statsObj = null;
String colType = null;
String colName = null;
// check if all the ColumnStatisticsObjs contain stats and all the ndv are
// bitvectors
boolean doAllPartitionContainStats = partNames.size() == colStatsWithSourceInfo.size();
NumDistinctValueEstimator ndvEstimator = null;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
if (statsObj == null) {
colName = cso.getColName();
colType = cso.getColType();
statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType,
cso.getStatsData().getSetField());
LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName,
doAllPartitionContainStats);
}
StringColumnStatsDataInspector stringColumnStatsData = stringInspectorFromStats(cso);
if (stringColumnStatsData.getNdvEstimator() == null) {
ndvEstimator = null;
break;
} else {
// check if all of the bit vectors can merge
NumDistinctValueEstimator estimator = stringColumnStatsData.getNdvEstimator();
if (ndvEstimator == null) {
ndvEstimator = estimator;
} else {
if (ndvEstimator.canMerge(estimator)) {
continue;
} else {
ndvEstimator = null;
break;
}
}
}
}
if (ndvEstimator != null) {
ndvEstimator = NumDistinctValueEstimatorFactory
.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
LOG.debug("all of the bit vectors can merge for " + colName + " is " + (ndvEstimator != null));
ColumnStatisticsData columnStatisticsData = initColumnStatisticsData();
if (doAllPartitionContainStats || colStatsWithSourceInfo.size() < 2) {
StringColumnStatsDataInspector aggregateData = null;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
StringColumnStatsDataInspector newData = stringInspectorFromStats(cso);
if (ndvEstimator != null) {
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
aggregateData
.setMaxColLen(Math.max(aggregateData.getMaxColLen(), newData.getMaxColLen()));
aggregateData
.setAvgColLen(Math.max(aggregateData.getAvgColLen(), newData.getAvgColLen()));
aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs()));
}
}
if (ndvEstimator != null) {
// if all the ColumnStatisticsObjs contain bitvectors, we do not need to
// use uniform distribution assumption because we can merge bitvectors
// to get a good estimation.
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
} else {
// aggregateData already holds the maximum ndv across all partitions
}
columnStatisticsData.setStringStats(aggregateData);
} else {
// TODO: bail out if missing stats are over a certain threshold
// we need extrapolation
LOG.debug("start extrapolation for " + colName);
Map<String, Integer> indexMap = new HashMap<>();
for (int index = 0; index < partNames.size(); index++) {
indexMap.put(partNames.get(index), index);
}
Map<String, Double> adjustedIndexMap = new HashMap<>();
Map<String, ColumnStatisticsData> adjustedStatsMap = new HashMap<>();
if (ndvEstimator == null) {
// if not every partition uses bitvector for ndv, we just fall back to
// the traditional extrapolation methods.
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
adjustedIndexMap.put(partName, (double) indexMap.get(partName));
adjustedStatsMap.put(partName, cso.getStatsData());
}
} else {
// we first merge all the adjacent bitvectors that we could merge and
// derive new partition names and index.
StringBuilder pseudoPartName = new StringBuilder();
double pseudoIndexSum = 0;
int length = 0;
int curIndex = -1;
StringColumnStatsDataInspector aggregateData = null;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
StringColumnStatsDataInspector newData =
stringInspectorFromStats(cso);
// newData.isSetBitVectors() should be true for sure because we
// already checked it before.
if (indexMap.get(partName) != curIndex) {
// There is a bitvector, but it is not adjacent to the previous ones.
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setStringStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
// reset everything
pseudoPartName = new StringBuilder();
pseudoIndexSum = 0;
length = 0;
ndvEstimator = NumDistinctValueEstimatorFactory
.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
aggregateData = null;
}
curIndex = indexMap.get(partName);
pseudoPartName.append(partName);
pseudoIndexSum += curIndex;
length++;
curIndex++;
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
aggregateData.setAvgColLen(Math.max(aggregateData.getAvgColLen(),
newData.getAvgColLen()));
aggregateData.setMaxColLen(Math.max(aggregateData.getMaxColLen(),
newData.getMaxColLen()));
aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
}
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setStringStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
}
}
extrapolate(columnStatisticsData, partNames.size(), colStatsWithSourceInfo.size(),
adjustedIndexMap, adjustedStatsMap, -1);
}
LOG.debug(
"Ndv estimatation for {} is {} # of partitions requested: {} # of partitions found: {}",
colName, columnStatisticsData.getStringStats().getNumDVs(), partNames.size(),
colStatsWithSourceInfo.size());
statsObj.setStatsData(columnStatisticsData);
return statsObj;
}
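In miniature, the NDV rule implemented above, with illustrative names: merge the estimators when every partition carries a mergeable one, otherwise fall back to the maximum per-partition NDV.

// Illustrative sketch only; the aggregator above additionally handles
// extrapolation when some partitions are missing stats.
static long aggregateNumDVsSketch(List<Long> perPartitionNdvs,
                                  NumDistinctValueEstimator mergedOrNull) {
    if (mergedOrNull != null) {
        return mergedOrNull.estimateNumDistinctValues();
    }
    return perPartitionNdvs.stream().mapToLong(Long::longValue).max().orElse(0L);
}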
|
@Test
public void testAggregateMultiStatsWhenUnmergeableBitVectors() throws MetaException {
List<String> partitions = Arrays.asList("part1", "part2", "part3");
ColumnStatisticsData data1 = new ColStatsBuilder<>(String.class).numNulls(1).numDVs(3).avgColLen(20.0 / 3).maxColLen(13)
.fmSketch(S_1, S_2, S_3).build();
ColumnStatisticsData data2 = new ColStatsBuilder<>(String.class).numNulls(2).numDVs(3).avgColLen(14).maxColLen(18)
.hll(S_3, S_4, S_5).build();
ColumnStatisticsData data3 = new ColStatsBuilder<>(String.class).numNulls(3).numDVs(2).avgColLen(17.5).maxColLen(18)
.hll(S_6, S_7).build();
List<ColStatsObjWithSourceInfo> statsList = Arrays.asList(
createStatsWithInfo(data1, TABLE, COL, partitions.get(0)),
createStatsWithInfo(data2, TABLE, COL, partitions.get(1)),
createStatsWithInfo(data3, TABLE, COL, partitions.get(2)));
StringColumnStatsAggregator aggregator = new StringColumnStatsAggregator();
ColumnStatisticsObj computedStatsObj = aggregator.aggregate(statsList, partitions, true);
// the aggregation does not update the bitvector (it keeps the first one), only numDVs;
// numDVs is set to the maximum among all stats when non-mergeable bitvectors are detected
ColumnStatisticsData expectedStats = new ColStatsBuilder<>(String.class).numNulls(6).numDVs(3).avgColLen(17.5).maxColLen(18)
.fmSketch(S_1, S_2, S_3).build();
Assert.assertEquals(expectedStats, computedStatsObj.getStatsData());
// both useDensityFunctionForNDVEstimation and ndvTuner are ignored by StringColumnStatsAggregator
aggregator.useDensityFunctionForNDVEstimation = true;
computedStatsObj = aggregator.aggregate(statsList, partitions, true);
Assert.assertEquals(expectedStats, computedStatsObj.getStatsData());
aggregator.useDensityFunctionForNDVEstimation = false;
double[] tunerValues = new double[] { 0, 0.5, 0.75, 1 };
for (double tunerValue : tunerValues) {
aggregator.ndvTuner = tunerValue;
computedStatsObj = aggregator.aggregate(statsList, partitions, true);
Assert.assertEquals(expectedStats, computedStatsObj.getStatsData());
}
}
|
public Transaction createSend(Address address, Coin value)
throws InsufficientMoneyException, CompletionException {
return createSend(address, value, false);
}
|
@Test
public void balances() throws Exception {
Coin nanos = COIN;
Transaction tx1 = sendMoneyToWallet(AbstractBlockChain.NewBlockType.BEST_CHAIN, nanos);
assertEquals(nanos, tx1.getValueSentToMe(wallet));
assertTrue(tx1.getWalletOutputs(wallet).size() >= 1);
// Send 0.10 to somebody else.
Transaction send1 = wallet.createSend(OTHER_ADDRESS, valueOf(0, 10));
// Reserialize.
Transaction send2 = TESTNET_PARAMS.getDefaultSerializer().makeTransaction(ByteBuffer.wrap(send1.serialize()));
assertEquals(nanos, send2.getValueSentFromMe(wallet));
assertEquals(ZERO.subtract(valueOf(0, 10)), send2.getValue(wallet));
}
|
public static TDigest merge(double compression, Iterable<TDigest> subData) {
Preconditions.checkArgument(subData.iterator().hasNext(), "Can't merge 0 digests");
List<TDigest> elements = Lists.newArrayList(subData);
int n = Math.max(1, elements.size() / 4);
TDigest r = new TDigest(compression, elements.get(0).gen);
if (elements.get(0).recordAllData) {
r.recordAllData();
}
for (int i = 0; i < elements.size(); i += n) {
if (n > 1) {
r.add(merge(compression, elements.subList(i, Math.min(i + n, elements.size()))));
} else {
r.add(elements.get(i));
}
}
return r;
}
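A brief usage sketch of the grouped merge, following the constructor and calls used in the test below: sub-digests accumulated independently are combined at the same compression.

Random gen = new Random(42);
List<TDigest> shards = Lists.newArrayList();
for (int i = 0; i < 4; i++) {
    shards.add(new TDigest(100, gen));
}
// ... add values to each shard on its own slice of the data ...
TDigest combined = TDigest.merge(100, shards);
double median = combined.quantile(0.5); // query the merged digest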
|
@Test
public void testMerge() {
Random gen = RandomUtils.getRandom();
for (int parts : new int[]{2, 5, 10, 20, 50, 100}) {
List<Double> data = Lists.newArrayList();
TDigest dist = new TDigest(100, gen);
dist.recordAllData();
List<TDigest> many = Lists.newArrayList();
for (int i = 0; i < 100; i++) {
many.add(new TDigest(100, gen).recordAllData());
}
// we accumulate the data into multiple sub-digests
List<TDigest> subs = Lists.newArrayList();
for (int i = 0; i < parts; i++) {
subs.add(new TDigest(50, gen).recordAllData());
}
for (int i = 0; i < 100000; i++) {
double x = gen.nextDouble();
data.add(x);
dist.add(x);
subs.get(i % parts).add(x);
}
dist.compress();
Collections.sort(data);
// collect the raw data from the sub-digests
List<Double> data2 = Lists.newArrayList();
for (TDigest digest : subs) {
for (TDigest.Group group : digest.centroids()) {
Iterables.addAll(data2, group.data());
}
}
Collections.sort(data2);
// verify that the raw data all got recorded
assertEquals(data.size(), data2.size());
Iterator<Double> ix = data.iterator();
for (Double x : data2) {
assertEquals(ix.next(), x);
}
// now merge the sub-digests
TDigest dist2 = TDigest.merge(50, subs);
for (double q : new double[]{0.001, 0.01, 0.1, 0.2, 0.3, 0.5}) {
double z = quantile(q, data);
double e1 = dist.quantile(q) - z;
double e2 = dist2.quantile(q) - z;
System.out.printf("quantile\t%d\t%.6f\t%.6f\t%.6f\t%.6f\t%.6f\n", parts, q, z - q, e1, e2, Math.abs(e2) / q);
assertTrue(String.format("parts=%d, q=%.4f, e1=%.5f, e2=%.5f, rel=%.4f", parts, q, e1, e2, Math.abs(e2) / q), Math.abs(e2) / q < 0.1);
assertTrue(String.format("parts=%d, q=%.4f, e1=%.5f, e2=%.5f, rel=%.4f", parts, q, e1, e2, Math.abs(e2) / q), Math.abs(e2) < 0.015);
}
for (double x : new double[]{0.001, 0.01, 0.1, 0.2, 0.3, 0.5}) {
double z = cdf(x, data);
double e1 = dist.cdf(x) - z;
double e2 = dist2.cdf(x) - z;
System.out.printf("cdf\t%d\t%.6f\t%.6f\t%.6f\t%.6f\t%.6f\n", parts, x, z - x, e1, e2, Math.abs(e2) / x);
assertTrue(String.format("parts=%d, x=%.4f, e1=%.5f, e2=%.5f", parts, x, e1, e2), Math.abs(e2) < 0.015);
assertTrue(String.format("parts=%d, x=%.4f, e1=%.5f, e2=%.5f", parts, x, e1, e2), Math.abs(e2) / x < 0.1);
}
}
}
|