focal_method | test_case |
---|---|
public List<UiView> views() {
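// Return the registered views only while at least one validity flag is set; otherwise an empty immutable list.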
return (isValid || ui2Valid) ? viewList : ImmutableList.of();
} | @Test
public void twoViews() {
viewList = ImmutableList.of(FOO_VIEW, BAR_VIEW);
ext = new UiExtension.Builder(cl, viewList).build();
assertEquals("expected 2 views", 2, ext.views().size());
view = ext.views().get(0);
assertEquals("wrong view category", OTHER, view.category());
assertEquals("wrong view id", FOO_ID, view.id());
assertEquals("wrong view label", FOO_LABEL, view.label());
view = ext.views().get(1);
assertEquals("wrong view category", OTHER, view.category());
assertEquals("wrong view id", BAR_ID, view.id());
assertEquals("wrong view label", BAR_LABEL, view.label());
} |
public static String formatMethodWithClass(Method input) {
return String.format("%s#%s", input.getDeclaringClass().getName(), formatMethod(input));
} | @Test
public void testClassMethodFormatter() throws Exception {
assertEquals(
getClass().getName() + "#testMethodFormatter()",
ReflectHelpers.formatMethodWithClass(getClass().getMethod("testMethodFormatter")));
assertEquals(
getClass().getName() + "#oneArg(int)",
ReflectHelpers.formatMethodWithClass(getClass().getDeclaredMethod("oneArg", int.class)));
assertEquals(
getClass().getName() + "#twoArg(String, List)",
ReflectHelpers.formatMethodWithClass(
getClass().getDeclaredMethod("twoArg", String.class, List.class)));
} |
public static String clientIdBase(WorkerConfig config) {
String result = Optional.ofNullable(config.groupId())
.orElse("connect");
String userSpecifiedClientId = config.getString(CLIENT_ID_CONFIG);
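// Append the user-specified client ID only when it is non-blank; the returned base always ends with "-".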
if (userSpecifiedClientId != null && !userSpecifiedClientId.trim().isEmpty()) {
result += "-" + userSpecifiedClientId;
}
return result + "-";
} | @Test
public void testClientIdBase() {
String groupId = "connect-cluster";
String userSpecifiedClientId = "worker-57";
String expectedClientIdBase = groupId + "-" + userSpecifiedClientId + "-";
assertClientIdBase(groupId, userSpecifiedClientId, expectedClientIdBase);
expectedClientIdBase = groupId + "-";
assertClientIdBase(groupId, null, expectedClientIdBase);
expectedClientIdBase = "connect-";
assertClientIdBase(null, null, expectedClientIdBase);
expectedClientIdBase = "connect-" + userSpecifiedClientId + "-";
assertClientIdBase(null, userSpecifiedClientId, expectedClientIdBase);
expectedClientIdBase = "connect-";
assertClientIdBase(null, "", expectedClientIdBase);
} |
public static Map<String, Object> map(String metricName, Metric metric) {
final Map<String, Object> metricMap = Maps.newHashMap();
metricMap.put("full_name", metricName);
metricMap.put("name", metricName.substring(metricName.lastIndexOf(".") + 1));
if (metric instanceof Timer) {
metricMap.put("metric", buildTimerMap((Timer) metric));
metricMap.put("type", "timer");
} else if(metric instanceof Meter) {
metricMap.put("metric", buildMeterMap((Meter) metric));
metricMap.put("type", "meter");
} else if(metric instanceof Histogram) {
metricMap.put("metric", buildHistogramMap((Histogram) metric));
metricMap.put("type", "histogram");
} else if(metric instanceof Counter) {
metricMap.put("metric", metric);
metricMap.put("type", "counter");
} else if(metric instanceof Gauge) {
metricMap.put("metric", metric);
metricMap.put("type", "gauge");
} else {
throw new IllegalArgumentException("Unknown metric type " + metric.getClass());
}
return metricMap;
} | @Test
public void mapSupportsCounter() {
final Counter counter = new Counter();
counter.inc(23L);
final Map<String, Object> map = MetricUtils.map("metric", counter);
assertThat(map)
.containsEntry("type", "counter")
.extracting("metric")
.extracting("count")
.isEqualTo(23L);
} |
@Override
public void serialize(ModelLocalUriId value, JsonGenerator gen, SerializerProvider provider) throws IOException {
gen.writeStartObject();
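// decodedPath (not shown here) presumably decodes any percent-encoded characters; the test below shows the expected JSON shape.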
gen.writeStringField("model", value.model());
gen.writeStringField("basePath", decodedPath(value.basePath()));
gen.writeStringField("fullPath", decodedPath(value.fullPath()));
gen.writeEndObject();
} | @Test
void serializeDecodedPath() throws IOException {
String path = "/example/some-id/instances/some-instance-id";
LocalUri parsed = LocalUri.parse(path);
ModelLocalUriId modelLocalUriId = new ModelLocalUriId(parsed);
Writer jsonWriter = new StringWriter();
JsonGenerator jsonGenerator = new JsonFactory().createGenerator(jsonWriter);
SerializerProvider serializerProvider = new ObjectMapper().getSerializerProvider();
new ModelLocalUriIdSerializer().serialize(modelLocalUriId, jsonGenerator, serializerProvider);
jsonGenerator.flush();
String expected = "{\"model\":\"example\",\"basePath\":\"/some-id/instances/some-instance-id\"," +
"\"fullPath\":\"/example/some-id/instances/some-instance-id\"}";
assertThat(jsonWriter.toString()).isEqualTo(expected);
} |
@Override
public ObjectNode encode(Criterion criterion, CodecContext context) {
EncodeCriterionCodecHelper encoder = new EncodeCriterionCodecHelper(criterion, context);
return encoder.encode();
} | @Test
public void matchUdpDstTest() {
Criterion criterion = Criteria.matchUdpDst(tpPort);
ObjectNode result = criterionCodec.encode(criterion, context);
assertThat(result, matchesCriterion(criterion));
} |
static String prettyPrintTaskInfo(TaskState taskState, ZoneOffset zoneOffset) {
if (taskState instanceof TaskPending) {
return "Will start at " + dateString(taskState.spec().startMs(), zoneOffset);
} else if (taskState instanceof TaskRunning) {
TaskRunning runState = (TaskRunning) taskState;
return "Started " + dateString(runState.startedMs(), zoneOffset) +
"; will stop after " + durationString(taskState.spec().durationMs());
} else if (taskState instanceof TaskStopping) {
TaskStopping stoppingState = (TaskStopping) taskState;
return "Started " + dateString(stoppingState.startedMs(), zoneOffset);
} else if (taskState instanceof TaskDone) {
TaskDone doneState = (TaskDone) taskState;
String status;
if (doneState.error() == null || doneState.error().isEmpty()) {
if (doneState.cancelled()) {
status = "CANCELLED";
} else {
status = "FINISHED";
}
} else {
status = "FAILED";
}
return String.format("%s at %s after %s", status,
dateString(doneState.doneMs(), zoneOffset),
durationString(doneState.doneMs() - doneState.startedMs()));
} else {
throw new RuntimeException("Unknown task state type " + taskState.stateType());
}
} | @Test
public void testPrettyPrintTaskInfo() {
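// Epoch millis used below: 1546931159850 = 2019-01-08T07:05:59.85Z; 1246931159850 = 2009-07-07T01:45:59.85Z;
// 1546981169850 = 2019-01-08T20:59:29.85Z.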
assertEquals("Will start at 2019-01-08T07:05:59.85Z",
CoordinatorClient.prettyPrintTaskInfo(
new TaskPending(new NoOpTaskSpec(1546931159850L, 9000)),
ZoneOffset.UTC));
assertEquals("Started 2009-07-07T01:45:59.85Z; will stop after 9s",
CoordinatorClient.prettyPrintTaskInfo(
new TaskRunning(new NoOpTaskSpec(1146931159850L, 9000),
1246931159850L,
JsonNodeFactory.instance.objectNode()), ZoneOffset.UTC));
assertEquals("Started 2009-07-07T01:45:59.85Z",
CoordinatorClient.prettyPrintTaskInfo(
new TaskStopping(new NoOpTaskSpec(1146931159850L, 9000),
1246931159850L,
JsonNodeFactory.instance.objectNode()), ZoneOffset.UTC));
assertEquals("FINISHED at 2019-01-08T20:59:29.85Z after 10s",
CoordinatorClient.prettyPrintTaskInfo(
new TaskDone(new NoOpTaskSpec(0, 1000),
1546981159850L,
1546981169850L,
"",
false,
JsonNodeFactory.instance.objectNode()), ZoneOffset.UTC));
assertEquals("CANCELLED at 2019-01-08T20:59:29.85Z after 10s",
CoordinatorClient.prettyPrintTaskInfo(
new TaskDone(new NoOpTaskSpec(0, 1000),
1546981159850L,
1546981169850L,
"",
true,
JsonNodeFactory.instance.objectNode()), ZoneOffset.UTC));
assertEquals("FAILED at 2019-01-08T20:59:29.85Z after 10s",
CoordinatorClient.prettyPrintTaskInfo(
new TaskDone(new NoOpTaskSpec(0, 1000),
1546981159850L,
1546981169850L,
"foobar",
true,
JsonNodeFactory.instance.objectNode()), ZoneOffset.UTC));
} |
public <T extends ManifestTemplate> ManifestAndDigest<T> pullManifest(
String imageQualifier, Class<T> manifestTemplateClass) throws IOException, RegistryException {
ManifestPuller<T> manifestPuller =
new ManifestPuller<>(
registryEndpointRequestProperties, imageQualifier, manifestTemplateClass);
return callRegistryEndpoint(manifestPuller);
} | @Test
public void testPullManifest()
throws IOException, InterruptedException, GeneralSecurityException, URISyntaxException,
RegistryException {
String manifestResponse =
"HTTP/1.1 200 OK\nContent-Length: 307\n\n{\n"
+ " \"schemaVersion\": 2,\n"
+ " \"mediaType\": \"application/vnd.docker.distribution.manifest.v2+json\",\n"
+ " \"config\": {\n"
+ " \"mediaType\": \"application/vnd.docker.container.image.v1+json\",\n"
+ " \"size\": 7023,\n"
+ " \"digest\": \"sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7\"\n"
+ " }\n"
+ "}";
registry = new TestWebServer(false, Arrays.asList(manifestResponse), 1);
RegistryClient registryClient = createRegistryClient(null);
ManifestAndDigest<?> manifestAndDigest = registryClient.pullManifest("image-tag");
Assert.assertEquals(
"sha256:6b61466eabab6e5ffb68ae2bd9b85c789225540c2ac54ea1f71eb327588e8946",
manifestAndDigest.getDigest().toString());
Assert.assertTrue(manifestAndDigest.getManifest() instanceof V22ManifestTemplate);
V22ManifestTemplate manifest = (V22ManifestTemplate) manifestAndDigest.getManifest();
Assert.assertEquals(2, manifest.getSchemaVersion());
Assert.assertEquals(
"application/vnd.docker.distribution.manifest.v2+json", manifest.getManifestMediaType());
Assert.assertEquals(
"sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7",
manifest.getContainerConfiguration().getDigest().toString());
Assert.assertEquals(7023, manifest.getContainerConfiguration().getSize());
MatcherAssert.assertThat(
registry.getInputRead(),
CoreMatchers.containsString("GET /v2/foo/bar/manifests/image-tag "));
} |
@Override
public KeyValueIterator<Windowed<K>, V> fetch(final K key) {
Objects.requireNonNull(key, "key can't be null");
final List<ReadOnlySessionStore<K, V>> stores = storeProvider.stores(storeName, queryableStoreType);
for (final ReadOnlySessionStore<K, V> store : stores) {
try {
final KeyValueIterator<Windowed<K>, V> result = store.fetch(key);
if (!result.hasNext()) {
result.close();
} else {
return result;
}
} catch (final InvalidStateStoreException ise) {
throw new InvalidStateStoreException("State store [" + storeName + "] is not available anymore" +
" and may have been migrated to another instance; " +
"please re-discover its location from the state metadata. " +
"Original error message: " + ise);
}
}
return KeyValueIterators.emptyIterator();
} | @Test
public void shouldThrowNPEIfKeyIsNull() {
assertThrows(NullPointerException.class, () -> underlyingSessionStore.fetch(null));
} |
@Override
public CompletableFuture<Long> getCounterAsync(String key) {
ensureStateEnabled();
return defaultStateStore.getCounterAsync(key);
} | @Test
public void testGetCounterStateEnabled() throws Exception {
context.defaultStateStore = mock(BKStateStoreImpl.class);
context.getCounterAsync("test-key");
verify(context.defaultStateStore, times(1)).getCounterAsync(eq("test-key"));
} |
static BlockStmt getDiscretizeBinVariableDeclaration(final String variableName,
final DiscretizeBin discretizeBin) {
final MethodDeclaration methodDeclaration =
DISCRETIZE_BIN_TEMPLATE.getMethodsByName(GETKIEPMMLDISCRETIZE_BIN).get(0).clone();
final BlockStmt discretizeBinBody =
methodDeclaration.getBody().orElseThrow(() -> new KiePMMLException(String.format(MISSING_BODY_TEMPLATE, methodDeclaration)));
final VariableDeclarator variableDeclarator =
getVariableDeclarator(discretizeBinBody, DISCRETIZE_BIN).orElseThrow(() -> new KiePMMLException(String.format(MISSING_VARIABLE_IN_BODY, DISCRETIZE_BIN, discretizeBinBody)));
variableDeclarator.setName(variableName);
final BlockStmt toReturn = new BlockStmt();
String nestedVariableName = String.format("%s_Interval", variableName);
BlockStmt toAdd = getIntervalVariableDeclaration(nestedVariableName, discretizeBin.getInterval());
toAdd.getStatements().forEach(toReturn::addStatement);
final ObjectCreationExpr objectCreationExpr = variableDeclarator.getInitializer()
.orElseThrow(() -> new KiePMMLException(String.format(MISSING_VARIABLE_INITIALIZER_TEMPLATE,
DISCRETIZE_BIN, discretizeBinBody)))
.asObjectCreationExpr();
final Expression nameExpr = new StringLiteralExpr(variableName);
final Expression binValueExpr = getExpressionForObject(discretizeBin.getBinValue());
final NameExpr intervalExpr = new NameExpr(nestedVariableName);
objectCreationExpr.getArguments().set(0, nameExpr);
objectCreationExpr.getArguments().set(2, binValueExpr);
objectCreationExpr.getArguments().set(3, intervalExpr);
discretizeBinBody.getStatements().forEach(toReturn::addStatement);
return toReturn;
} | @Test
void getDiscretizeBinVariableDeclaration() throws IOException {
String variableName = "variableName";
double leftMargin = 45.32;
Interval interval = new Interval();
interval.setLeftMargin(leftMargin);
interval.setRightMargin(null);
interval.setClosure(Interval.Closure.CLOSED_OPEN);
String binValue = "binValue";
DiscretizeBin discretizeBin = new DiscretizeBin();
discretizeBin.setBinValue(binValue);
discretizeBin.setInterval(interval);
BlockStmt retrieved = KiePMMLDiscretizeBinFactory.getDiscretizeBinVariableDeclaration(variableName,
discretizeBin);
String closureString =
CLOSURE.class.getName() + "." + CLOSURE.byName(interval.getClosure().value()).name();
String text = getFileContent(TEST_01_SOURCE);
Statement expected = JavaParserUtils.parseBlock(String.format(text, variableName, leftMargin, closureString,
binValue));
assertThat(JavaParserUtils.equalsNode(expected, retrieved)).isTrue();
List<Class<?>> imports = Arrays.asList(Collections.class, KiePMMLDiscretizeBin.class, KiePMMLInterval.class);
commonValidateCompilationWithImports(retrieved, imports);
} |
public static String random(int size) {
return random(size, new Random());
} | @Test
public void testRandom() {
StringUtils.random(40);
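// random(4, 7) is presumably a seeded overload (size, seed), making the output deterministic for an exact-match assertion.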
assertEquals(StringUtils.random(4, 7), "#,q7");
} |
@Override
public ObjectNode encode(Driver driver, CodecContext context) {
checkNotNull(driver, "Driver cannot be null");
ObjectNode result = context.mapper().createObjectNode()
.put(NAME, driver.name())
.put(MANUFACTURER, driver.manufacturer())
.put(HW_VERSION, driver.hwVersion())
.put(SW_VERSION, driver.swVersion());
if (driver.parent() != null) {
result.put(PARENT, driver.parent().name());
}
ArrayNode behaviours = context.mapper().createArrayNode();
driver.behaviours().forEach(behaviour -> {
ObjectNode entry = context.mapper().createObjectNode()
.put(BEHAVIORS_NAME, behaviour.getCanonicalName())
.put(BEHAVIORS_IMPLEMENTATION_NAME,
driver.implementation(behaviour).getCanonicalName());
behaviours.add(entry);
});
result.set(BEHAVIOURS, behaviours);
ArrayNode properties = context.mapper().createArrayNode();
driver.properties().forEach((name, value) -> {
ObjectNode entry = context.mapper().createObjectNode()
.put("name", name)
.put("value", value);
properties.add(entry);
});
result.set(PROPERTIES, properties);
return result;
} | @Test
public void codecTest() {
Map<Class<? extends Behaviour>, Class<? extends Behaviour>> behaviours =
ImmutableMap.of(TestBehaviour.class,
TestBehaviourImpl.class,
TestBehaviourTwo.class,
TestBehaviourTwoImpl.class);
Map<String, String> properties =
ImmutableMap.of("key1", "value1", "key2", "value2");
DefaultDriver parent = new DefaultDriver("parent", new ArrayList<>(), "Acme",
"HW1.2.3", "SW1.2.3",
behaviours,
properties);
DefaultDriver child = new DefaultDriver("child", ImmutableList.of(parent), "Acme",
"HW1.2.3.1", "SW1.2.3.1",
behaviours,
properties);
MockCodecContext context = new MockCodecContext();
ObjectNode driverJson = context.codec(Driver.class).encode(child, context);
assertThat(driverJson, matchesDriver(child));
} |
@VisibleForTesting
public ProcessContinuation run(
RestrictionTracker<OffsetRange, Long> tracker,
OutputReceiver<PartitionRecord> receiver,
ManualWatermarkEstimator<Instant> watermarkEstimator,
InitialPipelineState initialPipelineState)
throws Exception {
LOG.debug("DNP: Watermark: " + watermarkEstimator.getState());
LOG.debug("DNP: CurrentTracker: " + tracker.currentRestriction().getFrom());
if (tracker.currentRestriction().getFrom() == 0L) {
if (!tracker.tryClaim(0L)) {
LOG.error(
"Could not claim initial DetectNewPartition restriction. No partitions are outputted.");
return ProcessContinuation.stop();
}
watermarkEstimator.setWatermark(initialPipelineState.getStartTime());
if (initialPipelineState.isResume()) {
resumeFromPreviousPipelineAction.run(receiver);
} else {
generateInitialPartitionsAction.run(receiver, initialPipelineState.getStartTime());
}
return ProcessContinuation.resume();
}
// Create a new partition reconciler every run to reset the state each time.
partitionReconciler = new PartitionReconciler(metadataTableDao, metrics);
orphanedMetadataCleaner = new OrphanedMetadataCleaner();
// Calculating the new value of watermark is a resource intensive process. We have to do a full
// scan of the metadata table and then ensure we're not missing partitions and then calculate
// the low watermark. This is usually a fairly fast process even with thousands of partitions.
// However, sometimes this may take so long that the runner checkpoints before the watermark is
// calculated. Because the checkpoint takes place before tryClaim, this forces the DoFn to
// restart, wasting the resources spent calculating the watermark. On restart, we will try to
// calculate the watermark again. The problem causing the slow watermark calculation can persist
// leading to a crash loop. In order to ensure we persist the calculated watermark, we calculate
// the watermark after successful tryClaim. Then we write to the metadata table the new
// watermark. On the start of each run we read the watermark and update the DoFn's watermark.
DetectNewPartitionsState detectNewPartitionsState =
metadataTableDao.readDetectNewPartitionsState();
if (detectNewPartitionsState != null) {
watermarkEstimator.setWatermark(detectNewPartitionsState.getWatermark());
}
// Terminate if endTime <= watermark that means all partitions have read up to or beyond
// watermark. We no longer need to manage splits and merges, we can terminate.
if (endTime != null && !watermarkEstimator.currentWatermark().isBefore(endTime)) {
tracker.tryClaim(tracker.currentRestriction().getTo());
return ProcessContinuation.stop();
}
if (!tracker.tryClaim(tracker.currentRestriction().getFrom())) {
LOG.warn("DNP: Checkpointing, stopping this run: " + tracker.currentRestriction());
return ProcessContinuation.stop();
}
// Read StreamPartitions to calculate watermark.
List<StreamPartitionWithWatermark> streamPartitionsWithWatermark = null;
if (shouldUpdateWatermark(tracker.currentRestriction().getFrom(), detectNewPartitionsState)) {
streamPartitionsWithWatermark = metadataTableDao.readStreamPartitionsWithWatermark();
}
// Process NewPartitions and track the ones successfully outputted.
List<NewPartition> newPartitions = metadataTableDao.readNewPartitions();
List<ByteStringRange> outputtedNewPartitions = new ArrayList<>();
for (NewPartition newPartition : newPartitions) {
if (processNewPartitionsAction.processNewPartition(newPartition, receiver)) {
outputtedNewPartitions.add(newPartition.getPartition());
} else if (streamPartitionsWithWatermark != null) {
// streamPartitionsWithWatermark is non-null only on runs that update the watermark, and
// reconciliation runs only on those runs, so incomplete NewPartitions are tracked here
// only when reconciliation will actually execute.
partitionReconciler.addIncompleteNewPartitions(newPartition);
orphanedMetadataCleaner.addIncompleteNewPartitions(newPartition);
}
}
// Process the watermark using read StreamPartitions and NewPartitions.
if (streamPartitionsWithWatermark != null) {
Optional<Instant> maybeWatermark =
getNewWatermark(streamPartitionsWithWatermark, newPartitions);
maybeWatermark.ifPresent(metadataTableDao::updateDetectNewPartitionWatermark);
// Only start reconciling after the pipeline has been running for a while.
if (tracker.currentRestriction().getFrom() > 50) {
// Using NewPartitions and StreamPartitions, evaluate partitions that are possibly not being
// streamed. This isn't perfect because there may be partitions moving between
// StreamPartitions and NewPartitions while scanning the metadata table. Also, this does not
// include NewPartitions marked as deleted from a previous DNP run not yet processed by
// RCSP.
List<ByteStringRange> existingPartitions =
streamPartitionsWithWatermark.stream()
.map(StreamPartitionWithWatermark::getPartition)
.collect(Collectors.toList());
existingPartitions.addAll(outputtedNewPartitions);
List<ByteStringRange> missingStreamPartitions =
getMissingPartitionsFromEntireKeySpace(existingPartitions);
orphanedMetadataCleaner.addMissingPartitions(missingStreamPartitions);
partitionReconciler.addMissingPartitions(missingStreamPartitions);
processReconcilerPartitions(
receiver, watermarkEstimator, initialPipelineState.getStartTime());
cleanUpOrphanedMetadata();
}
}
return ProcessContinuation.resume().withResumeDelay(Duration.millis(100));
} | @Test
public void testUpdateWatermarkOnEvenCountAfter10Seconds() throws Exception {
// We update watermark every 2 iterations only if it's been more than 10s since the last update.
OffsetRange offsetRange = new OffsetRange(2, Long.MAX_VALUE);
when(tracker.currentRestriction()).thenReturn(offsetRange);
when(tracker.tryClaim(offsetRange.getFrom())).thenReturn(true);
assertEquals(
DoFn.ProcessContinuation.resume().withResumeDelay(Duration.millis(100)),
action.run(
tracker, receiver, watermarkEstimator, new InitialPipelineState(startTime, false)));
assertEquals(startTime, watermarkEstimator.currentWatermark());
assertNull(metadataTableDao.readDetectNewPartitionsState());
// Manually set the watermark of DNP to start time with a timestamp of 10s prior.
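// (Bigtable cell timestamps are in microseconds, hence the "* 1000L" on the epoch millis below.)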
RowMutation rowMutation =
RowMutation.create(
MetadataTableAdminDao.DEFAULT_METADATA_TABLE_NAME,
metadataTableDao
.getChangeStreamNamePrefix()
.concat(MetadataTableAdminDao.DETECT_NEW_PARTITION_SUFFIX))
.setCell(
MetadataTableAdminDao.CF_WATERMARK,
MetadataTableAdminDao.QUALIFIER_DEFAULT,
Instant.now().minus(Duration.standardSeconds(10)).getMillis() * 1000L,
startTime.getMillis());
dataClient.mutateRow(rowMutation);
// Create a partition covering the entire keyspace with watermark after endTime.
ByteStringRange partition1 = ByteStringRange.create("", "");
Instant watermark1 = endTime.plus(Duration.millis(100));
PartitionRecord partitionRecord1 =
new PartitionRecord(
partition1,
watermark1,
UniqueIdGenerator.getNextId(),
watermark1,
Collections.emptyList(),
null);
metadataTableDao.lockAndRecordPartition(partitionRecord1);
assertEquals(
DoFn.ProcessContinuation.resume().withResumeDelay(Duration.millis(100)),
action.run(
tracker, receiver, watermarkEstimator, new InitialPipelineState(startTime, false)));
assertEquals(startTime, watermarkEstimator.currentWatermark());
assertEquals(watermark1, metadataTableDao.readDetectNewPartitionsState().getWatermark());
// On the 2nd run, the watermark estimator is updated to a value beyond endTime, so the DoFn terminates.
assertEquals(
DoFn.ProcessContinuation.stop(),
action.run(
tracker, receiver, watermarkEstimator, new InitialPipelineState(startTime, false)));
verify(tracker, times(1)).tryClaim(offsetRange.getTo());
assertEquals(watermark1, watermarkEstimator.currentWatermark());
} |
public List<String> getLiveBrokers() {
List<String> brokerUrls = new ArrayList<>();
try {
byte[] brokerResourceNodeData = _zkClient.readData(BROKER_EXTERNAL_VIEW_PATH, true);
brokerResourceNodeData = unpackZnodeIfNecessary(brokerResourceNodeData);
JsonNode jsonObject = OBJECT_READER.readTree(getInputStream(brokerResourceNodeData));
JsonNode brokerResourceNode = jsonObject.get("mapFields");
Iterator<Entry<String, JsonNode>> resourceEntries = brokerResourceNode.fields();
while (resourceEntries.hasNext()) {
JsonNode resource = resourceEntries.next().getValue();
Iterator<Entry<String, JsonNode>> brokerEntries = resource.fields();
while (brokerEntries.hasNext()) {
Entry<String, JsonNode> brokerEntry = brokerEntries.next();
String brokerName = brokerEntry.getKey();
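// Broker map entries presumably look like "Broker_<host>_<port>" -> state; only ONLINE brokers are collected.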
if (brokerName.startsWith("Broker_") && "ONLINE".equals(brokerEntry.getValue().asText())) {
brokerUrls.add(getHostPort(brokerName));
}
}
}
} catch (Exception e) {
LOGGER.warn("Exception while reading External view from zookeeper", e);
// ignore
}
return brokerUrls;
} | @Test
public void testGetBrokerListByInstanceConfigTls() {
configureData(_instanceConfigTls, true);
final List<String> brokers = _externalViewReaderUnderTest.getLiveBrokers();
assertEquals(brokers, Arrays.asList("first.pug-pinot-broker-headless:8090"));
} |
public static KeyId ofBytes(byte[] keyIdBytes) {
Objects.requireNonNull(keyIdBytes);
return new KeyId(Arrays.copyOf(keyIdBytes, keyIdBytes.length));
} | @Test
void malformed_utf8_key_id_is_rejected_on_construction() {
byte[] malformedIdBytes = new byte[]{ (byte)0xC0 }; // Leading byte of a 2-byte UTF-8 sequence without its continuation byte
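// The UTF-8 validation that triggers this exception presumably lives in the KeyId constructor that ofBytes delegates to.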
assertThrows(IllegalArgumentException.class, () -> KeyId.ofBytes(malformedIdBytes));
} |
@Override
public CompletableFuture<V> whenComplete(BiConsumer<? super V, ? super Throwable> action) {
return future.handleAsync(new WhenCompleteAdapter(action), defaultExecutor());
} | @Test
public void whenComplete_whenExceptional() {
CompletableFuture<String> nextStage = delegatingFuture.whenComplete((v, t) -> assertInstanceOf(IllegalArgumentException.class, t));
invocationFuture.completeExceptionally(new IllegalArgumentException());
assertTrueEventually(() -> assertTrue(nextStage.isDone()));
assertThatThrownBy(delegatingFuture::join)
.isInstanceOf(CompletionException.class)
.cause().has(rootCause(IllegalArgumentException.class));
} |
public static <E> ArrayList<E> newArrayListWithCapacity(
int initialArraySize) {
checkNonnegative(initialArraySize, "initialArraySize");
return new ArrayList<>(initialArraySize);
} | @Test
public void testArrayListWithSize() {
List<String> list = Lists.newArrayListWithCapacity(3);
list.add("record1");
list.add("record2");
list.add("record3");
Assert.assertEquals(3, list.size());
Assert.assertEquals("record1", list.get(0));
Assert.assertEquals("record2", list.get(1));
Assert.assertEquals("record3", list.get(2));
list = Lists.newArrayListWithCapacity(3);
list.add("record1");
list.add("record2");
list.add("record3");
Assert.assertEquals(3, list.size());
Assert.assertEquals("record1", list.get(0));
Assert.assertEquals("record2", list.get(1));
Assert.assertEquals("record3", list.get(2));
} |
public void verifyAndValidate(final String jwt) {
try {
Jws<Claims> claimsJws = Jwts.parser()
.verifyWith(tokenConfigurationParameter.getPublicKey())
.build()
.parseSignedClaims(jwt);
// Log the claims for debugging purposes
Claims claims = claimsJws.getPayload();
log.info("Token claims: {}", claims);
// Additional checks (e.g., expiration, issuer, etc.)
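// Note: parseSignedClaims above already throws ExpiredJwtException for expired tokens (handled in the catch
// block below), so this explicit check is a defensive re-check.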
if (claims.getExpiration().before(new Date())) {
throw new JwtException("Token has expired");
}
log.info("Token is valid");
} catch (ExpiredJwtException e) {
log.error("Token has expired", e);
throw new ResponseStatusException(HttpStatus.UNAUTHORIZED, "Token has expired", e);
} catch (JwtException e) {
log.error("Invalid JWT token", e);
throw new ResponseStatusException(HttpStatus.UNAUTHORIZED, "Invalid JWT token", e);
} catch (Exception e) {
log.error("Error validating token", e);
throw new ResponseStatusException(HttpStatus.INTERNAL_SERVER_ERROR, "Error validating token", e);
}
} | @Test
void givenMalformedToken_whenVerifyAndValidate_thenThrowJwtException() {
// Given
String malformedToken = "malformed.token.string";
// When & Then
assertThatThrownBy(() -> tokenService.verifyAndValidate(malformedToken))
.isInstanceOf(ResponseStatusException.class)
.hasMessageContaining("Invalid JWT token")
.hasCauseInstanceOf(JwtException.class);
} |
@Override
public void calculate(TradePriceCalculateReqBO param, TradePriceCalculateRespBO result) {
// Default the points used to 0
result.setUsePoint(0);
// 1.1 Check whether points should be used
if (!BooleanUtil.isTrue(param.getPointStatus())) {
result.setUsePoint(0);
return;
}
// 1.2 Check whether point deduction is enabled
MemberConfigRespDTO config = memberConfigApi.getConfig();
if (!isDeductPointEnable(config)) {
return;
}
// 1.3 Check the user's point balance
MemberUserRespDTO user = memberUserApi.getUser(param.getUserId());
if (user.getPoint() == null || user.getPoint() <= 0) {
return;
}
// 2.1 Calculate the point discount amount
int pointPrice = calculatePointPrice(config, user.getPoint(), result);
// 2.2 Apportion the points and deduction amounts across items
List<TradePriceCalculateRespBO.OrderItem> orderItems = filterList(result.getItems(), TradePriceCalculateRespBO.OrderItem::getSelected);
List<Integer> dividePointPrices = TradePriceCalculatorHelper.dividePrice(orderItems, pointPrice);
List<Integer> divideUsePoints = TradePriceCalculatorHelper.dividePrice(orderItems, result.getUsePoint());
// 3.1 Record the promotion details
TradePriceCalculatorHelper.addPromotion(result, orderItems,
param.getUserId(), "积分抵扣", PromotionTypeEnum.POINT.getType(),
StrUtil.format("积分抵扣:省 {} 元", TradePriceCalculatorHelper.formatPrice(pointPrice)),
dividePointPrices);
// 3.2 Update each SKU's discount amount
for (int i = 0; i < orderItems.size(); i++) {
TradePriceCalculateRespBO.OrderItem orderItem = orderItems.get(i);
orderItem.setPointPrice(dividePointPrices.get(i));
orderItem.setUsePoint(divideUsePoints.get(i));
TradePriceCalculatorHelper.recountPayPrice(orderItem);
}
TradePriceCalculatorHelper.recountAllPrice(result);
} | @Test
public void testCalculate_success() {
// Prepare parameters
TradePriceCalculateReqBO param = new TradePriceCalculateReqBO()
.setUserId(233L).setPointStatus(true) // whether to use points
.setItems(asList(
new TradePriceCalculateReqBO.Item().setSkuId(10L).setCount(2).setSelected(true), // uses points
new TradePriceCalculateReqBO.Item().setSkuId(20L).setCount(3).setSelected(true), // uses points
new TradePriceCalculateReqBO.Item().setSkuId(30L).setCount(5).setSelected(false) // not selected, no points used
));
TradePriceCalculateRespBO result = new TradePriceCalculateRespBO()
.setType(TradeOrderTypeEnum.NORMAL.getType())
.setPrice(new TradePriceCalculateRespBO.Price())
.setPromotions(new ArrayList<>())
.setItems(asList(
new TradePriceCalculateRespBO.OrderItem().setSkuId(10L).setCount(2).setSelected(true)
.setPrice(100).setSpuId(1L),
new TradePriceCalculateRespBO.OrderItem().setSkuId(20L).setCount(3).setSelected(true)
.setPrice(50).setSpuId(2L),
new TradePriceCalculateRespBO.OrderItem().setSkuId(30L).setCount(5).setSelected(false)
.setPrice(30).setSpuId(3L)
));
// Ensure the prices are initialized
TradePriceCalculatorHelper.recountPayPrice(result.getItems());
TradePriceCalculatorHelper.recountAllPrice(result);
// Mock method (point configuration info)
MemberConfigRespDTO memberConfig = randomPojo(MemberConfigRespDTO.class,
o -> o.setPointTradeDeductEnable(true) // enable point deduction
.setPointTradeDeductUnitPrice(1) // amount (in cents) deducted per point
.setPointTradeDeductMaxPrice(100)); // maximum deductible points
when(memberConfigApi.getConfig()).thenReturn(memberConfig);
// Mock method (member info)
MemberUserRespDTO user = randomPojo(MemberUserRespDTO.class, o -> o.setId(param.getUserId()).setPoint(100));
when(memberUserApi.getUser(user.getId())).thenReturn(user);
// Invoke
tradePointUsePriceCalculator.calculate(param, result);
// Assert: points used
assertEquals(result.getUsePoint(), 100);
// Assert: the Price part
TradePriceCalculateRespBO.Price price = result.getPrice();
assertEquals(price.getTotalPrice(), 350);
assertEquals(price.getPayPrice(), 250);
assertEquals(price.getPointPrice(), 100);
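// The 100 points (100 cents) are split across the two selected items in proportion to their totals:
// 100 * 200/350 ≈ 57 for SKU 1, and the remaining 43 for SKU 2.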
// Assert: SKU 1
TradePriceCalculateRespBO.OrderItem orderItem01 = result.getItems().get(0);
assertEquals(orderItem01.getSkuId(), 10L);
assertEquals(orderItem01.getCount(), 2);
assertEquals(orderItem01.getPrice(), 100);
assertEquals(orderItem01.getPointPrice(), 57);
assertEquals(orderItem01.getPayPrice(), 143);
// Assert: SKU 2
TradePriceCalculateRespBO.OrderItem orderItem02 = result.getItems().get(1);
assertEquals(orderItem02.getSkuId(), 20L);
assertEquals(orderItem02.getCount(), 3);
assertEquals(orderItem02.getPrice(), 50);
assertEquals(orderItem02.getPointPrice(), 43);
assertEquals(orderItem02.getPayPrice(), 107);
// Assert: SKU 3
TradePriceCalculateRespBO.OrderItem orderItem03 = result.getItems().get(2);
assertEquals(orderItem03.getSkuId(), 30L);
assertEquals(orderItem03.getCount(), 5);
assertEquals(orderItem03.getPrice(), 30);
assertEquals(orderItem03.getPointPrice(), 0);
assertEquals(orderItem03.getPayPrice(), 150);
// Assert: the Promotion part
assertEquals(result.getPromotions().size(), 1);
TradePriceCalculateRespBO.Promotion promotion01 = result.getPromotions().get(0);
assertEquals(promotion01.getId(), user.getId());
assertEquals(promotion01.getName(), "积分抵扣");
assertEquals(promotion01.getType(), PromotionTypeEnum.POINT.getType());
assertEquals(promotion01.getTotalPrice(), 350);
assertEquals(promotion01.getDiscountPrice(), 100);
assertTrue(promotion01.getMatch());
assertEquals(promotion01.getDescription(), "积分抵扣:省 1.00 元");
assertEquals(promotion01.getItems().size(), 2);
TradePriceCalculateRespBO.PromotionItem promotionItem011 = promotion01.getItems().get(0);
assertEquals(promotionItem011.getSkuId(), 10L);
assertEquals(promotionItem011.getTotalPrice(), 200);
assertEquals(promotionItem011.getDiscountPrice(), 57);
TradePriceCalculateRespBO.PromotionItem promotionItem012 = promotion01.getItems().get(1);
assertEquals(promotionItem012.getSkuId(), 20L);
assertEquals(promotionItem012.getTotalPrice(), 150);
assertEquals(promotionItem012.getDiscountPrice(), 43);
} |
@Nullable
@Override
public Message decode(@Nonnull RawMessage rawMessage) {
final byte[] payload = rawMessage.getPayload();
final JsonNode event;
try {
event = objectMapper.readTree(payload);
if (event == null || event.isMissingNode()) {
throw new IOException("null result");
}
} catch (IOException e) {
LOG.error("Couldn't decode raw message {}", rawMessage);
return null;
}
return parseEvent(event);
} | @Test
public void decodeMessagesHandleGenericBeatMessages() throws Exception {
final Message message = codec.decode(messageFromJson("generic.json"));
assertThat(message).isNotNull();
assertThat(message.getSource()).isEqualTo("unknown");
assertThat(message.getTimestamp()).isEqualTo(new DateTime(2016, 4, 1, 0, 0, DateTimeZone.UTC));
assertThat(message.getField("beats_type")).isEqualTo("beat");
assertThat(message.getField("beat_foo")).isEqualTo("bar");
} |
@Override
public void execute(String commandName, BufferedReader reader, BufferedWriter writer)
throws Py4JException, IOException {
char subCommand = safeReadLine(reader).charAt(0);
String returnCommand = null;
if (subCommand == ARRAY_GET_SUB_COMMAND_NAME) {
returnCommand = getArray(reader);
} else if (subCommand == ARRAY_SET_SUB_COMMAND_NAME) {
returnCommand = setArray(reader);
} else if (subCommand == ARRAY_SLICE_SUB_COMMAND_NAME) {
returnCommand = sliceArray(reader);
} else if (subCommand == ARRAY_LEN_SUB_COMMAND_NAME) {
returnCommand = lenArray(reader);
} else if (subCommand == ARRAY_CREATE_SUB_COMMAND_NAME) {
returnCommand = createArray(reader);
} else {
returnCommand = Protocol.getOutputErrorCommand("Unknown Array SubCommand Name: " + subCommand);
}
logger.finest("Returning command: " + returnCommand);
writer.write(returnCommand);
writer.flush();
} | @Test
public void testSet() {
String inputCommand = ArrayCommand.ARRAY_SET_SUB_COMMAND_NAME + "\n" + target2 + "\ni1\ni555\ne\n";
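// Wire format (per the Py4J protocol): set sub-command char, target array id, then "i"-prefixed
// integers (index 1, value 555) and the "e" end marker.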
try {
command.execute("a", new BufferedReader(new StringReader(inputCommand)), writer);
assertEquals("!yv\n", sWriter.toString());
assertEquals(Array.getInt(array2, 1), 555);
} catch (Exception e) {
e.printStackTrace();
fail();
}
} |
@Override
public TenantPackageDO validTenantPackage(Long id) {
TenantPackageDO tenantPackage = tenantPackageMapper.selectById(id);
if (tenantPackage == null) {
throw exception(TENANT_PACKAGE_NOT_EXISTS);
}
if (tenantPackage.getStatus().equals(CommonStatusEnum.DISABLE.getStatus())) {
throw exception(TENANT_PACKAGE_DISABLE, tenantPackage.getName());
}
return tenantPackage;
} | @Test
public void testValidTenantPackage_disable() {
// Mock data
TenantPackageDO dbTenantPackage = randomPojo(TenantPackageDO.class,
o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus()));
tenantPackageMapper.insert(dbTenantPackage);// @Sql: insert an existing record first
// Invoke and assert the exception
assertServiceException(() -> tenantPackageService.validTenantPackage(dbTenantPackage.getId()),
TENANT_PACKAGE_DISABLE, dbTenantPackage.getName());
} |
String getProtocol(URL url) {
String protocol = url.getSide();
protocol = protocol == null ? url.getProtocol() : protocol;
return protocol;
} | @Test
void testGetProtocol() {
URL url = URL.valueOf("dubbo://" + NetUtils.getLocalAddress().getHostName()
+ ":4444/org.apache.dubbo.TestService?version=1.0.0&application=vic&side=provider");
String protocol = abstractMetadataReport.getProtocol(url);
assertEquals("provider", protocol);
URL url2 = URL.valueOf("consumer://" + NetUtils.getLocalAddress().getHostName()
+ ":4444/org.apache.dubbo.TestService?version=1.0.0&application=vic");
String protocol2 = abstractMetadataReport.getProtocol(url2);
assertEquals("consumer", protocol2);
} |
@Override
public void persist(final String key, final String value) {
try {
if (isExisted(key)) {
update(key, value);
return;
}
String tempPrefix = "";
String parent = SEPARATOR;
String[] paths = Arrays.stream(key.split(SEPARATOR)).filter(each -> !Strings.isNullOrEmpty(each)).toArray(String[]::new);
// Create key level directory recursively.
for (int i = 0; i < paths.length - 1; i++) {
String tempKey = tempPrefix + SEPARATOR + paths[i];
if (!isExisted(tempKey)) {
insert(tempKey, "", parent);
}
tempPrefix = tempKey;
parent = tempKey;
}
insert(key, value, parent);
} catch (final SQLException ex) {
log.error("Persist {} data to key: {} failed", getType(), key, ex);
}
} | @Test
void assertPersistWithUpdateForSimpleKeys() throws SQLException {
final String key = "key";
final String value = "value";
when(mockJdbcConnection.prepareStatement(repositorySQL.getSelectByKeySQL())).thenReturn(mockPreparedStatement);
when(mockJdbcConnection.prepareStatement(repositorySQL.getUpdateSQL())).thenReturn(mockPreparedStatementForPersist);
when(mockPreparedStatement.executeQuery()).thenReturn(mockResultSet);
when(mockResultSet.next()).thenReturn(true);
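// With next() returning true, isExisted(key) is true and persist() takes the update path.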
repository.persist(key, value);
verify(mockPreparedStatement).setString(1, key);
verify(mockPreparedStatementForPersist).setString(eq(1), anyString());
verify(mockPreparedStatementForPersist).setString(1, value);
verify(mockPreparedStatementForPersist).setString(2, key);
verify(mockPreparedStatementForPersist).executeUpdate();
} |
@SuppressWarnings("unchecked")
public static synchronized <T extends Cache> T createCache(String name) {
T cache = (T) caches.get(name);
if (cache != null) {
return cache;
}
cache = (T) cacheFactoryStrategy.createCache(name);
log.info("Created cache [" + cacheFactoryStrategy.getClass().getName() + "] for " + name);
return wrapCache(cache, name);
} | @Test
public void testCacheCreation() throws Exception
{
// Setup test fixture.
// Execute system under test.
final Cache result = CacheFactory.createCache("unittest-cache-creation");
// Verify results.
assertNotNull(result);
} |
@Override
public void ignorableWhitespace(char[] ch, int start, int length) throws SAXException {
filter(ch, start, length, ignorableWhitespaceOutput);
} | @Test
public void testNormalWhitespace() throws SAXException {
safe.ignorableWhitespace("abc".toCharArray(), 0, 3);
assertEquals("abc", output.toString());
} |
public static void refreshSuperUserGroupsConfiguration() {
//load server side configuration;
refreshSuperUserGroupsConfiguration(new Configuration());
} | @Test
public void testIPRange() {
Configuration conf = new Configuration();
conf.set(
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_NAME),
"*");
conf.set(
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(REAL_USER_NAME),
PROXY_IP_RANGE);
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
// First try proxying a group that's allowed
UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);
UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
// From good IP
assertAuthorized(proxyUserUgi, "10.222.0.0");
// From bad IP
assertNotAuthorized(proxyUserUgi, "10.221.0.0");
} |
public Container getContainer() {
return container;
} | @Test
public void getContainer() {
assertEquals(container, context.getContainer());
} |
@PutMapping
@TpsControl(pointName = "NamingServiceUpdate", name = "HttpNamingServiceUpdate")
@Secured(action = ActionTypes.WRITE)
public String update(HttpServletRequest request) throws Exception {
String namespaceId = WebUtils.optional(request, CommonParams.NAMESPACE_ID, Constants.DEFAULT_NAMESPACE_ID);
String serviceName = WebUtils.required(request, CommonParams.SERVICE_NAME);
Map<String, String> metadata = UtilsAndCommons.parseMetadata(
WebUtils.optional(request, "metadata", StringUtils.EMPTY));
ServiceMetadata serviceMetadata = new ServiceMetadata();
serviceMetadata.setProtectThreshold(NumberUtils.toFloat(WebUtils.required(request, "protectThreshold")));
serviceMetadata.setExtendData(metadata);
serviceMetadata.setSelector(parseSelector(WebUtils.optional(request, "selector", StringUtils.EMPTY)));
com.alibaba.nacos.naming.core.v2.pojo.Service service = com.alibaba.nacos.naming.core.v2.pojo.Service.newService(
namespaceId, NamingUtils.getGroupName(serviceName), NamingUtils.getServiceName(serviceName));
getServiceOperator().update(service, serviceMetadata);
NotifyCenter.publishEvent(new UpdateServiceTraceEvent(System.currentTimeMillis(), namespaceId,
NamingUtils.getGroupName(serviceName), NamingUtils.getServiceName(serviceName), metadata));
return "ok";
} | @Test
void testUpdate() throws Exception {
MockHttpServletRequest servletRequest = new MockHttpServletRequest();
servletRequest.addParameter(CommonParams.SERVICE_NAME, TEST_SERVICE_NAME);
servletRequest.addParameter("protectThreshold", "0.01");
try {
String res = serviceController.update(servletRequest);
assertEquals("ok", res);
} catch (Exception e) {
e.printStackTrace();
fail(e.getMessage());
}
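// UpdateServiceTraceEvent is published asynchronously through NotifyCenter, so wait briefly before asserting.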
TimeUnit.SECONDS.sleep(1);
assertEquals(UpdateServiceTraceEvent.class, eventReceivedClass);
} |
public void decode(ByteBuf in, ByteBuf out) {
while (in.isReadable()) {
switch (state) {
case READING_PREAMBLE:
int uncompressedLength = readPreamble(in);
if (uncompressedLength == PREAMBLE_NOT_FULL) {
// We've not yet read all of the preamble, so wait until we can
return;
}
if (uncompressedLength == 0) {
// Should never happen, but it does mean we have nothing further to do
return;
}
out.ensureWritable(uncompressedLength);
state = State.READING_TAG;
// fall through
case READING_TAG:
if (!in.isReadable()) {
return;
}
tag = in.readByte();
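// The low two bits of the tag select the element type: 0b00 literal, 0b01/0b10/0b11 copies
// with 1-, 2-, and 4-byte offsets respectively.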
switch (tag & 0x03) {
case LITERAL:
state = State.READING_LITERAL;
break;
case COPY_1_BYTE_OFFSET:
case COPY_2_BYTE_OFFSET:
case COPY_4_BYTE_OFFSET:
state = State.READING_COPY;
break;
}
break;
case READING_LITERAL:
int literalWritten = decodeLiteral(tag, in, out);
if (literalWritten != NOT_ENOUGH_INPUT) {
state = State.READING_TAG;
written += literalWritten;
} else {
// Need to wait for more data
return;
}
break;
case READING_COPY:
int decodeWritten;
switch (tag & 0x03) {
case COPY_1_BYTE_OFFSET:
decodeWritten = decodeCopyWith1ByteOffset(tag, in, out, written);
if (decodeWritten != NOT_ENOUGH_INPUT) {
state = State.READING_TAG;
written += decodeWritten;
} else {
// Need to wait for more data
return;
}
break;
case COPY_2_BYTE_OFFSET:
decodeWritten = decodeCopyWith2ByteOffset(tag, in, out, written);
if (decodeWritten != NOT_ENOUGH_INPUT) {
state = State.READING_TAG;
written += decodeWritten;
} else {
// Need to wait for more data
return;
}
break;
case COPY_4_BYTE_OFFSET:
decodeWritten = decodeCopyWith4ByteOffset(tag, in, out, written);
if (decodeWritten != NOT_ENOUGH_INPUT) {
state = State.READING_TAG;
written += decodeWritten;
} else {
// Need to wait for more data
return;
}
break;
}
}
}
} | @Test
public void testDecodeCopyWithOffsetBeforeChunk() {
final ByteBuf in = Unpooled.wrappedBuffer(new byte[] {
0x0a, // preamble length
0x04 << 2, // literal tag + length
0x6e, 0x65, 0x74, 0x74, 0x79, // "netty"
0x05 << 2 | 0x01, // copy with 1-byte offset + length
0x0b // INVALID offset (greater than chunk size)
});
final ByteBuf out = Unpooled.buffer(10);
try {
assertThrows(DecompressionException.class, new Executable() {
@Override
public void execute() {
snappy.decode(in, out);
}
});
} finally {
in.release();
out.release();
}
} |
@Override
public int getIdleTimeout() {
return clientConfig.getPropertyAsInteger(
IClientConfigKey.Keys.ConnIdleEvictTimeMilliSeconds, DEFAULT_IDLE_TIMEOUT);
} | @Test
void testGetIdleTimeout() {
assertEquals(ConnectionPoolConfigImpl.DEFAULT_IDLE_TIMEOUT, connectionPoolConfig.getIdleTimeout());
} |
@Override
public MapTask apply(MapTask input) {
for (ParallelInstruction instruction : Apiary.listOrEmpty(input.getInstructions())) {
ParDoInstruction parDoInstruction = instruction.getParDo();
if (parDoInstruction != null) {
int numOutputs = Apiary.intOrZero(parDoInstruction.getNumOutputs());
List<MultiOutputInfo> multiOutputInfos =
Apiary.listOrEmpty(parDoInstruction.getMultiOutputInfos());
if (numOutputs != Apiary.listOrEmpty(instruction.getParDo().getMultiOutputInfos()).size()) {
if (numOutputs == 1) {
parDoInstruction.setMultiOutputInfos(
ImmutableList.of(new MultiOutputInfo().setTag(idGenerator.getId())));
} else {
throw new IllegalArgumentException(
String.format(
"Invalid ParDoInstruction %s, %d outputs specified, found %s tags.",
instruction.getSystemName(), numOutputs, multiOutputInfos));
}
}
}
}
return input;
} | @Test
public void testMissingTagsForMultipleOutputsThrows() {
FixMultiOutputInfosOnParDoInstructions function =
new FixMultiOutputInfosOnParDoInstructions(IdGenerators.decrementingLongs());
thrown.expect(IllegalArgumentException.class);
thrown.expectMessage("Invalid ParDoInstruction");
thrown.expectMessage("2 outputs specified");
function.apply(createMapTaskWithParDo(2));
} |
public static <InputT, OutputT> PTransformRunnerFactory<?> forWindowedValueMapFnFactory(
WindowedValueMapFnFactory<InputT, OutputT> fnFactory) {
return new Factory<>(new ExplodedWindowedValueMapperFactory<>(fnFactory));
} | @Test
public void testFullWindowedValueMappingWithCompressedWindow() throws Exception {
PTransformRunnerFactoryTestContext context =
PTransformRunnerFactoryTestContext.builder(EXPECTED_ID, EXPECTED_PTRANSFORM)
.processBundleInstructionId("57")
.pCollections(Collections.singletonMap("inputPC", INPUT_PCOLLECTION))
.coders(Collections.singletonMap("coder-id", valueCoder))
.build();
List<WindowedValue<?>> outputConsumer = new ArrayList<>();
context.addPCollectionConsumer("outputPC", outputConsumer::add);
MapFnRunners.forWindowedValueMapFnFactory(this::createMapFunctionForPTransform)
.createRunnerForPTransform(context);
assertThat(context.getStartBundleFunctions(), empty());
assertThat(context.getFinishBundleFunctions(), empty());
assertThat(context.getTearDownFunctions(), empty());
assertThat(
context.getPCollectionConsumers().keySet(), containsInAnyOrder("inputPC", "outputPC"));
IntervalWindow firstWindow = new IntervalWindow(new Instant(0L), Duration.standardMinutes(10L));
IntervalWindow secondWindow =
new IntervalWindow(new Instant(-10L), Duration.standardSeconds(22L));
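// One element in three windows should be "exploded" into one output per window.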
context
.getPCollectionConsumer("inputPC")
.accept(
WindowedValue.of(
"abc",
new Instant(12),
ImmutableSet.of(firstWindow, GlobalWindow.INSTANCE, secondWindow),
PaneInfo.NO_FIRING));
assertThat(
outputConsumer,
containsInAnyOrder(
WindowedValue.timestampedValueInGlobalWindow("ABC", new Instant(12)),
WindowedValue.of("ABC", new Instant(12), secondWindow, PaneInfo.NO_FIRING),
WindowedValue.of("ABC", new Instant(12), firstWindow, PaneInfo.NO_FIRING)));
} |
@GetMapping("/plugin/delete")
public Mono<String> delete(@RequestParam("name") final String name) {
LOG.info("delete apache shenyu local plugin for {}", name);
PluginData pluginData = PluginData.builder().name(name).build();
subscriber.unSubscribe(pluginData);
return Mono.just(Constants.SUCCESS);
} | @Test
public void testDelete() throws Exception {
final String testPluginName = "testDeletePluginName";
final PluginData pluginData = new PluginData();
pluginData.setName(testPluginName);
subscriber.onSubscribe(pluginData);
assertThat(baseDataCache.obtainPluginData(testPluginName)).isNotNull();
final MockHttpServletResponse response = this.mockMvc
.perform(MockMvcRequestBuilders.get("/shenyu/plugin/delete")
.param("name", testPluginName))
.andExpect(status().isOk())
.andReturn()
.getResponse();
assertThat(response.getStatus()).isEqualTo(HttpStatus.OK.value());
assertThat(baseDataCache.obtainPluginData(testPluginName)).isNull();
} |
public Optional<UserDto> authenticate(HttpRequest request) {
return extractCredentialsFromHeader(request)
.flatMap(credentials -> Optional.ofNullable(authenticate(credentials, request)));
} | @Test
public void does_not_authenticate_when_authorization_header_is_not_BASIC() {
when(request.getHeader(AUTHORIZATION_HEADER)).thenReturn("OTHER " + CREDENTIALS_IN_BASE64);
underTest.authenticate(request);
verifyNoInteractions(credentialsAuthentication, authenticationEvent);
} |
public static UAssign create(UExpression variable, UExpression expression) {
return new AutoValue_UAssign(variable, expression);
} | @Test
public void serialization() {
SerializableTester.reserializeAndAssert(
UAssign.create(UFreeIdent.create("foo"), ULiteral.intLit(5)));
} |
@Override
public Object copy(Object value) {
try {
Object replacement = writeReplaceMethod.invoke(value);
Object newReplacement = getDataSerializer().copy(replacement);
return READ_RESOLVE_METHOD.invoke(newReplacement);
} catch (Exception e) {
throw new RuntimeException("Can't copy lambda " + value, e);
}
} | @Test(dataProvider = "furyCopyConfig")
public void testLambdaCopy(Fury fury) {
{
BiFunction<Fury, Object, byte[]> function =
(Serializable & BiFunction<Fury, Object, byte[]>) Fury::serialize;
fury.copy(function);
}
{
Function<Integer, Integer> function =
(Serializable & Function<Integer, Integer>) (x) -> x + x;
Function<Integer, Integer> newFunc = fury.copy(function);
assertEquals(newFunc.apply(10), Integer.valueOf(20));
List<Function<Integer, Integer>> list = fury.copy(Arrays.asList(function, function));
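// Copy tracks object identity, so the duplicated reference resolves to the same copied instance.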
assertSame(list.get(0), list.get(1));
assertEquals(list.get(0).apply(20), Integer.valueOf(40));
}
assertSame(
fury.getClassResolver().getSerializerClass(Class.class), Serializers.ClassSerializer.class);
} |
@Override
public String generateInstanceId(Instance instance) {
ensureWorkerIdInitialization();
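// Produces "<snowflake id><splitter><cluster><splitter><service>"; the test below implies the splitter is "#".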
return SNOW_FLOWER_ID_GENERATOR.nextId() + NAMING_INSTANCE_ID_SPLITTER
+ instance.getClusterName() + NAMING_INSTANCE_ID_SPLITTER
+ instance.getServiceName();
} | @Test
void testGenerateInstanceId() {
final SnowFlakeInstanceIdGenerator instanceIdGenerator = new SnowFlakeInstanceIdGenerator();
Instance instance = new Instance();
Map<String, String> metaData = new HashMap<>(1);
metaData.put(PreservedMetadataKeys.INSTANCE_ID_GENERATOR, SNOWFLAKE_INSTANCE_ID_GENERATOR);
instance.setMetadata(metaData);
instance.setServiceName("service");
instance.setClusterName("cluster");
instance.setIp("1.1.1.1");
instance.setPort(1000);
String instanceId = instanceIdGenerator.generateInstanceId(instance);
assertTrue(instanceId.endsWith("#cluster#service"));
} |
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
ReflectionServiceDescriptor that = (ReflectionServiceDescriptor) o;
return Objects.equals(interfaceName, that.interfaceName)
&& Objects.equals(serviceInterfaceClass, that.serviceInterfaceClass)
&& Objects.equals(methods, that.methods)
&& Objects.equals(descToMethods, that.descToMethods);
} | @Test
void testEquals() {
ReflectionServiceDescriptor service2 = new ReflectionServiceDescriptor(DemoService.class);
ReflectionServiceDescriptor service3 = new ReflectionServiceDescriptor(DemoService.class);
Assertions.assertEquals(service2, service3);
} |
@ProcessElement
public void processElement(
@Element KV<ByteString, ChangeStreamRecord> changeStreamRecordKV,
OutputReceiver<KV<ByteString, ChangeStreamMutation>> receiver) {
ChangeStreamRecord inputRecord = changeStreamRecordKV.getValue();
if (inputRecord instanceof ChangeStreamMutation) {
receiver.output(KV.of(changeStreamRecordKV.getKey(), (ChangeStreamMutation) inputRecord));
}
} | @Test
public void shouldOutputChangeStreamMutations() {
ChangeStreamMutation mutation = mock(ChangeStreamMutation.class);
doFn.processElement(KV.of(ByteString.copyFromUtf8("test"), mutation), outputReceiver);
verify(outputReceiver, times(1)).output(KV.of(ByteString.copyFromUtf8("test"), mutation));
} |
@Override
public boolean overlap(final Window other) throws IllegalArgumentException {
if (getClass() != other.getClass()) {
throw new IllegalArgumentException("Cannot compare windows of different type. Other window has type "
+ other.getClass() + ".");
}
final TimeWindow otherWindow = (TimeWindow) other;
return startMs < otherWindow.endMs && otherWindow.startMs < endMs;
} | @Test
public void shouldOverlapIfOtherWindowIsWithinThisWindow() {
/*
* This: [-------)
* Other: [---)
*/
assertTrue(window.overlap(new TimeWindow(start, 75)));
assertTrue(window.overlap(new TimeWindow(start, end)));
assertTrue(window.overlap(new TimeWindow(75, end)));
} |
public static void validateValue(Schema schema, Object value) {
validateValue(null, schema, value);
} | @Test
public void testValidateValueMismatchInt16() {
assertThrows(DataException.class,
() -> ConnectSchema.validateValue(Schema.INT16_SCHEMA, 1));
} |
@Override
public ByteBuf getBytes(int index, byte[] dst) {
getBytes(index, dst, 0, dst.length);
return this;
} | @Test
public void testGetBytesAfterRelease2() {
final ByteBuf buffer = buffer();
try {
assertThrows(IllegalReferenceCountException.class, new Executable() {
@Override
public void execute() {
releasedBuffer().getBytes(0, buffer, 1);
}
});
} finally {
buffer.release();
}
} |
@Override
public void parse(InputStream stream, ContentHandler handler, Metadata metadata,
ParseContext context) throws IOException, SAXException, TikaException {
if (classifier == null) {
LOG.warn(getClass().getSimpleName() + " is not configured properly.");
return;
}
String inputString = IOUtils.toString(stream, "UTF-8");
String sentiment = classifier.predict(inputString);
metadata.add("Sentiment", sentiment);
} | @Test
public void endToEndTest() throws Exception {
Tika tika = getTika("tika-config-sentiment-opennlp.xml");
if (tika == null) {
return;
}
String text = "What a wonderful thought it is that" +
" some of the best days of our lives haven't happened yet.";
ByteArrayInputStream stream =
new ByteArrayInputStream(text.getBytes(StandardCharsets.UTF_8));
Metadata md = new Metadata();
tika.parse(stream, md);
String sentiment = md.get("Sentiment");
assertNotNull(sentiment);
assertEquals("positive", sentiment);
} |
Object getFromStep(String stepId, String paramName) {
try {
return executor
.submit(() -> fromStep(stepId, paramName))
.get(TIMEOUT_IN_MILLIS, TimeUnit.MILLISECONDS);
} catch (Exception e) {
throw new MaestroInternalError(
e, "getFromStep throws an exception for stepId=[%s], paramName=[%s]", stepId, paramName);
}
} | @Test
public void testInvalidGetFromStep() throws Exception {
StepRuntimeSummary summary = loadObject(TEST_STEP_RUNTIME_SUMMARY, StepRuntimeSummary.class);
when(allStepOutputData.get("step1"))
.thenReturn(Collections.singletonMap("maestro_step_runtime_summary", summary));
AssertHelper.assertThrows(
"Cannot find the referenced step id",
MaestroInternalError.class,
"getFromStep throws an exception",
() -> paramExtension.getFromStep("step2", "param1"));
AssertHelper.assertThrows(
"Cannot find the referenced param name",
MaestroInternalError.class,
"getFromStep throws an exception",
() -> paramExtension.getFromStep("step1", "param2"));
summary.getParams().get("param1").setEvaluatedTime(null);
AssertHelper.assertThrows(
"Referenced param is not evaluated yet.",
MaestroInternalError.class,
"getFromStep throws an exception",
() -> paramExtension.getFromStep("step1", "param1"));
} |
public static void rollMockClock(Duration delta) {
if (clock.equals(Clock.systemUTC()))
throw new IllegalStateException("You need to use setMockClock() first.");
setMockClock(clock.instant().plus(delta));
} | @Test(expected = IllegalStateException.class)
public void rollMockClock_uninitialized() {
TimeUtils.rollMockClock(Duration.ofMinutes(1));
} |
public static DataSchema canonicalizeDataSchemaForDistinct(QueryContext queryContext, DataSchema dataSchema) {
List<ExpressionContext> selectExpressions = queryContext.getSelectExpressions();
int numSelectExpressions = selectExpressions.size();
Preconditions.checkState(dataSchema.size() == numSelectExpressions,
"BUG: Expect same number of columns in SELECT clause and data schema, got %s in SELECT clause, %s in data "
+ "schema", numSelectExpressions, dataSchema.size());
String[] columnNames = new String[numSelectExpressions];
for (int i = 0; i < numSelectExpressions; i++) {
columnNames[i] = selectExpressions.get(i).toString();
}
return new DataSchema(columnNames, dataSchema.getColumnDataTypes());
} | @Test
public void testCanonicalizeDataSchemaForDistinct() {
QueryContext queryContext =
QueryContextConverterUtils.getQueryContext("SELECT DISTINCT col1, col2 + col3 FROM testTable");
// Intentionally make the data schema not match the string representation of the expression
DataSchema dataSchema = new DataSchema(new String[]{"col1", "add(col2+col3)"},
new ColumnDataType[]{ColumnDataType.INT, ColumnDataType.DOUBLE});
DataSchema canonicalDataSchema = ReducerDataSchemaUtils.canonicalizeDataSchemaForDistinct(queryContext, dataSchema);
assertEquals(canonicalDataSchema, new DataSchema(new String[]{"col1", "plus(col2,col3)"},
new ColumnDataType[]{ColumnDataType.INT, ColumnDataType.DOUBLE}));
} |
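// A hedged companion sketch (assumption): the Preconditions check in the focal
// method rejects a data schema whose column count differs from the SELECT list,
// surfacing the "BUG" message as an IllegalStateException.
@Test
public void testCanonicalizeDataSchemaForDistinctWithColumnCountMismatch() {
    QueryContext queryContext =
        QueryContextConverterUtils.getQueryContext("SELECT DISTINCT col1, col2 FROM testTable");
    DataSchema dataSchema =
        new DataSchema(new String[]{"col1"}, new ColumnDataType[]{ColumnDataType.INT});
    assertThrows(IllegalStateException.class,
        () -> ReducerDataSchemaUtils.canonicalizeDataSchemaForDistinct(queryContext, dataSchema));
}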
static List<Factory> discoverFactories(ClassLoader classLoader) {
final Iterator<Factory> serviceLoaderIterator =
ServiceLoader.load(Factory.class, classLoader).iterator();
final List<Factory> loadResults = new ArrayList<>();
while (true) {
try {
// error handling should also be applied to the hasNext() call because service
// loading might cause problems here as well
if (!serviceLoaderIterator.hasNext()) {
break;
}
loadResults.add(serviceLoaderIterator.next());
} catch (Throwable t) {
if (t instanceof NoClassDefFoundError) {
LOG.debug(
"NoClassDefFoundError when loading a "
+ Factory.class.getCanonicalName()
+ ". This is expected when trying to load a format dependency but no flink-connector-files is loaded.",
t);
} else {
throw new TableException(
"Unexpected error when trying to load service provider.", t);
}
}
}
return loadResults;
} | @Test
void testDiscoverFactoryBadClass(@TempDir Path tempDir) throws IOException {
// Let's prepare the classloader with a factory interface and 2 classes, one implements our
// sub-interface of SerializationFormatFactory and the other implements only
// SerializationFormatFactory.
final String subInterfaceName = "MyFancySerializationSchemaFormat";
final String subInterfaceImplementationName = "MyFancySerializationSchemaFormatImpl";
final String serializationSchemaImplementationName = "AnotherSerializationSchema";
final URLClassLoader classLoaderIncludingTheInterface =
ClassLoaderUtils.withRoot(tempDir.toFile())
.addClass(
subInterfaceName,
"public interface "
+ subInterfaceName
+ " extends "
+ SerializationFormatFactory.class.getName()
+ " {}")
.addClass(
subInterfaceImplementationName,
"import org.apache.flink.api.common.serialization.SerializationSchema;"
+ "import org.apache.flink.configuration.ConfigOption;"
+ "import org.apache.flink.configuration.ReadableConfig;"
+ "import org.apache.flink.table.connector.format.EncodingFormat;"
+ "import org.apache.flink.table.data.RowData;"
+ "import org.apache.flink.table.factories.DynamicTableFactory;"
+ "import org.apache.flink.table.factories.SerializationFormatFactory;"
+ "import java.util.Set;"
+ "public class "
+ subInterfaceImplementationName
+ " implements "
+ subInterfaceName
+ " {"
+ "@Override public String factoryIdentifier() { return null; }"
+ "@Override public Set<ConfigOption<?>> requiredOptions() { return null; }"
+ "@Override public Set<ConfigOption<?>> optionalOptions() { return null; }"
+ "@Override public EncodingFormat<SerializationSchema<RowData>> createEncodingFormat(DynamicTableFactory.Context context, ReadableConfig formatOptions) { return null; }"
+ "}")
.addClass(
serializationSchemaImplementationName,
"import org.apache.flink.api.common.serialization.SerializationSchema;"
+ "import org.apache.flink.configuration.ConfigOption;"
+ "import org.apache.flink.configuration.ReadableConfig;"
+ "import org.apache.flink.table.connector.format.EncodingFormat;"
+ "import org.apache.flink.table.data.RowData;"
+ "import org.apache.flink.table.factories.DynamicTableFactory;"
+ "import org.apache.flink.table.factories.SerializationFormatFactory;"
+ "import java.util.Set;"
+ "public class "
+ serializationSchemaImplementationName
+ " implements "
+ SerializationFormatFactory.class.getName()
+ " {"
+ "@Override public String factoryIdentifier() { return null; }"
+ "@Override public Set<ConfigOption<?>> requiredOptions() { return null; }"
+ "@Override public Set<ConfigOption<?>> optionalOptions() { return null; }"
+ "@Override public EncodingFormat<SerializationSchema<RowData>> createEncodingFormat(DynamicTableFactory.Context context, ReadableConfig formatOptions) { return null; }"
+ "}")
.addService(Factory.class.getName(), subInterfaceImplementationName)
.addService(Factory.class.getName(), serializationSchemaImplementationName)
.build();
// Delete the sub interface now, so it can't be loaded
Files.delete(tempDir.resolve(subInterfaceName + ".class"));
assertThat(FactoryUtil.discoverFactories(classLoaderIncludingTheInterface))
.map(f -> f.getClass().getName())
.doesNotContain(subInterfaceImplementationName)
.contains(serializationSchemaImplementationName);
} |
@Override
public String getHost() {
return host;
} | @Test
public void testGetHost() {
assertThat(polarisRegistration1.getHost()).isEqualTo(HOST);
} |
@Override
public void parse(InputStream stream, ContentHandler handler, Metadata metadata,
ParseContext context) throws IOException, SAXException, TikaException {
// Automatically detect the character encoding
try (AutoDetectReader reader = new AutoDetectReader(CloseShieldInputStream.wrap(stream),
metadata, getEncodingDetector(context))) {
//try to get detected content type; could be a subclass of text/plain
//such as vcal, etc.
String incomingMime = metadata.get(Metadata.CONTENT_TYPE);
MediaType mediaType = MediaType.TEXT_PLAIN;
if (incomingMime != null) {
MediaType tmpMediaType = MediaType.parse(incomingMime);
if (tmpMediaType != null) {
mediaType = tmpMediaType;
}
}
Charset charset = reader.getCharset();
MediaType type = new MediaType(mediaType, charset);
metadata.set(Metadata.CONTENT_TYPE, type.toString());
// deprecated, see TIKA-431
metadata.set(Metadata.CONTENT_ENCODING, charset.name());
XHTMLContentHandler xhtml = new XHTMLContentHandler(handler, metadata);
xhtml.startDocument();
xhtml.startElement("p");
char[] buffer = new char[4096];
int n = reader.read(buffer);
while (n != -1) {
xhtml.characters(buffer, 0, n);
n = reader.read(buffer);
}
xhtml.endElement("p");
xhtml.endDocument();
}
} | @Test
public void testUTF8Text() throws Exception {
String text = "I\u00F1t\u00EBrn\u00E2ti\u00F4n\u00E0liz\u00E6ti\u00F8n";
ContentHandler handler = new BodyContentHandler();
Metadata metadata = new Metadata();
parser.parse(new ByteArrayInputStream(text.getBytes(UTF_8)), handler, metadata,
new ParseContext());
assertEquals("text/plain; charset=UTF-8", metadata.get(Metadata.CONTENT_TYPE));
assertEquals("UTF-8", metadata.get(Metadata.CONTENT_ENCODING)); // deprecated
TikaTest.assertContains(text, handler.toString());
} |
@Override
public boolean test(Pickle pickle) {
URI picklePath = pickle.getUri();
if (!lineFilters.containsKey(picklePath)) {
return true;
}
for (Integer line : lineFilters.get(picklePath)) {
if (Objects.equals(line, pickle.getLocation().getLine())
|| Objects.equals(line, pickle.getScenarioLocation().getLine())
|| pickle.getExamplesLocation().map(Location::getLine).map(line::equals).orElse(false)
|| pickle.getRuleLocation().map(Location::getLine).map(line::equals).orElse(false)
|| pickle.getFeatureLocation().map(Location::getLine).map(line::equals).orElse(false)) {
return true;
}
}
return false;
} | @Test
void does_not_match_step() {
LinePredicate predicate = new LinePredicate(singletonMap(
featurePath,
singletonList(4)));
assertFalse(predicate.test(firstPickle));
assertFalse(predicate.test(secondPickle));
assertFalse(predicate.test(thirdPickle));
assertFalse(predicate.test(fourthPickle));
} |
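// A hedged companion sketch (assumption): a pickle whose URI has no entry in the
// filter map always matches, so an empty filter acts as "match all".
@Test
void matches_when_uri_has_no_line_filter() {
    LinePredicate predicate = new LinePredicate(Collections.emptyMap());
    assertTrue(predicate.test(firstPickle));
}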
public static <T> GoConfigClassLoader<T> classParser(Element e, Class<T> aClass, ConfigCache configCache, GoCipher goCipher, final ConfigElementImplementationRegistry registry, ConfigReferenceElements configReferenceElements) {
return new GoConfigClassLoader<>(e, aClass, configCache, goCipher, registry, configReferenceElements);
} | @Test
public void shouldErrorOutWhenConfigClassHasAttributeAwareConfigTagAnnotationButAttributeIsNotPresent() {
final Element element = new Element("example");
final GoConfigClassLoader<ConfigWithAttributeAwareConfigTagAnnotation> loader = GoConfigClassLoader.classParser(element, ConfigWithAttributeAwareConfigTagAnnotation.class, configCache, goCipher, registry, referenceElements);
assertThatThrownBy(loader::parse)
.isInstanceOf(RuntimeException.class)
.hasMessageContaining("Expected attribute `type` to be present for \n\t<example />");
} |
static byte[] generateRandomPayload(Integer recordSize, List<byte[]> payloadByteList, byte[] payload,
SplittableRandom random, boolean payloadMonotonic, long recordValue) {
if (!payloadByteList.isEmpty()) {
payload = payloadByteList.get(random.nextInt(payloadByteList.size()));
} else if (recordSize != null) {
for (int j = 0; j < payload.length; ++j)
payload[j] = (byte) (random.nextInt(26) + 65);
} else if (payloadMonotonic) {
payload = Long.toString(recordValue).getBytes(StandardCharsets.UTF_8);
} else {
throw new IllegalArgumentException("no payload File Path or record Size or payload-monotonic option provided");
}
return payload;
} | @Test
public void testGenerateRandomPayloadByRecordSize() {
Integer recordSize = 100;
byte[] payload = new byte[recordSize];
List<byte[]> payloadByteList = new ArrayList<>();
SplittableRandom random = new SplittableRandom(0);
payload = ProducerPerformance.generateRandomPayload(recordSize, payloadByteList, payload, random, false, 0L);
for (byte b : payload) {
assertNotEquals(0, b);
}
} |
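// A hedged companion sketch (assumption): in payload-monotonic mode the payload is
// simply the record value rendered as UTF-8 digits, with recordSize left null and
// the payload list empty, per the branch order in the focal method.
@Test
public void testGenerateMonotonicPayload() {
    SplittableRandom random = new SplittableRandom(0);
    byte[] payload = ProducerPerformance.generateRandomPayload(
        null, new ArrayList<>(), new byte[0], random, true, 42L);
    assertEquals("42", new String(payload, StandardCharsets.UTF_8));
}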
@Nonnull
@Override
public Iterator<T> getIterator(int unused)
{
List<T> hosts = new ArrayList<>(_cumulativePointsMap.values());
if (!hosts.isEmpty())
{
Collections.shuffle(hosts);
// We try to put the host with the highest probability first by calling get(0). This avoids the situation where an unhealthy host is returned first.
try
{
Collections.swap(hosts, 0, hosts.indexOf(get(0)));
} catch (IndexOutOfBoundsException e)
{
LOG.warn("Got indexOutOfBound when trying to shuffle list:" + e.getMessage());
}
}
return hosts.iterator();
} | @Test
public void testLowProbabilityHost() throws Exception {
Map<URI, Integer> pointsMap = new HashMap<>();
// nine healthy hosts with full weight plus one host in slow start with minimal weight
addHostsToPointMap(9, 100, pointsMap);
addHostsToPointMap(1, 1, pointsMap);
Ring<URI> ring = new DistributionNonDiscreteRingFactory<URI>().createRing(pointsMap);
List<URI> results = new ArrayList<>();
Iterator<URI> iter = ring.getIterator(0);
while (iter.hasNext()) {
    results.add(iter.next());
}
Assert.assertEquals(10, results.size());
} |
public Object evaluatePredictedValue(final ProcessingDTO processingDTO) {
return commonEvaluate(getValueFromKiePMMLNameValuesByVariableName(targetField, processingDTO.getKiePMMLNameValues())
.orElse(null), dataType);
} | @Test
void evaluatePredictedValue() {
final String variableName = "variableName";
KiePMMLOutputField kiePMMLOutputField = KiePMMLOutputField.builder("outputfield", Collections.emptyList())
.withResultFeature(RESULT_FEATURE.PREDICTED_VALUE)
.withTargetField(variableName)
.build();
final List<KiePMMLNameValue> kiePMMLNameValues = IntStream.range(0, 3).mapToObj(i -> new KiePMMLNameValue(
"val-" + i, i)).collect(Collectors.toList());
ProcessingDTO processingDTO = getProcessingDTO(Collections.emptyList(),
kiePMMLNameValues, Collections.emptyList());
assertThat(kiePMMLOutputField.evaluate(processingDTO)).isNull();
final Object variableValue = 243.94;
kiePMMLNameValues.add(new KiePMMLNameValue(variableName, variableValue));
processingDTO = getProcessingDTO(Collections.emptyList(),
kiePMMLNameValues, Collections.emptyList());
Object retrieved = kiePMMLOutputField.evaluate(processingDTO);
assertThat(retrieved).isNotNull();
assertThat(retrieved).isEqualTo(variableValue);
} |
@Override
public void beforeMigration(PartitionMigrationEvent event) {
if (isPrimaryReplicaMigrationEvent(event)) {
ownerMigrationsStarted.incrementAndGet();
}
migrationAwareService.beforeMigration(event);
} | @Test
public void beforeMigration() {
// when: countingMigrationAwareService.beforeMigration was invoked (in setUp method)
// then: if event involves primary replica, stamp should change.
if (isPrimaryReplicaMigrationEvent(event)) {
assertEquals(IN_FLIGHT_MIGRATION_STAMP, countingMigrationAwareService.getMigrationStamp());
assertFalse(countingMigrationAwareService.validateMigrationStamp(IN_FLIGHT_MIGRATION_STAMP));
} else {
assertEquals(initialMigrationStamp, countingMigrationAwareService.getMigrationStamp());
assertTrue(countingMigrationAwareService.validateMigrationStamp(initialMigrationStamp));
}
} |
@Override
public String toString() {
return toStringHelper(getClass())
.add("currentHopLimit", Byte.toString(currentHopLimit))
.add("mFlag", Byte.toString(mFlag))
.add("oFlag", Byte.toString(oFlag))
.add("routerLifetime", Short.toString(routerLifetime))
.add("reachableTime", Integer.toString(reachableTime))
.add("retransmitTimer", Integer.toString(retransmitTimer))
.toString();
// TODO: need to handle options
} | @Test
public void testToStringRA() throws Exception {
RouterAdvertisement ra = deserializer.deserialize(bytePacket, 0, bytePacket.length);
String str = ra.toString();
assertTrue(StringUtils.contains(str, "currentHopLimit=" + (byte) 3));
assertTrue(StringUtils.contains(str, "mFlag=" + (byte) 1));
assertTrue(StringUtils.contains(str, "oFlag=" + (byte) 1));
assertTrue(StringUtils.contains(str, "routerLifetime=" + (short) 0x258));
assertTrue(StringUtils.contains(str, "reachableTime=" + 0x3e8));
assertTrue(StringUtils.contains(str, "retransmitTimer=" + 0x1f4));
// TODO: need to handle options
} |
@SqlInvokedScalarFunction(value = "array_min_by", deterministic = true, calledOnNullInput = true)
@Description("Get the minimum value of array, by using a specific transformation function")
@TypeParameter("T")
@TypeParameter("U")
@SqlParameters({@SqlParameter(name = "input", type = "array(T)"), @SqlParameter(name = "f", type = "function(T, U)")})
@SqlType("T")
public static String arrayMinBy()
{
return "RETURN input[" +
"array_min(zip_with(transform(input, f), sequence(1, cardinality(input)), (x, y)->IF(x IS NULL, NULL, (x, y))))[2]" +
"]";
} | @Test
public void testArrayMinBy()
{
assertFunction("ARRAY_MIN_BY(ARRAY [double'1.0', double'2.0'], i -> i)", DOUBLE, 1.0d);
assertFunction("ARRAY_MIN_BY(ARRAY [double'-3.0', double'2.0'], i -> i*i)", DOUBLE, 2.0d);
assertFunction("ARRAY_MIN_BY(ARRAY ['a', 'bb', 'c'], x -> LENGTH(x))", createVarcharType(2), "a");
assertFunction("ARRAY_MIN_BY(ARRAY [1, 2, 3], x -> 1-x)", INTEGER, 3);
assertFunction("ARRAY_MIN_BY(ARRAY [ARRAY['a'], ARRAY['b', 'b'], ARRAY['c']], x -> CARDINALITY(x))", new ArrayType(createVarcharType(1)), singletonList("a"));
assertFunction("ARRAY_MIN_BY(ARRAY [MAP(ARRAY['foo', 'bar'], ARRAY[1, 2]), MAP(ARRAY['foo', 'bar'], ARRAY[0, 3])], x -> x['foo'])", mapType(createVarcharType(3), INTEGER), ImmutableMap.of("foo", 0, "bar", 3));
assertFunction("ARRAY_MIN_BY(ARRAY [CAST(ROW(0, 2.0) AS ROW(x BIGINT, y DOUBLE)), CAST(ROW(1, 3.0) AS ROW(x BIGINT, y DOUBLE))], r -> r.y).x", BIGINT, 0L);
assertFunction("ARRAY_MIN_BY(ARRAY [null, double'1.0', double'2.0'], i -> i)", DOUBLE, null);
assertFunction("ARRAY_MIN_BY(ARRAY [cast(null as double), cast(null as double)], i -> i)", DOUBLE, null);
assertFunction("ARRAY_MIN_BY(cast(null as array(double)), i -> i)", DOUBLE, null);
} |
public synchronized int sendFetches() {
final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
sendFetchesInternal(
fetchRequests,
(fetchTarget, data, clientResponse) -> {
synchronized (Fetcher.this) {
handleFetchSuccess(fetchTarget, data, clientResponse);
}
},
(fetchTarget, data, error) -> {
synchronized (Fetcher.this) {
handleFetchFailure(fetchTarget, data, error);
}
});
return fetchRequests.size();
} | @Test
public void testMultipleAbortMarkers() {
buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(),
new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
ByteBuffer buffer = ByteBuffer.allocate(1024);
int currentOffset = 0;
currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset,
new SimpleRecord(time.milliseconds(), "abort1-1".getBytes(), "value".getBytes()),
new SimpleRecord(time.milliseconds(), "abort1-2".getBytes(), "value".getBytes()));
currentOffset += abortTransaction(buffer, 1L, currentOffset);
// Duplicate abort -- should be ignored.
currentOffset += abortTransaction(buffer, 1L, currentOffset);
// Now commit a transaction.
currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset,
new SimpleRecord(time.milliseconds(), "commit1-1".getBytes(), "value".getBytes()),
new SimpleRecord(time.milliseconds(), "commit1-2".getBytes(), "value".getBytes()));
commitTransaction(buffer, 1L, currentOffset);
buffer.flip();
List<FetchResponseData.AbortedTransaction> abortedTransactions = Collections.singletonList(
new FetchResponseData.AbortedTransaction().setProducerId(1).setFirstOffset(0)
);
MemoryRecords records = MemoryRecords.readableRecords(buffer);
assignFromUser(singleton(tp0));
subscriptions.seek(tp0, 0);
// normal fetch
assertEquals(1, sendFetches());
assertFalse(fetcher.hasCompletedFetches());
client.prepareResponse(fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
consumerClient.poll(time.timer(0));
assertTrue(fetcher.hasCompletedFetches());
Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchRecords();
assertTrue(fetchedRecords.containsKey(tp0));
assertEquals(2, fetchedRecords.get(tp0).size());
List<ConsumerRecord<byte[], byte[]>> fetchedConsumerRecords = fetchedRecords.get(tp0);
Set<String> expectedCommittedKeys = new HashSet<>(Arrays.asList("commit1-1", "commit1-2"));
Set<String> actuallyCommittedKeys = new HashSet<>();
for (ConsumerRecord<byte[], byte[]> consumerRecord : fetchedConsumerRecords) {
actuallyCommittedKeys.add(new String(consumerRecord.key(), StandardCharsets.UTF_8));
}
assertEquals(expectedCommittedKeys, actuallyCommittedKeys);
} |
@VisibleForTesting
static void configureDataSource( BasicDataSource ds, DatabaseMeta databaseMeta, String partitionId,
int initialSize, int maximumSize ) throws KettleDatabaseException {
// substitute variables and populate pool properties; add credentials
Properties connectionPoolProperties = new Properties( databaseMeta.getConnectionPoolingProperties() );
connectionPoolProperties = environmentSubstitute( connectionPoolProperties, databaseMeta );
setPoolProperties( ds, connectionPoolProperties, initialSize, maximumSize );
setCredentials( ds, databaseMeta, partitionId );
// add url/driver class
String url = databaseMeta.environmentSubstitute( databaseMeta.getURL( partitionId ) );
ds.setUrl( url );
String clazz = databaseMeta.getDriverClass();
if ( databaseMeta.getDatabaseInterface() != null ) {
ds.setDriverClassLoader( databaseMeta.getDatabaseInterface().getClass().getClassLoader() );
}
ds.setDriverClassName( clazz );
dataSourcesAttributesMap.put( getDataSourceName( databaseMeta, partitionId ), databaseMeta.getAttributes() );
} | @Test
public void testConfigureDataSourceWhenNoDatabaseInterface() throws KettleDatabaseException {
when( dbMeta.getDatabaseInterface() ).thenReturn( null );
ConnectionPoolUtil.configureDataSource(
dataSource, dbMeta, "partId", INITIAL_SIZE, MAX_SIZE );
verify( dataSource, never() ).setDriverClassLoader( any( ClassLoader.class ) );
} |
void handleLine(final String line) {
final String trimmedLine = Optional.ofNullable(line).orElse("").trim();
if (trimmedLine.isEmpty()) {
return;
}
handleStatements(trimmedLine);
} | @Test
public void shouldThrowOnCCloudConnectorRequestWithoutApiKey() throws Exception {
// Given:
final KsqlRestClient mockRestClient = givenMockRestClient();
when(mockRestClient.getIsCCloudServer()).thenReturn(true);
when(mockRestClient.getHasCCloudApiKey()).thenReturn(false);
// When:
final Exception e = assertThrows(
KsqlMissingCredentialsException.class,
() -> localCli.handleLine("list connectors;")
);
// Then:
assertThat(e.getMessage(), containsString("In order to use ksqlDB's connector "
+ "management capabilities with a Confluent Cloud ksqlDB server, launch the "
+ "ksqlDB command line with the additional flags '--confluent-api-key' and "
+ "'--confluent-api-secret' to pass a Confluent Cloud API key."));
verify(mockRestClient, never()).makeConnectorRequest(anyString(), anyLong());
} |
@Override
public Collection<RejectedAwarePlugin> getRejectedAwarePluginList() {
return Collections.emptyList();
} | @Test
public void testGetRejectedAwarePluginList() {
Assert.assertEquals(Collections.emptyList(), manager.getRejectedAwarePluginList());
} |
@Override
public boolean isAllowable(URL url, Invocation invocation) {
int rate = url.getMethodParameter(RpcUtils.getMethodName(invocation), TPS_LIMIT_RATE_KEY, -1);
long interval = url.getMethodParameter(
RpcUtils.getMethodName(invocation), TPS_LIMIT_INTERVAL_KEY, DEFAULT_TPS_LIMIT_INTERVAL);
String serviceKey = url.getServiceKey();
if (rate > 0) {
StatItem statItem = stats.get(serviceKey);
if (statItem == null) {
stats.putIfAbsent(serviceKey, new StatItem(serviceKey, rate, interval));
statItem = stats.get(serviceKey);
} else {
// rate or interval has changed, rebuild
if (statItem.getRate() != rate || statItem.getInterval() != interval) {
stats.put(serviceKey, new StatItem(serviceKey, rate, interval));
statItem = stats.get(serviceKey);
}
}
return statItem.isAllowable();
} else {
StatItem statItem = stats.get(serviceKey);
if (statItem != null) {
stats.remove(serviceKey);
}
}
return true;
} | @Test
void testConfigChange() {
Invocation invocation = new MockInvocation();
URL url = URL.valueOf("test://test");
url = url.addParameter(INTERFACE_KEY, "org.apache.dubbo.rpc.file.TpsService");
url = url.addParameter(TPS_LIMIT_RATE_KEY, TEST_LIMIT_RATE);
url = url.addParameter(TPS_LIMIT_INTERVAL_KEY, 1000);
for (int i = 1; i <= TEST_LIMIT_RATE; i++) {
Assertions.assertTrue(defaultTPSLimiter.isAllowable(url, invocation));
}
final int tenTimesLimitRate = TEST_LIMIT_RATE * 10;
url = url.addParameter(TPS_LIMIT_RATE_KEY, tenTimesLimitRate);
for (int i = 1; i <= tenTimesLimitRate; i++) {
Assertions.assertTrue(defaultTPSLimiter.isAllowable(url, invocation));
}
Assertions.assertFalse(defaultTPSLimiter.isAllowable(url, invocation));
} |
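// A hedged companion sketch (assumption): a non-positive rate removes any cached
// StatItem and disables limiting entirely, so every call is allowed.
@Test
void testNonPositiveRateDisablesLimiting() {
    Invocation invocation = new MockInvocation();
    URL url = URL.valueOf("test://test")
            .addParameter(INTERFACE_KEY, "org.apache.dubbo.rpc.file.TpsService")
            .addParameter(TPS_LIMIT_RATE_KEY, -1);
    for (int i = 0; i < 10 * TEST_LIMIT_RATE; i++) {
        Assertions.assertTrue(defaultTPSLimiter.isAllowable(url, invocation));
    }
}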
public void uninstall(String pluginKey) {
if (!pluginRepository.hasPlugin(pluginKey) || pluginRepository.getPlugin(pluginKey).getType() != EXTERNAL) {
throw new IllegalArgumentException(format("Plugin [%s] is not installed", pluginKey));
}
Set<String> uninstallKeys = new HashSet<>();
uninstallKeys.add(pluginKey);
appendDependentPluginKeys(pluginKey, uninstallKeys);
for (String uninstallKey : uninstallKeys) {
PluginInfo info = pluginRepository.getPluginInfo(uninstallKey);
// we don't check type because the dependent of an external plugin should never be a bundled plugin!
uninstall(info.getKey(), info.getName(), info.getNonNullJarFile().getName());
}
} | @Test
public void uninstall() throws Exception {
File installedJar = copyTestPluginTo("test-base-plugin", fs.getInstalledExternalPluginsDir());
serverPluginRepository.addPlugin(newPlugin("testbase", EXTERNAL, installedJar.getName()));
underTest.start();
assertThat(installedJar).exists();
underTest.uninstall("testbase");
assertThat(installedJar).doesNotExist();
assertThat(uninstallDir.list()).containsOnly(installedJar.getName());
} |
@Override
public Serde<GenericKey> create(
final FormatInfo format,
final PersistenceSchema schema,
final KsqlConfig ksqlConfig,
final Supplier<SchemaRegistryClient> schemaRegistryClientFactory,
final String loggerNamePrefix,
final ProcessingLogContext processingLogContext,
final Optional<TrackedCallback> tracker
) {
return createInner(
format,
schema,
ksqlConfig,
schemaRegistryClientFactory,
loggerNamePrefix,
processingLogContext,
tracker
);
} | @Test
public void shouldReturnTrackingSerdeNonWindowed() {
// When:
factory.create(format, schema, config, srClientFactory, LOGGER_PREFIX, processingLogCxt,
Optional.of(callback));
// Then:
verify(innerFactory).wrapInTrackingSerde(loggingSerde, callback);
} |
public void logWithRunnable(Runnable runnable) {
long currentTimeMillis = System.currentTimeMillis();
if (disabled) {
runnable.run();
} else if (currentTimeMillis > lastLogTime + waitTime) {
lastLogTime = currentTimeMillis;
runnable.run();
}
} | @Test
public void testDisable() throws InterruptedException {
System.setProperty(RpcOptions.DISABLE_LOG_TIME_WAIT_CONF, "true");
try {
    TimeWaitLogger timeWaitLogger = new TimeWaitLogger(100);
    AtomicLong atomicLong = new AtomicLong();
    new Thread(() -> {
        while (true) {
            timeWaitLogger.logWithRunnable(atomicLong::incrementAndGet);
        }
    }).start();
    Thread.sleep(150);
    Assert.assertTrue(atomicLong.get() > 1000);
} finally {
    System.setProperty(RpcOptions.DISABLE_LOG_TIME_WAIT_CONF, "");
}
TimeWaitLogger timeWaitLogger = new TimeWaitLogger(100);
AtomicLong atomicLong = new AtomicLong();
new Thread(() -> {
    while (true) {
        timeWaitLogger.logWithRunnable(atomicLong::incrementAndGet);
    }
}).start();
Thread.sleep(150);
Assert.assertEquals(2L, atomicLong.get());
} |
public void process(Packet packet) {
MemberHandshake handshake = serverContext.getSerializationService().toObject(packet);
TcpServerConnection connection = (TcpServerConnection) packet.getConn();
if (!connection.setHandshake()) {
if (logger.isFinestEnabled()) {
logger.finest("Connection " + connection + " handshake is already completed, ignoring incoming " + handshake);
}
return;
}
if (handshake.getPlaneCount() != expectedPlaneCount) {
connection.close("The connection handshake has incorrect number of planes. "
+ "Expected " + expectedPlaneCount + " found " + handshake.getPlaneCount(), null);
return;
}
// before we register the connection on the plane, we make sure the plane index is set on the connection
// so that we can safely remove the connection from the plane.
connection.setPlaneIndex(handshake.getPlaneIndex());
process(connection, handshake);
} | @Test
public void process() {
tcpServerControl.process(memberHandshakeMessage());
assertExpectedAddressesRegistered();
assertMemberConnectionRegistered();
assertTrueEventually(() ->
assertEquals(
0,
connectionManager.getConnections().size()
), 5);
connection.close("close connection", null);
assertAddressesCleanedUp();
} |
private String hash(String password, String salt) {
return PREFIX + BCrypt.hashpw(password, salt) + SALT_PREFIX + salt;
} | @Test
public void testHash() throws Exception {
final String clearTextPassword = "foobar";
final String hashedPassword = bCryptPasswordAlgorithm.hash(clearTextPassword);
assertThat(hashedPassword)
.isNotEmpty()
.startsWith("{bcrypt}")
.contains("{salt}");
assertThat(bCryptPasswordAlgorithm.matches(hashedPassword, clearTextPassword)).isTrue();
} |
@VisibleForTesting
static YarnConfiguration getYarnConfWithRmHaId(Configuration conf)
throws IOException {
YarnConfiguration yarnConf = new YarnConfiguration(conf);
if (yarnConf.get(YarnConfiguration.RM_HA_ID) == null) {
// If RM_HA_ID is not configured, use the first of RM_HA_IDS.
// Any valid RM HA ID should work.
String[] rmIds = yarnConf.getStrings(YarnConfiguration.RM_HA_IDS);
if ((rmIds != null) && (rmIds.length > 0)) {
yarnConf.set(YarnConfiguration.RM_HA_ID, rmIds[0]);
} else {
throw new IOException("RM_HA_IDS property is not set for HA resource "
+ "manager");
}
}
return yarnConf;
} | @Test
public void testGetYarnConfWithRmHaId() throws IOException {
Configuration conf = new Configuration();
conf.set(YarnConfiguration.RM_HA_ID, "rm0");
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, false);
YarnConfiguration result = YarnClientUtils.getYarnConfWithRmHaId(conf);
assertSameConf(conf, result);
assertEquals("RM_HA_ID was changed when it shouldn't have been: "
+ result.get(YarnConfiguration.RM_HA_ID), "rm0",
result.get(YarnConfiguration.RM_HA_ID));
conf = new Configuration();
conf.set(YarnConfiguration.RM_HA_ID, "rm0");
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
result = YarnClientUtils.getYarnConfWithRmHaId(conf);
assertSameConf(conf, result);
assertEquals("RM_HA_ID was changed when it shouldn't have been: "
+ result.get(YarnConfiguration.RM_HA_ID), "rm0",
result.get(YarnConfiguration.RM_HA_ID));
conf = new Configuration();
conf.set(YarnConfiguration.RM_HA_IDS, "rm0,rm1");
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
result = YarnClientUtils.getYarnConfWithRmHaId(conf);
assertSameConf(conf, result);
assertEquals("RM_HA_ID was not set correctly: "
+ result.get(YarnConfiguration.RM_HA_ID), "rm0",
result.get(YarnConfiguration.RM_HA_ID));
conf = new Configuration();
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
try {
YarnClientUtils.getYarnConfWithRmHaId(conf);
fail("Allowed invalid HA configuration: HA is enabled, but no RM ID "
+ "is set");
} catch (IOException ex) {
// Expected
}
} |
public static String uncompress(byte[] compressedURL) {
StringBuffer url = new StringBuffer();
switch (compressedURL[0] & 0x0f) {
case EDDYSTONE_URL_PROTOCOL_HTTP_WWW:
url.append(URL_PROTOCOL_HTTP_WWW_DOT);
break;
case EDDYSTONE_URL_PROTOCOL_HTTPS_WWW:
url.append(URL_PROTOCOL_HTTPS_WWW_DOT);
break;
case EDDYSTONE_URL_PROTOCOL_HTTP:
url.append(URL_PROTOCOL_HTTP_COLON_SLASH_SLASH);
break;
case EDDYSTONE_URL_PROTOCOL_HTTPS:
url.append(URL_PROTOCOL_HTTPS_COLON_SLASH_SLASH);
break;
default:
break;
}
byte lastByte = -1;
for (int i = 1; i < compressedURL.length; i++) {
byte b = compressedURL[i];
if (lastByte == 0 && b == 0) {
break;
}
lastByte = b;
String tld = topLevelDomainForByte(b);
if (tld != null) {
url.append(tld);
}
else {
url.append((char) b);
}
}
return url.toString();
} | @Test
public void testDecompressWithPath() {
String testURL = "http://google.com/123";
byte[] testBytes = {0x02, 'g', 'o', 'o', 'g', 'l', 'e', 0x00, '1', '2', '3'};
assertEquals(testURL, UrlBeaconUrlCompressor.uncompress(testBytes));
} |
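// A hedged companion sketch (assumption): the leading byte 0x02 expands to
// "http://" and the body byte 0x00 expands to the ".com/" top-level-domain code,
// so dropping the path from the test above still yields a full URL.
@Test
public void testDecompressWithoutPath() {
    byte[] testBytes = {0x02, 'g', 'o', 'o', 'g', 'l', 'e', 0x00};
    assertEquals("http://google.com/", UrlBeaconUrlCompressor.uncompress(testBytes));
}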
@Override
public List<SnowflakeIdentifier> listIcebergTables(SnowflakeIdentifier scope) {
StringBuilder baseQuery = new StringBuilder("SHOW ICEBERG TABLES");
String[] queryParams = null;
switch (scope.type()) {
case ROOT:
// account-level listing
baseQuery.append(" IN ACCOUNT");
break;
case DATABASE:
// database-level listing
baseQuery.append(" IN DATABASE IDENTIFIER(?)");
queryParams = new String[] {scope.toIdentifierString()};
break;
case SCHEMA:
// schema-level listing
baseQuery.append(" IN SCHEMA IDENTIFIER(?)");
queryParams = new String[] {scope.toIdentifierString()};
break;
default:
throw new IllegalArgumentException(
String.format("Unsupported scope type for listIcebergTables: %s", scope));
}
final String finalQuery = baseQuery.toString();
final String[] finalQueryParams = queryParams;
List<SnowflakeIdentifier> tables;
try {
tables =
connectionPool.run(
conn ->
queryHarness.query(conn, finalQuery, TABLE_RESULT_SET_HANDLER, finalQueryParams));
} catch (SQLException e) {
throw snowflakeExceptionToIcebergException(
scope, e, String.format("Failed to list tables for scope '%s'", scope));
} catch (InterruptedException e) {
throw new UncheckedInterruptedException(
e, "Interrupted while listing tables for scope '%s'", scope);
}
tables.forEach(
table ->
Preconditions.checkState(
table.type() == SnowflakeIdentifier.Type.TABLE,
"Expected TABLE, got identifier '%s' for scope '%s'",
table,
scope));
return tables;
} | @SuppressWarnings("unchecked")
@Test
public void testListIcebergTablesInAccount() throws SQLException {
when(mockResultSet.next())
.thenReturn(true)
.thenReturn(true)
.thenReturn(true)
.thenReturn(true)
.thenReturn(false);
when(mockResultSet.getString("database_name"))
.thenReturn("DB_1")
.thenReturn("DB_1")
.thenReturn("DB_1")
.thenReturn("DB_2");
when(mockResultSet.getString("schema_name"))
.thenReturn("SCHEMA_1")
.thenReturn("SCHEMA_1")
.thenReturn("SCHEMA_2")
.thenReturn("SCHEMA_3");
when(mockResultSet.getString("name"))
.thenReturn("TABLE_1")
.thenReturn("TABLE_2")
.thenReturn("TABLE_3")
.thenReturn("TABLE_4");
List<SnowflakeIdentifier> actualList =
snowflakeClient.listIcebergTables(SnowflakeIdentifier.ofRoot());
verify(mockQueryHarness)
.query(
eq(mockConnection),
eq("SHOW ICEBERG TABLES IN ACCOUNT"),
any(JdbcSnowflakeClient.ResultSetParser.class),
eq(null));
assertThat(actualList)
.containsExactly(
SnowflakeIdentifier.ofTable("DB_1", "SCHEMA_1", "TABLE_1"),
SnowflakeIdentifier.ofTable("DB_1", "SCHEMA_1", "TABLE_2"),
SnowflakeIdentifier.ofTable("DB_1", "SCHEMA_2", "TABLE_3"),
SnowflakeIdentifier.ofTable("DB_2", "SCHEMA_3", "TABLE_4"));
} |
@VisibleForTesting
BlockingResultInfo getBlockingResultInfo(IntermediateDataSetID resultId) {
return blockingResultInfos.get(resultId);
} | @Test
void testUpdateBlockingResultInfoWhileScheduling() throws Exception {
JobGraph jobGraph = createJobGraph();
Iterator<JobVertex> jobVertexIterator = jobGraph.getVertices().iterator();
JobVertex source1 = jobVertexIterator.next();
JobVertex source2 = jobVertexIterator.next();
JobVertex sink = jobVertexIterator.next();
final TestingJobMasterPartitionTracker partitionTracker =
new TestingJobMasterPartitionTracker();
partitionTracker.setIsPartitionTrackedFunction(ignore -> true);
int maxParallelism = 6;
AdaptiveBatchScheduler scheduler =
new DefaultSchedulerBuilder(
jobGraph, mainThreadExecutor, EXECUTOR_RESOURCE.getExecutor())
.setDelayExecutor(taskRestartExecutor)
.setPartitionTracker(partitionTracker)
.setRestartBackoffTimeStrategy(
new FixedDelayRestartBackoffTimeStrategyFactory(10, 0).create())
.setVertexParallelismAndInputInfosDecider(
createCustomParallelismDecider(maxParallelism))
.setDefaultMaxParallelism(maxParallelism)
.buildAdaptiveBatchJobScheduler();
final DefaultExecutionGraph graph = (DefaultExecutionGraph) scheduler.getExecutionGraph();
final ExecutionJobVertex source1ExecutionJobVertex = graph.getJobVertex(source1.getID());
final ExecutionJobVertex sinkExecutionJobVertex = graph.getJobVertex(sink.getID());
PointwiseBlockingResultInfo blockingResultInfo;
scheduler.startScheduling();
// trigger source1 finished.
transitionExecutionsState(scheduler, ExecutionState.FINISHED, source1);
blockingResultInfo =
(PointwiseBlockingResultInfo) getBlockingResultInfo(scheduler, source1);
assertThat(blockingResultInfo.getNumOfRecordedPartitions()).isEqualTo(SOURCE_PARALLELISM_1);
// trigger source2 finished.
transitionExecutionsState(scheduler, ExecutionState.FINISHED, source2);
blockingResultInfo =
(PointwiseBlockingResultInfo) getBlockingResultInfo(scheduler, source2);
assertThat(blockingResultInfo.getNumOfRecordedPartitions()).isEqualTo(SOURCE_PARALLELISM_2);
// trigger sink fail with partition not found
triggerFailedByPartitionNotFound(
scheduler,
source1ExecutionJobVertex.getTaskVertices()[0],
sinkExecutionJobVertex.getTaskVertices()[0]);
taskRestartExecutor.triggerScheduledTasks();
// check the partition info is reset
assertThat(
((PointwiseBlockingResultInfo) getBlockingResultInfo(scheduler, source1))
.getNumOfRecordedPartitions())
.isEqualTo(SOURCE_PARALLELISM_1 - 1);
} |
public BeamFnApi.InstructionResponse.Builder processBundle(BeamFnApi.InstructionRequest request)
throws Exception {
BeamFnApi.ProcessBundleResponse.Builder response = BeamFnApi.ProcessBundleResponse.newBuilder();
BundleProcessor bundleProcessor =
bundleProcessorCache.get(
request,
() -> {
try {
return createBundleProcessor(
request.getProcessBundle().getProcessBundleDescriptorId(),
request.getProcessBundle());
} catch (IOException e) {
throw new RuntimeException(e);
}
});
try {
PTransformFunctionRegistry startFunctionRegistry = bundleProcessor.getStartFunctionRegistry();
PTransformFunctionRegistry finishFunctionRegistry =
bundleProcessor.getFinishFunctionRegistry();
ExecutionStateTracker stateTracker = bundleProcessor.getStateTracker();
try (HandleStateCallsForBundle beamFnStateClient = bundleProcessor.getBeamFnStateClient()) {
stateTracker.start(request.getInstructionId());
try {
// Already in reverse topological order so we don't need to do anything.
for (ThrowingRunnable startFunction : startFunctionRegistry.getFunctions()) {
LOG.debug("Starting function {}", startFunction);
startFunction.run();
}
if (request.getProcessBundle().hasElements()) {
boolean inputFinished =
bundleProcessor
.getInboundObserver()
.multiplexElements(request.getProcessBundle().getElements());
if (!inputFinished) {
throw new RuntimeException(
"Elements embedded in ProcessBundleRequest do not contain stream terminators for "
+ "all data and timer inputs. Unterminated endpoints: "
+ bundleProcessor.getInboundObserver().getUnfinishedEndpoints());
}
} else if (!bundleProcessor.getInboundEndpointApiServiceDescriptors().isEmpty()) {
BeamFnDataInboundObserver observer = bundleProcessor.getInboundObserver();
beamFnDataClient.registerReceiver(
request.getInstructionId(),
bundleProcessor.getInboundEndpointApiServiceDescriptors(),
observer);
observer.awaitCompletion();
beamFnDataClient.unregisterReceiver(
request.getInstructionId(),
bundleProcessor.getInboundEndpointApiServiceDescriptors());
}
// Need to reverse this since we want to call finish in topological order.
for (ThrowingRunnable finishFunction :
Lists.reverse(finishFunctionRegistry.getFunctions())) {
LOG.debug("Finishing function {}", finishFunction);
finishFunction.run();
}
// If bundleProcessor has not flushed any elements, embed them in response.
embedOutboundElementsIfApplicable(response, bundleProcessor);
// Add all checkpointed residuals to the response.
response.addAllResidualRoots(bundleProcessor.getSplitListener().getResidualRoots());
// Add all metrics to the response.
bundleProcessor.getProgressRequestLock().lock();
Map<String, ByteString> monitoringData = finalMonitoringData(bundleProcessor);
if (runnerAcceptsShortIds) {
response.putAllMonitoringData(monitoringData);
} else {
for (Map.Entry<String, ByteString> metric : monitoringData.entrySet()) {
response.addMonitoringInfos(
shortIds.get(metric.getKey()).toBuilder().setPayload(metric.getValue()));
}
}
if (!bundleProcessor.getBundleFinalizationCallbackRegistrations().isEmpty()) {
finalizeBundleHandler.registerCallbacks(
bundleProcessor.getInstructionId(),
ImmutableList.copyOf(bundleProcessor.getBundleFinalizationCallbackRegistrations()));
response.setRequiresFinalization(true);
}
} finally {
// We specifically deactivate state tracking while we are holding the progress request and
// sampling locks.
stateTracker.reset();
}
}
// Mark the bundle processor as re-usable.
bundleProcessorCache.release(
request.getProcessBundle().getProcessBundleDescriptorId(), bundleProcessor);
return BeamFnApi.InstructionResponse.newBuilder().setProcessBundle(response);
} catch (Exception e) {
// Make sure we clean-up from the active set of bundle processors.
bundleProcessorCache.discard(bundleProcessor);
throw e;
}
} | @Test
public void testCreatingPTransformExceptionsArePropagated() throws Exception {
BeamFnApi.ProcessBundleDescriptor processBundleDescriptor =
BeamFnApi.ProcessBundleDescriptor.newBuilder()
.putTransforms(
"2L",
RunnerApi.PTransform.newBuilder()
.setSpec(RunnerApi.FunctionSpec.newBuilder().setUrn(DATA_INPUT_URN).build())
.build())
.build();
Map<String, BeamFnApi.ProcessBundleDescriptor> fnApiRegistry =
ImmutableMap.of("1L", processBundleDescriptor);
ProcessBundleHandler handler =
new ProcessBundleHandler(
PipelineOptionsFactory.create(),
Collections.emptySet(),
fnApiRegistry::get,
beamFnDataClient,
null /* beamFnStateGrpcClientCache */,
null /* finalizeBundleHandler */,
new ShortIdMap(),
executionStateSampler,
ImmutableMap.of(
DATA_INPUT_URN,
(context) -> {
throw new IllegalStateException("TestException");
}),
Caches.noop(),
new BundleProcessorCache(),
null /* dataSampler */);
assertThrows(
"TestException",
IllegalStateException.class,
() ->
handler.processBundle(
BeamFnApi.InstructionRequest.newBuilder()
.setProcessBundle(
BeamFnApi.ProcessBundleRequest.newBuilder()
.setProcessBundleDescriptorId("1L"))
.build()));
} |
@Override
public void validateDictDataList(String dictType, Collection<String> values) {
if (CollUtil.isEmpty(values)) {
return;
}
Map<String, DictDataDO> dictDataMap = CollectionUtils.convertMap(
dictDataMapper.selectByDictTypeAndValues(dictType, values), DictDataDO::getValue);
// validate
values.forEach(value -> {
DictDataDO dictData = dictDataMap.get(value);
if (dictData == null) {
throw exception(DICT_DATA_NOT_EXISTS);
}
if (!CommonStatusEnum.ENABLE.getStatus().equals(dictData.getStatus())) {
throw exception(DICT_DATA_NOT_ENABLE, dictData.getLabel());
}
});
} | @Test
public void testValidateDictDataList_notEnable() {
// mock data
DictDataDO dictDataDO = randomDictDataDO().setStatus(CommonStatusEnum.DISABLE.getStatus());
dictDataMapper.insert(dictDataDO);
// prepare parameters
String dictType = dictDataDO.getDictType();
List<String> values = singletonList(dictDataDO.getValue());
// invoke and assert the exception
assertServiceException(() -> dictDataService.validateDictDataList(dictType, values),
DICT_DATA_NOT_ENABLE, dictDataDO.getLabel());
} |
@Override
public int readUnsignedShortLE() {
return readShortLE() & 0xFFFF;
} | @Test
public void testReadUnsignedShortLEAfterRelease() {
assertThrows(IllegalReferenceCountException.class, new Executable() {
@Override
public void execute() {
releasedBuffer().readUnsignedShortLE();
}
});
} |
@SuppressWarnings("unchecked")
@Override
public void configure(final Map<String, ?> configs, final boolean isKey) {
final String windowedInnerClassSerdeConfig = (String) configs.get(StreamsConfig.WINDOWED_INNER_CLASS_SERDE);
Serde<T> windowInnerClassSerde = null;
if (windowedInnerClassSerdeConfig != null) {
try {
windowInnerClassSerde = Utils.newInstance(windowedInnerClassSerdeConfig, Serde.class);
} catch (final ClassNotFoundException e) {
throw new ConfigException(StreamsConfig.WINDOWED_INNER_CLASS_SERDE, windowedInnerClassSerdeConfig,
"Serde class " + windowedInnerClassSerdeConfig + " could not be found.");
}
}
if (inner != null && windowedInnerClassSerdeConfig != null) {
if (!inner.getClass().getName().equals(windowInnerClassSerde.serializer().getClass().getName())) {
throw new IllegalArgumentException("Inner class serializer set using constructor "
+ "(" + inner.getClass().getName() + ")" +
" is different from the one set in windowed.inner.class.serde config " +
"(" + windowInnerClassSerde.serializer().getClass().getName() + ").");
}
} else if (inner == null && windowedInnerClassSerdeConfig == null) {
throw new IllegalArgumentException("Inner class serializer should be set either via constructor " +
"or via the windowed.inner.class.serde config");
} else if (inner == null)
inner = windowInnerClassSerde.serializer();
} | @Test
public void shouldThrowErrorIfWindowedInnerClassSerialiserIsNotSet() {
final TimeWindowedSerializer<?> serializer = new TimeWindowedSerializer<>();
assertThrows(IllegalArgumentException.class, () -> serializer.configure(props, false));
} |
@Override
public ByteBuf discardReadBytes() {
if (readerIndex == 0) {
ensureAccessible();
return this;
}
if (readerIndex != writerIndex) {
setBytes(0, this, readerIndex, writerIndex - readerIndex);
writerIndex -= readerIndex;
adjustMarkers(readerIndex);
readerIndex = 0;
} else {
ensureAccessible();
adjustMarkers(readerIndex);
writerIndex = readerIndex = 0;
}
return this;
} | @Test
public void testDiscardReadBytes() {
buffer.writerIndex(0);
for (int i = 0; i < buffer.capacity(); i += 4) {
buffer.writeInt(i);
}
ByteBuf copy = copiedBuffer(buffer);
// Make sure there's no effect if called when readerIndex is 0.
buffer.readerIndex(CAPACITY / 4);
buffer.markReaderIndex();
buffer.writerIndex(CAPACITY / 3);
buffer.markWriterIndex();
buffer.readerIndex(0);
buffer.writerIndex(CAPACITY / 2);
buffer.discardReadBytes();
assertEquals(0, buffer.readerIndex());
assertEquals(CAPACITY / 2, buffer.writerIndex());
assertEquals(copy.slice(0, CAPACITY / 2), buffer.slice(0, CAPACITY / 2));
buffer.resetReaderIndex();
assertEquals(CAPACITY / 4, buffer.readerIndex());
buffer.resetWriterIndex();
assertEquals(CAPACITY / 3, buffer.writerIndex());
// Make sure bytes after writerIndex is not copied.
buffer.readerIndex(1);
buffer.writerIndex(CAPACITY / 2);
buffer.discardReadBytes();
assertEquals(0, buffer.readerIndex());
assertEquals(CAPACITY / 2 - 1, buffer.writerIndex());
assertEquals(copy.slice(1, CAPACITY / 2 - 1), buffer.slice(0, CAPACITY / 2 - 1));
if (discardReadBytesDoesNotMoveWritableBytes()) {
// If writable bytes were copied, the test should fail to avoid unnecessary memory bandwidth consumption.
assertFalse(copy.slice(CAPACITY / 2, CAPACITY / 2).equals(buffer.slice(CAPACITY / 2 - 1, CAPACITY / 2)));
} else {
assertEquals(copy.slice(CAPACITY / 2, CAPACITY / 2), buffer.slice(CAPACITY / 2 - 1, CAPACITY / 2));
}
// Marks also should be relocated.
buffer.resetReaderIndex();
assertEquals(CAPACITY / 4 - 1, buffer.readerIndex());
buffer.resetWriterIndex();
assertEquals(CAPACITY / 3 - 1, buffer.writerIndex());
copy.release();
} |
public static JsonAsserter with(String json) {
return new JsonAsserterImpl(JsonPath.parse(json).json());
} | @Test
public void has_path() throws Exception {
assertThrows(AssertionError.class, () -> with(JSON).assertNotDefined("$.store.bicycle[?(@.color == 'red' )]"));
} |
@Override
public void onPluginChanged(final List<PluginData> changed, final DataEventTypeEnum eventType) {
if (CollectionUtils.isEmpty(changed)) {
return;
}
this.updatePluginCache();
this.afterPluginChanged(changed, eventType);
} | @Test
public void testOnPluginChanged() {
List<PluginData> empty = Lists.newArrayList();
DataEventTypeEnum eventType = mock(DataEventTypeEnum.class);
listener.onPluginChanged(empty, eventType);
assertFalse(listener.getCache().containsKey(ConfigGroupEnum.PLUGIN.name()));
List<PluginData> pluginDatas = Lists.newArrayList(mock(PluginData.class));
listener.onPluginChanged(pluginDatas, eventType);
assertTrue(listener.getCache().containsKey(ConfigGroupEnum.PLUGIN.name()));
} |
@Override
public org.apache.kafka.streams.kstream.Transformer<KIn, VIn, Iterable<KeyValue<KOut, VOut>>> get() {
return new org.apache.kafka.streams.kstream.Transformer<KIn, VIn, Iterable<KeyValue<KOut, VOut>>>() {
private final org.apache.kafka.streams.kstream.Transformer<KIn, VIn, KeyValue<KOut, VOut>> transformer = transformerSupplier.get();
@Override
public void init(final ProcessorContext context) {
transformer.init(context);
}
@Override
public Iterable<KeyValue<KOut, VOut>> transform(final KIn key, final VIn value) {
final KeyValue<KOut, VOut> pair = transformer.transform(key, value);
if (pair != null) {
return Collections.singletonList(pair);
}
return Collections.emptyList();
}
@Override
public void close() {
transformer.close();
}
};
} | @Test
public void shouldCallTransformOfAdaptedTransformerAndReturnSingletonIterable() {
when(transformerSupplier.get()).thenReturn(transformer);
when(transformer.transform(key, value)).thenReturn(KeyValue.pair(0, 1));
final TransformerSupplierAdapter<String, String, Integer, Integer> adapter =
new TransformerSupplierAdapter<>(transformerSupplier);
final org.apache.kafka.streams.kstream.Transformer<String, String, Iterable<KeyValue<Integer, Integer>>> adaptedTransformer = adapter.get();
final Iterator<KeyValue<Integer, Integer>> iterator = adaptedTransformer.transform(key, value).iterator();
assertThat(iterator.hasNext(), equalTo(true));
iterator.next();
assertThat(iterator.hasNext(), equalTo(false));
} |
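// A hedged companion sketch (assumption): a null KeyValue from the wrapped
// transformer becomes an empty Iterable, so callers never observe null pairs.
@Test
public void shouldReturnEmptyIterableWhenTransformerReturnsNull() {
    when(transformerSupplier.get()).thenReturn(transformer);
    when(transformer.transform(key, value)).thenReturn(null);
    final TransformerSupplierAdapter<String, String, Integer, Integer> adapter =
        new TransformerSupplierAdapter<>(transformerSupplier);
    assertThat(adapter.get().transform(key, value).iterator().hasNext(), equalTo(false));
}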
public static List<String> getStringList(String property, JsonNode node) {
Preconditions.checkArgument(node.has(property), "Cannot parse missing list: %s", property);
return ImmutableList.<String>builder()
.addAll(new JsonStringArrayIterator(property, node))
.build();
} | @Test
public void getStringList() throws JsonProcessingException {
assertThatThrownBy(() -> JsonUtil.getStringList("items", JsonUtil.mapper().readTree("{}")))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Cannot parse missing list: items");
assertThatThrownBy(
() -> JsonUtil.getStringList("items", JsonUtil.mapper().readTree("{\"items\": null}")))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Cannot parse JSON array from non-array value: items: null");
assertThatThrownBy(
() ->
JsonUtil.getStringList(
"items", JsonUtil.mapper().readTree("{\"items\": [\"23\", 45]}")))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Cannot parse string from non-text value in items: 45");
List<String> items = Arrays.asList("23", "45");
assertThat(
JsonUtil.getStringList(
"items", JsonUtil.mapper().readTree("{\"items\": [\"23\", \"45\"]}")))
.containsExactlyElementsOf(items);
String json =
JsonUtil.generate(
gen -> {
gen.writeStartObject();
JsonUtil.writeStringArray("items", items, gen);
gen.writeEndObject();
},
false);
assertThat(JsonUtil.getStringList("items", JsonUtil.mapper().readTree(json))).isEqualTo(items);
} |
public Long retryAfter() {
return retryAfter;
} | @Test
void createRetryableExceptionWithResponseAndResponseHeader() {
// given
Long retryAfter = 5000L;
Request request =
Request.create(Request.HttpMethod.GET, "/", Collections.emptyMap(), null, Util.UTF_8);
byte[] response = "response".getBytes(StandardCharsets.UTF_8);
Map<String, Collection<String>> responseHeader = new HashMap<>();
responseHeader.put("TEST_HEADER", Arrays.asList("TEST_CONTENT"));
// when
RetryableException retryableException =
new RetryableException(-1, null, null, retryAfter, request, response, responseHeader);
// then
assertThat(retryableException).isNotNull();
assertThat(retryableException.retryAfter()).isEqualTo(retryAfter);
assertThat(retryableException.contentUTF8()).isEqualTo(new String(response, UTF_8));
assertThat(retryableException.responseHeaders()).containsKey("TEST_HEADER");
assertThat(retryableException.responseHeaders().get("TEST_HEADER")).contains("TEST_CONTENT");
} |
@Override
public Map<String, Object> resolve(DynamicConfigEvent event) {
final Map<String, Object> result = new HashMap<>();
final Optional<Map<String, Object>> convert = yamlConverter.convert(event.getContent(), Map.class);
convert.ifPresent(stringObjectMap -> MapUtils.resolveNestMap(result, stringObjectMap, null));
return result;
} | @Test
public void resolve() {
final DynamicConfigEvent event = Mockito.mock(DynamicConfigEvent.class);
Mockito.when(event.getContent()).thenReturn("test: 'hello world'");
final DefaultConfigResolver defaultConfigResolver = new DefaultConfigResolver();
final Map<String, Object> resolve = defaultConfigResolver.resolve(event);
Assert.assertEquals(resolve.get("test"), "hello world");
} |
public boolean isEnrolled(final UUID accountUuid, final String experimentName) {
final Optional<DynamicExperimentEnrollmentConfiguration> maybeConfiguration = dynamicConfigurationManager
.getConfiguration().getExperimentEnrollmentConfiguration(experimentName);
return maybeConfiguration.map(config -> {
if (config.getUuidSelector().getUuids().contains(accountUuid)) {
final int r = random.nextInt(100);
return r < config.getUuidSelector().getUuidEnrollmentPercentage();
}
return isEnrolled(accountUuid, config.getEnrollmentPercentage(), experimentName);
}).orElse(false);
} | @Test
void testIsEnrolled_UuidExperiment() {
assertFalse(experimentEnrollmentManager.isEnrolled(account.getUuid(), UUID_EXPERIMENT_NAME));
assertFalse(
experimentEnrollmentManager.isEnrolled(account.getUuid(), UUID_EXPERIMENT_NAME + "-unrelated-experiment"));
when(uuidSelector.getUuids()).thenReturn(Set.of(ACCOUNT_UUID));
assertTrue(experimentEnrollmentManager.isEnrolled(account.getUuid(), UUID_EXPERIMENT_NAME));
when(uuidSelector.getUuids()).thenReturn(Collections.emptySet());
when(experimentEnrollmentConfiguration.getEnrollmentPercentage()).thenReturn(0);
assertFalse(experimentEnrollmentManager.isEnrolled(account.getUuid(), UUID_EXPERIMENT_NAME));
when(experimentEnrollmentConfiguration.getEnrollmentPercentage()).thenReturn(100);
assertTrue(experimentEnrollmentManager.isEnrolled(account.getUuid(), UUID_EXPERIMENT_NAME));
} |
@Override
public boolean userDefinedIndexMode(boolean enable) {
if (meters.isEmpty() && meterIdGenerators.isEmpty()) {
userDefinedIndexMode = enable;
} else {
log.warn("Unable to {} user defined index mode as store did" +
"already some allocations", enable ? "activate" : "deactivate");
}
return userDefinedIndexMode;
} | @Test
public void testInvalidEnableUserDefinedIndex() {
testStoreMeter();
assertFalse(meterStore.userDefinedIndexMode(true));
} |
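// A hedged companion sketch (assumption): before any meters or meter ids are
// allocated, the toggle succeeds -- the positive case the negative test above
// depends on.
@Test
public void testEnableUserDefinedIndexOnEmptyStore() {
    assertTrue(meterStore.userDefinedIndexMode(true));
}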
public static Schema inferWiderSchema(List<Schema> schemas) {
if (schemas.isEmpty()) {
return null;
} else if (schemas.size() == 1) {
return schemas.get(0);
} else {
Schema outputSchema = null;
for (Schema schema : schemas) {
outputSchema = inferWiderSchema(outputSchema, schema);
}
return outputSchema;
}
} | @Test
public void testInferWiderSchema() {
// Test normal merges
Assertions.assertThat(
SchemaUtils.inferWiderSchema(
Schema.newBuilder()
.physicalColumn("Column1", DataTypes.INT())
.physicalColumn("Column2", DataTypes.DOUBLE())
.primaryKey("Column1")
.partitionKey("Column2")
.build(),
Schema.newBuilder()
.physicalColumn("Column1", DataTypes.BIGINT())
.physicalColumn("Column2", DataTypes.FLOAT())
.primaryKey("Column1")
.partitionKey("Column2")
.build()))
.isEqualTo(
Schema.newBuilder()
.physicalColumn("Column1", DataTypes.BIGINT())
.physicalColumn("Column2", DataTypes.DOUBLE())
.primaryKey("Column1")
.partitionKey("Column2")
.build());
// Test merging with incompatible types
Assertions.assertThatThrownBy(
() ->
SchemaUtils.inferWiderSchema(
Schema.newBuilder()
.physicalColumn("Column1", DataTypes.INT())
.physicalColumn("Column2", DataTypes.DOUBLE())
.primaryKey("Column1")
.partitionKey("Column2")
.build(),
Schema.newBuilder()
.physicalColumn("Column1", DataTypes.STRING())
.physicalColumn("Column2", DataTypes.STRING())
.primaryKey("Column1")
.partitionKey("Column2")
.build()))
.isExactlyInstanceOf(IllegalStateException.class);
// Test merging with incompatible column names
Assertions.assertThatThrownBy(
() ->
SchemaUtils.inferWiderSchema(
Schema.newBuilder()
.physicalColumn("Column1", DataTypes.INT())
.physicalColumn("Column2", DataTypes.DOUBLE())
.primaryKey("Column1")
.partitionKey("Column2")
.build(),
Schema.newBuilder()
.physicalColumn("NotColumn1", DataTypes.INT())
.physicalColumn("NotColumn2", DataTypes.DOUBLE())
.primaryKey("NotColumn1")
.partitionKey("NotColumn2")
.build()))
.isExactlyInstanceOf(IllegalStateException.class);
// Test merging with different column counts
Assertions.assertThatThrownBy(
() ->
SchemaUtils.inferWiderSchema(
Schema.newBuilder()
.physicalColumn("Column1", DataTypes.INT())
.physicalColumn("Column2", DataTypes.DOUBLE())
.physicalColumn("Column3", DataTypes.STRING())
.primaryKey("Column1")
.partitionKey("Column2")
.build(),
Schema.newBuilder()
.physicalColumn("NotColumn1", DataTypes.INT())
.physicalColumn("NotColumn2", DataTypes.DOUBLE())
.primaryKey("NotColumn1")
.partitionKey("NotColumn2")
.build()))
.isExactlyInstanceOf(IllegalStateException.class);
// Test merging with incompatible schema metadata
Assertions.assertThatThrownBy(
() ->
SchemaUtils.inferWiderSchema(
Schema.newBuilder()
.physicalColumn("Column1", DataTypes.INT())
.physicalColumn("Column2", DataTypes.DOUBLE())
.primaryKey("Column1")
.partitionKey("Column2")
.option("Key1", "Value1")
.build(),
Schema.newBuilder()
.physicalColumn("Column1", DataTypes.INT())
.physicalColumn("Column2", DataTypes.DOUBLE())
.primaryKey("Column2")
.partitionKey("Column1")
.option("Key2", "Value2")
.build()))
.isExactlyInstanceOf(IllegalStateException.class);
} |
@Udf(description = "Converts a string representation of a date in the given format"
+ " into the number of milliseconds since 1970-01-01 00:00:00 UTC/GMT."
+ " Single quotes in the timestamp format can be escaped with '',"
+ " for example: 'yyyy-MM-dd''T''HH:mm:ssX'."
+ " The system default time zone is used when no time zone is explicitly provided.")
public long stringToTimestamp(
@UdfParameter(
description = "The string representation of a date.") final String formattedTimestamp,
@UdfParameter(
description = "The format pattern should be in the format expected by"
+ " java.time.format.DateTimeFormatter.") final String formatPattern) {
// NB: We do not perform a null here preferring to throw an exception as
// there is no sentinel value for a "null" Date.
try {
final StringToTimestampParser timestampParser = parsers.get(formatPattern);
return timestampParser.parse(formattedTimestamp);
} catch (final ExecutionException | RuntimeException e) {
throw new KsqlFunctionException("Failed to parse timestamp '" + formattedTimestamp
+ "' with formatter '" + formatPattern
+ "': " + e.getMessage(), e);
}
} | @Test
public void shouldSupportPSTTimeZone() {
// When:
final Object result = udf.stringToTimestamp("2018-08-15 10:10:43",
"yyyy-MM-dd HH:mm:ss", "America/Los_Angeles");
// Then:
assertThat(result, is(1534353043000L));
} |
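// Note: the test above exercises a three-argument variant that takes an explicit
// time zone; the overload shown here has only (formattedTimestamp, formatPattern)
// and falls back to the system default zone. A minimal sketch of the two-argument
// call, assuming the system zone is UTC (10:10:43 in America/Los_Angeles during
// August is 17:10:43 UTC):
// long millis = udf.stringToTimestamp("2018-08-15 17:10:43", "yyyy-MM-dd HH:mm:ss");
// // -> 1534353043000L under that assumption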
protected List<T> getEntities(
@Nonnull final EntityProvider<T, ?> entities,
@Nullable final Pageable pageable,
@Nullable final Class<T> clazz,
@Nullable final Sort sort)
{
Objects.requireNonNull(entities);
Stream<? extends T> entityStream = entities
.stream()
.filter(this.criteria::evaluate);
final Sort sortToUse = this.staticSort.orElse(sort);
if(sortToUse != null)
{
entityStream = EntitySorter.sortEntitiesStream(clazz, sortToUse, entityStream);
}
entityStream = this.pageEntityStream(pageable, entityStream);
final List<T> result = this.copyEntities(entityStream);
if(LOG.isTraceEnabled())
{
LOG.trace("Found {} entries.", result.size());
}
return result;
} | @Test
void getEntities_NoCriteria_NoPageable_NoSortable_Null()
{
final PageableSortableCollectionQuerier<Customer> querier = new PageableSortableCollectionQuerier<>(
new DummyWorkingCopier<>(),
Criteria.createNoCriteria()
);
Assertions.assertThrows(NullPointerException.class, () -> querier.getEntities(null, Customer.class));
} |
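// A hedged happy-path sketch; customerProvider is a hypothetical EntityProvider, and
// the two-argument getEntities(entities, clazz) is assumed to delegate to the
// four-argument overload above with a null Pageable and Sort:
// final List<Customer> all = querier.getEntities(customerProvider, Customer.class);
// // -> every entity passing the criteria, unsorted and unpaged, copied by the
// //    configured working copier before being returned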
@Override
public void run() {
// top-level command, do nothing
} | @Test
public void test_submit_with_JetBootstrap() throws IOException {
Path testJarWithJetBootstrap = Files.createTempFile("testjob-with-jet-bootstrap-", ".jar");
try (InputStream inputStream = HazelcastCommandLineTest.class.getResourceAsStream("testjob-with-jet-bootstrap.jar")) {
assert inputStream != null;
Files.copy(inputStream, testJarWithJetBootstrap, StandardCopyOption.REPLACE_EXISTING);
}
run("submit", testJarWithJetBootstrap.toString());
assertTrueEventually(() -> assertEquals(1, hz.getJet().getJobs().size()));
Job job = hz.getJet().getJobs().get(0);
assertThat(job).eventuallyHasStatus(JobStatus.RUNNING);
assertNull(job.getName());
IOUtil.deleteQuietly(testJarWithJetBootstrap.toFile());
} |
public SSLParametersConfiguration getParameters() {
if (parameters == null) {
parameters = new SSLParametersConfiguration();
}
return parameters;
} | @Test
public void testParameters() throws Exception {
assertNotNull(configuration.getParameters());
} |
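// Design note: getParameters() lazily creates the SSLParametersConfiguration and
// caches it in a field, so repeated calls return the same instance. A follow-up
// assertion (not in the source) could be:
// assertSame(configuration.getParameters(), configuration.getParameters());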
public static int scan(final UnsafeBuffer termBuffer, final int termOffset, final int limitOffset)
{
int offset = termOffset;
while (offset < limitOffset)
{
final int frameLength = frameLengthVolatile(termBuffer, offset);
if (frameLength <= 0)
{
break;
}
final int alignedFrameLength = align(frameLength, FRAME_ALIGNMENT);
if (isPaddingFrame(termBuffer, offset))
{
if (termOffset == offset)
{
offset += alignedFrameLength;
}
break;
}
if (offset + alignedFrameLength > limitOffset)
{
break;
}
offset += alignedFrameLength;
}
return offset;
} | @Test
void shouldReadFirstMessage()
{
final int offset = 0;
final int limit = termBuffer.capacity();
final int messageLength = 50;
final int alignedMessageLength = BitUtil.align(messageLength, FRAME_ALIGNMENT);
when(termBuffer.getIntVolatile(lengthOffset(offset))).thenReturn(messageLength);
when(termBuffer.getShort(typeOffset(offset))).thenReturn((short)HDR_TYPE_DATA);
final int newOffset = TermBlockScanner.scan(termBuffer, offset, limit);
assertEquals(alignedMessageLength, newOffset);
} |
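// Semantics summary: scan() walks committed frames from termOffset, stopping at an
// uncommitted frame (length <= 0), at a padding frame (consumed only when it is the
// first frame scanned), or when the next aligned frame would cross limitOffset. A
// hedged sketch of the uncommitted case, mirroring the mock style above:
// when(termBuffer.getIntVolatile(lengthOffset(0))).thenReturn(0);
// assertEquals(0, TermBlockScanner.scan(termBuffer, 0, limit));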
public Set<MapperConfig> load(InputStream inputStream) throws IOException {
final PrometheusMappingConfig config = ymlMapper.readValue(inputStream, PrometheusMappingConfig.class);
return config.metricMappingConfigs()
.stream()
.flatMap(this::mapMetric)
.collect(Collectors.toSet());
} | @Test
void metricMatchType() throws Exception {
final Map<String, ImmutableList<Serializable>> config = Collections.singletonMap("metric_mappings",
ImmutableList.of(
ImmutableMap.of(
"type", "metric_match",
"metric_name", "test1",
"match_pattern", "foo.bar"
)));
assertThat(configLoader.load(new ByteArrayInputStream(objectMapper.writeValueAsBytes(config))))
.containsExactlyInAnyOrder(new MapperConfig(
"foo.bar",
"gl_test1",
ImmutableMap.of("node", "5ca1ab1e-0000-4000-a000-000000000000")));
} |
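// Hedged reading: a "metric_match" mapping appears to expand into one MapperConfig
// whose match pattern is taken verbatim, whose metric is renamed with a "gl_" prefix,
// and which is labeled with the local node id; the fixed UUID is assumed to come from
// a node-id test fixture.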
public static String getUnresolvedSchemaName(final Schema schema) {
if (!isUnresolvedSchema(schema)) {
throw new IllegalArgumentException("Not a unresolved schema: " + schema);
}
return schema.getProp(UR_SCHEMA_ATTR);
} | @Test(expected = IllegalArgumentException.class)
public void testGetUnresolvedSchemaNameError() {
Schema s = SchemaBuilder.fixed("a").size(10);
SchemaResolver.getUnresolvedSchemaName(s);
} |
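// Positive-path sketch, hedged: isUnresolvedSchema() and the UR_SCHEMA_ATTR property
// are internals assumed here. For a schema that does carry the marker property, the
// method simply reads the recorded name back out:
// Schema unresolved = ...; // built by SchemaResolver so that isUnresolvedSchema(unresolved) is true
// String name = SchemaResolver.getUnresolvedSchemaName(unresolved); // value stored under UR_SCHEMA_ATTR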
@Override
public synchronized boolean onActivity() {
if (!firstEventReceived) {
firstEventReceived = true;
return true;
}
return false;
} | @Test
public void testOnActivity_SubsequentCalls() {
assertTrue(strategy.onActivity(), "First call of onActivity() should return true.");
assertFalse(strategy.onActivity(), "Subsequent calls of onActivity() should return false.");
} |
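// Design note: onActivity() is a synchronized one-shot latch -- only the very first
// caller across all threads sees true, and the method shown offers no reset path, so
// the strategy stays "activated" for its lifetime.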
public static Timer deadlineTimer(final Time time, final long deadlineMs) {
long diff = Math.max(0, deadlineMs - time.milliseconds());
return time.timer(diff);
} | @Test
public void testDeadlineTimer() {
long deadlineMs = time.milliseconds() + DEFAULT_TIMEOUT_MS;
Timer timer = TimedRequestState.deadlineTimer(time, deadlineMs);
assertEquals(DEFAULT_TIMEOUT_MS, timer.remainingMs());
timer.sleep(DEFAULT_TIMEOUT_MS);
assertEquals(0, timer.remainingMs());
} |
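// Edge-case sketch: Math.max(0, ...) clamps deadlines already in the past, so the
// timer starts expired rather than carrying a negative remainder:
// Timer expired = TimedRequestState.deadlineTimer(time, time.milliseconds() - 1);
// assertEquals(0, expired.remainingMs());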
public static Status unblock(
final UnsafeBuffer logMetaDataBuffer,
final UnsafeBuffer termBuffer,
final int blockedOffset,
final int tailOffset,
final int termId)
{
Status status = NO_ACTION;
int frameLength = frameLengthVolatile(termBuffer, blockedOffset);
if (frameLength < 0)
{
resetHeader(logMetaDataBuffer, termBuffer, blockedOffset, termId, -frameLength);
status = UNBLOCKED;
}
else if (0 == frameLength)
{
int currentOffset = blockedOffset + FRAME_ALIGNMENT;
while (currentOffset < tailOffset)
{
frameLength = frameLengthVolatile(termBuffer, currentOffset);
if (frameLength != 0)
{
if (scanBackToConfirmZeroed(termBuffer, currentOffset, blockedOffset))
{
final int length = currentOffset - blockedOffset;
resetHeader(logMetaDataBuffer, termBuffer, blockedOffset, termId, length);
status = UNBLOCKED;
}
break;
}
currentOffset += FRAME_ALIGNMENT;
}
if (currentOffset == termBuffer.capacity())
{
if (0 == frameLengthVolatile(termBuffer, blockedOffset))
{
final int length = currentOffset - blockedOffset;
resetHeader(logMetaDataBuffer, termBuffer, blockedOffset, termId, length);
status = UNBLOCKED_TO_END;
}
}
}
return status;
} | @Test
void shouldPatchToEndOfPartition()
{
final int messageLength = HEADER_LENGTH * 4;
final int termOffset = TERM_BUFFER_CAPACITY - messageLength;
final int tailOffset = TERM_BUFFER_CAPACITY;
when(mockTermBuffer.getIntVolatile(termOffset)).thenReturn(0);
assertEquals(
UNBLOCKED_TO_END,
TermUnblocker.unblock(mockLogMetaDataBuffer, mockTermBuffer, termOffset, tailOffset, TERM_ID));
final InOrder inOrder = inOrder(mockTermBuffer);
inOrder.verify(mockTermBuffer).putShort(typeOffset(termOffset), (short)HDR_TYPE_PAD, LITTLE_ENDIAN);
inOrder.verify(mockTermBuffer).putInt(termOffsetOffset(termOffset), termOffset, LITTLE_ENDIAN);
inOrder.verify(mockTermBuffer).putIntOrdered(termOffset, messageLength);
} |
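// Semantics summary, hedged: unblock() repairs a stalled term by writing a padding
// frame over the blocked region. A negative length marks an aborted message and is
// reset in place (UNBLOCKED); a zero length triggers a forward scan -- if a later
// committed frame is found and the gap scans back as zeroed, the gap is padded
// (UNBLOCKED); if the scan reaches the end of the term with the blocked slot still
// zero, the whole tail is padded (UNBLOCKED_TO_END), which is the case the test
// above drives.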
protected String cleanExpression(String rawExpression) {
if (rawExpression != null && rawExpression.trim().startsWith(MVEL_ESCAPE_SYMBOL)) {
return rawExpression.replaceFirst(MVEL_ESCAPE_SYMBOL, "");
}
Optional<JsonNode> optionalJSONNode = JsonUtils.convertFromStringToJSONNode(rawExpression);
if (optionalJSONNode.isPresent()) {
JsonNode jsonNode = optionalJSONNode.get();
if (jsonNode.isTextual() && jsonNode.asText() != null && jsonNode.asText().trim().startsWith(MVEL_ESCAPE_SYMBOL)) {
String expression = jsonNode.asText();
return expression.replaceFirst(MVEL_ESCAPE_SYMBOL, "");
}
}
throw new IllegalArgumentException(MALFORMED_MVEL_EXPRESSION + "'" + rawExpression + "'");
} | @Test
public void cleanExpression() {
assertThat(evaluator.cleanExpression(MVEL_ESCAPE_SYMBOL + "test")).isEqualTo("test");
assertThat(evaluator.cleanExpression(MVEL_ESCAPE_SYMBOL + " test")).isEqualTo(" test");
assertThat(evaluator.cleanExpression(MVEL_ESCAPE_SYMBOL + " " + MVEL_ESCAPE_SYMBOL + " test")).isEqualTo(" " + MVEL_ESCAPE_SYMBOL + " test");
assertThat(evaluator.cleanExpression(new TextNode(MVEL_ESCAPE_SYMBOL + "test").toString())).isEqualTo("test");
assertThat(evaluator.cleanExpression(new TextNode(MVEL_ESCAPE_SYMBOL + " test").toString())).isEqualTo(" test");
assertThat(evaluator.cleanExpression(new TextNode(MVEL_ESCAPE_SYMBOL + " " + MVEL_ESCAPE_SYMBOL + " test").toString())).isEqualTo(" " + MVEL_ESCAPE_SYMBOL + " test");
assertThatThrownBy(() -> evaluator.cleanExpression("test"))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageStartingWith("Malformed MVEL expression");
assertThatThrownBy(() -> evaluator.cleanExpression(new TextNode("test").toString()))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageStartingWith("Malformed MVEL expression");
} |
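// Design note: replaceFirst strips only the first escape symbol, so a second "#"
// survives as part of the expression body (third assertion above). The TextNode
// variants cover the case where the expression arrives wrapped as a JSON string
// literal rather than as raw text.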
@VisibleForTesting
String normalizeArchitecture(String architecture) {
// Create mapping based on https://docs.docker.com/engine/install/#supported-platforms
if (architecture.equals("x86_64")) {
return "amd64";
} else if (architecture.equals("aarch64")) {
return "arm64";
}
return architecture;
} | @Test
public void testNormalizeArchitecture_x86_64() {
assertThat(stepsRunner.normalizeArchitecture("x86_64")).isEqualTo("amd64");
} |
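// Hedged note: only x86_64 and aarch64 are remapped; any other value passes through
// unchanged, e.g.:
// assertThat(stepsRunner.normalizeArchitecture("s390x")).isEqualTo("s390x");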
@Override
public void execute(String commandName, BufferedReader reader, BufferedWriter writer)
throws Py4JException, IOException {
String returnCommand = null;
String subCommand = safeReadLine(reader, false);
if (subCommand.equals(FIELD_GET_SUB_COMMAND_NAME)) {
returnCommand = getField(reader);
} else if (subCommand.equals(FIELD_SET_SUB_COMMAND_NAME)) {
returnCommand = setField(reader);
} else {
returnCommand = Protocol.getOutputErrorCommand("Unknown Field SubCommand Name: " + subCommand);
}
logger.finest("Returning command: " + returnCommand);
writer.write(returnCommand);
writer.flush();
} | @Test
public void testPrimitive() {
String inputCommand = "g\n" + target + "\nfield10\ne\n";
try {
command.execute("f", new BufferedReader(new StringReader(inputCommand)), writer);
assertEquals("!yi10\n", sWriter.toString());
} catch (Exception e) {
e.printStackTrace();
fail();
}
} |
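// Protocol sketch, hedged: the input "g\n<target>\nfield10\ne\n" selects the
// field-get subcommand against <target>'s field10; the reply "!yi10\n" appears to
// encode a return marker ('!'), a success flag ('y'), and a typed integer value
// ('i', 10), though the exact byte meanings are an assumption about the Py4J wire
// protocol.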