focal_method (string, lengths 13-60.9k) | test_case (string, lengths 25-109k) |
---|---|
public ConsumerRecords<K, V> poll(Duration timeout) {
return poll(delegate.poll(timeout));
} | @Test void should_add_new_trace_headers_if_b3_missing() {
consumer.addRecord(consumerRecord);
Consumer<String, String> tracingConsumer = kafkaTracing.consumer(consumer);
ConsumerRecords<String, String> poll = tracingConsumer.poll(10);
assertThat(poll)
.extracting(ConsumerRecord::headers)
.flatExtracting(KafkaTest::lastHeaders)
.extracting(Map.Entry::getKey)
.containsOnly("b3");
MutableSpan consumerSpan = spans.get(0);
assertThat(consumerSpan.kind()).isEqualTo(CONSUMER);
assertThat(consumerSpan.parentId()).isNull();
} |
@Override
public Publisher<Exchange> to(String uri, Object data) {
String streamName = requestedUriToStream.computeIfAbsent(uri, camelUri -> {
try {
String uuid = context.getUuidGenerator().generateUuid();
RouteBuilder.addRoutes(context, rb -> rb.from("reactive-streams:" + uuid).to(camelUri));
return uuid;
} catch (Exception e) {
throw new IllegalStateException("Unable to create requested reactive stream from direct URI: " + uri, e);
}
});
return toStream(streamName, data);
} | @Test
public void testToFunctionWithExchange() throws Exception {
context.start();
AtomicInteger value = new AtomicInteger();
CountDownLatch latch = new CountDownLatch(1);
Function<Object, Publisher<Exchange>> fun = crs.to("bean:hello");
Flowable.just(1, 2, 3).flatMap(fun::apply).map(Exchange::getMessage).map(e -> e.getBody(String.class))
.doOnNext(res -> assertEquals("Hello " + value.incrementAndGet(), res)).doOnNext(res -> latch.countDown())
.subscribe();
assertTrue(latch.await(2, TimeUnit.SECONDS));
} |
public String getNextId() {
return String.format("%s-%d-%d", prefix, generatorInstanceId, counter.getAndIncrement());
} | @Test
public void invalidZnode() throws Exception {
store.put("/my/test/invalid", "invalid-number".getBytes(), Optional.of(-1L));
DistributedIdGenerator gen = new DistributedIdGenerator(coordinationService, "/my/test/invalid", "p");
// It should not throw an exception even if the znode content is invalid
assertEquals(gen.getNextId(), "p-0-0");
} |
@Udf
public <T> List<T> except(
@UdfParameter(description = "Array of values") final List<T> left,
@UdfParameter(description = "Array of exceptions") final List<T> right) {
if (left == null || right == null) {
return null;
}
final Set<T> distinctRightValues = new HashSet<>(right);
final Set<T> distinctLeftValues = new LinkedHashSet<>(left);
return distinctLeftValues
.stream()
.filter(e -> !distinctRightValues.contains(e))
.collect(Collectors.toList());
} | @Test
public void shouldReturnNullForNullExceptionArray() {
final List<String> input2 = Arrays.asList("foo");
final List<String> result = udf.except(null, input2);
assertThat(result, is(nullValue()));
} |
@Udf
public <T> List<T> concat(
@UdfParameter(description = "First array of values") final List<T> left,
@UdfParameter(description = "Second array of values") final List<T> right) {
if (left == null && right == null) {
return null;
}
final int leftSize = left != null ? left.size() : 0;
final int rightSize = right != null ? right.size() : 0;
final List<T> result = new ArrayList<>(leftSize + rightSize);
if (left != null) {
result.addAll(left);
}
if (right != null) {
result.addAll(right);
}
return result;
} | @Test
public void shouldReturnNullForNullLeftInput() {
final List<String> input1 = Arrays.asList("foo");
final List<String> result = udf.concat(null, input1);
assertThat(result, is(Arrays.asList("foo")));
} |
@NotNull @Override
public INode enrich(@NotNull INode node) {
if (node instanceof AES aes) {
return enrich(aes);
}
return node;
} | @Test
void oid() {
DetectionLocation testDetectionLocation =
new DetectionLocation("testfile", 1, 1, List.of("test"), () -> "SSL");
final AES aes = new AES(256, new ECB(testDetectionLocation), testDetectionLocation);
this.logBefore(aes);
final AESEnricher aesEnricher = new AESEnricher();
final INode enriched = aesEnricher.enrich(aes);
this.logAfter(enriched);
assertThat(enriched.is(BlockCipher.class)).isTrue();
assertThat(enriched).isInstanceOf(AES.class);
final AES enrichedAES = (AES) enriched;
assertThat(enrichedAES.hasChildOfType(Oid.class)).isPresent();
assertThat(enrichedAES.hasChildOfType(Oid.class).get().asString())
.isEqualTo("2.16.840.1.101.3.4.1.41");
} |
@Override
public ConnectorPageSource createPageSource(
ConnectorTransactionHandle transaction,
ConnectorSession session,
ConnectorSplit split,
ConnectorTableLayoutHandle layout,
List<ColumnHandle> columns,
SplitContext splitContext,
RuntimeStats runtimeStats)
{
HiveTableLayoutHandle hiveLayout = (HiveTableLayoutHandle) layout;
List<HiveColumnHandle> selectedColumns = columns.stream()
.map(HiveColumnHandle.class::cast)
.collect(toList());
HiveSplit hiveSplit = (HiveSplit) split;
Path path = new Path(hiveSplit.getFileSplit().getPath());
Configuration configuration = hdfsEnvironment.getConfiguration(
new HdfsContext(
session,
hiveSplit.getDatabase(),
hiveSplit.getTable(),
hiveLayout.getTablePath(),
false),
path);
Optional<EncryptionInformation> encryptionInformation = hiveSplit.getEncryptionInformation();
CacheQuota cacheQuota = generateCacheQuota(hiveSplit);
HiveFileContext fileContext = new HiveFileContext(
splitContext.isCacheable(),
cacheQuota,
hiveSplit.getFileSplit().getExtraFileInfo().map(BinaryExtraHiveFileInfo::new),
OptionalLong.of(hiveSplit.getFileSplit().getFileSize()),
OptionalLong.of(hiveSplit.getFileSplit().getStart()),
OptionalLong.of(hiveSplit.getFileSplit().getLength()),
hiveSplit.getFileSplit().getFileModifiedTime(),
HiveSessionProperties.isVerboseRuntimeStatsEnabled(session),
runtimeStats);
if (columns.stream().anyMatch(columnHandle -> ((HiveColumnHandle) columnHandle).getColumnType().equals(AGGREGATED))) {
checkArgument(columns.stream().allMatch(columnHandle -> ((HiveColumnHandle) columnHandle).getColumnType().equals(AGGREGATED)), "Not all columns are of 'AGGREGATED' type");
if (hiveLayout.isFooterStatsUnreliable()) {
throw new PrestoException(HIVE_UNSUPPORTED_FORMAT, format("Partial aggregation pushdown is not supported when footer stats are unreliable. " +
"Table %s has file %s with unreliable footer stats. " +
"Set session property [catalog-name].pushdown_partial_aggregations_into_scan=false and execute query again.",
hiveLayout.getSchemaTableName(),
hiveSplit.getFileSplit().getPath()));
}
return createAggregatedPageSource(aggregatedPageSourceFactories, configuration, session, hiveSplit, hiveLayout, selectedColumns, fileContext, encryptionInformation);
}
if (hiveLayout.isPushdownFilterEnabled()) {
Optional<ConnectorPageSource> selectivePageSource = createSelectivePageSource(
selectivePageSourceFactories,
configuration,
session,
hiveSplit,
hiveLayout,
selectedColumns,
hiveStorageTimeZone,
typeManager,
optimizedRowExpressionCache,
splitContext,
fileContext,
encryptionInformation);
if (selectivePageSource.isPresent()) {
return selectivePageSource.get();
}
}
TupleDomain<HiveColumnHandle> effectivePredicate = hiveLayout.getDomainPredicate()
.transform(Subfield::getRootName)
.transform(hiveLayout.getPredicateColumns()::get);
if (shouldSkipBucket(hiveLayout, hiveSplit, splitContext, isLegacyTimestampBucketing(session))) {
return new HiveEmptySplitPageSource();
}
if (shouldSkipPartition(typeManager, hiveLayout, hiveStorageTimeZone, hiveSplit, splitContext)) {
return new HiveEmptySplitPageSource();
}
Optional<ConnectorPageSource> pageSource = createHivePageSource(
cursorProviders,
pageSourceFactories,
configuration,
session,
hiveSplit.getFileSplit(),
hiveSplit.getTableBucketNumber(),
hiveSplit.getStorage(),
splitContext.getDynamicFilterPredicate().map(filter -> filter.transform(handle -> (HiveColumnHandle) handle).intersect(effectivePredicate)).orElse(effectivePredicate),
selectedColumns,
hiveLayout.getPredicateColumns(),
hiveSplit.getPartitionKeys(),
hiveStorageTimeZone,
typeManager,
hiveLayout.getSchemaTableName(),
hiveLayout.getPartitionColumns().stream().map(HiveColumnHandle.class::cast).collect(toList()),
hiveLayout.getDataColumns(),
hiveLayout.getTableParameters(),
hiveSplit.getPartitionDataColumnCount(),
hiveSplit.getTableToPartitionMapping(),
hiveSplit.getBucketConversion(),
hiveSplit.isS3SelectPushdownEnabled(),
fileContext,
hiveLayout.getRemainingPredicate(),
hiveLayout.isPushdownFilterEnabled(),
rowExpressionService,
encryptionInformation,
hiveSplit.getRowIdPartitionComponent());
if (pageSource.isPresent()) {
return pageSource.get();
}
throw new IllegalStateException("Could not find a file reader for split " + hiveSplit);
} | @Test
public void testAggregatedPageSource()
{
HivePageSourceProvider pageSourceProvider = createPageSourceProvider();
ConnectorPageSource pageSource = pageSourceProvider.createPageSource(
new HiveTransactionHandle(),
SESSION,
getHiveSplit(ORC),
getHiveTableLayout(true, true, false),
ImmutableList.of(LONG_AGGREGATED_COLUMN),
new SplitContext(false),
new RuntimeStats());
assertTrue(pageSource instanceof MockOrcAggregatedPageSource, format("pageSource %s", pageSource.getClass().getSimpleName()));
} |
@Override
public DescribeReplicaLogDirsResult describeReplicaLogDirs(Collection<TopicPartitionReplica> replicas, DescribeReplicaLogDirsOptions options) {
final Map<TopicPartitionReplica, KafkaFutureImpl<DescribeReplicaLogDirsResult.ReplicaLogDirInfo>> futures = new HashMap<>(replicas.size());
for (TopicPartitionReplica replica : replicas) {
futures.put(replica, new KafkaFutureImpl<>());
}
Map<Integer, DescribeLogDirsRequestData> partitionsByBroker = new HashMap<>();
for (TopicPartitionReplica replica: replicas) {
DescribeLogDirsRequestData requestData = partitionsByBroker.computeIfAbsent(replica.brokerId(),
brokerId -> new DescribeLogDirsRequestData());
DescribableLogDirTopic describableLogDirTopic = requestData.topics().find(replica.topic());
if (describableLogDirTopic == null) {
List<Integer> partitions = new ArrayList<>();
partitions.add(replica.partition());
describableLogDirTopic = new DescribableLogDirTopic().setTopic(replica.topic())
.setPartitions(partitions);
requestData.topics().add(describableLogDirTopic);
} else {
describableLogDirTopic.partitions().add(replica.partition());
}
}
final long now = time.milliseconds();
for (Map.Entry<Integer, DescribeLogDirsRequestData> entry: partitionsByBroker.entrySet()) {
final int brokerId = entry.getKey();
final DescribeLogDirsRequestData topicPartitions = entry.getValue();
final Map<TopicPartition, ReplicaLogDirInfo> replicaDirInfoByPartition = new HashMap<>();
for (DescribableLogDirTopic topicPartition: topicPartitions.topics()) {
for (Integer partitionId : topicPartition.partitions()) {
replicaDirInfoByPartition.put(new TopicPartition(topicPartition.topic(), partitionId), new ReplicaLogDirInfo());
}
}
runnable.call(new Call("describeReplicaLogDirs", calcDeadlineMs(now, options.timeoutMs()),
new ConstantNodeIdProvider(brokerId)) {
@Override
public DescribeLogDirsRequest.Builder createRequest(int timeoutMs) {
// Query selected partitions in all log directories
return new DescribeLogDirsRequest.Builder(topicPartitions);
}
@Override
public void handleResponse(AbstractResponse abstractResponse) {
DescribeLogDirsResponse response = (DescribeLogDirsResponse) abstractResponse;
for (Map.Entry<String, LogDirDescription> responseEntry: logDirDescriptions(response).entrySet()) {
String logDir = responseEntry.getKey();
LogDirDescription logDirInfo = responseEntry.getValue();
// No replica info will be provided if the log directory is offline
if (logDirInfo.error() instanceof KafkaStorageException)
continue;
if (logDirInfo.error() != null)
handleFailure(new IllegalStateException(
"The error " + logDirInfo.error().getClass().getName() + " for log directory " + logDir + " in the response from broker " + brokerId + " is illegal"));
for (Map.Entry<TopicPartition, ReplicaInfo> replicaInfoEntry: logDirInfo.replicaInfos().entrySet()) {
TopicPartition tp = replicaInfoEntry.getKey();
ReplicaInfo replicaInfo = replicaInfoEntry.getValue();
ReplicaLogDirInfo replicaLogDirInfo = replicaDirInfoByPartition.get(tp);
if (replicaLogDirInfo == null) {
log.warn("Server response from broker {} mentioned unknown partition {}", brokerId, tp);
} else if (replicaInfo.isFuture()) {
replicaDirInfoByPartition.put(tp, new ReplicaLogDirInfo(replicaLogDirInfo.getCurrentReplicaLogDir(),
replicaLogDirInfo.getCurrentReplicaOffsetLag(),
logDir,
replicaInfo.offsetLag()));
} else {
replicaDirInfoByPartition.put(tp, new ReplicaLogDirInfo(logDir,
replicaInfo.offsetLag(),
replicaLogDirInfo.getFutureReplicaLogDir(),
replicaLogDirInfo.getFutureReplicaOffsetLag()));
}
}
}
for (Map.Entry<TopicPartition, ReplicaLogDirInfo> entry: replicaDirInfoByPartition.entrySet()) {
TopicPartition tp = entry.getKey();
KafkaFutureImpl<ReplicaLogDirInfo> future = futures.get(new TopicPartitionReplica(tp.topic(), tp.partition(), brokerId));
future.complete(entry.getValue());
}
}
@Override
void handleFailure(Throwable throwable) {
completeAllExceptionally(futures.values(), throwable);
}
}, now);
}
return new DescribeReplicaLogDirsResult(new HashMap<>(futures));
} | @Test
public void testDescribeReplicaLogDirsUnexpected() throws ExecutionException, InterruptedException {
TopicPartitionReplica expected = new TopicPartitionReplica("topic", 12, 1);
TopicPartitionReplica unexpected = new TopicPartitionReplica("topic", 12, 2);
try (AdminClientUnitTestEnv env = mockClientEnv()) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
String broker1log0 = "/var/data/kafka0";
String broker1log1 = "/var/data/kafka1";
int broker1Log0PartitionSize = 987654321;
int broker1Log0OffsetLag = 24;
int broker1Log1PartitionSize = 123456789;
int broker1Log1OffsetLag = 4321;
env.kafkaClient().prepareResponseFrom(
new DescribeLogDirsResponse(
new DescribeLogDirsResponseData().setResults(asList(
prepareDescribeLogDirsResult(expected, broker1log0, broker1Log0PartitionSize, broker1Log0OffsetLag, false),
prepareDescribeLogDirsResult(unexpected, broker1log1, broker1Log1PartitionSize, broker1Log1OffsetLag, true)))),
env.cluster().nodeById(expected.brokerId()));
DescribeReplicaLogDirsResult result = env.adminClient().describeReplicaLogDirs(singletonList(expected));
Map<TopicPartitionReplica, KafkaFuture<DescribeReplicaLogDirsResult.ReplicaLogDirInfo>> values = result.values();
assertEquals(TestUtils.toSet(singletonList(expected)), values.keySet());
assertNotNull(values.get(expected));
assertEquals(broker1log0, values.get(expected).get().getCurrentReplicaLogDir());
assertEquals(broker1Log0OffsetLag, values.get(expected).get().getCurrentReplicaOffsetLag());
assertEquals(broker1log1, values.get(expected).get().getFutureReplicaLogDir());
assertEquals(broker1Log1OffsetLag, values.get(expected).get().getFutureReplicaOffsetLag());
}
} |
@Override
public KTable<K, VOut> aggregate(final Initializer<VOut> initializer,
final Materialized<K, VOut, KeyValueStore<Bytes, byte[]>> materialized) {
return aggregate(initializer, NamedInternal.empty(), materialized);
} | @Test
public void shouldNotHaveNullNamedOnAggregate() {
assertThrows(NullPointerException.class, () -> cogroupedStream.aggregate(STRING_INITIALIZER, (Named) null));
} |
public synchronized void scheduleRequest(DataSize maxResponseSize)
{
if (closed || (future != null) || scheduled) {
return;
}
scheduled = true;
// start before scheduling to include error delay
backoff.startRequest();
long delayNanos = backoff.getBackoffDelayNanos();
scheduler.schedule(() -> {
try {
initiateRequest(maxResponseSize);
}
catch (Throwable t) {
// should not happen, but be safe and fail the operator
clientCallback.clientFailed(PageBufferClient.this, t);
}
}, delayNanos, NANOSECONDS);
lastUpdate = DateTime.now();
requestsScheduled.incrementAndGet();
} | @Test
public void testHappyPath()
throws Exception
{
Page expectedPage = new Page(100);
DataSize expectedMaxSize = new DataSize(11, Unit.MEGABYTE);
MockExchangeRequestProcessor processor = new MockExchangeRequestProcessor(expectedMaxSize);
CyclicBarrier requestComplete = new CyclicBarrier(2);
TestingClientCallback callback = new TestingClientCallback(requestComplete);
URI location = URI.create("http://localhost:8080");
PageBufferClient client = new PageBufferClient(
new HttpRpcShuffleClient(new TestingHttpClient(processor, scheduler), location),
new Duration(1, TimeUnit.MINUTES),
true,
location,
callback,
scheduler,
pageBufferClientCallbackExecutor);
assertStatus(client, location, "queued", 0, 0, 0, 0, "not scheduled");
// fetch a page and verify
processor.addPage(location, expectedPage);
callback.resetStats();
client.scheduleRequest(expectedMaxSize);
requestComplete.await(10, TimeUnit.SECONDS);
assertEquals(callback.getPages().size(), 1);
assertPageEquals(expectedPage, callback.getPages().get(0));
assertEquals(callback.getCompletedRequests(), 1);
assertEquals(callback.getFinishedBuffers(), 0);
assertStatus(client, location, "queued", 1, 1, 1, 0, "not scheduled");
// fetch no data and verify
callback.resetStats();
client.scheduleRequest(expectedMaxSize);
requestComplete.await(10, TimeUnit.SECONDS);
assertEquals(callback.getPages().size(), 0);
assertEquals(callback.getCompletedRequests(), 1);
assertEquals(callback.getFinishedBuffers(), 0);
assertStatus(client, location, "queued", 1, 2, 2, 0, "not scheduled");
// fetch two more pages and verify
processor.addPage(location, expectedPage);
processor.addPage(location, expectedPage);
callback.resetStats();
client.scheduleRequest(expectedMaxSize);
requestComplete.await(10, TimeUnit.SECONDS);
assertEquals(callback.getPages().size(), 2);
assertPageEquals(expectedPage, callback.getPages().get(0));
assertPageEquals(expectedPage, callback.getPages().get(1));
assertEquals(callback.getCompletedRequests(), 1);
assertEquals(callback.getFinishedBuffers(), 0);
assertEquals(callback.getFailedBuffers(), 0);
callback.resetStats();
assertStatus(client, location, "queued", 3, 3, 3, 0, "not scheduled");
// finish and verify
callback.resetStats();
processor.setComplete(location);
client.scheduleRequest(expectedMaxSize);
requestComplete.await(10, TimeUnit.SECONDS);
// get the buffer complete signal
assertEquals(callback.getPages().size(), 0);
assertEquals(callback.getCompletedRequests(), 1);
// schedule the delete call to the buffer
callback.resetStats();
client.scheduleRequest(expectedMaxSize);
requestComplete.await(10, TimeUnit.SECONDS);
assertEquals(callback.getFinishedBuffers(), 1);
assertEquals(callback.getPages().size(), 0);
assertEquals(callback.getCompletedRequests(), 0);
assertEquals(callback.getFailedBuffers(), 0);
assertStatus(client, location, "closed", 3, 5, 5, 0, "not scheduled");
} |
@Override
public int readMediumLE() {
int value = readUnsignedMediumLE();
if ((value & 0x800000) != 0) {
value |= 0xff000000;
}
return value;
} | @Test
public void testReadMediumLEAfterRelease() {
assertThrows(IllegalReferenceCountException.class, new Executable() {
@Override
public void execute() {
releasedBuffer().readMediumLE();
}
});
} |
public CruiseConfig deserializeConfig(String content) throws Exception {
String md5 = md5Hex(content);
Element element = parseInputStream(new ByteArrayInputStream(content.getBytes()));
LOGGER.debug("[Config Save] Updating config cache with new XML");
CruiseConfig configForEdit = classParser(element, BasicCruiseConfig.class, configCache, new GoCipher(), registry, new ConfigReferenceElements()).parse();
setMd5(configForEdit, md5);
configForEdit.setOrigins(new FileConfigOrigin());
return configForEdit;
} | @Test
void shouldLoadExecBuilder() throws Exception {
CruiseConfig cruiseConfig = xmlLoader.deserializeConfig(CONFIG_WITH_NANT_AND_EXEC_BUILDER);
JobConfig plan = cruiseConfig.jobConfigByName("pipeline1", "mingle", "cardlist", true);
ExecTask builder = (ExecTask) plan.tasks().findFirstByType(ExecTask.class);
assertThat(builder).isEqualTo(new ExecTask("ls", "-la", "workdir"));
builder = (ExecTask) plan.tasks().get(2);
assertThat(builder).isEqualTo(new ExecTask("ls", "", (String) null));
} |
@GetMapping(value = "/{appId}/{clusterName}/{namespace:.+}")
public ApolloConfig queryConfig(@PathVariable String appId, @PathVariable String clusterName,
@PathVariable String namespace,
@RequestParam(value = "dataCenter", required = false) String dataCenter,
@RequestParam(value = "releaseKey", defaultValue = "-1") String clientSideReleaseKey,
@RequestParam(value = "ip", required = false) String clientIp,
@RequestParam(value = "label", required = false) String clientLabel,
@RequestParam(value = "messages", required = false) String messagesAsString,
HttpServletRequest request, HttpServletResponse response) throws IOException {
String originalNamespace = namespace;
//strip out .properties suffix
namespace = namespaceUtil.filterNamespaceName(namespace);
//fix the character case issue, such as FX.apollo <-> fx.apollo
namespace = namespaceUtil.normalizeNamespace(appId, namespace);
if (Strings.isNullOrEmpty(clientIp)) {
clientIp = WebUtils.tryToGetClientIp(request);
}
ApolloNotificationMessages clientMessages = transformMessages(messagesAsString);
List<Release> releases = Lists.newLinkedList();
String appClusterNameLoaded = clusterName;
if (!ConfigConsts.NO_APPID_PLACEHOLDER.equalsIgnoreCase(appId)) {
Release currentAppRelease = configService.loadConfig(appId, clientIp, clientLabel, appId, clusterName, namespace,
dataCenter, clientMessages);
if (currentAppRelease != null) {
releases.add(currentAppRelease);
//we have cluster search process, so the cluster name might be overridden
appClusterNameLoaded = currentAppRelease.getClusterName();
}
}
//if namespace does not belong to this appId, should check if there is a public configuration
if (!namespaceBelongsToAppId(appId, namespace)) {
Release publicRelease = this.findPublicConfig(appId, clientIp, clientLabel, clusterName, namespace,
dataCenter, clientMessages);
if (Objects.nonNull(publicRelease)) {
releases.add(publicRelease);
}
}
if (releases.isEmpty()) {
response.sendError(HttpServletResponse.SC_NOT_FOUND,
String.format(
"Could not load configurations with appId: %s, clusterName: %s, namespace: %s",
appId, clusterName, originalNamespace));
Tracer.logEvent("Apollo.Config.NotFound",
assembleKey(appId, clusterName, originalNamespace, dataCenter));
return null;
}
auditReleases(appId, clusterName, dataCenter, clientIp, releases);
String mergedReleaseKey = releases.stream().map(Release::getReleaseKey)
.collect(Collectors.joining(ConfigConsts.CLUSTER_NAMESPACE_SEPARATOR));
if (mergedReleaseKey.equals(clientSideReleaseKey)) {
// Client side configuration is the same with server side, return 304
response.setStatus(HttpServletResponse.SC_NOT_MODIFIED);
Tracer.logEvent("Apollo.Config.NotModified",
assembleKey(appId, appClusterNameLoaded, originalNamespace, dataCenter));
return null;
}
ApolloConfig apolloConfig = new ApolloConfig(appId, appClusterNameLoaded, originalNamespace,
mergedReleaseKey);
apolloConfig.setConfigurations(mergeReleaseConfigurations(releases));
Tracer.logEvent("Apollo.Config.Found", assembleKey(appId, appClusterNameLoaded,
originalNamespace, dataCenter));
return apolloConfig;
} | @Test
public void testQueryConfigWithPubicNamespaceAndNoAppOverride() throws Exception {
String someClientSideReleaseKey = "1";
String someServerSideReleaseKey = "2";
HttpServletResponse someResponse = mock(HttpServletResponse.class);
String somePublicAppId = "somePublicAppId";
String somePublicClusterName = "somePublicClusterName";
AppNamespace somePublicAppNamespace =
assemblePublicAppNamespace(somePublicAppId, somePublicNamespaceName);
when(configService.loadConfig(someAppId, someClientIp, someClientLabel, someAppId, someClusterName, somePublicNamespaceName,
someDataCenter, someNotificationMessages)).thenReturn(null);
when(appNamespaceService.findPublicNamespaceByName(somePublicNamespaceName))
.thenReturn(somePublicAppNamespace);
when(configService.loadConfig(someAppId, someClientIp, someClientLabel, somePublicAppId, someClusterName, somePublicNamespaceName,
someDataCenter, someNotificationMessages)).thenReturn(somePublicRelease);
when(somePublicRelease.getReleaseKey()).thenReturn(someServerSideReleaseKey);
when(somePublicRelease.getAppId()).thenReturn(somePublicAppId);
when(somePublicRelease.getClusterName()).thenReturn(somePublicClusterName);
when(somePublicRelease.getNamespaceName()).thenReturn(somePublicNamespaceName);
ApolloConfig result = configController
.queryConfig(someAppId, someClusterName, somePublicNamespaceName, someDataCenter,
someClientSideReleaseKey, someClientIp, someClientLabel, someMessagesAsString, someRequest, someResponse);
assertEquals(someServerSideReleaseKey, result.getReleaseKey());
assertEquals(someAppId, result.getAppId());
assertEquals(someClusterName, result.getCluster());
assertEquals(somePublicNamespaceName, result.getNamespaceName());
assertEquals("foo", result.getConfigurations().get("apollo.public.bar"));
verify(instanceConfigAuditUtil, times(1)).audit(someAppId, someClusterName, someDataCenter,
someClientIp, somePublicAppId, somePublicClusterName, somePublicNamespaceName, someServerSideReleaseKey);
} |
public MonetaryFormat digits(char zeroDigit) {
if (zeroDigit == this.zeroDigit)
return this;
else
return new MonetaryFormat(negativeSign, positiveSign, zeroDigit, decimalMark, minDecimals, decimalGroups,
shift, roundingMode, codes, codeSeparator, codePrefixed);
} | @Test
public void testDigits() {
assertEquals("١٢.٣٤٥٦٧٨٩٠", NO_CODE.digits('\u0660').format(Coin.valueOf(1234567890l)).toString());
} |
public static boolean subjectExists(
final SchemaRegistryClient srClient,
final String subject
) {
return getLatestSchema(srClient, subject).isPresent();
} | @Test
public void shouldReturnFalseOnSubjectMissing() throws Exception {
// Given:
when(schemaRegistryClient.getLatestSchemaMetadata("bar-value")).thenThrow(
new RestClientException("foo", 404, SchemaRegistryUtil.SUBJECT_NOT_FOUND_ERROR_CODE)
);
// When:
final boolean subjectExists = SchemaRegistryUtil.subjectExists(schemaRegistryClient, "bar-value");
// Then:
assertFalse("Expected subject to not exist", subjectExists);
} |
@Override
public void onSwipeUp() {} | @Test
public void testOnSwipeUp() {
mUnderTest.onSwipeUp();
Mockito.verifyZeroInteractions(mMockParentListener, mMockKeyboardDismissAction);
} |
public SimpleRabbitListenerContainerFactory decorateSimpleRabbitListenerContainerFactory(
SimpleRabbitListenerContainerFactory factory
) {
return decorateRabbitListenerContainerFactory(factory);
} | @Test void decorateSimpleRabbitListenerContainerFactory_adds_by_default() {
SimpleRabbitListenerContainerFactory factory = new SimpleRabbitListenerContainerFactory();
assertThat(rabbitTracing.decorateSimpleRabbitListenerContainerFactory(factory).getAdviceChain())
.allMatch(advice -> advice instanceof TracingRabbitListenerAdvice);
} |
@Override
public PathAttributes toAttributes(final DavResource resource) {
final PathAttributes attributes = new PathAttributes();
final Map<QName, String> properties = resource.getCustomPropsNS();
if(null != properties && properties.containsKey(DAVTimestampFeature.LAST_MODIFIED_CUSTOM_NAMESPACE)) {
final String value = properties.get(DAVTimestampFeature.LAST_MODIFIED_CUSTOM_NAMESPACE);
if(StringUtils.isNotBlank(value)) {
try {
if(properties.containsKey(DAVTimestampFeature.LAST_MODIFIED_SERVER_CUSTOM_NAMESPACE)) {
final String svalue = properties.get(DAVTimestampFeature.LAST_MODIFIED_SERVER_CUSTOM_NAMESPACE);
if(StringUtils.isNotBlank(svalue)) {
final Date server = rfc1123.parse(svalue);
if(server.equals(resource.getModified())) {
// file not touched with a different client
attributes.setModificationDate(
rfc1123.parse(value).getTime());
}
else {
// file touched with a different client, use default modified date from server
if(resource.getModified() != null) {
attributes.setModificationDate(resource.getModified().getTime());
}
}
}
else {
if(log.isDebugEnabled()) {
log.debug(String.format("Missing value for property %s", DAVTimestampFeature.LAST_MODIFIED_SERVER_CUSTOM_NAMESPACE));
}
if(resource.getModified() != null) {
attributes.setModificationDate(resource.getModified().getTime());
}
}
}
else {
attributes.setModificationDate(
rfc1123.parse(value).getTime());
}
}
catch(InvalidDateException e) {
log.warn(String.format("Failure parsing property %s with value %s", DAVTimestampFeature.LAST_MODIFIED_CUSTOM_NAMESPACE, value));
if(resource.getModified() != null) {
attributes.setModificationDate(resource.getModified().getTime());
}
}
}
else {
if(log.isDebugEnabled()) {
log.debug(String.format("Missing value for property %s", DAVTimestampFeature.LAST_MODIFIED_CUSTOM_NAMESPACE));
}
if(resource.getModified() != null) {
attributes.setModificationDate(resource.getModified().getTime());
}
}
// Validate value with fallback to server side modified date
if(attributes.getModificationDate() == 0) {
if(log.isDebugEnabled()) {
log.debug(String.format("Invalid value for property %s", DAVTimestampFeature.LAST_MODIFIED_CUSTOM_NAMESPACE));
}
if(resource.getModified() != null) {
attributes.setModificationDate(resource.getModified().getTime());
}
}
}
else if(resource.getModified() != null) {
attributes.setModificationDate(resource.getModified().getTime());
}
if(resource.getCreation() != null) {
attributes.setCreationDate(resource.getCreation().getTime());
}
if(resource.getContentLength() != null) {
attributes.setSize(resource.getContentLength());
}
if(StringUtils.isNotBlank(resource.getEtag())) {
attributes.setETag(resource.getEtag());
}
if(StringUtils.isNotBlank(resource.getDisplayName())) {
attributes.setDisplayname(resource.getDisplayName());
}
attributes.setLockId(resource.getLockToken());
return attributes;
} | @Test
public void testCustomModified_NotSet() {
final DAVAttributesFinderFeature f = new DAVAttributesFinderFeature(null);
final DavResource mock = mock(DavResource.class);
Map<QName, String> map = new HashMap<>();
final Date modified = new DateTime("2018-11-01T15:31:57Z").toDate();
when(mock.getModified()).thenReturn(modified);
when(mock.getCustomPropsNS()).thenReturn(map);
final PathAttributes attrs = f.toAttributes(mock);
assertEquals(modified.getTime(), attrs.getModificationDate());
} |
@VisibleForTesting
static Object convertAvroField(Object avroValue, Schema schema) {
if (avroValue == null) {
return null;
}
switch (schema.getType()) {
case NULL:
case INT:
case LONG:
case DOUBLE:
case FLOAT:
case BOOLEAN:
return avroValue;
case ENUM:
case STRING:
return avroValue.toString(); // can be a String or org.apache.avro.util.Utf8
case UNION:
for (Schema s : schema.getTypes()) {
if (s.getType() == Schema.Type.NULL) {
continue;
}
return convertAvroField(avroValue, s);
}
throw new IllegalArgumentException("Found UNION schema but it doesn't contain any type");
case ARRAY:
case BYTES:
case FIXED:
case RECORD:
case MAP:
default:
throw new UnsupportedOperationException("Unsupported avro schema type=" + schema.getType()
+ " for value field schema " + schema.getName());
}
} | @Test(expectedExceptions = UnsupportedOperationException.class,
expectedExceptionsMessageRegExp = "Unsupported avro schema type.*")
public void testNotSupportedAvroTypesMap() {
BaseJdbcAutoSchemaSink.convertAvroField(new Object(), createFieldAndGetSchema((builder) ->
builder.name("field").type().map().values().stringType().noDefault()));
} |
public static final int getTrimTypeByDesc( String tt ) {
if ( tt == null ) {
return 0;
}
for ( int i = 0; i < trimTypeDesc.length; i++ ) {
if ( trimTypeDesc[i].equalsIgnoreCase( tt ) ) {
return i;
}
}
// If this fails, try to match using the code.
return getTrimTypeByCode( tt );
} | @Test
public void testGetTrimTypeByDesc() {
assertEquals( ValueMetaBase.getTrimTypeByDesc( BaseMessages.getString( PKG, "ValueMeta.TrimType.None" ) ),
ValueMetaInterface.TRIM_TYPE_NONE );
assertEquals( ValueMetaBase.getTrimTypeByDesc( BaseMessages.getString( PKG, "ValueMeta.TrimType.Left" ) ),
ValueMetaInterface.TRIM_TYPE_LEFT );
assertEquals( ValueMetaBase.getTrimTypeByDesc( BaseMessages.getString( PKG, "ValueMeta.TrimType.Right" ) ),
ValueMetaInterface.TRIM_TYPE_RIGHT );
assertEquals( ValueMetaBase.getTrimTypeByDesc( BaseMessages.getString( PKG, "ValueMeta.TrimType.Both" ) ),
ValueMetaInterface.TRIM_TYPE_BOTH );
assertEquals( ValueMetaBase.getTrimTypeByDesc( null ), ValueMetaInterface.TRIM_TYPE_NONE );
assertEquals( ValueMetaBase.getTrimTypeByDesc( "" ), ValueMetaInterface.TRIM_TYPE_NONE );
assertEquals( ValueMetaBase.getTrimTypeByDesc( "fake" ), ValueMetaInterface.TRIM_TYPE_NONE );
} |
public String getClientReturnId(String sessionId) {
Optional<OpenIdSession> session = openIdRepository.findById(sessionId);
if (session.isEmpty()) return null;
OpenIdSession openIdSession = session.get();
var returnUrl = openIdSession.getRedirectUri() + "?state=" + openIdSession.getState();
if (!"success".equals(openIdSession.getAuthenticationState())) {
return returnUrl + "&error=CANCELLED";
}
return returnUrl + "&code=" + openIdSession.getCode();
} | @Test
void getNoClientReturnIdTest() {
OpenIdSession openIdSession = new OpenIdSession();
openIdSession.setAuthenticationState("success");
when(httpServletRequest.getSession()).thenReturn(httpSession);
when(httpSession.getId()).thenReturn(null);
when(openIdRepository.findById(anyString())).thenReturn(Optional.of(openIdSession));
String response = openIdService.getClientReturnId("sessionId");
assertEquals("null?state=null&code=null", response);
} |
public static DataPermission get() {
return DATA_PERMISSIONS.get().peekLast();
} | @Test
public void testGet() {
// mock objects
DataPermission dataPermission01 = mock(DataPermission.class);
DataPermissionContextHolder.add(dataPermission01);
DataPermission dataPermission02 = mock(DataPermission.class);
DataPermissionContextHolder.add(dataPermission02);
// invoke
DataPermission result = DataPermissionContextHolder.get();
// assert
assertSame(result, dataPermission02);
} |
@Override
public int get(PageId pageId, int pageOffset, int bytesToRead, ReadTargetBuffer target,
boolean isTemporary) throws IOException, PageNotFoundException {
Callable<Integer> callable = () ->
mPageStore.get(pageId, pageOffset, bytesToRead, target, isTemporary);
try {
return mTimeLimter.callWithTimeout(callable, mTimeoutMs, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
// Task got cancelled by others, interrupt the current thread
// and then throw a runtime ex to make the higher level stop.
Thread.currentThread().interrupt();
throw new RuntimeException(e);
} catch (TimeoutException e) {
Metrics.STORE_GET_TIMEOUT.inc();
throw new IOException(e);
} catch (RejectedExecutionException e) {
Metrics.STORE_THREADS_REJECTED.inc();
throw new IOException(e);
} catch (Throwable t) {
Throwables.propagateIfPossible(t, IOException.class, PageNotFoundException.class);
throw new IOException(t);
}
} | @Test
public void getTimeout() throws Exception {
mPageStore.setGetHanging(true);
try {
mTimeBoundPageStore.get(PAGE_ID, 0, PAGE.length, new ByteArrayTargetBuffer(mBuf, 0));
fail();
} catch (IOException e) {
assertTrue(e.getCause() instanceof TimeoutException);
}
} |
static double estimatePixelCount(final Image image, final double widthOverHeight) {
if (image.getHeight() == HEIGHT_UNKNOWN) {
if (image.getWidth() == WIDTH_UNKNOWN) {
// images whose size is completely unknown will be in their own subgroups, so
// any one of them will do, hence returning the same value for all of them
return 0;
} else {
return image.getWidth() * image.getWidth() / widthOverHeight;
}
} else if (image.getWidth() == WIDTH_UNKNOWN) {
return image.getHeight() * image.getHeight() * widthOverHeight;
} else {
return image.getHeight() * image.getWidth();
}
} | @Test
public void testEstimatePixelCountAllUnknown() {
assertEquals(0.0, estimatePixelCount(img(HEIGHT_UNKNOWN, WIDTH_UNKNOWN), 1.0 ), 0.0);
assertEquals(0.0, estimatePixelCount(img(HEIGHT_UNKNOWN, WIDTH_UNKNOWN), 12.0 ), 0.0);
assertEquals(0.0, estimatePixelCount(img(HEIGHT_UNKNOWN, WIDTH_UNKNOWN), 0.1 ), 0.0);
assertEquals(0.0, estimatePixelCount(img(HEIGHT_UNKNOWN, WIDTH_UNKNOWN), 16.0/9.0), 0.0);
} |
@Override
public Repository getRepository() {
//Repository may be null if executing remotely in Pentaho Server
Repository repository = super.getRepository();
return repository != null ? repository : getTransMeta().getRepository();
} | @Test
public void getRepositoryNotNullTest() {
metaInject.setRepository( repository );
//If repository is set in the base step (Local Execution) TransMeta will not be required to get the repository
metaInject.getRepository();
verify( metaInject, times( 0 ) ).getTransMeta();
} |
protected String getUniqueKey(KeyTypeEnum keyType, String... params) {
if (keyType == KeyTypeEnum.PATH) {
return getFilePathKey(params);
}
return getIdentifierKey(params);
} | @Test
void getUniqueKey() {
String uniqueKey = baseServiceMetadataIdentifier.getUniqueKey(KeyTypeEnum.UNIQUE_KEY, "appName");
Assertions.assertEquals(uniqueKey, "BaseServiceMetadataIdentifierTest:1.0.0:test:provider:appName");
String uniqueKey2 = baseServiceMetadataIdentifier.getUniqueKey(KeyTypeEnum.PATH, "appName");
Assertions.assertEquals(uniqueKey2, "metadata/BaseServiceMetadataIdentifierTest/1.0.0/test/provider/appName");
} |
public TopicStatsImpl add(TopicStats ts) {
TopicStatsImpl stats = (TopicStatsImpl) ts;
this.count++;
this.msgRateIn += stats.msgRateIn;
this.msgThroughputIn += stats.msgThroughputIn;
this.msgRateOut += stats.msgRateOut;
this.msgThroughputOut += stats.msgThroughputOut;
this.bytesInCounter += stats.bytesInCounter;
this.msgInCounter += stats.msgInCounter;
this.bytesOutCounter += stats.bytesOutCounter;
this.msgOutCounter += stats.msgOutCounter;
this.waitingPublishers += stats.waitingPublishers;
double newAverageMsgSize = (this.averageMsgSize * (this.count - 1) + stats.averageMsgSize) / this.count;
this.averageMsgSize = newAverageMsgSize;
this.storageSize += stats.storageSize;
this.backlogSize += stats.backlogSize;
this.publishRateLimitedTimes += stats.publishRateLimitedTimes;
this.offloadedStorageSize += stats.offloadedStorageSize;
this.nonContiguousDeletedMessagesRanges += stats.nonContiguousDeletedMessagesRanges;
this.nonContiguousDeletedMessagesRangesSerializedSize += stats.nonContiguousDeletedMessagesRangesSerializedSize;
this.delayedMessageIndexSizeInBytes += stats.delayedMessageIndexSizeInBytes;
this.ongoingTxnCount = stats.ongoingTxnCount;
this.abortedTxnCount = stats.abortedTxnCount;
this.committedTxnCount = stats.committedTxnCount;
this.backlogQuotaLimitTime = stats.backlogQuotaLimitTime;
this.backlogQuotaLimitSize = stats.backlogQuotaLimitSize;
if (stats.oldestBacklogMessageAgeSeconds > this.oldestBacklogMessageAgeSeconds) {
this.oldestBacklogMessageAgeSeconds = stats.oldestBacklogMessageAgeSeconds;
this.oldestBacklogMessageSubscriptionName = stats.oldestBacklogMessageSubscriptionName;
}
stats.bucketDelayedIndexStats.forEach((k, v) -> {
TopicMetricBean topicMetricBean =
this.bucketDelayedIndexStats.computeIfAbsent(k, __ -> new TopicMetricBean());
topicMetricBean.name = v.name;
topicMetricBean.labelsAndValues = v.labelsAndValues;
topicMetricBean.value += v.value;
});
List<? extends PublisherStats> publisherStats = stats.getPublishers();
for (int index = 0; index < publisherStats.size(); index++) {
PublisherStats s = publisherStats.get(index);
if (s.isSupportsPartialProducer() && s.getProducerName() != null) {
this.publishersMap.computeIfAbsent(s.getProducerName(), key -> {
final PublisherStatsImpl newStats = new PublisherStatsImpl();
newStats.setSupportsPartialProducer(true);
newStats.setProducerName(s.getProducerName());
return newStats;
}).add((PublisherStatsImpl) s);
} else {
// Add a publisher stat entry to this.publishers
// if this.publishers.size() is smaller than
// the input stats.publishers.size().
// Here, index == this.publishers.size() means
// this.publishers.size() is smaller than the input stats.publishers.size()
if (index == this.publishers.size()) {
PublisherStatsImpl newStats = new PublisherStatsImpl();
newStats.setSupportsPartialProducer(false);
this.publishers.add(newStats);
}
this.publishers.get(index)
.add((PublisherStatsImpl) s);
}
}
for (Map.Entry<String, SubscriptionStatsImpl> entry : stats.subscriptions.entrySet()) {
SubscriptionStatsImpl subscriptionStats =
this.subscriptions.computeIfAbsent(entry.getKey(), k -> new SubscriptionStatsImpl());
subscriptionStats.add(entry.getValue());
}
for (Map.Entry<String, ReplicatorStatsImpl> entry : stats.replication.entrySet()) {
ReplicatorStatsImpl replStats =
this.replication.computeIfAbsent(entry.getKey(), k -> {
ReplicatorStatsImpl r = new ReplicatorStatsImpl();
r.setConnected(true);
return r;
});
replStats.add(entry.getValue());
}
if (earliestMsgPublishTimeInBacklogs != 0 && ((TopicStatsImpl) ts).earliestMsgPublishTimeInBacklogs != 0) {
earliestMsgPublishTimeInBacklogs = Math.min(
earliestMsgPublishTimeInBacklogs,
((TopicStatsImpl) ts).earliestMsgPublishTimeInBacklogs
);
} else {
earliestMsgPublishTimeInBacklogs = Math.max(
earliestMsgPublishTimeInBacklogs,
((TopicStatsImpl) ts).earliestMsgPublishTimeInBacklogs
);
}
return this;
} | @Test
public void testAdd_EarliestMsgPublishTimeInBacklogs_Zero() {
TopicStatsImpl stats1 = new TopicStatsImpl();
stats1.earliestMsgPublishTimeInBacklogs = 0L;
TopicStatsImpl stats2 = new TopicStatsImpl();
stats2.earliestMsgPublishTimeInBacklogs = 0L;
TopicStatsImpl aggregate = stats1.add(stats2);
assertEquals(aggregate.earliestMsgPublishTimeInBacklogs, 0L);
} |
@Override
public void lock() {
try {
lockInterruptibly(-1, null);
} catch (InterruptedException e) {
throw new IllegalStateException();
}
} | @Test
public void testConcurrencyLoop_MultiInstance() throws InterruptedException {
final int iterations = 100;
final AtomicInteger lockedCounter = new AtomicInteger();
testMultiInstanceConcurrency(16, r -> {
for (int i = 0; i < iterations; i++) {
r.getSpinLock("testConcurrency_MultiInstance1").lock();
try {
Thread.sleep(10);
} catch (InterruptedException e) {
e.printStackTrace();
}
lockedCounter.incrementAndGet();
r.getSpinLock("testConcurrency_MultiInstance1").unlock();
}
});
Assertions.assertEquals(16 * iterations, lockedCounter.get());
} |
public static <K, V>
PTransform<PCollection<KV<K, V>>, PCollection<KV<K, ValueInSingleWindow<V>>>>
windowsInValue() {
return new WindowInValue<>();
} | @Test
@Category(NeedsRunner.class)
public void windowsInValueSucceeds() {
PCollection<KV<String, Integer>> timestamped =
pipeline
.apply(Create.of(KV.of("foo", 0), KV.of("foo", 1), KV.of("bar", 2), KV.of("baz", 3)))
.apply(TIMESTAMP_FROM_V);
PCollection<KV<String, ValueInSingleWindow<Integer>>> reified =
timestamped.apply(Reify.windowsInValue());
PAssert.that(reified)
.containsInAnyOrder(
KV.of(
"foo",
ValueInSingleWindow.of(
0, new Instant(0), GlobalWindow.INSTANCE, PaneInfo.NO_FIRING)),
KV.of(
"foo",
ValueInSingleWindow.of(
1, new Instant(1), GlobalWindow.INSTANCE, PaneInfo.NO_FIRING)),
KV.of(
"bar",
ValueInSingleWindow.of(
2, new Instant(2), GlobalWindow.INSTANCE, PaneInfo.NO_FIRING)),
KV.of(
"baz",
ValueInSingleWindow.of(
3, new Instant(3), GlobalWindow.INSTANCE, PaneInfo.NO_FIRING)));
pipeline.run();
} |
static void removeAllFromManagers(Iterable<DoFnLifecycleManager> managers) throws Exception {
Collection<Exception> thrown = new ArrayList<>();
for (DoFnLifecycleManager manager : managers) {
thrown.addAll(manager.removeAll());
}
if (!thrown.isEmpty()) {
Exception overallException = new Exception("Exceptions thrown while tearing down DoFns");
for (Exception e : thrown) {
overallException.addSuppressed(e);
}
throw overallException;
}
} | @Test
public void removeAllWhenManagersThrowSuppressesAndThrows() throws Exception {
PipelineOptions options = PipelineOptionsFactory.create();
DoFnLifecycleManager first = DoFnLifecycleManager.of(new ThrowsInCleanupFn("foo"), options);
DoFnLifecycleManager second = DoFnLifecycleManager.of(new ThrowsInCleanupFn("bar"), options);
DoFnLifecycleManager third = DoFnLifecycleManager.of(new ThrowsInCleanupFn("baz"), options);
first.get();
second.get();
third.get();
final Collection<Matcher<? super Throwable>> suppressions = new ArrayList<>();
suppressions.add(
allOf(
instanceOf(UserCodeException.class),
new CausedByMatcher(new ThrowableMessageMatcher("foo"))));
suppressions.add(
allOf(
instanceOf(UserCodeException.class),
new CausedByMatcher(new ThrowableMessageMatcher("bar"))));
suppressions.add(
allOf(
instanceOf(UserCodeException.class),
new CausedByMatcher(new ThrowableMessageMatcher("baz"))));
thrown.expect(
new BaseMatcher<Exception>() {
@Override
public void describeTo(Description description) {
description
.appendText("Exception suppressing ")
.appendList("[", ", ", "]", suppressions);
}
@Override
public boolean matches(Object item) {
if (!(item instanceof Exception)) {
return false;
}
Exception that = (Exception) item;
return Matchers.containsInAnyOrder(suppressions)
.matches(ImmutableList.copyOf(that.getSuppressed()));
}
});
DoFnLifecycleManagers.removeAllFromManagers(ImmutableList.of(first, second, third));
} |
public Iterator<Optional<Page>> process(SqlFunctionProperties properties, DriverYieldSignal yieldSignal, LocalMemoryContext memoryContext, Page page)
{
WorkProcessor<Page> processor = createWorkProcessor(properties, yieldSignal, memoryContext, page);
return processor.yieldingIterator();
} | @Test
public void testProjectEmptyPage()
{
PageProcessor pageProcessor = new PageProcessor(Optional.of(new SelectAllFilter()), ImmutableList.of(createInputPageProjectionWithOutputs(0, BIGINT, 0)));
Page inputPage = new Page(createLongSequenceBlock(0, 0));
LocalMemoryContext memoryContext = newSimpleAggregatedMemoryContext().newLocalMemoryContext(PageProcessor.class.getSimpleName());
Iterator<Optional<Page>> output = pageProcessor.process(SESSION.getSqlFunctionProperties(), new DriverYieldSignal(), memoryContext, inputPage);
assertEquals(memoryContext.getBytes(), 0);
// output should be one page containing no columns (only a count)
List<Optional<Page>> outputPages = ImmutableList.copyOf(output);
assertEquals(outputPages.size(), 0);
} |
@SuppressWarnings("argument")
static Status runSqlLine(
String[] args,
@Nullable InputStream inputStream,
@Nullable OutputStream outputStream,
@Nullable OutputStream errorStream)
throws IOException {
String[] modifiedArgs = checkConnectionArgs(args);
SqlLine sqlLine = new SqlLine();
if (outputStream != null) {
sqlLine.setOutputStream(new PrintStream(outputStream, false, StandardCharsets.UTF_8.name()));
}
if (errorStream != null) {
sqlLine.setErrorStream(new PrintStream(errorStream, false, StandardCharsets.UTF_8.name()));
}
return sqlLine.begin(modifiedArgs, inputStream, true);
} | @Test
public void testSqlLine_GroupBy() throws Exception {
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
String[] args =
buildArgs(
"CREATE EXTERNAL TABLE table_test (col_a VARCHAR, col_b VARCHAR) TYPE 'test';",
"INSERT INTO table_test SELECT '3', 'foo';",
"INSERT INTO table_test SELECT '3', 'bar';",
"INSERT INTO table_test SELECT '4', 'foo';",
"SELECT col_a, count(*) FROM table_test GROUP BY col_a;");
BeamSqlLine.runSqlLine(args, null, byteArrayOutputStream, null);
List<List<String>> lines = toLines(byteArrayOutputStream);
assertThat(
Arrays.asList(Arrays.asList("3", "2"), Arrays.asList("4", "1")),
everyItem(is(oneOf(lines.toArray()))));
} |
protected File getOutputFile(final String path, final String baseFileName) throws IOException {
makeDir(path);
final String now = new SimpleDateFormat("yyyy-MM-dd_HH-mm-ss").format(new Date());
final String fileName = baseFileName + "." + now;
final File file = Paths.get(path, fileName).toFile();
if (!file.exists() && !file.createNewFile()) {
throw new IOException("Fail to create file: " + file);
}
return file;
} | @Test
public void testGetOutputFileWithPath() throws IOException {
final String path = "abc";
final File f = getOutputFile(path, "test2.log");
assertTrue(f.exists());
FileUtils.forceDelete(new File(path));
} |
public synchronized Stream updateStreamState(String streamId, Stream.State state) {
LOG.info("Updating {}'s state to {} in project {}.", streamId, state.name(), projectId);
try {
Stream.Builder streamBuilder =
Stream.newBuilder()
.setName(StreamName.format(projectId, location, streamId))
.setState(state);
FieldMask.Builder fieldMaskBuilder = FieldMask.newBuilder().addPaths(FIELD_STATE);
UpdateStreamRequest request =
UpdateStreamRequest.newBuilder()
.setStream(streamBuilder)
.setUpdateMask(fieldMaskBuilder)
.build();
Stream reference = datastreamClient.updateStreamAsync(request).get();
LOG.info(
"Successfully updated {}'s state to {} in project {}.",
streamId,
state.name(),
projectId);
return reference;
} catch (InterruptedException | ExecutionException e) {
throw new DatastreamResourceManagerException("Failed to update stream. ", e);
}
} | @Test
public void testUpdateStreamStateExecutionExceptionShouldFail()
throws ExecutionException, InterruptedException {
when(datastreamClient.updateStreamAsync(any(UpdateStreamRequest.class)).get())
.thenThrow(ExecutionException.class);
DatastreamResourceManagerException exception =
assertThrows(
DatastreamResourceManagerException.class,
() -> testManager.updateStreamState(STREAM_ID, State.RUNNING));
assertThat(exception).hasMessageThat().contains("Failed to update stream.");
} |
public boolean accept(final T t) {
checkContext();
if (isComplete() || hasSentComplete()) {
throw new IllegalStateException("Cannot call accept after complete is called");
}
if (!isCancelled() && !isFailed()) {
if (getDemand() == 0) {
buffer.add(t);
} else {
doOnNext(t);
}
}
return buffer.size() >= bufferMaxSize;
} | @Test
public void shouldAcceptBuffered() throws Exception {
publisher = new BufferedPublisher<>(context, 5);
AsyncAssert asyncAssert = new AsyncAssert();
for (int i = 0; i < 10; i++) {
String record = expectedValue(i);
final int index = i;
execOnContextAndWait(() -> {
boolean bufferFull = getBufferedPublisher().accept(record);
asyncAssert.assertAsync(bufferFull, equalTo(index >= 5));
});
}
} |
void checkSupportedCipherSuites() {
if (getSupportedCipherSuites() == null) {
setSupportedCipherSuites(Collections.singletonList(HTTP2_DEFAULT_CIPHER));
} else if (!getSupportedCipherSuites().contains(HTTP2_DEFAULT_CIPHER)) {
throw new IllegalArgumentException("HTTP/2 server configuration must include cipher: " + HTTP2_DEFAULT_CIPHER);
}
} | @Test
void testCustomCiphersAreSupported() {
http2ConnectorFactory.setSupportedCipherSuites(Arrays.asList("TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"));
http2ConnectorFactory.checkSupportedCipherSuites();
assertThat(http2ConnectorFactory.getSupportedCipherSuites()).containsExactly(
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256");
} |
@Override
public Object fromBody(TypedInput body, Type type) throws ConversionException {
try (BufferedReader reader = new BufferedReader(new InputStreamReader(body.in()))) {
String json = reader.readLine();
log.debug("Converting response from influxDb: {}", json);
Map result = getResultObject(json);
List<Map> seriesList = (List<Map>) result.get("series");
if (CollectionUtils.isEmpty(seriesList)) {
log.warn("Received no data from Influxdb.");
return null;
}
Map series = seriesList.get(0);
List<String> seriesColumns = (List<String>) series.get("columns");
List<List> seriesValues = (List<List>) series.get("values");
List<InfluxDbResult> influxDbResultsList = new ArrayList<InfluxDbResult>(seriesValues.size());
// TODO(joerajeev): if returning tags (other than the field names) we will need to skip tags
// from this loop,
// and to extract and set the tag values to the influxDb result.
for (int i = 1;
i < seriesColumns.size();
i++) { // Starting from index 1 to skip 'time' column
String id = seriesColumns.get(i);
long firstTimeMillis = extractTimeInMillis(seriesValues, 0);
long stepMillis = calculateStep(seriesValues, firstTimeMillis);
List<Double> values = new ArrayList<>(seriesValues.size());
for (List<Object> valueRow : seriesValues) {
if (valueRow.get(i) != null) {
String val = valueRow.get(i).toString();
values.add(Double.valueOf(val));
}
}
influxDbResultsList.add(new InfluxDbResult(id, firstTimeMillis, stepMillis, null, values));
}
log.debug("Converted response: {} ", influxDbResultsList);
return influxDbResultsList;
} catch (IOException e) {
e.printStackTrace();
}
return null;
} | @Test
public void deserializeWithFloatValues() throws Exception {
List<InfluxDbResult> results = setupWithFloats();
TypedInput input = new TypedByteArray(MIME_TYPE, EXAMPLE_WITH_FLOATS.getBytes());
List<InfluxDbResult> result =
(List<InfluxDbResult>) influxDbResponseConverter.fromBody(input, List.class);
assertThat(result).isEqualTo(results);
} |
public void isAssignableTo(Class<?> clazz) {
if (!clazz.isAssignableFrom(checkNotNull(actual))) {
failWithActual("expected to be assignable to", clazz.getName());
}
} | @Test
public void testIsAssignableTo_reversed() {
expectFailureWhenTestingThat(Object.class).isAssignableTo(String.class);
assertFailureValue("expected to be assignable to", "java.lang.String");
} |
@VisibleForTesting
boolean isThreadDumpCaptured() {
return isThreadDumpCaptured;
} | @Test(timeout=60000)
public void testThreadDumpCaptureAfterNNStateChange() throws Exception {
startCluster();
MockNameNodeResourceChecker mockResourceChecker =
new MockNameNodeResourceChecker(conf);
mockResourceChecker.setResourcesAvailable(false);
cluster.getNameNode(0).getNamesystem()
.setNNResourceChecker(mockResourceChecker);
waitForHAState(0, HAServiceState.STANDBY);
while (!thr1.zkfc.isThreadDumpCaptured()) {
Thread.sleep(1000);
}
} |
protected static boolean isValidValue(Field f, Object value) {
if (value != null) {
return true;
}
Schema schema = f.schema();
Type type = schema.getType();
// If the type is null, any value is valid
if (type == Type.NULL) {
return true;
}
// If the type is a union that allows nulls, any value is valid
if (type == Type.UNION) {
for (Schema s : schema.getTypes()) {
if (s.getType() == Type.NULL) {
return true;
}
}
}
// The value is null but the type does not allow nulls
return false;
} | @Test
void isValidValueWithPrimitives() {
// Verify that a non-null value is valid for all primitives:
for (Type type : primitives) {
Field f = new Field("f", Schema.create(type), null, null);
assertTrue(RecordBuilderBase.isValidValue(f, new Object()));
}
// Verify that null is not valid for all non-null primitives:
for (Type type : nonNullPrimitives) {
Field f = new Field("f", Schema.create(type), null, null);
assertFalse(RecordBuilderBase.isValidValue(f, null));
}
} |
public static boolean isApplicationEntity(TimelineEntity te) {
return (te == null ? false
: te.getType().equals(TimelineEntityType.YARN_APPLICATION.toString()));
} | @Test
void testIsApplicationEntity() {
TimelineEntity te = new TimelineEntity();
te.setType(TimelineEntityType.YARN_APPLICATION.toString());
assertTrue(ApplicationEntity.isApplicationEntity(te));
te = null;
assertEquals(false, ApplicationEntity.isApplicationEntity(te));
te = new TimelineEntity();
te.setType(TimelineEntityType.YARN_CLUSTER.toString());
assertEquals(false, ApplicationEntity.isApplicationEntity(te));
} |
@Override
protected List<Object[]> rows() {
List<Object[]> rows = new ArrayList<>(dataConnectionCatalogEntries.size());
for (DataConnectionCatalogEntry dl : dataConnectionCatalogEntries) {
final Map<String, String> options;
if (!securityEnabled) {
options = dl.options();
} else {
options = new TreeMap<>();
Set<String> safeOptions = connectorCache.forType(dl.type()).nonSensitiveConnectorOptions();
for (Map.Entry<String, String> e : dl.options().entrySet()) {
if (safeOptions.contains(e.getKey())) {
options.put(e.getKey(), e.getValue());
}
}
}
Object[] row = new Object[]{
catalog(),
dataConnectionSchema,
dl.name(),
dl.type(),
dl.isShared(),
uncheckCall(() -> JsonUtil.toJson(options)),
dl.source().name()
};
rows.add(row);
}
return rows;
} | @Test
public void test_rows() {
// given
DataConnectionCatalogEntry dc = new DataConnectionCatalogEntry(
"dc-name",
"dc-type",
false,
singletonMap("key", "value")
);
DataConnectionsTable dcTable = new DataConnectionsTable(
"catalog",
"public",
"dc-schema",
singletonList(dc),
connectorCache,
false);
// when
List<Object[]> rows = dcTable.rows();
// then
assertThat(rows).containsExactly(new Object[]{
"catalog"
, "dc-schema"
, "dc-name"
, "dc-type"
, false
, "{\"key\":\"value\"}"
, "SQL"
});
} |
public boolean poll(Timer timer, boolean waitForJoinGroup) {
maybeUpdateSubscriptionMetadata();
invokeCompletedOffsetCommitCallbacks();
if (subscriptions.hasAutoAssignedPartitions()) {
if (protocol == null) {
throw new IllegalStateException("User configured " + ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG +
" to empty while trying to subscribe for group protocol to auto assign partitions");
}
// Always update the heartbeat last poll time so that the heartbeat thread does not leave the
// group proactively due to application inactivity even if (say) the coordinator cannot be found.
pollHeartbeat(timer.currentTimeMs());
if (coordinatorUnknownAndUnreadySync(timer)) {
return false;
}
if (rejoinNeededOrPending()) {
// due to a race condition between the initial metadata fetch and the initial rebalance,
// we need to ensure that the metadata is fresh before joining initially. This ensures
// that we have matched the pattern against the cluster's topics at least once before joining.
if (subscriptions.hasPatternSubscription()) {
// For consumer group that uses pattern-based subscription, after a topic is created,
// any consumer that discovers the topic after metadata refresh can trigger rebalance
// across the entire consumer group. Multiple rebalances can be triggered after one topic
// creation if consumers refresh metadata at vastly different times. We can significantly
// reduce the number of rebalances caused by single topic creation by asking consumer to
// refresh metadata before re-joining the group as long as the refresh backoff time has
// passed.
if (this.metadata.timeToAllowUpdate(timer.currentTimeMs()) == 0) {
this.metadata.requestUpdate(true);
}
if (!client.ensureFreshMetadata(timer)) {
return false;
}
maybeUpdateSubscriptionMetadata();
}
// if we are not waiting for the join group, just use a timer of 0
if (!ensureActiveGroup(waitForJoinGroup ? timer : time.timer(0L))) {
// since we may use a different timer in the callee, we'd still need
// to update the original timer's current time after the call
timer.update(time.milliseconds());
return false;
}
}
} else {
// For manually assigned partitions, we do not try to pro-actively lookup coordinator;
// instead we only try to refresh metadata when necessary.
// If connections to all nodes fail, wakeups triggered while attempting to send fetch
// requests result in polls returning immediately, causing a tight loop of polls. Without
// the wakeup, poll() with no channels would block for the timeout, delaying re-connection.
// awaitMetadataUpdate() in ensureCoordinatorReady initiates new connections with configured backoff and avoids the busy loop.
if (metadata.updateRequested() && !client.hasReadyNodes(timer.currentTimeMs())) {
client.awaitMetadataUpdate(timer);
}
// if there is pending coordinator requests, ensure they have a chance to be transmitted.
client.pollNoWakeup();
}
maybeAutoCommitOffsetsAsync(timer.currentTimeMs());
return true;
} | @Test
public void testAutoCommitManualAssignment() {
try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, true, subscriptions)) {
subscriptions.assignFromUser(singleton(t1p));
subscriptions.seek(t1p, 100);
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NONE);
time.sleep(autoCommitIntervalMs);
coordinator.poll(time.timer(Long.MAX_VALUE));
assertFalse(client.hasPendingResponses());
}
} |
@Override
public void afterMethod(final TargetAdviceObject target, final TargetAdviceMethod method, final Object[] args, final Object result, final String pluginType) {
if (null == result) {
return;
}
for (RouteUnit each : ((RouteContext) result).getRouteUnits()) {
MetricsCollectorRegistry.<CounterMetricsCollector>get(routedResultConfig, pluginType).inc("data_source", each.getDataSourceMapper().getActualName());
each.getTableMappers().forEach(table -> MetricsCollectorRegistry.<CounterMetricsCollector>get(routedResultConfig, pluginType).inc("table", table.getActualName()));
}
} | @Test
void assertCountRouteResult() {
RouteContext routeContext = new RouteContext();
RouteMapper dataSourceMapper = new RouteMapper("logic_db", "ds_0");
RouteMapper tableMapper = new RouteMapper("t_order", "t_order_0");
routeContext.getRouteUnits().add(new RouteUnit(dataSourceMapper, Collections.singleton(tableMapper)));
new RouteResultCountAdvice().afterMethod(new TargetAdviceObjectFixture(), mock(TargetAdviceMethod.class), new Object[]{}, routeContext, "FIXTURE");
assertThat(MetricsCollectorRegistry.get(routedResultConfig, "FIXTURE").toString(), is("data_source.ds_0=1, table.t_order_0=1"));
} |
public static void boundsCheck(int capacity, int index, int length) {
if (capacity < 0 || index < 0 || length < 0 || (index > (capacity - length))) {
throw new IndexOutOfBoundsException(String.format("index=%d, length=%d, capacity=%d", index, length, capacity));
}
} | @Test(expected = IndexOutOfBoundsException.class)
public void boundsCheck_whenIndexIntegerMax() {
//Testing that integer wrap-around does not let the check pass falsely
ArrayUtils.boundsCheck(100, Integer.MAX_VALUE, 1);
} |
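A small sketch, with a hypothetical class name, of why the guard above is phrased as index > capacity - length rather than the naive index + length > capacity: the addition can wrap past Integer.MAX_VALUE and slip through, which is exactly the case the test exercises.

public class BoundsCheckDemo {
    // Naive form: index + length can wrap around Integer.MAX_VALUE and
    // incorrectly pass the bounds check.
    static boolean naiveInBounds(int capacity, int index, int length) {
        return index >= 0 && length >= 0 && index + length <= capacity; // overflow-prone
    }

    // Overflow-safe form used above: rearranged so no addition can wrap.
    static boolean safeInBounds(int capacity, int index, int length) {
        return capacity >= 0 && index >= 0 && length >= 0 && index <= capacity - length;
    }

    public static void main(String[] args) {
        int capacity = 100;
        int index = Integer.MAX_VALUE;
        int length = 1;
        System.out.println(naiveInBounds(capacity, index, length)); // true  (wrap-around bug)
        System.out.println(safeInBounds(capacity, index, length));  // false (correct)
    }
}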
@Override
public void importData(JsonReader reader) throws IOException {
logger.info("Reading configuration for 1.0");
// this *HAS* to start as an object
reader.beginObject();
while (reader.hasNext()) {
JsonToken tok = reader.peek();
switch (tok) {
case NAME:
String name = reader.nextName();
// find out which member it is
if (name.equals(CLIENTS)) {
readClients(reader);
} else if (name.equals(GRANTS)) {
readGrants(reader);
} else if (name.equals(WHITELISTEDSITES)) {
readWhitelistedSites(reader);
} else if (name.equals(BLACKLISTEDSITES)) {
readBlacklistedSites(reader);
} else if (name.equals(AUTHENTICATIONHOLDERS)) {
readAuthenticationHolders(reader);
} else if (name.equals(ACCESSTOKENS)) {
readAccessTokens(reader);
} else if (name.equals(REFRESHTOKENS)) {
readRefreshTokens(reader);
} else if (name.equals(SYSTEMSCOPES)) {
readSystemScopes(reader);
} else {
boolean processed = false;
for (MITREidDataServiceExtension extension : extensions) {
if (extension.supportsVersion(THIS_VERSION)) {
processed = extension.importExtensionData(name, reader);
if (processed) {
break;
}
}
}
if (!processed) {
// unknown token, skip it
reader.skipValue();
}
}
break;
case END_OBJECT:
// the object ended, we're done here
reader.endObject();
continue;
default:
logger.debug("Found unexpected entry");
reader.skipValue();
continue;
}
}
fixObjectReferences();
for (MITREidDataServiceExtension extension : extensions) {
if (extension.supportsVersion(THIS_VERSION)) {
extension.fixExtensionObjectReferences(maps);
break;
}
}
maps.clearAll();
} | @Test
public void testFixRefreshTokenAuthHolderReferencesOnImport() throws IOException, ParseException {
String expiration1 = "2014-09-10T22:49:44.090+00:00";
Date expirationDate1 = formatter.parse(expiration1, Locale.ENGLISH);
ClientDetailsEntity mockedClient1 = mock(ClientDetailsEntity.class);
when(mockedClient1.getClientId()).thenReturn("mocked_client_1");
OAuth2Request req1 = new OAuth2Request(new HashMap<String, String>(), "client1", new ArrayList<GrantedAuthority>(),
true, new HashSet<String>(), new HashSet<String>(), "http://foo.com",
new HashSet<String>(), null);
Authentication mockAuth1 = mock(Authentication.class, withSettings().serializable());
OAuth2Authentication auth1 = new OAuth2Authentication(req1, mockAuth1);
AuthenticationHolderEntity holder1 = new AuthenticationHolderEntity();
holder1.setId(1L);
holder1.setAuthentication(auth1);
OAuth2RefreshTokenEntity token1 = new OAuth2RefreshTokenEntity();
token1.setId(1L);
token1.setClient(mockedClient1);
token1.setExpiration(expirationDate1);
token1.setJwt(JWTParser.parse("eyJhbGciOiJub25lIn0.eyJqdGkiOiJmOTg4OWQyOS0xMTk1LTQ4ODEtODgwZC1lZjVlYzAwY2Y4NDIifQ."));
token1.setAuthenticationHolder(holder1);
String expiration2 = "2015-01-07T18:31:50.079+00:00";
Date expirationDate2 = formatter.parse(expiration2, Locale.ENGLISH);
ClientDetailsEntity mockedClient2 = mock(ClientDetailsEntity.class);
when(mockedClient2.getClientId()).thenReturn("mocked_client_2");
OAuth2Request req2 = new OAuth2Request(new HashMap<String, String>(), "client2", new ArrayList<GrantedAuthority>(),
true, new HashSet<String>(), new HashSet<String>(), "http://bar.com",
new HashSet<String>(), null);
Authentication mockAuth2 = mock(Authentication.class, withSettings().serializable());
OAuth2Authentication auth2 = new OAuth2Authentication(req2, mockAuth2);
AuthenticationHolderEntity holder2 = new AuthenticationHolderEntity();
holder2.setId(2L);
holder2.setAuthentication(auth2);
OAuth2RefreshTokenEntity token2 = new OAuth2RefreshTokenEntity();
token2.setId(2L);
token2.setClient(mockedClient2);
token2.setExpiration(expirationDate2);
token2.setJwt(JWTParser.parse("eyJhbGciOiJub25lIn0.eyJqdGkiOiJlYmEyYjc3My0xNjAzLTRmNDAtOWQ3MS1hMGIxZDg1OWE2MDAifQ."));
token2.setAuthenticationHolder(holder2);
String configJson = "{" +
"\"" + MITREidDataService.SYSTEMSCOPES + "\": [], " +
"\"" + MITREidDataService.ACCESSTOKENS + "\": [], " +
"\"" + MITREidDataService.CLIENTS + "\": [], " +
"\"" + MITREidDataService.GRANTS + "\": [], " +
"\"" + MITREidDataService.WHITELISTEDSITES + "\": [], " +
"\"" + MITREidDataService.BLACKLISTEDSITES + "\": [], " +
"\"" + MITREidDataService.AUTHENTICATIONHOLDERS + "\": [" +
"{\"id\":1,\"authentication\":{\"clientAuthorization\":{\"clientId\":\"client1\",\"redirectUri\":\"http://foo.com\"},"
+ "\"userAuthentication\":null}}," +
"{\"id\":2,\"authentication\":{\"clientAuthorization\":{\"clientId\":\"client2\",\"redirectUri\":\"http://bar.com\"},"
+ "\"userAuthentication\":null}}" +
" ]," +
"\"" + MITREidDataService.REFRESHTOKENS + "\": [" +
"{\"id\":1,\"clientId\":\"mocked_client_1\",\"expiration\":\"2014-09-10T22:49:44.090+00:00\","
+ "\"authenticationHolderId\":1,\"value\":\"eyJhbGciOiJub25lIn0.eyJqdGkiOiJmOTg4OWQyOS0xMTk1LTQ4ODEtODgwZC1lZjVlYzAwY2Y4NDIifQ.\"}," +
"{\"id\":2,\"clientId\":\"mocked_client_2\",\"expiration\":\"2015-01-07T18:31:50.079+00:00\","
+ "\"authenticationHolderId\":2,\"value\":\"eyJhbGciOiJub25lIn0.eyJqdGkiOiJlYmEyYjc3My0xNjAzLTRmNDAtOWQ3MS1hMGIxZDg1OWE2MDAifQ.\"}" +
" ]" +
"}";
System.err.println(configJson);
JsonReader reader = new JsonReader(new StringReader(configJson));
final Map<Long, OAuth2RefreshTokenEntity> fakeRefreshTokenTable = new HashMap<>();
final Map<Long, AuthenticationHolderEntity> fakeAuthHolderTable = new HashMap<>();
when(tokenRepository.saveRefreshToken(isA(OAuth2RefreshTokenEntity.class))).thenAnswer(new Answer<OAuth2RefreshTokenEntity>() {
Long id = 343L;
@Override
public OAuth2RefreshTokenEntity answer(InvocationOnMock invocation) throws Throwable {
OAuth2RefreshTokenEntity _token = (OAuth2RefreshTokenEntity) invocation.getArguments()[0];
if(_token.getId() == null) {
_token.setId(id++);
}
fakeRefreshTokenTable.put(_token.getId(), _token);
return _token;
}
});
when(tokenRepository.getRefreshTokenById(anyLong())).thenAnswer(new Answer<OAuth2RefreshTokenEntity>() {
@Override
public OAuth2RefreshTokenEntity answer(InvocationOnMock invocation) throws Throwable {
Long _id = (Long) invocation.getArguments()[0];
return fakeRefreshTokenTable.get(_id);
}
});
when(clientRepository.getClientByClientId(anyString())).thenAnswer(new Answer<ClientDetailsEntity>() {
@Override
public ClientDetailsEntity answer(InvocationOnMock invocation) throws Throwable {
String _clientId = (String) invocation.getArguments()[0];
ClientDetailsEntity _client = mock(ClientDetailsEntity.class);
when(_client.getClientId()).thenReturn(_clientId);
return _client;
}
});
when(authHolderRepository.save(isA(AuthenticationHolderEntity.class))).thenAnswer(new Answer<AuthenticationHolderEntity>() {
Long id = 356L;
@Override
public AuthenticationHolderEntity answer(InvocationOnMock invocation) throws Throwable {
AuthenticationHolderEntity _holder = (AuthenticationHolderEntity) invocation.getArguments()[0];
if(_holder.getId() == null) {
_holder.setId(id++);
}
fakeAuthHolderTable.put(_holder.getId(), _holder);
return _holder;
}
});
when(authHolderRepository.getById(anyLong())).thenAnswer(new Answer<AuthenticationHolderEntity>() {
@Override
public AuthenticationHolderEntity answer(InvocationOnMock invocation) throws Throwable {
Long _id = (Long) invocation.getArguments()[0];
return fakeAuthHolderTable.get(_id);
}
});
dataService.importData(reader);
List<OAuth2RefreshTokenEntity> savedRefreshTokens = new ArrayList<>(fakeRefreshTokenTable.values());
Collections.sort(savedRefreshTokens, new refreshTokenIdComparator());
assertThat(savedRefreshTokens.get(0).getAuthenticationHolder().getId(), equalTo(356L));
assertThat(savedRefreshTokens.get(1).getAuthenticationHolder().getId(), equalTo(357L));
} |
public boolean isOlderOrEqualTo(VersionNumber versionNumber) {
return equals(versionNumber) || isOlderThan(versionNumber);
} | @Test
void testIsOlderOrEqualTo() {
assertThat(v("6.0.0").isOlderOrEqualTo(v("6.0.0"))).isTrue();
assertThat(v("5.0.0").isOlderOrEqualTo(v("6.0.0"))).isTrue();
assertThat(v("9.0.0").isOlderOrEqualTo(v("10.0.0"))).isTrue();
assertThat(v("1.0.0").isOlderOrEqualTo(v("10.0.0"))).isTrue();
assertThat(v("5.0.0").isOlderOrEqualTo(v("5.0.1"))).isTrue();
assertThat(v("6.0.0").isOlderOrEqualTo(v("7.0.0-beta.2"))).isTrue();
assertThat(v("7.0.0-alpha.1").isOlderOrEqualTo(v("7.0.0-beta.1"))).isTrue();
assertThat(v("7.0.0-beta.2").isOlderOrEqualTo(v("7.0.0-beta.3"))).isTrue();
assertThat(v("6.0.0").isOlderOrEqualTo(v("5.0.1"))).isFalse();
assertThat(v("10.0.0").isOlderOrEqualTo(v("1.0.0"))).isFalse();
assertThat(v("10.0.0").isOlderOrEqualTo(v("9.0.0"))).isFalse();
assertThat(v("10.0.0").isOlderOrEqualTo(v("9.0.0"))).isFalse();
} |
@Override
public boolean allowsUsersToSignUp() {
return settings.allowUsersToSignUp();
} | @Test
public void should_allow_users_to_signup() {
assertThat(underTest.allowsUsersToSignUp()).as("default").isFalse();
settings.setProperty("sonar.auth.github.allowUsersToSignUp", true);
assertThat(underTest.allowsUsersToSignUp()).isTrue();
} |
@PostMapping("/authorize")
@Operation(summary = "Request authorization", description = "Suitable for the code (authorization code) mode or the implicit (simplified) mode; invoked when [Submit] is clicked on the sso.vue single sign-on page")
@Parameters({
@Parameter(name = "response_type", required = true, description = "响应类型", example = "code"),
@Parameter(name = "client_id", required = true, description = "客户端编号", example = "tudou"),
@Parameter(name = "scope", description = "授权范围", example = "userinfo.read"), // 使用 Map<String, Boolean> 格式,Spring MVC 暂时不支持这么接收参数
@Parameter(name = "redirect_uri", required = true, description = "重定向 URI", example = "https://www.iocoder.cn"),
@Parameter(name = "auto_approve", required = true, description = "用户是否接受", example = "true"),
@Parameter(name = "state", example = "1")
})
public CommonResult<String> approveOrDeny(@RequestParam("response_type") String responseType,
@RequestParam("client_id") String clientId,
@RequestParam(value = "scope", required = false) String scope,
@RequestParam("redirect_uri") String redirectUri,
@RequestParam(value = "auto_approve") Boolean autoApprove,
@RequestParam(value = "state", required = false) String state) {
@SuppressWarnings("unchecked")
Map<String, Boolean> scopes = JsonUtils.parseObject(scope, Map.class);
scopes = ObjectUtil.defaultIfNull(scopes, Collections.emptyMap());
// 0. Verify that the user is already logged in, enforced via Spring Security
// 1.1 Validate that responseType is either code or token
OAuth2GrantTypeEnum grantTypeEnum = getGrantTypeEnum(responseType);
// 1.2 Validate that the redirectUri domain is legal and that scope is within the client's authorized scopes
OAuth2ClientDO client = oauth2ClientService.validOAuthClientFromCache(clientId, null,
grantTypeEnum.getGrantType(), scopes.keySet(), redirectUri);
// 2.1 Scenario one: autoApprove is true
if (Boolean.TRUE.equals(autoApprove)) {
// If auto-approval cannot be granted, return an empty url so the frontend does not redirect
if (!oauth2ApproveService.checkForPreApproval(getLoginUserId(), getUserType(), clientId, scopes.keySet())) {
return success(null);
}
} else { // 2.2 Scenario two: the user explicitly approved or denied
// If the approval check does not pass, redirect to an error link
if (!oauth2ApproveService.updateAfterApproval(getLoginUserId(), getUserType(), clientId, scopes)) {
return success(OAuth2Utils.buildUnsuccessfulRedirect(redirectUri, responseType, state,
"access_denied", "User denied access"));
}
}
// 3.1 For the code (authorization code) mode, issue an authorization code and redirect
List<String> approveScopes = convertList(scopes.entrySet(), Map.Entry::getKey, Map.Entry::getValue);
if (grantTypeEnum == OAuth2GrantTypeEnum.AUTHORIZATION_CODE) {
return success(getAuthorizationCodeRedirect(getLoginUserId(), client, approveScopes, redirectUri, state));
}
// 3.2 For token, i.e. the implicit (simplified) mode, issue an accessToken and redirect
return success(getImplicitGrantRedirect(getLoginUserId(), client, approveScopes, redirectUri, state));
@Test // autoApprove = true, approved + token
public void testApproveOrDeny_autoApproveWithToken() {
// prepare parameters
String responseType = "token";
String clientId = randomString();
String scope = "{\"read\": true, \"write\": false}";
String redirectUri = "https://www.iocoder.cn";
String state = "test";
// mock method (client)
OAuth2ClientDO client = randomPojo(OAuth2ClientDO.class).setClientId(clientId).setAdditionalInformation(null);
when(oauth2ClientService.validOAuthClientFromCache(eq(clientId), isNull(), eq("implicit"),
eq(asSet("read", "write")), eq(redirectUri))).thenReturn(client);
// mock method (scenario one)
when(oauth2ApproveService.checkForPreApproval(isNull(), eq(UserTypeEnum.ADMIN.getValue()),
eq(clientId), eq(SetUtils.asSet("read", "write")))).thenReturn(true);
// mock method (access token)
OAuth2AccessTokenDO accessTokenDO = randomPojo(OAuth2AccessTokenDO.class)
.setAccessToken("test_access_token").setExpiresTime(LocalDateTimeUtil.offset(LocalDateTime.now(), 30010L, ChronoUnit.MILLIS));
when(oauth2GrantService.grantImplicit(isNull(), eq(UserTypeEnum.ADMIN.getValue()),
eq(clientId), eq(ListUtil.toList("read")))).thenReturn(accessTokenDO);
// invoke
CommonResult<String> result = oauth2OpenController.approveOrDeny(responseType, clientId,
scope, redirectUri, true, state);
// assert
assertEquals(0, result.getCode());
assertThat(result.getData(), anyOf( // both 29 and 30 are possible, depending mainly on time calculation
is("https://www.iocoder.cn#access_token=test_access_token&token_type=bearer&state=test&expires_in=29&scope=read"),
is("https://www.iocoder.cn#access_token=test_access_token&token_type=bearer&state=test&expires_in=30&scope=read")
));
} |
@Override
public boolean isFragmentAutoTrackAppViewScreen(Class<?> fragment) {
return mFragmentAPI.isFragmentAutoTrackAppViewScreen(fragment);
} | @Test
public void isFragmentAutoTrackAppViewScreen() {
setUp();
mAutoTrackImp.trackFragmentAppViewScreen();
Assert.assertTrue(mAutoTrackImp.isTrackFragmentAppViewScreenEnabled());
} |
@Override
public double variance() {
return k * theta * theta;
} | @Test
public void testVariance() {
System.out.println("variance");
GammaDistribution instance = new GammaDistribution(3, 2.1);
instance.rand();
assertEquals(13.23, instance.variance(), 1E-7);
} |
@Override
public void accept(ServerWebExchange exchange, CachedResponse cachedResponse) {
ServerHttpResponse response = exchange.getResponse();
long calculatedMaxAgeInSeconds = calculateMaxAgeInSeconds(exchange.getRequest(), cachedResponse,
configuredTimeToLive);
rewriteCacheControlMaxAge(response.getHeaders(), calculatedMaxAgeInSeconds);
} | @Test
void otherHeadersAreNotRemoved_whenMaxAgeIsModified() {
inputExchange.getResponse().getHeaders().put("X-Custom-Header", List.of("DO-NOT-REMOVE"));
Duration timeToLive = Duration.ofSeconds(30);
CachedResponse inputCachedResponse = CachedResponse.create(HttpStatus.OK).timestamp(clock.instant()).build();
SetMaxAgeHeaderAfterCacheExchangeMutator toTest = new SetMaxAgeHeaderAfterCacheExchangeMutator(timeToLive,
clock, false);
toTest.accept(inputExchange, inputCachedResponse);
List<String> cacheControlValues = inputExchange.getResponse().getHeaders().get("X-Custom-Header");
assertThat(cacheControlValues).contains("DO-NOT-REMOVE");
} |
public ConsumerBuilder<T> batchReceivePolicy(BatchReceivePolicy batchReceivePolicy) {
checkArgument(batchReceivePolicy != null, "batchReceivePolicy must not be null.");
batchReceivePolicy.verify();
conf.setBatchReceivePolicy(batchReceivePolicy);
return this;
} | @Test(expectedExceptions = IllegalArgumentException.class)
public void testConsumerBuilderImplWhenBatchReceivePolicyIsNotValid() {
consumerBuilderImpl.batchReceivePolicy(BatchReceivePolicy.builder()
.maxNumMessages(0)
.maxNumBytes(0)
.timeout(0, TimeUnit.MILLISECONDS)
.build());
} |
@Override
public Map<RedisClusterNode, Collection<RedisClusterNode>> clusterGetMasterSlaveMap() {
Iterable<RedisClusterNode> res = clusterGetNodes();
Set<RedisClusterNode> masters = new HashSet<RedisClusterNode>();
for (Iterator<RedisClusterNode> iterator = res.iterator(); iterator.hasNext();) {
RedisClusterNode redisClusterNode = iterator.next();
if (redisClusterNode.isMaster()) {
masters.add(redisClusterNode);
}
}
Map<RedisClusterNode, Collection<RedisClusterNode>> result = new HashMap<RedisClusterNode, Collection<RedisClusterNode>>();
for (Iterator<RedisClusterNode> iterator = res.iterator(); iterator.hasNext();) {
RedisClusterNode redisClusterNode = iterator.next();
for (RedisClusterNode masterNode : masters) {
if (redisClusterNode.getMasterId() != null
&& redisClusterNode.getMasterId().equals(masterNode.getId())) {
Collection<RedisClusterNode> list = result.get(masterNode);
if (list == null) {
list = new ArrayList<RedisClusterNode>();
result.put(masterNode, list);
}
list.add(redisClusterNode);
}
}
}
return result;
} | @Test
public void testClusterGetMasterSlaveMap() {
Map<RedisClusterNode, Collection<RedisClusterNode>> map = connection.clusterGetMasterSlaveMap();
assertThat(map).hasSize(3);
for (Collection<RedisClusterNode> slaves : map.values()) {
assertThat(slaves).hasSize(1);
}
} |
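A stripped-down sketch of the same two-pass grouping, using a hypothetical Node record in place of RedisClusterNode: first collect the masters, then attach every node whose masterId matches a master's id.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class MasterSlaveGroupingDemo {
    // Hypothetical minimal stand-in for RedisClusterNode: id, optional masterId, master flag.
    record Node(String id, String masterId, boolean master) { }

    // Same two-pass grouping as clusterGetMasterSlaveMap above.
    static Map<Node, List<Node>> group(List<Node> nodes) {
        Map<Node, List<Node>> result = new HashMap<>();
        for (Node node : nodes) {
            if (node.master()) {
                result.put(node, new ArrayList<>());
            }
        }
        for (Node node : nodes) {
            for (Node master : result.keySet()) {
                if (node.masterId() != null && node.masterId().equals(master.id())) {
                    result.get(master).add(node);
                }
            }
        }
        return result;
    }

    public static void main(String[] args) {
        List<Node> nodes = List.of(
                new Node("m1", null, true),
                new Node("s1", "m1", false),
                new Node("m2", null, true),
                new Node("s2", "m2", false));
        group(nodes).forEach((m, slaves) -> System.out.println(m.id() + " -> " + slaves.size() + " slave(s)"));
    }
}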
@SuppressWarnings("checkstyle:npathcomplexity")
public PartitionServiceState getPartitionServiceState() {
PartitionServiceState state = getPartitionTableState();
if (state != SAFE) {
return state;
}
if (!checkAndTriggerReplicaSync()) {
return REPLICA_NOT_SYNC;
}
return SAFE;
} | @Test
public void shouldNotBeSafe_whenUnknownReplicaOwnerPresent() throws UnknownHostException {
TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory();
HazelcastInstance hz = factory.newHazelcastInstance();
InternalPartitionServiceImpl partitionService = getNode(hz).partitionService;
partitionService.firstArrangement();
PartitionStateManager partitionStateManager = partitionService.getPartitionStateManager();
InternalPartitionImpl partition = partitionStateManager.getPartitionImpl(0);
PartitionReplica[] members = partition.replicas();
PartitionReplica[] illegalMembers = Arrays.copyOf(members, members.length);
Address address = members[0].address();
illegalMembers[0] = new PartitionReplica(
new Address(address.getInetAddress(), address.getPort() + 1000), members[0].uuid());
partition.setReplicas(illegalMembers);
PartitionReplicaStateChecker replicaStateChecker = partitionService.getPartitionReplicaStateChecker();
assertEquals(PartitionServiceState.REPLICA_NOT_OWNED, replicaStateChecker.getPartitionServiceState());
partition.setReplicas(members);
assertEquals(PartitionServiceState.SAFE, replicaStateChecker.getPartitionServiceState());
} |
@Override
public <T> ResponseFuture<T> sendRequest(Request<T> request, RequestContext requestContext)
{
doEvaluateDisruptContext(request, requestContext);
return _client.sendRequest(request, requestContext);
} | @Test
public void testSendRequest3()
{
when(_controller.getDisruptContext(any(String.class), any(ResourceMethod.class))).thenReturn(_disrupt);
_client.sendRequest(_request, _context, _behavior);
verify(_underlying, times(1)).sendRequest(eq(_request), eq(_context), eq(_behavior));
verify(_context, times(1)).putLocalAttr(eq(DISRUPT_CONTEXT_KEY), eq(_disrupt));
verify(_context, times(1)).putLocalAttr(eq(DISRUPT_SOURCE_KEY), any(String.class));
} |
@Override
@Cacheable(value = RedisKeyConstants.MAIL_ACCOUNT, key = "#id", unless = "#result == null")
public MailAccountDO getMailAccountFromCache(Long id) {
return getMailAccount(id);
} | @Test
public void testGetMailAccountFromCache() {
// mock data
MailAccountDO dbMailAccount = randomPojo(MailAccountDO.class);
mailAccountMapper.insert(dbMailAccount);// @Sql: insert an existing record first
// prepare parameters
Long id = dbMailAccount.getId();
// invoke
MailAccountDO mailAccount = mailAccountService.getMailAccountFromCache(id);
// assert
assertPojoEquals(dbMailAccount, mailAccount);
} |
@Override
public final void isEqualTo(@Nullable Object other) {
if (Objects.equal(actual, other)) {
return;
}
// Fail but with a more descriptive message:
if (actual == null || !(other instanceof Map)) {
super.isEqualTo(other);
return;
}
containsEntriesInAnyOrder((Map<?, ?>) other, /* allowUnexpected= */ false);
} | @Test
public void isEqualToNonMap() {
ImmutableMap<String, Integer> actual = ImmutableMap.of("jan", 1, "feb", 2, "march", 3);
expectFailureWhenTestingThat(actual).isEqualTo("something else");
assertFailureKeys("expected", "but was");
} |
public static void verifyKafkaBrokers(Properties props) {
//ensure bootstrap.servers is assigned
String brokerList = getString(BOOTSTRAP_SERVERS_CONFIG, props); //usually = "bootstrap.servers"
String[] brokers = brokerList.split(",");
for (String broker : brokers) {
checkArgument(
broker.contains(":"),
"Proper broker formatting requires a \":\" between the host and the port (input=" + broker + ")"
);
String host = broker.substring(0, broker.indexOf(":")); //we could validate the host if we wanted to
String port = broker.substring(broker.indexOf(":") + 1);
parseInt(port);//every port should be an integer
}
} | @Test
public void verifyKafkaBrokers_happyPath_multipleBrokers() {
Properties props = new Properties();
props.setProperty("bootstrap.servers", "localhost:9092,myhost.com:9091");
//does nothing when input is valid
verifyKafkaBrokers(props);
} |
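A hypothetical standalone mirror of the broker-list validation above (names here are illustrative, not the real helper): every comma-separated entry must contain a ':' and end in a numeric port.

import java.util.Properties;

public class BrokerListDemo {
    // Hypothetical mirror of verifyKafkaBrokers above: every comma-separated
    // entry must be host:port with a numeric port.
    static void checkBrokers(String brokerList) {
        for (String broker : brokerList.split(",")) {
            int colon = broker.indexOf(':');
            if (colon < 0) {
                throw new IllegalArgumentException("missing ':' in " + broker);
            }
            Integer.parseInt(broker.substring(colon + 1)); // throws if the port is not a number
        }
    }

    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "localhost:9092,myhost.com:9091");
        checkBrokers(props.getProperty("bootstrap.servers")); // valid: passes silently
        try {
            checkBrokers("localhost-no-port");
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}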
@Override
public boolean containsAll(Collection<?> c) {
for (Object object : c) {
if (!contains(object)) {
return false;
}
}
return true;
} | @Test
public void testContainsAll() {
Set<Integer> set = redisson.getSortedSet("set");
for (int i = 0; i < 200; i++) {
set.add(i);
}
Assertions.assertTrue(set.containsAll(Arrays.asList(30, 11)));
Assertions.assertFalse(set.containsAll(Arrays.asList(30, 711, 11)));
} |
@Override
public Result analysis(
final Result result,
final StreamAccessLogsMessage.Identifier identifier,
final HTTPAccessLogEntry entry,
final Role role
) {
switch (role) {
case PROXY:
return analyzeProxy(result, entry);
case SIDECAR:
if (result.hasResult()) {
return result;
}
return analyzeSideCar(result, entry);
}
return Result.builder().build();
} | @Test
public void testIngressMetric() throws IOException {
try (InputStreamReader isr = new InputStreamReader(getResourceAsStream("envoy-ingress.msg"))) {
StreamAccessLogsMessage.Builder requestBuilder = StreamAccessLogsMessage.newBuilder();
JsonFormat.parser().merge(isr, requestBuilder);
AccessLogAnalyzer.Result result = this.analysis.analysis(AccessLogAnalyzer.Result.builder().build(), requestBuilder.getIdentifier(), requestBuilder.getHttpLogs().getLogEntry(0), Role.PROXY);
Assertions.assertEquals(2, result.getMetrics().getHttpMetrics().getMetricsCount());
HTTPServiceMeshMetric incoming = result.getMetrics().getHttpMetrics().getMetrics(0);
Assertions.assertEquals("UNKNOWN", incoming.getSourceServiceName());
Assertions.assertEquals("ingress", incoming.getDestServiceName());
Assertions.assertEquals(DetectPoint.server, incoming.getDetectPoint());
HTTPServiceMeshMetric outgoing = result.getMetrics().getHttpMetrics().getMetrics(1);
Assertions.assertEquals("ingress", outgoing.getSourceServiceName());
Assertions.assertEquals("productpage", outgoing.getDestServiceName());
Assertions.assertEquals(DetectPoint.client, outgoing.getDetectPoint());
}
} |
@Override
public boolean updateTaskExecutionState(TaskExecutionStateTransition taskExecutionState) {
return state.tryCall(
StateWithExecutionGraph.class,
stateWithExecutionGraph ->
stateWithExecutionGraph.updateTaskExecutionState(
taskExecutionState, labelFailure(taskExecutionState)),
"updateTaskExecutionState")
.orElse(false);
} | @Test
void testExceptionHistoryWithTaskFailureLabels() throws Exception {
final Exception taskException = new Exception("Task Exception");
BiConsumer<AdaptiveScheduler, List<ExecutionAttemptID>> testLogic =
(scheduler, attemptIds) -> {
final ExecutionAttemptID attemptId = attemptIds.get(1);
scheduler.updateTaskExecutionState(
new TaskExecutionStateTransition(
new TaskExecutionState(
attemptId, ExecutionState.FAILED, taskException)));
};
final TestingFailureEnricher failureEnricher = new TestingFailureEnricher();
final Iterable<RootExceptionHistoryEntry> actualExceptionHistory =
new ExceptionHistoryTester(singleThreadMainThreadExecutor)
.withFailureEnrichers(Collections.singletonList(failureEnricher))
.withTestLogic(testLogic)
.run();
assertThat(actualExceptionHistory).hasSize(1);
final RootExceptionHistoryEntry failure = actualExceptionHistory.iterator().next();
assertThat(failure.getException().deserializeError(classLoader)).isEqualTo(taskException);
assertThat(failure.getFailureLabels()).isEqualTo(failureEnricher.getFailureLabels());
} |
@Override
public void execute(GraphModel graphModel) {
Graph graph = graphModel.getGraphVisible();
execute(graph);
} | @Test
public void testOneNodeDegree() {
GraphModel graphModel = GraphGenerator.generateNullUndirectedGraph(1);
Graph graph = graphModel.getGraph();
Node n = graph.getNode("0");
WeightedDegree d = new WeightedDegree();
d.execute(graph);
assertEquals(n.getAttribute(WeightedDegree.WDEGREE), 0.0);
} |
public static String getType(String fileStreamHexHead) {
if(StrUtil.isBlank(fileStreamHexHead)){
return null;
}
if (MapUtil.isNotEmpty(FILE_TYPE_MAP)) {
for (final Entry<String, String> fileTypeEntry : FILE_TYPE_MAP.entrySet()) {
if (StrUtil.startWithIgnoreCase(fileStreamHexHead, fileTypeEntry.getKey())) {
return fileTypeEntry.getValue();
}
}
}
byte[] bytes = HexUtil.decodeHex(fileStreamHexHead);
return FileMagicNumber.getMagicNumber(bytes).getExtension();
} | @Test
@Disabled
public void inputStreamAndFilenameTest() {
final File file = FileUtil.file("e:/laboratory/test.xlsx");
final String type = FileTypeUtil.getType(file);
assertEquals("xlsx", type);
} |
@Override
public boolean contains(K name) {
return false;
} | @Test
public void testContains() {
assertFalse(HEADERS.contains("name1"));
} |
public static UnifiedDiff parseUnifiedDiff(InputStream stream) throws IOException, UnifiedDiffParserException {
UnifiedDiffReader parser = new UnifiedDiffReader(new BufferedReader(new InputStreamReader(stream)));
return parser.parse();
} | @Test
public void testSimpleParse2() throws IOException {
UnifiedDiff diff = UnifiedDiffReader.parseUnifiedDiff(UnifiedDiffReaderTest.class.getResourceAsStream("jsqlparser_patch_1.diff"));
System.out.println(diff);
assertThat(diff.getFiles().size()).isEqualTo(2);
UnifiedDiffFile file1 = diff.getFiles().get(0);
assertThat(file1.getFromFile()).isEqualTo("src/main/jjtree/net/sf/jsqlparser/parser/JSqlParserCC.jjt");
assertThat(file1.getPatch().getDeltas().size()).isEqualTo(3);
AbstractDelta<String> first = file1.getPatch().getDeltas().get(0);
assertThat(first.getSource().size()).isGreaterThan(0);
assertThat(first.getTarget().size()).isGreaterThan(0);
assertThat(diff.getTail()).isEqualTo("2.17.1.windows.2\n");
} |
@JsonCreator
public static ClosingRetentionStrategyConfig create(@JsonProperty(TYPE_FIELD) String type,
@JsonProperty("max_number_of_indices") @Min(1) int maxNumberOfIndices) {
return new AutoValue_ClosingRetentionStrategyConfig(type, maxNumberOfIndices);
} | @Test
public void testSerialization() throws JsonProcessingException {
final ClosingRetentionStrategyConfig config = ClosingRetentionStrategyConfig.create(20);
final ObjectMapper objectMapper = new ObjectMapperProvider().get();
final String json = objectMapper.writeValueAsString(config);
final Object document = Configuration.defaultConfiguration().jsonProvider().parse(json);
assertThat((String) JsonPath.read(document, "$.type")).isEqualTo("org.graylog2.indexer.retention.strategies.ClosingRetentionStrategyConfig");
assertThat((Integer) JsonPath.read(document, "$.max_number_of_indices")).isEqualTo(20);
} |
@Override
public int run(InputStream in, PrintStream out, PrintStream err, List<String> args) throws Exception {
OptionParser optParser = new OptionParser();
OptionSpec<Long> offsetOpt = optParser.accepts("offset", "offset for reading input").withRequiredArg()
.ofType(Long.class).defaultsTo(Long.valueOf(0));
OptionSpec<Long> limitOpt = optParser.accepts("limit", "maximum number of records in the outputfile")
.withRequiredArg().ofType(Long.class).defaultsTo(Long.MAX_VALUE);
OptionSpec<Double> fracOpt = optParser.accepts("samplerate", "rate at which records will be collected")
.withRequiredArg().ofType(Double.class).defaultsTo(Double.valueOf(1));
OptionSet opts = optParser.parse(args.toArray(new String[0]));
List<String> nargs = (List<String>) opts.nonOptionArguments();
if (nargs.size() < 2) {
printHelp(out);
return 0;
}
inFiles = Util.getFiles(nargs.subList(0, nargs.size() - 1));
System.out.println("List of input files:");
for (Path p : inFiles) {
System.out.println(p);
}
currentInput = -1;
nextInput();
OutputStream output = out;
String lastArg = nargs.get(nargs.size() - 1);
if (nargs.size() > 1 && !lastArg.equals("-")) {
output = Util.createFromFS(lastArg);
}
writer = new DataFileWriter<>(new GenericDatumWriter<>());
String codecName = reader.getMetaString(DataFileConstants.CODEC);
CodecFactory codec = (codecName == null) ? CodecFactory.fromString(DataFileConstants.NULL_CODEC)
: CodecFactory.fromString(codecName);
writer.setCodec(codec);
for (String key : reader.getMetaKeys()) {
if (!DataFileWriter.isReservedMeta(key)) {
writer.setMeta(key, reader.getMeta(key));
}
}
writer.create(schema, output);
long offset = opts.valueOf(offsetOpt);
long limit = opts.valueOf(limitOpt);
double samplerate = opts.valueOf(fracOpt);
sampleCounter = 1;
totalCopied = 0;
reuse = null;
if (limit < 0) {
System.out.println("limit has to be non-negative");
this.printHelp(out);
return 1;
}
if (offset < 0) {
System.out.println("offset has to be non-negative");
this.printHelp(out);
return 1;
}
if (samplerate < 0 || samplerate > 1) {
System.out.println("samplerate has to be a number between 0 and 1");
this.printHelp(out);
return 1;
}
skip(offset);
writeRecords(limit, samplerate);
System.out.println(totalCopied + " records written.");
writer.flush();
writer.close();
Util.close(out);
return 0;
} | @Test
void limitOutOfBounds() throws Exception {
Map<String, String> metadata = new HashMap<>();
metadata.put("myMetaKey", "myMetaValue");
File input1 = generateData("input1.avro", Type.INT, metadata, DEFLATE);
File output = new File(DIR, name.getMethodName() + ".avro");
output.deleteOnExit();
List<String> args = asList(input1.getAbsolutePath(), "--offset=" + String.valueOf(OFFSET),
"--limit=" + String.valueOf(LIMIT_OUT_OF_INPUT_BOUNDS), output.getAbsolutePath());
int returnCode = new CatTool().run(System.in, System.out, System.err, args);
assertEquals(0, returnCode);
assertEquals(ROWS_IN_INPUT_FILES - OFFSET, numRowsInFile(output));
} |
@Override
void executeTask() throws UserException {
LOG.info("begin to execute broker pending task. job: {}", callback.getCallbackId());
getAllFileStatus();
} | @Test
public void testExecuteTask(@Injectable BrokerLoadJob brokerLoadJob,
@Injectable BrokerFileGroup brokerFileGroup,
@Injectable BrokerDesc brokerDesc,
@Mocked GlobalStateMgr globalStateMgr,
@Injectable TBrokerFileStatus tBrokerFileStatus) throws UserException {
Map<FileGroupAggKey, List<BrokerFileGroup>> aggKeyToFileGroups = Maps.newHashMap();
List<BrokerFileGroup> brokerFileGroups = Lists.newArrayList();
brokerFileGroups.add(brokerFileGroup);
FileGroupAggKey aggKey = new FileGroupAggKey(1L, null);
aggKeyToFileGroups.put(aggKey, brokerFileGroups);
new Expectations() {
{
globalStateMgr.getNextId();
result = 1L;
brokerFileGroup.getFilePaths();
result = "hdfs://localhost:8900/test_column";
}
};
new MockUp<HdfsUtil>() {
@Mock
public void parseFile(String path, BrokerDesc brokerDesc, List<TBrokerFileStatus> fileStatuses) {
fileStatuses.add(tBrokerFileStatus);
}
};
BrokerLoadPendingTask brokerLoadPendingTask =
new BrokerLoadPendingTask(brokerLoadJob, aggKeyToFileGroups, brokerDesc);
brokerLoadPendingTask.executeTask();
BrokerPendingTaskAttachment brokerPendingTaskAttachment =
Deencapsulation.getField(brokerLoadPendingTask, "attachment");
Assert.assertEquals(1, brokerPendingTaskAttachment.getFileNumByTable(aggKey));
Assert.assertEquals(tBrokerFileStatus, brokerPendingTaskAttachment.getFileStatusByTable(aggKey).get(0).get(0));
} |
public float toFloat(String name) {
return toFloat(name, 0.0f);
} | @Test
public void testToFloat_String() {
System.out.println("toFloat");
float expResult;
float result;
Properties props = new Properties();
props.put("value1", "12345.6789");
props.put("value2", "-9000.001");
props.put("empty", "");
props.put("str", "abc");
props.put("boolean", "true");
props.put("float", "24.98");
props.put("int", "12");
props.put("char", "a");
PropertyParser instance = new PropertyParser(props);
expResult = 12345.6789f;
result = instance.toFloat("value1");
assertEquals(expResult, result, 0f);
expResult = -9000.001f;
result = instance.toFloat("value2");
assertEquals(expResult, result, 0f);
expResult = 0f;
result = instance.toFloat("empty");
assertEquals(expResult, result, 0f);
expResult = 0f;
result = instance.toFloat("str");
assertEquals(expResult, result, 0f);
expResult = 0f;
result = instance.toFloat("boolean");
assertEquals(expResult, result, 0f);
expResult = 24.98f;
result = instance.toFloat("float");
assertEquals(expResult, result, 0f);
expResult = 12f;
result = instance.toFloat("int");
assertEquals(expResult, result, 0f);
expResult = 0f;
result = instance.toFloat("char");
assertEquals(expResult, result, 0f);
expResult = 0f;
result = instance.toFloat("nonexistent");
assertEquals(expResult, result, 0f);
} |
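A hedged sketch of the two-argument toFloat that the one-argument overload above presumably delegates to, inferred only from the test's expectations: missing, empty, or non-numeric values fall back to the default. The real implementation is not shown here, so treat this as an assumption.

import java.util.Properties;

public class LenientFloatDemo {
    // Hypothetical mirror of toFloat(name, defaultValue): parse failures and
    // missing keys fall back to the supplied default, as the test implies.
    static float toFloat(Properties props, String name, float defaultValue) {
        String value = props.getProperty(name);
        if (value == null || value.isEmpty()) {
            return defaultValue;
        }
        try {
            return Float.parseFloat(value);
        } catch (NumberFormatException e) {
            return defaultValue;
        }
    }

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("value1", "12345.6789");
        props.put("str", "abc");
        System.out.println(toFloat(props, "value1", 0f));  // 12345.679
        System.out.println(toFloat(props, "str", 0f));     // 0.0
        System.out.println(toFloat(props, "missing", 0f)); // 0.0
    }
}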
public static boolean isNumber(String text) {
final int startPos = findStartPosition(text);
if (startPos < 0) {
return false;
}
for (int i = startPos; i < text.length(); i++) {
char ch = text.charAt(i);
if (!Character.isDigit(ch)) {
return false;
}
}
return true;
} | @Test
@DisplayName("Tests that isNumber returns false for empty, space or null")
void isNumberEmpty() {
assertFalse(ObjectHelper.isNumber(""));
assertFalse(ObjectHelper.isNumber(" "));
assertFalse(ObjectHelper.isNumber(null));
} |
private Function<KsqlConfig, Kudf> getUdfFactory(
final Method method,
final UdfDescription udfDescriptionAnnotation,
final String functionName,
final FunctionInvoker invoker,
final String sensorName
) {
return ksqlConfig -> {
final Object actualUdf = FunctionLoaderUtils.instantiateFunctionInstance(
method.getDeclaringClass(), udfDescriptionAnnotation.name());
if (actualUdf instanceof Configurable) {
ExtensionSecurityManager.INSTANCE.pushInUdf();
try {
((Configurable) actualUdf)
.configure(ksqlConfig.getKsqlFunctionsConfigProps(functionName));
} finally {
ExtensionSecurityManager.INSTANCE.popOutUdf();
}
}
final PluggableUdf theUdf = new PluggableUdf(invoker, actualUdf);
return metrics.<Kudf>map(m -> new UdfMetricProducer(
m.getSensor(sensorName),
theUdf,
Time.SYSTEM
)).orElse(theUdf);
};
} | @Test
public void shouldLoadFunctionWithStructSchemaProvider() {
// Given:
final UdfFactory returnDecimal = FUNC_REG.getUdfFactory(FunctionName.of("KsqlStructUdf"));
// When:
final List<SqlArgument> args = ImmutableList.of();
final KsqlScalarFunction function = returnDecimal.getFunction(args);
// Then:
assertThat(function.getReturnType(args), equalTo(KsqlStructUdf.RETURN));
} |
static long calculateGrouping(Set<Integer> groupingSet, List<Integer> columns)
{
long grouping = (1L << columns.size()) - 1;
for (int index = 0; index < columns.size(); index++) {
int column = columns.get(index);
if (groupingSet.contains(column)) {
// Leftmost argument to grouping() (i.e. when index = 0) corresponds to
// the most significant bit in the result. That is why we shift 1L starting
// from the columns.size() - 1 bit index.
grouping = grouping & ~(1L << (columns.size() - 1 - index));
}
}
return grouping;
} | @Test
public void testGroupingOperationSomeBitsSet()
{
List<Integer> groupingOrdinals = ImmutableList.of(7, 2, 9, 3, 5);
List<Set<Integer>> groupingSetOrdinals = ImmutableList.of(ImmutableSet.of(4, 2), ImmutableSet.of(9, 7, 14), ImmutableSet.of(5, 2, 7), ImmutableSet.of(3));
List<Long> expectedResults = ImmutableList.of(23L, 11L, 6L, 29L);
for (int groupId = 0; groupId < groupingSetOrdinals.size(); groupId++) {
Set<Integer> groupingSet = groupingSetOrdinals.get(groupId);
assertEquals(Long.valueOf(calculateGrouping(groupingSet, groupingOrdinals)), expectedResults.get(groupId));
}
} |
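A self-contained sketch reproducing the bit layout used by calculateGrouping: all bits start set, and the bit for each grouped column is cleared, with the leftmost column mapped to the most significant bit. The worked case below matches the first expected value (23) in the test.

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class GroupingBitsDemo {
    // Same bit layout as calculateGrouping above.
    static long grouping(Set<Integer> groupingSet, List<Integer> columns) {
        long grouping = (1L << columns.size()) - 1;
        for (int index = 0; index < columns.size(); index++) {
            if (groupingSet.contains(columns.get(index))) {
                grouping &= ~(1L << (columns.size() - 1 - index));
            }
        }
        return grouping;
    }

    public static void main(String[] args) {
        List<Integer> columns = Arrays.asList(7, 2, 9, 3, 5);
        Set<Integer> groupingSet = new HashSet<>(Arrays.asList(4, 2));
        // Only column 2 (index 1) is grouped, so bit 3 is cleared:
        // 0b11111 -> 0b10111 = 23, matching the first expected value in the test.
        System.out.println(grouping(groupingSet, columns)); // 23
    }
}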
@Override
public JCExpression inline(Inliner inliner) throws CouldNotResolveImportException {
return inliner
.importPolicy()
.staticReference(
inliner, classIdent().getTopLevelClass(), classIdent().getName(), getName());
} | @Test
public void inline() {
ImportPolicy.bind(context, ImportPolicy.IMPORT_TOP_LEVEL);
assertInlines(
"Integer.valueOf",
UStaticIdent.create(
"java.lang.Integer",
"valueOf",
UMethodType.create(
UClassType.create("java.lang.Integer"), UClassType.create("java.lang.String"))));
} |
void addIndexSchema(long shadowIdxId, long originIdxId, @NotNull String shadowIndexName,
short shadowIdxShortKeyCount,
@NotNull List<Column> shadowIdxSchema) {
indexIdMap.put(shadowIdxId, originIdxId);
indexIdToName.put(shadowIdxId, shadowIndexName);
indexShortKeyMap.put(shadowIdxId, shadowIdxShortKeyCount);
indexSchemaMap.put(shadowIdxId, shadowIdxSchema);
} | @Test
public void testShow() {
new MockUp<WarehouseManager>() {
@Mock
public Warehouse getWarehouseAllowNull(long warehouseId) {
return new DefaultWarehouse(WarehouseManager.DEFAULT_WAREHOUSE_ID,
WarehouseManager.DEFAULT_WAREHOUSE_NAME);
}
};
SchemaChangeHandler schemaChangeHandler = new SchemaChangeHandler();
LakeTableSchemaChangeJob alterJobV2 =
new LakeTableSchemaChangeJob(12345L, db.getId(), table.getId(), table.getName(), 10);
alterJobV2.addIndexSchema(1L, 2L, "a", (short) 1, Lists.newArrayList());
schemaChangeHandler.addAlterJobV2(alterJobV2);
System.out.println(schemaChangeHandler.getAlterJobInfosByDb(db));
new MockUp<WarehouseManager>() {
@Mock
public Warehouse getWarehouseAllowNull(long warehouseId) {
return null;
}
};
SchemaChangeHandler schemaChangeHandler2 = new SchemaChangeHandler();
alterJobV2 = new LakeTableSchemaChangeJob(12345L, db.getId(), table.getId(), table.getName(), 10);
alterJobV2.addIndexSchema(1L, 2L, "a", (short) 1, Lists.newArrayList());
schemaChangeHandler2.addAlterJobV2(alterJobV2);
System.out.println(schemaChangeHandler2.getAlterJobInfosByDb(db));
} |
@VisibleForTesting
public List<ChunkRange> splitEvenlySizedChunks(
TableId tableId,
Object min,
Object max,
long approximateRowCnt,
int chunkSize,
int dynamicChunkSize) {
LOG.info(
"Use evenly-sized chunk optimization for table {}, the approximate row count is {}, the chunk size is {}, the dynamic chunk size is {}",
tableId,
approximateRowCnt,
chunkSize,
dynamicChunkSize);
if (approximateRowCnt <= chunkSize) {
// there is no more than one chunk, return full table as a chunk
return Collections.singletonList(ChunkRange.all());
}
final List<ChunkRange> splits = new ArrayList<>();
Object chunkStart = null;
Object chunkEnd = ObjectUtils.plus(min, dynamicChunkSize);
while (ObjectUtils.compare(chunkEnd, max) <= 0) {
splits.add(ChunkRange.of(chunkStart, chunkEnd));
chunkStart = chunkEnd;
try {
chunkEnd = ObjectUtils.plus(chunkEnd, dynamicChunkSize);
} catch (ArithmeticException e) {
// Stop chunk split to avoid dead loop when number overflows.
break;
}
}
// add the ending split
splits.add(ChunkRange.of(chunkStart, null));
return splits;
} | @Test
public void testSplitEvenlySizedChunksOverflow() {
MySqlChunkSplitter splitter = new MySqlChunkSplitter(null, null);
List<ChunkRange> res =
splitter.splitEvenlySizedChunks(
new TableId("catalog", "db", "tab"),
Integer.MAX_VALUE - 19,
Integer.MAX_VALUE,
20,
10,
10);
assertEquals(2, res.size());
assertEquals(ChunkRange.of(null, 2147483638), res.get(0));
assertEquals(ChunkRange.of(2147483638, null), res.get(1));
} |
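A simplified sketch of the even-chunk loop above with plain Integer boundaries instead of ChunkRange: the open-ended first and last chunks use null, and an overflow of the end boundary stops the loop early, which is the case the test checks.

import java.util.ArrayList;
import java.util.List;

public class EvenChunkDemo {
    // Simplified mirror of the splitting loop above: null stands for an open
    // boundary, and an overflow of the end boundary stops the loop early.
    static List<Integer[]> split(int min, int max, int chunkSize) {
        List<Integer[]> chunks = new ArrayList<>();
        Integer chunkStart = null;
        Integer chunkEnd = min + chunkSize;
        while (chunkEnd <= max) {
            chunks.add(new Integer[]{chunkStart, chunkEnd});
            chunkStart = chunkEnd;
            try {
                chunkEnd = Math.addExact(chunkEnd, chunkSize);
            } catch (ArithmeticException e) {
                break; // number overflow: stop splitting, the tail chunk covers the rest
            }
        }
        chunks.add(new Integer[]{chunkStart, null}); // the open-ended last chunk
        return chunks;
    }

    public static void main(String[] args) {
        // Same inputs as the overflow test: exactly two chunks, split at 2147483638.
        for (Integer[] c : split(Integer.MAX_VALUE - 19, Integer.MAX_VALUE, 10)) {
            System.out.println(c[0] + " .. " + c[1]);
        }
    }
}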
public FloatArrayAsIterable usingExactEquality() {
return new FloatArrayAsIterable(EXACT_EQUALITY_CORRESPONDENCE, iterableSubject());
} | @Test
public void usingExactEquality_contains_otherTypes_bigDecimalNotSupported() {
BigDecimal expected = BigDecimal.valueOf(2.0);
float[] actual = array(1.0f, 2.0f, 3.0f);
expectFailureWhenTestingThat(actual).usingExactEquality().contains(expected);
assertFailureKeys(
"value of",
"expected to contain",
"testing whether",
"but was",
"additionally, one or more exceptions were thrown while comparing elements",
"first exception");
assertFailureValue("expected to contain", expected.toString());
assertThatFailure()
.factValue("first exception")
.startsWith(
"compare("
+ actual[0]
+ ", "
+ expected
+ ") threw java.lang.IllegalArgumentException");
assertThatFailure()
.factValue("first exception")
.contains(
"Expected value in assertion using exact float equality was of unsupported type "
+ BigDecimal.class
+ " (it may not have an exact float representation)");
} |
@Override
public MetricRegistry metricRegistry() {
return this.metricRegistry;
} | @Test
@Order(1)
void testMetricsEnabled() {
KubernetesMetricsInterceptor metricsInterceptor = new KubernetesMetricsInterceptor();
List<Interceptor> interceptors = Collections.singletonList(metricsInterceptor);
try (KubernetesClient client =
KubernetesClientFactory.buildKubernetesClient(
interceptors, kubernetesClient.getConfiguration())) {
SparkApplication sparkApplication = createSparkApplication();
ConfigMap configMap = createConfigMap();
Map<String, Metric> metrics = new HashMap<>(metricsInterceptor.metricRegistry().getMetrics());
Assertions.assertEquals(9, metrics.size());
client.resource(sparkApplication).create();
client.resource(configMap).get();
Map<String, Metric> metrics2 =
new HashMap<>(metricsInterceptor.metricRegistry().getMetrics());
Assertions.assertEquals(17, metrics2.size());
List<String> expectedMetricsName =
Arrays.asList(
"http.response.201",
"http.request.post",
"sparkapplications.post",
"spark-test.sparkapplications.post",
"spark-test.sparkapplications.post",
"configmaps.get",
"spark-system.configmaps.get",
"2xx",
"4xx");
expectedMetricsName.stream()
.forEach(
name -> {
Meter metric = (Meter) metrics2.get(name);
Assertions.assertEquals(metric.getCount(), 1);
});
client.resource(sparkApplication).delete();
}
} |
public static boolean isValidDateFormatToStringDate( String dateFormat, String dateString ) {
String detectedDateFormat = detectDateFormat( dateString );
if ( ( dateFormat != null ) && ( dateFormat.equals( detectedDateFormat ) ) ) {
return true;
}
return false;
} | @Test
public void testIsValidDateFormatToStringDate() {
assertTrue( DateDetector.isValidDateFormatToStringDate( SAMPLE_DATE_FORMAT_US, SAMPLE_DATE_STRING_US ) );
assertFalse( DateDetector.isValidDateFormatToStringDate( null, SAMPLE_DATE_STRING_US ) );
assertFalse( DateDetector.isValidDateFormatToStringDate( SAMPLE_DATE_FORMAT_US, null ) );
} |
@Override
public int relaunchContainer(ContainerStartContext ctx)
throws IOException, ConfigurationException {
return handleLaunchForLaunchType(ctx,
ApplicationConstants.ContainerLaunchType.RELAUNCH);
} | @Test
public void testRelaunchContainer() throws Exception {
Container container = mock(Container.class);
LinuxContainerExecutor lce = mock(LinuxContainerExecutor.class);
ContainerStartContext.Builder builder =
new ContainerStartContext.Builder();
builder.setContainer(container).setUser("foo");
ContainerStartContext ctx = builder.build();
lce.relaunchContainer(ctx);
verify(lce, times(1)).relaunchContainer(ctx);
} |
public static org.apache.avro.Schema toAvroSchema(
Schema beamSchema, @Nullable String name, @Nullable String namespace) {
final String schemaName = Strings.isNullOrEmpty(name) ? "topLevelRecord" : name;
final String schemaNamespace = namespace == null ? "" : namespace;
String childNamespace =
!"".equals(schemaNamespace) ? schemaNamespace + "." + schemaName : schemaName;
List<org.apache.avro.Schema.Field> fields = Lists.newArrayList();
for (Field field : beamSchema.getFields()) {
org.apache.avro.Schema.Field recordField = toAvroField(field, childNamespace);
fields.add(recordField);
}
return org.apache.avro.Schema.createRecord(schemaName, null, schemaNamespace, false, fields);
} | @Test
public void testAvroSchemaFromBeamSchemaCanBeParsed() {
org.apache.avro.Schema convertedSchema = AvroUtils.toAvroSchema(getBeamSchema());
org.apache.avro.Schema validatedSchema =
new org.apache.avro.Schema.Parser().parse(convertedSchema.toString());
assertEquals(convertedSchema, validatedSchema);
} |
static JavaType constructType(Type type) {
try {
return constructTypeInner(type);
} catch (Exception e) {
throw new InvalidDataTableTypeException(type, e);
}
} | @Test
void upper_bound_of_wild_card_list_type_replaces_wild_card_type() {
JavaType javaType = TypeFactory.constructType(LIST_OF_WILD_CARD_NUMBER);
TypeFactory.ListType listType = (TypeFactory.ListType) javaType;
JavaType elementType = listType.getElementType();
assertThat(elementType.getOriginal(), equalTo(Number.class));
} |
@Override
public void setLoginTimeout(final int seconds) throws SQLException {
dataSource.setLoginTimeout(seconds);
} | @Test
void assertSetLoginTimeoutFailure() throws SQLException {
doThrow(new SQLException("")).when(dataSource).setLoginTimeout(LOGIN_TIMEOUT);
assertThrows(SQLException.class, () -> new PipelineDataSourceWrapper(dataSource, TypedSPILoader.getService(DatabaseType.class, "FIXTURE")).setLoginTimeout(LOGIN_TIMEOUT));
} |
@Override
public ClassLoader getDefaultClassLoader() {
return DEFAULT_CLASS_LOADER;
} | @Test
public void resources_notFound() {
runWithClassloader(provider -> {
try {
var resources = provider.getDefaultClassLoader().getResources("a.b.c");
assertThat(Collections.list(resources)).isEmpty();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
});
} |
public static <T> ProcessorMetaSupplier metaSupplier(
@Nonnull String directoryName,
@Nonnull FunctionEx<? super T, ? extends String> toStringFn,
@Nonnull String charset,
@Nullable String datePattern,
long maxFileSize,
boolean exactlyOnce
) {
return metaSupplier(directoryName, toStringFn, charset, datePattern, maxFileSize, exactlyOnce, SYSTEM_CLOCK);
} | @Test
public void test_rollByDate() {
int numItems = 10;
DAG dag = new DAG();
Vertex src = dag.newVertex("src", () -> new SlowSourceP(semaphore, numItems)).localParallelism(1);
@SuppressWarnings("Convert2MethodRef")
Vertex sink = dag.newVertex("sink", WriteFileP.metaSupplier(
directory.toString(), Objects::toString, "utf-8", "SSS", DISABLE_ROLLING, true,
(LongSupplier & Serializable) () -> clock.get()));
dag.edge(between(src, sink));
Job job = instance().getJet().newJob(dag);
for (int i = 0; i < numItems; i++) {
// When
semaphore.release();
String stringValue = i + System.lineSeparator();
// Then
Path file = directory.resolve(String.format("%03d-0", i));
assertTrueEventually(() -> assertTrue("file not found: " + file, Files.exists(file)), 5);
assertTrueEventually(() ->
assertEquals(stringValue, Files.readString(file)), 5);
clock.incrementAndGet();
}
job.join();
} |
public ShardingTable getShardingTable(final String logicTableName) {
return findShardingTable(logicTableName).orElseThrow(() -> new ShardingTableRuleNotFoundException(Collections.singleton(logicTableName)));
} | @Test
void assertGetTableRuleWithShardingTable() {
ShardingTable actual = createMaximumShardingRule().getShardingTable("Logic_Table");
assertThat(actual.getLogicTable(), is("LOGIC_TABLE"));
} |
public void setBottomPadding(int bottomPadding) {
mBottomPadding = bottomPadding;
for (int childIndex = 0; childIndex < getChildCount(); childIndex++) {
if (getChildAt(childIndex) instanceof MainChild v) {
v.setBottomOffset(bottomPadding);
}
}
} | @Test
public void testSettingPadding() {
AnyKeyboardView mock = Mockito.mock(AnyKeyboardView.class);
mUnderTest.addView(mock);
Mockito.verify(mock).setBottomOffset(0);
View mockRegular = Mockito.mock(View.class);
mUnderTest.addView(mockRegular);
mUnderTest.setBottomPadding(10);
Mockito.verify(mock).setBottomOffset(10);
AnyKeyboardView mock2 = Mockito.mock(AnyKeyboardView.class);
mUnderTest.addView(mock2);
Mockito.verify(mock2).setBottomOffset(10);
} |
public <T> List<T> apply(T[] a) {
return apply(Arrays.asList(a));
} | @Test
public void minOnlyRange() {
Range r = new Range(4,Integer.MAX_VALUE);
assertEquals("[e, f]", toS(r.apply(array)));
assertEquals("[e, f]", toS(r.apply(list)));
assertEquals("[e, f]", toS(r.apply(set)));
} |
public ServiceBuilder<U> addMethod(MethodConfig method) {
if (this.methods == null) {
this.methods = new ArrayList<>();
}
this.methods.add(method);
return getThis();
} | @Test
void addMethod() {
MethodConfig method = new MethodConfig();
ServiceBuilder builder = new ServiceBuilder();
builder.addMethod(method);
Assertions.assertTrue(builder.build().getMethods().contains(method));
Assertions.assertEquals(1, builder.build().getMethods().size());
} |
public final long toLong(byte[] b) {
return toLong(b, 0);
} | @Test
public void testToLong() {
byte[] bytes = bitUtil.fromLong(Long.MAX_VALUE);
assertEquals(Long.MAX_VALUE, bitUtil.toLong(bytes));
bytes = bitUtil.fromLong(Long.MAX_VALUE / 7);
assertEquals(Long.MAX_VALUE / 7, bitUtil.toLong(bytes));
} |
public boolean tryAdd(final Agent agent)
{
Objects.requireNonNull(agent, "agent cannot be null");
if (Status.ACTIVE != status)
{
throw new IllegalStateException("add called when not active");
}
return addAgent.compareAndSet(null, agent);
} | @Test
void shouldDetectConcurrentAdd()
{
final Agent mockAgentOne = mock(Agent.class);
final Agent mockAgentTwo = mock(Agent.class);
final DynamicCompositeAgent compositeAgent = new DynamicCompositeAgent(ROLE_NAME, mockAgentOne, mockAgentTwo);
final AgentInvoker invoker = new AgentInvoker(Throwable::printStackTrace, null, compositeAgent);
invoker.start();
assertTrue(compositeAgent.tryAdd(mockAgentOne));
assertFalse(compositeAgent.tryAdd(mockAgentTwo));
invoker.invoke();
assertTrue(compositeAgent.tryAdd(mockAgentTwo));
} |
public static String safeAppendDirectory( String dir, String file ) {
boolean dirHasSeparator = ( ( dir.lastIndexOf( FILE_SEPARATOR ) ) == dir.length() - 1 );
boolean fileHasSeparator = ( file.indexOf( FILE_SEPARATOR ) == 0 );
if ( ( dirHasSeparator && !fileHasSeparator ) || ( !dirHasSeparator && fileHasSeparator ) ) {
return dir + file;
}
if ( dirHasSeparator && fileHasSeparator ) {
return dir + file.substring( 1 );
}
return dir + FILE_SEPARATOR + file;
} | @Test
public void testSafeAppendDirectory() {
final String expected = "dir" + Const.FILE_SEPARATOR + "file";
assertEquals( expected, Const.safeAppendDirectory( "dir", "file" ) );
assertEquals( expected, Const.safeAppendDirectory( "dir" + Const.FILE_SEPARATOR, "file" ) );
assertEquals( expected, Const.safeAppendDirectory( "dir", Const.FILE_SEPARATOR + "file" ) );
assertEquals( expected, Const.safeAppendDirectory( "dir" + Const.FILE_SEPARATOR, Const.FILE_SEPARATOR + "file" ) );
} |
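A standalone sketch of the same separator case analysis, hard-coded to "/" for illustration (the real method uses Const.FILE_SEPARATOR): exactly one separator must end up between dir and file regardless of which side already carries one.

public class PathJoinDemo {
    // Same case analysis as safeAppendDirectory above, hard-coded to "/".
    static String join(String dir, String file) {
        boolean dirEnds = dir.endsWith("/");
        boolean fileStarts = file.startsWith("/");
        if (dirEnds && fileStarts) {
            return dir + file.substring(1); // both carry one: drop one
        }
        if (dirEnds || fileStarts) {
            return dir + file;              // exactly one carries it: keep it
        }
        return dir + "/" + file;            // neither carries it: add one
    }

    public static void main(String[] args) {
        System.out.println(join("dir", "file"));   // dir/file
        System.out.println(join("dir/", "file"));  // dir/file
        System.out.println(join("dir", "/file"));  // dir/file
        System.out.println(join("dir/", "/file")); // dir/file
    }
}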
public boolean isGreaterThanOrEqual(Version than) {
return this.version.isGreaterThanOrEqual(than);
} | @Test
public void verify_methods() {
var version = Version.create(9, 5);
SonarQubeVersion underTest = new SonarQubeVersion(version);
assertThat(underTest).extracting(SonarQubeVersion::toString, SonarQubeVersion::get)
.containsExactly("9.5", version);
var otherVersion = Version.create(8, 5);
assertThat(underTest.isGreaterThanOrEqual(otherVersion)).isTrue();
} |
public TargetState initialTargetState() {
if (initialState != null) {
return initialState.toTargetState();
} else {
return null;
}
} | @Test
public void testToTargetState() {
assertEquals(TargetState.STARTED, CreateConnectorRequest.InitialState.RUNNING.toTargetState());
assertEquals(TargetState.PAUSED, CreateConnectorRequest.InitialState.PAUSED.toTargetState());
assertEquals(TargetState.STOPPED, CreateConnectorRequest.InitialState.STOPPED.toTargetState());
CreateConnectorRequest createConnectorRequest = new CreateConnectorRequest("test-name", Collections.emptyMap(), null);
assertNull(createConnectorRequest.initialTargetState());
} |
@Override
public int remainingCapacity() {
return Integer.MAX_VALUE;
} | @Test
public void remainingCapacity() {
assertEquals(Integer.MAX_VALUE, queue.remainingCapacity());
} |
public static Ip6Prefix valueOf(byte[] address, int prefixLength) {
return new Ip6Prefix(Ip6Address.valueOf(address), prefixLength);
} | @Test(expected = IllegalArgumentException.class)
public void testInvalidValueOfEmptyString() {
Ip6Prefix ipPrefix;
String fromString;
fromString = "";
ipPrefix = Ip6Prefix.valueOf(fromString);
} |
public Comparator<?> getValueComparator(int column) {
return valueComparators[column];
} | @Test
public void getDefaultComparatorForObjectClass() {
ObjectTableSorter sorter = new ObjectTableSorter(createTableModel("object", Object.class));
assertThat(sorter.getValueComparator(0), is(nullValue()));
} |
@Override
public AuthenticationToken authenticate(HttpServletRequest request,
final HttpServletResponse response)
throws IOException, AuthenticationException {
// If the request servlet path is in the whitelist,
// skip Kerberos authentication and return anonymous token.
final String path = request.getServletPath();
for(final String endpoint: whitelist) {
if (endpoint.equals(path)) {
return AuthenticationToken.ANONYMOUS;
}
}
AuthenticationToken token = null;
String authorization = request.getHeader(
KerberosAuthenticator.AUTHORIZATION);
if (authorization == null
|| !authorization.startsWith(KerberosAuthenticator.NEGOTIATE)) {
response.setHeader(WWW_AUTHENTICATE, KerberosAuthenticator.NEGOTIATE);
response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
if (authorization == null) {
LOG.trace("SPNEGO starting for url: {}", request.getRequestURL());
} else {
LOG.warn("'" + KerberosAuthenticator.AUTHORIZATION +
"' does not start with '" +
KerberosAuthenticator.NEGOTIATE + "' : {}", authorization);
}
} else {
authorization = authorization.substring(
KerberosAuthenticator.NEGOTIATE.length()).trim();
final Base64 base64 = new Base64(0);
final byte[] clientToken = base64.decode(authorization);
try {
final String serverPrincipal =
KerberosUtil.getTokenServerName(clientToken);
if (!serverPrincipal.startsWith("HTTP/")) {
throw new IllegalArgumentException(
"Invalid server principal " + serverPrincipal +
"decoded from client request");
}
token = Subject.doAs(serverSubject,
new PrivilegedExceptionAction<AuthenticationToken>() {
@Override
public AuthenticationToken run() throws Exception {
return runWithPrincipal(serverPrincipal, clientToken,
base64, response);
}
});
} catch (PrivilegedActionException ex) {
if (ex.getException() instanceof IOException) {
throw (IOException) ex.getException();
} else {
throw new AuthenticationException(ex.getException());
}
} catch (Exception ex) {
throw new AuthenticationException(ex);
}
}
return token;
} | @Test
public void testRequestToWhitelist() throws Exception {
final String token = new Base64(0).encodeToString(new byte[]{0, 1, 2});
final HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
final HttpServletResponse response =
Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getHeader(KerberosAuthenticator.AUTHORIZATION))
.thenReturn(KerberosAuthenticator.NEGOTIATE + token);
Mockito.when(request.getServletPath()).thenReturn("/white");
handler.authenticate(request, response);
Mockito.when(request.getServletPath()).thenReturn("/white4");
try {
handler.authenticate(request, response);
Assert.fail();
} catch (AuthenticationException ex) {
// Expected
} catch (Exception ex) {
Assert.fail();
}
} |
@Override
public IcebergEnumeratorState snapshotState(long checkpointId) {
return new IcebergEnumeratorState(
enumeratorPosition.get(), assigner.state(), enumerationHistory.snapshot());
} | @Test
public void testThrottlingDiscovery() throws Exception {
// create 10 splits
List<IcebergSourceSplit> splits =
SplitHelpers.createSplitsFromTransientHadoopTable(temporaryFolder, 10, 1);
TestingSplitEnumeratorContext<IcebergSourceSplit> enumeratorContext =
new TestingSplitEnumeratorContext<>(4);
ScanContext scanContext =
ScanContext.builder()
.streaming(true)
.startingStrategy(StreamingStartingStrategy.INCREMENTAL_FROM_EARLIEST_SNAPSHOT)
// discover one snapshot at a time
.maxPlanningSnapshotCount(1)
.build();
ManualContinuousSplitPlanner splitPlanner = new ManualContinuousSplitPlanner(scanContext, 0);
ContinuousIcebergEnumerator enumerator =
createEnumerator(enumeratorContext, scanContext, splitPlanner);
// register reader-2, and let it request a split
enumeratorContext.registerReader(2, "localhost");
enumerator.addReader(2);
enumerator.handleSourceEvent(2, new SplitRequestEvent());
// add splits[0] to the planner for next discovery
splitPlanner.addSplits(Arrays.asList(splits.get(0)));
enumeratorContext.triggerAllActions();
// because discovered split was assigned to reader, pending splits should be empty
assertThat(enumerator.snapshotState(1).pendingSplits()).isEmpty();
// split assignment to reader-2 should contain splits[0, 1)
assertThat(enumeratorContext.getSplitAssignments().get(2).getAssignedSplits())
.containsExactlyElementsOf(splits.subList(0, 1));
// add the remaining 9 splits (one for every snapshot)
// run discovery cycles while reader-2 still processing the splits[0]
for (int i = 1; i < 10; ++i) {
splitPlanner.addSplits(Arrays.asList(splits.get(i)));
enumeratorContext.triggerAllActions();
}
// can only discover up to 3 snapshots/splits
assertThat(enumerator.snapshotState(2).pendingSplits()).hasSize(3);
// split assignment to reader-2 should be splits[0, 1)
assertThat(enumeratorContext.getSplitAssignments().get(2).getAssignedSplits())
.containsExactlyElementsOf(splits.subList(0, 1));
// now reader-2 finished splits[0]
enumerator.handleSourceEvent(2, new SplitRequestEvent(Arrays.asList(splits.get(0).splitId())));
enumeratorContext.triggerAllActions();
// still have 3 pending splits. After assigned splits[1] to reader-2, one more split was
// discovered and added.
assertThat(enumerator.snapshotState(3).pendingSplits()).hasSize(3);
// split assignment to reader-2 should be splits[0, 2)
assertThat(enumeratorContext.getSplitAssignments().get(2).getAssignedSplits())
.containsExactlyElementsOf(splits.subList(0, 2));
// run 3 more split discovery cycles
for (int i = 0; i < 3; ++i) {
enumeratorContext.triggerAllActions();
}
// no more splits are discovered due to throttling
assertThat(enumerator.snapshotState(4).pendingSplits()).hasSize(3);
// split assignment to reader-2 should still be splits[0, 2)
assertThat(enumeratorContext.getSplitAssignments().get(2).getAssignedSplits())
.containsExactlyElementsOf(splits.subList(0, 2));
// now reader-2 finished splits[1]
enumerator.handleSourceEvent(2, new SplitRequestEvent(Arrays.asList(splits.get(1).splitId())));
enumeratorContext.triggerAllActions();
// still have 3 pending splits. After assigned new splits[2] to reader-2, one more split was
// discovered and added.
assertThat(enumerator.snapshotState(5).pendingSplits()).hasSize(3);
// split assignment to reader-2 should be splits[0, 3)
assertThat(enumeratorContext.getSplitAssignments().get(2).getAssignedSplits())
.containsExactlyElementsOf(splits.subList(0, 3));
} |
public static Pair<Optional<Method>, Optional<TypedExpression>> resolveMethodWithEmptyCollectionArguments(
final MethodCallExpr methodExpression,
final MvelCompilerContext mvelCompilerContext,
final Optional<TypedExpression> scope,
List<TypedExpression> arguments,
List<Integer> emptyCollectionArgumentsIndexes) {
Objects.requireNonNull(methodExpression, "MethodExpression parameter cannot be null as the method searches methods based on this expression!");
Objects.requireNonNull(mvelCompilerContext, "MvelCompilerContext parameter cannot be null!");
Objects.requireNonNull(arguments, "Arguments parameter cannot be null! Use an empty list instance if needed instead.");
Objects.requireNonNull(emptyCollectionArgumentsIndexes, "EmptyListArgumentIndexes parameter cannot be null! Use an empty list instance if needed instead.");
if (emptyCollectionArgumentsIndexes.size() > arguments.size()) {
throw new IllegalArgumentException("There cannot be more empty collection arguments than all arguments! emptyCollectionArgumentsIndexes parameter has more items than arguments parameter. "
+ "(" + emptyCollectionArgumentsIndexes.size() + " > " + arguments.size() + ")");
} else {
final List<TypedExpression> coercedArgumentsTypesList = new ArrayList<>(arguments);
Pair<Optional<Method>, Optional<TypedExpression>> resolveMethodResult =
MethodResolutionUtils.resolveMethod(methodExpression, mvelCompilerContext, scope, coercedArgumentsTypesList);
if (resolveMethodResult.a.isPresent()) {
return resolveMethodResult;
} else {
// Rather work only with the argumentsType and when a method is resolved, flip the arguments list based on it.
// This needs to go through all possible combinations.
final int indexesListSize = emptyCollectionArgumentsIndexes.size();
for (int numberOfProcessedIndexes = 0; numberOfProcessedIndexes < indexesListSize; numberOfProcessedIndexes++) {
for (int indexOfEmptyListIndex = numberOfProcessedIndexes; indexOfEmptyListIndex < indexesListSize; indexOfEmptyListIndex++) {
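// Tentatively flip the empty collection argument at this index (List <-> Map) and retry resolution.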
switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(indexOfEmptyListIndex));
resolveMethodResult =
MethodResolutionUtils.resolveMethod(methodExpression, mvelCompilerContext, scope, coercedArgumentsTypesList);
if (resolveMethodResult.a.isPresent()) {
modifyArgumentsBasedOnCoercedCollectionArguments(arguments, coercedArgumentsTypesList);
return resolveMethodResult;
}
switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(indexOfEmptyListIndex));
}
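// No match with a single extra flip; keep the argument at this index flipped for the remaining combinations.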
switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(numberOfProcessedIndexes));
}
// No method found, return empty.
return new Pair<>(Optional.empty(), scope);
}
}
} | @Test
public void resolveMethodWithEmptyCollectionArgumentsCoerceMap() {
final MethodCallExpr methodExpression = new MethodCallExpr("setItems", new MapCreationLiteralExpression(null, NodeList.nodeList()));
final List<TypedExpression> arguments = new ArrayList<>();
arguments.add(new ListExprT(new ListCreationLiteralExpression(null, NodeList.nodeList())));
final TypedExpression scope = new ObjectCreationExpressionT(Collections.emptyList(), Person.class);
final Pair<Optional<Method>, Optional<TypedExpression>> resolvedMethodResult =
MethodResolutionUtils.resolveMethodWithEmptyCollectionArguments(
methodExpression,
new MvelCompilerContext(null),
Optional.of(scope),
arguments,
List.of(0));
Assertions.assertThat(resolvedMethodResult.a).isPresent();
Assertions.assertThat(getTypedExpressionsClasses(arguments))
.containsExactlyElementsOf(List.of(MapExprT.class));
} |