focal_method | test_case |
---|---|
@Override
public URI uploadSegment(File segmentFile, LLCSegmentName segmentName) {
return uploadSegment(segmentFile, segmentName, _segmentUploadRequestTimeoutMs);
} | @Test
public void testUploadSuccess()
throws URISyntaxException {
Server2ControllerSegmentUploader uploader =
new Server2ControllerSegmentUploader(_logger, _fileUploadDownloadClient, GOOD_CONTROLLER_VIP, "segmentName",
10000, mock(ServerMetrics.class), null, _llcSegmentName.getTableName());
URI segmentURI = uploader.uploadSegment(_file, _llcSegmentName);
Assert.assertEquals(segmentURI.toString(), SEGMENT_LOCATION);
} |
@Override
public ClusterClientProvider<String> deploySessionCluster(
ClusterSpecification clusterSpecification) throws ClusterDeploymentException {
final ClusterClientProvider<String> clusterClientProvider =
deployClusterInternal(
KubernetesSessionClusterEntrypoint.class.getName(),
clusterSpecification,
false);
try (ClusterClient<String> clusterClient = clusterClientProvider.getClusterClient()) {
LOG.info(
"Create flink session cluster {} successfully, JobManager Web Interface: {}",
clusterId,
clusterClient.getWebInterfaceURL());
}
return clusterClientProvider;
} | @Test
void testDeploySessionCluster() throws Exception {
flinkConfig.set(DeploymentOptions.TARGET, KubernetesDeploymentTarget.SESSION.getName());
final ClusterClient<String> clusterClient = deploySessionCluster().getClusterClient();
checkClusterClient(clusterClient);
checkUpdatedConfigAndResourceSetting();
clusterClient.close();
} |
@Override
public void start(EdgeExplorer explorer, int startNode) {
IntArrayDeque stack = new IntArrayDeque();
GHBitSet explored = createBitSet();
stack.addLast(startNode);
int current;
while (stack.size() > 0) {
current = stack.removeLast();
if (!explored.contains(current) && goFurther(current)) {
EdgeIterator iter = explorer.setBaseNode(current);
while (iter.next()) {
int connectedId = iter.getAdjNode();
if (checkAdjacent(iter)) {
stack.addLast(connectedId);
}
}
explored.add(current);
}
}
} | @Test
public void testDFS2() {
DepthFirstSearch dfs = new DepthFirstSearch() {
@Override
protected GHBitSet createBitSet() {
return new GHBitSetImpl();
}
@Override
public boolean goFurther(int v) {
counter++;
assertFalse(set.contains(v), "v " + v + " is already contained in set. iteration:" + counter);
set.add(v);
list.add(v);
return super.goFurther(v);
}
};
BooleanEncodedValue accessEnc = new SimpleBooleanEncodedValue("access", true);
EncodedValue.InitializerConfig evConf = new EncodedValue.InitializerConfig();
accessEnc.init(evConf);
BaseGraph g = new BaseGraph.Builder(evConf.getRequiredBytes()).create();
g.edge(1, 2).setDistance(1).set(accessEnc, true, false);
g.edge(1, 4).setDistance(1).set(accessEnc, true, true);
g.edge(1, 3).setDistance(1).set(accessEnc, true, false);
g.edge(2, 3).setDistance(1).set(accessEnc, true, false);
g.edge(4, 3).setDistance(1).set(accessEnc, true, true);
dfs.start(g.createEdgeExplorer(AccessFilter.outEdges(accessEnc)), 1);
assertTrue(counter > 0);
assertEquals("[1, 2, 3, 4]", list.toString());
} |
@Override
public void run(T configuration, Environment environment) throws Exception {
final String name = name();
final String primaryName = name + PRIMARY;
final String readerName = name + READER;
final PooledDataSourceFactory primaryConfig = getDataSourceFactory(configuration);
final SessionFactory primary = requireNonNull(sessionFactoryFactory.build(this, environment, primaryConfig,
entities, primaryName));
final PooledDataSourceFactory readerConfig = getReadSourceFactory(configuration);
final SessionFactory reader = requireNonNull(sessionFactoryFactory.build(this, environment, readerConfig,
entities, readerName));
final DualSessionFactory factory = new DualSessionFactory(primary, reader);
registerUnitOfWorkListenerIfAbsent(environment).registerSessionFactory(name, factory);
final ExecutorService exec = environment.getHealthCheckExecutorService();
environment.healthChecks().register(primaryName,
new SessionFactoryHealthCheck(
exec,
primaryConfig.getValidationQueryTimeout().orElse(Duration.seconds(5)),
primary,
primaryConfig.getValidationQuery()));
environment.healthChecks().register(readerName,
new SessionFactoryHealthCheck(
exec,
readerConfig.getValidationQueryTimeout().orElse(Duration.seconds(5)),
reader,
readerConfig.getValidationQuery()));
this.sessionFactory = factory;
} | @Test
public void registersACustomNameOfHealthCheckAndDBPoolMetrics() throws Exception {
final String name = "custom-hibernate";
final HibernateBundle<Configuration> customBundle = new HibernateBundle<Configuration>(entities, factory) {
@Override
public DataSourceFactory getDataSourceFactory(Configuration configuration) {
return dbConfig;
}
@Override
public DataSourceFactory getReadSourceFactory(Configuration configuration) {
return readConfig;
}
@Override
protected String name() {
return name;
}
};
when(factory.build(eq(customBundle),
any(Environment.class),
any(DataSourceFactory.class),
anyList(),
eq(name + PRIMARY))).thenReturn(sessionFactory);
when(factory.build(eq(customBundle),
any(Environment.class),
any(DataSourceFactory.class),
anyList(),
eq(name + READER))).thenReturn(readFactory);
customBundle.run(configuration, environment);
final ArgumentCaptor<SessionFactoryHealthCheck> captor =
ArgumentCaptor.forClass(SessionFactoryHealthCheck.class);
verify(healthChecks).register(eq(name + READER), captor.capture());
} |
@Override
public HttpResponse handle(HttpRequest request) {
final List<String> uris = circularArrayAccessLogKeeper.getUris();
return new HttpResponse(200) {
@Override
public void render(OutputStream outputStream) throws IOException {
JsonGenerator generator = jsonFactory.createGenerator(outputStream);
generator.writeStartObject();
generator.writeArrayFieldStart("entries");
for (String uri : uris) {
generator.writeStartObject();
generator.writeStringField("url", uri);
generator.writeEndObject();
}
generator.writeEndArray();
generator.writeEndObject();
generator.close();
}
};
} | @Test
void testEmpty() throws IOException {
HttpResponse response = handler.handle(null);
response.render(out);
assertEquals("{\"entries\":[]}", out.toString());
} |
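For reference, the same Jackson streaming calls as a standalone sketch, showing why an empty URI list renders exactly {"entries":[]} (class and variable names here are illustrative, not part of the handler above):

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Collections;
import java.util.List;

public final class EntriesJsonDemo {
    public static void main(String[] args) throws IOException {
        List<String> uris = Collections.emptyList(); // the empty-keeper case exercised by testEmpty()
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        JsonGenerator generator = new JsonFactory().createGenerator(out);
        generator.writeStartObject();
        generator.writeArrayFieldStart("entries"); // opens "entries":[
        for (String uri : uris) {                  // zero iterations for an empty list
            generator.writeStartObject();
            generator.writeStringField("url", uri);
            generator.writeEndObject();
        }
        generator.writeEndArray();
        generator.writeEndObject();
        generator.close();
        System.out.println(out); // prints {"entries":[]}
    }
}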
private Mono<ServerResponse> listSinglePages(ServerRequest request) {
var query = new SinglePagePublicQuery(request.exchange());
return singlePageFinder.list(query.getPage(),
query.getSize(),
query.toPredicate(),
query.toComparator()
)
.flatMap(result -> ServerResponse.ok()
.contentType(MediaType.APPLICATION_JSON)
.bodyValue(result)
);
} | @Test
void listSinglePages() {
ListedSinglePageVo test = ListedSinglePageVo.builder()
.metadata(metadata("test"))
.spec(new SinglePage.SinglePageSpec())
.build();
ListResult<ListedSinglePageVo> pageResult = new ListResult<>(List.of(test));
when(singlePageFinder.list(anyInt(), anyInt(), any(), any()))
.thenReturn(Mono.just(pageResult));
webTestClient.get()
.uri("/singlepages?page=0&size=10")
.exchange()
.expectStatus().isOk()
.expectHeader().contentType(MediaType.APPLICATION_JSON)
.expectBody()
.jsonPath("$.total").isEqualTo(1)
.jsonPath("$.items[0].metadata.name").isEqualTo("test");
verify(singlePageFinder).list(eq(0), eq(10), any(), any());
} |
@Override
public String nameForSetterMethod(MapperConfig<?> config,
AnnotatedMethod method,
String defaultName) {
if (method == null) {
return defaultName;
} else if (method.getDeclaringClass().isAnnotationPresent(JsonSnakeCase.class)) {
return snakeCase.nameForSetterMethod(config, method, defaultName);
}
return super.nameForSetterMethod(config, method, defaultName);
} | @Test
void nameForSetterMethodWorksWithNullField() {
final MapperConfig<?> mapperConfig = mock(MapperConfig.class);
final String name = strategy.nameForSetterMethod(mapperConfig, null, "defaultName");
assertThat(name).isEqualTo("defaultName");
} |
public void setName(String name) throws IllegalStateException {
if (name != null && name.equals(this.name)) {
return; // idempotent naming
}
if (this.name == null
|| CoreConstants.DEFAULT_CONTEXT_NAME.equals(this.name)) {
this.name = name;
} else {
throw new IllegalStateException("Context has been already given a name");
}
} | @Test
public void renameDefault() {
context.setName(CoreConstants.DEFAULT_CONTEXT_NAME);
context.setName("hello");
} |
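A usage sketch of the naming contract above, assuming a ContextBase-derived context such as logback's LoggerContext (hypothetical calling code, not from the test suite):

import ch.qos.logback.classic.LoggerContext;
import ch.qos.logback.core.CoreConstants;

public final class ContextNamingDemo {
    public static void main(String[] args) {
        LoggerContext context = new LoggerContext();         // name is still unset here
        context.setName(CoreConstants.DEFAULT_CONTEXT_NAME); // allowed: name was null
        context.setName("hello");                            // allowed: renaming away from the default
        context.setName("hello");                            // allowed: idempotent re-naming is a no-op
        context.setName("world");                            // throws IllegalStateException: already named
    }
}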
public Optional<DoFn.ProcessContinuation> run(
PartitionRecord partitionRecord,
ChangeStreamRecord record,
RestrictionTracker<StreamProgress, StreamProgress> tracker,
DoFn.OutputReceiver<KV<ByteString, ChangeStreamRecord>> receiver,
ManualWatermarkEstimator<Instant> watermarkEstimator,
BytesThroughputEstimator<KV<ByteString, ChangeStreamRecord>> throughputEstimator) {
if (record instanceof Heartbeat) {
Heartbeat heartbeat = (Heartbeat) record;
final Instant watermark = toJodaTime(heartbeat.getEstimatedLowWatermark());
// These will be filtered so the key doesn't really matter but the most logical thing to
// key a heartbeat by is the partition it corresponds to.
ByteString heartbeatKey =
Range.ByteStringRange.serializeToByteString(partitionRecord.getPartition());
KV<ByteString, ChangeStreamRecord> outputRecord = KV.of(heartbeatKey, heartbeat);
throughputEstimator.update(Instant.now(), outputRecord);
StreamProgress streamProgress =
new StreamProgress(
heartbeat.getChangeStreamContinuationToken(),
watermark,
throughputEstimator.get(),
Instant.now(),
true);
watermarkEstimator.setWatermark(watermark);
// If the tracker fails to claim the streamProgress, it most likely means the runner initiated
// a checkpoint. See {@link
// org.apache.beam.sdk.io.gcp.bigtable.changestreams.restriction.ReadChangeStreamPartitionProgressTracker}
// for more information regarding runner initiated checkpoints.
if (!tracker.tryClaim(streamProgress)) {
return Optional.of(DoFn.ProcessContinuation.stop());
}
metrics.incHeartbeatCount();
// We output heartbeats so that they are factored into throughput and can be used to
// autoscale. These will be filtered in a downstream step and never returned to users. This is
// to prevent autoscaler from scaling down when we have large tables with no throughput but
// we need enough workers to keep up with heartbeats.
// We are outputting elements with timestamp of 0 to prevent reliance on event time. This
// limits the ability to window on commit time of any data changes. It is still possible to
// window on processing time.
receiver.outputWithTimestamp(outputRecord, Instant.EPOCH);
} else if (record instanceof CloseStream) {
CloseStream closeStream = (CloseStream) record;
StreamProgress streamProgress = new StreamProgress(closeStream);
// If the tracker fails to claim the streamProgress, it most likely means the runner initiated
// a checkpoint. See {@link
// org.apache.beam.sdk.io.gcp.bigtable.changestreams.restriction.ReadChangeStreamPartitionProgressTracker}
// for more information regarding runner initiated checkpoints.
if (!tracker.tryClaim(streamProgress)) {
return Optional.of(DoFn.ProcessContinuation.stop());
}
metrics.incClosestreamCount();
return Optional.of(DoFn.ProcessContinuation.resume());
} else if (record instanceof ChangeStreamMutation) {
ChangeStreamMutation changeStreamMutation = (ChangeStreamMutation) record;
final Instant watermark = toJodaTime(changeStreamMutation.getEstimatedLowWatermark());
watermarkEstimator.setWatermark(watermark);
// Build a new StreamProgress with the continuation token to be claimed.
ChangeStreamContinuationToken changeStreamContinuationToken =
ChangeStreamContinuationToken.create(
Range.ByteStringRange.create(
partitionRecord.getPartition().getStart(),
partitionRecord.getPartition().getEnd()),
changeStreamMutation.getToken());
KV<ByteString, ChangeStreamRecord> outputRecord =
KV.of(changeStreamMutation.getRowKey(), changeStreamMutation);
throughputEstimator.update(Instant.now(), outputRecord);
StreamProgress streamProgress =
new StreamProgress(
changeStreamContinuationToken,
watermark,
throughputEstimator.get(),
Instant.now(),
false);
// If the tracker fails to claim the streamProgress, it most likely means the runner initiated
// a checkpoint. See ReadChangeStreamPartitionProgressTracker for more information regarding
// runner initiated checkpoints.
if (!tracker.tryClaim(streamProgress)) {
return Optional.of(DoFn.ProcessContinuation.stop());
}
if (changeStreamMutation.getType() == ChangeStreamMutation.MutationType.GARBAGE_COLLECTION) {
metrics.incChangeStreamMutationGcCounter();
} else if (changeStreamMutation.getType() == ChangeStreamMutation.MutationType.USER) {
metrics.incChangeStreamMutationUserCounter();
}
Instant delay = toJodaTime(changeStreamMutation.getCommitTimestamp());
metrics.updateProcessingDelayFromCommitTimestamp(
Instant.now().getMillis() - delay.getMillis());
// We are outputting elements with timestamp of 0 to prevent reliance on event time. This
// limits the ability to window on commit time of any data changes. It is still possible to
// window on processing time.
receiver.outputWithTimestamp(outputRecord, Instant.EPOCH);
} else {
LOG.warn(
"RCSP {}: Invalid response type", formatByteStringRange(partitionRecord.getPartition()));
}
return Optional.empty();
} | @Test
public void testChangeStreamMutationUser() {
ByteStringRange partition = ByteStringRange.create("", "");
when(partitionRecord.getPartition()).thenReturn(partition);
final Instant commitTimestamp = Instant.ofEpochMilli(1_000L);
final Instant lowWatermark = Instant.ofEpochMilli(500L);
ChangeStreamContinuationToken changeStreamContinuationToken =
ChangeStreamContinuationToken.create(ByteStringRange.create("", ""), "1234");
ChangeStreamMutation changeStreamMutation = Mockito.mock(ChangeStreamMutation.class);
Mockito.when(changeStreamMutation.getCommitTimestamp())
.thenReturn(toThreetenInstant(commitTimestamp));
Mockito.when(changeStreamMutation.getToken()).thenReturn("1234");
Mockito.when(changeStreamMutation.getEstimatedLowWatermark())
.thenReturn(toThreetenInstant(lowWatermark));
Mockito.when(changeStreamMutation.getType()).thenReturn(ChangeStreamMutation.MutationType.USER);
KV<ByteString, ChangeStreamRecord> record =
KV.of(changeStreamMutation.getRowKey(), changeStreamMutation);
final Optional<DoFn.ProcessContinuation> result =
action.run(
partitionRecord,
changeStreamMutation,
tracker,
receiver,
watermarkEstimator,
throughputEstimator);
assertFalse(result.isPresent());
verify(metrics).incChangeStreamMutationUserCounter();
verify(metrics, never()).incChangeStreamMutationGcCounter();
StreamProgress streamProgress =
new StreamProgress(
changeStreamContinuationToken,
lowWatermark,
BigDecimal.valueOf(1000),
Instant.now(),
false);
verify(tracker).tryClaim(streamProgressArgumentCaptor.capture());
assertEquals(
streamProgress.getCurrentToken(),
streamProgressArgumentCaptor.getValue().getCurrentToken());
assertEquals(
streamProgress.getThroughputEstimate(),
streamProgressArgumentCaptor.getValue().getThroughputEstimate());
assertEquals(
streamProgress.getEstimatedLowWatermark(),
streamProgressArgumentCaptor.getValue().getEstimatedLowWatermark());
assertEquals(
streamProgress.isHeartbeat(), streamProgressArgumentCaptor.getValue().isHeartbeat());
verify(receiver).outputWithTimestamp(eq(record), eq(Instant.EPOCH));
verify(watermarkEstimator).setWatermark(eq(lowWatermark));
verify(throughputEstimator).update(any(), eq(record));
} |
@VisibleForTesting
static Optional<Dependency> parseDependency(String line) {
Matcher dependencyMatcher = SHADE_INCLUDE_MODULE_PATTERN.matcher(line);
if (!dependencyMatcher.find()) {
return Optional.empty();
}
return Optional.of(
Dependency.create(
dependencyMatcher.group("groupId"),
dependencyMatcher.group("artifactId"),
dependencyMatcher.group("version"),
dependencyMatcher.group("classifier")));
} | @Test
void testLineParsingVersion() {
assertThat(
ShadeParser.parseDependency(
"Including external:dependency1:jar:1.0 in the shaded jar."))
.hasValueSatisfying(
dependency -> assertThat(dependency.getVersion()).isEqualTo("1.0"));
} |
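SHADE_INCLUDE_MODULE_PATTERN itself is not shown above; the following is a hypothetical reconstruction with named capture groups that matches the line format used in the test (the real pattern may differ):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public final class ShadeLineDemo {
    // Assumed line shape only: groupId:artifactId:jar[:classifier]:version
    private static final Pattern INCLUDE_PATTERN = Pattern.compile(
            "Including (?<groupId>[^:\\s]+):(?<artifactId>[^:\\s]+):jar:"
                    + "(?:(?<classifier>[^:\\s]+):)?(?<version>[^:\\s]+) in the shaded jar");

    public static void main(String[] args) {
        Matcher m = INCLUDE_PATTERN.matcher("Including external:dependency1:jar:1.0 in the shaded jar.");
        if (m.find()) {
            System.out.println(m.group("groupId"));    // external
            System.out.println(m.group("artifactId")); // dependency1
            System.out.println(m.group("version"));    // 1.0
            System.out.println(m.group("classifier")); // null (no classifier on this line)
        }
    }
}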
@Override
public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWithSourceInfo,
List<String> partNames, boolean areAllPartsFound) throws MetaException {
checkStatisticsList(colStatsWithSourceInfo);
ColumnStatisticsObj statsObj = null;
String colType;
String colName = null;
// check if all the ColumnStatisticsObjs contain stats and all the ndv are
// bitvectors
boolean doAllPartitionContainStats = partNames.size() == colStatsWithSourceInfo.size();
NumDistinctValueEstimator ndvEstimator = null;
boolean areAllNDVEstimatorsMergeable = true;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
if (statsObj == null) {
colName = cso.getColName();
colType = cso.getColType();
statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType,
cso.getStatsData().getSetField());
LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName,
doAllPartitionContainStats);
}
DecimalColumnStatsDataInspector columnStatsData = decimalInspectorFromStats(cso);
// check if we can merge NDV estimators
if (columnStatsData.getNdvEstimator() == null) {
areAllNDVEstimatorsMergeable = false;
break;
} else {
NumDistinctValueEstimator estimator = columnStatsData.getNdvEstimator();
if (ndvEstimator == null) {
ndvEstimator = estimator;
} else {
if (!ndvEstimator.canMerge(estimator)) {
areAllNDVEstimatorsMergeable = false;
break;
}
}
}
}
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
LOG.debug("all of the bit vectors can merge for {} is {}", colName, areAllNDVEstimatorsMergeable);
ColumnStatisticsData columnStatisticsData = initColumnStatisticsData();
if (doAllPartitionContainStats || colStatsWithSourceInfo.size() < 2) {
DecimalColumnStatsDataInspector aggregateData = null;
long lowerBound = 0;
long higherBound = 0;
double densityAvgSum = 0.0;
DecimalColumnStatsMerger merger = new DecimalColumnStatsMerger();
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
DecimalColumnStatsDataInspector newData = decimalInspectorFromStats(cso);
lowerBound = Math.max(lowerBound, newData.getNumDVs());
higherBound += newData.getNumDVs();
if (newData.isSetLowValue() && newData.isSetHighValue()) {
densityAvgSum += (MetaStoreServerUtils.decimalToDouble(newData.getHighValue()) - MetaStoreServerUtils
.decimalToDouble(newData.getLowValue())) / newData.getNumDVs();
}
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
aggregateData.setLowValue(merger.mergeLowValue(
merger.getLowValue(aggregateData), merger.getLowValue(newData)));
aggregateData.setHighValue(merger.mergeHighValue(
merger.getHighValue(aggregateData), merger.getHighValue(newData)));
aggregateData.setNumNulls(merger.mergeNumNulls(aggregateData.getNumNulls(), newData.getNumNulls()));
aggregateData.setNumDVs(merger.mergeNumDVs(aggregateData.getNumDVs(), newData.getNumDVs()));
}
}
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
// if all the ColumnStatisticsObjs contain bitvectors, we do not need to
// use uniform distribution assumption because we can merge bitvectors
// to get a good estimation.
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
} else {
long estimation;
if (useDensityFunctionForNDVEstimation && aggregateData != null
&& aggregateData.isSetLowValue() && aggregateData.isSetHighValue()) {
// We have estimation, lowerbound and higherbound. We use estimation
// if it is between lowerbound and higherbound.
double densityAvg = densityAvgSum / partNames.size();
estimation = (long) ((MetaStoreServerUtils.decimalToDouble(aggregateData.getHighValue()) - MetaStoreServerUtils
.decimalToDouble(aggregateData.getLowValue())) / densityAvg);
if (estimation < lowerBound) {
estimation = lowerBound;
} else if (estimation > higherBound) {
estimation = higherBound;
}
} else {
estimation = (long) (lowerBound + (higherBound - lowerBound) * ndvTuner);
}
aggregateData.setNumDVs(estimation);
}
columnStatisticsData.setDecimalStats(aggregateData);
} else {
// TODO: bail out if missing stats are over a certain threshold
// we need extrapolation
LOG.debug("start extrapolation for {}", colName);
Map<String, Integer> indexMap = new HashMap<>();
for (int index = 0; index < partNames.size(); index++) {
indexMap.put(partNames.get(index), index);
}
Map<String, Double> adjustedIndexMap = new HashMap<>();
Map<String, ColumnStatisticsData> adjustedStatsMap = new HashMap<>();
// while we scan the css, we also get the densityAvg, lowerbound and
// higherbound when useDensityFunctionForNDVEstimation is true.
double densityAvgSum = 0.0;
if (!areAllNDVEstimatorsMergeable) {
// if not every partition uses bitvector for ndv, we just fall back to
// the traditional extrapolation methods.
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
DecimalColumnStatsData newData = cso.getStatsData().getDecimalStats();
if (useDensityFunctionForNDVEstimation && newData.isSetLowValue() && newData.isSetHighValue()) {
densityAvgSum += (MetaStoreServerUtils.decimalToDouble(newData.getHighValue()) - MetaStoreServerUtils
.decimalToDouble(newData.getLowValue())) / newData.getNumDVs();
}
adjustedIndexMap.put(partName, (double) indexMap.get(partName));
adjustedStatsMap.put(partName, cso.getStatsData());
}
} else {
// we first merge all the adjacent bitvectors that we could merge and
// derive new partition names and index.
StringBuilder pseudoPartName = new StringBuilder();
double pseudoIndexSum = 0;
int length = 0;
int curIndex = -1;
DecimalColumnStatsDataInspector aggregateData = null;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
DecimalColumnStatsDataInspector newData = decimalInspectorFromStats(cso);
// newData.isSetBitVectors() should be true for sure because we
// already checked it before.
if (indexMap.get(partName) != curIndex) {
// There is bitvector, but it is not adjacent to the previous ones.
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setDecimalStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
if (useDensityFunctionForNDVEstimation) {
densityAvgSum += (MetaStoreServerUtils.decimalToDouble(aggregateData.getHighValue()) - MetaStoreServerUtils
.decimalToDouble(aggregateData.getLowValue())) / aggregateData.getNumDVs();
}
// reset everything
pseudoPartName = new StringBuilder();
pseudoIndexSum = 0;
length = 0;
ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
aggregateData = null;
}
curIndex = indexMap.get(partName);
pseudoPartName.append(partName);
pseudoIndexSum += curIndex;
length++;
curIndex++;
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
if (MetaStoreServerUtils.decimalToDouble(aggregateData.getLowValue()) < MetaStoreServerUtils
.decimalToDouble(newData.getLowValue())) {
aggregateData.setLowValue(aggregateData.getLowValue());
} else {
aggregateData.setLowValue(newData.getLowValue());
}
if (MetaStoreServerUtils.decimalToDouble(aggregateData.getHighValue()) > MetaStoreServerUtils
.decimalToDouble(newData.getHighValue())) {
aggregateData.setHighValue(aggregateData.getHighValue());
} else {
aggregateData.setHighValue(newData.getHighValue());
}
aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
}
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setDecimalStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
if (useDensityFunctionForNDVEstimation) {
densityAvgSum += (MetaStoreServerUtils.decimalToDouble(aggregateData.getHighValue()) - MetaStoreServerUtils
.decimalToDouble(aggregateData.getLowValue())) / aggregateData.getNumDVs();
}
}
}
extrapolate(columnStatisticsData, partNames.size(), colStatsWithSourceInfo.size(),
adjustedIndexMap, adjustedStatsMap, densityAvgSum / adjustedStatsMap.size());
}
LOG.debug(
"Ndv estimation for {} is {}. # of partitions requested: {}. # of partitions found: {}",
colName, columnStatisticsData.getDecimalStats().getNumDVs(), partNames.size(),
colStatsWithSourceInfo.size());
KllHistogramEstimator mergedKllHistogramEstimator = mergeHistograms(colStatsWithSourceInfo);
if (mergedKllHistogramEstimator != null) {
columnStatisticsData.getDecimalStats().setHistogram(mergedKllHistogramEstimator.serialize());
}
statsObj.setStatsData(columnStatisticsData);
return statsObj;
} | @Test
public void testAggregateSingleStatWhenNullValues() throws MetaException {
List<String> partitions = Collections.singletonList("part1");
ColumnStatisticsData data1 = new ColStatsBuilder<>(Decimal.class).numNulls(1).numDVs(2).build();
List<ColStatsObjWithSourceInfo> statsList =
Collections.singletonList(createStatsWithInfo(data1, TABLE, COL, partitions.get(0)));
DecimalColumnStatsAggregator aggregator = new DecimalColumnStatsAggregator();
ColumnStatisticsObj computedStatsObj = aggregator.aggregate(statsList, partitions, true);
assertEqualStatistics(data1, computedStatsObj.getStatsData());
aggregator.useDensityFunctionForNDVEstimation = true;
computedStatsObj = aggregator.aggregate(statsList, partitions, true);
assertEqualStatistics(data1, computedStatsObj.getStatsData());
aggregator.useDensityFunctionForNDVEstimation = false;
aggregator.ndvTuner = 1;
// ndv tuner does not have any effect because min numDVs and max numDVs coincide (we have a single stats)
computedStatsObj = aggregator.aggregate(statsList, partitions, true);
assertEqualStatistics(data1, computedStatsObj.getStatsData());
} |
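When the NDV bit vectors cannot be merged, the estimate above falls back to plain arithmetic; a standalone sketch of just that math, with illustrative numbers (not taken from the test):

public final class NdvEstimationDemo {
    public static void main(String[] args) {
        // lowerBound = max of per-partition NDVs; higherBound = their sum.
        long lowerBound = 40;
        long higherBound = 100;
        // Density path: estimate = (high - low) / avg density, clamped into [lowerBound, higherBound].
        double low = 0.0, high = 1000.0, densityAvg = 8.0;
        long estimation = (long) ((high - low) / densityAvg);                    // 125
        estimation = Math.min(Math.max(estimation, lowerBound), higherBound);    // clamped to 100
        System.out.println(estimation);
        // Tuner path: linear interpolation between the bounds, ndvTuner in [0, 1].
        double ndvTuner = 0.5;
        System.out.println((long) (lowerBound + (higherBound - lowerBound) * ndvTuner)); // 70
    }
}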
public static OptExpression bind(Pattern pattern, GroupExpression groupExpression) {
Binder binder = new Binder(pattern, groupExpression);
return binder.next();
} | @Test
public void testBinderTop() {
OptExpression expr = OptExpression.create(new MockOperator(OperatorType.LOGICAL_JOIN),
new OptExpression(new MockOperator(OperatorType.LOGICAL_OLAP_SCAN)),
new OptExpression(new MockOperator(OperatorType.LOGICAL_OLAP_SCAN)));
Pattern pattern = Pattern.create(OperatorType.LOGICAL_JOIN);
Memo memo = new Memo();
OptExpression result = Binder.bind(pattern, memo.init(expr));
assertEquals(OperatorType.LOGICAL_JOIN, result.getOp().getOpType());
} |
public EventJournalConfig setEnabled(boolean enabled) {
this.enabled = enabled;
return this;
} | @Test(expected = UnsupportedOperationException.class)
public void testReadOnlyClass_setEnabled_throwsException() {
getReadOnlyConfig().setEnabled(false);
} |
@Override
public boolean isPurgeable(String tableNameWithType, SegmentZKMetadata segmentZKMetadata) {
long endTimeMs = segmentZKMetadata.getEndTimeMs();
// Check that the end time is between 1971 and 2071
if (!TimeUtils.timeValueInValidRange(endTimeMs)) {
LOGGER.warn("Segment: {} of table: {} has invalid end time in millis: {}", segmentZKMetadata.getSegmentName(),
tableNameWithType, endTimeMs);
return false;
}
return System.currentTimeMillis() - endTimeMs > _retentionMs;
} | @Test
public void testTimeRetention() {
String tableNameWithType = "myTable_OFFLINE";
TimeRetentionStrategy retentionStrategy = new TimeRetentionStrategy(TimeUnit.DAYS, 30L);
SegmentZKMetadata segmentZKMetadata = new SegmentZKMetadata("mySegment");
// Without setting time unit or end time, should not throw exception
assertFalse(retentionStrategy.isPurgeable(tableNameWithType, segmentZKMetadata));
segmentZKMetadata.setTimeUnit(TimeUnit.DAYS);
assertFalse(retentionStrategy.isPurgeable(tableNameWithType, segmentZKMetadata));
// Set end time to Jan 2nd, 1970 (not purgeable due to bogus timestamp)
segmentZKMetadata.setEndTime(1L);
assertFalse(retentionStrategy.isPurgeable(tableNameWithType, segmentZKMetadata));
// Set end time to today
long today = TimeUnit.MILLISECONDS.toDays(System.currentTimeMillis());
segmentZKMetadata.setEndTime(today);
assertFalse(retentionStrategy.isPurgeable(tableNameWithType, segmentZKMetadata));
// Set end time to two weeks ago
segmentZKMetadata.setEndTime(today - 14);
assertFalse(retentionStrategy.isPurgeable(tableNameWithType, segmentZKMetadata));
// Set end time to two months ago (purgeable due to being past the retention period)
segmentZKMetadata.setEndTime(today - 60);
assertTrue(retentionStrategy.isPurgeable(tableNameWithType, segmentZKMetadata));
// Set end time to 200 years in the future (not purgeable due to bogus timestamp)
segmentZKMetadata.setEndTime(today + (365 * 200));
assertFalse(retentionStrategy.isPurgeable(tableNameWithType, segmentZKMetadata));
} |
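The purge decision above is a single millisecond comparison; a worked sketch of the arithmetic the test drives, mirroring its "two months ago" case:

import java.util.concurrent.TimeUnit;

public final class RetentionMathDemo {
    public static void main(String[] args) {
        long retentionMs = TimeUnit.DAYS.toMillis(30); // TimeRetentionStrategy(TimeUnit.DAYS, 30L)
        long nowMs = System.currentTimeMillis();
        long todayDays = TimeUnit.MILLISECONDS.toDays(nowMs);
        // The segment end time is stored in DAYS and resolved to millis for the comparison.
        long endTimeMs = TimeUnit.DAYS.toMillis(todayDays - 60); // ended ~two months ago
        System.out.println(nowMs - endTimeMs > retentionMs);     // true -> purgeable
    }
}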
@Nonnull
public static <T> Sink<T> noop() {
return fromProcessor("noop", preferLocalParallelismOne(noopP()));
} | @Test
public void noop() {
// Given
populateList(srcList);
// When
Sink<Object> sink = Sinks.noop();
// Then
p.readFrom(Sources.list(srcName)).writeTo(sink);
execute();
// works without error
} |
@Override
public String info(String name) {
Object struct = db.get(name);
if (isNull(struct))
throw new IllegalStateException(format("DB structure with name [%s] does not exist", name));
if (struct instanceof Set)
return format("%s - Set - %d", name, ((Set) struct).size());
else if (struct instanceof List)
return format("%s - List - %d", name, ((List) struct).size());
else if (struct instanceof Map)
return format("%s - Map - %d", name, ((Map) struct).size());
else
return format("%s - %s", name, struct.getClass().getSimpleName());
} | @Test
void cantGetInfoFromNonexistentDBStructureName() {
Assertions.assertThrows(IllegalStateException.class, () -> db.info(TEST));
} |
public static TriggerUuids toTriggerUuids(Workflow workflow) {
TriggerUuids.TriggerUuidsBuilder builder = TriggerUuids.builder();
if (workflow.getTimeTriggers() != null && !workflow.getTimeTriggers().isEmpty()) {
builder.timeTriggerUuid(
IdHelper.createUuid(workflow.getTimeTriggers().toString()).toString());
}
if (workflow.getSignalTriggers() != null && !workflow.getSignalTriggers().isEmpty()) {
builder.signalTriggerUuids(
IntStream.range(0, workflow.getSignalTriggers().size())
.boxed()
.collect(
Collectors.toMap(
e ->
IdHelper.createUuid(workflow.getSignalTriggers().get(e).toString())
.toString(),
Function.identity())));
}
return builder.build();
} | @Test
public void testToTriggerUuids() {
Workflow workflow =
Workflow.builder()
.id("test-wf-id")
.timeTriggers(Collections.singletonList(new CronTimeTrigger()))
.signalTriggers(Collections.singletonList(new SignalTrigger()))
.build();
TriggerUuids triggerUuids = IdHelper.toTriggerUuids(workflow);
Assert.assertEquals("399e992f-bca3-3cf1-9e1c-f04e7f9ee6f4", triggerUuids.getTimeTriggerUuid());
Assert.assertEquals(
Collections.singletonMap("ae3fd022-76e8-3322-b657-0db619b4575f", 0),
triggerUuids.getSignalTriggerUuids());
} |
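The expected values in the test are version-3 (name-based, MD5) UUIDs. A sketch of that style of derivation with java.util.UUID, assuming IdHelper.createUuid uses a name-based scheme; its exact namespace handling is not shown, so these outputs need not match the test's constants:

import java.nio.charset.StandardCharsets;
import java.util.UUID;

public final class NameUuidDemo {
    public static void main(String[] args) {
        // The same trigger definition always yields the same UUID, the property the builder relies on.
        UUID a = UUID.nameUUIDFromBytes("CronTimeTrigger(...)".getBytes(StandardCharsets.UTF_8));
        UUID b = UUID.nameUUIDFromBytes("CronTimeTrigger(...)".getBytes(StandardCharsets.UTF_8));
        System.out.println(a.equals(b)); // true: deterministic
        System.out.println(a.version()); // 3: name-based, MD5
    }
}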
public static NetworkEndpoint forHostname(String hostname) {
checkArgument(
!InetAddresses.isInetAddress(hostname), "Expected hostname, got IP address '%s'", hostname);
return NetworkEndpoint.newBuilder()
.setType(NetworkEndpoint.Type.HOSTNAME)
.setHostname(Hostname.newBuilder().setName(hostname))
.build();
} | @Test
public void forHostname_withHostname_returnsHostnameNetworkEndpoint() {
assertThat(NetworkEndpointUtils.forHostname("localhost"))
.isEqualTo(
NetworkEndpoint.newBuilder()
.setType(NetworkEndpoint.Type.HOSTNAME)
.setHostname(Hostname.newBuilder().setName("localhost"))
.build());
} |
@Override
public int nrow() {
return m;
} | @Test
public void testNrows() {
System.out.println("nrow");
assertEquals(3, matrix.nrow());
} |
public void sort(String id1, SortDir dir1, String id2, SortDir dir2) {
Collections.sort(rows, new RowComparator(id1, dir1, id2, dir2));
} | @Test
public void sortAlphaAscNumberDesc() {
tm = unsortedDoubleTableModel();
verifyRowOrder("unsorted", tm, UNSORTED_IDS);
tm.sort(ALPHA, SortDir.ASC, NUMBER, SortDir.DESC);
verifyRowOrder("aand", tm, ROW_ORDER_AA_ND);
} |
public static <T> RemoteIterator<T> remoteIteratorFromSingleton(
@Nullable T singleton) {
return new SingletonIterator<>(singleton);
} | @Test
public void testSingletonStats() throws Throwable {
IOStatsInstance singleton = new IOStatsInstance();
RemoteIterator<IOStatsInstance> it
= remoteIteratorFromSingleton(singleton);
extractStatistics(it);
} |
public long timeOfLastReset() {
List<Status> statusList = sm.getCopyOfStatusList();
if (statusList == null)
return -1;
int len = statusList.size();
for (int i = len - 1; i >= 0; i--) {
Status s = statusList.get(i);
if (CoreConstants.RESET_MSG_PREFIX.equals(s.getMessage())) {
return s.getTimestamp();
}
}
return -1;
} | @Test
public void withoutResetsStatusUtilShouldReturnNotFound() {
context.getStatusManager().add(new InfoStatus("test", this));
assertEquals(-1, statusUtil.timeOfLastReset());
} |
public ClientChannelInfo findChannel(final String group, final String clientId) {
ConsumerGroupInfo consumerGroupInfo = this.consumerTable.get(group);
if (consumerGroupInfo != null) {
return consumerGroupInfo.findChannel(clientId);
}
return null;
} | @Test
public void findChannelTest() {
register();
final ClientChannelInfo consumerManagerChannel = consumerManager.findChannel(GROUP, CLIENT_ID);
Assertions.assertThat(consumerManagerChannel).isNotNull();
} |
public void isAssignableTo(Class<?> clazz) {
if (!clazz.isAssignableFrom(checkNotNull(actual))) {
failWithActual("expected to be assignable to", clazz.getName());
}
} | @Test
public void testIsAssignableTo_differentTypes() {
expectFailureWhenTestingThat(String.class).isAssignableTo(Exception.class);
assertFailureValue("expected to be assignable to", "java.lang.Exception");
} |
private synchronized boolean validateClientAcknowledgement(long h) {
if (h < 0) {
throw new IllegalArgumentException("Argument 'h' cannot be negative, but was: " + h);
}
if (h > MASK) {
throw new IllegalArgumentException("Argument 'h' cannot be larger than 2^32 -1, but was: " + h);
}
final long oldH = clientProcessedStanzas.get();
final Long lastUnackedX = unacknowledgedServerStanzas.isEmpty() ? null : unacknowledgedServerStanzas.getLast().x;
return validateClientAcknowledgement(h, oldH, lastUnackedX);
} | @Test
public void testValidateClientAcknowledgement_rollover_edgecase4() throws Exception
{
// Setup test fixture.
final long MAX = new BigInteger( "2" ).pow( 32 ).longValue() - 1;
final long h = 0;
final long oldH = MAX;
final Long lastUnackedX = 0L;
// Execute system under test.
final boolean result = StreamManager.validateClientAcknowledgement(h, oldH, lastUnackedX);
// Verify results.
assertTrue(result);
} |
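The three-argument validateClientAcknowledgement is not shown; one way to reason about the rollover edge case the test pins down is unsigned 32-bit distance arithmetic. A sketch under that assumption, not Openfire's actual implementation:

public final class AckRolloverDemo {
    private static final long MASK = 0xFFFFFFFFL; // h lives in [0, 2^32 - 1]

    // Valid iff h advances past oldH by no more than the newest unacked stanza does, modulo 2^32.
    static boolean isValidAck(long h, long oldH, Long lastUnackedX) {
        long advance = (h - oldH) & MASK;
        long limit = lastUnackedX == null ? 0 : (lastUnackedX - oldH) & MASK;
        return advance <= limit;
    }

    public static void main(String[] args) {
        long max = MASK; // 2^32 - 1
        System.out.println(isValidAck(0, max, 0L)); // true: the rollover edge case from the test above
        System.out.println(isValidAck(5, 3, 4L));   // false: acknowledges a stanza never sent
    }
}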
public Map<String, String> transform(Map<String, String> configs) {
return transform(null, configs);
} | @Test
public void testReplaceVariableWithTTL() {
// Execution
Map<String, String> props = new HashMap<>();
props.put(MY_KEY, "${test:testPath:testKeyWithTTL}");
props.put(CONFIG_RELOAD_ACTION_CONFIG, CONFIG_RELOAD_ACTION_NONE);
Map<String, String> result = configTransformer.transform(MY_CONNECTOR, props);
// Assertions
assertEquals(TEST_RESULT_WITH_TTL, result.get(MY_KEY));
} |
@Description("arc tangent of given fraction")
@ScalarFunction
@SqlType(StandardTypes.DOUBLE)
public static double atan2(@SqlType(StandardTypes.DOUBLE) double num1, @SqlType(StandardTypes.DOUBLE) double num2)
{
return Math.atan2(num1, num2);
} | @Test
public void testAtan2()
{
for (double doubleValue : DOUBLE_VALUES) {
assertFunction("atan2(" + doubleValue + ", " + doubleValue + ")", DOUBLE, Math.atan2(doubleValue, doubleValue));
assertFunction("atan2(REAL '" + (float) doubleValue + "', REAL '" + (float) doubleValue + "')", DOUBLE, Math.atan2((float) doubleValue, (float) doubleValue));
}
assertFunction("atan2(NULL, NULL)", DOUBLE, null);
assertFunction("atan2(1.0E0, NULL)", DOUBLE, null);
assertFunction("atan2(NULL, 1.0E0)", DOUBLE, null);
} |
@Override
public ChannelFuture writeData(final ChannelHandlerContext ctx, final int streamId, ByteBuf data, int padding,
final boolean endOfStream, ChannelPromise promise) {
promise = promise.unvoid();
final Http2Stream stream;
try {
stream = requireStream(streamId);
// Verify that the stream is in the appropriate state for sending DATA frames.
switch (stream.state()) {
case OPEN:
case HALF_CLOSED_REMOTE:
// Allowed sending DATA frames in these states.
break;
default:
throw new IllegalStateException("Stream " + stream.id() + " in unexpected state " + stream.state());
}
} catch (Throwable e) {
data.release();
return promise.setFailure(e);
}
// Hand control of the frame to the flow controller.
flowController().addFlowControlled(stream,
new FlowControlledData(stream, data, padding, endOfStream, promise));
return promise;
} | @Test
public void dataWriteShouldCreateHalfClosedStream() throws Exception {
writeAllFlowControlledFrames();
Http2Stream stream = createStream(STREAM_ID, false);
ByteBuf data = dummyData();
ChannelPromise promise = newPromise();
encoder.writeData(ctx, STREAM_ID, data.retain(), 0, true, promise);
assertTrue(promise.isSuccess());
verify(remoteFlow).addFlowControlled(eq(stream), any(FlowControlled.class));
verify(lifecycleManager).closeStreamLocal(stream, promise);
assertEquals(data.toString(UTF_8), writtenData.get(0));
data.release();
} |
public boolean eval(StructLike data) {
return new EvalVisitor().eval(data);
} | @Test
public void testIsNull() {
Evaluator evaluator = new Evaluator(STRUCT, isNull("z"));
assertThat(evaluator.eval(TestHelpers.Row.of(1, 2, null))).as("null is null").isTrue();
assertThat(evaluator.eval(TestHelpers.Row.of(1, 2, 3))).as("3 is not null").isFalse();
Evaluator structEvaluator = new Evaluator(STRUCT, isNull("s1.s2.s3.s4.i"));
assertThat(
structEvaluator.eval(
TestHelpers.Row.of(
1,
2,
3,
TestHelpers.Row.of(
TestHelpers.Row.of(TestHelpers.Row.of(TestHelpers.Row.of(3)))))))
.as("3 is not null")
.isFalse();
} |
static List<String> parseEtcResolverSearchDomains() throws IOException {
return parseEtcResolverSearchDomains(new File(ETC_RESOLV_CONF_FILE));
} | @Test
public void searchDomainsWithMultipleSearch(@TempDir Path tempDir) throws IOException {
File f = buildFile(tempDir, "search linecorp.local\n" +
"search squarecorp.local\n" +
"nameserver 127.0.0.2\n");
List<String> domains = UnixResolverDnsServerAddressStreamProvider.parseEtcResolverSearchDomains(f);
assertEquals(Arrays.asList("linecorp.local", "squarecorp.local"), domains);
} |
@Override public long get(long key1, long key2) {
assert key1 != unassignedSentinel : "get() called with key1 == nullKey1 (" + unassignedSentinel + ')';
return super.get0(key1, key2);
} | @Test(expected = AssertionError.class)
@RequireAssertEnabled
public void testGet_whenDisposed() {
hsa.dispose();
hsa.get(1, 1);
} |
public static URL getCorrectHostnamePort(String hostPort) {
return validateHostPortString(hostPort);
} | @Test
void testCorrectHostnamePort() throws Exception {
final URL url = new URL("http", "foo.com", 8080, "/index.html");
assertThat(NetUtils.getCorrectHostnamePort("foo.com:8080/index.html")).isEqualTo(url);
} |
@Override
public void receiveConfigInfo(String configInfo) {
if (StringUtils.isEmpty(configInfo)) {
return;
}
Properties properties = new Properties();
try {
properties.load(new StringReader(configInfo));
innerReceive(properties);
} catch (IOException e) {
LOGGER.error("load properties error:" + configInfo, e);
}
} | @Test
void testReceiveConfigInfo() {
final Deque<Properties> q2 = new ArrayDeque<Properties>();
PropertiesListener a = new PropertiesListener() {
@Override
public void innerReceive(Properties properties) {
q2.offer(properties);
}
};
a.receiveConfigInfo("foo=bar");
final Properties actual = q2.poll();
assertEquals(1, actual.size());
assertEquals("bar", actual.getProperty("foo"));
} |
public void verifyState(HttpRequest request, HttpResponse response, OAuth2IdentityProvider provider) {
verifyState(request, response, provider, DEFAULT_STATE_PARAMETER_NAME);
} | @Test
public void fail_with_AuthenticationException_when_cookie_is_missing() {
when(request.getCookies()).thenReturn(new Cookie[]{});
assertThatThrownBy(() -> underTest.verifyState(request, response, identityProvider))
.hasMessage("Cookie 'OAUTHSTATE' is missing")
.isInstanceOf(AuthenticationException.class)
.hasFieldOrPropertyWithValue("source", AuthenticationEvent.Source.oauth2(identityProvider));
} |
@Override
public int getTotalNumberOfRecords(Configuration conf) throws HiveJdbcDatabaseAccessException {
Connection conn = null;
PreparedStatement ps = null;
ResultSet rs = null;
try {
initializeDatabaseConnection(conf);
String tableName = getQualifiedTableName(conf);
// Always use JDBC_QUERY if available both for correctness and performance. JDBC_QUERY can be set by the user
// or the CBO including pushdown optimizations. SELECT all query should be used only when JDBC_QUERY is null.
String sql = firstNonNull(conf.get(Constants.JDBC_QUERY), selectAllFromTable(tableName));
String countQuery = "SELECT COUNT(*) FROM (" + sql + ") tmptable";
LOGGER.info("Query to execute is [{}]", countQuery);
conn = dbcpDataSource.getConnection();
ps = conn.prepareStatement(countQuery);
rs = ps.executeQuery();
if (rs.next()) {
return rs.getInt(1);
}
else {
LOGGER.warn("The count query {} did not return any results.", countQuery);
throw new HiveJdbcDatabaseAccessException("Count query did not return any results.");
}
}
catch (HiveJdbcDatabaseAccessException he) {
throw he;
}
catch (Exception e) {
LOGGER.error("Caught exception while trying to get the number of records: " + e.getMessage(), e);
throw new HiveJdbcDatabaseAccessException(e);
}
finally {
cleanupResources(conn, ps, rs);
}
} | @Test
public void testGetTotalNumberOfRecords_noRecords() throws HiveJdbcDatabaseAccessException {
Configuration conf = buildConfiguration();
conf.set(JdbcStorageConfig.QUERY.getPropertyName(), "select * from test_strategy where strategy_id = '25'");
DatabaseAccessor accessor = DatabaseAccessorFactory.getAccessor(conf);
int numRecords = accessor.getTotalNumberOfRecords(conf);
assertThat(numRecords, is(equalTo(0)));
} |
@Override
protected void onStateExpiry(UUID sessionId, TransportProtos.SessionInfoProto sessionInfo) {
log.debug("Session with id: [{}] has expired due to last activity time.", sessionId);
SessionMetaData expiredSession = sessions.remove(sessionId);
if (expiredSession != null) {
deregisterSession(sessionInfo);
process(sessionInfo, SESSION_EVENT_MSG_CLOSED, null);
expiredSession.getListener().onRemoteSessionCloseCommand(sessionId, SESSION_EXPIRED_NOTIFICATION_PROTO);
}
} | @Test
void givenSessionDoesNotExist_whenOnStateExpiryCalled_thenShouldNotPerformExpirationActions() {
// GIVEN
TransportProtos.SessionInfoProto sessionInfo = TransportProtos.SessionInfoProto.newBuilder()
.setSessionIdMSB(SESSION_ID.getMostSignificantBits())
.setSessionIdLSB(SESSION_ID.getLeastSignificantBits())
.build();
doCallRealMethod().when(transportServiceMock).onStateExpiry(SESSION_ID, sessionInfo);
// WHEN
transportServiceMock.onStateExpiry(SESSION_ID, sessionInfo);
// THEN
assertThat(sessions.containsKey(SESSION_ID)).isFalse();
verify(transportServiceMock, never()).deregisterSession(sessionInfo);
verify(transportServiceMock, never()).process(sessionInfo, SESSION_EVENT_MSG_CLOSED, null);
} |
@Override
public boolean isPluginOfType(final String extension, String pluginId) {
return goPluginOSGiFramework.hasReferenceFor(GoPlugin.class, pluginId, extension);
} | @Test
void shouldSayPluginIsOfGivenExtensionTypeWhenReferenceIsFound() {
String pluginId = "plugin-id";
String extensionType = "sample-extension";
GoPluginIdentifier pluginIdentifier = new GoPluginIdentifier(extensionType, List.of("1.0"));
final GoPlugin goPlugin = mock(GoPlugin.class);
final GoPluginDescriptor descriptor = mock(GoPluginDescriptor.class);
when(goPluginOSGiFramework.hasReferenceFor(GoPlugin.class, pluginId, extensionType)).thenReturn(true);
lenient().doAnswer(invocationOnMock -> {
ActionWithReturn<GoPlugin, GoPluginApiResponse> action = (ActionWithReturn<GoPlugin, GoPluginApiResponse>) invocationOnMock.getArguments()[2];
return action.execute(goPlugin, descriptor);
}).when(goPluginOSGiFramework).doOn(eq(GoPlugin.class), eq(pluginId), eq(extensionType), any(ActionWithReturn.class));
lenient().when(goPlugin.pluginIdentifier()).thenReturn(pluginIdentifier);
DefaultPluginManager pluginManager = new DefaultPluginManager(monitor, registry, goPluginOSGiFramework, jarChangeListener, pluginRequestProcessorRegistry, systemEnvironment, pluginLoader);
assertThat(pluginManager.isPluginOfType(extensionType, pluginId)).isTrue();
} |
@Override
public DescriptiveUrlBag toUrl(final Path file) {
final DescriptiveUrlBag list = new DefaultUrlProvider(session.getHost()).toUrl(file);
if(file.isFile()) {
// Authenticated browser download using cookie-based Google account authentication in conjunction with ACL
list.add(new DescriptiveUrl(URI.create(String.format("https://storage.cloud.google.com%s",
URIEncoder.encode(file.getAbsolute()))), DescriptiveUrl.Type.authenticated,
MessageFormat.format(LocaleFactory.localizedString("{0} URL"), LocaleFactory.localizedString("Authenticated"))));
// Website configuration
final Distribution distribution = new Distribution(Distribution.DOWNLOAD, URI.create(String.format("%s://%s.%s",
Distribution.DOWNLOAD.getScheme(), containerService.getContainer(file).getName(), session.getHost().getProtocol().getDefaultHostname())),
false);
distribution.setUrl(URI.create(String.format("%s://%s.%s", Distribution.DOWNLOAD.getScheme(), containerService.getContainer(file).getName(),
session.getHost().getProtocol().getDefaultHostname())));
list.addAll(new DistributionUrlProvider(distribution).toUrl(file));
}
// gsutil URI
list.add(new DescriptiveUrl(URI.create(String.format("gs://%s%s",
containerService.getContainer(file).getName(),
file.isRoot() ? Path.DELIMITER : containerService.isContainer(file) ? Path.DELIMITER : String.format("/%s", URIEncoder.encode(containerService.getKey(file))))),
DescriptiveUrl.Type.provider,
MessageFormat.format(LocaleFactory.localizedString("{0} URL"), session.getHost().getProtocol().getName())));
return list;
} | @Test
public void testGet() {
assertEquals("https://storage.cloud.google.com/c/f", new GoogleStorageUrlProvider(session).toUrl(
new Path("/c/f", EnumSet.of(Path.Type.file))).find(DescriptiveUrl.Type.authenticated).getUrl());
} |
public static PDImageXObject createFromImage(PDDocument document, BufferedImage image)
throws IOException
{
if (isGrayImage(image))
{
return createFromGrayImage(image, document);
}
// We try to encode the image with predictor
if (USE_PREDICTOR_ENCODER)
{
PDImageXObject pdImageXObject = new PredictorEncoder(document, image).encode();
if (pdImageXObject != null)
{
if (pdImageXObject.getColorSpace() == PDDeviceRGB.INSTANCE &&
pdImageXObject.getBitsPerComponent() < 16 &&
image.getWidth() * image.getHeight() <= 50 * 50)
{
// also create classic compressed image, compare sizes
PDImageXObject pdImageXObjectClassic = createFromRGBImage(image, document);
if (pdImageXObjectClassic.getCOSObject().getLength() <
pdImageXObject.getCOSObject().getLength())
{
pdImageXObject.getCOSObject().close();
return pdImageXObjectClassic;
}
else
{
pdImageXObjectClassic.getCOSObject().close();
}
}
return pdImageXObject;
}
}
// Fallback: We export the image as 8-bit sRGB and might lose color information
return createFromRGBImage(image, document);
} | @Test
void testCreateLosslessFromImageINT_ARGB() throws IOException
{
PDDocument document = new PDDocument();
BufferedImage image = ImageIO.read(this.getClass().getResourceAsStream("png.png"));
// create an ARGB image
int w = image.getWidth();
int h = image.getHeight();
BufferedImage argbImage = new BufferedImage(w, h, BufferedImage.TYPE_INT_ARGB);
Graphics ag = argbImage.getGraphics();
ag.drawImage(image, 0, 0, null);
ag.dispose();
for (int x = 0; x < argbImage.getWidth(); ++x)
{
for (int y = 0; y < argbImage.getHeight(); ++y)
{
argbImage.setRGB(x, y, (argbImage.getRGB(x, y) & 0xFFFFFF) | ((y / 10 * 10) << 24));
}
}
PDImageXObject ximage = LosslessFactory.createFromImage(document, argbImage);
validate(ximage, 8, argbImage.getWidth(), argbImage.getHeight(), "png", PDDeviceRGB.INSTANCE.getName());
checkIdent(argbImage, ximage.getImage());
checkIdentRGB(argbImage, ximage.getOpaqueImage(null, 1));
assertNotNull(ximage.getSoftMask());
validate(ximage.getSoftMask(), 8, argbImage.getWidth(), argbImage.getHeight(), "png", PDDeviceGray.INSTANCE.getName());
assertTrue(colorCount(ximage.getSoftMask().getImage()) > image.getHeight() / 10);
doWritePDF(document, ximage, TESTRESULTSDIR, "intargb.pdf");
} |
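For context, a typical end-to-end use of the factory above: embed a BufferedImage losslessly and draw it on a page (standard PDFBox API; the file paths are placeholders):

import java.awt.image.BufferedImage;
import java.io.File;
import java.io.IOException;
import javax.imageio.ImageIO;
import org.apache.pdfbox.pdmodel.PDDocument;
import org.apache.pdfbox.pdmodel.PDPage;
import org.apache.pdfbox.pdmodel.PDPageContentStream;
import org.apache.pdfbox.pdmodel.graphics.image.LosslessFactory;
import org.apache.pdfbox.pdmodel.graphics.image.PDImageXObject;

public final class LosslessEmbedDemo {
    public static void main(String[] args) throws IOException {
        BufferedImage image = ImageIO.read(new File("input.png")); // placeholder path
        try (PDDocument document = new PDDocument()) {
            PDPage page = new PDPage();
            document.addPage(page);
            // Dispatches to gray/predictor/RGB encoding as in createFromImage above.
            PDImageXObject ximage = LosslessFactory.createFromImage(document, image);
            try (PDPageContentStream contents = new PDPageContentStream(document, page)) {
                contents.drawImage(ximage, 50, 50);
            }
            document.save(new File("output.pdf")); // placeholder path
        }
    }
}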
public static Object project(Schema source, Object record, Schema target) throws SchemaProjectorException {
checkMaybeCompatible(source, target);
if (source.isOptional() && !target.isOptional()) {
if (target.defaultValue() != null) {
if (record != null) {
return projectRequiredSchema(source, record, target);
} else {
return target.defaultValue();
}
} else {
throw new SchemaProjectorException("Writer schema is optional, however, target schema does not provide a default value.");
}
} else {
if (record != null) {
return projectRequiredSchema(source, record, target);
} else {
return null;
}
}
} | @Test
public void testProjectMissingDefaultValuedStructField() {
final Schema source = SchemaBuilder.struct().build();
final Schema target = SchemaBuilder.struct().field("id", SchemaBuilder.int64().defaultValue(42L).build()).build();
assertEquals(42L, (long) ((Struct) SchemaProjector.project(source, new Struct(source), target)).getInt64("id"));
} |
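The null-record/default-value branch of project(...) is easiest to see on a primitive schema; a small sketch using the public Connect API (the 42 default is illustrative):

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.SchemaProjector;

public final class ProjectorDemo {
    public static void main(String[] args) {
        Schema source = Schema.OPTIONAL_INT32_SCHEMA;                   // optional writer schema
        Schema target = SchemaBuilder.int32().defaultValue(42).build(); // required, with a default
        // record == null and the target has a default -> the default fills in, per the first branch above.
        System.out.println(SchemaProjector.project(source, null, target)); // 42
    }
}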
public static String prettyJSON(String json) {
return prettyJSON(json, TAB_SEPARATOR);
} | @Test
public void testRenderResultComplexArray() throws Exception {
assertEquals("[\n" + TAB + "1,\n" + TAB + "{\n" + TAB + TAB + "\"A\": \"B\"\n" + TAB + "}\n]", prettyJSON("[1,{\"A\":\"B\"}]"));
} |
@Override
public Optional<ReadError> read(DbFileSources.Line.Builder lineBuilder) {
ScannerReport.LineCoverage reportCoverage = getNextLineCoverageIfMatchLine(lineBuilder.getLine());
if (reportCoverage != null) {
processCoverage(lineBuilder, reportCoverage);
coverage = null;
}
return Optional.empty();
} | @Test
public void nothing_to_do_when_no_coverage_info() {
CoverageLineReader computeCoverageLine = new CoverageLineReader(Collections.<ScannerReport.LineCoverage>emptyList().iterator());
DbFileSources.Line.Builder lineBuilder = DbFileSources.Data.newBuilder().addLinesBuilder().setLine(1);
assertThat(computeCoverageLine.read(lineBuilder)).isEmpty();
assertThat(lineBuilder.hasLineHits()).isFalse();
assertThat(lineBuilder.hasConditions()).isFalse();
assertThat(lineBuilder.hasCoveredConditions()).isFalse();
} |
public static Executor scopeToJob(JobID jobID, Executor executor) {
checkArgument(!(executor instanceof MdcAwareExecutor));
return new MdcAwareExecutor<>(executor, asContextData(jobID));
} | @Test
void testScopeExecutorService() throws Exception {
assertJobIDLogged(
jobID ->
MdcUtils.scopeToJob(jobID, Executors.newDirectExecutorService())
.submit(LOGGING_RUNNABLE)
.get());
} |
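A minimal sketch of the wrapping pattern behind scopeToJob: an executor that installs fixed MDC context data around each task. This is an assumed shape; Flink's MdcAwareExecutor internals are not shown here:

import java.util.Map;
import java.util.concurrent.Executor;
import org.slf4j.MDC;

public final class MdcScopedExecutor implements Executor {
    private final Executor delegate;
    private final Map<String, String> contextData;

    public MdcScopedExecutor(Executor delegate, Map<String, String> contextData) {
        this.delegate = delegate;
        this.contextData = contextData;
    }

    @Override
    public void execute(Runnable command) {
        delegate.execute(() -> {
            Map<String, String> backup = MDC.getCopyOfContextMap(); // may be null on a fresh thread
            MDC.setContextMap(contextData); // e.g. a job-id entry so every log line carries it
            try {
                command.run();
            } finally {
                if (backup == null) {
                    MDC.clear();
                } else {
                    MDC.setContextMap(backup); // restore whatever the thread had before
                }
            }
        });
    }
}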
public final long getJobFinishTime() {
return jobFinishTime;
} | @Test public final void testGetJobFinishTime() {
Assert.assertNull(resourceSkyline);
ReservationInterval riAdd = new ReservationInterval(0, 10);
skylineList.addInterval(riAdd, resource1);
riAdd = new ReservationInterval(10, 20);
skylineList.addInterval(riAdd, resource1);
resourceSkyline =
new ResourceSkyline("1", 1024.5, 0, 20, resource1, skylineList);
Assert.assertEquals(20, resourceSkyline.getJobFinishTime());
} |
@VisibleForTesting
SegmentSizeRecommendations estimate(long generatedSegmentSize, int desiredSegmentSize,
int numRecordsOfGeneratedSegment, long numRecordsPerPush) {
// calc num rows in desired segment
double sizeRatio = (double) desiredSegmentSize / generatedSegmentSize;
long numRowsInDesiredSegment = Math.round(numRecordsOfGeneratedSegment * sizeRatio);
// calc optimal num segment
long optimalNumSegments = Math.round(numRecordsPerPush / (double) numRowsInDesiredSegment);
optimalNumSegments = Math.max(optimalNumSegments, 1);
// revise optimal num rows in segment
long optimalNumRowsInSegment = Math.round(numRecordsPerPush / (double) optimalNumSegments);
// calc optimal segment size
double rowRatio = (double) optimalNumRowsInSegment / numRecordsOfGeneratedSegment;
long optimalSegmentSize = Math.round(generatedSegmentSize * rowRatio);
return new SegmentSizeRecommendations(optimalNumRowsInSegment, optimalNumSegments, optimalSegmentSize);
} | @Test
public void testEstimate() {
/*
* numRecordsPerPush -> num records per push
* numRecordsOfGeneratedSegment -> num records of generated segment
* generatedSegmentSize -> generated segment size
* desiredSegmentSize -> desired segment size
*/
long numRecordsPerPush = 20 * MILLION;
int numRecordsOfGeneratedSegment = 5 * MILLION;
long generatedSegmentSize = 50 * MEGA_BYTE;
int desiredSegmentSize = 120 * MEGA_BYTE;
SegmentSizeRecommendations params =
RULE.estimate(generatedSegmentSize, desiredSegmentSize, numRecordsOfGeneratedSegment, numRecordsPerPush);
assertEquals(params.getNumSegments(), 2);
assertEquals(params.getSegmentSize(), 100 * MEGA_BYTE);
assertEquals(params.getNumRowsPerSegment(), 10 * MILLION);
numRecordsPerPush = 22 * MILLION;
numRecordsOfGeneratedSegment = 5 * MILLION;
generatedSegmentSize = 50 * MEGA_BYTE;
desiredSegmentSize = 120 * MEGA_BYTE;
params = RULE.estimate(generatedSegmentSize, desiredSegmentSize, numRecordsOfGeneratedSegment, numRecordsPerPush);
assertEquals(params.getNumSegments(), 2);
assertEquals(params.getSegmentSize(), 110 * MEGA_BYTE);
assertEquals(params.getNumRowsPerSegment(), 11 * MILLION);
numRecordsPerPush = 18 * MILLION;
numRecordsOfGeneratedSegment = 5 * MILLION;
generatedSegmentSize = 50 * MEGA_BYTE;
desiredSegmentSize = 120 * MEGA_BYTE;
params = RULE.estimate(generatedSegmentSize, desiredSegmentSize, numRecordsOfGeneratedSegment, numRecordsPerPush);
assertEquals(params.getNumSegments(), 2);
assertEquals(params.getSegmentSize(), 90 * MEGA_BYTE);
assertEquals(params.getNumRowsPerSegment(), 9 * MILLION);
numRecordsPerPush = 16 * MILLION;
numRecordsOfGeneratedSegment = 5 * MILLION;
generatedSegmentSize = 50 * MEGA_BYTE;
desiredSegmentSize = 120 * MEGA_BYTE;
params = RULE.estimate(generatedSegmentSize, desiredSegmentSize, numRecordsOfGeneratedSegment, numRecordsPerPush);
assertEquals(params.getNumSegments(), 1);
assertEquals(params.getSegmentSize(), 160 * MEGA_BYTE);
assertEquals(params.getNumRowsPerSegment(), 16 * MILLION);
numRecordsPerPush = 2 * MILLION;
numRecordsOfGeneratedSegment = 5 * MILLION;
generatedSegmentSize = 50 * MEGA_BYTE;
desiredSegmentSize = 120 * MEGA_BYTE;
params = RULE.estimate(generatedSegmentSize, desiredSegmentSize, numRecordsOfGeneratedSegment, numRecordsPerPush);
assertEquals(params.getNumSegments(), 1);
assertEquals(params.getSegmentSize(), 20 * MEGA_BYTE);
assertEquals(params.getNumRowsPerSegment(), 2 * MILLION);
} |
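The first case in testEstimate traces through estimate(...) as follows; this is a worked restatement of the method's own arithmetic, not additional logic:

public final class SegmentSizeTrace {
    public static void main(String[] args) {
        long generatedSegmentSize = 50L * 1024 * 1024; // 50 MB
        int desiredSegmentSize = 120 * 1024 * 1024;    // 120 MB
        int numRecordsOfGeneratedSegment = 5_000_000;
        long numRecordsPerPush = 20_000_000L;

        double sizeRatio = (double) desiredSegmentSize / generatedSegmentSize;               // 2.4
        long numRowsInDesiredSegment = Math.round(numRecordsOfGeneratedSegment * sizeRatio); // 12M
        long optimalNumSegments =
                Math.max(Math.round(numRecordsPerPush / (double) numRowsInDesiredSegment), 1); // round(1.67) = 2
        long optimalNumRowsInSegment = Math.round(numRecordsPerPush / (double) optimalNumSegments); // 10M
        double rowRatio = (double) optimalNumRowsInSegment / numRecordsOfGeneratedSegment;   // 2.0
        long optimalSegmentSize = Math.round(generatedSegmentSize * rowRatio);               // 100 MB
        System.out.println(optimalNumSegments + " segments, " + optimalNumRowsInSegment
                + " rows/segment, " + optimalSegmentSize + " bytes/segment");
    }
}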
@Override
public MutableAnalysisMetadataHolder setBranch(Branch branch) {
checkState(!this.branch.isInitialized(), "Branch has already been set");
boolean isCommunityEdition = editionProvider.get().filter(t -> t == EditionProvider.Edition.COMMUNITY).isPresent();
checkState(
!isCommunityEdition || branch.isMain(),
"Branches and Pull Requests are not supported in Community Edition");
this.branch.setProperty(branch);
return this;
} | @Test
public void setBranch_throws_ISE_when_called_twice() {
AnalysisMetadataHolderImpl underTest = new AnalysisMetadataHolderImpl(editionProvider);
underTest.setBranch(new DefaultBranchImpl(DEFAULT_MAIN_BRANCH_NAME));
assertThatThrownBy(() -> underTest.setBranch(new DefaultBranchImpl("main")))
.isInstanceOf(IllegalStateException.class)
.hasMessage("Branch has already been set");
} |
public Node parse() throws ScanException {
if (tokenList == null || tokenList.isEmpty())
return null;
return E();
} | @Test
public void literal() throws ScanException {
Tokenizer tokenizer = new Tokenizer("abc");
Parser parser = new Parser(tokenizer.tokenize());
Node node = parser.parse();
Node witness = new Node(Node.Type.LITERAL, "abc");
assertEquals(witness, node);
} |
public static ImportResult merge(ImportResult ir1, ImportResult ir2) {
if (ir1.getType() == ResultType.ERROR) {
return ir1;
}
if (ir2.getType() == ResultType.ERROR) {
return ir2;
}
ImportResult res = new ImportResult(ResultType.OK);
res.bytes = Stream.of(ir1.getBytes(), ir2.getBytes())
.filter(Optional::isPresent)
.map(Optional::get)
.reduce(Long::sum);
res.counts = mergeCounts(ir1, ir2);
return res;
} | @Test
public void mergeOk() {
assertEquals(OK, ImportResult.merge(OK, OK));
} |
@VisibleForTesting
static List<Tuple2<ConfigGroup, String>> generateTablesForClass(
Class<?> optionsClass, Collection<OptionWithMetaInfo> optionWithMetaInfos) {
ConfigGroups configGroups = optionsClass.getAnnotation(ConfigGroups.class);
List<OptionWithMetaInfo> allOptions = selectOptionsToDocument(optionWithMetaInfos);
if (allOptions.isEmpty()) {
return Collections.emptyList();
}
List<Tuple2<ConfigGroup, String>> tables;
if (configGroups != null) {
tables = new ArrayList<>(configGroups.groups().length + 1);
Tree tree = new Tree(configGroups.groups(), allOptions);
for (ConfigGroup group : configGroups.groups()) {
List<OptionWithMetaInfo> configOptions = tree.findConfigOptions(group);
if (!configOptions.isEmpty()) {
sortOptions(configOptions);
tables.add(Tuple2.of(group, toHtmlTable(configOptions)));
}
}
List<OptionWithMetaInfo> configOptions = tree.getDefaultOptions();
if (!configOptions.isEmpty()) {
sortOptions(configOptions);
tables.add(Tuple2.of(null, toHtmlTable(configOptions)));
}
} else {
sortOptions(allOptions);
tables = Collections.singletonList(Tuple2.of(null, toHtmlTable(allOptions)));
}
return tables;
} | @Test
void testClassWithoutOptionsIsIgnored() {
assertThat(
ConfigOptionsDocGenerator.generateTablesForClass(
EmptyConfigOptions.class,
ConfigurationOptionLocator.extractConfigOptions(
EmptyConfigOptions.class)))
.isEmpty();
} |
@Override
public Optional<ResultSubpartition.BufferAndBacklog> consumeBuffer(
int nextBufferToConsume, Collection<Buffer> buffersToRecycle) throws Throwable {
if (!checkAndGetFirstBufferIndexOrError(nextBufferToConsume, buffersToRecycle)
.isPresent()) {
return Optional.empty();
}
// the check above already ensures the peeked element is not null and not a throwable.
BufferIndexOrError current = checkNotNull(loadedBuffers.poll());
BufferIndexOrError next = loadedBuffers.peek();
Buffer.DataType nextDataType = next == null ? Buffer.DataType.NONE : next.getDataType();
int bufferIndex = current.getIndex();
Buffer buffer =
current.getBuffer()
.orElseThrow(
() ->
new NullPointerException(
"Get a non-throwable and non-buffer bufferIndexOrError, which is not allowed"));
tryDecreaseBacklog(buffer);
return Optional.of(
ResultSubpartition.BufferAndBacklog.fromBufferAndLookahead(
buffer, nextDataType, backlog.get(), bufferIndex));
} | @Test
void testConsumeBuffer() throws Throwable {
TestingSubpartitionConsumerInternalOperation viewNotifier =
new TestingSubpartitionConsumerInternalOperation();
HsSubpartitionFileReaderImpl subpartitionFileReader =
createSubpartitionFileReader(0, viewNotifier);
// if no data has been preloaded into the file reader, return Optional.empty().
assertThat(subpartitionFileReader.consumeBuffer(0, Collections.emptyList())).isNotPresent();
// buffers in file: (0-0, 0-1, 0-2)
writeDataToFile(0, 0, 0, 3);
Queue<MemorySegment> memorySegments = createsMemorySegments(3);
subpartitionFileReader.prepareForScheduling();
// trigger reading, add buffer to queue.
subpartitionFileReader.readBuffers(memorySegments, (ignore) -> {});
// if nextBufferToConsume is equal to the peek element's index.
assertThat(subpartitionFileReader.consumeBuffer(0, new ArrayList<>()))
.hasValueSatisfying(
(bufferAndBacklog -> {
assertThat(bufferAndBacklog.getNextDataType())
.isEqualTo(DataType.DATA_BUFFER);
assertThat(bufferAndBacklog.getSequenceNumber()).isEqualTo(0);
// first buffer's data is 0.
assertThat(
bufferAndBacklog
.buffer()
.getNioBufferReadable()
.order(ByteOrder.nativeOrder())
.getInt())
.isEqualTo(0);
}));
// if nextBufferToConsume is less than the peek element's index, return Optional.empty().
assertThat(subpartitionFileReader.consumeBuffer(0, Collections.emptyList())).isNotPresent();
// if nextBufferToConsume is greater than the peek element's index, skip this buffer and keep
// looking.
assertThat(subpartitionFileReader.consumeBuffer(2, new ArrayList<>()))
.hasValueSatisfying(
(bufferAndBacklog -> {
assertThat(bufferAndBacklog.getNextDataType()).isEqualTo(DataType.NONE);
assertThat(bufferAndBacklog.getSequenceNumber()).isEqualTo(2);
assertThat(
bufferAndBacklog
.buffer()
.getNioBufferReadable()
.order(ByteOrder.nativeOrder())
.getInt())
.isEqualTo(2);
}));
assertThat(subpartitionFileReader.getLoadedBuffers()).isEmpty();
} |
public static DataMap convertMap(Map<String, ?> input, boolean stringify)
{
return convertMap(input, false, stringify);
} | @Test
void testConvertMapWithDataComplex()
{
DataMap parent = DataComplexUtil.convertMap(inputMapWithDataComplex());
Assert.assertNotNull(parent);
Assert.assertEquals(parent.size(), 2);
Assert.assertTrue(parent.containsKey("child1"));
DataMap child1 = parent.getDataMap("child1");
Assert.assertNotNull(child1);
Assert.assertEquals(child1.size(), 1);
Assert.assertTrue(child1.containsKey("gchild1"));
Assert.assertEquals(child1.get("gchild1"), 123);
Assert.assertTrue(parent.containsKey("child2"));
DataList child2 = parent.getDataList("child2");
Assert.assertNotNull(child2);
Assert.assertEquals(child2.size(), 1);
DataList gchild2 = child2.getDataList(0);
Assert.assertNotNull(gchild2);
Assert.assertEquals(gchild2.size(), 1);
Assert.assertEquals(gchild2.get(0), "ggchild2");
} |
public List<String> getCwe() {
return cwe;
} | @Test
@SuppressWarnings("squid:S2699")
public void testGetCwe() {
//already tested, this is just left so the IDE doesn't recreate it.
} |
public String map(AmountRequest request) {
if (request instanceof OffsetBasedPageRequest && ((OffsetBasedPageRequest) request).getOffset() > 0L) {
return sqlOffsetBasedPageRequestMapper.mapToSqlQuery((OffsetBasedPageRequest) request, jobTable);
} else {
return sqlAmountRequestMapper.mapToSqlQuery(request, jobTable);
}
} | @Test
void sqlJobPageRequestMapperMapsOffsetBasedPageRequest() {
OffsetBasedPageRequest offsetBasedPageRequest = ascOnUpdatedAt(20, 10);
String filter = jobPageRequestMapper.map(offsetBasedPageRequest);
assertThat(filter).isEqualTo(" ORDER BY updatedAt ASC LIMIT :limit OFFSET :offset");
} |
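A zero offset fails the getOffset() > 0L guard above and falls through to sqlAmountRequestMapper; a sketch of that branch, assuming the same ascOnUpdatedAt(offset, limit) helper as the test above:
@Test
void sqlJobPageRequestMapperMapsZeroOffsetRequestToAmountQuery() {
    OffsetBasedPageRequest zeroOffsetRequest = ascOnUpdatedAt(0, 10);
    String filter = jobPageRequestMapper.map(zeroOffsetRequest);
    // the exact SQL comes from sqlAmountRequestMapper; at minimum it should not page by offset
    assertThat(filter).doesNotContain("OFFSET");
}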
public InputStream acquire(UnderFileSystem ufs, String path, FileId fileId,
OpenOptions openOptions) throws IOException {
if (!ufs.isSeekable() || !CACHE_ENABLED) {
// only seekable UFSes are cacheable/reusable, always return a new input stream
return ufs.openExistingFile(path, openOptions);
}
// explicit cache cleanup
try {
mStreamCache.cleanUp();
} catch (Throwable error) {
SAMPLING_LOG.warn("Explicit cache removal failed.", error);
}
StreamIdSet streamIds = mFileIdToStreamIds.compute(fileId, (key, value) -> {
if (value != null) {
return value;
}
return new StreamIdSet();
});
// Try to acquire an existing id from the stream id set.
// synchronized is required to be consistent between availableIds() and acquire(id).
CachedSeekableInputStream inputStream = null;
synchronized (streamIds) {
// find the next available input stream from the cache
for (long id : streamIds.availableIds()) {
inputStream = mStreamCache.getIfPresent(id);
if (inputStream != null) {
// acquire it now while locked, so other threads cannot take it
streamIds.acquire(id);
break;
}
}
}
if (inputStream != null) {
// for the cached ufs instream, seek (outside of critical section) to the requested position.
LOG.debug("Reused the under file input stream resource of {}", inputStream.getResourceId());
inputStream.seek(openOptions.getOffset());
return inputStream;
}
// no cached input stream is available, acquire a new id and open a new stream
final long newId = streamIds.acquireNewId();
try {
inputStream = mStreamCache.get(newId, () -> {
SeekableUnderFileInputStream ufsStream = (SeekableUnderFileInputStream) ufs
.openExistingFile(path,
OpenOptions.defaults().setPositionShort(openOptions.getPositionShort())
.setOffset(openOptions.getOffset()));
LOG.debug("Created the under file input stream resource of {}", newId);
return new CachedSeekableInputStream(ufsStream, newId, fileId, path);
});
} catch (ExecutionException e) {
LOG.warn("Failed to create a new cached ufs instream of file id {} and path {}", fileId,
path, e);
// fall back to an uncached ufs creation.
return ufs.openExistingFile(path,
OpenOptions.defaults().setOffset(openOptions.getOffset()));
} catch (UncheckedExecutionException e) {
throw AlluxioRuntimeException.from(e.getCause());
}
return inputStream;
} | @Test
public void multipleCheckIn() throws Exception {
mManager.acquire(mUfs, FILE_NAME, FILE_ID, OpenOptions.defaults().setOffset(2));
mManager.acquire(mUfs, FILE_NAME, FILE_ID, OpenOptions.defaults().setOffset(4));
mManager.acquire(mUfs, FILE_NAME, FILE_ID, OpenOptions.defaults().setOffset(6));
// 3 different input streams are acquired
verify(mUfs, times(3)).openExistingFile(eq(FILE_NAME),
any(OpenOptions.class));
} |
public static int readUint16BE(ByteBuffer buf) throws BufferUnderflowException {
return Short.toUnsignedInt(buf.order(ByteOrder.BIG_ENDIAN).getShort());
} | @Test(expected = ArrayIndexOutOfBoundsException.class)
public void testReadUint16BEThrowsException1() {
ByteUtils.readUint16BE(new byte[]{1}, 2);
} |
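The ByteBuffer overload shown as the focal method is pure JDK behavior (a big-endian short widened via Short.toUnsignedInt), so it can be pinned with a direct sketch; ByteBuffer here is java.nio.ByteBuffer:
@Test
public void testReadUint16BEFromByteBuffer() {
    // 0x01 0x02 read big-endian is 0x0102 = 258
    assertEquals(258, ByteUtils.readUint16BE(ByteBuffer.wrap(new byte[]{0x01, 0x02})));
    // the unsigned widening keeps values above Short.MAX_VALUE positive
    assertEquals(0xFFFF, ByteUtils.readUint16BE(ByteBuffer.wrap(new byte[]{(byte) 0xFF, (byte) 0xFF})));
}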
@Override
protected int poll() throws Exception {
// must reset for each poll
shutdownRunningTask = null;
pendingExchanges = 0;
List<software.amazon.awssdk.services.sqs.model.Message> messages = pollingTask.call();
// okay, we have some response from AWS, so let's mark the consumer as ready
forceConsumerAsReady();
Queue<Exchange> exchanges = createExchanges(messages);
return processBatch(CastUtils.cast(exchanges));
} | @Test
void shouldRequest11MessagesWithTwoReceiveRequest() throws Exception {
// given
var expectedMessages = IntStream.range(0, 11).mapToObj(Integer::toString).toList();
expectedMessages.stream().map(this::message).forEach(sqsClientMock::addMessage);
try (var tested = createConsumer(11)) {
// when
var polledMessagesCount = tested.poll();
// then
assertThat(polledMessagesCount).isEqualTo(11);
assertThat(receiveMessageBodies()).isEqualTo(expectedMessages);
assertThat(sqsClientMock.getReceiveRequests()).containsExactlyInAnyOrder(
expectedReceiveRequest(10),
expectedReceiveRequest(1));
assertThat(sqsClientMock.getQueues()).isEmpty();
}
} |
@Override
public String selectForUpdateSkipLocked() {
return supportsSelectForUpdateSkipLocked ? " FOR UPDATE SKIP LOCKED" : "";
} | @Test
void mySQL830DoesSupportSelectForUpdateSkipLocked() {
assertThat(new MySqlDialect("MySQL", "8.3.0").selectForUpdateSkipLocked())
.isEqualTo(" FOR UPDATE SKIP LOCKED");
} |
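The negative case exercises the flag in the focal method; a sketch assuming the dialect's version check treats pre-8.0 MySQL (which lacks SKIP LOCKED) as unsupported:
@Test
void mySQL57DoesNotSupportSelectForUpdateSkipLocked() {
    // for an unsupported server version the suffix degrades to an empty string
    assertThat(new MySqlDialect("MySQL", "5.7.0").selectForUpdateSkipLocked())
            .isEqualTo("");
}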
@CanIgnoreReturnValue
public final Ordered containsExactlyEntriesIn(Multimap<?, ?> expectedMultimap) {
checkNotNull(expectedMultimap, "expectedMultimap");
checkNotNull(actual);
ListMultimap<?, ?> missing = difference(expectedMultimap, actual);
ListMultimap<?, ?> extra = difference(actual, expectedMultimap);
// TODO(kak): Possible enhancement: Include "[1 copy]" if the element does appear in
// the subject but not enough times. Similarly for unexpected extra items.
if (!missing.isEmpty()) {
if (!extra.isEmpty()) {
boolean addTypeInfo = hasMatchingToStringPair(missing.entries(), extra.entries());
// Note: The usage of countDuplicatesAndAddTypeInfo() below causes entries no longer to be
// grouped by key in the 'missing' and 'unexpected items' parts of the message (we still
// show the actual and expected multimaps in the standard format).
String missingDisplay =
addTypeInfo
? countDuplicatesAndAddTypeInfo(annotateEmptyStringsMultimap(missing).entries())
: countDuplicatesMultimap(annotateEmptyStringsMultimap(missing));
String extraDisplay =
addTypeInfo
? countDuplicatesAndAddTypeInfo(annotateEmptyStringsMultimap(extra).entries())
: countDuplicatesMultimap(annotateEmptyStringsMultimap(extra));
failWithActual(
fact("missing", missingDisplay),
fact("unexpected", extraDisplay),
simpleFact("---"),
fact("expected", annotateEmptyStringsMultimap(expectedMultimap)));
return ALREADY_FAILED;
} else {
failWithActual(
fact("missing", countDuplicatesMultimap(annotateEmptyStringsMultimap(missing))),
simpleFact("---"),
fact("expected", annotateEmptyStringsMultimap(expectedMultimap)));
return ALREADY_FAILED;
}
} else if (!extra.isEmpty()) {
failWithActual(
fact("unexpected", countDuplicatesMultimap(annotateEmptyStringsMultimap(extra))),
simpleFact("---"),
fact("expected", annotateEmptyStringsMultimap(expectedMultimap)));
return ALREADY_FAILED;
}
return new MultimapInOrder(/* allowUnexpected = */ false, expectedMultimap);
} | @Test
public void containsExactlyFailureBoth() {
ImmutableMultimap<Integer, String> expected =
ImmutableMultimap.of(3, "one", 3, "six", 3, "two", 4, "five", 4, "four");
ListMultimap<Integer, String> actual = LinkedListMultimap.create(expected);
actual.remove(3, "six");
actual.remove(4, "five");
actual.put(4, "nine");
actual.put(5, "eight");
expectFailureWhenTestingThat(actual).containsExactlyEntriesIn(expected);
assertFailureKeys("missing", "unexpected", "---", "expected", "but was");
assertFailureValue("missing", "{3=[six], 4=[five]}");
assertFailureValue("unexpected", "{4=[nine], 5=[eight]}");
} |
public boolean shouldStartExecutor(String executorName) {
return !notStartExecutors.contains(executorName) && (startExecutors.isEmpty() || startExecutors.contains(executorName));
} | @Test
void shouldStartExecutor() {
assertTrue(startExecutorService.shouldStartExecutor("ExecutorName"));
startExecutorService.applyOptions(List.of("ExecutorName"), Collections.emptyList());
assertTrue(startExecutorService.shouldStartExecutor("ExecutorName"));
startExecutorService.applyOptions(List.of("AnotherExecutorName"), Collections.emptyList());
assertFalse(startExecutorService.shouldStartExecutor("ExecutorName"));
startExecutorService.applyOptions(Collections.emptyList(), List.of("AnotherExecutorName"));
assertTrue(startExecutorService.shouldStartExecutor("ExecutorName"));
startExecutorService.applyOptions(Collections.emptyList(), List.of("ExecutorName"));
assertFalse(startExecutorService.shouldStartExecutor("ExecutorName"));
assertThrows(IllegalArgumentException.class, () -> startExecutorService.applyOptions(List.of("ExecutorName"), List.of("AnotherExecutorName")));
} |
public VersionMatchResult matches(DeploymentInfo info) {
// Skip if no manifest configuration
if(info.getManifest() == null || info.getManifest().size() == 0) {
return VersionMatchResult.SKIPPED;
}
for (ManifestInfo manifest: info.getManifest()) {
VersionMatchResult result = match(manifest);
if(VersionMatchResult.MATCHED.equals(result)){
LOGGER.debug("Matched {} with {}", this, manifest);
return VersionMatchResult.MATCHED;
}
if(VersionMatchResult.REJECTED.equals(result)){
LOGGER.debug("Rejected {} with {}", this, manifest);
return VersionMatchResult.REJECTED;
}
}
// There were no matches (maybe another matcher will pass)
return VersionMatchResult.SKIPPED;
} | @Test
public void testFails() throws IOException {
Set<MavenInfo> maven = new HashSet<>();
ManifestInfo manifest = new ManifestInfo(new java.util.jar.Manifest(this.getClass().getResourceAsStream("/org/hotswap/agent/versions/matcher/TEST.MF")));
DeploymentInfo info = new DeploymentInfo(maven, Collections.singleton(manifest));
System.err.println(info);
PluginMatcher p = new PluginMatcher(NotMatchingPlugin.class);
assertEquals("Not Matching",VersionMatchResult.REJECTED, p.matches(info));
} |
public void set(String name, String value) {
set(name, value, null);
} | @Test
public void testSettingKeyNull() throws Exception {
Configuration config = new Configuration();
try {
config.set(null, "test");
fail("Should throw an IllegalArgumentException exception ");
} catch (Exception e) {
assertTrue(e instanceof IllegalArgumentException);
assertEquals(e.getMessage(), "Property name must not be null");
}
} |
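For contrast with the null-key failure, a happy-path roundtrip; this is only a sketch, since it assumes a matching get(String) accessor that is not shown in the focal method:
@Test
public void testSettingAndGettingKey() throws Exception {
    Configuration config = new Configuration();
    config.set("alpha", "beta");
    // hypothetical accessor; any equivalent getter on Configuration would do
    assertEquals("beta", config.get("alpha"));
}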
public SegmentCommitter createSegmentCommitter(SegmentCompletionProtocol.Request.Params params,
String controllerVipUrl)
throws URISyntaxException {
boolean uploadToFs = _streamConfig.isServerUploadToDeepStore();
String peerSegmentDownloadScheme = _tableConfig.getValidationConfig().getPeerSegmentDownloadScheme();
String segmentStoreUri = _indexLoadingConfig.getSegmentStoreURI();
SegmentUploader segmentUploader;
if (uploadToFs || peerSegmentDownloadScheme != null) {
// TODO: peer scheme non-null check exists for backwards compatibility. remove check once users have migrated
segmentUploader = new PinotFSSegmentUploader(segmentStoreUri,
ServerSegmentCompletionProtocolHandler.getSegmentUploadRequestTimeoutMs(), _serverMetrics);
} else {
segmentUploader = new Server2ControllerSegmentUploader(_logger,
_protocolHandler.getFileUploadDownloadClient(),
_protocolHandler.getSegmentCommitUploadURL(params, controllerVipUrl), params.getSegmentName(),
ServerSegmentCompletionProtocolHandler.getSegmentUploadRequestTimeoutMs(), _serverMetrics,
_protocolHandler.getAuthProvider(), _tableConfig.getTableName());
}
return new SplitSegmentCommitter(_logger, _protocolHandler, params, segmentUploader, peerSegmentDownloadScheme);
} | @Test(description = "when controller supports split commit, server should always use split segment commit")
public void testSplitSegmentCommitterIsDefault()
throws URISyntaxException {
TableConfig config = createRealtimeTableConfig("test").build();
ServerSegmentCompletionProtocolHandler protocolHandler =
new ServerSegmentCompletionProtocolHandler(Mockito.mock(ServerMetrics.class), "test_REALTIME");
String controllerVipUrl = "http://localhost:1234";
SegmentCompletionProtocol.Request.Params requestParams = new SegmentCompletionProtocol.Request.Params();
SegmentCommitterFactory factory = new SegmentCommitterFactory(Mockito.mock(Logger.class), protocolHandler, config,
Mockito.mock(IndexLoadingConfig.class), Mockito.mock(ServerMetrics.class));
SegmentCommitter committer = factory.createSegmentCommitter(requestParams, controllerVipUrl);
Assert.assertNotNull(committer);
Assert.assertTrue(committer instanceof SplitSegmentCommitter);
} |
public LinkParameter value(String value) {
this.value = value;
return this;
} | @Test
public void testValue() {
LinkParameter linkParameter = new LinkParameter();
linkParameter.setValue("foo");
linkParameter.setValue("bar");
linkParameter.setValue("baz");
Assert.assertEquals(linkParameter.value("bar"), linkParameter);
Assert.assertEquals(linkParameter.getValue(), "bar");
} |
@Override
public int hashCode() {
int result = 1;
result = 31 * result + status.hashCode();
result = 31 * result + super.hashCode();
return result;
} | @Test
public void testNotEquals() {
HttpResponse ok = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
HttpResponse notFound = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.NOT_FOUND);
assertNotEquals(ok, notFound);
assertNotEquals(ok.hashCode(), notFound.hashCode());
} |
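The complementary property follows from the status-plus-super recipe in the focal hashCode; a sketch assuming DefaultHttpResponse derives equality from the same fields:
@Test
public void testEqualsAndHashCodeConsistency() {
    HttpResponse ok1 = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
    HttpResponse ok2 = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
    // equal version and status should yield equal objects and equal hash codes
    assertEquals(ok1, ok2);
    assertEquals(ok1.hashCode(), ok2.hashCode());
}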
@Override
public Num getValue(int index) {
return averageTrueRangeIndicator.getValue(index);
} | @Test
public void testXls() throws Exception {
BarSeries xlsSeries = xls.getSeries();
Indicator<Num> indicator;
indicator = getIndicator(xlsSeries, 1);
assertIndicatorEquals(xls.getIndicator(1), indicator);
assertEquals(4.8, indicator.getValue(indicator.getBarSeries().getEndIndex()).doubleValue(),
TestUtils.GENERAL_OFFSET);
indicator = getIndicator(xlsSeries, 3);
assertIndicatorEquals(xls.getIndicator(3), indicator);
assertEquals(7.4225, indicator.getValue(indicator.getBarSeries().getEndIndex()).doubleValue(),
TestUtils.GENERAL_OFFSET);
indicator = getIndicator(xlsSeries, 13);
assertIndicatorEquals(xls.getIndicator(13), indicator);
assertEquals(8.8082, indicator.getValue(indicator.getBarSeries().getEndIndex()).doubleValue(),
TestUtils.GENERAL_OFFSET);
} |
public static String intToHex(Integer i) {
return prepareNumberHexString(i.longValue(), true, false, HEX_LEN_MIN, HEX_LEN_INT_MAX);
} | @Test
public void intToHex_Test() {
Assertions.assertEquals("FFF5EE", TbUtils.intToHex(-2578));
Assertions.assertEquals("0xFFD8FFA6", TbUtils.intToHex(0xFFD8FFA6, true, true));
Assertions.assertEquals("0xA6FFD8FF", TbUtils.intToHex(0xFFD8FFA6, false, true));
Assertions.assertEquals("0x7FFFFFFF", TbUtils.intToHex(Integer.MAX_VALUE, true, true));
Assertions.assertEquals("0x80000000", TbUtils.intToHex(Integer.MIN_VALUE, true, true));
Assertions.assertEquals("0xAB", TbUtils.intToHex(0xAB, true, true));
Assertions.assertEquals("0xABCD", TbUtils.intToHex(0xABCD, true, true));
Assertions.assertEquals("0xABCDEF", TbUtils.intToHex(0xABCDEF, true, true));
Assertions.assertEquals("0xCDAB", TbUtils.intToHex(0xABCDEF, false, true, 4));
Assertions.assertEquals("0xAB", TbUtils.intToHex(171, true, true));
Assertions.assertEquals("0xAB", TbUtils.intToHex(0xAB, false, true));
Assertions.assertEquals("0xAB", TbUtils.intToHex(0xAB, true, true, 2));
Assertions.assertEquals("AB", TbUtils.intToHex(0xAB, false, false, 2));
Assertions.assertEquals("AB", TbUtils.intToHex(171, true, false));
Assertions.assertEquals("0xAB", TbUtils.intToHex(0xAB, true, true));
Assertions.assertEquals("0xAB", TbUtils.intToHex(0xAB, false, true));
Assertions.assertEquals("AB", TbUtils.intToHex(0xAB, false, false));
Assertions.assertEquals("0xABCD", TbUtils.intToHex(0xABCD, true, true));
Assertions.assertEquals("0xCDAB", TbUtils.intToHex(0xABCD, false, true));
Assertions.assertEquals("0xCD", TbUtils.intToHex(0xABCD, true, true, 2));
Assertions.assertEquals("AB", TbUtils.intToHex(0xABCD, false, false, 2));
} |
public void updateInstance(Service service, Instance instance, String clientId) {
Service singleton = ServiceManager.getInstance().getSingleton(service);
if (singleton.isEphemeral()) {
throw new NacosRuntimeException(NacosException.INVALID_PARAM,
String.format("Current service %s is ephemeral service, can't update persistent instance.",
singleton.getGroupedServiceName()));
}
final PersistentClientOperationServiceImpl.InstanceStoreRequest request = new PersistentClientOperationServiceImpl.InstanceStoreRequest();
request.setService(service);
request.setInstance(instance);
request.setClientId(clientId);
final WriteRequest writeRequest = WriteRequest.newBuilder().setGroup(group())
.setData(ByteString.copyFrom(serializer.serialize(request))).setOperation(DataOperation.CHANGE.name())
.build();
try {
protocol.write(writeRequest);
} catch (Exception e) {
throw new NacosRuntimeException(NacosException.SERVER_ERROR, e);
}
} | @Test
void updateInstance() throws Exception {
Field clientManagerField = PersistentClientOperationServiceImpl.class.getDeclaredField("clientManager");
clientManagerField.setAccessible(true);
// Test update instance
persistentClientOperationServiceImpl.updateInstance(service, instance, clientId);
verify(cpProtocol).write(any(WriteRequest.class));
} |
@Nullable
public byte[] getValue() {
return mValue;
} | @Test
public void setValue_SFLOAT_hex() {
final MutableData data = new MutableData(new byte[2]);
data.setValue(10.1f, Data.FORMAT_SFLOAT, 0);
assertArrayEquals(new byte[] { 0x65, (byte) 0xF0 }, data.getValue());
} |
@Transactional(readOnly = true)
public User readUserIfValid(String username, String password) {
Optional<User> user = userService.readUserByUsername(username);
if (!isExistUser(user)) {
log.warn("해당 유저가 존재하지 않습니다. username: {}", username);
throw new UserErrorException(UserErrorCode.INVALID_USERNAME_OR_PASSWORD);
}
if (!isValidPassword(password, user.get())) {
log.warn("비밀번호가 일치하지 않습니다. username: {}", username);
throw new UserErrorException(UserErrorCode.INVALID_USERNAME_OR_PASSWORD);
}
return user.get();
} | @DisplayName("로그인 시, username에 해당하는 유저가 존재하지 않으면 UserErrorException을 발생시킨다.")
@Test
void readUserIfNotFound() {
// given
given(userService.readUserByUsername("pennyway")).willThrow(new UserErrorException(UserErrorCode.NOT_FOUND));
// when - then
UserErrorException exception = assertThrows(UserErrorException.class, () -> userGeneralSignService.readUserIfValid("pennyway", "password"));
System.out.println(exception.getExplainError());
} |
public long appendControlMessages(MemoryRecordsCreator valueCreator) {
appendLock.lock();
try {
ByteBuffer buffer = memoryPool.tryAllocate(maxBatchSize);
if (buffer != null) {
try {
forceDrain();
MemoryRecords memoryRecords = valueCreator.create(
nextOffset,
epoch,
compression,
buffer
);
int numberOfRecords = validateMemoryRecordsAndReturnCount(memoryRecords);
completed.add(
new CompletedBatch<>(
nextOffset,
numberOfRecords,
memoryRecords,
memoryPool,
buffer
)
);
nextOffset += numberOfRecords;
} catch (Exception e) {
// Release the buffer now since the buffer was not stored in completed for a delayed release
memoryPool.release(buffer);
throw e;
}
} else {
throw new IllegalStateException("Could not allocate buffer for the control record");
}
return nextOffset - 1;
} finally {
appendLock.unlock();
}
} | @Test
public void testInvalidControlRecordEpoch() {
int leaderEpoch = 17;
long baseOffset = 157;
int lingerMs = 50;
int maxBatchSize = 512;
ByteBuffer buffer = ByteBuffer.allocate(maxBatchSize);
Mockito.when(memoryPool.tryAllocate(maxBatchSize))
.thenReturn(buffer);
BatchAccumulator.MemoryRecordsCreator creator = (offset, epoch, compression, buf) -> {
long now = 1234;
try (MemoryRecordsBuilder builder = controlRecordsBuilder(
offset,
epoch + 1,
compression,
now,
buf
)
) {
builder.appendSnapshotHeaderMessage(
now,
new SnapshotHeaderRecord()
.setVersion(ControlRecordUtils.SNAPSHOT_HEADER_CURRENT_VERSION)
.setLastContainedLogTimestamp(now)
);
return builder.build();
}
};
try (BatchAccumulator<String> acc = buildAccumulator(
leaderEpoch,
baseOffset,
lingerMs,
maxBatchSize
)
) {
assertThrows(IllegalArgumentException.class, () -> acc.appendControlMessages(creator));
}
} |
public Map<StepDependencyType, StepDependencies> getStepDependencies(
String workflowId,
long workflowInstanceId,
long workflowRunId,
String stepId,
String stepAttempt) {
try {
return getStepInstanceFieldByIds(
StepInstanceField.DEPENDENCIES,
workflowId,
workflowInstanceId,
workflowRunId,
stepId,
stepAttempt,
this::getDependencies);
} catch (MaestroNotFoundException ex) {
// step dependency summary is not set
return null;
}
} | @Test
public void testGetStepInstanceStepDependenciesSummary() {
Map<StepDependencyType, StepDependencies> dependencies =
stepDao.getStepDependencies(TEST_WORKFLOW_ID, 1, 1, "job1", "1");
StepDependencies stepDependencies = dependencies.get(StepDependencyType.SIGNAL);
assertFalse(stepDependencies.isSatisfied());
Map<StepDependencyType, StepDependencies> latest =
stepDao.getStepDependencies(TEST_WORKFLOW_ID, 1, 1, "job1", "latest");
assertEquals(dependencies, latest);
} |
static JavaType constructType(Type type) {
try {
return constructTypeInner(type);
} catch (Exception e) {
throw new InvalidDataTableTypeException(type, e);
}
} | @Test
void should_provide_canonical_representation_of_object() {
JavaType javaType = TypeFactory.constructType(Object.class);
assertThat(javaType.getTypeName(), is(Object.class.getTypeName()));
} |
@Override
@CanIgnoreReturnValue
public Key register(Watchable watchable, Iterable<? extends WatchEvent.Kind<?>> eventTypes)
throws IOException {
JimfsPath path = checkWatchable(watchable);
Key key = super.register(path, eventTypes);
Snapshot snapshot = takeSnapshot(path);
synchronized (this) {
snapshots.put(key, snapshot);
if (pollingFuture == null) {
startPolling();
}
}
return key;
} | @Test(timeout = 2000)
public void testWatchForOneEventType() throws IOException, InterruptedException {
JimfsPath path = createDirectory();
watcher.register(path, ImmutableList.of(ENTRY_CREATE));
Files.createFile(path.resolve("foo"));
assertWatcherHasEvents(new Event<>(ENTRY_CREATE, 1, fs.getPath("foo")));
Files.createFile(path.resolve("bar"));
Files.createFile(path.resolve("baz"));
assertWatcherHasEvents(
new Event<>(ENTRY_CREATE, 1, fs.getPath("bar")),
new Event<>(ENTRY_CREATE, 1, fs.getPath("baz")));
} |
public static Set<String> configNames() {
return CONFIG.names();
} | @Test
public void testFromPropsInvalid() {
GroupConfig.configNames().forEach(name -> {
if (GroupConfig.CONSUMER_SESSION_TIMEOUT_MS_CONFIG.equals(name)) {
assertPropertyInvalid(name, "not_a_number", "-0.1", "1.2");
} else if (GroupConfig.CONSUMER_HEARTBEAT_INTERVAL_MS_CONFIG.equals(name)) {
assertPropertyInvalid(name, "not_a_number", "-0.1", "1.2");
} else {
assertPropertyInvalid(name, "not_a_number", "-1");
}
});
} |
@Override
public synchronized void execute() {
boolean debugMode = conf.isLoadBalancerDebugModeEnabled() || log.isDebugEnabled();
if (debugMode) {
log.info("Load balancer enabled: {}, Shedding enabled: {}.",
conf.isLoadBalancerEnabled(), conf.isLoadBalancerSheddingEnabled());
}
if (!isLoadBalancerSheddingEnabled()) {
if (debugMode) {
log.info("The load balancer or load balancer shedding already disabled. Skipping.");
}
return;
}
// Remove bundles who have been unloaded for longer than the grace period from the recently unloaded map.
final long timeout = System.currentTimeMillis()
- TimeUnit.MINUTES.toMillis(conf.getLoadBalancerSheddingGracePeriodMinutes());
recentlyUnloadedBundles.keySet().removeIf(e -> recentlyUnloadedBundles.get(e) < timeout);
long asyncOpTimeoutMs = conf.getNamespaceBundleUnloadingTimeoutMs();
synchronized (namespaceUnloadStrategy) {
try {
Boolean isChannelOwner = channel.isChannelOwnerAsync().get(asyncOpTimeoutMs, TimeUnit.MILLISECONDS);
if (!isChannelOwner) {
if (debugMode) {
log.info("Current broker is not channel owner. Skipping.");
}
return;
}
List<String> availableBrokers = context.brokerRegistry().getAvailableBrokersAsync()
.get(asyncOpTimeoutMs, TimeUnit.MILLISECONDS);
if (debugMode) {
log.info("Available brokers: {}", availableBrokers);
}
if (availableBrokers.size() <= 1) {
log.info("Only 1 broker available: no load shedding will be performed. Skipping.");
return;
}
final Set<UnloadDecision> decisions = namespaceUnloadStrategy
.findBundlesForUnloading(context, recentlyUnloadedBundles, recentlyUnloadedBrokers);
if (debugMode) {
log.info("[{}] Unload decision result: {}",
namespaceUnloadStrategy.getClass().getSimpleName(), decisions);
}
if (decisions.isEmpty()) {
if (debugMode) {
log.info("[{}] Unload decision unloads is empty. Skipping.",
namespaceUnloadStrategy.getClass().getSimpleName());
}
return;
}
List<CompletableFuture<Void>> futures = new ArrayList<>();
unloadBrokers.clear();
decisions.forEach(decision -> {
if (decision.getLabel() == Success) {
Unload unload = decision.getUnload();
log.info("[{}] Unloading bundle: {}",
namespaceUnloadStrategy.getClass().getSimpleName(), unload);
futures.add(unloadManager.waitAsync(channel.publishUnloadEventAsync(unload),
unload.serviceUnit(), decision, asyncOpTimeoutMs, TimeUnit.MILLISECONDS)
.thenAccept(__ -> {
unloadBrokers.add(unload.sourceBroker());
recentlyUnloadedBundles.put(unload.serviceUnit(), System.currentTimeMillis());
recentlyUnloadedBrokers.put(unload.sourceBroker(), System.currentTimeMillis());
}));
}
});
FutureUtil.waitForAll(futures)
.whenComplete((__, ex) -> counter.updateUnloadBrokerCount(unloadBrokers.size()))
.get(asyncOpTimeoutMs, TimeUnit.MILLISECONDS);
} catch (Exception ex) {
log.error("[{}] Namespace unload has exception.",
namespaceUnloadStrategy.getClass().getSimpleName(), ex);
} finally {
if (counter.updatedAt() > counterLastUpdatedAt) {
unloadMetrics.set(counter.toMetrics(pulsar.getAdvertisedAddress()));
counterLastUpdatedAt = counter.updatedAt();
}
}
}
} | @Test(timeOut = 30 * 1000)
public void testExecuteMoreThenOnceWhenFirstNotDone() throws InterruptedException {
AtomicReference<List<Metrics>> reference = new AtomicReference<>();
UnloadCounter counter = new UnloadCounter();
LoadManagerContext context = setupContext();
BrokerRegistry registry = context.brokerRegistry();
ServiceUnitStateChannel channel = mock(ServiceUnitStateChannel.class);
UnloadManager unloadManager = mock(UnloadManager.class);
PulsarService pulsar = mock(PulsarService.class);
NamespaceUnloadStrategy unloadStrategy = mock(NamespaceUnloadStrategy.class);
doReturn(CompletableFuture.completedFuture(true)).when(channel).isChannelOwnerAsync();
@Cleanup("shutdownNow")
ExecutorService executor = Executors.newFixedThreadPool(1);
doAnswer(__ -> CompletableFuture.supplyAsync(() -> {
try {
// Delay 1 second to finish.
TimeUnit.SECONDS.sleep(1);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
return Lists.newArrayList("broker-1", "broker-2");
}, executor)).when(registry).getAvailableBrokersAsync();
UnloadScheduler scheduler = new UnloadScheduler(pulsar, loadManagerExecutor, unloadManager, context,
channel, unloadStrategy, counter, reference);
@Cleanup("shutdownNow")
ExecutorService executorService = Executors.newFixedThreadPool(5);
CountDownLatch latch = new CountDownLatch(5);
for (int i = 0; i < 5; i++) {
executorService.execute(() -> {
scheduler.execute();
latch.countDown();
});
}
latch.await();
verify(registry, times(5)).getAvailableBrokersAsync();
} |
public static <T> Values<T> of(Iterable<T> elems) {
return new Values<>(elems, Optional.absent(), Optional.absent(), false);
} | @Test
public void testPolymorphicType() throws Exception {
thrown.expect(RuntimeException.class);
thrown.expectMessage(Matchers.containsString("Unable to infer a coder"));
// Create won't infer a default coder in this case.
p.apply(Create.of(new Record(), new Record2()));
p.run();
} |
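The usual escape hatch for this inference failure is pinning an explicit coder for the common supertype; a hypothetical sketch, assuming Record2 extends Record and Record is Serializable:
// bypass coder inference with an explicit coder for the base type
p.apply(Create.of(new Record(), new Record2())
        .withCoder(SerializableCoder.of(Record.class)));
p.run();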
@Nullable
@Override
public RecordAndPosition<E> next() {
final RecordAndPosition<E> next = this.element;
this.element = null;
return next;
} | @Test
void testEmptyConstruction() {
final SingletonResultIterator<Object> iter = new SingletonResultIterator<>();
assertThat(iter.next()).isNull();
} |
public synchronized @Nullable WorkItemServiceState reportSuccess() throws IOException {
checkState(!finalStateSent, "cannot reportSuccess after sending a final state");
checkState(worker != null, "setWorker should be called before reportSuccess");
if (wasAskedToAbort) {
LOG.info("Service already asked to abort work item, not reporting ignored progress.");
return null;
}
WorkItemStatus status = createStatusUpdate(true);
if (worker instanceof SourceOperationExecutor) {
// TODO: Find out a generic way for the DataflowWorkExecutor to report work-specific results
// into the work update.
SourceOperationResponse response = ((SourceOperationExecutor) worker).getResponse();
if (response != null) {
status.setSourceOperationResponse(response);
}
}
LOG.info("Success processing work item {}", uniqueWorkId());
return execute(status);
} | @Test
public void reportSuccess() throws IOException {
when(worker.extractMetricUpdates()).thenReturn(Collections.emptyList());
statusClient.setWorker(worker, executionContext);
statusClient.reportSuccess();
verify(workUnitClient).reportWorkItemStatus(statusCaptor.capture());
WorkItemStatus workStatus = statusCaptor.getValue();
assertThat(workStatus.getWorkItemId(), equalTo(Long.toString(WORK_ID)));
assertThat(workStatus.getCompleted(), equalTo(true));
assertThat(workStatus.getReportIndex(), equalTo(INITIAL_REPORT_INDEX));
assertThat(workStatus.getErrors(), nullValue());
} |
public PublisherAgreement signPublisherAgreement(UserData user) {
checkApiUrl();
var eclipseToken = checkEclipseToken(user);
var headers = new HttpHeaders();
headers.setContentType(MediaType.APPLICATION_JSON);
headers.setBearerAuth(eclipseToken.accessToken);
headers.setAccept(Arrays.asList(MediaType.APPLICATION_JSON));
var data = new SignAgreementParam(publisherAgreementVersion, user.getLoginName());
var request = new HttpEntity<>(data, headers);
var requestUrl = UrlUtil.createApiUrl(eclipseApiUrl, "openvsx", "publisher_agreement");
try {
var json = restTemplate.postForEntity(requestUrl, request, String.class);
// The request was successful: reactivate all previously published extensions
extensions.reactivateExtensions(user);
// Parse the response and store the publisher agreement metadata
return parseAgreementResponse(json);
} catch (RestClientException exc) {
String message = exc.getMessage();
var statusCode = HttpStatus.INTERNAL_SERVER_ERROR;
if (exc instanceof HttpStatusCodeException) {
var excStatus = ((HttpStatusCodeException) exc).getStatusCode();
// The endpoint yields 409 if the specified user has already signed a publisher agreement
if (excStatus == HttpStatus.CONFLICT) {
message = "A publisher agreement is already present for user " + user.getLoginName() + ".";
statusCode = HttpStatus.BAD_REQUEST;
} else if (excStatus == HttpStatus.BAD_REQUEST) {
var matcher = STATUS_400_MESSAGE.matcher(exc.getMessage());
if (matcher.matches()) {
message = matcher.group("message");
}
}
}
if (statusCode == HttpStatus.INTERNAL_SERVER_ERROR) {
message = "Request for signing publisher agreement failed: " + message;
}
String payload;
try {
payload = objectMapper.writeValueAsString(data);
} catch (JsonProcessingException exc2) {
payload = "<" + exc2.getMessage() + ">";
}
logger.error("Post request failed with URL: " + requestUrl + " Payload: " + payload, exc);
throw new ErrorResultException(message, statusCode);
}
} | @Test
public void testSignPublisherAgreement() throws Exception {
var user = mockUser();
Mockito.when(restTemplate.postForEntity(any(String.class), any(), eq(String.class)))
.thenReturn(mockAgreementResponse());
Mockito.when(repositories.findVersionsByUser(user, false))
.thenReturn(Streamable.empty());
var agreement = eclipse.signPublisherAgreement(user);
assertThat(agreement).isNotNull();
assertThat(agreement.isActive).isEqualTo(true);
assertThat(agreement.documentId).isEqualTo("abcd");
assertThat(agreement.version).isEqualTo("1");
assertThat(agreement.timestamp).isEqualTo(LocalDateTime.of(2020, 10, 9, 5, 10, 32));
} |
@Override
public SchemaResult getValueSchema(
final Optional<String> topicName,
final Optional<Integer> schemaId,
final FormatInfo expectedFormat,
final SerdeFeatures serdeFeatures
) {
return getSchema(topicName, schemaId, expectedFormat, serdeFeatures, false);
} | @Test
public void shouldThrowFromGetValueWithIdSchemaOnOtherException() throws Exception {
// Given:
when(srClient.getSchemaBySubjectAndId(any(), anyInt()))
.thenThrow(new IOException("boom"));
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> supplier.getValueSchema(Optional.of(TOPIC_NAME),
Optional.of(42), expectedFormat, SerdeFeatures.of())
);
// Then:
assertThat(e.getMessage(), containsString("Schema registry fetch for topic "
+ "value request failed for topic: " + TOPIC_NAME));
} |
@Override
public void transitionToActive(final StreamTask streamTask, final RecordCollector recordCollector, final ThreadCache newCache) {
if (stateManager.taskType() != TaskType.ACTIVE) {
throw new IllegalStateException("Tried to transition processor context to active but the state manager's " +
"type was " + stateManager.taskType());
}
this.streamTask = streamTask;
this.collector = recordCollector;
this.cache = newCache;
addAllFlushListenersToNewCache();
} | @Test
public void globalWindowStoreShouldBeReadOnly() {
foreachSetUp();
when(stateManager.taskType()).thenReturn(TaskType.ACTIVE);
when(stateManager.getGlobalStore(anyString())).thenReturn(null);
final WindowStore<String, Long> windowStore = mock(WindowStore.class);
when(stateManager.getGlobalStore("GlobalWindowStore")).thenAnswer(answer -> windowStoreMock(windowStore));
context = buildProcessorContextImpl(streamsConfig, stateManager);
final StreamTask task = mock(StreamTask.class);
context.transitionToActive(task, null, null);
mockProcessorNodeWithLocalKeyValueStore();
doTest("GlobalWindowStore", (Consumer<WindowStore<String, Long>>) store -> {
verifyStoreCannotBeInitializedOrClosed(store);
checkThrowsUnsupportedOperation(store::flush, "flush()");
checkThrowsUnsupportedOperation(() -> store.put("1", 1L, 1L), "put()");
assertEquals(iters.get(0), store.fetchAll(0L, 0L));
assertEquals(windowStoreIter, store.fetch(KEY, 0L, 1L));
assertEquals(iters.get(1), store.fetch(KEY, KEY, 0L, 1L));
assertEquals((Long) VALUE, store.fetch(KEY, 1L));
assertEquals(iters.get(2), store.all());
});
} |
public List<PartitionInfo> getTopicMetadata(String topic, boolean allowAutoTopicCreation, Timer timer) {
MetadataRequest.Builder request = new MetadataRequest.Builder(Collections.singletonList(topic), allowAutoTopicCreation);
Map<String, List<PartitionInfo>> topicMetadata = getTopicMetadata(request, timer);
return topicMetadata.get(topic);
} | @Test
public void testGetTopicMetadataInvalidTopic() {
buildFetcher();
assignFromUser(singleton(tp0));
client.prepareResponse(newMetadataResponse(Errors.INVALID_TOPIC_EXCEPTION));
assertThrows(InvalidTopicException.class, () -> topicMetadataFetcher.getTopicMetadata(topicName, true, time.timer(5000L)));
} |
@Override
public Set<Long> calculateUsers(DelegateExecution execution, String param) {
Set<Long> postIds = StrUtils.splitToLongSet(param);
List<AdminUserRespDTO> users = adminUserApi.getUserListByPostIds(postIds);
return convertSet(users, AdminUserRespDTO::getId);
} | @Test
public void testCalculateUsers() {
// Prepare the parameter
String param = "1,2";
// Mock the method
List<AdminUserRespDTO> users = convertList(asSet(11L, 22L),
id -> new AdminUserRespDTO().setId(id));
when(adminUserApi.getUserListByPostIds(eq(asSet(1L, 2L)))).thenReturn(users);
// Invoke
Set<Long> results = strategy.calculateUsers(null, param);
// Assert
assertEquals(asSet(11L, 22L), results);
} |
public int[] startBatchWithRunStrategy(
@NotNull String workflowId,
@NotNull RunStrategy runStrategy,
List<WorkflowInstance> instances) {
if (instances == null || instances.isEmpty()) {
return new int[0];
}
return withMetricLogError(
() -> {
Set<String> uuids =
instances.stream().map(WorkflowInstance::getWorkflowUuid).collect(Collectors.toSet());
return withRetryableTransaction(
conn -> {
final long nextInstanceId = getLatestInstanceId(conn, workflowId) + 1;
if (dedupAndCheckIfAllDuplicated(conn, workflowId, uuids)) {
return new int[instances.size()];
}
long lastAssignedInstanceId =
completeInstancesInit(conn, nextInstanceId, uuids, instances);
int[] res;
switch (runStrategy.getRule()) {
case SEQUENTIAL:
case PARALLEL:
case STRICT_SEQUENTIAL:
res = enqueueInstances(conn, workflowId, instances);
break;
case FIRST_ONLY:
res = startFirstOnlyInstances(conn, workflowId, instances);
break;
case LAST_ONLY:
res = startLastOnlyInstances(conn, workflowId, instances);
break;
default:
throw new MaestroInternalError(
"When startBatch, run strategy [%s] is not supported.", runStrategy);
}
if (lastAssignedInstanceId >= nextInstanceId) {
updateLatestInstanceId(conn, workflowId, lastAssignedInstanceId);
}
return res;
});
},
"startBatchWithRunStrategy",
"Failed to start [{}] workflow instances for [{}] with run strategy [{}]",
instances.size(),
workflowId,
runStrategy);
} | @Test
public void testStartBatchRunStrategyWithQueue() throws Exception {
List<WorkflowInstance> batch = prepareBatch();
int[] res =
runStrategyDao.startBatchWithRunStrategy(
TEST_WORKFLOW_ID, RunStrategy.create("PARALLEL"), batch);
assertArrayEquals(new int[] {1, 0, 1}, res);
assertEquals(1, batch.get(0).getWorkflowInstanceId());
assertEquals(0, batch.get(1).getWorkflowInstanceId());
assertEquals(2, batch.get(2).getWorkflowInstanceId());
WorkflowInstance previous = dao.getWorkflowInstanceRun(TEST_WORKFLOW_ID, 1, 1);
WorkflowInstance latestRun = dao.getLatestWorkflowInstanceRun(TEST_WORKFLOW_ID, 2);
assertEquals(1, previous.getWorkflowInstanceId());
assertEquals("wfi1-uuid", previous.getWorkflowUuid());
assertEquals(WorkflowInstance.Status.CREATED, previous.getStatus());
assertEquals(2, latestRun.getWorkflowInstanceId());
assertEquals("wfi3-uuid", latestRun.getWorkflowUuid());
assertEquals(WorkflowInstance.Status.CREATED, latestRun.getStatus());
verifyPublish(1, 0, 0, 0, 0);
MaestroTestHelper.removeWorkflowInstance(dataSource, TEST_WORKFLOW_ID, 2);
} |
@Override
public User registerUser(RegisterRequest registerRequest) {
return userServiceClient.register(registerRequest).getBody();
} | @Test
void registerUser_ValidRegisterRequest_ReturnsUser() {
// Given
RegisterRequest registerRequest = RegisterRequest.builder()
.email("valid.email@example.com")
.password("validPassword123")
.firstName("John")
.lastName("Doe")
.phoneNumber("1234567890100")
.role("user")
.build();
User expectedUser = User.builder()
.id(UUID.randomUUID().toString())
.email("valid.email@example.com")
.firstName("John")
.lastName("Doe")
.phoneNumber("1234567890100")
.userStatus(UserStatus.ACTIVE)
.userType(UserType.USER)
.build();
// When
when(userServiceClient.register(any(RegisterRequest.class)))
.thenReturn(ResponseEntity.ok(expectedUser));
// Then
User result = registerService.registerUser(registerRequest);
assertNotNull(result);
assertEquals(expectedUser, result);
// Verify
verify(userServiceClient, times(1)).register(any(RegisterRequest.class));
} |
public static String[] split(String splittee, String splitChar, boolean truncate) { //NOSONAR
if (splittee == null || splitChar == null) {
return new String[0];
}
final String EMPTY_ELEMENT = "";
int spot;
final int splitLength = splitChar.length();
final String adjacentSplit = splitChar + splitChar;
final int adjacentSplitLength = adjacentSplit.length();
if (truncate) {
while ((spot = splittee.indexOf(adjacentSplit)) != -1) {
splittee = splittee.substring(0, spot + splitLength)
+ splittee.substring(spot + adjacentSplitLength, splittee.length());
}
if (splittee.startsWith(splitChar)) {
splittee = splittee.substring(splitLength);
}
if (splittee.endsWith(splitChar)) { // Remove trailing splitter
splittee = splittee.substring(0, splittee.length() - splitLength);
}
}
List<String> returns = new ArrayList<>();
final int length = splittee.length(); // This is the new length
int start = 0;
spot = 0;
while (start < length && (spot = splittee.indexOf(splitChar, start)) > -1) {
if (spot > 0) {
returns.add(splittee.substring(start, spot));
} else {
returns.add(EMPTY_ELEMENT);
}
start = spot + splitLength;
}
if (start < length) {
returns.add(splittee.substring(start));
} else if (spot == length - splitLength) { // Found splitChar at end of line
returns.add(EMPTY_ELEMENT);
}
return returns.toArray(new String[returns.size()]);
} | @Test
public void testSplitStringStringFalseWithTrailingSplitChars() {
// With truncate=false, trailing split characters produce empty elements
assertThat("Include the trailing split chars", JOrphanUtils.split("a,bc,,", ",", false),
CoreMatchers.equalTo(new String[]{"a", "bc", "", ""}));
} |
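Tracing the truncate branch of the focal method by hand: ",," collapses to "," and the trailing "," is stripped, so "a,bc,," reduces to "a,bc" before splitting. A sketch of the complementary test:
@Test
public void testSplitStringStringTrueWithTrailingSplitChars() {
    // truncate=true drops the empty elements that truncate=false preserves
    assertThat("Drop the trailing split chars", JOrphanUtils.split("a,bc,,", ",", true),
            CoreMatchers.equalTo(new String[]{"a", "bc"}));
}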
@Override
public Messages process(Messages messages) {
try (Timer.Context ignored = executionTime.time()) {
final State latestState = stateUpdater.getLatestState();
if (latestState.enableRuleMetrics()) {
return process(messages, new RuleMetricsListener(metricRegistry), latestState);
}
return process(messages, new NoopInterpreterListener(), latestState);
}
} | @Test
@SuppressForbidden("Allow using default thread factory")
public void testMetrics() {
final RuleMetricsConfigService ruleMetricsConfigService = mock(RuleMetricsConfigService.class);
when(ruleMetricsConfigService.get()).thenReturn(RuleMetricsConfigDto.createDefault());
final ClusterEventBus clusterEventBus = new ClusterEventBus("cluster-event-bus", Executors.newSingleThreadExecutor());
final RuleService ruleService = new InMemoryRuleService(clusterEventBus);
ruleService.save(RuleDao.create("abc",
"title",
"description",
"rule \"match_all\"\n" +
"when true\n" +
"then\n" +
"end",
Tools.nowUTC(),
null, null, null)
);
final PipelineService pipelineService = new InMemoryPipelineService(new ClusterEventBus());
pipelineService.save(PipelineDao.create("cde", "title", "description",
"pipeline \"pipeline\"\n" +
"stage 0 match all\n" +
" rule \"match_all\";\n" +
"stage 1 match all\n" +
" rule \"match_all\";\n" +
"end\n",
Tools.nowUTC(),
null)
);
final PipelineStreamConnectionsService pipelineStreamConnectionsService = new InMemoryPipelineStreamConnectionsService(clusterEventBus);
pipelineStreamConnectionsService.save(PipelineConnections.create(null,
DEFAULT_STREAM_ID,
Collections.singleton("cde")));
final FunctionRegistry functionRegistry = new FunctionRegistry(Collections.emptyMap());
final PipelineRuleParser parser = new PipelineRuleParser(functionRegistry);
final MetricRegistry metricRegistry = new MetricRegistry();
final ConfigurationStateUpdater stateUpdater = new ConfigurationStateUpdater(ruleService,
pipelineService,
pipelineStreamConnectionsService,
parser,
(config, ruleParser) -> new PipelineResolver(ruleParser, config),
ruleMetricsConfigService,
metricRegistry,
Executors.newScheduledThreadPool(1),
mock(EventBus.class),
(currentPipelines, streamPipelineConnections, ruleMetricsConfig) -> new PipelineInterpreter.State(currentPipelines, streamPipelineConnections, ruleMetricsConfig, new MetricRegistry(), 1, true)
);
final PipelineInterpreter interpreter = new PipelineInterpreter(
mock(MessageQueueAcknowledger.class),
metricRegistry,
stateUpdater);
interpreter.process(messageInDefaultStream("", ""));
final SortedMap<String, Meter> meters = metricRegistry.getMeters((name, metric) -> name.startsWith(name(Pipeline.class, "cde")) || name.startsWith(name(Rule.class, "abc")));
assertThat(meters.keySet()).containsExactlyInAnyOrder(
name(Pipeline.class, "cde", "executed"),
name(Pipeline.class, "cde", "stage", "0", "executed"),
name(Pipeline.class, "cde", "stage", "1", "executed"),
name(Rule.class, "abc", "executed"),
name(Rule.class, "abc", "cde", "0", "executed"),
name(Rule.class, "abc", "cde", "1", "executed"),
name(Rule.class, "abc", "matched"),
name(Rule.class, "abc", "cde", "0", "matched"),
name(Rule.class, "abc", "cde", "1", "matched"),
name(Rule.class, "abc", "not-matched"),
name(Rule.class, "abc", "cde", "0", "not-matched"),
name(Rule.class, "abc", "cde", "1", "not-matched"),
name(Rule.class, "abc", "failed"),
name(Rule.class, "abc", "cde", "0", "failed"),
name(Rule.class, "abc", "cde", "1", "failed")
);
assertThat(meters.get(name(Pipeline.class, "cde", "executed")).getCount()).isEqualTo(1L);
assertThat(meters.get(name(Pipeline.class, "cde", "stage", "0", "executed")).getCount()).isEqualTo(1L);
assertThat(meters.get(name(Pipeline.class, "cde", "stage", "1", "executed")).getCount()).isEqualTo(1L);
assertThat(meters.get(name(Rule.class, "abc", "executed")).getCount()).isEqualTo(2L);
assertThat(meters.get(name(Rule.class, "abc", "cde", "0", "executed")).getCount()).isEqualTo(1L);
assertThat(meters.get(name(Rule.class, "abc", "cde", "1", "executed")).getCount()).isEqualTo(1L);
assertThat(meters.get(name(Rule.class, "abc", "matched")).getCount()).isEqualTo(2L);
assertThat(meters.get(name(Rule.class, "abc", "cde", "0", "matched")).getCount()).isEqualTo(1L);
assertThat(meters.get(name(Rule.class, "abc", "cde", "1", "matched")).getCount()).isEqualTo(1L);
assertThat(meters.get(name(Rule.class, "abc", "not-matched")).getCount()).isEqualTo(0L);
assertThat(meters.get(name(Rule.class, "abc", "cde", "0", "not-matched")).getCount()).isEqualTo(0L);
assertThat(meters.get(name(Rule.class, "abc", "cde", "1", "not-matched")).getCount()).isEqualTo(0L);
assertThat(meters.get(name(Rule.class, "abc", "failed")).getCount()).isEqualTo(0L);
assertThat(meters.get(name(Rule.class, "abc", "cde", "0", "failed")).getCount()).isEqualTo(0L);
assertThat(meters.get(name(Rule.class, "abc", "cde", "1", "failed")).getCount()).isEqualTo(0L);
} |
public boolean isGreaterThanOrEqual(String clusterId, Version version) {
return isGreaterThanOrEqual(() -> clusterVersionGetCommander.getCassandraVersion(clusterId), version);
} | @Test
void isGreaterThanOrEqual() {
// given
BDDMockito.when(clusterVersionGetCommander.getCassandraVersion(CLUSTER_ID)).thenReturn(Version.parse("4.0.1"));
// when
assertThat(clusterVersionEvaluator.isGreaterThanOrEqual(CLUSTER_ID, Version.parse("4.0.0"))).isTrue();
assertThat(clusterVersionEvaluator.isGreaterThanOrEqual(CLUSTER_ID, Version.parse("4.0.1"))).isTrue();
assertThat(clusterVersionEvaluator.isGreaterThanOrEqual(CLUSTER_ID, Version.parse("4.0.2"))).isFalse();
} |
@SuppressWarnings("unchecked")
public static <T> AgentServiceLoader<T> getServiceLoader(final Class<T> service) {
return (AgentServiceLoader<T>) LOADERS.computeIfAbsent(service, AgentServiceLoader::new);
} | @Test
void assertGetServiceLoaderWithEmptyInstances() {
assertTrue(AgentServiceLoader.getServiceLoader(AgentServiceEmptySPIFixture.class).getServices().isEmpty());
} |
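The computeIfAbsent in the focal method also makes the loader a per-interface singleton, which is cheap to assert with the same fixture; a minimal sketch:
@Test
void assertGetServiceLoaderReturnsCachedInstance() {
    // LOADERS.computeIfAbsent guarantees exactly one loader per service interface
    assertSame(AgentServiceLoader.getServiceLoader(AgentServiceEmptySPIFixture.class),
            AgentServiceLoader.getServiceLoader(AgentServiceEmptySPIFixture.class));
}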
public Optional<Object> evaluate(final Map<String, Object> columnPairsMap, final String outputColumn,
final String regexField) {
boolean matching = true;
boolean isRegex =
regexField != null && columnValues.containsKey(regexField) && (boolean) columnValues.get(regexField);
for (Map.Entry<String, Object> columnPairEntry : columnPairsMap.entrySet()) {
Object value = columnValues.get(columnPairEntry.getKey());
matching = isRegex ? isRegexMatching(value.toString(), (String) columnPairEntry.getValue()) :
isMatching(value, columnPairEntry.getValue());
if (!matching) {
break;
}
}
return matching ? Optional.ofNullable(columnValues.get(outputColumn)) : Optional.empty();
} | @Test
void evaluateKeyFoundMultipleNotMatching() {
KiePMMLRow kiePMMLRow = new KiePMMLRow(COLUMN_VALUES);
Map<String, Object> columnPairsMap = IntStream.range(0, 3).boxed()
.collect(Collectors.toMap(i -> "KEY-" + i,
integer -> integer));
columnPairsMap.put("NOT-KEY", 4);
Optional<Object> retrieved = kiePMMLRow.evaluate(columnPairsMap, "KEY-0", null);
assertThat(retrieved).isNotPresent();
} |
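For the positive path of evaluate, when every pair matches the output column's value is returned; a sketch assuming COLUMN_VALUES maps "KEY-i" to i, as the not-matching test above implies:
@Test
void evaluateKeyFoundAllMatching() {
    KiePMMLRow kiePMMLRow = new KiePMMLRow(COLUMN_VALUES);
    Map<String, Object> columnPairsMap = IntStream.range(0, 3).boxed()
            .collect(Collectors.toMap(i -> "KEY-" + i,
                                      integer -> integer));
    // all pairs match, so the value stored under the output column comes back
    Optional<Object> retrieved = kiePMMLRow.evaluate(columnPairsMap, "KEY-0", null);
    assertThat(retrieved).contains(0);
}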
@Override
public int length() {
return 1;
} | @Test
public void testLength() {
System.out.println("length");
ExponentialDistribution instance = new ExponentialDistribution(1.0);
instance.rand();
assertEquals(1, instance.length());
} |
public static Config getConfig(
Configuration configuration, @Nullable HostAndPort externalAddress) {
return getConfig(
configuration,
externalAddress,
null,
PekkoUtils.getForkJoinExecutorConfig(
ActorSystemBootstrapTools.getForkJoinExecutorConfiguration(configuration)));
} | @Test
void getConfigDefaultsStartupTimeoutTo10TimesOfAskTimeout() {
final Configuration configuration = new Configuration();
configuration.set(RpcOptions.ASK_TIMEOUT_DURATION, Duration.ofMillis(100));
final Config config =
PekkoUtils.getConfig(configuration, new HostAndPort("localhost", 31337));
assertThat(config.getString("pekko.remote.startup-timeout")).isEqualTo("1000ms");
} |
@Override
public String getOnuStatistics(String target) {
DriverHandler handler = handler();
NetconfController controller = handler.get(NetconfController.class);
MastershipService mastershipService = handler.get(MastershipService.class);
DeviceId ncDeviceId = handler.data().deviceId();
checkNotNull(controller, "Netconf controller is null");
String reply = null;
String[] onuId = null;
if (!mastershipService.isLocalMaster(ncDeviceId)) {
log.warn("Not master for {} Use {} to execute command",
ncDeviceId,
mastershipService.getMasterFor(ncDeviceId));
return null;
}
if (target != null) {
onuId = checkIdString(target);
if (onuId == null) {
log.error("Failed to check ID: {}", target);
return null;
}
}
try {
StringBuilder request = new StringBuilder();
request.append(VOLT_NE_OPEN + VOLT_NE_NAMESPACE);
request.append(ANGLE_RIGHT + NEW_LINE);
request.append(buildStartTag(VOLT_STATISTICS));
if (onuId != null) {
request.append(buildStartTag(ONU_STATISTICS))
.append(buildStartTag(ONU_GEM_STATS))
.append(buildStartTag(GEM_STATS))
.append(buildStartTag(PONLINK_ID, false))
.append(onuId[FIRST_PART])
.append(buildEndTag(PONLINK_ID));
if (onuId.length > ONE) {
request.append(buildStartTag(ONU_ID, false))
.append(onuId[SECOND_PART])
.append(buildEndTag(ONU_ID));
}
request.append(buildEndTag(GEM_STATS))
.append(buildEndTag(ONU_GEM_STATS));
request.append(buildStartTag(ONU_ETH_STATS))
.append(buildStartTag(ETH_STATS))
.append(buildStartTag(PONLINK_ID, false))
.append(onuId[FIRST_PART])
.append(buildEndTag(PONLINK_ID));
if (onuId.length > ONE) {
request.append(buildStartTag(ONU_ID, false))
.append(onuId[SECOND_PART])
.append(buildEndTag(ONU_ID));
}
request.append(buildEndTag(ETH_STATS))
.append(buildEndTag(ONU_ETH_STATS))
.append(buildEndTag(ONU_STATISTICS));
} else {
request.append(buildEmptyTag(ONU_STATISTICS));
}
request.append(buildEndTag(VOLT_STATISTICS))
.append(VOLT_NE_CLOSE);
reply = controller
.getDevicesMap()
.get(ncDeviceId)
.getSession()
.get(request.toString(), REPORT_ALL);
} catch (NetconfException e) {
log.error("Cannot communicate to device {} exception {}", ncDeviceId, e);
}
return reply;
} | @Test
public void testInvalidGetOnuStatsInput() throws Exception {
String reply;
String target;
for (int i = ZERO; i < INVALID_GET_STATS_TCS.length; i++) {
target = INVALID_GET_STATS_TCS[i];
reply = voltConfig.getOnuStatistics(target);
assertNull("Incorrect response for INVALID_GET_STATS_TCS", reply);
}
} |
@Override
public InterpreterResult interpret(String st, InterpreterContext context) {
return helper.interpret(session, st, context);
} | @Test
void should_execute_prepared_and_bound_statements() {
// Given
String queries = "@prepare[ps]=INSERT INTO zeppelin.prepared(key,val) VALUES(?,?)\n" +
"@prepare[select]=SELECT * FROM zeppelin.prepared WHERE key=:key\n" +
"@bind[ps]='myKey','myValue'\n" +
"@bind[select]='myKey'";
// When
final InterpreterResult actual = interpreter.interpret(queries, intrContext);
// Then
assertEquals(Code.SUCCESS, actual.code());
assertEquals("key\tval\nmyKey\tmyValue\n", actual.message().get(0).getData());
} |
public final AccessControlEntry entry() {
return entry;
} | @Test
public void shouldThrowOnAnyPatternType() {
assertThrows(IllegalArgumentException.class,
() -> new AclBinding(new ResourcePattern(ResourceType.TOPIC, "foo", PatternType.ANY), ACL1.entry()));
} |
public static String getStringOrNull(String property, JsonNode node) {
if (!node.has(property)) {
return null;
}
JsonNode pNode = node.get(property);
if (pNode != null && pNode.isNull()) {
return null;
}
return getString(property, node);
} | @Test
public void getStringOrNull() throws JsonProcessingException {
assertThat(JsonUtil.getStringOrNull("x", JsonUtil.mapper().readTree("{}"))).isNull();
assertThat(JsonUtil.getStringOrNull("x", JsonUtil.mapper().readTree("{\"x\": \"23\"}")))
.isEqualTo("23");
assertThat(JsonUtil.getStringOrNull("x", JsonUtil.mapper().readTree("{\"x\": null}"))).isNull();
assertThatThrownBy(
() -> JsonUtil.getStringOrNull("x", JsonUtil.mapper().readTree("{\"x\": 23}")))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Cannot parse to a string value: x: 23");
} |
@Override
public boolean containsKey(Object key) {
return map1.containsKey(key) || map2.containsKey(key);
} | @Test
public void testContainsKey() {
Map<String, String> map1 = new HashMap<>();
map1.put("key1", "value1");
Map<String, String> map2 = new HashMap<>();
map2.put("key2", "value2");
Map<String, String> aggregatingMap = AggregatingMap.aggregate(map1, map2);
assertTrue(aggregatingMap.containsKey("key1"));
assertTrue(aggregatingMap.containsKey("key2"));
assertFalse(aggregatingMap.containsKey("key3"));
} |
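A minimal sketch of how such an aggregating view could be assembled; this is an assumption about AggregatingMap's shape (only the parts the focal method and test rely on are shown), not its actual implementation:

import java.util.AbstractMap;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

// Hedged sketch: a read-only view over two backing maps.
final class AggregatingMapSketch<K, V> extends AbstractMap<K, V> {
    private final Map<K, V> map1;
    private final Map<K, V> map2;

    private AggregatingMapSketch(Map<K, V> map1, Map<K, V> map2) {
        this.map1 = map1;
        this.map2 = map2;
    }

    static <K, V> Map<K, V> aggregate(Map<K, V> map1, Map<K, V> map2) {
        return new AggregatingMapSketch<>(map1, map2);
    }

    @Override
    public boolean containsKey(Object key) {
        return map1.containsKey(key) || map2.containsKey(key);
    }

    @Override
    public Set<Entry<K, V>> entrySet() {
        // Simplified: snapshot merge; assumes disjoint key sets, as in the test.
        Map<K, V> merged = new HashMap<>(map1);
        merged.putAll(map2);
        return merged.entrySet();
    }
}

Keeping containsKey as a short-circuit over both backing maps avoids materialising the merged view for simple membership checks.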
private BinPacking() {} | @Test
  void testBinPacking() {
    assertThat(pack(asList(1, 2, 3, 4, 5), 3))
        .as("Should pack the first 2 values")
        .isEqualTo(asList(asList(1, 2), singletonList(3), singletonList(4), singletonList(5)));
    assertThat(pack(asList(1, 2, 3, 4, 5), 5))
        .as("Should pack the first 2 values")
        .isEqualTo(asList(asList(1, 2), singletonList(3), singletonList(4), singletonList(5)));
    assertThat(pack(asList(1, 2, 3, 4, 5), 6))
        .as("Should pack the first 3 values")
        .isEqualTo(asList(asList(1, 2, 3), singletonList(4), singletonList(5)));
    assertThat(pack(asList(1, 2, 3, 4, 5), 8))
        .as("Should pack the first 3 values")
        .isEqualTo(asList(asList(1, 2, 3), singletonList(4), singletonList(5)));
    assertThat(pack(asList(1, 2, 3, 4, 5), 9))
        .as("Should pack the first 3 values, last 2 values")
        .isEqualTo(asList(asList(1, 2, 3), asList(4, 5)));
    assertThat(pack(asList(1, 2, 3, 4, 5), 10))
        .as("Should pack the first 4 values")
        .isEqualTo(asList(asList(1, 2, 3, 4), singletonList(5)));
    assertThat(pack(asList(1, 2, 3, 4, 5), 14))
        .as("Should pack the first 4 values")
        .isEqualTo(asList(asList(1, 2, 3, 4), singletonList(5)));
    assertThat(pack(asList(1, 2, 3, 4, 5), 15))
        .as("Should pack the first 5 values")
        .isEqualTo(singletonList(asList(1, 2, 3, 4, 5)));
  }
public static InetAddress findFirstNonLoopbackAddress() {
InetAddress result = null;
try {
int lowest = Integer.MAX_VALUE;
for (Enumeration<NetworkInterface> nics = NetworkInterface.getNetworkInterfaces();
nics.hasMoreElements(); ) {
NetworkInterface ifc = nics.nextElement();
if (isUp(ifc)) {
LOG.debug("Testing interface: " + ifc.getDisplayName());
if (ifc.getIndex() >= lowest && result != null) {
continue;
} else {
lowest = ifc.getIndex();
}
if (!ignoreInterface(ifc.getDisplayName())) {
for (Enumeration<InetAddress> addrs = ifc.getInetAddresses(); addrs.hasMoreElements(); ) {
InetAddress address = addrs.nextElement();
boolean isLegalIpVersion =
InternetAddressUtil.PREFER_IPV6_ADDRESSES ? address instanceof Inet6Address
: address instanceof Inet4Address;
if (isLegalIpVersion && !address.isLoopbackAddress() && isPreferredAddress(address)) {
LOG.debug("Found non-loopback interface: " + ifc.getDisplayName());
result = address;
}
}
}
}
}
} catch (IOException ex) {
LOG.error("Cannot get first non-loopback address", ex);
}
if (result != null) {
return result;
}
try {
return InetAddress.getLocalHost();
} catch (UnknownHostException e) {
LOG.error("Unable to retrieve localhost", e);
}
return null;
} | @Test
void findFirstNonLoopbackAddress() {
InetAddress address = InetUtils.findFirstNonLoopbackAddress();
assertNotNull(address);
assertFalse(address.isLoopbackAddress());
} |
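findFirstNonLoopbackAddress leans on three predicates that are not shown here. Hedged stand-ins for them, purely to illustrate the contract each fulfils; the real Nacos versions are configuration-driven:

import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.SocketException;

// Hedged stand-ins, not the actual helpers: minimal policies that satisfy
// the contracts implied by the calls in findFirstNonLoopbackAddress.
final class InetPredicates {
    static boolean isUp(NetworkInterface ifc) throws SocketException {
        return ifc.isUp();
    }

    static boolean ignoreInterface(String interfaceName) {
        // Example policy (assumption): skip common virtual adapters; the real
        // implementation matches configurable ignore patterns.
        return interfaceName.startsWith("docker") || interfaceName.startsWith("veth");
    }

    static boolean isPreferredAddress(InetAddress address) {
        // Default policy (assumption): accept any candidate; the real version
        // can restrict candidates to configured preferred networks.
        return address != null;
    }
}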
@Override
public void deleteClient(ClientDetailsEntity client) throws InvalidClientException {
if (clientRepository.getById(client.getId()) == null) {
throw new InvalidClientException("Client with id " + client.getClientId() + " was not found");
}
// clean out any tokens that this client had issued
tokenRepository.clearTokensForClient(client);
// clean out any approved sites for this client
approvedSiteService.clearApprovedSitesForClient(client);
// clear out any whitelisted sites for this client
WhitelistedSite whitelistedSite = whitelistedSiteService.getByClientId(client.getClientId());
if (whitelistedSite != null) {
whitelistedSiteService.remove(whitelistedSite);
}
// clear out resource sets registered for this client
Collection<ResourceSet> resourceSets = resourceSetService.getAllForClient(client);
for (ResourceSet rs : resourceSets) {
resourceSetService.remove(rs);
}
// take care of the client itself
clientRepository.deleteClient(client);
statsService.resetCache();
} | @Test(expected = InvalidClientException.class)
public void deleteClient_badId() {
Long id = 12345L;
ClientDetailsEntity client = Mockito.mock(ClientDetailsEntity.class);
Mockito.when(client.getId()).thenReturn(id);
Mockito.when(clientRepository.getById(id)).thenReturn(null);
service.deleteClient(client);
} |