| focal_method (string, lengths 13 to 60.9k) | test_case (string, lengths 25 to 109k) |
|---|---|
public static boolean hasSchemePattern( String path ) {
return hasSchemePattern( path, PROVIDER_PATTERN_SCHEME );
} | @Test
public void testHasSchemeWithSpaces() {
String vfsFilename = "/tmp/This is a text file4551613284841905296.txt";
String vfsFilenameWithScheme = "hdfs://tmp/This is a text file4551613284841905296.txt";
boolean testVfsFilename = KettleVFS.hasSchemePattern( vfsFilename, PROVIDER_PATTERN_SCHEME );
assertFalse( testVfsFilename );
boolean testVfsFilenameWithScheme = KettleVFS.hasSchemePattern( vfsFilenameWithScheme, PROVIDER_PATTERN_SCHEME );
assertTrue( testVfsFilenameWithScheme );
} |
public InetSocketAddress getRpcServerAddress() {
return this.rpcAddress;
} | @Test
public void testRouterRpcWithNoSubclusters() throws IOException {
Router router = new Router();
router.init(new RouterConfigBuilder(conf).rpc().build());
router.start();
InetSocketAddress serverAddress = router.getRpcServerAddress();
DFSClient dfsClient = new DFSClient(serverAddress, conf);
try {
dfsClient.create("/test.txt", false);
fail("Create with no subclusters should fail");
} catch (RemoteException e) {
assertExceptionContains("Cannot find locations for /test.txt", e);
}
try {
dfsClient.datanodeReport(DatanodeReportType.LIVE);
fail("Get datanode reports with no subclusters should fail");
} catch (IOException e) {
assertExceptionContains("No remote locations available", e);
}
dfsClient.close();
router.stop();
router.close();
} |
public static Range<PartitionKey> createRange(String lowerBound, String upperBound, Column partitionColumn)
throws AnalysisException {
if (lowerBound == null && upperBound == null) {
return null;
}
PartitionValue lowerValue = new PartitionValue(lowerBound);
PartitionValue upperValue;
// Note: only the case where both bounds are null returns early above; a non-null
// lowerBound with a null upperBound would NPE on the next line, so callers are
// expected to pass both bounds (or MAX_VALUE) together.
if (upperBound.equalsIgnoreCase(MaxLiteral.MAX_VALUE.toString())) {
upperValue = PartitionValue.MAX_VALUE;
} else {
upperValue = new PartitionValue(upperBound);
}
PartitionKey lowerBoundPartitionKey = PartitionKey.createPartitionKey(Collections.singletonList(lowerValue),
Collections.singletonList(partitionColumn));
PartitionKey upperBoundPartitionKey = PartitionKey.createPartitionKey(Collections.singletonList(upperValue),
Collections.singletonList(partitionColumn));
return Range.closedOpen(lowerBoundPartitionKey, upperBoundPartitionKey);
} | @Test
public void testMappingRangeList() throws AnalysisException {
Map<String, Range<PartitionKey>> baseRangeMap = Maps.newHashMap();
Map<String, Range<PartitionKey>> result;
baseRangeMap.put("p202001", createRange("2020-01-01", "2020-02-01"));
baseRangeMap.put("p202002", createRange("2020-02-01", "2020-03-01"));
baseRangeMap.put("p202003", createRange("2020-03-01", "2020-04-01"));
baseRangeMap.put("p202004", createMaxValueRange("2020-04-01"));
result = toEagerMappingRanges(baseRangeMap, "month", PrimitiveType.DATE);
Assert.assertTrue(result.containsKey("p202004_999912"));
Assert.assertEquals(1, result.get("p202004_999912").upperEndpoint().getKeys().size());
Assert.assertEquals("9999-12-31", result.get("p202004_999912").upperEndpoint().getKeys().get(0).
getStringValue());
baseRangeMap.clear();
baseRangeMap.put("p202001", createRange("2020-01-01 12:00:25", "2020-02-01 20:01:59"));
baseRangeMap.put("p202002", createRange("2020-02-01 20:01:59", "2020-03-01 02:50:49"));
baseRangeMap.put("p202003", createRange("2020-03-01 02:50:49", "2020-04-01 01:05:06"));
baseRangeMap.put("p202004", createMaxValueRange("2020-04-01 01:05:06"));
result = toEagerMappingRanges(baseRangeMap, "hour", PrimitiveType.DATETIME);
Assert.assertEquals(2175, result.size());
} |
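A brief aside on the pair above (illustrative only, not part of the dataset): `Range.closedOpen` gives each partition an inclusive lower and exclusive upper endpoint, so adjacent partitions such as p202001 and p202002 can share the boundary "2020-02-01" without overlapping. A minimal sketch with Guava's `Range`:

```java
import com.google.common.collect.Range;

public class ClosedOpenDemo {
    public static void main(String[] args) {
        // Upper endpoint is exclusive, so 5 belongs to p2, not p1.
        Range<Integer> p1 = Range.closedOpen(1, 5);
        Range<Integer> p2 = Range.closedOpen(5, 9);
        System.out.println(p1.contains(4));                // true
        System.out.println(p1.contains(5));                // false: 5 starts p2
        System.out.println(p1.intersection(p2).isEmpty()); // true: they touch but don't overlap
    }
}
```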
protected void refreshDomain() {
Set<String> keySet = new HashSet<>();
for (Map.Entry<String, List<ConsumerConfig>> entry : notifyListeners.entrySet()) {
String directUrl = entry.getKey();
String[] providerStrs = StringUtils.splitWithCommaOrSemicolon(directUrl);
keySet.addAll(Arrays.asList(providerStrs));
}
for (String directUrl : keySet) {
ProviderInfo providerInfo = convertToProviderInfo(directUrl);
List<ProviderInfo> result = directUrl2IpUrl(providerInfo, domainCache.get(directUrl));
domainCache.put(directUrl, result);
}
} | @Test
public void testRefreshDomain() {
ConsumerConfig<Object> consumerConfig = new ConsumerConfig<>();
String directUrl1 = "bolt://alipay.com";
String directUrl2 = "bolt://taobao.com";
String directUrl = directUrl1 + ";" + directUrl2;
consumerConfig.setDirectUrl(directUrl);
domainRegistry.subscribe(consumerConfig);
assertTrue(domainRegistry.notifyListeners.containsKey(directUrl));
domainRegistry.refreshDomain();
assertTrue(domainRegistry.domainCache.containsKey(directUrl1));
assertTrue(domainRegistry.domainCache.containsKey(directUrl2));
} |
@Subscribe
public void onChatMessage(ChatMessage chatMessage)
{
if (chatMessage.getType() != ChatMessageType.TRADE
&& chatMessage.getType() != ChatMessageType.GAMEMESSAGE
&& chatMessage.getType() != ChatMessageType.SPAM
&& chatMessage.getType() != ChatMessageType.FRIENDSCHATNOTIFICATION)
{
return;
}
String message = chatMessage.getMessage();
Matcher matcher = KILLCOUNT_PATTERN.matcher(message);
if (matcher.find())
{
final String boss = matcher.group("boss");
final int kc = Integer.parseInt(matcher.group("kc"));
final String pre = matcher.group("pre");
final String post = matcher.group("post");
if (Strings.isNullOrEmpty(pre) && Strings.isNullOrEmpty(post))
{
unsetKc(boss);
return;
}
String renamedBoss = KILLCOUNT_RENAMES
.getOrDefault(boss, boss)
// The config service doesn't support keys with colons in them
.replace(":", "");
// reference comparison is intentional: replace() returns the same String
// instance when nothing changed, so != means a rename or colon-strip occurred
if (boss != renamedBoss)
{
// Unset old TOB kc
unsetKc(boss);
unsetPb(boss);
unsetKc(boss.replace(":", "."));
unsetPb(boss.replace(":", "."));
// Unset old story mode
unsetKc("Theatre of Blood Story Mode");
unsetPb("Theatre of Blood Story Mode");
}
setKc(renamedBoss, kc);
// We either already have the pb, or need to remember the boss for the upcoming pb
if (lastPb > -1)
{
log.debug("Got out-of-order personal best for {}: {}", renamedBoss, lastPb);
if (renamedBoss.contains("Theatre of Blood"))
{
// TOB team size isn't sent in the kill message, but can be computed from varbits
int tobTeamSize = tobTeamSize();
lastTeamSize = tobTeamSize == 1 ? "Solo" : (tobTeamSize + " players");
}
else if (renamedBoss.contains("Tombs of Amascut"))
{
// TOA team size isn't sent in the kill message, but can be computed from varbits
int toaTeamSize = toaTeamSize();
lastTeamSize = toaTeamSize == 1 ? "Solo" : (toaTeamSize + " players");
}
final double pb = getPb(renamedBoss);
// If a raid with a team size, only update the pb if it is lower than the existing pb
// so that the pb is the overall lowest of any team size
if (lastTeamSize == null || pb == 0 || lastPb < pb)
{
log.debug("Setting overall pb (old: {})", pb);
setPb(renamedBoss, lastPb);
}
if (lastTeamSize != null)
{
log.debug("Setting team size pb: {}", lastTeamSize);
setPb(renamedBoss + " " + lastTeamSize, lastPb);
}
lastPb = -1;
lastTeamSize = null;
}
else
{
lastBossKill = renamedBoss;
lastBossTime = client.getTickCount();
}
return;
}
matcher = DUEL_ARENA_WINS_PATTERN.matcher(message);
if (matcher.find())
{
final int oldWins = getKc("Duel Arena Wins");
final int wins = matcher.group(2).equals("one") ? 1 :
Integer.parseInt(matcher.group(2).replace(",", ""));
final String result = matcher.group(1);
int winningStreak = getKc("Duel Arena Win Streak");
int losingStreak = getKc("Duel Arena Lose Streak");
if (result.equals("won") && wins > oldWins)
{
losingStreak = 0;
winningStreak += 1;
}
else if (result.equals("were defeated"))
{
losingStreak += 1;
winningStreak = 0;
}
else
{
log.warn("unrecognized duel streak chat message: {}", message);
}
setKc("Duel Arena Wins", wins);
setKc("Duel Arena Win Streak", winningStreak);
setKc("Duel Arena Lose Streak", losingStreak);
}
matcher = DUEL_ARENA_LOSSES_PATTERN.matcher(message);
if (matcher.find())
{
int losses = matcher.group(1).equals("one") ? 1 :
Integer.parseInt(matcher.group(1).replace(",", ""));
setKc("Duel Arena Losses", losses);
}
matcher = KILL_DURATION_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = NEW_PB_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = RAIDS_PB_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = RAIDS_DURATION_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = HS_PB_PATTERN.matcher(message);
if (matcher.find())
{
int floor = Integer.parseInt(matcher.group("floor"));
String floortime = matcher.group("floortime");
String floorpb = matcher.group("floorpb");
String otime = matcher.group("otime");
String opb = matcher.group("opb");
String pb = MoreObjects.firstNonNull(floorpb, floortime);
setPb("Hallowed Sepulchre Floor " + floor, timeStringToSeconds(pb));
if (otime != null)
{
pb = MoreObjects.firstNonNull(opb, otime);
setPb("Hallowed Sepulchre", timeStringToSeconds(pb));
}
}
matcher = HS_KC_FLOOR_PATTERN.matcher(message);
if (matcher.find())
{
int floor = Integer.parseInt(matcher.group(1));
int kc = Integer.parseInt(matcher.group(2).replaceAll(",", ""));
setKc("Hallowed Sepulchre Floor " + floor, kc);
}
matcher = HS_KC_GHC_PATTERN.matcher(message);
if (matcher.find())
{
int kc = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
setKc("Hallowed Sepulchre", kc);
}
matcher = HUNTER_RUMOUR_KC_PATTERN.matcher(message);
if (matcher.find())
{
int kc = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
setKc("Hunter Rumours", kc);
}
if (lastBossKill != null && lastBossTime != client.getTickCount())
{
lastBossKill = null;
lastBossTime = -1;
}
matcher = COLLECTION_LOG_ITEM_PATTERN.matcher(message);
if (matcher.find())
{
String item = matcher.group(1);
int petId = findPet(item);
if (petId != -1)
{
final List<Integer> petList = new ArrayList<>(getPetList());
if (!petList.contains(petId))
{
log.debug("New pet added: {}/{}", item, petId);
petList.add(petId);
setPetList(petList);
}
}
}
matcher = GUARDIANS_OF_THE_RIFT_PATTERN.matcher(message);
if (matcher.find())
{
int kc = Integer.parseInt(matcher.group(1));
setKc("Guardians of the Rift", kc);
}
} | @Test
public void testReward()
{
ChatMessage chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Your reward is: <col=ff0000>1</col> x <col=ff0000>Kebab</col>.", null, 0);
chatCommandsPlugin.onChatMessage(chatMessage);
verify(configManager, never()).setRSProfileConfiguration(anyString(), anyString(), anyInt());
} |
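One subtle point in the pair above is the `boss != renamedBoss` reference comparison. It works because `String.replace` (and `getOrDefault` with the key itself as the default) hands back the same instance when nothing changed; that is long-standing OpenJDK behavior rather than a specified guarantee, so treat this as an illustrative sketch with a hypothetical rename table:

```java
import java.util.Map;

public class RenameCheckDemo {
    public static void main(String[] args) {
        Map<String, String> renames = Map.of("Old Boss", "New Boss"); // hypothetical rename table
        String boss = "Zulrah";
        // No rename and no colon to strip: replace() returns the same instance,
        // so the reference check is false and no cleanup would run.
        String renamed = renames.getOrDefault(boss, boss).replace(":", "");
        System.out.println(boss != renamed); // false

        String tobBoss = "Theatre of Blood: Entry Mode";
        String tobRenamed = renames.getOrDefault(tobBoss, tobBoss).replace(":", "");
        System.out.println(tobBoss != tobRenamed); // true: the colon was stripped
    }
}
```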
public static ThrowableType getThrowableType(Throwable cause) {
final ThrowableAnnotation annotation =
cause.getClass().getAnnotation(ThrowableAnnotation.class);
return annotation == null ? ThrowableType.RecoverableError : annotation.value();
} | @Test
void testThrowableType_EnvironmentError() {
assertThat(ThrowableClassifier.getThrowableType(new TestEnvironmentErrorException()))
.isEqualTo(ThrowableType.EnvironmentError);
} |
public void setTemplateEntriesForChild(CapacitySchedulerConfiguration conf,
QueuePath childQueuePath) {
setTemplateEntriesForChild(conf, childQueuePath, false);
} | @Test
public void testIgnoredTemplateWhenQueuePathIsInvalid() {
QueuePath invalidPath = new QueuePath("a");
conf.set(getTemplateKey(invalidPath, "capacity"), "6w");
AutoCreatedQueueTemplate template =
new AutoCreatedQueueTemplate(conf, invalidPath);
template.setTemplateEntriesForChild(conf, TEST_QUEUE_AB);
Assert.assertEquals("weight is set using invalid queue path", -1f,
conf.getNonLabeledQueueWeight(TEST_QUEUE_AB), 10e-6);
} |
@Override
public KsMaterializedQueryResult<Row> get(
final GenericKey key,
final int partition,
final Optional<Position> position
) {
try {
final KeyQuery<GenericKey, ValueAndTimestamp<GenericRow>> query = KeyQuery.withKey(key);
StateQueryRequest<ValueAndTimestamp<GenericRow>>
request = inStore(stateStore.getStateStoreName())
.withQuery(query)
.withPartitions(ImmutableSet.of(partition));
if (position.isPresent()) {
request = request.withPositionBound(PositionBound.at(position.get()));
}
final StateQueryResult<ValueAndTimestamp<GenericRow>>
result = stateStore.getKafkaStreams().query(request);
final QueryResult<ValueAndTimestamp<GenericRow>> queryResult =
result.getPartitionResults().get(partition);
// Some of these failures are retriable, and in the future, we may want to retry
// locally before throwing.
if (queryResult.isFailure()) {
throw failedQueryException(queryResult);
} else if (queryResult.getResult() == null) {
return KsMaterializedQueryResult.rowIteratorWithPosition(
Collections.emptyIterator(), queryResult.getPosition());
} else {
final ValueAndTimestamp<GenericRow> row = queryResult.getResult();
return KsMaterializedQueryResult.rowIteratorWithPosition(
ImmutableList.of(Row.of(stateStore.schema(), key, row.value(), row.timestamp()))
.iterator(),
queryResult.getPosition());
}
} catch (final NotUpToBoundException | MaterializationException e) {
throw e;
} catch (final Exception e) {
throw new MaterializationException("Failed to get value from materialized table", e);
}
} | @Test
public void shouldCloseIterator_rangeBothBounds() {
// Given:
final StateQueryResult result = new StateQueryResult();
final QueryResult queryResult = QueryResult.forResult(keyValueIterator);
queryResult.setPosition(POSITION);
result.addResult(PARTITION, queryResult);
when(kafkaStreams.query(any())).thenReturn(result);
when(keyValueIterator.hasNext()).thenReturn(true, true, false);
when(keyValueIterator.next())
.thenReturn(KEY_VALUE1)
.thenReturn(KEY_VALUE2);
// When:
Streams.stream(table.get(PARTITION, A_KEY, A_KEY2).rowIterator)
.collect(Collectors.toList());
// Then:
verify(keyValueIterator).close();
} |
public <T extends AwsSyncClientBuilder> void applyHttpClientConfigurations(T builder) {
if (Strings.isNullOrEmpty(httpClientType)) {
httpClientType = CLIENT_TYPE_DEFAULT;
}
switch (httpClientType) {
case CLIENT_TYPE_URLCONNECTION:
UrlConnectionHttpClientConfigurations urlConnectionHttpClientConfigurations =
loadHttpClientConfigurations(UrlConnectionHttpClientConfigurations.class.getName());
urlConnectionHttpClientConfigurations.configureHttpClientBuilder(builder);
break;
case CLIENT_TYPE_APACHE:
ApacheHttpClientConfigurations apacheHttpClientConfigurations =
loadHttpClientConfigurations(ApacheHttpClientConfigurations.class.getName());
apacheHttpClientConfigurations.configureHttpClientBuilder(builder);
break;
default:
throw new IllegalArgumentException("Unrecognized HTTP client type " + httpClientType);
}
} | @Test
public void testUrlHttpClientConfiguration() {
Map<String, String> properties = Maps.newHashMap();
properties.put(HttpClientProperties.CLIENT_TYPE, "urlconnection");
HttpClientProperties httpProperties = new HttpClientProperties(properties);
S3ClientBuilder mockS3ClientBuilder = Mockito.mock(S3ClientBuilder.class);
ArgumentCaptor<SdkHttpClient.Builder> httpClientBuilderCaptor =
ArgumentCaptor.forClass(SdkHttpClient.Builder.class);
httpProperties.applyHttpClientConfigurations(mockS3ClientBuilder);
Mockito.verify(mockS3ClientBuilder).httpClientBuilder(httpClientBuilderCaptor.capture());
SdkHttpClient.Builder capturedHttpClientBuilder = httpClientBuilderCaptor.getValue();
assertThat(capturedHttpClientBuilder)
.as("Should use url connection http client")
.isInstanceOf(UrlConnectionHttpClient.Builder.class);
} |
public static DistCpOptions parse(String[] args)
throws IllegalArgumentException {
CommandLineParser parser = new CustomParser();
CommandLine command;
try {
command = parser.parse(cliOptions, args, true);
} catch (ParseException e) {
throw new IllegalArgumentException("Unable to parse arguments. " +
Arrays.toString(args), e);
}
DistCpOptions.Builder builder = parseSourceAndTargetPaths(command);
builder
.withAtomicCommit(
command.hasOption(DistCpOptionSwitch.ATOMIC_COMMIT.getSwitch()))
.withSyncFolder(
command.hasOption(DistCpOptionSwitch.SYNC_FOLDERS.getSwitch()))
.withDeleteMissing(
command.hasOption(DistCpOptionSwitch.DELETE_MISSING.getSwitch()))
.withIgnoreFailures(
command.hasOption(DistCpOptionSwitch.IGNORE_FAILURES.getSwitch()))
.withOverwrite(
command.hasOption(DistCpOptionSwitch.OVERWRITE.getSwitch()))
.withAppend(
command.hasOption(DistCpOptionSwitch.APPEND.getSwitch()))
.withSkipCRC(
command.hasOption(DistCpOptionSwitch.SKIP_CRC.getSwitch()))
.withBlocking(
!command.hasOption(DistCpOptionSwitch.BLOCKING.getSwitch()))
.withVerboseLog(
command.hasOption(DistCpOptionSwitch.VERBOSE_LOG.getSwitch()))
.withDirectWrite(
command.hasOption(DistCpOptionSwitch.DIRECT_WRITE.getSwitch()))
.withUseIterator(
command.hasOption(DistCpOptionSwitch.USE_ITERATOR.getSwitch()))
.withUpdateRoot(
command.hasOption(DistCpOptionSwitch.UPDATE_ROOT.getSwitch()));
if (command.hasOption(DistCpOptionSwitch.DIFF.getSwitch())) {
String[] snapshots = getVals(command,
DistCpOptionSwitch.DIFF.getSwitch());
checkSnapshotsArgs(snapshots);
builder.withUseDiff(snapshots[0], snapshots[1]);
}
if (command.hasOption(DistCpOptionSwitch.RDIFF.getSwitch())) {
String[] snapshots = getVals(command,
DistCpOptionSwitch.RDIFF.getSwitch());
checkSnapshotsArgs(snapshots);
builder.withUseRdiff(snapshots[0], snapshots[1]);
}
if (command.hasOption(DistCpOptionSwitch.FILTERS.getSwitch())) {
builder.withFiltersFile(
getVal(command, DistCpOptionSwitch.FILTERS.getSwitch()));
}
if (command.hasOption(DistCpOptionSwitch.LOG_PATH.getSwitch())) {
builder.withLogPath(
new Path(getVal(command, DistCpOptionSwitch.LOG_PATH.getSwitch())));
}
if (command.hasOption(DistCpOptionSwitch.WORK_PATH.getSwitch())) {
final String workPath = getVal(command,
DistCpOptionSwitch.WORK_PATH.getSwitch());
if (workPath != null && !workPath.isEmpty()) {
builder.withAtomicWorkPath(new Path(workPath));
}
}
if (command.hasOption(DistCpOptionSwitch.TRACK_MISSING.getSwitch())) {
builder.withTrackMissing(
new Path(getVal(
command,
DistCpOptionSwitch.TRACK_MISSING.getSwitch())));
}
if (command.hasOption(DistCpOptionSwitch.BANDWIDTH.getSwitch())) {
try {
final Float mapBandwidth = Float.parseFloat(
getVal(command, DistCpOptionSwitch.BANDWIDTH.getSwitch()));
builder.withMapBandwidth(mapBandwidth);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Bandwidth specified is invalid: " +
getVal(command, DistCpOptionSwitch.BANDWIDTH.getSwitch()), e);
}
}
if (command.hasOption(
DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch())) {
try {
final Integer numThreads = Integer.parseInt(getVal(command,
DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch()));
builder.withNumListstatusThreads(numThreads);
} catch (NumberFormatException e) {
throw new IllegalArgumentException(
"Number of liststatus threads is invalid: " + getVal(command,
DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch()), e);
}
}
if (command.hasOption(DistCpOptionSwitch.MAX_MAPS.getSwitch())) {
try {
final Integer maps = Integer.parseInt(
getVal(command, DistCpOptionSwitch.MAX_MAPS.getSwitch()));
builder.maxMaps(maps);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Number of maps is invalid: " +
getVal(command, DistCpOptionSwitch.MAX_MAPS.getSwitch()), e);
}
}
if (command.hasOption(DistCpOptionSwitch.COPY_STRATEGY.getSwitch())) {
builder.withCopyStrategy(
getVal(command, DistCpOptionSwitch.COPY_STRATEGY.getSwitch()));
}
if (command.hasOption(DistCpOptionSwitch.PRESERVE_STATUS.getSwitch())) {
builder.preserve(
getVal(command, DistCpOptionSwitch.PRESERVE_STATUS.getSwitch()));
}
if (command.hasOption(DistCpOptionSwitch.FILE_LIMIT.getSwitch())) {
LOG.warn(DistCpOptionSwitch.FILE_LIMIT.getSwitch() + " is a deprecated" +
" option. Ignoring.");
}
if (command.hasOption(DistCpOptionSwitch.SIZE_LIMIT.getSwitch())) {
LOG.warn(DistCpOptionSwitch.SIZE_LIMIT.getSwitch() + " is a deprecated" +
" option. Ignoring.");
}
if (command.hasOption(DistCpOptionSwitch.BLOCKS_PER_CHUNK.getSwitch())) {
final String chunkSizeStr = getVal(command,
DistCpOptionSwitch.BLOCKS_PER_CHUNK.getSwitch().trim());
try {
int csize = Integer.parseInt(chunkSizeStr);
csize = csize > 0 ? csize : 0;
LOG.info("Set distcp blocksPerChunk to " + csize);
builder.withBlocksPerChunk(csize);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("blocksPerChunk is invalid: "
+ chunkSizeStr, e);
}
}
if (command.hasOption(DistCpOptionSwitch.COPY_BUFFER_SIZE.getSwitch())) {
final String copyBufferSizeStr = getVal(command,
DistCpOptionSwitch.COPY_BUFFER_SIZE.getSwitch().trim());
try {
int copyBufferSize = Integer.parseInt(copyBufferSizeStr);
builder.withCopyBufferSize(copyBufferSize);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("copyBufferSize is invalid: "
+ copyBufferSizeStr, e);
}
}
return builder.build();
} | @Test
public void testTargetPath() {
DistCpOptions options = OptionsParser.parse(new String[] {
"-f",
"hdfs://localhost:8020/source/first",
"hdfs://localhost:8020/target/"});
Assert.assertEquals(options.getTargetPath(), new Path("hdfs://localhost:8020/target/"));
} |
public int doWork()
{
final long nowNs = nanoClock.nanoTime();
cachedNanoClock.update(nowNs);
dutyCycleTracker.measureAndUpdate(nowNs);
int workCount = commandQueue.drain(CommandProxy.RUN_TASK, Configuration.COMMAND_DRAIN_LIMIT);
final int bytesReceived = dataTransportPoller.pollTransports();
totalBytesReceived.getAndAddOrdered(bytesReceived);
final PublicationImage[] publicationImages = this.publicationImages;
for (int lastIndex = publicationImages.length - 1, i = lastIndex; i >= 0; i--)
{
final PublicationImage image = publicationImages[i];
if (image.isConnected(nowNs))
{
image.checkEosForDrainTransition(nowNs);
workCount += image.sendPendingStatusMessage(nowNs);
workCount += image.processPendingLoss();
workCount += image.initiateAnyRttMeasurements(nowNs);
}
else
{
this.publicationImages = 1 == this.publicationImages.length ?
EMPTY_IMAGES : ArrayUtil.remove(this.publicationImages, i);
image.removeFromDispatcher();
image.receiverRelease();
}
}
checkPendingSetupMessages(nowNs);
if (reResolutionCheckIntervalNs > 0 && (reResolutionDeadlineNs - nowNs) < 0)
{
reResolutionDeadlineNs = nowNs + reResolutionCheckIntervalNs;
dataTransportPoller.checkForReResolutions(nowNs, conductorProxy);
}
return workCount + bytesReceived;
} | @Test
void shouldHandleNonZeroTermOffsetCorrectly()
{
final int initialTermOffset = align(TERM_BUFFER_LENGTH / 16, FrameDescriptor.FRAME_ALIGNMENT);
final int alignedDataFrameLength =
align(DataHeaderFlyweight.HEADER_LENGTH + FAKE_PAYLOAD.length, FrameDescriptor.FRAME_ALIGNMENT);
receiverProxy.registerReceiveChannelEndpoint(receiveChannelEndpoint);
receiverProxy.addSubscription(receiveChannelEndpoint, STREAM_ID);
receiver.doWork();
receiver.doWork();
fillSetupFrame(setupHeader, initialTermOffset);
receiveChannelEndpoint.onSetupMessage(setupHeader, setupBuffer, SetupFlyweight.HEADER_LENGTH, senderAddress, 0);
final int commandsRead = drainConductorQueue(
(e) ->
{
final PublicationImage image = new PublicationImage(
CORRELATION_ID,
ctx,
receiveChannelEndpoint,
0,
senderAddress,
SESSION_ID,
STREAM_ID,
INITIAL_TERM_ID,
ACTIVE_TERM_ID,
initialTermOffset,
(short)0,
rawLog,
mockFeedbackDelayGenerator,
POSITIONS,
mockHighestReceivedPosition,
mockRebuildPosition,
SOURCE_IDENTITY,
congestionControl);
receiverProxy.newPublicationImage(receiveChannelEndpoint, image);
});
assertThat(commandsRead, is(1));
verify(mockHighestReceivedPosition).setOrdered(initialTermOffset);
receiver.doWork();
fillDataFrame(dataHeader, initialTermOffset); // initial data frame
receiveChannelEndpoint.onDataPacket(dataHeader, dataBuffer, alignedDataFrameLength, senderAddress, 0);
verify(mockHighestReceivedPosition).setOrdered(initialTermOffset + alignedDataFrameLength);
final int readOutcome = TermReader.read(
termBuffers[ACTIVE_INDEX],
initialTermOffset,
(buffer, offset, length, header) ->
{
assertThat(header.type(), is(HeaderFlyweight.HDR_TYPE_DATA));
assertThat(header.termId(), is(ACTIVE_TERM_ID));
assertThat(header.streamId(), is(STREAM_ID));
assertThat(header.sessionId(), is(SESSION_ID));
assertThat(header.termOffset(), is(initialTermOffset));
assertThat(header.frameLength(), is(DataHeaderFlyweight.HEADER_LENGTH + FAKE_PAYLOAD.length));
},
Integer.MAX_VALUE,
header,
mockErrorHandler,
0,
mockSubscriberPosition);
assertThat(readOutcome, is(1));
} |
@Override
public boolean canHandleReturnType(Class returnType) {
return (Flux.class.isAssignableFrom(returnType)) || (Mono.class
.isAssignableFrom(returnType));
} | @Test
public void testCheckTypes() {
assertThat(reactorRetryAspectExt.canHandleReturnType(Mono.class)).isTrue();
assertThat(reactorRetryAspectExt.canHandleReturnType(Flux.class)).isTrue();
} |
@Override
public SchemaAndValue toConnectData(String topic, byte[] value) {
JsonNode jsonValue;
// This handles a tombstone message
if (value == null) {
return SchemaAndValue.NULL;
}
try {
jsonValue = deserializer.deserialize(topic, value);
} catch (SerializationException e) {
throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e);
}
if (config.schemasEnabled() && (!jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME) || !jsonValue.has(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME)))
throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." +
" If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration.");
// The deserialized data should either be an envelope object containing the schema and the payload or the schema
// was stripped during serialization and we need to fill in an all-encompassing schema.
if (!config.schemasEnabled()) {
ObjectNode envelope = JSON_NODE_FACTORY.objectNode();
envelope.set(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME, null);
envelope.set(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME, jsonValue);
jsonValue = envelope;
}
Schema schema = asConnectSchema(jsonValue.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
return new SchemaAndValue(
schema,
convertToConnect(schema, jsonValue.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME), config)
);
} | @Test
public void decimalToConnectWithDefaultValue() {
BigDecimal reference = new BigDecimal(new BigInteger("156"), 2);
Schema schema = Decimal.builder(2).defaultValue(reference).build();
String msg = "{ \"schema\": { \"type\": \"bytes\", \"name\": \"org.apache.kafka.connect.data.Decimal\", \"version\": 1, \"default\": \"AJw=\", \"parameters\": { \"scale\": \"2\" } }, \"payload\": null }";
SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes());
assertEquals(schema, schemaAndValue.schema());
assertEquals(reference, schemaAndValue.value());
} |
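As a worked check of the expected value in the decimal test above: the schema's `"default": "AJw="` is the base64 encoding of the two's-complement bytes of the unscaled value 156, and scale 2 comes from the `parameters` map, yielding 1.56:

```java
import java.math.BigDecimal;
import java.math.BigInteger;
import java.util.Base64;

public class DecimalDefaultDemo {
    public static void main(String[] args) {
        byte[] unscaled = Base64.getDecoder().decode("AJw=");           // {0x00, 0x9C} == 156
        BigDecimal value = new BigDecimal(new BigInteger(unscaled), 2); // scale 2 from "parameters"
        System.out.println(value);                                      // 1.56
    }
}
```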
public static FieldScope ignoringFieldDescriptors(
FieldDescriptor firstFieldDescriptor, FieldDescriptor... rest) {
return FieldScopeImpl.createIgnoringFieldDescriptors(asList(firstFieldDescriptor, rest));
} | @Test
public void testIgnoreFieldOfSubMessage() {
// Ignore o_int of sub message fields.
Message message = parse("o_int: 1 o_sub_test_message: { o_int: 2 r_string: \"foo\" }");
Message diffMessage1 = parse("o_int: 2 o_sub_test_message: { o_int: 2 r_string: \"foo\" }");
Message diffMessage2 = parse("o_int: 1 o_sub_test_message: { o_int: 2 r_string: \"bar\" }");
Message eqMessage = parse("o_int: 1 o_sub_test_message: { o_int: 3 r_string: \"foo\" }");
FieldDescriptor fieldDescriptor =
getFieldDescriptor("o_sub_test_message").getMessageType().findFieldByName("o_int");
FieldScope partialScope = FieldScopes.ignoringFieldDescriptors(fieldDescriptor);
expectThat(diffMessage1).withPartialScope(partialScope).isNotEqualTo(message);
expectThat(diffMessage2).withPartialScope(partialScope).isNotEqualTo(message);
expectThat(eqMessage).withPartialScope(partialScope).isEqualTo(message);
expectFailureWhenTesting().that(diffMessage1).withPartialScope(partialScope).isEqualTo(message);
expectIsEqualToFailed();
expectThatFailure().hasMessageThat().contains("modified: o_int: 1 -> 2");
expectFailureWhenTesting().that(diffMessage2).withPartialScope(partialScope).isEqualTo(message);
expectIsEqualToFailed();
expectThatFailure()
.hasMessageThat()
.contains("modified: o_sub_test_message.r_string[0]: \"foo\" -> \"bar\"");
} |
public static KStreamHolder<GenericKey> build(
final KStreamHolder<?> stream,
final StreamSelectKeyV1 selectKey,
final RuntimeBuildContext buildContext
) {
final LogicalSchema sourceSchema = stream.getSchema();
final CompiledExpression expression = buildExpressionEvaluator(
selectKey,
buildContext,
sourceSchema
);
final ProcessingLogger processingLogger = buildContext
.getProcessingLogger(selectKey.getProperties().getQueryContext());
final String errorMsg = "Error extracting new key using expression "
+ selectKey.getKeyExpression();
final Function<GenericRow, Object> evaluator = val -> expression
.evaluate(val, null, processingLogger, () -> errorMsg);
final LogicalSchema resultSchema = new StepSchemaResolver(buildContext.getKsqlConfig(),
buildContext.getFunctionRegistry()).resolve(selectKey, sourceSchema);
final KStream<?, GenericRow> kstream = stream.getStream();
final KStream<GenericKey, GenericRow> rekeyed = kstream
.filter((key, val) -> val != null && evaluator.apply(val) != null)
.selectKey((key, val) -> GenericKey.genericKey(evaluator.apply(val)));
return new KStreamHolder<>(
rekeyed,
resultSchema,
ExecutionKeyFactory.unwindowed(buildContext)
);
} | @Test
public void shouldFilterOutNullValues() {
// When:
selectKey.build(planBuilder, planInfo);
// Then:
verify(kstream).filter(predicateCaptor.capture());
final Predicate<GenericKey, GenericRow> predicate = getPredicate();
assertThat(predicate.test(SOURCE_KEY, null), is(false));
} |
public static boolean deleteQuietly(@Nullable File file) {
if (file == null) {
return false;
}
return deleteQuietly(file.toPath());
} | @Test
public void deleteQuietly_does_not_fail_if_argument_is_null() {
FileUtils.deleteQuietly((File) null);
FileUtils.deleteQuietly((Path) null);
} |
@VisibleForTesting
static String generateLogUrl(String pattern, String jobId, String taskManagerId) {
String generatedUrl = pattern.replaceAll("<jobid>", jobId);
if (null != taskManagerId) {
generatedUrl = generatedUrl.replaceAll("<tmid>", taskManagerId);
}
return generatedUrl;
} | @Test
void testGenerateJobManagerLogUrl() {
final String pattern = "http://localhost/<jobid>/log";
final String jobId = "jobid";
final String generatedUrl = GeneratedLogUrlHandler.generateLogUrl(pattern, jobId, null);
assertThat(generatedUrl).isEqualTo(pattern.replace("<jobid>", jobId));
} |
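A small illustrative note on the pair above: the focal method uses `replaceAll`, which compiles its first argument as a regex, while the test builds its expectation with plain `replace`. The two agree here only because `<jobid>` contains no regex metacharacters:

```java
public class LogUrlDemo {
    public static void main(String[] args) {
        String pattern = "http://localhost/<jobid>/log";
        // replaceAll treats "<jobid>" as a regex, but it has no metacharacters,
        // so it matches literally and agrees with the plain replace().
        System.out.println(pattern.replaceAll("<jobid>", "jobid")
            .equals(pattern.replace("<jobid>", "jobid"))); // true
    }
}
```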
@Override
public Batch toBatch() {
return new SparkBatch(
sparkContext, table, readConf, groupingKeyType(), taskGroups(), expectedSchema, hashCode());
} | @TestTemplate
public void testUnpartitionedYears() throws Exception {
createUnpartitionedTable(spark, tableName);
SparkScanBuilder builder = scanBuilder();
YearsFunction.TimestampToYearsFunction function = new YearsFunction.TimestampToYearsFunction();
UserDefinedScalarFunc udf = toUDF(function, expressions(fieldRef("ts")));
Predicate predicate =
new Predicate(
"=",
expressions(
udf, intLit(timestampStrToYearOrdinal("2017-11-22T00:00:00.000000+00:00"))));
pushFilters(builder, predicate);
Batch scan = builder.build().toBatch();
assertThat(scan.planInputPartitions().length).isEqualTo(10);
// NOT Equal
builder = scanBuilder();
predicate = new Not(predicate);
pushFilters(builder, predicate);
scan = builder.build().toBatch();
assertThat(scan.planInputPartitions().length).isEqualTo(10);
} |
@Override
public boolean processData(DistroData distroData) {
switch (distroData.getType()) {
case ADD:
case CHANGE:
ClientSyncData clientSyncData = ApplicationUtils.getBean(Serializer.class)
.deserialize(distroData.getContent(), ClientSyncData.class);
handlerClientSyncData(clientSyncData);
return true;
case DELETE:
String deleteClientId = distroData.getDistroKey().getResourceKey();
Loggers.DISTRO.info("[Client-Delete] Received distro client sync data {}", deleteClientId);
clientManager.clientDisconnected(deleteClientId);
return true;
default:
return false;
}
} | @Test
void testProcessDataForChangeClient() {
distroData.setType(DataOperation.CHANGE);
assertEquals(0L, client.getRevision());
assertEquals(0, client.getAllPublishedService().size());
distroClientDataProcessor.processData(distroData);
verify(clientManager).syncClientConnected(CLIENT_ID, clientSyncData.getAttributes());
assertEquals(1L, client.getRevision());
assertEquals(1, client.getAllPublishedService().size());
} |
public static void setLocalDirStickyBit(String dir) {
try {
// Support for sticky bit is platform specific. Check if the path starts with "/" and if so,
// assume that the host supports the chmod command.
if (dir.startsWith(AlluxioURI.SEPARATOR)) {
// TODO(peis): This is very slow. Consider removing this.
Runtime.getRuntime().exec("chmod +t " + dir);
}
} catch (IOException e) {
LOG.info("Can not set the sticky bit of the directory: {}", dir, e);
}
} | @Test
public void setLocalDirStickyBit() throws IOException {
File tempFolder = mTestFolder.newFolder("dirToModify");
// Only test this functionality if the absolute path of the temporary directory starts with "/",
// which implies the host should support "chmod".
if (tempFolder.getAbsolutePath().startsWith(AlluxioURI.SEPARATOR)) {
FileUtils.setLocalDirStickyBit(tempFolder.getAbsolutePath());
List<String> commands = new ArrayList<>();
commands.add("/bin/ls");
commands.add("-ld");
commands.add(tempFolder.getAbsolutePath());
try {
ProcessBuilder builder = new ProcessBuilder(commands);
Process process = builder.start();
process.waitFor();
BufferedReader stdInput = new BufferedReader(new
InputStreamReader(process.getInputStream()));
String line = stdInput.readLine();
// we are just concerned about the first and the last permission bits
assertTrue(line.matches("^d[rwx-]{8}t.*$"));
} catch (InterruptedException e) {
e.printStackTrace();
}
}
} |
public static MetricName getMetricName(
String group,
String typeName,
String name
) {
return getMetricName(
group,
typeName,
name,
null
);
} | @Test
public void testTaggedMetricName() {
LinkedHashMap<String, String> tags = new LinkedHashMap<>();
tags.put("foo", "bar");
tags.put("bar", "baz");
tags.put("baz", "raz.taz");
MetricName metricName = KafkaYammerMetrics.getMetricName(
"kafka.metrics",
"TestMetrics",
"TaggedMetric",
tags
);
assertEquals("kafka.metrics", metricName.getGroup());
assertEquals("TestMetrics", metricName.getType());
assertEquals("TaggedMetric", metricName.getName());
// MBean name should preserve initial ordering
assertEquals("kafka.metrics:type=TestMetrics,name=TaggedMetric,foo=bar,bar=baz,baz=raz.taz",
metricName.getMBeanName());
// Scope should be sorted by key
assertEquals("bar.baz.baz.raz_taz.foo.bar", metricName.getScope());
} |
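A quick illustrative derivation of the two expectations in the test above: the MBean name keeps the tags' insertion order, while the scope sorts tag keys and sanitizes '.' in values to '_':

```java
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.TreeMap;

public class ScopeDemo {
    public static void main(String[] args) {
        Map<String, String> tags = new LinkedHashMap<>();
        tags.put("foo", "bar");
        tags.put("bar", "baz");
        tags.put("baz", "raz.taz");
        // Scope: keys sorted, '.' in values replaced with '_'
        StringBuilder scope = new StringBuilder();
        new TreeMap<>(tags).forEach((k, v) ->
            scope.append(k).append('.').append(v.replace('.', '_')).append('.'));
        System.out.println(scope.substring(0, scope.length() - 1)); // bar.baz.baz.raz_taz.foo.bar
    }
}
```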
@Override
public void handleTenantMenu(TenantMenuHandler handler) {
// If the tenant feature is disabled, skip the logic
if (isTenantDisable()) {
return;
}
// Get the tenant, then get its menus
TenantDO tenant = getTenant(TenantContextHolder.getRequiredTenantId());
Set<Long> menuIds;
if (isSystemTenant(tenant)) { // system tenant: gets the full menu set
menuIds = CollectionUtils.convertSet(menuService.getMenuList(), MenuDO::getId);
} else {
menuIds = tenantPackageService.getTenantPackage(tenant.getPackageId()).getMenuIds();
}
// Invoke the handler
handler.handle(menuIds);
} | @Test // normal (non-system) tenant case
public void testHandleTenantMenu_normal() {
// prepare parameters
TenantMenuHandler handler = mock(TenantMenuHandler.class);
// mock: tenant feature not disabled
when(tenantProperties.getEnable()).thenReturn(true);
// mock the tenant
TenantDO dbTenant = randomPojo(TenantDO.class, o -> o.setPackageId(200L));
tenantMapper.insert(dbTenant); // @Sql: insert an existing record first
TenantContextHolder.setTenantId(dbTenant.getId());
// mock the menus
when(tenantPackageService.getTenantPackage(eq(200L))).thenReturn(randomPojo(TenantPackageDO.class,
o -> o.setMenuIds(asSet(100L, 101L))));
// invoke
tenantService.handleTenantMenu(handler);
// assert
verify(handler).handle(asSet(100L, 101L));
} |
public void processOnce() throws IOException {
// set status of query to OK.
ctx.getState().reset();
executor = null;
// reset sequence id of MySQL protocol
final MysqlChannel channel = ctx.getMysqlChannel();
channel.setSequenceId(0);
// read packet from channel
try {
packetBuf = channel.fetchOnePacket();
if (packetBuf == null) {
throw new RpcException(ctx.getRemoteIP(), "Error happened when receiving packet.");
}
} catch (AsynchronousCloseException e) {
// when this happened, timeout checker close this channel
// killed flag in ctx has been already set, just return
return;
}
// dispatch
dispatch();
// finalize
finalizeCommand();
ctx.setCommand(MysqlCommand.COM_SLEEP);
} | @Test
public void testChangeUser() throws IOException {
ConnectContext ctx = initMockContext(mockChannel(changeUserPacket), GlobalStateMgr.getCurrentState());
ConnectProcessor processor = new ConnectProcessor(ctx);
processor.processOnce();
Assert.assertEquals(MysqlCommand.COM_CHANGE_USER, myContext.getCommand());
Assert.assertTrue(myContext.getState().toResponsePacket() instanceof MysqlOkPacket);
Assert.assertFalse(myContext.isKilled());
} |
private List<ColumnStatistics> toColumnStatistics(HiveWriterVersion hiveWriterVersion, List<DwrfProto.ColumnStatistics> columnStatistics, boolean isRowGroup)
{
if (columnStatistics == null) {
return ImmutableList.of();
}
return columnStatistics.stream()
.map(statistics -> toColumnStatistics(hiveWriterVersion, statistics, isRowGroup, null))
.collect(toImmutableList());
} | @Test(dataProvider = "columnStatisticsSupplier")
public void testToColumnStatisticsRoundtrip(ColumnStatistics input, ColumnStatistics output, DwrfMetadataReader dwrfMetadataReader)
{
DwrfProto.ColumnStatistics dwrfColumnStatistics = DwrfMetadataWriter.toColumnStatistics(input);
ColumnStatistics actual = dwrfMetadataReader.toColumnStatistics(HiveWriterVersion.ORC_HIVE_8732, dwrfColumnStatistics, false, null);
assertEquals(actual, output);
} |
@Transactional
public void updateCustomChecklist(User user, CustomChecklistUpdateRequest request) {
List<Integer> questionIds = request.questionIds();
validateCustomChecklistQuestionsIsNotEmpty(questionIds);
validateCustomChecklistQuestionsDuplication(questionIds);
customChecklistQuestionRepository.deleteAllByUser(user);
List<CustomChecklistQuestion> customChecklistQuestions = questionIds.stream()
.map(Question::fromId)
.map(question -> new CustomChecklistQuestion(user, question))
.toList();
customChecklistQuestionRepository.saveAll(customChecklistQuestions);
} | @DisplayName("커스텀 체크리스트 업데이트 실패 : 질문 id가 유효하지 않을 때")
@Test
void updateCustomChecklist_invalidQuestionId_exception() {
// given
CustomChecklistUpdateRequest request = CUSTOM_CHECKLIST_UPDATE_REQUEST_INVALID;
// when & then
assertThatThrownBy(() -> checklistService.updateCustomChecklist(USER1, request))
.isInstanceOf(BangggoodException.class)
.hasMessage(ExceptionCode.QUESTION_INVALID.getMessage());
} |
@Activate
public void activate(ComponentContext context) {
cfgService.registerProperties(getClass());
modified(context);
Security.addProvider(new BouncyCastleProvider());
clusterCommunicator.<NetconfProxyMessage>addSubscriber(
SEND_REQUEST_SUBJECT_STRING,
SERIALIZER::decode,
this::handleProxyMessage,
remoteRequestExecutor);
clusterCommunicator.<NetconfProxyMessage>addSubscriber(
SEND_REQUEST_SUBJECT_SET_STRING,
SERIALIZER::decode,
this::handleProxyMessage,
remoteRequestExecutor);
clusterCommunicator.<NetconfProxyMessage>addSubscriber(
SEND_REPLY_SUBJECT_STRING,
SERIALIZER::decode,
this::handleProxyReplyMessage,
remoteRequestExecutor);
clusterCommunicator.<NetconfProxyMessage>addSubscriber(
SEND_REPLY_SUBJECT_SET_STRING,
SERIALIZER::decode,
this::handleProxyReplyMessage,
remoteRequestExecutor);
localNodeId = Optional.ofNullable(clusterService.getLocalNode())
.map(ControllerNode::id)
.orElseGet(() -> new NodeId("nullNodeId"));
log.info("Started");
} | @Test
public void testActivate() {
assertEquals("Incorrect NetConf connect timeout, should be default",
5, ctrl.netconfConnectTimeout);
assertEquals("Incorrect NetConf reply timeout, should be default",
5, ctrl.netconfReplyTimeout);
ctrl.activate(null);
assertEquals("Incorrect NetConf connect timeout, should be default",
5, ctrl.netconfConnectTimeout);
assertEquals("Incorrect NetConf reply timeout, should be default",
5, ctrl.netconfReplyTimeout);
} |
public static Resource getResource(File workingDir, String path) {
if (path.startsWith(Resource.CLASSPATH_COLON)) {
path = removePrefix(path);
File file = classPathToFile(path);
if (file != null) {
return new FileResource(file, true, path);
}
List<Resource> resources = new ArrayList<>();
synchronized (SCAN_RESULT) {
ResourceList rl = SCAN_RESULT.getResourcesWithPath(path);
if (rl == null) {
rl = ResourceList.emptyList();
}
rl.forEachByteArrayIgnoringIOException((res, bytes) -> {
URI uri = res.getURI();
if ("file".equals(uri.getScheme())) {
File found = Paths.get(uri).toFile();
resources.add(new FileResource(found, true, res.getPath()));
} else {
resources.add(new JarResource(bytes, res.getPath(), uri));
}
});
}
if (resources.isEmpty()) {
throw new RuntimeException("not found: " + path);
}
return resources.get(0);
} else {
path = path.replace('\\', '/'); // windows fix
File file = new File(removePrefix(path));
if (!file.exists()) {
throw new RuntimeException("not found: " + path);
}
Path relativePath = workingDir.toPath().relativize(file.getAbsoluteFile().toPath());
return new FileResource(file, false, relativePath.toString());
}
} | @Test
void testGetClassPathFileByPath() {
Resource resource = ResourceUtils.getResource(wd, "classpath:com/intuit/karate/resource/test1.txt");
assertTrue(resource.isFile());
assertTrue(resource.isClassPath());
assertEquals("com/intuit/karate/resource/test1.txt", resource.getRelativePath());
assertEquals("classpath:com/intuit/karate/resource/test1.txt", resource.getPrefixedPath());
assertEquals("foo", FileUtils.toString(resource.getStream()));
} |
public PipelineNode.PTransformNode getProducer(PipelineNode.PCollectionNode pcollection) {
return (PipelineNode.PTransformNode)
Iterables.getOnlyElement(pipelineNetwork.predecessors(pcollection));
} | @Test
public void getProducer() {
Pipeline p = Pipeline.create();
PCollection<Long> longs = p.apply("BoundedRead", Read.from(CountingSource.upTo(100L)));
PCollectionList.of(longs).and(longs).and(longs).apply("flatten", Flatten.pCollections());
Components components = PipelineTranslation.toProto(p).getComponents();
QueryablePipeline qp = QueryablePipeline.forPrimitivesIn(components);
String impulseOutputName =
getOnlyElement(
PipelineNode.pTransform(
"BoundedRead-Impulse", components.getTransformsOrThrow("BoundedRead-Impulse"))
.getTransform()
.getOutputsMap()
.values());
PTransformNode impulseProducer =
PipelineNode.pTransform(
"BoundedRead-Impulse", components.getTransformsOrThrow("BoundedRead-Impulse"));
PCollectionNode impulseOutput =
PipelineNode.pCollection(
impulseOutputName, components.getPcollectionsOrThrow(impulseOutputName));
String flattenOutputName =
getOnlyElement(
PipelineNode.pTransform("flatten", components.getTransformsOrThrow("flatten"))
.getTransform()
.getOutputsMap()
.values());
PTransformNode flattenProducer =
PipelineNode.pTransform("flatten", components.getTransformsOrThrow("flatten"));
PCollectionNode flattenOutput =
PipelineNode.pCollection(
flattenOutputName, components.getPcollectionsOrThrow(flattenOutputName));
assertThat(qp.getProducer(impulseOutput), equalTo(impulseProducer));
assertThat(qp.getProducer(flattenOutput), equalTo(flattenProducer));
} |
public static Caffeine<Object, Object> from(CaffeineSpec spec) {
Caffeine<Object, Object> builder = spec.toBuilder();
builder.strictParsing = false;
return builder;
} | @Test
public void fromString_null() {
assertThrows(NullPointerException.class, () -> Caffeine.from((String) null));
} |
public String getDiscriminatingValue(ILoggingEvent event) {
// http://jira.qos.ch/browse/LBCLASSIC-213
Map<String, String> mdcMap = event.getMDCPropertyMap();
if (mdcMap == null) {
return defaultValue;
}
String mdcValue = mdcMap.get(key);
if (mdcValue == null) {
return defaultValue;
} else {
return mdcValue;
}
} | @Test
public void nullMDC() {
event = new LoggingEvent("a", logger, Level.DEBUG, "", null, null);
assertTrue(event.getMDCPropertyMap().isEmpty());
String discriminatorValue = discriminator.getDiscriminatingValue(event);
assertEquals(DEFAULT_VAL, discriminatorValue);
} |
@Override
public int compare(EvictableEntryView e1, EvictableEntryView e2) {
long time1 = Math.max(e1.getCreationTime(), e1.getLastAccessTime());
long time2 = Math.max(e2.getCreationTime(), e2.getLastAccessTime());
return Long.compare(time1, time2);
} | @Test
public void lru_comparator_does_not_prematurely_select_newly_created_entries() {
// 0. Entries to sort
List<TestEntryView> givenEntries = new LinkedList<>();
givenEntries.add(new TestEntryView(1, 1, 0));
givenEntries.add(new TestEntryView(2, 2, 3));
givenEntries.add(new TestEntryView(3, 2, 0));
givenEntries.add(new TestEntryView(4, 4, 4));
givenEntries.add(new TestEntryView(5, 5, 20));
givenEntries.add(new TestEntryView(6, 6, 6));
givenEntries.add(new TestEntryView(7, 7, 0));
givenEntries.add(new TestEntryView(8, 9, 15));
givenEntries.add(new TestEntryView(9, 10, 10));
givenEntries.add(new TestEntryView(10, 10, 0));
// 1. Create expected list of ordered elements by
// sorting entries based on their idle-times. Longest
// idle time must be the first element of the list.
List<TestEntryView> descOrderByIdleTimes = new LinkedList<>(givenEntries);
Collections.sort(descOrderByIdleTimes, (o1, o2) -> -Long.compare(idleTime(o1), idleTime(o2)));
// 2. Then sort given entries by using LRU eviction comparator.
Collections.sort(givenEntries, (o1, o2) -> LRUEvictionPolicyComparator.INSTANCE.compare(o1, o2));
// 3. Check both lists are equal
assertEquals(descOrderByIdleTimes, givenEntries);
} |
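The equivalence the test above leans on, sketched with made-up numbers: for a fixed "now", ordering entries by max(creationTime, lastAccessTime) ascending is the same as ordering by idle time descending, which is why a newly created but never-accessed entry is not prematurely evicted ahead of older idle ones:

```java
public class LruOrderDemo {
    public static void main(String[] args) {
        long now = 100;
        // (creationTime, lastAccessTime) pairs; lastAccessTime 0 means never accessed
        long[][] entries = { {1, 0}, {2, 3}, {9, 10} };
        for (long[] e : entries) {
            long lastTouch = Math.max(e[0], e[1]); // what the comparator orders by
            System.out.println("lastTouch=" + lastTouch + " idle=" + (now - lastTouch));
        }
        // lastTouch=1 idle=99, lastTouch=3 idle=97, lastTouch=10 idle=90:
        // smaller lastTouch <=> longer idle, so min-first order == evict-first order
    }
}
```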
static void applySchemaUpdates(Table table, SchemaUpdate.Consumer updates) {
if (updates == null || updates.empty()) {
// no updates to apply
return;
}
Tasks.range(1)
.retry(IcebergSinkConfig.SCHEMA_UPDATE_RETRIES)
.run(notUsed -> commitSchemaUpdates(table, updates));
} | @Test
public void testApplySchemaUpdatesNoUpdates() {
Table table = mock(Table.class);
when(table.schema()).thenReturn(SIMPLE_SCHEMA);
SchemaUtils.applySchemaUpdates(table, null);
verify(table, times(0)).refresh();
verify(table, times(0)).updateSchema();
SchemaUtils.applySchemaUpdates(table, new SchemaUpdate.Consumer());
verify(table, times(0)).refresh();
verify(table, times(0)).updateSchema();
} |
@Override
public Long createFileConfig(FileConfigSaveReqVO createReqVO) {
FileConfigDO fileConfig = FileConfigConvert.INSTANCE.convert(createReqVO)
.setConfig(parseClientConfig(createReqVO.getStorage(), createReqVO.getConfig()))
.setMaster(false); // non-master by default
fileConfigMapper.insert(fileConfig);
return fileConfig.getId();
} | @Test
public void testCreateFileConfig_success() {
// prepare parameters
Map<String, Object> config = MapUtil.<String, Object>builder().put("basePath", "/yunai")
.put("domain", "https://www.iocoder.cn").build();
FileConfigSaveReqVO reqVO = randomPojo(FileConfigSaveReqVO.class,
o -> o.setStorage(FileStorageEnum.LOCAL.getStorage()).setConfig(config))
.setId(null); // avoid the id being pre-assigned
// invoke
Long fileConfigId = fileConfigService.createFileConfig(reqVO);
// assert
assertNotNull(fileConfigId);
// verify the record's attributes are correct
FileConfigDO fileConfig = fileConfigMapper.selectById(fileConfigId);
assertPojoEquals(reqVO, fileConfig, "id", "config");
assertFalse(fileConfig.getMaster());
assertEquals("/yunai", ((LocalFileClientConfig) fileConfig.getConfig()).getBasePath());
assertEquals("https://www.iocoder.cn", ((LocalFileClientConfig) fileConfig.getConfig()).getDomain());
// verify the cache
assertNull(fileConfigService.getClientCache().getIfPresent(fileConfigId));
} |
public Trans loadTransFromFilesystem( String initialDir, String filename, String jarFilename, Serializable base64Zip ) throws Exception {
Trans trans = null;
File zip;
if ( base64Zip != null && ( zip = decodeBase64ToZipFile( base64Zip, true ) ) != null ) {
// update filename to a meaningful, 'ETL-file-within-zip' syntax
filename = "zip:file:" + File.separator + File.separator + zip.getAbsolutePath() + "!" + filename;
}
// Try to load the transformation from file
if ( !Utils.isEmpty( filename ) ) {
String filepath = filename;
// If the filename starts with a scheme like zip:, then isAbsolute() will return false even though
// the path following the zip is absolute. Check for isAbsolute only if the fileName does not start with a scheme
if ( !KettleVFS.startsWithScheme( filename ) && !FileUtil.isFullyQualified( filename ) ) {
filepath = initialDir + filename;
}
logDebug( "Pan.Log.LoadingTransXML", "" + filepath );
TransMeta transMeta = new TransMeta( filepath );
trans = new Trans( transMeta );
}
if ( !Utils.isEmpty( jarFilename ) ) {
try {
logDebug( "Pan.Log.LoadingTransJar", jarFilename );
InputStream inputStream = PanCommandExecutor.class.getResourceAsStream( jarFilename );
StringBuilder xml = new StringBuilder();
int c;
while ( ( c = inputStream.read() ) != -1 ) {
xml.append( (char) c );
}
inputStream.close();
Document document = XMLHandler.loadXMLString( xml.toString() );
TransMeta transMeta = new TransMeta( XMLHandler.getSubNode( document, "transformation" ), null );
trans = new Trans( transMeta );
} catch ( Exception e ) {
System.out.println( BaseMessages.getString( getPkgClazz(), "Pan.Error.ReadingJar", e.toString() ) );
System.out.println( Const.getStackTracker( e ) );
throw e;
}
}
if ( trans != null ) {
trans.setMetaStore( getMetaStore() );
}
return trans;
} | @Test
public void testMetastoreFromFilesystemAddedIn() throws Exception {
String fullPath = getClass().getResource( SAMPLE_KTR ).getPath();
Trans trans = mockedPanCommandExecutor.loadTransFromFilesystem( "", fullPath, "", "" );
assertNotNull( trans );
assertNotNull( trans.getMetaStore() );
} |
@Override
public int hashCode() {
int result = super.hashCode();
result = 31 * result + (name != null ? name.hashCode() : 0);
result = 31 * result + (managerPrefix != null ? managerPrefix.hashCode() : 0);
result = 31 * result + (uriString != null ? uriString.hashCode() : 0);
result = 31 * result + (userCodeNamespace != null ? userCodeNamespace.hashCode() : 0);
return result;
} | @Test
public void testHashCode() {
CacheConfig cacheConfig = new CacheConfig();
assertTrue(cacheConfig.hashCode() != 0);
} |
@Override
public long getAnalysisDate() {
checkState(analysisDate.isInitialized(), "Analysis date has not been set");
return this.analysisDate.getProperty();
} | @Test
public void getAnalysisDate_throws_ISE_when_holder_is_not_initialized() {
assertThatThrownBy(() -> new AnalysisMetadataHolderImpl(editionProvider).getAnalysisDate())
.isInstanceOf(IllegalStateException.class)
.hasMessage("Analysis date has not been set");
} |
public void wakeOrSkipNextWait() {
// Take early lock to avoid race-conditions. Lock is also taken in wake() (lock is re-entrant)
synchronized (lock) {
final boolean awoken = wake();
if (!awoken) {
LOG.debug("Waiter not waiting, instructing to skip next wait.");
this.skipNextWait = true;
}
}
} | @Test
public void should_not_wait_if_instructed_to_skip_next()
throws ExecutionException, InterruptedException {
Waiter waiter = new Waiter(Duration.ofMillis(1000));
waiter.wakeOrSkipNextWait(); // set skip
Future<Long> waitTime = executor.submit(new WaitForWaiter(waiter));
assertTrue(waitTime.get() < 100L, "Waited: " + waitTime.get());
} |
@Override
public boolean isDataNodeAvailable(long dataNodeId) {
// DataNode and ComputeNode is exchangeable in SHARED_DATA mode
return availableID2ComputeNode.containsKey(dataNodeId);
} | @Test
public void testIsDataNodeAvailable() {
HostBlacklist blockList = SimpleScheduler.getHostBlacklist();
blockList.hostBlacklist.clear();
SystemInfoService sysInfo = GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo();
List<Long> availList = prepareNodeAliveAndBlock(sysInfo, blockList);
WorkerProvider provider = newWorkerProvider();
for (long id = -1; id < 16; id++) {
boolean isAvail = provider.isDataNodeAvailable(id);
ComputeNode worker = provider.getWorkerById(id);
if (!availList.contains(id)) {
Assert.assertFalse(isAvail);
} else {
Assert.assertEquals(id2AllNodes.get(id), worker);
Assert.assertTrue(isAvail);
}
}
} |
public static <K, V> AsMap<K, V> asMap() {
return new AsMap<>(false);
} | @Test
@Category(NeedsRunner.class)
public void testMapInMemorySideInput() {
final PCollectionView<Map<String, Integer>> view =
pipeline
.apply("CreateSideInput", Create.of(KV.of("a", 1), KV.of("b", 3)))
.apply(View.<String, Integer>asMap().inMemory());
PCollection<KV<String, Integer>> output =
pipeline
.apply("CreateMainInput", Create.of("apple", "banana", "blackberry"))
.apply(
"OutputSideInputs",
ParDo.of(
new DoFn<String, KV<String, Integer>>() {
@ProcessElement
public void processElement(ProcessContext c) {
c.output(
KV.of(
c.element(),
c.sideInput(view).get(c.element().substring(0, 1))));
}
})
.withSideInputs(view));
PAssert.that(output)
.containsInAnyOrder(KV.of("apple", 1), KV.of("banana", 3), KV.of("blackberry", 3));
pipeline.run();
} |
BackgroundJobRunner getBackgroundJobRunner(Job job) {
assertJobExists(job.getJobDetails());
return backgroundJobRunners.stream()
.filter(jobRunner -> jobRunner.supports(job))
.findFirst()
.orElseThrow(() -> problematicConfigurationException("Could not find a BackgroundJobRunner: either no JobActivator is registered, your Background Job Class is not registered within the IoC container or your Job does not have a default no-arg constructor."));
} | @Test
void getBackgroundJobRunnerForNonIoCStaticJobWithoutInstance() {
jobActivator.clear();
final Job job = anEnqueuedJob()
.withJobDetails(StaticTestService::doWorkInStaticMethodWithoutParameter)
.build();
assertThat(backgroundJobServer.getBackgroundJobRunner(job))
.isNotNull()
.isInstanceOf(BackgroundStaticJobWithoutIocRunner.class);
} |
public static int pixelYToTileY(double pixelY, byte zoomLevel, int tileSize) {
return (int) Math.min(Math.max(pixelY / tileSize, 0), Math.pow(2, zoomLevel) - 1);
} | @Test
public void pixelYToTileYTest() {
for (int tileSize : TILE_SIZES) {
for (byte zoomLevel = ZOOM_LEVEL_MIN; zoomLevel <= ZOOM_LEVEL_MAX; ++zoomLevel) {
Assert.assertEquals(0, MercatorProjection.pixelYToTileY(0, zoomLevel, tileSize));
Assert.assertEquals(0, MercatorProjection.pixelYToTileYWithScaleFactor(0, MercatorProjection.zoomLevelToScaleFactor(zoomLevel), tileSize));
}
}
} |
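A worked example of the clamping in `pixelYToTileY` above (assuming the common 256-pixel tile size): a pixel coordinate past the map edge clamps to the last valid tile row.

```java
public class TileClampDemo {
    public static void main(String[] args) {
        int tileSize = 256;
        byte zoomLevel = 2;        // 2^2 = 4 tile rows: indices 0..3
        double pixelY = 1500;      // beyond the 4 * 256 = 1024 pixel map height
        int tileY = (int) Math.min(Math.max(pixelY / tileSize, 0), Math.pow(2, zoomLevel) - 1);
        System.out.println(tileY); // 3, the last valid row
    }
}
```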
public void acceptRow(final List<?> key, final GenericRow value) {
try {
if (passedLimit()) {
return;
}
final KeyValue<List<?>, GenericRow> row = keyValue(key, value);
final KeyValueMetadata<List<?>, GenericRow> keyValueMetadata = new KeyValueMetadata<>(row);
while (!closed) {
if (rowQueue.offer(keyValueMetadata, offerTimeoutMs, TimeUnit.MILLISECONDS)) {
onQueued();
totalRowsQueued.incrementAndGet();
break;
}
}
} catch (final InterruptedException e) {
// Forced shutdown?
Thread.currentThread().interrupt();
}
} | @Test
public void shouldQueue() {
// When:
queue.acceptRow(KEY_ONE, VAL_ONE);
queue.acceptRow(KEY_TWO, VAL_TWO);
// Then:
assertThat(drainValues(), contains(keyValue(KEY_ONE, VAL_ONE), keyValue(KEY_TWO, VAL_TWO)));
} |
public String getActions() {
return actions;
} | @Test
public void testGetActions() {
Permission permission = new Permission("classname", "name", "actions");
assertEquals("actions", permission.getActions());
} |
@Override
public long position() throws IOException {
checkOpen();
long pos;
synchronized (this) {
boolean completed = false;
try {
begin(); // don't call beginBlocking() because this method doesn't block
if (!isOpen()) {
return 0; // AsynchronousCloseException will be thrown
}
pos = this.position;
completed = true;
} finally {
end(completed);
}
}
return pos;
} | @Test
public void testPosition() throws IOException {
FileChannel channel = channel(regularFile(10), READ);
assertEquals(0, channel.position());
assertSame(channel, channel.position(100));
assertEquals(100, channel.position());
} |
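One behavior the test above relies on: per the `FileChannel` contract, setting the position past the end of the file is legal, and a subsequent read simply reports end-of-stream. A standalone sketch against a real temp file:

```java
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class PositionBeyondEofDemo {
    public static void main(String[] args) throws IOException {
        Path tmp = Files.createTempFile("pos", ".bin");
        try (FileChannel ch = FileChannel.open(tmp, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
            ch.position(100);                                     // legal even though the file is empty
            System.out.println(ch.position());                    // 100
            System.out.println(ch.read(ByteBuffer.allocate(1)));  // -1: immediate end-of-file
        } finally {
            Files.delete(tmp);
        }
    }
}
```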
@OnMessage
public void onMessage(final String message, final Session session) {
if (!Objects.equals(message, DataEventTypeEnum.MYSELF.name())
&& !Objects.equals(message, DataEventTypeEnum.RUNNING_MODE.name())) {
return;
}
if (Objects.equals(message, DataEventTypeEnum.RUNNING_MODE.name())) {
if (LOG.isDebugEnabled()) {
LOG.debug("websocket fetching running mode info...");
}
// check if this node is master
boolean isMaster = true;
String runningMode = RunningModeEnum.STANDALONE.name();
String masterUrl = StringUtils.EMPTY;
ClusterProperties clusterProperties = SpringBeanUtils.getInstance().getBean(ClusterProperties.class);
if (clusterProperties.isEnabled()) {
ClusterSelectMasterService clusterSelectMasterService = SpringBeanUtils.getInstance().getBean(ClusterSelectMasterService.class);
runningMode = RunningModeEnum.CLUSTER.name();
isMaster = clusterSelectMasterService.isMaster();
masterUrl = clusterSelectMasterService.getMasterUrl();
}
Map<String, Object> map = Maps.newHashMap();
map.put(RunningModeConstants.EVENT_TYPE, DataEventTypeEnum.RUNNING_MODE.name());
map.put(RunningModeConstants.IS_MASTER, isMaster);
map.put(RunningModeConstants.RUNNING_MODE, runningMode);
map.put(RunningModeConstants.MASTER_URL, masterUrl
.replace("http", "ws")
.replace("https", "ws")
.concat("/websocket"));
if (isMaster) {
ThreadLocalUtils.put(SESSION_KEY, session);
}
sendMessageBySession(session, JsonUtils.toJson(map));
return;
}
if (Objects.equals(message, DataEventTypeEnum.MYSELF.name())) {
try {
ThreadLocalUtils.put(SESSION_KEY, session);
SpringBeanUtils.getInstance().getBean(SyncDataService.class).syncAll(DataEventTypeEnum.MYSELF);
} finally {
ThreadLocalUtils.clear();
}
}
} | @Test
public void testOnMessage() {
ConfigurableApplicationContext context = mock(ConfigurableApplicationContext.class);
SpringBeanUtils.getInstance().setApplicationContext(context);
when(SpringBeanUtils.getInstance().getBean(SyncDataService.class)).thenReturn(syncDataService);
when(syncDataService.syncAll(DataEventTypeEnum.MYSELF)).thenReturn(true);
websocketCollector.onOpen(session);
websocketCollector.onMessage(DataEventTypeEnum.MYSELF.name(), session);
assertEquals(1L, getSessionSetSize());
verify(syncDataService, times(1)).syncAll(DataEventTypeEnum.MYSELF);
doNothing().when(loggerSpy).warn(anyString(), anyString());
websocketCollector.onClose(session);
} |
@Override
Map<KeyValueSegment, WriteBatch> getWriteBatches(final Collection<ConsumerRecord<byte[], byte[]>> records) {
final Map<KeyValueSegment, WriteBatch> writeBatchMap = new HashMap<>();
for (final ConsumerRecord<byte[], byte[]> record : records) {
final long timestamp = WindowKeySchema.extractStoreTimestamp(record.key());
observedStreamTime = Math.max(observedStreamTime, timestamp);
minTimestamp = Math.min(minTimestamp, timestamp);
final long segmentId = segments.segmentId(timestamp);
final KeyValueSegment segment = segments.getOrCreateSegmentIfLive(segmentId, context, observedStreamTime);
if (segment != null) {
// segment is null if it has expired, so we skip those records
ChangelogRecordDeserializationHelper.applyChecksAndUpdatePosition(
record,
consistencyEnabled,
position
);
try {
final WriteBatch batch = writeBatchMap.computeIfAbsent(segment, s -> new WriteBatch());
final byte[] baseKey = TimeFirstWindowKeySchema.fromNonPrefixWindowKey(record.key());
segment.addToBatch(new KeyValue<>(baseKey, record.value()), batch);
} catch (final RocksDBException e) {
throw new ProcessorStateException("Error restoring batch to store " + name(), e);
}
}
}
return writeBatchMap;
} | @Test
public void shouldCreateEmptyWriteBatches() {
final Collection<ConsumerRecord<byte[], byte[]>> records = new ArrayList<>();
final Map<KeyValueSegment, WriteBatch> writeBatchMap = bytesStore.getWriteBatches(records);
assertEquals(0, writeBatchMap.size());
} |
@Override
protected MeterProvider defaultProvider() {
return defaultProvider;
} | @Test
public void testAddBatchFromMeterProgrammable() {
initMeterStore();
List<MeterOperation> operations = ImmutableList.of(new MeterOperation(mProgrammable, MeterOperation.Type.ADD));
manager.defaultProvider().performMeterOperation(PROGRAMMABLE_DID, new MeterOperations(operations));
TestTools.assertAfter(500, () -> {
assertEquals("The meter was not added", 1, meterOperations.size());
assertEquals("Wrong Meter Operation", meterOperations.get(0).meter().id(), mProgrammable.id());
});
} |
public DataTable subTable(int fromRow, int fromColumn) {
return subTable(fromRow, fromColumn, height(), width());
} | @Test
void subTable_throws_for_invalid_from_to_column() {
DataTable table = createSimpleTable();
assertThrows(IllegalArgumentException.class, () -> table.subTable(0, 2, 1, 1));
} |
@Override
public List<String> splitAndEvaluate() {
try (ReflectContext context = new ReflectContext(JAVA_CLASSPATH)) {
if (Strings.isNullOrEmpty(inlineExpression)) {
return Collections.emptyList();
}
return flatten(evaluate(context, GroovyUtils.split(handlePlaceHolder(inlineExpression))));
}
} | @Test
void assertEvaluateForCalculate() {
List<String> expected = createInlineExpressionParser("t_${[\"new${1+2}\",'old']}_order_${1..2}").splitAndEvaluate();
assertThat(expected.size(), is(4));
assertThat(expected, hasItems("t_new3_order_1", "t_new3_order_2", "t_old_order_1", "t_old_order_2"));
} |
public static Date parseDateTime(String s) {
try {
return Date.from(OffsetDateTime.parse(s, formatter).toInstant());
} catch (DateTimeParseException e) {
throw new IllegalArgumentException("Fail to parse date: " + s, e);
}
} | @Test
public void fail_if_bad_format() {
try {
UtcDateUtils.parseDateTime("2014-01-14");
fail();
} catch (IllegalArgumentException e) {
assertThat(e).hasMessage("Fail to parse date: 2014-01-14");
}
} |
@Override
public void upload(UploadTask uploadTask) throws IOException {
Throwable error = getErrorSafe();
if (error != null) {
LOG.debug("don't persist {} changesets, already failed", uploadTask.changeSets.size());
uploadTask.fail(error);
return;
}
LOG.debug("persist {} changeSets", uploadTask.changeSets.size());
try {
long size = uploadTask.getSize();
synchronized (lock) {
while (!uploadThrottle.hasCapacity()) {
lock.wait();
}
uploadThrottle.seizeCapacity(size);
if (!uploadThrottle.hasCapacity()) {
availabilityHelper.resetUnavailable();
}
scheduledBytesCounter += size;
scheduled.add(wrapWithSizeUpdate(uploadTask, size));
scheduleUploadIfNeeded();
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
uploadTask.fail(e);
throw new IOException(e);
} catch (Exception e) {
uploadTask.fail(e);
throw e;
}
} | @Test
void testUploadTimeout() throws Exception {
AtomicReference<List<SequenceNumber>> failed = new AtomicReference<>();
UploadTask upload =
new UploadTask(getChanges(4), unused -> {}, (sqn, error) -> failed.set(sqn));
ManuallyTriggeredScheduledExecutorService executorService =
new ManuallyTriggeredScheduledExecutorService();
try (BatchingStateChangeUploadScheduler store =
scheduler(1, executorService, new BlockingUploader(), 1)) {
store.upload(upload);
Deadline deadline = Deadline.fromNow(Duration.ofMinutes(5));
while (!upload.finished.get() && deadline.hasTimeLeft()) {
executorService.triggerScheduledTasks();
executorService.triggerAll();
Thread.sleep(10);
}
}
assertThat(upload.finished.get()).isTrue();
assertThat(
upload.changeSets.stream()
.map(StateChangeSet::getSequenceNumber)
.collect(Collectors.toSet()))
.isEqualTo(new HashSet<>(failed.get()));
} |
@Override
public List<User> list(UserSearchCriteria s) {
// @formatter:off
return getSpi().list(getApiVersion(), s.getContext(), s.getPage(), s.getPerPage(), s.getSearch(), s.getExclude(),
s.getInclude(), s.getOffset(), s.getOrder(), s.getOrderBy(), s.getSlug(),
s.getRoles());
// @formatter:on
} | @Test
public void testListUsers() {
final UserSearchCriteria criteria = new UserSearchCriteria();
criteria.setPage(1);
criteria.setPerPage(10);
final List<User> users = serviceUsers.list(criteria);
assertThat(users, is(not(emptyCollectionOf(User.class))));
assertThat(users.size(), is(3));
} |
static Map<String, Object> of(final Task task) {
return Map.of(
"id", task.getId(),
"type", task.getType()
);
} | @Test
void shouldGetEmptyVariables() {
Map<String, Object> variables = new RunVariables.DefaultBuilder().build(new RunContextLogger());
Assertions.assertEquals(Map.of("envs", Map.of(), "globals", Map.of()), variables);
} |
public static String fix(final String raw) {
if ( raw == null || "".equals( raw.trim() )) {
return raw;
}
MacroProcessor macroProcessor = new MacroProcessor();
macroProcessor.setMacros( macros );
return macroProcessor.parse( raw );
} | @Test
public void testMoreAssertCraziness() {
final String raw = "foobar(); (insert(new String(\"blah\").get()); bangBangYudoHono();)";
assertEqualsIgnoreWhitespace( "foobar(); (drools.insert(new String(\"blah\").get()); bangBangYudoHono();)",
KnowledgeHelperFixerTest.fixer.fix( raw ) );
} |
static Entry<ScramMechanism, String> parsePerMechanismArgument(String input) {
input = input.trim();
int equalsIndex = input.indexOf('=');
if (equalsIndex < 0) {
throw new FormatterException("Failed to find equals sign in SCRAM " +
"argument '" + input + "'");
}
String mechanismString = input.substring(0, equalsIndex);
String configString = input.substring(equalsIndex + 1);
ScramMechanism mechanism = ScramMechanism.forMechanismName(mechanismString);
if (mechanism == null) {
throw new FormatterException("The add-scram mechanism " + mechanismString +
" is not supported.");
}
if (!configString.startsWith("[")) {
throw new FormatterException("Expected configuration string to start with [");
}
if (!configString.endsWith("]")) {
throw new FormatterException("Expected configuration string to end with ]");
}
return new AbstractMap.SimpleImmutableEntry<>(mechanism,
configString.substring(1, configString.length() - 1));
} | @Test
public void testParsePerMechanismArgumentWithUnsupportedScramMethod() {
assertEquals("The add-scram mechanism SCRAM-SHA-UNSUPPORTED is not supported.",
assertThrows(FormatterException.class,
() -> ScramParser.parsePerMechanismArgument(
"SCRAM-SHA-UNSUPPORTED=[name=scram-admin,password=scram-user-secret]")).
getMessage());
} |
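A standalone sketch of the happy path (no Kafka classes; names are illustrative): split on the first '=', then strip the surrounding brackets from the configuration string.

public class ScramArgSketch {
    public static void main(String[] args) {
        String input = "SCRAM-SHA-256=[name=alice,password=alice-secret]";
        int eq = input.indexOf('=');                             // first '=' separates mechanism and config
        String mechanism = input.substring(0, eq);               // "SCRAM-SHA-256"
        String config = input.substring(eq + 1);                 // "[name=alice,password=alice-secret]"
        String body = config.substring(1, config.length() - 1);  // brackets stripped
        System.out.println(mechanism + " -> " + body);
    }
}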
public static PredicateTreeAnalyzerResult analyzePredicateTree(Predicate predicate) {
AnalyzerContext context = new AnalyzerContext();
int treeSize = aggregatePredicateStatistics(predicate, false, context);
int minFeature = ((int)Math.ceil(findMinFeature(predicate, false, context))) + (context.hasNegationPredicate ? 1 : 0);
return new PredicateTreeAnalyzerResult(minFeature, treeSize, context.subTreeSizes);
} | @Test
void require_that_minfeature_rounds_up() {
Predicate p =
or(
feature("foo").inSet("bar"),
feature("foo").inSet("bar"),
feature("foo").inSet("bar"));
PredicateTreeAnalyzerResult r = PredicateTreeAnalyzer.analyzePredicateTree(p);
assertEquals(1, r.minFeature);
assertEquals(3, r.treeSize);
} |
static String generateJdbcPassword() {
int numLower = 2;
int numUpper = 2;
int numSpecial = 2;
return generatePassword(
MIN_PASSWORD_LENGTH,
MAX_PASSWORD_LENGTH,
numLower,
numUpper,
numSpecial,
ALLOWED_SPECIAL_CHARS);
} | @Test
public void testGeneratePasswordMeetsRequirements() {
for (int i = 0; i < 10000; i++) {
String password = generateJdbcPassword();
int lower = 0;
int upper = 0;
int special = 0;
for (int j = 0; j < password.length(); j++) {
char c = password.charAt(j);
String s = String.valueOf(c);
lower += s.toLowerCase().equals(s) ? 1 : 0;
upper += s.toUpperCase().equals(s) ? 1 : 0;
special += ALLOWED_SPECIAL_CHARS.contains(c) ? 1 : 0;
}
assertThat(lower).isAtLeast(2);
assertThat(upper).isAtLeast(2);
assertThat(special).isAtLeast(2);
}
} |
@VisibleForTesting
public NotifyTemplateDO validateNotifyTemplate(String templateCode) {
// Get the in-app notification template. For efficiency, fetch it from the cache.
NotifyTemplateDO template = notifyTemplateService.getNotifyTemplateByCodeFromCache(templateCode);
// The notification template does not exist
if (template == null) {
throw exception(NOTICE_NOT_FOUND);
}
return template;
} | @Test
public void testCheckMailTemplateValid_notExists() {
// prepare parameters
String templateCode = randomString();
// mock methods
// invoke and assert the exception
assertServiceException(() -> notifySendService.validateNotifyTemplate(templateCode),
NOTICE_NOT_FOUND);
} |
public boolean shouldRestartConnector(ConnectorStatus status) {
return !onlyFailed || status.state() == AbstractStatus.State.FAILED;
} | @Test
public void restartOnlyFailedConnector() {
RestartRequest restartRequest = new RestartRequest(CONNECTOR_NAME, true, false);
assertTrue(restartRequest.shouldRestartConnector(createConnectorStatus(AbstractStatus.State.FAILED)));
assertFalse(restartRequest.shouldRestartConnector(createConnectorStatus(AbstractStatus.State.RUNNING)));
assertFalse(restartRequest.shouldRestartConnector(createConnectorStatus(AbstractStatus.State.PAUSED)));
} |
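A standalone truth-table sketch of the same predicate (hypothetical mini-enum): with onlyFailed set, only FAILED connectors restart; otherwise every state does.

public class RestartPredicateSketch {
    enum State { RUNNING, FAILED, PAUSED }
    static boolean shouldRestart(boolean onlyFailed, State state) {
        return !onlyFailed || state == State.FAILED;
    }
    public static void main(String[] args) {
        System.out.println(shouldRestart(true, State.FAILED));  // true
        System.out.println(shouldRestart(true, State.RUNNING)); // false
        System.out.println(shouldRestart(false, State.PAUSED)); // true
    }
}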
boolean isVisible(String key, Optional<EntityDto> component) {
if (isAdmin(component)) {
return true;
}
return hasPermission(GlobalPermission.SCAN, UserRole.SCAN, component) || !isProtected(key);
} | @Test
public void isVisible() {
openMocks(this);
when(userSession.isSystemAdministrator()).thenReturn(isAdmin);
when(userSession.hasPermission(GlobalPermission.SCAN)).thenReturn(hasGlobalPermission);
when(userSession.hasEntityPermission(UserRole.SCAN, componentDto)).thenReturn(hasComponentPermission);
boolean isVisible = settingsWsSupport.isVisible(property, Optional.of(componentDto));
assertThat(isVisible).isEqualTo(expectedIsVisible);
} |
public void removeExpiration(
long now, long timeout, Collection<PartitionRequestListener> timeoutListeners) {
Iterator<Map.Entry<InputChannelID, PartitionRequestListener>> iterator =
listeners.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<InputChannelID, PartitionRequestListener> entry = iterator.next();
PartitionRequestListener partitionRequestListener = entry.getValue();
if ((now - partitionRequestListener.getCreateTimestamp()) > timeout) {
timeoutListeners.add(partitionRequestListener);
iterator.remove();
}
}
} | @Test
void testRemoveExpiration() {
PartitionRequestListenerManager partitionRequestListenerManager =
new PartitionRequestListenerManager();
assertThat(partitionRequestListenerManager.isEmpty()).isTrue();
List<PartitionRequestListener> listenerList = new ArrayList<>();
List<PartitionRequestListener> expireListenerList = new ArrayList<>();
NettyPartitionRequestListener listener1 =
new NettyPartitionRequestListener(
TestingResultPartitionProvider.newBuilder().build(),
TestingSubpartitionCreatedViewReader.newBuilder()
.setReceiverId(new InputChannelID())
.build(),
new ResultSubpartitionIndexSet(0),
new ResultPartitionID(),
0L);
partitionRequestListenerManager.registerListener(listener1);
expireListenerList.add(listener1);
NettyPartitionRequestListener listener2 =
new NettyPartitionRequestListener(
TestingResultPartitionProvider.newBuilder().build(),
TestingSubpartitionCreatedViewReader.newBuilder()
.setReceiverId(new InputChannelID())
.build(),
new ResultSubpartitionIndexSet(1),
new ResultPartitionID(),
0L);
partitionRequestListenerManager.registerListener(listener2);
expireListenerList.add(listener2);
long currentTimestamp = System.currentTimeMillis();
NettyPartitionRequestListener listener3 =
new NettyPartitionRequestListener(
TestingResultPartitionProvider.newBuilder().build(),
TestingSubpartitionCreatedViewReader.newBuilder()
.setReceiverId(new InputChannelID())
.build(),
new ResultSubpartitionIndexSet(2),
new ResultPartitionID(),
currentTimestamp);
partitionRequestListenerManager.registerListener(listener3);
listenerList.add(listener3);
List<PartitionRequestListener> removeExpireListenerList = new ArrayList<>();
partitionRequestListenerManager.removeExpiration(
currentTimestamp, 1L, removeExpireListenerList);
assertThat(partitionRequestListenerManager.getPartitionRequestListeners())
.hasSize(listenerList.size());
assertThat(listenerList)
.containsAll(partitionRequestListenerManager.getPartitionRequestListeners());
assertThat(removeExpireListenerList).hasSize(expireListenerList.size());
assertThat(expireListenerList).containsAll(removeExpireListenerList);
} |
@Override
public boolean contains(Object o) {
if (o instanceof Integer) {
int value = (Integer) o;
return value >= from && value < to;
}
return false;
} | @Test
void testContains() {
RangeSet rangeSet = new RangeSet(5, 10);
assertTrue(rangeSet.contains(5));
assertTrue(rangeSet.contains(9));
assertFalse(rangeSet.contains(10));
assertFalse(rangeSet.contains(4));
} |
@Udf
public <T> List<T> distinct(
@UdfParameter(description = "Array of values to distinct") final List<T> input) {
if (input == null) {
return null;
}
final Set<T> distinctVals = Sets.newLinkedHashSetWithExpectedSize(input.size());
distinctVals.addAll(input);
return new ArrayList<>(distinctVals);
} | @Test
public void shouldConsiderNullAsDistinctValue() {
final List<Object> result = udf.distinct(Arrays.asList(1, 2, 1, null, 2, null, 3, 1));
assertThat(result, contains(1, 2, null, 3));
} |
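A minimal sketch of the underlying idea: a LinkedHashSet keeps first-occurrence order while dropping duplicates, and null is allowed as a single distinct value.

import java.util.*;

public class DistinctSketch {
    public static void main(String[] args) {
        List<Integer> input = Arrays.asList(1, 2, 1, null, 2, null, 3, 1);
        List<Integer> distinct = new ArrayList<>(new LinkedHashSet<>(input));
        System.out.println(distinct); // [1, 2, null, 3]
    }
}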
public boolean isDisabled() {
return _disabled;
} | @Test
public void withDisabledTrue()
throws JsonProcessingException {
String confStr = "{\"disabled\": true}";
IndexConfig config = JsonUtils.stringToObject(confStr, IndexConfig.class);
assertTrue(config.isDisabled(), "Unexpected disabled");
} |
void archive(ScanResults scanResults) throws InvalidProtocolBufferException {
if (!Strings.isNullOrEmpty(options.localOutputFilename)) {
archive(rawFileArchiver, options.localOutputFilename, options.localOutputFormat, scanResults);
}
if (!Strings.isNullOrEmpty(options.gcsOutputFileUrl)) {
GoogleCloudStorageArchiver archiver =
googleCloudStorageArchiverFactory.create(getGcsStorage());
archive(archiver, options.gcsOutputFileUrl, options.gcsOutputFormat, scanResults);
}
} | @Test
public void archive_withNoStorageEnabled_storesNothing() throws InvalidProtocolBufferException {
options.localOutputFilename = "";
options.gcsOutputFileUrl = "";
scanResultsArchiver.archive(SCAN_RESULTS);
fakeRawFileArchiver.assertNoDataStored();
fakeGoogleCloudStorageArchivers.assertNoDataStored();
} |
public static String formatExpression(final Expression expression) {
return formatExpression(expression, FormatOptions.of(s -> false));
} | @Test
public void shouldFormatMaxDoubleLiteral() {
assertThat(ExpressionFormatter.formatExpression(
new DoubleLiteral(Double.MAX_VALUE)),
equalTo("1.7976931348623157E308"));
} |
public FEELFnResult<Boolean> invoke(@ParameterName( "point" ) Comparable point, @ParameterName( "range" ) Range range) {
if ( point == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be null"));
}
if ( range == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "range", "cannot be null"));
}
try {
boolean result = ( range.getLowBoundary() == Range.RangeBoundary.CLOSED && point.compareTo( range.getLowEndPoint() ) == 0 );
return FEELFnResult.ofResult( result );
} catch( Exception e ) {
// points are not comparable
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be compared to range"));
}
} | @Test
void invokeParamsCantBeCompared() {
FunctionTestUtil.assertResultError( startsFunction.invoke(
new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ),
new RangeImpl( Range.RangeBoundary.CLOSED, 1, 2, Range.RangeBoundary.CLOSED ) ), InvalidParametersEvent.class );
} |
@Override
public Object fromBody(TypedInput body, Type type) throws ConversionException {
try (BufferedReader reader = new BufferedReader(new InputStreamReader(body.in()))) {
String json = reader.readLine();
log.debug("Converting response from influxDb: {}", json);
Map result = getResultObject(json);
List<Map> seriesList = (List<Map>) result.get("series");
if (CollectionUtils.isEmpty(seriesList)) {
log.warn("Received no data from Influxdb.");
return null;
}
Map series = seriesList.get(0);
List<String> seriesColumns = (List<String>) series.get("columns");
List<List> seriesValues = (List<List>) series.get("values");
List<InfluxDbResult> influxDbResultsList = new ArrayList<InfluxDbResult>(seriesValues.size());
// TODO(joerajeev): if returning tags (other than the field names) we will need to skip tags
// from this loop, and extract and set the tag values on the InfluxDb result.
for (int i = 1;
i < seriesColumns.size();
i++) { // Starting from index 1 to skip 'time' column
String id = seriesColumns.get(i);
long firstTimeMillis = extractTimeInMillis(seriesValues, 0);
long stepMillis = calculateStep(seriesValues, firstTimeMillis);
List<Double> values = new ArrayList<>(seriesValues.size());
for (List<Object> valueRow : seriesValues) {
if (valueRow.get(i) != null) {
String val = valueRow.get(i).toString();
values.add(Double.valueOf(val));
}
}
influxDbResultsList.add(new InfluxDbResult(id, firstTimeMillis, stepMillis, null, values));
}
log.debug("Converted response: {} ", influxDbResultsList);
return influxDbResultsList;
} catch (IOException e) {
e.printStackTrace();
}
return null;
} | @Test
public void deserializeWrongValue() throws Exception {
TypedInput input = new TypedByteArray(MIME_TYPE, "{\"foo\":\"bar\"}".getBytes());
assertThrows(
ConversionException.class, () -> influxDbResponseConverter.fromBody(input, List.class));
} |
@Override
// Camel calls this method if the endpoint isSynchronous(), as the
// KafkaEndpoint creates a SynchronousDelegateProducer for it
public void process(Exchange exchange) throws Exception {
// is the message body a list or something that contains multiple values
Message message = exchange.getIn();
if (transactionId != null) {
startKafkaTransaction(exchange);
}
if (endpoint.getConfiguration().isUseIterator() && isIterable(message.getBody())) {
processIterableSync(exchange, message);
} else {
processSingleMessageSync(exchange, message);
}
} | @Test
public void processSendsMessageWithOverrideTopicHeaderAndEndPoint() throws Exception {
endpoint.getConfiguration().setTopic("sometopic");
Mockito.when(exchange.getIn()).thenReturn(in);
Mockito.when(exchange.getMessage()).thenReturn(in);
in.setHeader(KafkaConstants.PARTITION_KEY, 4);
in.setHeader(KafkaConstants.OVERRIDE_TOPIC, "anotherTopic");
in.setHeader(KafkaConstants.KEY, "someKey");
// test using a string value instead of long
String time = String.valueOf(LocalDateTime.now().atZone(ZoneId.systemDefault()).toInstant().toEpochMilli());
in.setHeader(KafkaConstants.OVERRIDE_TIMESTAMP, time);
producer.process(exchange);
// the header is now removed
assertNull(in.getHeader(KafkaConstants.OVERRIDE_TOPIC));
verifySendMessage(4, "anotherTopic", "someKey");
assertRecordMetadataExists();
} |
public static String substVars(String val, PropertyContainer pc1) {
return substVars(val, pc1, null);
} | @Test
public void testSubstVarsRecursive() {
context.putProperty("v1", "if");
context.putProperty("v2", "${v3}");
context.putProperty("v3", "works");
String result = OptionHelper.substVars(text, context);
assertEquals(expected, result);
} |
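A standalone sketch of recursive ${...} expansion in the same spirit (an illustration, not logback's implementation): re-scanning after each substitution is what lets v2=${v3} resolve through to "works".

import java.util.*;

public class SubstSketch {
    static String subst(String val, Map<String, String> props) {
        int start;
        while ((start = val.indexOf("${")) >= 0) {
            int end = val.indexOf('}', start);
            String replacement = props.getOrDefault(val.substring(start + 2, end), "");
            val = val.substring(0, start) + replacement + val.substring(end + 1);
        }
        return val;
    }
    public static void main(String[] args) {
        Map<String, String> props = new HashMap<>();
        props.put("v1", "if");
        props.put("v2", "${v3}");
        props.put("v3", "works");
        System.out.println(subst("${v1} recursion ${v2}", props)); // if recursion works
    }
}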
@Override
public void execute(Map<String, List<String>> parameters, PrintWriter output) throws Exception {
final List<String> loggerNames = getLoggerNames(parameters);
final Level loggerLevel = getLoggerLevel(parameters);
final Duration duration = getDuration(parameters);
for (String loggerName : loggerNames) {
Logger logger = ((LoggerContext) loggerContext).getLogger(loggerName);
String message = String.format("Configured logging level for %s to %s", loggerName, loggerLevel);
if (loggerLevel != null && duration != null) {
final long millis = duration.toMillis();
getTimer().schedule(new TimerTask() {
@Override
public void run() {
logger.setLevel(null);
}
}, millis);
message += String.format(" for %s milliseconds", millis);
}
logger.setLevel(loggerLevel);
output.println(message);
output.flush();
}
} | @Test
void configuresDefaultLevelForALogger() throws Exception {
// given
Level oneEffectiveBefore = logger1.getEffectiveLevel();
Level twoEffectiveBefore = logger2.getEffectiveLevel();
Map<String, List<String>> parameters = Map.of("logger", List.of("logger.one"));
// when
task.execute(parameters, output);
// then
assertThat(logger1.getLevel()).isNull();
assertThat(logger1.getEffectiveLevel()).isEqualTo(oneEffectiveBefore);
assertThat(logger2.getEffectiveLevel()).isEqualTo(twoEffectiveBefore);
assertThat(stringWriter).hasToString(String.format("Configured logging level for logger.one to null%n"));
} |
public long addPublication(final String channel, final int streamId)
{
final long correlationId = toDriverCommandBuffer.nextCorrelationId();
final int length = PublicationMessageFlyweight.computeLength(channel.length());
final int index = toDriverCommandBuffer.tryClaim(ADD_PUBLICATION, length);
if (index < 0)
{
throw new AeronException("could not write add publication command");
}
publicationMessage
.wrap(toDriverCommandBuffer.buffer(), index)
.streamId(streamId)
.channel(channel)
.clientId(clientId)
.correlationId(correlationId);
toDriverCommandBuffer.commit(index);
return correlationId;
} | @Test
void threadSendsAddChannelMessage()
{
threadSendsChannelMessage(() -> conductor.addPublication(CHANNEL, STREAM_ID), ADD_PUBLICATION);
} |
@Override
public final boolean offer(int ordinal, @Nonnull Object item) {
if (ordinal == -1) {
return offerInternal(allEdges, item);
} else {
if (ordinal == bucketCount()) {
// ordinal beyond bucketCount will add to snapshot queue, which we don't allow through this method
throw new IllegalArgumentException("Illegal edge ordinal: " + ordinal);
}
singleEdge[0] = ordinal;
return offerInternal(singleEdge, item);
}
} | @Test
public void when_offer1FailsAndDifferentItemOffered_then_fail() {
do_when_offerDifferent_then_fail(e -> outbox.offer(e));
} |
public URL getInterNodeListener(
final Function<URL, Integer> portResolver
) {
return getInterNodeListener(portResolver, LOGGER);
} | @Test
public void shouldResolveInterNodeListenerToInternalListenerSetToIpv4Loopback() {
// Given:
final URL expected = url("https://127.0.0.2:12345");
final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.<String, Object>builder()
.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
.put(INTERNAL_LISTENER_CONFIG, expected.toString())
.build()
);
// When:
final URL actual = config.getInterNodeListener(portResolver, logger);
// Then:
assertThat(actual, is(expected));
verifyLogsInterNodeListener(expected, QUOTED_INTERNAL_LISTENER_CONFIG);
verifyLogsLoopBackWarning(expected, QUOTED_INTERNAL_LISTENER_CONFIG);
verifyNoMoreInteractions(logger);
} |
public static NodeRef nodeFromPod(Pod pod) {
return new NodeRef(
pod.getMetadata().getName(),
ReconcilerUtils.getPodIndexFromPodName(pod.getMetadata().getName()),
ReconcilerUtils.getPoolNameFromPodName(clusterNameFromLabel(pod), pod.getMetadata().getName()),
hasRole(pod, Labels.STRIMZI_CONTROLLER_ROLE_LABEL),
hasRole(pod, Labels.STRIMZI_BROKER_ROLE_LABEL));
} | @Test
public void testNodeRefFromPod() {
NodeRef node = ReconcilerUtils.nodeFromPod(new PodBuilder()
.withNewMetadata()
.withName("my-cluster-new-brokers-1")
.withLabels(Map.of(Labels.STRIMZI_CLUSTER_LABEL, "my-cluster"))
.endMetadata()
.build());
assertThat(node.podName(), is("my-cluster-new-brokers-1"));
assertThat(node.nodeId(), is(1));
assertThat(node.poolName(), is("new-brokers"));
assertThat(node.controller(), is(false));
assertThat(node.broker(), is(false));
node = ReconcilerUtils.nodeFromPod(new PodBuilder()
.withNewMetadata()
.withName("my-cluster-new-brokers-1")
.withLabels(Map.of(Labels.STRIMZI_CLUSTER_LABEL, "my-cluster", Labels.STRIMZI_CONTROLLER_ROLE_LABEL, "true", Labels.STRIMZI_BROKER_ROLE_LABEL, "false"))
.endMetadata()
.build());
assertThat(node.podName(), is("my-cluster-new-brokers-1"));
assertThat(node.nodeId(), is(1));
assertThat(node.poolName(), is("new-brokers"));
assertThat(node.controller(), is(true));
assertThat(node.broker(), is(false));
node = ReconcilerUtils.nodeFromPod(new PodBuilder()
.withNewMetadata()
.withName("my-cluster-new-brokers-1")
.withLabels(Map.of(Labels.STRIMZI_CLUSTER_LABEL, "my-cluster", Labels.STRIMZI_CONTROLLER_ROLE_LABEL, "false", Labels.STRIMZI_BROKER_ROLE_LABEL, "true"))
.endMetadata()
.build());
assertThat(node.podName(), is("my-cluster-new-brokers-1"));
assertThat(node.nodeId(), is(1));
assertThat(node.poolName(), is("new-brokers"));
assertThat(node.controller(), is(false));
assertThat(node.broker(), is(true));
} |
@Override
protected int command() {
if (!validateConfigFilePresent()) {
return 1;
}
final MigrationConfig config;
try {
config = MigrationConfig.load(getConfigFile());
} catch (KsqlException | MigrationException e) {
LOGGER.error(e.getMessage());
return 1;
}
return command(
config,
MigrationsUtil::getKsqlClient,
getMigrationsDir(getConfigFile(), config),
Clock.systemDefaultZone()
);
} | @Test
public void shouldApplyCreateConnectorStatement() throws Exception {
// Given:
command = PARSER.parse("-v", "3");
createMigrationFile(1, NAME, migrationsDir, COMMAND);
createMigrationFile(3, NAME, migrationsDir, CREATE_CONNECTOR);
givenCurrentMigrationVersion("1");
givenAppliedMigration(1, NAME, MigrationState.MIGRATED);
// When:
final int result = command.command(config, (cfg, headers) -> ksqlClient, migrationsDir, Clock.fixed(
Instant.ofEpochMilli(1000), ZoneId.systemDefault()));
// Then:
assertThat(result, is(0));
final InOrder inOrder = inOrder(ksqlClient);
verifyMigratedVersion(inOrder, 3, "1", MigrationState.MIGRATED,
() -> inOrder.verify(ksqlClient).createConnector("`WOOF`", false, CONNECTOR_PROPERTIES, false));
inOrder.verify(ksqlClient).close();
inOrder.verifyNoMoreInteractions();
} |
public ClusterStateBundle.FeedBlock inferContentClusterFeedBlockOrNull(ContentCluster cluster) {
if (!feedBlockEnabled) {
return null;
}
var nodeInfos = cluster.getNodeInfos();
var exhaustions = enumerateNodeResourceExhaustionsAcrossAllNodes(nodeInfos);
if (exhaustions.isEmpty()) {
return null;
}
int maxDescriptions = 3;
String description = exhaustions.stream()
.limit(maxDescriptions)
.map(NodeResourceExhaustion::toExhaustionAddedDescription)
.collect(Collectors.joining(", "));
if (exhaustions.size() > maxDescriptions) {
description += String.format(" (... and %d more)", exhaustions.size() - maxDescriptions);
}
description = decoratedMessage(cluster, description);
// FIXME we currently will trigger a cluster state recomputation even if the number of
// exhaustions is greater than what is returned as part of the description. Though at
// that point, cluster state recomputations will be the least of your worries...!
return ClusterStateBundle.FeedBlock.blockedWith(description, exhaustions);
} | @Test
void retained_node_feed_block_cleared_once_hysteresis_threshold_is_passed() {
var curFeedBlock = ClusterStateBundle.FeedBlock.blockedWith("foo", setOf(exhaustion(1, "memory", 0.48)));
var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.5)), curFeedBlock, 0.1);
// Node 1 goes from 0.48 to 0.39. Should be unblocked
// Node 2 is at 0.49 but was not previously blocked and should not be blocked now either.
var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.3), usage("memory", 0.39)),
forNode(2, usage("disk", 0.3), usage("memory", 0.49)));
var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster());
assertNull(feedBlock);
} |
@VisibleForTesting
static Function<List<String>, ProcessBuilder> defaultProcessBuilderFactory(
String dockerExecutable, ImmutableMap<String, String> dockerEnvironment) {
return dockerSubCommand -> {
List<String> dockerCommand = new ArrayList<>(1 + dockerSubCommand.size());
dockerCommand.add(dockerExecutable);
dockerCommand.addAll(dockerSubCommand);
ProcessBuilder processBuilder = new ProcessBuilder(dockerCommand);
Map<String, String> environment = processBuilder.environment();
environment.putAll(dockerEnvironment);
return processBuilder;
};
} | @Test
public void testDefaultProcessorBuilderFactory_customEnvironment() {
ImmutableMap<String, String> environment = ImmutableMap.of("Key1", "Value1");
Map<String, String> expectedEnvironment = new HashMap<>(System.getenv());
expectedEnvironment.putAll(environment);
ProcessBuilder processBuilder =
CliDockerClient.defaultProcessBuilderFactory("docker", environment)
.apply(Collections.emptyList());
Assert.assertEquals(expectedEnvironment, processBuilder.environment());
} |
public void checkOpen() {
if (!open.get()) {
throw new ClosedFileSystemException();
}
} | @Test
public void testCheckOpen() throws IOException {
state.checkOpen(); // does not throw
state.close();
try {
state.checkOpen();
fail();
} catch (ClosedFileSystemException expected) {
}
} |
@JsonProperty
@Override
public String getId()
{
return id;
} | @Test
public void testCompatibility()
{
String goldenValue = "{\n" +
" \"id\" : \"20160128_214710_00012_rk68b\",\n" +
" \"infoUri\" : \"http://localhost:54855/query.html?20160128_214710_00012_rk68b\",\n" +
" \"columns\" : [ {\n" +
" \"name\" : \"_col0\",\n" +
" \"type\" : \"bigint\",\n" +
" \"typeSignature\" : {\n" +
" \"rawType\" : \"bigint\",\n" +
" \"typeArguments\" : [ ],\n" +
" \"literalArguments\" : [ ],\n" +
" \"arguments\" : [ ]\n" +
" }\n" +
" } ],\n" +
" \"data\" : [ [ 123 ] ],\n" +
" \"stats\" : {\n" +
" \"state\" : \"FINISHED\",\n" +
" \"queued\" : false,\n" +
" \"scheduled\" : false,\n" +
" \"nodes\" : 0,\n" +
" \"totalSplits\" : 0,\n" +
" \"queuedSplits\" : 0,\n" +
" \"runningSplits\" : 0,\n" +
" \"completedSplits\" : 0,\n" +
" \"cpuTimeMillis\" : 0,\n" +
" \"wallTimeMillis\" : 0,\n" +
" \"queuedTimeMillis\" : 0,\n" +
" \"elapsedTimeMillis\" : 0,\n" +
" \"processedRows\" : 0,\n" +
" \"processedBytes\" : 0,\n" +
" \"peakMemoryBytes\" : 0\n" +
" }\n" +
"}";
QueryResults results = QUERY_RESULTS_CODEC.fromJson(goldenValue);
assertEquals(results.getId(), "20160128_214710_00012_rk68b");
} |
@SuppressWarnings("deprecation")
static Object[] buildArgs(final Object[] positionalArguments,
final ResourceMethodDescriptor resourceMethod,
final ServerResourceContext context,
final DynamicRecordTemplate template,
final ResourceMethodConfig resourceMethodConfig)
{
List<Parameter<?>> parameters = resourceMethod.getParameters();
Object[] arguments = Arrays.copyOf(positionalArguments, parameters.size());
fixUpComplexKeySingletonArraysInArguments(arguments);
boolean attachmentsDesired = false;
for (int i = positionalArguments.length; i < parameters.size(); ++i)
{
Parameter<?> param = parameters.get(i);
try
{
if (param.getParamType() == Parameter.ParamType.KEY || param.getParamType() == Parameter.ParamType.ASSOC_KEY_PARAM)
{
Object value = context.getPathKeys().get(param.getName());
if (value != null)
{
arguments[i] = value;
continue;
}
}
else if (param.getParamType() == Parameter.ParamType.CALLBACK)
{
continue;
}
else if (param.getParamType() == Parameter.ParamType.PARSEQ_CONTEXT_PARAM || param.getParamType() == Parameter.ParamType.PARSEQ_CONTEXT)
{
continue; // don't know what to fill in yet
}
else if (param.getParamType() == Parameter.ParamType.HEADER)
{
HeaderParam headerParam = param.getAnnotations().get(HeaderParam.class);
String value = context.getRequestHeaders().get(headerParam.value());
arguments[i] = value;
continue;
}
//Since we have multiple different types of MaskTrees that can be passed into resource methods,
//we must evaluate based on the param type (annotation used)
else if (param.getParamType() == Parameter.ParamType.PROJECTION || param.getParamType() == Parameter.ParamType.PROJECTION_PARAM)
{
arguments[i] = context.getProjectionMask();
continue;
}
else if (param.getParamType() == Parameter.ParamType.METADATA_PROJECTION_PARAM)
{
arguments[i] = context.getMetadataProjectionMask();
continue;
}
else if (param.getParamType() == Parameter.ParamType.PAGING_PROJECTION_PARAM)
{
arguments[i] = context.getPagingProjectionMask();
continue;
}
else if (param.getParamType() == Parameter.ParamType.CONTEXT || param.getParamType() == Parameter.ParamType.PAGING_CONTEXT_PARAM)
{
PagingContext ctx = RestUtils.getPagingContext(context, (PagingContext) param.getDefaultValue());
arguments[i] = ctx;
continue;
}
else if (param.getParamType() == Parameter.ParamType.PATH_KEYS || param.getParamType() == Parameter.ParamType.PATH_KEYS_PARAM)
{
arguments[i] = context.getPathKeys();
continue;
}
else if (param.getParamType() == Parameter.ParamType.PATH_KEY_PARAM) {
Object value = context.getPathKeys().get(param.getName());
if (value != null)
{
arguments[i] = value;
continue;
}
}
else if (param.getParamType() == Parameter.ParamType.RESOURCE_CONTEXT || param.getParamType() == Parameter.ParamType.RESOURCE_CONTEXT_PARAM)
{
arguments[i] = context;
continue;
}
else if (param.getParamType() == Parameter.ParamType.VALIDATOR_PARAM)
{
RestLiDataValidator validator = new RestLiDataValidator(resourceMethod.getResourceModel().getResourceClass().getAnnotations(),
resourceMethod.getResourceModel().getValueClass(), resourceMethod.getMethodType());
arguments[i] = validator;
continue;
}
else if (param.getParamType() == Parameter.ParamType.RESTLI_ATTACHMENTS_PARAM)
{
arguments[i] = context.getRequestAttachmentReader();
attachmentsDesired = true;
continue;
}
else if (param.getParamType() == Parameter.ParamType.UNSTRUCTURED_DATA_WRITER_PARAM)
{
// The OutputStream is passed to the resource implementation in a synchronous call. Upon return of the
// resource method, all the bytes would have been written to the OutputStream. The EntityStream will
// contain all the bytes by the time data is requested. The ownership of the OutputStream is passed to
// the ByteArrayOutputStreamWriter, which is responsible for closing the OutputStream if necessary.
ByteArrayOutputStream out = new ByteArrayOutputStream();
context.setResponseEntityStream(EntityStreams.newEntityStream(new ByteArrayOutputStreamWriter(out)));
arguments[i] = new UnstructuredDataWriter(out, context);
continue;
}
else if (param.getParamType() == Parameter.ParamType.UNSTRUCTURED_DATA_REACTIVE_READER_PARAM)
{
arguments[i] = new UnstructuredDataReactiveReader(context.getRequestEntityStream(), context.getRawRequest().getHeader(RestConstants.HEADER_CONTENT_TYPE));
continue;
}
else if (param.getParamType() == Parameter.ParamType.POST)
{
// handle action parameters
if (template != null)
{
DataMap data = template.data();
if (data.containsKey(param.getName()))
{
arguments[i] = template.getValue(param);
continue;
}
}
}
else if (param.getParamType() == Parameter.ParamType.QUERY)
{
Object value;
if (DataTemplate.class.isAssignableFrom(param.getType()))
{
value = buildDataTemplateArgument(context.getStructuredParameter(param.getName()), param,
resourceMethodConfig.shouldValidateQueryParams());
}
else
{
value = buildRegularArgument(context, param, resourceMethodConfig.shouldValidateQueryParams());
}
if (value != null)
{
arguments[i] = value;
continue;
}
}
else if (param.getParamType() == Parameter.ParamType.BATCH || param.getParamType() == Parameter.ParamType.RESOURCE_KEY)
{
// should not come to this routine since it should be handled by passing in positionalArguments
throw new RoutingException("Parameter '" + param.getName() + "' should be passed in as a positional argument",
HttpStatus.S_400_BAD_REQUEST.getCode());
}
else
{
// unknown param type
throw new RoutingException(
"Parameter '" + param.getName() + "' has an unknown parameter type '" + param.getParamType().name() + "'",
HttpStatus.S_400_BAD_REQUEST.getCode());
}
}
catch (TemplateRuntimeException e)
{
throw new RoutingException("Parameter '" + param.getName() + "' is invalid", HttpStatus.S_400_BAD_REQUEST.getCode());
}
try
{
// Handle null-valued parameters not provided in the resource context or entity body:
// check whether it is an optional parameter
if (param.isOptional() && param.hasDefaultValue())
{
arguments[i] = param.getDefaultValue();
}
else if (param.isOptional() && !param.getType().isPrimitive())
{
// an optional non-primitive parameter may be null; an optional primitive must have a default value or be provided
arguments[i] = null;
}
else
{
throw new RoutingException("Parameter '" + param.getName() + "' is required", HttpStatus.S_400_BAD_REQUEST.getCode());
}
}
catch (ResourceConfigException e)
{
// Parameter default value format exception should result in server error code 500.
throw new RestLiServiceException(HttpStatus.S_500_INTERNAL_SERVER_ERROR,
"Parameter '" + param.getName() + "' default value is invalid", e);
}
}
//Verify that if the resource method did not expect attachments, and attachments were present, that we drain all
//incoming attachments and send back a bad request. We must take precaution here since simply ignoring the request
//attachments is not correct behavior here. Ignoring other request level constructs such as headers or query parameters
//that were not needed is safe, but not for request attachments.
if (!attachmentsDesired && context.getRequestAttachmentReader() != null)
{
throw new RestLiServiceException(HttpStatus.S_400_BAD_REQUEST,
"Resource method endpoint invoked does not accept any request attachments.");
}
return arguments;
} | @Test
@SuppressWarnings("deprecation")
public void testResourceContextParameterType()
{
String testParamKey = "testParam";
ServerResourceContext mockResourceContext = EasyMock.createMock(ServerResourceContext.class);
List<Parameter<?>> parameters = new ArrayList<>();
Parameter<ResourceContext> param1 = new Parameter<>(testParamKey, ResourceContext.class, null,
false, null, Parameter.ParamType.RESOURCE_CONTEXT, false, AnnotationSet.EMPTY);
Parameter<ResourceContext> param2 = new Parameter<>(testParamKey, ResourceContext.class, null,
false, null, Parameter.ParamType.RESOURCE_CONTEXT_PARAM, false, AnnotationSet.EMPTY);
parameters.add(param1);
parameters.add(param2);
EasyMock.expect(mockResourceContext.getRequestAttachmentReader()).andReturn(null);
Object[] results = ArgumentBuilder.buildArgs(new Object[0], getMockResourceMethod(parameters), mockResourceContext, null, getMockResourceMethodConfig(false));
Assert.assertEquals(results[0], mockResourceContext);
Assert.assertEquals(results[1], mockResourceContext);
} |
public boolean setNewAuthor(DefaultIssue issue, @Nullable String newAuthorLogin, IssueChangeContext context) {
if (isNullOrEmpty(newAuthorLogin)) {
return false;
}
checkState(issue.authorLogin() == null, "It's not possible to update the author with this method, please use setAuthorLogin()");
issue.setFieldChange(context, AUTHOR, null, newAuthorLogin);
issue.setAuthorLogin(newAuthorLogin);
issue.setUpdateDate(context.date());
issue.setChanged(true);
// do not send notifications to prevent spam when installing the developer cockpit plugin
return true;
} | @Test
void not_set_new_author_if_new_author_is_null() {
boolean updated = underTest.setNewAuthor(issue, null, context);
assertThat(updated).isFalse();
assertThat(issue.currentChange()).isNull();
assertThat(issue.mustSendNotifications()).isFalse();
} |
public static FunctionConfig validateUpdate(FunctionConfig existingConfig, FunctionConfig newConfig) {
FunctionConfig mergedConfig = existingConfig.toBuilder().build();
if (!existingConfig.getTenant().equals(newConfig.getTenant())) {
throw new IllegalArgumentException("Tenants differ");
}
if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) {
throw new IllegalArgumentException("Namespaces differ");
}
if (!existingConfig.getName().equals(newConfig.getName())) {
throw new IllegalArgumentException("Function Names differ");
}
if (!StringUtils.isEmpty(newConfig.getClassName())) {
mergedConfig.setClassName(newConfig.getClassName());
}
if (!StringUtils.isEmpty(newConfig.getJar())) {
mergedConfig.setJar(newConfig.getJar());
}
if (newConfig.getInputSpecs() == null) {
newConfig.setInputSpecs(new HashMap<>());
}
if (mergedConfig.getInputSpecs() == null) {
mergedConfig.setInputSpecs(new HashMap<>());
}
if (newConfig.getInputs() != null) {
newConfig.getInputs().forEach((topicName -> {
newConfig.getInputSpecs().put(topicName,
ConsumerConfig.builder().isRegexPattern(false).build());
}));
}
if (newConfig.getTopicsPattern() != null && !newConfig.getTopicsPattern().isEmpty()) {
newConfig.getInputSpecs().put(newConfig.getTopicsPattern(),
ConsumerConfig.builder()
.isRegexPattern(true)
.build());
}
if (newConfig.getCustomSerdeInputs() != null) {
newConfig.getCustomSerdeInputs().forEach((topicName, serdeClassName) -> {
newConfig.getInputSpecs().put(topicName,
ConsumerConfig.builder()
.serdeClassName(serdeClassName)
.isRegexPattern(false)
.build());
});
}
if (newConfig.getCustomSchemaInputs() != null) {
newConfig.getCustomSchemaInputs().forEach((topicName, schemaClassname) -> {
newConfig.getInputSpecs().put(topicName,
ConsumerConfig.builder()
.schemaType(schemaClassname)
.isRegexPattern(false)
.build());
});
}
if (!newConfig.getInputSpecs().isEmpty()) {
newConfig.getInputSpecs().forEach((topicName, consumerConfig) -> {
if (!existingConfig.getInputSpecs().containsKey(topicName)) {
throw new IllegalArgumentException("Input Topics cannot be altered");
}
if (consumerConfig.isRegexPattern() != existingConfig.getInputSpecs().get(topicName).isRegexPattern()) {
throw new IllegalArgumentException(
"isRegexPattern for input topic " + topicName + " cannot be altered");
}
mergedConfig.getInputSpecs().put(topicName, consumerConfig);
});
}
if (!StringUtils.isEmpty(newConfig.getOutputSerdeClassName()) && !newConfig.getOutputSerdeClassName()
.equals(existingConfig.getOutputSerdeClassName())) {
throw new IllegalArgumentException("Output Serde mismatch");
}
if (!StringUtils.isEmpty(newConfig.getOutputSchemaType()) && !newConfig.getOutputSchemaType()
.equals(existingConfig.getOutputSchemaType())) {
throw new IllegalArgumentException("Output Schema mismatch");
}
if (!StringUtils.isEmpty(newConfig.getLogTopic())) {
mergedConfig.setLogTopic(newConfig.getLogTopic());
}
if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees()
.equals(existingConfig.getProcessingGuarantees())) {
throw new IllegalArgumentException("Processing Guarantees cannot be altered");
}
if (newConfig.getRetainOrdering() != null && !newConfig.getRetainOrdering()
.equals(existingConfig.getRetainOrdering())) {
throw new IllegalArgumentException("Retain Ordering cannot be altered");
}
if (newConfig.getRetainKeyOrdering() != null && !newConfig.getRetainKeyOrdering()
.equals(existingConfig.getRetainKeyOrdering())) {
throw new IllegalArgumentException("Retain Key Ordering cannot be altered");
}
if (!StringUtils.isEmpty(newConfig.getOutput())) {
mergedConfig.setOutput(newConfig.getOutput());
}
if (newConfig.getUserConfig() != null) {
mergedConfig.setUserConfig(newConfig.getUserConfig());
}
if (newConfig.getSecrets() != null) {
mergedConfig.setSecrets(newConfig.getSecrets());
}
if (newConfig.getRuntime() != null && !newConfig.getRuntime().equals(existingConfig.getRuntime())) {
throw new IllegalArgumentException("Runtime cannot be altered");
}
if (newConfig.getAutoAck() != null && !newConfig.getAutoAck().equals(existingConfig.getAutoAck())) {
throw new IllegalArgumentException("AutoAck cannot be altered");
}
if (newConfig.getMaxMessageRetries() != null) {
mergedConfig.setMaxMessageRetries(newConfig.getMaxMessageRetries());
}
if (!StringUtils.isEmpty(newConfig.getDeadLetterTopic())) {
mergedConfig.setDeadLetterTopic(newConfig.getDeadLetterTopic());
}
if (!StringUtils.isEmpty(newConfig.getSubName()) && !newConfig.getSubName()
.equals(existingConfig.getSubName())) {
throw new IllegalArgumentException("Subscription Name cannot be altered");
}
if (newConfig.getParallelism() != null) {
mergedConfig.setParallelism(newConfig.getParallelism());
}
if (newConfig.getResources() != null) {
mergedConfig
.setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources()));
}
if (newConfig.getWindowConfig() != null) {
mergedConfig.setWindowConfig(newConfig.getWindowConfig());
}
if (newConfig.getTimeoutMs() != null) {
mergedConfig.setTimeoutMs(newConfig.getTimeoutMs());
}
if (newConfig.getCleanupSubscription() != null) {
mergedConfig.setCleanupSubscription(newConfig.getCleanupSubscription());
}
if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) {
mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags());
}
if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) {
mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions());
}
if (newConfig.getProducerConfig() != null) {
mergedConfig.setProducerConfig(newConfig.getProducerConfig());
}
return mergedConfig;
} | @Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "Processing Guarantees cannot be altered")
public void testMergeDifferentProcessingGuarantees() {
FunctionConfig functionConfig = createFunctionConfig();
FunctionConfig newFunctionConfig = createUpdatedFunctionConfig("processingGuarantees", EFFECTIVELY_ONCE);
FunctionConfigUtils.validateUpdate(functionConfig, newFunctionConfig);
} |
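A hedged mini-model of the update contract shown above (hypothetical types, not the Pulsar classes): mutable fields are copied onto the merged config, while immutable fields must match the existing value or the update is rejected.

import java.util.*;

public class MergeSketch {
    static Map<String, String> merge(Map<String, String> existing, Map<String, String> update,
                                     Set<String> immutable) {
        Map<String, String> merged = new HashMap<>(existing);
        for (Map.Entry<String, String> e : update.entrySet()) {
            if (immutable.contains(e.getKey()) && !Objects.equals(existing.get(e.getKey()), e.getValue())) {
                throw new IllegalArgumentException(e.getKey() + " cannot be altered");
            }
            merged.put(e.getKey(), e.getValue());
        }
        return merged;
    }
    public static void main(String[] args) {
        Map<String, String> existing = Map.of("parallelism", "1", "processingGuarantees", "ATLEAST_ONCE");
        System.out.println(merge(existing, Map.of("parallelism", "2"), Set.of("processingGuarantees")));
        // merge(existing, Map.of("processingGuarantees", "EFFECTIVELY_ONCE"), ...) would throw
    }
}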
@Override
public URL use(ApplicationId applicationId, String resourceKey)
throws YarnException {
Path resourcePath = null;
UseSharedCacheResourceRequest request = Records.newRecord(
UseSharedCacheResourceRequest.class);
request.setAppId(applicationId);
request.setResourceKey(resourceKey);
try {
UseSharedCacheResourceResponse response = this.scmClient.use(request);
if (response != null && response.getPath() != null) {
resourcePath = new Path(response.getPath());
}
} catch (Exception e) {
// Just catching IOException isn't enough.
// RPC call can throw ConnectionException.
// We don't handle different exceptions separately at this point.
throw new YarnException(e);
}
if (resourcePath != null) {
URL pathURL = URL.fromPath(resourcePath);
return pathURL;
} else {
// The resource was not in the cache.
return null;
}
} | @Test
public void testUseCacheHit() throws Exception {
Path file = new Path("viewfs://test/path");
URL useUrl = URL.fromPath(new Path("viewfs://test/path"));
UseSharedCacheResourceResponse response =
new UseSharedCacheResourceResponsePBImpl();
response.setPath(file.toString());
when(cProtocol.use(isA(UseSharedCacheResourceRequest.class))).thenReturn(
response);
URL newURL = client.use(mock(ApplicationId.class), "key");
assertEquals("The paths are not equal!", useUrl, newURL);
} |
static MetricRegistry getOrCreateMetricRegistry(Registry camelRegistry, String registryName) {
LOG.debug("Looking up MetricRegistry from Camel Registry for name \"{}\"", registryName);
MetricRegistry result = getMetricRegistryFromCamelRegistry(camelRegistry, registryName);
if (result == null) {
LOG.debug("MetricRegistry not found from Camel Registry for name \"{}\"", registryName);
LOG.info("Creating new default MetricRegistry");
result = createMetricRegistry();
}
return result;
} | @Test
public void testGetOrCreateMetricRegistryNotFoundInCamelRegistry() {
when(camelRegistry.lookupByNameAndType("name", MetricRegistry.class)).thenReturn(null);
when(camelRegistry.findByType(MetricRegistry.class)).thenReturn(Collections.<MetricRegistry> emptySet());
MetricRegistry result = MetricsComponent.getOrCreateMetricRegistry(camelRegistry, "name");
assertThat(result, is(notNullValue()));
assertThat(result, is(not(metricRegistry)));
inOrder.verify(camelRegistry, times(1)).lookupByNameAndType("name", MetricRegistry.class);
inOrder.verify(camelRegistry, times(1)).findByType(MetricRegistry.class);
inOrder.verifyNoMoreInteractions();
} |
<K, V> List<ConsumerRecord<K, V>> fetchRecords(FetchConfig fetchConfig,
Deserializers<K, V> deserializers,
int maxRecords) {
// Error when fetching the next record before deserialization.
if (corruptLastRecord)
throw new KafkaException("Received exception when fetching the next record from " + partition
+ ". If needed, please seek past the record to "
+ "continue consumption.", cachedRecordException);
if (isConsumed)
return Collections.emptyList();
List<ConsumerRecord<K, V>> records = new ArrayList<>();
try {
for (int i = 0; i < maxRecords; i++) {
// Only move to next record if there was no exception in the last fetch. Otherwise, we should
// use the last record to do deserialization again.
if (cachedRecordException == null) {
corruptLastRecord = true;
lastRecord = nextFetchedRecord(fetchConfig);
corruptLastRecord = false;
}
if (lastRecord == null)
break;
Optional<Integer> leaderEpoch = maybeLeaderEpoch(currentBatch.partitionLeaderEpoch());
TimestampType timestampType = currentBatch.timestampType();
ConsumerRecord<K, V> record = parseRecord(deserializers, partition, leaderEpoch, timestampType, lastRecord);
records.add(record);
recordsRead++;
bytesRead += lastRecord.sizeInBytes();
nextFetchOffset = lastRecord.offset() + 1;
// In some cases, the deserialization may have thrown an exception and the retry may succeed,
// we allow user to move forward in this case.
cachedRecordException = null;
}
} catch (SerializationException se) {
cachedRecordException = se;
if (records.isEmpty())
throw se;
} catch (KafkaException e) {
cachedRecordException = e;
if (records.isEmpty())
throw new KafkaException("Received exception when fetching the next record from " + partition
+ ". If needed, please seek past the record to "
+ "continue consumption.", e);
}
return records;
} | @Test
public void testNegativeFetchCount() {
long fetchOffset = 0;
int startingOffset = 0;
int numRecords = 10;
FetchResponseData.PartitionData partitionData = new FetchResponseData.PartitionData()
.setRecords(newRecords(startingOffset, numRecords, fetchOffset));
try (final Deserializers<String, String> deserializers = newStringDeserializers()) {
CompletedFetch completedFetch = newCompletedFetch(fetchOffset, partitionData);
FetchConfig fetchConfig = newFetchConfig(IsolationLevel.READ_UNCOMMITTED, true);
List<ConsumerRecord<String, String>> records = completedFetch.fetchRecords(fetchConfig, deserializers, -10);
assertEquals(0, records.size());
}
} |
public static Pair<Optional<Method>, Optional<TypedExpression>> resolveMethodWithEmptyCollectionArguments(
final MethodCallExpr methodExpression,
final MvelCompilerContext mvelCompilerContext,
final Optional<TypedExpression> scope,
List<TypedExpression> arguments,
List<Integer> emptyCollectionArgumentsIndexes) {
Objects.requireNonNull(methodExpression, "MethodExpression parameter cannot be null as the method searches methods based on this expression!");
Objects.requireNonNull(mvelCompilerContext, "MvelCompilerContext parameter cannot be null!");
Objects.requireNonNull(arguments, "Arguments parameter cannot be null! Use an empty list instance if needed instead.");
Objects.requireNonNull(emptyCollectionArgumentsIndexes, "EmptyListArgumentIndexes parameter cannot be null! Use an empty list instance if needed instead.");
if (emptyCollectionArgumentsIndexes.size() > arguments.size()) {
throw new IllegalArgumentException("There cannot be more empty collection arguments than all arguments! emptyCollectionArgumentsIndexes parameter has more items than arguments parameter. "
+ "(" + emptyCollectionArgumentsIndexes.size() + " > " + arguments.size() + ")");
} else {
final List<TypedExpression> coercedArgumentsTypesList = new ArrayList<>(arguments);
Pair<Optional<Method>, Optional<TypedExpression>> resolveMethodResult =
MethodResolutionUtils.resolveMethod(methodExpression, mvelCompilerContext, scope, coercedArgumentsTypesList);
if (resolveMethodResult.a.isPresent()) {
return resolveMethodResult;
} else {
// Work only with the coerced argument types; once a method resolves, rewrite the original
// arguments list to match. This needs to go through all possible combinations.
final int indexesListSize = emptyCollectionArgumentsIndexes.size();
for (int numberOfProcessedIndexes = 0; numberOfProcessedIndexes < indexesListSize; numberOfProcessedIndexes++) {
for (int indexOfEmptyListIndex = numberOfProcessedIndexes; indexOfEmptyListIndex < indexesListSize; indexOfEmptyListIndex++) {
switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(indexOfEmptyListIndex));
resolveMethodResult =
MethodResolutionUtils.resolveMethod(methodExpression, mvelCompilerContext, scope, coercedArgumentsTypesList);
if (resolveMethodResult.a.isPresent()) {
modifyArgumentsBasedOnCoercedCollectionArguments(arguments, coercedArgumentsTypesList);
return resolveMethodResult;
}
switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(indexOfEmptyListIndex));
}
switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(numberOfProcessedIndexes));
}
// No method found, return empty.
return new Pair<>(Optional.empty(), scope);
}
}
} | @Test
public void resolveMethodWithEmptyCollectionArguments() {
final MethodCallExpr methodExpression = new MethodCallExpr("setAddresses", new ListCreationLiteralExpression(null, NodeList.nodeList()));
final List<TypedExpression> arguments = List.of(new ListExprT(new ListCreationLiteralExpression(null, NodeList.nodeList())));
final TypedExpression scope = new ObjectCreationExpressionT(arguments, Person.class);
final Pair<Optional<Method>, Optional<TypedExpression>> resolvedMethodResult =
MethodResolutionUtils.resolveMethodWithEmptyCollectionArguments(
methodExpression,
new MvelCompilerContext(null),
Optional.of(scope),
arguments,
List.of(0));
Assertions.assertThat(resolvedMethodResult.a).isPresent();
Assertions.assertThat(getTypedExpressionsClasses(arguments))
.containsExactlyElementsOf(List.of(ListExprT.class));
} |
public static Properties loadProps(String filename) throws IOException {
return loadProps(filename, null);
} | @Test
public void testLoadProps() throws IOException {
File tempFile = TestUtils.tempFile();
try {
String testContent = "a=1\nb=2\n#a comment\n\nc=3\nd=";
Files.write(tempFile.toPath(), testContent.getBytes());
Properties props = Utils.loadProps(tempFile.getPath());
assertEquals(4, props.size());
assertEquals("1", props.get("a"));
assertEquals("2", props.get("b"));
assertEquals("3", props.get("c"));
assertEquals("", props.get("d"));
Properties restrictedProps = Utils.loadProps(tempFile.getPath(), Arrays.asList("b", "d", "e"));
assertEquals(2, restrictedProps.size());
assertEquals("2", restrictedProps.get("b"));
assertEquals("", restrictedProps.get("d"));
} finally {
Files.deleteIfExists(tempFile.toPath());
}
} |
public Optional<Measure> toMeasure(@Nullable LiveMeasureDto measureDto, Metric metric) {
requireNonNull(metric);
if (measureDto == null) {
return Optional.empty();
}
Double value = measureDto.getValue();
String data = measureDto.getDataAsString();
switch (metric.getType().getValueType()) {
case INT:
return toIntegerMeasure(value, data);
case LONG:
return toLongMeasure(value, data);
case DOUBLE:
return toDoubleMeasure(value, data);
case BOOLEAN:
return toBooleanMeasure(value, data);
case STRING:
return toStringMeasure(data);
case LEVEL:
return toLevelMeasure(data);
case NO_VALUE:
return toNoValueMeasure();
default:
throw new IllegalArgumentException("Unsupported Measure.ValueType " + metric.getType().getValueType());
}
} | @Test
public void toMeasure_returns_long_part_of_value_in_dto_for_Long_Metric() {
Optional<Measure> measure = underTest.toMeasure(new LiveMeasureDto().setValue(1.5d), SOME_LONG_METRIC);
assertThat(measure).isPresent();
assertThat(measure.get().getValueType()).isEqualTo(Measure.ValueType.LONG);
assertThat(measure.get().getLongValue()).isOne();
} |
public RewriteGroupedCode rewrite(String context) {
BlockStatementGrouperVisitor visitor =
new BlockStatementGrouperVisitor(maxMethodLength, parameters);
visitor.visitStatement(topStatement, context);
final Map<String, List<String>> groupStrings = visitor.rewrite(rewriter);
return new RewriteGroupedCode(rewriter.getText(), groupStrings);
} | @Test
public void testExtractIfInWhileGroups() {
String parameters = "a, b";
String givenBlock = readResource("groups/code/IfInWhile.txt");
String expectedBlock = readResource("groups/expected/IfInWhile.txt");
BlockStatementGrouper grouper = new BlockStatementGrouper(givenBlock, 10, parameters);
RewriteGroupedCode rewriteGroupedCode = grouper.rewrite("myFun");
String rewriteCode = rewriteGroupedCode.getRewriteCode();
Map<String, List<String>> groups = rewriteGroupedCode.getGroups();
        // Mitigate indentation differences across platforms by simply trimming every
        // line of the "class". Before this change, the code-splitter test could fail on
        // Windows machines while passing on Unix.
assertThat(trimLines(rewriteCode)).isEqualTo(trimLines(expectedBlock));
assertThat(groups).hasSize(4);
List<String> group1 = groups.get("myFun_rewriteGroup0_1_rewriteGroup3");
assertThat(group1).hasSize(2);
assertThat(group1.get(0)).isEqualTo("myFun_whileBody0_0(a, b);");
assertThat(trimLines(group1.get(1)))
.isEqualTo(
trimLines(
""
+ " if (a[0] > 0) {\n"
+ " myFun_whileBody0_0_ifBody0(a, b);\n"
+ " } else {\n"
+ " myFun_whileBody0_0_ifBody1(a, b);\n"
+ " }"));
List<String> group2 = groups.get("myFun_rewriteGroup0_1_rewriteGroup5");
assertThat(group2).hasSize(3);
assertThat(group2.get(0)).isEqualTo("a[2] += b[2];");
assertThat(group2.get(1)).isEqualTo("b[3] += a[3];");
assertThat(trimLines(group2.get(2)))
.isEqualTo(
trimLines(
"if (a[0] > 0) {\n"
+ " System.out.println(\"Hello\");\n"
+ " } else {\n"
+ " System.out.println(\"World\");\n"
+ " }"));
List<String> group3 = groups.get("myFun_rewriteGroup6");
assertThat(group3).hasSize(3);
assertThat(group3.get(0)).isEqualTo("a[0] += b[1];");
assertThat(group3.get(1)).isEqualTo("b[1] += a[1];");
assertThat(trimLines(group3.get(2)))
.isEqualTo(
trimLines(
" while (counter > 0) {\n"
+ " myFun_rewriteGroup0_1_rewriteGroup3(a, b);\n"
+ " \n"
+ " myFun_rewriteGroup0_1_rewriteGroup5(a, b);\n"
+ " \n"
+ " counter--;\n"
+ "}"));
List<String> group4 = groups.get("myFun_rewriteGroup7");
assertThat(group4).containsExactly("a[4] += b[4];", "b[5] += a[5];");
} |
public ProjectList searchProjects(String gitlabUrl, String personalAccessToken, @Nullable String projectName,
@Nullable Integer pageNumber, @Nullable Integer pageSize) {
String url = format("%s/projects?archived=false&simple=true&membership=true&order_by=name&sort=asc&search=%s%s%s",
gitlabUrl,
projectName == null ? "" : urlEncode(projectName),
pageNumber == null ? "" : format("&page=%d", pageNumber),
pageSize == null ? "" : format("&per_page=%d", pageSize)
);
LOG.debug("get projects : [{}]", url);
Request request = new Request.Builder()
.addHeader(PRIVATE_TOKEN, personalAccessToken)
.url(url)
.get()
.build();
try (Response response = client.newCall(request).execute()) {
Headers headers = response.headers();
checkResponseIsSuccessful(response, "Could not get projects from GitLab instance");
List<Project> projectList = Project.parseJsonArray(response.body().string());
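      // GitLab reports pagination through the X-Page, X-Per-Page and X-Total response headers.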
int returnedPageNumber = parseAndGetIntegerHeader(headers.get("X-Page"));
int returnedPageSize = parseAndGetIntegerHeader(headers.get("X-Per-Page"));
String xtotal = headers.get("X-Total");
Integer totalProjects = Strings.isEmpty(xtotal) ? null : parseAndGetIntegerHeader(xtotal);
return new ProjectList(projectList, returnedPageNumber, returnedPageSize, totalProjects);
} catch (JsonSyntaxException e) {
      throw new IllegalArgumentException("Could not parse GitLab answer to search projects. Got a non-JSON payload as a result.");
} catch (IOException e) {
logException(url, e);
throw new IllegalStateException(e.getMessage(), e);
}
} | @Test
  public void should_throw_IllegalArgumentException_when_invalid_json_in_401_response() {
MockResponse response = new MockResponse()
.setResponseCode(401)
.setBody("error in pat");
server.enqueue(response);
assertThatThrownBy(() -> underTest.searchProjects(gitlabUrl, "pat", "example", 1, 2))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Invalid personal access token");
} |
@Override
public void cancel(ExecutionAttemptID executionAttemptId) {
cancelLogicalSlotRequest(executionAttemptId.getExecutionVertexId(), null);
} | @Test
void testLogicalSlotCancelsPhysicalSlotRequestAndRemovesSharedSlot() throws Exception {
// physical slot request is not completed and does not complete logical requests
testLogicalSlotRequestCancellationOrRelease(
true,
true,
(context, assignment) -> {
context.getAllocator().cancel(assignment.getExecutionAttemptId());
assertThatThrownBy(
() -> {
context.getAllocator()
.cancel(assignment.getExecutionAttemptId());
assignment.getLogicalSlotFuture().get();
})
.as("The logical future must finish with the cancellation exception.")
.hasCauseInstanceOf(CancellationException.class);
});
} |
public static String encode(String plain) {
Preconditions.checkNotNull(plain, "Cannot encode null object");
String encoded;
try {
encoded = URLEncoder.encode(plain, CHARSET);
} catch (UnsupportedEncodingException uee) {
throw new OAuthException("Charset not found while encoding string: " + CHARSET, uee);
}
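    // URLEncoder produces form encoding; the rules below adjust the result toward
    // RFC 3986 percent encoding (e.g. '+' becomes '%20', as the space handling in the test shows).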
for (Map.Entry<String, String> rule : ENCODING_RULES.entrySet()) {
encoded = applyRule(encoded, rule.getKey(), rule.getValue());
}
return encoded;
} | @Test
public void shouldPercentEncodeString() {
final String plain = "this is a test &^";
final String encoded = "this%20is%20a%20test%20%26%5E";
assertEquals(encoded, OAuthEncoder.encode(plain));
} |
public RemotingChannel removeConsumerChannel(ProxyContext ctx, String group, Channel channel) {
return removeChannel(buildConsumerKey(group), channel);
} | @Test
public void testRemoveConsumerChannel() {
String group = "group";
String clientId = RandomStringUtils.randomAlphabetic(10);
{
Channel consumerChannel = createMockChannel();
RemotingChannel consumerRemotingChannel = this.remotingChannelManager.createConsumerChannel(ctx, consumerChannel, group, clientId, new HashSet<>());
assertSame(consumerRemotingChannel, this.remotingChannelManager.removeConsumerChannel(ctx, group, consumerRemotingChannel));
assertTrue(this.remotingChannelManager.groupChannelMap.isEmpty());
}
{
Channel consumerChannel = createMockChannel();
RemotingChannel consumerRemotingChannel = this.remotingChannelManager.createConsumerChannel(ctx, consumerChannel, group, clientId, new HashSet<>());
assertSame(consumerRemotingChannel, this.remotingChannelManager.removeConsumerChannel(ctx, group, consumerChannel));
assertTrue(this.remotingChannelManager.groupChannelMap.isEmpty());
}
} |
public void check(@NotNull Set<Long> partitionIds, long currentTimeMs)
throws CommitRateExceededException, CommitFailedException {
Preconditions.checkNotNull(partitionIds, "partitionIds is null");
// Does not limit the commit rate of compaction transactions
if (transactionState.getSourceType() == TransactionState.LoadJobSourceType.LAKE_COMPACTION) {
return;
}
updateWriteDuration(transactionState);
setAllowCommitTimeOnce(partitionIds);
long txnId = transactionState.getTransactionId();
long abortTime = transactionState.getPrepareTime() + transactionState.getTimeoutMs();
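        // If the earliest allowed commit time already passes the abort deadline, the txn can never commit; fail fast.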
if (transactionState.getAllowCommitTimeMs() >= abortTime) {
throw new CommitFailedException("Txn " + txnId + " timed out due to ingestion slowdown", txnId);
}
if (transactionState.getAllowCommitTimeMs() > currentTimeMs) {
LOG.info("delay commit of txn {} for {}ms, write took {}ms", transactionState.getTransactionId(),
transactionState.getAllowCommitTimeMs() - currentTimeMs,
transactionState.getWriteDurationMs());
throw new CommitRateExceededException(txnId, transactionState.getAllowCommitTimeMs());
}
long upperBound = compactionScoreUpperBound();
if (upperBound > 0 && anyCompactionScoreExceedsUpperBound(partitionIds, upperBound)) {
throw new CommitRateExceededException(txnId, currentTimeMs + 1000/* delay 1s */);
}
} | @Test
public void testCompactionTxn() throws CommitRateExceededException {
long partitionId = 54321;
long currentTimeMs = System.currentTimeMillis();
Set<Long> partitions = new HashSet<>(Collections.singletonList(partitionId));
transactionState = new TransactionState(dbId, Lists.newArrayList(tableId), 123456L, "label", null,
TransactionState.LoadJobSourceType.LAKE_COMPACTION, null, 0, timeoutMs);
transactionState.setPrepareTime(currentTimeMs - 100);
transactionState.setWriteEndTimeMs(currentTimeMs);
limiter = new CommitRateLimiter(compactionMgr, transactionState, tableId);
Assert.assertTrue(ratio > 0.01);
Assert.assertTrue(threshold > 0);
compactionMgr.handleLoadingFinished(new PartitionIdentifier(dbId, tableId, partitionId), 3, currentTimeMs,
Quantiles.compute(Lists.newArrayList(threshold + 100)));
limiter.check(partitions, currentTimeMs);
} |
@Override
public Expression resolveSelect(final int idx, final Expression expression) {
final Expression resolved = columnMappings.get(idx);
return resolved == null ? expression : resolved;
} | @Test
public void shouldResolveUdtfSelectExpressionToInternalName() {
// Given:
final Expression exp = mock(Expression.class);
// When:
final Expression result = flatMapNode.resolveSelect(2, exp);
// Then:
assertThat(result, is(new UnqualifiedColumnReferenceExp(ColumnName.of("KSQL_SYNTH_0"))));
} |
@Override
public void start() {
if (isDisabled()) {
LOG.debug(MESSAGE_SCM_STEP_IS_DISABLED_BY_CONFIGURATION);
return;
}
if (settings.hasKey(SCM_PROVIDER_KEY)) {
settings.get(SCM_PROVIDER_KEY).ifPresent(this::setProviderIfSupported);
} else {
autodetection();
if (this.provider == null) {
considerOldScmUrl();
}
if (this.provider == null) {
String message = "SCM provider autodetection failed. Please use \"" + SCM_PROVIDER_KEY + "\" to define SCM of " +
"your project, or disable the SCM Sensor in the project settings.";
LOG.warn(message);
analysisWarnings.addUnique(message);
}
}
if (isExclusionDisabled()) {
LOG.info(MESSAGE_SCM_EXCLUSIONS_IS_DISABLED_BY_CONFIGURATION);
}
} | @Test
void log_when_exclusion_is_disabled() {
when(settings.getBoolean(CoreProperties.SCM_EXCLUSIONS_DISABLED_KEY)).thenReturn(Optional.of(true));
underTest.start();
assertThat(logTester.logs()).contains(MESSAGE_SCM_EXCLUSIONS_IS_DISABLED_BY_CONFIGURATION);
} |
@Override
public void run() {
try {
final Set<String> distinctRecurringJobSignatures = getDistinctRecurringJobSignaturesThatDoNotExistAnymore();
final Set<String> distinctScheduledJobSignatures = getDistinctScheduledJobSignaturesThatDoNotExistAnymore();
Set<String> jobsThatCannotBeFound = asSet(distinctRecurringJobSignatures, distinctScheduledJobSignatures);
if (!distinctRecurringJobSignatures.isEmpty() || !distinctScheduledJobSignatures.isEmpty()) {
String jobStateThatIsNotFound = jobTypeNotFoundLabel(distinctRecurringJobSignatures, distinctScheduledJobSignatures);
LOGGER.warn("JobRunr found {} jobs that do not exist anymore in your code. These jobs will fail with a JobNotFoundException (due to a ClassNotFoundException or a MethodNotFoundException)." +
"\n\tBelow you can find the method signatures of the jobs that cannot be found anymore: {}",
jobStateThatIsNotFound,
jobsThatCannotBeFound.stream().map(sign -> "\n\t" + sign + ",").collect(Collectors.joining())
);
}
} catch (Exception e) {
LOGGER.error("Unexpected exception running `CheckIfAllJobsExistTask`", shouldNotHappenException(e));
}
} | @Test
void onRunItLogsAllScheduledAndRecurringJobsThatDoNotExist() {
when(storageProvider.getRecurringJobs()).thenReturn(new RecurringJobsResult(asList(
aDefaultRecurringJob().build(),
aDefaultRecurringJob().withJobDetails(classThatDoesNotExistJobDetails()).build()
)));
when(storageProvider.getDistinctJobSignatures(SCHEDULED)).thenReturn(Set.of(
getJobSignature(defaultJobDetails().build()),
getJobSignature(methodThatDoesNotExistJobDetails().build())
));
checkIfAllJobsExistTask.run();
assertThat(logger)
.hasWarningMessageContaining("JobRunr found RECURRING AND SCHEDULED jobs that do not exist anymore in your code.")
.hasWarningMessageContaining("i.dont.exist.Class.notImportant(java.lang.Integer)")
.hasWarningMessageContaining("org.jobrunr.stubs.TestService.doWorkThatDoesNotExist(java.lang.Integer)")
.hasNoErrorLogMessages();
} |
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
AbstractBaseReplicatedRecordStore that = (AbstractBaseReplicatedRecordStore) o;
if (!Objects.equals(name, that.name)) {
return false;
}
return storageRef.get().equals(that.storageRef.get());
} | @Test
public void testEquals() {
assertEquals(recordStore, recordStore);
assertEquals(recordStoreSameAttributes, recordStore);
assertNotEquals(null, recordStore);
assertNotEquals(new Object(), recordStore);
assertNotEquals(recordStoreOtherStorage, recordStore);
assertNotEquals(recordStoreOtherName, recordStore);
} |
public static <C> AsyncBuilder<C> builder() {
return new AsyncBuilder<>();
} | @Test
void ensureRetryerClonesItself() throws Throwable {
server.enqueue(new MockResponse().setResponseCode(503).setBody("foo 1"));
server.enqueue(new MockResponse().setResponseCode(200).setBody("foo 2"));
server.enqueue(new MockResponse().setResponseCode(503).setBody("foo 3"));
server.enqueue(new MockResponse().setResponseCode(200).setBody("foo 4"));
MockRetryer retryer = new MockRetryer();
TestInterfaceAsync api = AsyncFeign.builder().retryer(retryer)
.errorDecoder(
(methodKey, response) -> new RetryableException(response.status(), "play it again sam!",
HttpMethod.POST, NON_RETRYABLE, response.request()))
.target(TestInterfaceAsync.class, "http://localhost:" + server.getPort());
unwrap(api.post());
unwrap(api.post()); // if retryer instance was reused, this statement will throw an exception
assertThat(server.getRequestCount()).isEqualTo(4);
} |