focal_method (string, lengths 13 to 60.9k) | test_case (string, lengths 25 to 109k) |
---|---|
public <T extends Tuple> DataSource<T> tupleType(Class<T> targetType) {
Preconditions.checkNotNull(targetType, "The target type class must not be null.");
if (!Tuple.class.isAssignableFrom(targetType)) {
throw new IllegalArgumentException(
"The target type must be a subclass of " + Tuple.class.getName());
}
@SuppressWarnings("unchecked")
TupleTypeInfo<T> typeInfo = (TupleTypeInfo<T>) TypeExtractor.createTypeInfo(targetType);
CsvInputFormat<T> inputFormat =
new TupleCsvInputFormat<T>(
path, this.lineDelimiter, this.fieldDelimiter, typeInfo, this.includedMask);
Class<?>[] classes = new Class<?>[typeInfo.getArity()];
for (int i = 0; i < typeInfo.getArity(); i++) {
classes[i] = typeInfo.getTypeAt(i).getTypeClass();
}
configureInputFormat(inputFormat);
return new DataSource<T>(
executionContext, inputFormat, typeInfo, Utils.getCallLocationName());
} | @Test
void testUnsupportedPartialItem() {
CsvReader reader = getCsvReader();
assertThatThrownBy(() -> reader.tupleType(PartialItem.class))
.withFailMessage("tupleType() accepted an underspecified generic class.")
.isInstanceOf(Exception.class);
} |
public RemotingParser isRemoting(Object bean, String beanName) {
for (RemotingParser remotingParser : allRemotingParsers) {
if (remotingParser.isRemoting(bean, beanName)) {
return remotingParser;
}
}
return null;
} | @Test
public void testIsRemoting() {
SimpleRemoteBean remoteBean = new SimpleRemoteBean();
RemotingParser parser = remotingParser.isRemoting(remoteBean, remoteBean.getClass().getName());
assertInstanceOf(SimpleRemotingParser.class, parser);
} |
@Override
public int read() throws IOException {
if (mPosition == mLength) { // at end of file
return -1;
}
updateStreamIfNeeded();
int res = mUfsInStream.get().read();
if (res == -1) {
return -1;
}
mPosition++;
Metrics.BYTES_READ_FROM_UFS.inc(1);
return res;
} | @Test
public void manyBytesReadByteBuffer() throws IOException, AlluxioException {
AlluxioURI ufsPath = getUfsPath();
createFile(ufsPath, CHUNK_SIZE);
ByteBuffer buffer = ByteBuffer.allocate(CHUNK_SIZE);
try (FileInStream inStream = getStream(ufsPath)) {
assertEquals(CHUNK_SIZE, inStream.read(buffer));
assertTrue(BufferUtils.equalIncreasingByteBuffer(0, CHUNK_SIZE, buffer));
}
} |
@Override
public KStream<K, V> merge(final KStream<K, V> stream) {
return merge(stream, NamedInternal.empty());
} | @Test
public void shouldNotAllowNullKStreamOnMerge() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.merge(null));
assertThat(exception.getMessage(), equalTo("stream can't be null"));
} |
public static InputStream getResourceAsStream(String resource) throws IOException {
ClassLoader loader = ResourceUtils.class.getClassLoader();
return getResourceAsStream(loader, resource);
} | @Test
void testGetResourceAsStreamForClasspathFromSystem() throws IOException {
try (InputStream inputStream = ResourceUtils.getResourceAsStream(null, "test-tls-cert.pem")) {
assertNotNull(inputStream);
}
} |
@Override
@SneakyThrows
public String createFile(String name, String path, byte[] content) {
// Compute the default path name
String type = FileTypeUtils.getMineType(content, name);
if (StrUtil.isEmpty(path)) {
path = FileUtils.generatePath(content, name);
}
// If name is empty, fall back to the path
if (StrUtil.isEmpty(name)) {
name = path;
}
// Upload to the file storage client
FileClient client = fileConfigService.getMasterFileClient();
Assert.notNull(client, "Client (master) must not be null");
String url = client.upload(content, path, type);
// Save to the database
FileDO file = new FileDO();
file.setConfigId(client.getId());
file.setName(name);
file.setPath(path);
file.setUrl(url);
file.setType(type);
file.setSize(content.length);
fileMapper.insert(file);
return url;
} | @Test
public void testCreateFile_success() throws Exception {
// Prepare arguments
String path = randomString();
byte[] content = ResourceUtil.readBytes("file/erweima.jpg");
// Mock the master file client
FileClient client = mock(FileClient.class);
when(fileConfigService.getMasterFileClient()).thenReturn(client);
String url = randomString();
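// the .jpg fixture is expected to resolve to the "image/jpeg" mime type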
when(client.upload(same(content), same(path), eq("image/jpeg"))).thenReturn(url);
when(client.getId()).thenReturn(10L);
String name = "单测文件名";
// Invoke
String result = fileService.createFile(name, path, content);
// Assert
assertEquals(result, url);
// Verify the saved data
FileDO file = fileMapper.selectOne(FileDO::getPath, path);
assertEquals(10L, file.getConfigId());
assertEquals(path, file.getPath());
assertEquals(url, file.getUrl());
assertEquals("image/jpeg", file.getType());
assertEquals(content.length, file.getSize());
} |
public boolean hasTimeLeft() {
return clock.instant().isBefore(endTime);
} | @Test
public void testHasTimeLeft() {
ManualClock clock = new ManualClock();
TimeoutBudget budget = new TimeoutBudget(clock, Duration.ofMillis(7));
assertThat(budget.hasTimeLeft(), is(true));
clock.advance(Duration.ofMillis(1));
assertThat(budget.hasTimeLeft(), is(true));
clock.advance(Duration.ofMillis(5));
assertThat(budget.hasTimeLeft(), is(true));
assertThat(budget.hasTimeLeft(), is(true));
clock.advance(Duration.ofMillis(1));
assertThat(budget.hasTimeLeft(), is(false));
clock.advance(Duration.ofMillis(5));
assertThat(budget.hasTimeLeft(), is(false));
clock.advance(Duration.ofMillis(1));
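// total time advanced so far: 1 + 5 + 1 + 5 + 1 = 13 ms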
assertThat(budget.timesUsed(), is("[total: 13 ms]"));
} |
Map<String, Object> sourceAdminConfig(String role) {
Map<String, Object> props = new HashMap<>();
props.putAll(originalsWithPrefix(SOURCE_CLUSTER_PREFIX));
props.keySet().retainAll(MirrorClientConfig.CLIENT_CONFIG_DEF.names());
props.putAll(originalsWithPrefix(ADMIN_CLIENT_PREFIX));
props.putAll(originalsWithPrefix(SOURCE_PREFIX + ADMIN_CLIENT_PREFIX));
addClientId(props, role);
return props;
} | @Test
public void testSourceAdminConfig() {
Map<String, String> connectorProps = makeProps(
MirrorConnectorConfig.ADMIN_CLIENT_PREFIX +
"connections.max.idle.ms", "10000"
);
MirrorConnectorConfig config = new TestMirrorConnectorConfig(connectorProps);
Map<String, Object> connectorAdminProps = config.sourceAdminConfig("test");
Map<String, Object> expectedAdminProps = new HashMap<>();
expectedAdminProps.put("connections.max.idle.ms", "10000");
expectedAdminProps.put("client.id", "source1->target2|ConnectorName|test");
assertEquals(expectedAdminProps, connectorAdminProps,
MirrorConnectorConfig.ADMIN_CLIENT_PREFIX + " source connector admin props not matching");
} |
@SuppressWarnings("FutureReturnValueIgnored")
public void start() {
running.set(true);
configFetcher.start();
memoryMonitor.start();
streamingWorkerHarness.start();
sampler.start();
workerStatusReporter.start();
activeWorkRefresher.start();
} | @Test
public void testKeyTokenInvalidException() throws Exception {
if (streamingEngine) {
// TODO: This test needs to be adapted to work with streamingEngine=true.
return;
}
KvCoder<String, String> kvCoder = KvCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of());
List<ParallelInstruction> instructions =
Arrays.asList(
makeSourceInstruction(kvCoder),
makeDoFnInstruction(new KeyTokenInvalidFn(), 0, kvCoder),
makeSinkInstruction(kvCoder, 1));
server
.whenGetWorkCalled()
.thenReturn(makeInput(0, 0, DEFAULT_KEY_STRING, DEFAULT_SHARDING_KEY));
StreamingDataflowWorker worker =
makeWorker(defaultWorkerParams().setInstructions(instructions).publishCounters().build());
worker.start();
server.waitForEmptyWorkQueue();
server
.whenGetWorkCalled()
.thenReturn(makeInput(1, 0, DEFAULT_KEY_STRING, DEFAULT_SHARDING_KEY));
Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1);
assertEquals(
makeExpectedOutput(1, 0, DEFAULT_KEY_STRING, DEFAULT_SHARDING_KEY, DEFAULT_KEY_STRING)
.build(),
removeDynamicFields(result.get(1L)));
assertEquals(1, result.size());
} |
void forwardToStateService(DeviceStateServiceMsgProto deviceStateServiceMsg, TbCallback callback) {
if (statsEnabled) {
stats.log(deviceStateServiceMsg);
}
stateService.onQueueMsg(deviceStateServiceMsg, callback);
} | @Test
public void givenProcessingFailure_whenForwardingInactivityMsgToStateService_thenOnFailureCallbackIsCalled() {
// GIVEN
var inactivityMsg = TransportProtos.DeviceInactivityProto.newBuilder()
.setTenantIdMSB(tenantId.getId().getMostSignificantBits())
.setTenantIdLSB(tenantId.getId().getLeastSignificantBits())
.setDeviceIdMSB(deviceId.getId().getMostSignificantBits())
.setDeviceIdLSB(deviceId.getId().getLeastSignificantBits())
.setLastInactivityTime(time)
.build();
doCallRealMethod().when(defaultTbCoreConsumerServiceMock).forwardToStateService(inactivityMsg, tbCallbackMock);
var runtimeException = new RuntimeException("Something bad happened!");
doThrow(runtimeException).when(stateServiceMock).onDeviceInactivity(tenantId, deviceId, time);
// WHEN
defaultTbCoreConsumerServiceMock.forwardToStateService(inactivityMsg, tbCallbackMock);
// THEN
then(tbCallbackMock).should(never()).onSuccess();
then(tbCallbackMock).should().onFailure(runtimeException);
} |
public static Getter newFieldGetter(Object object, Getter parent, Field field, String modifier) throws Exception {
return newGetter(object, parent, modifier, field.getType(), field::get,
(t, et) -> new FieldGetter(parent, field, modifier, t, et));
} | @Test
public void newFieldGetter_whenExtractingFromNonEmpty_Collection_nullFirst_FieldAndParentIsNonEmptyMultiResult_thenInferReturnType()
throws Exception {
OuterObject object = new OuterObject("name", null, new InnerObject("inner", 0, 1, 2, 3));
Getter parentGetter = GetterFactory.newFieldGetter(object, null, innersCollectionField, "[any]");
Getter innerObjectNameGetter
= GetterFactory.newFieldGetter(object, parentGetter, innerAttributesCollectionField, "[any]");
Class<?> returnType = innerObjectNameGetter.getReturnType();
assertEquals(Integer.class, returnType);
} |
@Override
public KeyValueStore<K, V> build() {
return new MeteredKeyValueStore<>(
maybeWrapCaching(maybeWrapLogging(storeSupplier.get())),
storeSupplier.metricsScope(),
time,
keySerde,
valueSerde);
} | @Test
public void shouldHaveChangeLoggingStoreWhenLoggingEnabled() {
setUp();
final KeyValueStore<String, String> store = builder
.withLoggingEnabled(Collections.emptyMap())
.build();
final StateStore wrapped = ((WrappedStateStore) store).wrapped();
assertThat(store, instanceOf(MeteredKeyValueStore.class));
assertThat(wrapped, instanceOf(ChangeLoggingKeyValueBytesStore.class));
assertThat(((WrappedStateStore) wrapped).wrapped(), CoreMatchers.equalTo(inner));
} |
@Override
public List<QueuedCommand> getNewCommands(final Duration timeout) {
completeSatisfiedSequenceNumberFutures();
final List<QueuedCommand> commands = Lists.newArrayList();
final Iterable<ConsumerRecord<byte[], byte[]>> records = commandTopic.getNewCommands(timeout);
for (ConsumerRecord<byte[], byte[]> record: records) {
if (record.value() != null) {
Optional<CommandStatusFuture> commandStatusFuture = Optional.empty();
try {
final CommandId commandId =
commandIdDeserializer.deserialize(commandTopicName, record.key());
commandStatusFuture = Optional.ofNullable(commandStatusMap.remove(commandId));
} catch (Exception e) {
LOG.warn(
"Error while attempting to fetch from commandStatusMap for key {}",
record.key(),
e);
}
commands.add(new QueuedCommand(
record.key(),
record.value(),
commandStatusFuture,
record.offset()));
}
}
return commands;
} | @Test
public void shouldFilterNullCommands() {
// Given:
final ConsumerRecords<byte[], byte[]> records = buildRecords(
commandId, null,
commandId, command);
final Deserializer<Command> commandDeserializer = mock(Deserializer.class);
when(commandDeserializer.deserialize(any(), any())).thenReturn(command);
when(commandTopic.getNewCommands(any())).thenReturn(records);
// When:
final List<QueuedCommand> commands =
commandStore.getNewCommands(NEW_CMDS_TIMEOUT);
// Then:
assertThat(commands, hasSize(1));
assertThat(
commands.get(0).getAndDeserializeCommandId(),
equalTo(commandId));
assertThat(
commands.get(0).getAndDeserializeCommand(commandDeserializer),
equalTo(command));
} |
@Override
public Flux<BooleanResponse<RenameCommand>> rename(Publisher<RenameCommand> commands) {
return execute(commands, command -> {
Assert.notNull(command.getKey(), "Key must not be null!");
Assert.notNull(command.getNewName(), "New name must not be null!");
byte[] keyBuf = toByteArray(command.getKey());
byte[] newKeyBuf = toByteArray(command.getNewName());
if (executorService.getConnectionManager().calcSlot(keyBuf) == executorService.getConnectionManager().calcSlot(newKeyBuf)) {
return super.rename(commands);
}
return read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.DUMP, keyBuf)
.filter(Objects::nonNull)
.zipWith(
Mono.defer(() -> pTtl(command.getKey())
.filter(Objects::nonNull)
.map(ttl -> Math.max(0, ttl))
.switchIfEmpty(Mono.just(0L))
)
)
.flatMap(valueAndTtl -> {
return write(newKeyBuf, StringCodec.INSTANCE, RedisCommands.RESTORE, newKeyBuf, valueAndTtl.getT2(), valueAndTtl.getT1());
})
.thenReturn(new BooleanResponse<>(command, true))
.doOnSuccess((ignored) -> del(command.getKey()));
});
} | @Test
public void testRename_keyNotExist() {
Integer originalSlot = getSlotForKey(originalKey);
newKey = getNewKeyForSlot(new String(originalKey.array()), getTargetSlot(originalSlot));
if (sameSlot) {
// This is a quirk of the implementation - since same-slot renames use the non-cluster version,
// the result is a Redis error. This behavior matches other spring-data-redis implementations
assertThatThrownBy(() -> connection.keyCommands().rename(originalKey, newKey).block())
.isInstanceOf(RedisSystemException.class);
} else {
Boolean response = connection.keyCommands().rename(originalKey, newKey).block();
assertThat(response).isTrue();
final ByteBuffer newKeyValue = connection.stringCommands().get(newKey).block();
assertThat(newKeyValue).isEqualTo(null);
}
} |
@Override
public <T> Stream<T> children(String key, Function<Map<String, Object>, T> childConstructor) {
return Stream.ofNullable(get(key))
.filter(Objects::nonNull)
.map(el -> (List<Map<String, Object>>) el)
.findAny()
.stream()
.flatMap(Collection::stream)
.map(childConstructor);
} | @Test
void shouldRetrieveEmptyStreamForNonExistingChildren() {
var children = document.children(KEY, DocumentImplementation::new);
assertNotNull(children);
assertEquals(0, children.count());
} |
public double[] colMeans() {
double[] x = new double[n];
for (int j = 0; j < n; j++) {
for (int i = 0; i < m; i++) {
x[j] += get(i, j);
}
x[j] /= m;
}
return x;
} | @Test
public void testColMeans() {
System.out.println("colMeans");
double[][] A = {
{ 0.7220180, 0.07121225, 0.6881997f},
{-0.2648886, -0.89044952, 0.3700456f},
{-0.6391588, 0.44947578, 0.6240573f}
};
double[] r = {-0.06067647, -0.12325383, 0.56076753f};
double[] result = BigMatrix.of(A).colMeans();
for (int i = 0; i < r.length; i++) {
assertEquals(result[i], r[i], 1E-7);
}
} |
@Override
public void readLine(String line) {
if (line.startsWith("%") || line.isEmpty()) {
return;
}
if(line.startsWith("descr:") && this.organization == null) {
this.organization = lineValue(line);
}
if(line.startsWith("country:") && this.countryCode == null) {
this.countryCode = lineValue(line);
}
} | @Test
public void testRunDirectMatch() throws Exception {
APNICResponseParser parser = new APNICResponseParser();
for (String line : MATCH.split("\n")) {
parser.readLine(line);
}
assertEquals("SG", parser.getCountryCode());
assertEquals("SIMPLE SOLUTION SYSTEMS PTE LTD", parser.getOrganization());
} |
private boolean detectCharset(byte[] buf) throws IOException {
ByteCharsetDetector detector = new ByteCharsetDetector(new CharsetValidation(), userEncoding);
ByteOrderMark bom = detector.detectBOM(buf);
if (bom != null) {
detectedCharset = Charset.forName(bom.getCharsetName());
stream.skip(bom.length());
return true;
}
detectedCharset = detector.detect(buf);
return detectedCharset != null;
} | @Test
public void always_try_utf8() throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
// this is a valid 2 byte UTF-8.
out.write(194);
out.write(128);
Path filePath = temp.newFile().toPath();
Files.write(filePath, out.toByteArray());
assertThat(detectCharset(filePath, UTF_16)).isEqualTo(UTF_8);
} |
@SuppressWarnings("unchecked")
@Override
public <S extends StateStore> S getStateStore(final String name) {
final StateStore store = stateManager.getGlobalStore(name);
return (S) getReadWriteStore(store);
} | @Test
public void shouldNotAllowInitForTimestampedKeyValueStore() {
when(stateManager.getGlobalStore(GLOBAL_TIMESTAMPED_KEY_VALUE_STORE_NAME)).thenReturn(mock(TimestampedKeyValueStore.class));
final StateStore store = globalContext.getStateStore(GLOBAL_TIMESTAMPED_KEY_VALUE_STORE_NAME);
try {
store.init((StateStoreContext) null, null);
fail("Should have thrown UnsupportedOperationException.");
} catch (final UnsupportedOperationException expected) { }
} |
public static void copy(int[] src, long[] dest, int length) {
for (int i = 0; i < length; i++) {
dest[i] = src[i];
}
} | @Test
public void testCopyFromIntArray() {
ArrayCopyUtils.copy(INT_ARRAY, LONG_BUFFER, COPY_LENGTH);
ArrayCopyUtils.copy(INT_ARRAY, FLOAT_BUFFER, COPY_LENGTH);
ArrayCopyUtils.copy(INT_ARRAY, DOUBLE_BUFFER, COPY_LENGTH);
ArrayCopyUtils.copy(INT_ARRAY, STRING_BUFFER, COPY_LENGTH);
for (int i = 0; i < COPY_LENGTH; i++) {
Assert.assertEquals(LONG_BUFFER[i], (long) INT_ARRAY[i]);
Assert.assertEquals(FLOAT_BUFFER[i], (float) INT_ARRAY[i]);
Assert.assertEquals(DOUBLE_BUFFER[i], (double) INT_ARRAY[i]);
Assert.assertEquals(STRING_BUFFER[i], Integer.toString(INT_ARRAY[i]));
}
} |
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
NodeInfo nodeInfo = (NodeInfo) o;
return name.equals(nodeInfo.name);
} | @Test
public void test_equals_and_hashCode() {
NodeInfo foo = new NodeInfo("foo");
NodeInfo bar = new NodeInfo("bar");
NodeInfo bar2 = new NodeInfo("bar");
assertThat(foo.equals(foo)).isTrue();
assertThat(foo.equals(bar)).isFalse();
assertThat(bar.equals(bar2)).isTrue();
assertThat(bar)
.hasSameHashCodeAs(bar)
.hasSameHashCodeAs(bar2);
} |
public static PartitionKey createPartitionKey(List<String> values, List<Column> columns) throws AnalysisException {
return createPartitionKey(values, columns, Table.TableType.HIVE);
} | @Test
public void testCreateDeltaLakePartitionKey() throws AnalysisException {
PartitionKey partitionKey = createPartitionKey(
Lists.newArrayList("1", "a", "3.0", DeltaLakeTable.PARTITION_NULL_VALUE), partColumns,
Table.TableType.DELTALAKE);
Assert.assertEquals("(\"1\", \"a\", \"3.0\", \"NULL\")", partitionKey.toSql());
} |
@Timed
@Path("/{destination}")
@PUT
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
@ManagedAsync
@Operation(
summary = "Send a message",
description = """
Deliver a message to a single recipient. May be authenticated or unauthenticated; if unauthenticated,
an unidentified-access key or group-send endorsement token must be provided, unless the message is a story.
""")
@ApiResponse(responseCode="200", description="Message was successfully sent", useReturnTypeSchema=true)
@ApiResponse(
responseCode="401",
description="The message is not a story and the authorization, unauthorized access key, or group send endorsement token is missing or incorrect")
@ApiResponse(
responseCode="404",
description="The message is not a story and some the recipient service ID does not correspond to a registered Signal user")
@ApiResponse(
responseCode = "409", description = "Incorrect set of devices supplied for recipient",
content = @Content(schema = @Schema(implementation = AccountMismatchedDevices[].class)))
@ApiResponse(
responseCode = "410", description = "Mismatched registration ids supplied for some recipient devices",
content = @Content(schema = @Schema(implementation = AccountStaleDevices[].class)))
public Response sendMessage(@ReadOnly @Auth Optional<AuthenticatedDevice> source,
@Parameter(description="The recipient's unidentified access key")
@HeaderParam(HeaderUtils.UNIDENTIFIED_ACCESS_KEY) Optional<Anonymous> accessKey,
@Parameter(description="A group send endorsement token covering the recipient. Must not be combined with `Unidentified-Access-Key` or set on a story message.")
@HeaderParam(HeaderUtils.GROUP_SEND_TOKEN)
@Nullable GroupSendTokenHeader groupSendToken,
@HeaderParam(HttpHeaders.USER_AGENT) String userAgent,
@Parameter(description="If true, deliver the message only to recipients that are online when it is sent")
@PathParam("destination") ServiceIdentifier destinationIdentifier,
@Parameter(description="If true, the message is a story; access tokens are not checked and sending to nonexistent recipients is permitted")
@QueryParam("story") boolean isStory,
@Parameter(description="The encrypted message payloads for each recipient device")
@NotNull @Valid IncomingMessageList messages,
@Context ContainerRequestContext context) throws RateLimitExceededException {
final Sample sample = Timer.start();
try {
if (source.isEmpty() && accessKey.isEmpty() && groupSendToken == null && !isStory) {
throw new WebApplicationException(Response.Status.UNAUTHORIZED);
}
if (groupSendToken != null) {
if (!source.isEmpty() || !accessKey.isEmpty()) {
throw new BadRequestException("Group send endorsement tokens should not be combined with other authentication");
} else if (isStory) {
throw new BadRequestException("Group send endorsement tokens should not be sent for story messages");
}
}
final String senderType;
if (source.isPresent()) {
if (source.get().getAccount().isIdentifiedBy(destinationIdentifier)) {
senderType = SENDER_TYPE_SELF;
} else {
senderType = SENDER_TYPE_IDENTIFIED;
}
} else {
senderType = SENDER_TYPE_UNIDENTIFIED;
}
boolean isSyncMessage = source.isPresent() && source.get().getAccount().isIdentifiedBy(destinationIdentifier);
if (isSyncMessage && destinationIdentifier.identityType() == IdentityType.PNI) {
throw new WebApplicationException(Status.FORBIDDEN);
}
Optional<Account> destination;
if (!isSyncMessage) {
destination = accountsManager.getByServiceIdentifier(destinationIdentifier);
} else {
destination = source.map(AuthenticatedDevice::getAccount);
}
final Optional<Response> spamCheck = spamChecker.checkForSpam(
context, source.map(AuthenticatedDevice::getAccount), destination);
if (spamCheck.isPresent()) {
return spamCheck.get();
}
final Optional<byte[]> spamReportToken = switch (senderType) {
case SENDER_TYPE_IDENTIFIED ->
reportSpamTokenProvider.makeReportSpamToken(context, source.get(), destination);
default -> Optional.empty();
};
int totalContentLength = 0;
for (final IncomingMessage message : messages.messages()) {
int contentLength = 0;
if (StringUtils.isNotEmpty(message.content())) {
contentLength += message.content().length();
}
validateContentLength(contentLength, false, userAgent);
validateEnvelopeType(message.type(), userAgent);
totalContentLength += contentLength;
}
try {
rateLimiters.getInboundMessageBytes().validate(destinationIdentifier.uuid(), totalContentLength);
} catch (final RateLimitExceededException e) {
if (dynamicConfigurationManager.getConfiguration().getInboundMessageByteLimitConfiguration().enforceInboundLimit()) {
messageByteLimitEstimator.add(destinationIdentifier.uuid().toString());
throw e;
}
}
try {
if (isStory) {
// Stories will be checked by the client; we bypass access checks here for stories.
} else if (groupSendToken != null) {
checkGroupSendToken(List.of(destinationIdentifier.toLibsignal()), groupSendToken);
if (destination.isEmpty()) {
throw new NotFoundException();
}
} else {
OptionalAccess.verify(source.map(AuthenticatedDevice::getAccount), accessKey, destination,
destinationIdentifier);
}
boolean needsSync = !isSyncMessage && source.isPresent() && source.get().getAccount().getDevices().size() > 1;
// We return 200 when stories are sent to a non-existent account. Since story sends bypass OptionalAccess.verify
// we leak information about whether a destination UUID exists if we return any other code (e.g. 404) from
// these requests.
if (isStory && destination.isEmpty()) {
return Response.ok(new SendMessageResponse(needsSync)).build();
}
// if destination is empty we would either throw an exception in OptionalAccess.verify when isStory is false
// or else return a 200 response when isStory is true.
assert destination.isPresent();
if (source.isPresent() && !isSyncMessage) {
checkMessageRateLimit(source.get(), destination.get(), userAgent);
}
if (isStory) {
rateLimiters.getStoriesLimiter().validate(destination.get().getUuid());
}
final Set<Byte> excludedDeviceIds;
if (isSyncMessage) {
excludedDeviceIds = Set.of(source.get().getAuthenticatedDevice().getId());
} else {
excludedDeviceIds = Collections.emptySet();
}
DestinationDeviceValidator.validateCompleteDeviceList(destination.get(),
messages.messages().stream().map(IncomingMessage::destinationDeviceId).collect(Collectors.toSet()),
excludedDeviceIds);
DestinationDeviceValidator.validateRegistrationIds(destination.get(),
messages.messages(),
IncomingMessage::destinationDeviceId,
IncomingMessage::destinationRegistrationId,
destination.get().getPhoneNumberIdentifier().equals(destinationIdentifier.uuid()));
final String authType;
if (SENDER_TYPE_IDENTIFIED.equals(senderType)) {
authType = AUTH_TYPE_IDENTIFIED;
} else if (isStory) {
authType = AUTH_TYPE_STORY;
} else if (groupSendToken != null) {
authType = AUTH_TYPE_GROUP_SEND_TOKEN;
} else {
authType = AUTH_TYPE_ACCESS_KEY;
}
final List<Tag> tags = List.of(UserAgentTagUtil.getPlatformTag(userAgent),
Tag.of(ENDPOINT_TYPE_TAG_NAME, ENDPOINT_TYPE_SINGLE),
Tag.of(EPHEMERAL_TAG_NAME, String.valueOf(messages.online())),
Tag.of(SENDER_TYPE_TAG_NAME, senderType),
Tag.of(AUTH_TYPE_TAG_NAME, authType),
Tag.of(IDENTITY_TYPE_TAG_NAME, destinationIdentifier.identityType().name()));
for (IncomingMessage incomingMessage : messages.messages()) {
Optional<Device> destinationDevice = destination.get().getDevice(incomingMessage.destinationDeviceId());
if (destinationDevice.isPresent()) {
Metrics.counter(SENT_MESSAGE_COUNTER_NAME, tags).increment();
sendIndividualMessage(
source,
destination.get(),
destinationDevice.get(),
destinationIdentifier,
messages.timestamp(),
messages.online(),
isStory,
messages.urgent(),
incomingMessage,
userAgent,
spamReportToken);
}
}
return Response.ok(new SendMessageResponse(needsSync)).build();
} catch (MismatchedDevicesException e) {
throw new WebApplicationException(Response.status(409)
.type(MediaType.APPLICATION_JSON_TYPE)
.entity(new MismatchedDevices(e.getMissingDevices(),
e.getExtraDevices()))
.build());
} catch (StaleDevicesException e) {
throw new WebApplicationException(Response.status(410)
.type(MediaType.APPLICATION_JSON)
.entity(new StaleDevices(e.getStaleDevices()))
.build());
}
} finally {
sample.stop(SEND_MESSAGE_LATENCY_TIMER);
}
} | @Test
void testSingleDeviceCurrent() throws Exception {
try (final Response response =
resources.getJerseyTest()
.target(String.format("/v1/messages/%s", SINGLE_DEVICE_UUID))
.request()
.header("Authorization", AuthHelper.getAuthHeader(AuthHelper.VALID_UUID, AuthHelper.VALID_PASSWORD))
.put(Entity.entity(SystemMapper.jsonMapper().readValue(jsonFixture("fixtures/current_message_single_device.json"),
IncomingMessageList.class),
MediaType.APPLICATION_JSON_TYPE))) {
assertThat("Good Response", response.getStatus(), is(equalTo(200)));
ArgumentCaptor<Envelope> captor = ArgumentCaptor.forClass(Envelope.class);
verify(messageSender, times(1)).sendMessage(any(Account.class), any(Device.class), captor.capture(), eq(false));
assertTrue(captor.getValue().hasSourceUuid());
assertTrue(captor.getValue().hasSourceDevice());
assertTrue(captor.getValue().getUrgent());
}
} |
public static void ensureAllReadsConsumed(Pipeline pipeline) {
final Set<PCollection<?>> unconsumed = new HashSet<>();
pipeline.traverseTopologically(
new PipelineVisitor.Defaults() {
@Override
public void visitPrimitiveTransform(Node node) {
unconsumed.removeAll(node.getInputs().values());
}
@Override
public void visitValue(PValue value, Node producer) {
String urn = PTransformTranslation.urnForTransformOrNull(producer.getTransform());
if (PTransformTranslation.READ_TRANSFORM_URN.equals(urn)) {
unconsumed.add((PCollection<?>) value);
}
}
});
int i = 0;
for (PCollection<?> unconsumedPCollection : unconsumed) {
consume(unconsumedPCollection, i);
i++;
}
} | @Test
public void matcherProducesUnconsumedValueBoundedRead() {
Bounded<Long> transform = Read.from(CountingSource.upTo(20L));
pipeline.apply(transform);
UnconsumedReads.ensureAllReadsConsumed(pipeline);
validateConsumed();
} |
@Override
public Thread newThread(Runnable target) {
return delegate.newThread(target);
} | @Test
void requireThatThreadFactoryCallsProvider() {
MetricConsumerProvider provider = Mockito.mock(MetricConsumerProvider.class);
ThreadFactory factory = new ContainerThreadFactory(provider);
factory.newThread(Mockito.mock(Runnable.class));
Mockito.verify(provider, Mockito.times(1)).newInstance();
factory.newThread(Mockito.mock(Runnable.class));
Mockito.verify(provider, Mockito.times(2)).newInstance();
} |
public static DataSourceProvider tryGetDataSourceProviderOrNull(Configuration hdpConfig) {
final String configuredPoolingType = MetastoreConf.getVar(hdpConfig,
MetastoreConf.ConfVars.CONNECTION_POOLING_TYPE);
return Iterables.tryFind(FACTORIES, factory -> {
String poolingType = factory.getPoolingType();
return poolingType != null && poolingType.equalsIgnoreCase(configuredPoolingType);
}).orNull();
} | @Test
public void testEvictIdleConnection() throws Exception {
String[] dataSourceType = {HikariCPDataSourceProvider.HIKARI, DbCPDataSourceProvider.DBCP};
try (DataSourceProvider.DataSourceNameConfigurator configurator =
new DataSourceProvider.DataSourceNameConfigurator(conf, "mutex")) {
for (final String type: dataSourceType) {
MetastoreConf.setVar(conf, ConfVars.CONNECTION_POOLING_TYPE, type);
boolean isHikari = HikariCPDataSourceProvider.HIKARI.equals(type);
if (isHikari) {
conf.unset("hikaricp.connectionInitSql");
// The minimum of idleTimeout is 10s
conf.set("hikaricp.idleTimeout", "10000");
System.setProperty("com.zaxxer.hikari.housekeeping.periodMs", "1000");
} else {
conf.set("dbcp.timeBetweenEvictionRunsMillis", "1000");
conf.set("dbcp.softMinEvictableIdleTimeMillis", "3000");
conf.set("dbcp.maxIdle", "0");
}
DataSourceProvider dsp = DataSourceProviderFactory.tryGetDataSourceProviderOrNull(conf);
DataSource ds = dsp.create(conf, 5);
List<Connection> connections = new ArrayList<>();
for (int i = 0; i < 5; i++) {
connections.add(ds.getConnection());
}
HikariPoolMXBean poolMXBean = null;
GenericObjectPool objectPool = null;
if (isHikari) {
poolMXBean = ((HikariDataSource) ds).getHikariPoolMXBean();
Assert.assertEquals(type, 5, poolMXBean.getTotalConnections());
Assert.assertEquals(type, 5, poolMXBean.getActiveConnections());
} else {
objectPool = (GenericObjectPool) MethodUtils.invokeMethod(ds, true, "getPool");
Assert.assertEquals(type, 5, objectPool.getNumActive());
Assert.assertEquals(type, 5, objectPool.getMaxTotal());
}
connections.forEach(connection -> {
try {
connection.close();
} catch (SQLException e) {
throw new RuntimeException(e);
}
});
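// sleep past the configured idle timeout (Hikari: 10s) / eviction thresholds (DBCP: 3s) so idle connections are evicted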
Thread.sleep(isHikari ? 15000 : 7000);
if (isHikari) {
Assert.assertEquals(type, 2, poolMXBean.getTotalConnections());
Assert.assertEquals(type, 2, poolMXBean.getIdleConnections());
} else {
Assert.assertEquals(type, 0, objectPool.getNumActive());
Assert.assertEquals(type, 0, objectPool.getNumIdle());
}
}
}
} |
@Override
public String name() {
return type.getSimpleName();
} | @Test
void test() {
assertThat(target.name()).isEqualTo("MockTargetTest");
} |
public OkHttpClient build() {
OkHttpClient.Builder builder = new OkHttpClient.Builder();
builder.proxy(proxy);
if (connectTimeoutMs >= 0) {
builder.connectTimeout(connectTimeoutMs, TimeUnit.MILLISECONDS);
}
if (readTimeoutMs >= 0) {
builder.readTimeout(readTimeoutMs, TimeUnit.MILLISECONDS);
}
if (responseTimeoutMs >= 0) {
builder.callTimeout(responseTimeoutMs, TimeUnit.MILLISECONDS);
}
builder.addNetworkInterceptor(this::addHeaders);
if (!acceptGzip) {
builder.addNetworkInterceptor(new GzipRejectorInterceptor());
}
if (proxyLogin != null) {
builder.proxyAuthenticator((route, response) -> {
if (response.request().header(PROXY_AUTHORIZATION) != null) {
// Give up, we've already attempted to authenticate.
return null;
}
if (HttpURLConnection.HTTP_PROXY_AUTH == response.code()) {
String credential = Credentials.basic(proxyLogin, nullToEmpty(proxyPassword), UTF_8);
return response.request().newBuilder().header(PROXY_AUTHORIZATION, credential).build();
}
return null;
});
}
if (followRedirects != null) {
builder.followRedirects(followRedirects);
builder.followSslRedirects(followRedirects);
}
ConnectionSpec tls = new ConnectionSpec.Builder(ConnectionSpec.MODERN_TLS)
.allEnabledTlsVersions()
.allEnabledCipherSuites()
.supportsTlsExtensions(true)
.build();
builder.connectionSpecs(asList(tls, ConnectionSpec.CLEARTEXT));
X509TrustManager trustManager = sslTrustManager != null ? sslTrustManager : systemDefaultTrustManager();
SSLSocketFactory sslFactory = sslSocketFactory != null ? sslSocketFactory : systemDefaultSslSocketFactory(trustManager);
builder.sslSocketFactory(sslFactory, trustManager);
builder.addInterceptor(buildLoggingInterceptor());
return builder.build();
} | @Test
public void build_default_instance_of_OkHttpClient() {
OkHttpClient okHttpClient = underTest.build();
assertThat(okHttpClient.proxy()).isNull();
assertThat(okHttpClient.networkInterceptors()).hasSize(2);
assertThat(okHttpClient.sslSocketFactory()).isNotNull();
assertThat(okHttpClient.followRedirects()).isTrue();
assertThat(okHttpClient.followSslRedirects()).isTrue();
} |
@Override
public ClientDetailsEntity saveNewClient(ClientDetailsEntity client) {
if (client.getId() != null) { // if it's not null, it's already been saved, this is an error
throw new IllegalArgumentException("Tried to save a new client with an existing ID: " + client.getId());
}
if (client.getRegisteredRedirectUri() != null) {
for (String uri : client.getRegisteredRedirectUri()) {
if (blacklistedSiteService.isBlacklisted(uri)) {
throw new IllegalArgumentException("Client URI is blacklisted: " + uri);
}
}
}
// assign a random clientid if it's empty
// NOTE: don't assign a random client secret without asking, since public clients have no secret
if (Strings.isNullOrEmpty(client.getClientId())) {
client = generateClientId(client);
}
// make sure that clients with the "refresh_token" grant type have the "offline_access" scope, and vice versa
ensureRefreshTokenConsistency(client);
// make sure we don't have both a JWKS and a JWKS URI
ensureKeyConsistency(client);
// check consistency when using HEART mode
checkHeartMode(client);
// timestamp this to right now
client.setCreatedAt(new Date());
// check the sector URI
checkSectorIdentifierUri(client);
ensureNoReservedScopes(client);
ClientDetailsEntity c = clientRepository.saveClient(client);
statsService.resetCache();
return c;
} | @Test(expected = IllegalArgumentException.class)
public void heartMode_implicit_redirectUris() {
Mockito.when(config.isHeartMode()).thenReturn(true);
ClientDetailsEntity client = new ClientDetailsEntity();
Set<String> grantTypes = new LinkedHashSet<>();
grantTypes.add("implicit");
client.setGrantTypes(grantTypes);
client.setTokenEndpointAuthMethod(AuthMethod.NONE);
service.saveNewClient(client);
} |
@Override
public HoodieTimeline getTimeline() {
return execute(preferredView::getTimeline, () -> getSecondaryView().getTimeline());
} | @Test
public void testGetTimeline() {
HoodieTimeline actual;
HoodieTimeline expected = new MockHoodieTimeline(Stream.empty(), Stream.empty());
when(primary.getTimeline()).thenReturn(expected);
actual = fsView.getTimeline();
assertEquals(expected, actual);
verify(secondaryViewSupplier, never()).get();
resetMocks();
when(secondaryViewSupplier.get()).thenReturn(secondary);
when(primary.getTimeline()).thenThrow(new RuntimeException());
when(secondary.getTimeline()).thenReturn(expected);
actual = fsView.getTimeline();
assertEquals(expected, actual);
resetMocks();
when(secondary.getTimeline()).thenReturn(expected);
actual = fsView.getTimeline();
assertEquals(expected, actual);
resetMocks();
when(secondary.getTimeline()).thenThrow(new RuntimeException());
assertThrows(RuntimeException.class, () -> {
fsView.getTimeline();
});
} |
@VisibleForTesting
void recover() {
try (DbSession dbSession = dbClient.openSession(false)) {
Profiler profiler = Profiler.create(LOGGER).start();
long beforeDate = system2.now() - minAgeInMs;
IndexingResult result = new IndexingResult();
Collection<EsQueueDto> items = dbClient.esQueueDao().selectForRecovery(dbSession, beforeDate, loopLimit);
while (!items.isEmpty()) {
IndexingResult loopResult = new IndexingResult();
groupItemsByDocType(items).asMap().forEach((type, typeItems) -> loopResult.add(doIndex(dbSession, type, typeItems)));
result.add(loopResult);
if (loopResult.getSuccessRatio() <= CIRCUIT_BREAKER_IN_PERCENT) {
LOGGER.error(LOG_PREFIX + "too many failures [{}/{} documents], waiting for next run", loopResult.getFailures(), loopResult.getTotal());
break;
}
if (loopResult.getTotal() == 0L) {
break;
}
items = dbClient.esQueueDao().selectForRecovery(dbSession, beforeDate, loopLimit);
}
if (result.getTotal() > 0L) {
profiler.stopInfo(LOG_PREFIX + format("%d documents processed [%d failures]", result.getTotal(), result.getFailures()));
}
} catch (Throwable t) {
LOGGER.error(LOG_PREFIX + "fail to recover documents", t);
}
} | @Test
public void recover_multiple_times_the_same_document() {
EsQueueDto item1 = insertItem(FOO_TYPE, "f1");
EsQueueDto item2 = insertItem(FOO_TYPE, item1.getDocId());
EsQueueDto item3 = insertItem(FOO_TYPE, item1.getDocId());
advanceInTime();
SuccessfulFakeIndexer indexer = new SuccessfulFakeIndexer(FOO_TYPE);
underTest = newRecoveryIndexer(indexer);
underTest.recover();
assertThatQueueHasSize(0);
assertThat(indexer.called).hasSize(1);
assertThat(indexer.called.get(0)).extracting(EsQueueDto::getUuid)
.containsExactlyInAnyOrder(item1.getUuid(), item2.getUuid(), item3.getUuid());
assertThatLogsContain(TRACE, "Elasticsearch recovery - processing 3 [foos/foo]");
assertThatLogsContain(INFO, "Elasticsearch recovery - 3 documents processed [0 failures]");
} |
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
super.onDataReceived(device, data);
if (data.size() != 5 && data.size() != 7) {
onInvalidDataReceived(device, data);
return;
}
final int timeOffset = data.getIntValue(Data.FORMAT_UINT16_LE, 0);
final int warningStatus = data.getIntValue(Data.FORMAT_UINT8, 2);
final int calibrationTempStatus = data.getIntValue(Data.FORMAT_UINT8, 3);
final int sensorStatus = data.getIntValue(Data.FORMAT_UINT8, 4);
final boolean crcPresent = data.size() == 7;
if (crcPresent) {
final int actualCrc = CRC16.MCRF4XX(data.getValue(), 0, 5);
final int expectedCrc = data.getIntValue(Data.FORMAT_UINT16_LE, 5);
if (actualCrc != expectedCrc) {
onContinuousGlucoseMonitorStatusReceivedWithCrcError(device, data);
return;
}
}
final CGMStatus status = new CGMStatus(warningStatus, calibrationTempStatus, sensorStatus);
onContinuousGlucoseMonitorStatusChanged(device, status, timeOffset, crcPresent);
} | @Test
public void onContinuousGlucoseMonitorStatusChanged_withCrc() {
final DataReceivedCallback callback = new CGMStatusDataCallback() {
@Override
public void onContinuousGlucoseMonitorStatusChanged(@NonNull final BluetoothDevice device, @NonNull final CGMStatus status,
final int timeOffset, final boolean secured) {
assertNotNull("Status present", status);
assertTrue(status.sessionStopped);
assertTrue(status.deviceBatteryLow);
assertTrue(status.sensorTypeIncorrectForDevice);
assertTrue(status.sensorMalfunction);
assertTrue(status.deviceSpecificAlert);
assertTrue(status.generalDeviceFault);
assertTrue(status.timeSyncRequired);
assertTrue(status.calibrationNotAllowed);
assertTrue(status.calibrationRecommended);
assertTrue(status.calibrationRequired);
assertTrue(status.sensorTemperatureTooHigh);
assertTrue(status.sensorTemperatureTooLow);
assertTrue(status.sensorResultLowerThenPatientLowLevel);
assertTrue(status.sensorResultHigherThenPatientHighLevel);
assertTrue(status.sensorResultLowerThenHypoLevel);
assertTrue(status.sensorResultHigherThenHyperLevel);
assertTrue(status.sensorRateOfDecreaseExceeded);
assertTrue(status.sensorRateOfIncreaseExceeded);
assertTrue(status.sensorResultLowerThenDeviceCanProcess);
assertTrue(status.sensorResultHigherThenDeviceCanProcess);
assertEquals("Time offset", 5, timeOffset);
assertTrue(secured);
}
@Override
public void onContinuousGlucoseMonitorStatusReceivedWithCrcError(@NonNull final BluetoothDevice device, @NonNull final Data data) {
assertEquals("Correct data reported as CRC error", 1, 2);
}
@Override
public void onInvalidDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
assertEquals("Correct data reported as invalid", 1, 2);
}
};
final MutableData data = new MutableData(new byte[7]);
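// time offset = 5, encoded as UINT16 LE at offset 0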
data.setValue(5, Data.FORMAT_UINT16_LE, 0);
data.setValue(0xff3f3f, Data.FORMAT_UINT24_LE, 2); // all flags set
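// expected CRC-16/MCRF4XX of the first 5 bytes, appended at offset 5 so the secured callback is taken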
data.setValue(0xE0A7, Data.FORMAT_UINT16_LE, 5);
callback.onDataReceived(null, data);
} |
@Override
public V get(K key) {
begin();
V value = transactionalMap.get(key);
commit();
return value;
} | @Test
public void testGet() {
map.put(42, "foobar");
String result = adapter.get(42);
assertEquals("foobar", result);
} |
public static ValueLabel formatPacketRate(long packets) {
return new ValueLabel(packets, PACKETS_UNIT).perSec();
} | @Test
public void formatPacketRateKilo() {
vl = TopoUtils.formatPacketRate(1024);
assertEquals(AM_WL, "1 Kpps", vl.toString());
} |
public StorageEntity queryResourcesFileInfo(String userName, String fullName) throws Exception {
return resourceService.queryFileStatus(userName, fullName);
} | @Test
public void testQueryResourcesFileInfo() throws Exception {
User user = getTestUser();
StorageEntity storageEntity = getTestResource();
Mockito.when(resourcesService.queryFileStatus(user.getUserName(), storageEntity.getFullName()))
.thenReturn(storageEntity);
StorageEntity result = pythonGateway.queryResourcesFileInfo(user.getUserName(), storageEntity.getFullName());
Assertions.assertEquals(result.getId(), storageEntity.getId());
} |
@Override
public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
checkInitialized();
for (Callback callback : callbacks) {
if (callback instanceof OAuthBearerTokenCallback) {
handleTokenCallback((OAuthBearerTokenCallback) callback);
} else if (callback instanceof SaslExtensionsCallback) {
handleExtensionsCallback((SaslExtensionsCallback) callback);
} else {
throw new UnsupportedCallbackException(callback);
}
}
} | @Test
public void testNotConfigured() {
OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler();
assertThrowsWithMessage(IllegalStateException.class, () -> handler.handle(new Callback[] {}), "first call the configure or init method");
} |
public long getLastAccessTime() {
return lastAccessTime;
} | @Test
public void getLastAccessTime() {
long lastAccessTime = replicatedRecord.getLastAccessTime();
sleepAtLeastMillis(100);
replicatedRecord.setValue("newValue", 0);
assertTrue("replicatedRecord.getLastAccessTime() should return a greater access time",
replicatedRecord.getLastAccessTime() > lastAccessTime);
} |
public static ConfigDefinitionKey createConfigDefinitionKeyFromDefFile(File file) throws IOException {
String[] fileName = file.getName().split("\\.");
assert (fileName.length >= 2);
String name = fileName[fileName.length - 2];
byte[] content = IOUtils.readFileBytes(file);
return createConfigDefinitionKeyFromDefContent(name, content);
} | @Test
public void testCreateConfigDefinitionKeyFromDefFile() {
ConfigDefinitionKey def = null;
try {
def = ConfigUtils.createConfigDefinitionKeyFromDefFile(new File("src/test/resources/configs/def-files/app.def"));
} catch (IOException e) {
e.printStackTrace();
fail();
}
assertEquals("app", def.getName());
assertEquals("foo", def.getNamespace());
try {
def = ConfigUtils.createConfigDefinitionKeyFromDefFile(new File("src/test/resources/configs/def-files/testnamespace.def"));
} catch (IOException e) {
e.printStackTrace();
fail();
}
assertEquals("testnamespace", def.getName());
assertEquals("foo", def.getNamespace());
try {
byte[] content = IOUtils.readFileBytes(new File("src/test/resources/configs/def-files/app.def"));
def = ConfigUtils.createConfigDefinitionKeyFromDefContent("app", content);
} catch (IOException e) {
fail();
}
assertEquals("app", def.getName());
assertEquals("foo", def.getNamespace());
try {
byte[] content = IOUtils.readFileBytes(new File("src/test/resources/configs/def-files-nogen/app.def"));
def = ConfigUtils.createConfigDefinitionKeyFromDefContent("app", content);
} catch (IOException e) {
fail();
}
assertEquals("app", def.getName());
assertEquals("mynamespace", def.getNamespace());
} |
@Override
public String getOperationName(Exchange exchange, Endpoint endpoint) {
Map<String, String> queryParameters = toQueryParameters(endpoint.getEndpointUri());
String opName = queryParameters.get("operation");
if (opName != null) {
return opName;
}
return super.getOperationName(exchange, endpoint);
} | @Test
public void testGetOperationName() {
Endpoint endpoint = Mockito.mock(Endpoint.class);
Mockito.when(endpoint.getEndpointUri()).thenReturn(MONGODB_STATEMENT);
SpanDecorator decorator = new MongoDBSpanDecorator();
assertEquals("findOneByQuery", decorator.getOperationName(null, endpoint));
} |
public static String encodeSetCookie(HttpCookie cookie)
{
if (cookie == null)
{
return null;
}
StringBuilder sb = new StringBuilder();
sb.append(cookie.getName()).append("=").append(cookie.getValue());
if (cookie.getPath() != null)
{
sb.append(";Path=").append(cookie.getPath());
}
if (cookie.getDomain() != null)
{
sb.append(";Domain=").append(cookie.getDomain());
}
if (cookie.getPortlist() != null)
{
// Port value should be quoted according to RFC 2965 Section 3.2.2.
sb.append(";Port=\"").append(cookie.getPortlist()).append('"');
}
sb.append(";Max-Age=").append(Long.toString(cookie.getMaxAge()));
sb.append(";Version=").append(Integer.toString(cookie.getVersion()));
if (cookie.getDiscard())
{
sb.append(";Discard");
}
if (cookie.getSecure())
{
sb.append(";Secure");
}
if (cookie.isHttpOnly())
{
sb.append(";HttpOnly");
}
if (cookie.getComment() != null)
{
sb.append(";Comment=").append(cookie.getComment());
}
if (cookie.getCommentURL() != null)
{
// CommentURL value should be quoted according to RFC 2965 Section 3.2.2.
sb.append(";CommentURL=\"").append(cookie.getCommentURL()).append('"');
}
return sb.toString();
} | @Test
public void testCookieAttributeEncoding()
{
String encodedCookie = CookieUtil.encodeSetCookie(cookieA);
Assert.assertTrue(encodedCookie.contains("Domain=.android.com"));
Assert.assertTrue(encodedCookie.contains("Path=/source/"));
Assert.assertTrue(encodedCookie.contains("Max-Age=125"));
Assert.assertTrue(encodedCookie.contains("HttpOnly"));
} |
public AlluxioURI joinUnsafe(String suffix) {
String path = getPath();
StringBuilder sb = new StringBuilder(path.length() + 1 + suffix.length());
return new AlluxioURI(this,
sb.append(path).append(AlluxioURI.SEPARATOR).append(suffix).toString(), false);
} | @Test
public void joinUnsafe() {
assertEquals(new AlluxioURI("/a"), new AlluxioURI("/").joinUnsafe("a"));
assertEquals(new AlluxioURI("/a/b"), new AlluxioURI("/a").joinUnsafe("b"));
assertEquals(new AlluxioURI("a/b"), new AlluxioURI("a").joinUnsafe("b"));
assertEquals(new AlluxioURI("a/b.txt"), new AlluxioURI("a").joinUnsafe("/b.txt"));
assertEquals(new AlluxioURI("alluxio:/a/b.txt"),
new AlluxioURI("alluxio:/a").joinUnsafe("/b.txt"));
assertEquals(new AlluxioURI("C:\\\\a\\b"), new AlluxioURI("C:\\\\a").joinUnsafe("\\b"));
assertEquals(new AlluxioURI("/a/b"), new AlluxioURI("/a").joinUnsafe("///b///"));
final String pathWithSpecialChar = "����,��b����$o����[| =B����";
assertEquals(new AlluxioURI("/" + pathWithSpecialChar),
new AlluxioURI("/").joinUnsafe(pathWithSpecialChar));
final String pathWithSpecialCharAndColon = "����,��b����$o����[| =B��:��";
assertEquals(new AlluxioURI("/" + pathWithSpecialCharAndColon),
new AlluxioURI("/").joinUnsafe(pathWithSpecialCharAndColon));
// The following joins are not "safe", because the new path component requires normalization.
assertNotEquals(new AlluxioURI("/a/c"), new AlluxioURI("/a").joinUnsafe("b/../c"));
assertNotEquals(new AlluxioURI("a/b.txt"), new AlluxioURI("a").joinUnsafe("/c/../b.txt"));
assertNotEquals(new AlluxioURI("alluxio:/a/b.txt"),
new AlluxioURI("alluxio:/a/c.txt").joinUnsafe("/../b.txt"));
// join empty string
assertEquals(new AlluxioURI("/a"), new AlluxioURI("/a").joinUnsafe(""));
} |
public static UBinary create(Kind binaryOp, UExpression lhs, UExpression rhs) {
checkArgument(
OP_CODES.containsKey(binaryOp), "%s is not a supported binary operation", binaryOp);
return new AutoValue_UBinary(binaryOp, lhs, rhs);
} | @Test
public void greaterThanOrEqual() {
assertUnifiesAndInlines(
"4 >= 17",
UBinary.create(Kind.GREATER_THAN_EQUAL, ULiteral.intLit(4), ULiteral.intLit(17)));
} |
@Override
public Result invoke(Invoker<?> invoker, Invocation invocation) throws RpcException {
return invoker.invoke(invocation);
} | @SuppressWarnings("unchecked")
@Test
void testRuntimeException() {
ExceptionFilter exceptionFilter = new ExceptionFilter();
RpcInvocation invocation = new RpcInvocation(
"sayHello", DemoService.class.getName(), "", new Class<?>[] {String.class}, new Object[] {"world"});
AppResponse appResponse = new AppResponse();
appResponse.setException(new LocalException("localException"));
Invoker<DemoService> invoker = mock(Invoker.class);
when(invoker.invoke(invocation)).thenReturn(appResponse);
when(invoker.getInterface()).thenReturn(DemoService.class);
Result newResult = exceptionFilter.invoke(invoker, invocation);
Assertions.assertEquals(appResponse.getException(), newResult.getException());
} |
public static <T> T toObj(byte[] json, Class<T> cls) {
try {
return mapper.readValue(json, cls);
} catch (Exception e) {
throw new NacosDeserializationException(cls, e);
}
} | @Test
void testToObject13() {
assertThrows(Exception.class, () -> {
JacksonUtils.toObj(new ByteArrayInputStream("{\"key\":\"value\"}".getBytes()), Object.class.getGenericSuperclass());
});
} |
@Override
public Num calculate(BarSeries series, Position position) {
if (position.isClosed()) {
Num profit = excludeCosts ? position.getGrossProfit() : position.getProfit();
return profit.isPositive() ? profit : series.zero();
}
return series.zero();
} | @Test
public void calculateProfitWithShortPositions() {
MockBarSeries series = new MockBarSeries(numFunction, 95, 100, 70, 80, 85, 100);
TradingRecord tradingRecord = new BaseTradingRecord(Trade.sellAt(0, series), Trade.buyAt(1, series),
Trade.sellAt(2, series), Trade.buyAt(5, series));
AnalysisCriterion profit = getCriterion(false);
assertNumEquals(0, profit.calculate(series, tradingRecord));
} |
@Override
public synchronized void markEvent() {
eventTimestamps.add(clock.absoluteTimeMillis());
eventCount++;
} | @Test
void testMarkEvent() {
final ThresholdMeter thresholdMeter = createLargeThresholdMeter();
thresholdMeter.markEvent();
clock.advanceTime(SLEEP, TimeUnit.MILLISECONDS);
assertThat(thresholdMeter.getCount()).isOne();
assertThat(thresholdMeter.getRate()).isCloseTo(toPerSecondRate(1), within(ERROR));
thresholdMeter.markEvent();
assertThat(thresholdMeter.getCount()).isEqualTo(2);
clock.advanceTime(SLEEP, TimeUnit.MILLISECONDS);
assertThat(thresholdMeter.getRate()).isCloseTo(toPerSecondRate(2), within(ERROR));
} |
static JarFileWithEntryClass findOnlyEntryClass(Iterable<File> jarFiles) throws IOException {
List<JarFileWithEntryClass> jarsWithEntryClasses = new ArrayList<>();
for (File jarFile : jarFiles) {
findEntryClass(jarFile)
.ifPresent(
entryClass ->
jarsWithEntryClasses.add(
new JarFileWithEntryClass(jarFile, entryClass)));
}
int size = jarsWithEntryClasses.size();
if (size == 0) {
throw new NoSuchElementException("No JAR with manifest attribute for entry class");
}
if (size == 1) {
return jarsWithEntryClasses.get(0);
}
// else: size > 1
throw new IllegalArgumentException(
"Multiple JARs with manifest attribute for entry class: " + jarsWithEntryClasses);
} | @Test
void testFindOnlyEntryClassEmptyArgument() {
assertThatThrownBy(() -> JarManifestParser.findOnlyEntryClass(Collections.emptyList()))
.isInstanceOf(NoSuchElementException.class);
} |
@Override
public boolean checkMasterWritable() {
testMasterWritableJT.setDataSource(jt.getDataSource());
// Prevent the login interface from being too long because the main library is not available
testMasterWritableJT.setQueryTimeout(1);
String sql = " SELECT @@read_only ";
try {
Integer result = testMasterWritableJT.queryForObject(sql, Integer.class);
if (result == null) {
return false;
} else {
return result == 0;
}
} catch (CannotGetJdbcConnectionException e) {
LOGGER.error("[db-error] " + e.toString(), e);
return false;
}
} | @Test
void testCheckMasterWritable() {
when(testMasterWritableJT.queryForObject(eq(" SELECT @@read_only "), eq(Integer.class))).thenReturn(0);
assertTrue(service.checkMasterWritable());
} |
@Override
public boolean isDisposed() {
return !running.get();
} | @Test
void testIssue416() {
TestResources resources = TestResources.get();
TestResources.set(ConnectionProvider.create("testIssue416"));
assertThat(resources.provider.isDisposed()).isTrue();
assertThat(resources.loops.isDisposed()).isFalse();
TestResources.set(LoopResources.create("test"));
assertThat(resources.loops.isDisposed()).isTrue();
assertThat(resources.isDisposed()).isTrue();
} |
@Override
public boolean isAdded(Component component) {
checkComponent(component);
if (analysisMetadataHolder.isFirstAnalysis()) {
return true;
}
return addedComponents.contains(component);
} | @Test
public void isAdded_returns_false_for_unregistered_component_type_when_not_on_first_analysis() {
when(analysisMetadataHolder.isFirstAnalysis()).thenReturn(false);
Arrays.stream(Component.Type.values()).forEach(type -> {
Component component = newComponent(type);
assertThat(underTest.isAdded(component)).isFalse();
});
} |
@Override
public Path move(final Path file, final Path target, final TransferStatus status, final Delete.Callback delete, final ConnectionCallback callback) throws BackgroundException {
try {
final EueApiClient client = new EueApiClient(session);
if(status.isExists()) {
if(!new CaseInsensitivePathPredicate(file).test(target)) {
if(log.isWarnEnabled()) {
log.warn(String.format("Trash file %s to be replaced with %s", target, file));
}
new EueTrashFeature(session, fileid).delete(Collections.singletonMap(target, status), callback, delete);
}
}
final String resourceId = fileid.getFileId(file);
if(!new SimplePathPredicate(file.getParent()).test(target.getParent())) {
final ResourceMoveResponseEntries resourceMoveResponseEntries;
final String parentResourceId = fileid.getFileId(target.getParent());
switch(parentResourceId) {
case EueResourceIdProvider.ROOT:
case EueResourceIdProvider.TRASH:
resourceMoveResponseEntries = new MoveChildrenForAliasApiApi(client)
.resourceAliasAliasChildrenMovePost(parentResourceId,
Collections.singletonList(String.format("%s/resource/%s",
session.getBasePath(), resourceId)), null, null, null,
"rename", null);
break;
default:
resourceMoveResponseEntries = new MoveChildrenApi(client)
.resourceResourceIdChildrenMovePost(parentResourceId,
Collections.singletonList(String.format("%s/resource/%s",
session.getBasePath(), resourceId)), null, null, null,
"rename", null);
}
if(null == resourceMoveResponseEntries) {
// Move of single file will return 200 status code with empty response body
}
else {
for(ResourceMoveResponseEntry resourceMoveResponseEntry : resourceMoveResponseEntries.values()) {
switch(resourceMoveResponseEntry.getStatusCode()) {
case HttpStatus.SC_OK:
break;
default:
log.warn(String.format("Failure %s moving file %s", resourceMoveResponseEntries, file));
final ResourceCreationResponseEntryEntity entity = resourceMoveResponseEntry.getEntity();
if(null == entity) {
throw new EueExceptionMappingService().map(new ApiException(resourceMoveResponseEntry.getReason(),
null, resourceMoveResponseEntry.getStatusCode(), client.getResponseHeaders()));
}
throw new EueExceptionMappingService().map(new ApiException(resourceMoveResponseEntry.getEntity().getError(),
null, resourceMoveResponseEntry.getStatusCode(), client.getResponseHeaders()));
}
}
}
}
if(!StringUtils.equals(file.getName(), target.getName())) {
final ResourceUpdateModel resourceUpdateModel = new ResourceUpdateModel();
final ResourceUpdateModelUpdate resourceUpdateModelUpdate = new ResourceUpdateModelUpdate();
final Uifs uifs = new Uifs();
uifs.setName(target.getName());
resourceUpdateModelUpdate.setUifs(uifs);
resourceUpdateModel.setUpdate(resourceUpdateModelUpdate);
final ResourceMoveResponseEntries resourceMoveResponseEntries = new UpdateResourceApi(client).resourceResourceIdPatch(resourceId,
resourceUpdateModel, null, null, null);
if(null == resourceMoveResponseEntries) {
// Move of single file will return 200 status code with empty response body
}
else {
for(ResourceMoveResponseEntry resourceMoveResponseEntry : resourceMoveResponseEntries.values()) {
switch(resourceMoveResponseEntry.getStatusCode()) {
case HttpStatus.SC_CREATED:
break;
default:
log.warn(String.format("Failure %s renaming file %s", resourceMoveResponseEntry, file));
throw new EueExceptionMappingService().map(new ApiException(resourceMoveResponseEntry.getReason(),
null, resourceMoveResponseEntry.getStatusCode(), client.getResponseHeaders()));
}
}
}
}
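// Invalidate the cached resource id for the source path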
fileid.cache(file, null);
return target;
}
catch(ApiException e) {
throw new EueExceptionMappingService().map("Cannot rename {0}", e, file);
}
} | @Test(expected = NotfoundException.class)
public void testMoveNotFound() throws Exception {
final EueResourceIdProvider fileid = new EueResourceIdProvider(session);
final Path test = new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
test.attributes().setFileId(new AlphanumericRandomStringService().random());
new EueMoveFeature(session, fileid).move(test, new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback());
} |
public void await() {
mSync.acquire(IGNORED_ARG);
} | @Test
public void await() throws Exception {
Assert.assertEquals(0, mLatch.getState());
mLatch.inc();
Assert.assertEquals(1, mLatch.getState());
mLatch.inc();
Assert.assertEquals(2, mLatch.getState());
BlockingThread await = new BlockingThread(mLatch::await);
await.start();
Assert.assertEquals(STILL_BLOCKED, await.getBlockedTimeMillis());
Thread.sleep(SLEEP_MILLIS);
mLatch.dec();
Assert.assertEquals(1, mLatch.getState());
Assert.assertEquals(STILL_BLOCKED, await.getBlockedTimeMillis());
Thread.sleep(SLEEP_MILLIS);
mLatch.dec();
Assert.assertEquals(0, mLatch.getState());
await.join();
Assert.assertTrue(String.format("BlockedTimeMillis: %s", await.getBlockedTimeMillis()),
await.getBlockedTimeMillis() >= 2 * SLEEP_MILLIS);
Assert.assertEquals(-1, mLatch.getState());
} |
public static Map<TopicPartition, Long> fetchCommittedOffsets(final Set<TopicPartition> partitions,
final Consumer<byte[], byte[]> consumer) {
if (partitions.isEmpty()) {
return Collections.emptyMap();
}
final Map<TopicPartition, Long> committedOffsets;
try {
// those which do not have a committed offset would default to 0
committedOffsets = consumer.committed(partitions).entrySet().stream()
.collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue() == null ? 0L : e.getValue().offset()));
} catch (final TimeoutException timeoutException) {
LOG.warn("The committed offsets request timed out, try increasing the consumer client's default.api.timeout.ms", timeoutException);
throw timeoutException;
} catch (final KafkaException fatal) {
LOG.warn("The committed offsets request failed.", fatal);
throw new StreamsException(String.format("Failed to retrieve end offsets for %s", partitions), fatal);
}
return committedOffsets;
} | @Test
public void fetchCommittedOffsetsShouldRethrowTimeoutException() {
@SuppressWarnings("unchecked")
final Consumer<byte[], byte[]> consumer = mock(Consumer.class);
when(consumer.committed(PARTITIONS)).thenThrow(new TimeoutException());
assertThrows(TimeoutException.class, () -> fetchCommittedOffsets(PARTITIONS, consumer));
} |
public void isEqualTo(@Nullable Object expected) {
standardIsEqualTo(expected);
} | @Test
@SuppressWarnings("TruthIncompatibleType") // test of a mistaken call
public void disambiguationWithSameToString() {
expectFailure.whenTesting().that(new StringBuilder("foo")).isEqualTo(new StringBuilder("foo"));
assertFailureKeys("expected", "but was");
assertFailureValue("expected", "foo");
assertFailureValue(
"but was", "(non-equal instance of same class with same string representation)");
} |
@Override
public List<PluginWrapper> getUnresolvedPlugins() {
return Collections.emptyList();
} | @Test
public void getUnresolvedPlugins() {
assertNotNull(wrappedPluginManager);
assertNotNull(wrappedPluginManager.getUnresolvedPlugins());
assertTrue(wrappedPluginManager.getUnresolvedPlugins().isEmpty());
} |
public static <T> IterableCoder<T> of(Coder<T> elemCoder) {
return new IterableCoder<>(elemCoder);
} | @Test
public void testCoderIsSerializableWithWellKnownCoderType() throws Exception {
CoderProperties.coderSerializable(ListCoder.of(GlobalWindow.Coder.INSTANCE));
} |
public static <T> RestResult<T> failed() {
return RestResult.<T>builder().withCode(500).build();
} | @Test
void testSuccessWithFull() {
RestResult<String> restResult = RestResultUtils.failed(400, "content", "test");
assertRestResult(restResult, 400, "test", "content", false);
} |
public static Iterator<Row> computeUpdates(
Iterator<Row> rowIterator, StructType rowType, String[] identifierFields) {
Iterator<Row> carryoverRemoveIterator = removeCarryovers(rowIterator, rowType);
ChangelogIterator changelogIterator =
new ComputeUpdateIterator(carryoverRemoveIterator, rowType, identifierFields);
return Iterators.filter(changelogIterator, Objects::nonNull);
} | @Test
public void testUpdatedRowsWithDuplication() {
List<Row> rowsWithDuplication =
Lists.newArrayList(
// two rows with same identifier fields(id, name)
new GenericRowWithSchema(new Object[] {1, "a", "data", DELETE, 0, 0}, null),
new GenericRowWithSchema(new Object[] {1, "a", "data", DELETE, 0, 0}, null),
new GenericRowWithSchema(new Object[] {1, "a", "new_data", INSERT, 0, 0}, null),
new GenericRowWithSchema(new Object[] {1, "a", "new_data", INSERT, 0, 0}, null));
Iterator<Row> iterator =
ChangelogIterator.computeUpdates(rowsWithDuplication.iterator(), SCHEMA, IDENTIFIER_FIELDS);
assertThatThrownBy(() -> Lists.newArrayList(iterator))
.isInstanceOf(IllegalStateException.class)
.hasMessage(
"Cannot compute updates because there are multiple rows with the same identifier fields([id,name]). Please make sure the rows are unique.");
// still allow extra insert rows
rowsWithDuplication =
Lists.newArrayList(
new GenericRowWithSchema(new Object[] {1, "a", "data", DELETE, 0, 0}, null),
new GenericRowWithSchema(new Object[] {1, "a", "new_data1", INSERT, 0, 0}, null),
new GenericRowWithSchema(new Object[] {1, "a", "new_data2", INSERT, 0, 0}, null));
Iterator<Row> iterator1 =
ChangelogIterator.computeUpdates(rowsWithDuplication.iterator(), SCHEMA, IDENTIFIER_FIELDS);
assertEquals(
"Rows should match.",
Lists.newArrayList(
new Object[] {1, "a", "data", UPDATE_BEFORE, 0, 0},
new Object[] {1, "a", "new_data1", UPDATE_AFTER, 0, 0},
new Object[] {1, "a", "new_data2", INSERT, 0, 0}),
rowsToJava(Lists.newArrayList(iterator1)));
} |
@SuppressWarnings({"BooleanExpressionComplexity", "CyclomaticComplexity"})
public static boolean isScalablePushQuery(
final Statement statement,
final KsqlExecutionContext ksqlEngine,
final KsqlConfig ksqlConfig,
final Map<String, Object> overrides
) {
if (!isPushV2Enabled(ksqlConfig, overrides)) {
return false;
}
if (!(statement instanceof Query)) {
return false;
}
final Query query = (Query) statement;
final SourceFinder sourceFinder = new SourceFinder();
sourceFinder.process(query.getFrom(), null);
// It will be present if it's not a join, which we don't handle
if (!sourceFinder.getSourceName().isPresent()) {
return false;
}
// Find all of the writers to this particular source.
final SourceName sourceName = sourceFinder.getSourceName().get();
final Set<QueryId> upstreamQueries = ksqlEngine.getQueriesWithSink(sourceName);
// See if the config or override have set the stream to be "latest"
final boolean isLatest = isLatest(ksqlConfig, overrides);
// Cannot be a pull query, i.e. must be a push
return !query.isPullQuery()
// Group by is not supported
&& !query.getGroupBy().isPresent()
// Windowing is not supported
&& !query.getWindow().isPresent()
// Having clause is not supported
&& !query.getHaving().isPresent()
// Partition by is not supported
&& !query.getPartitionBy().isPresent()
// There must be an EMIT CHANGES clause
&& (query.getRefinement().isPresent()
&& query.getRefinement().get().getOutputRefinement() == OutputRefinement.CHANGES)
// Must be reading from "latest"
&& isLatest
// We only handle a single sink source at the moment from a CTAS/CSAS
&& upstreamQueries.size() == 1
// ROWPARTITION and ROWOFFSET are not currently supported in SPQs
&& !containsDisallowedColumns(query);
} | @Test
public void isScalablePushQuery_true() {
try(MockedStatic<ColumnExtractor> columnExtractor = mockStatic(ColumnExtractor.class)) {
// Given:
expectIsSPQ(ColumnName.of("foo"), columnExtractor);
// When:
final boolean isScalablePush = ScalablePushUtil.isScalablePushQuery(
query,
ksqlEngine,
ksqlConfig,
overrides
);
// Then:
assert(isScalablePush);
}
} |
public void put(final T object) {
PortablePreconditions.checkNotNull("Object can not be null",
object);
final UUIDKey uuidKey = UUIDKey.getUUIDKey(object.keys());
if (keys.contains(uuidKey)) {
throw new IllegalArgumentException("UUID already already in use. You are trying to add the same object twice.");
}
keys.add(uuidKey);
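// Index the object under every key it exposes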
for (final Key additionalKey : object.keys()) {
put(additionalKey,
object);
}
} | @Test
void testReAdd() throws Exception {
assertThrows(IllegalArgumentException.class, () -> {
put(toni);
});
} |
public static void getSemanticPropsSingleFromString(
SingleInputSemanticProperties result,
String[] forwarded,
String[] nonForwarded,
String[] readSet,
TypeInformation<?> inType,
TypeInformation<?> outType) {
getSemanticPropsSingleFromString(
result, forwarded, nonForwarded, readSet, inType, outType, false);
} | @Test
void testReadFieldsSpaces() {
String[] readFields = {" f1 ; f2 "};
SingleInputSemanticProperties sp = new SingleInputSemanticProperties();
SemanticPropUtil.getSemanticPropsSingleFromString(
sp, null, null, readFields, threeIntTupleType, threeIntTupleType);
FieldSet fs = sp.getReadFields(0);
assertThat(fs).hasSize(2).contains(2, 1);
} |
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
return api.send(request);
} | @Test
public void loginButton() {
String text = "login";
String url = "https://pengrad.herokuapp.com/hello";
SendResponse response = bot.execute(
new SendMessage(chatId, "Login button").replyMarkup(new InlineKeyboardMarkup(
new InlineKeyboardButton(text).loginUrl(new LoginUrl(url)
.forwardText("forwarded login")
.botUsername("pengrad_test_bot")
.requestWriteAccess(true)))));
assertTrue(response.isOk());
InlineKeyboardButton button = response.message().replyMarkup().inlineKeyboard()[0][0];
assertEquals(text, button.text());
assertEquals(url, button.url());
} |
public static String get(String urlString, Charset customCharset) {
return HttpRequest.get(urlString).charset(customCharset).execute().body();
} | @Test
@Disabled
public void sinajsTest(){
final String s = HttpUtil.get("http://hq.sinajs.cn/list=sh600519");
Console.log(s);
} |
public Range<PartitionKey> handleNewSinglePartitionDesc(Map<ColumnId, Column> schema, SingleRangePartitionDesc desc,
long partitionId, boolean isTemp) throws DdlException {
Range<PartitionKey> range;
try {
range = checkAndCreateRange(schema, desc, isTemp);
setRangeInternal(partitionId, isTemp, range);
} catch (IllegalArgumentException e) {
// Range.closedOpen may throw this if (lower > upper)
throw new DdlException("Invalid key range: " + e.getMessage());
}
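// Record the partition's data property, replication number, in-memory flag and data cache info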
idToDataProperty.put(partitionId, desc.getPartitionDataProperty());
idToReplicationNum.put(partitionId, desc.getReplicationNum());
idToInMemory.put(partitionId, desc.isInMemory());
idToStorageCacheInfo.put(partitionId, desc.getDataCacheInfo());
return range;
} | @Test(expected = DdlException.class)
public void testInt() throws DdlException, AnalysisException {
Column k1 = new Column("k1", new ScalarType(PrimitiveType.INT), true, null, "", "");
partitionColumns.add(k1);
singleRangePartitionDescs.add(new SingleRangePartitionDesc(false, "p1",
new PartitionKeyDesc(Lists.newArrayList(new PartitionValue("-2147483648"))),
null));
partitionInfo = new RangePartitionInfo(partitionColumns);
for (SingleRangePartitionDesc singleRangePartitionDesc : singleRangePartitionDescs) {
singleRangePartitionDesc.analyze(1, null);
partitionInfo.handleNewSinglePartitionDesc(MetaUtils.buildIdToColumn(partitionColumns),
singleRangePartitionDesc, 20000L, false);
}
} |
@Override
public void destroy() {
path2Invoker.clear();
} | @Test
void testDestroy() {
Assertions.assertEquals(INVOKER, getInvokerByPath("/abc"));
{
PATH_RESOLVER.add("/bcd", INVOKER);
Assertions.assertEquals(INVOKER, getInvokerByPath("/bcd"));
}
PATH_RESOLVER.destroy();
Assertions.assertNull(getInvokerByPath("/abc"));
Assertions.assertNull(getInvokerByPath("/bcd"));
} |
@Description("decode the 64-bit big-endian binary in IEEE 754 double-precision floating-point format")
@ScalarFunction("from_ieee754_64")
@SqlType(StandardTypes.DOUBLE)
public static double fromIEEE754Binary64(@SqlType(StandardTypes.VARBINARY) Slice slice)
{
checkCondition(slice.length() == Double.BYTES, INVALID_FUNCTION_ARGUMENT, "Input floating-point value must be exactly 8 bytes long");
return Double.longBitsToDouble(Long.reverseBytes(slice.getLong(0)));
} | @Test
public void testFromIEEE754Binary64()
{
assertFunction("from_ieee754_64(from_hex('0000000000000000'))", DOUBLE, 0.0);
assertFunction("from_ieee754_64(from_hex('3FF0000000000000'))", DOUBLE, 1.0);
assertFunction("from_ieee754_64(to_ieee754_64(3.1415926))", DOUBLE, 3.1415926);
assertFunction("from_ieee754_64(to_ieee754_64(NAN()))", DOUBLE, Double.NaN);
assertFunction("from_ieee754_64(to_ieee754_64(INFINITY()))", DOUBLE, Double.POSITIVE_INFINITY);
assertFunction("from_ieee754_64(to_ieee754_64(-INFINITY()))", DOUBLE, Double.NEGATIVE_INFINITY);
assertFunction("from_ieee754_64(to_ieee754_64(1.7976931348623157E308))", DOUBLE, 1.7976931348623157E308);
assertFunction("from_ieee754_64(to_ieee754_64(-1.7976931348623157E308))", DOUBLE, -1.7976931348623157E308);
assertFunction("from_ieee754_64(to_ieee754_64(4.9E-324))", DOUBLE, 4.9E-324);
assertFunction("from_ieee754_64(to_ieee754_64(-4.9E-324))", DOUBLE, -4.9E-324);
assertInvalidFunction("from_ieee754_64(from_hex('00000000'))", "Input floating-point value must be exactly 8 bytes long");
} |
@Override
public SelJodaDateTimeZone assignOps(SelOp op, SelType rhs) {
if (op == SelOp.ASSIGN) {
SelTypeUtil.checkTypeMatch(this.type(), rhs.type());
this.val = ((SelJodaDateTimeZone) rhs).val;
return this;
}
throw new UnsupportedOperationException(type() + " DO NOT support assignment operation " + op);
} | @Test
public void testAssignOps() {
one.assignOps(SelOp.ASSIGN, another);
assertEquals("DATETIME_ZONE: America/Los_Angeles", one.type() + ": " + one);
} |
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof AlluxioURI)) {
return false;
}
AlluxioURI that = (AlluxioURI) o;
return mUri.equals(that.mUri);
} | @Test
public void queryEquals() {
Map<String, String> queryMap = new HashMap<>();
queryMap.put("a", "b");
queryMap.put("c", "d");
assertTrue(new AlluxioURI("scheme://host:123/a.txt?a=b&c=d")
.equals(new AlluxioURI("scheme://host:123/a.txt?a=b&c=d")));
// There is no guarantee which order the queryMap will create the query string.
assertTrue(new AlluxioURI("scheme://host:123/a.txt?c=d&a=b")
.equals(new AlluxioURI("scheme", Authority.fromString("host:123"), "/a.txt", queryMap))
|| new AlluxioURI("scheme://host:123/a.txt?a=b&c=d")
.equals(new AlluxioURI("scheme", Authority.fromString("host:123"), "/a.txt", queryMap)));
assertFalse(new AlluxioURI("scheme://host:123/a.txt?a=b&c=d&e=f")
.equals(new AlluxioURI("scheme://host:123/a.txt?a=b&c=d")));
assertFalse(new AlluxioURI("scheme://host:123/a.txt?a=b&c=d&e=f")
.equals(new AlluxioURI("scheme", Authority.fromString("host:123"), "/a.txt", queryMap)));
} |
public static ArchivedExecutionGraph createFrom(ExecutionGraph executionGraph) {
return createFrom(executionGraph, null);
} | @Test
void testSerialization() throws IOException, ClassNotFoundException {
ArchivedExecutionGraph archivedGraph = ArchivedExecutionGraph.createFrom(runtimeGraph);
verifySerializability(archivedGraph);
} |
@Deprecated
public static Type resolveLastTypeParameter(Type genericContext, Class<?> supertype)
throws IllegalStateException {
return Types.resolveLastTypeParameter(genericContext, supertype);
} | @Test
void unboundWildcardIsObject() throws Exception {
Type context =
LastTypeParameter.class.getDeclaredField("PARAMETERIZED_DECODER_UNBOUND").getGenericType();
Type last = resolveLastTypeParameter(context, ParameterizedDecoder.class);
assertThat(last).isEqualTo(Object.class);
} |
@Override
public boolean alterOffsets(Map<String, String> config, Map<Map<String, ?>, Map<String, ?>> offsets) {
for (Map.Entry<Map<String, ?>, Map<String, ?>> offsetEntry : offsets.entrySet()) {
Map<String, ?> sourceOffset = offsetEntry.getValue();
if (sourceOffset == null) {
// We allow tombstones for anything; if there's garbage in the offsets for the connector, we don't
// want to prevent users from being able to clean it up using the REST API
continue;
}
Map<String, ?> sourcePartition = offsetEntry.getKey();
if (sourcePartition == null) {
throw new ConnectException("Source partitions may not be null");
}
MirrorUtils.validateSourcePartitionString(sourcePartition, SOURCE_CLUSTER_ALIAS_KEY);
MirrorUtils.validateSourcePartitionString(sourcePartition, TARGET_CLUSTER_ALIAS_KEY);
MirrorUtils.validateSourceOffset(sourcePartition, sourceOffset, true);
}
// We don't actually use these offsets in the task class, so no additional effort is required beyond just validating
// the format of the user-supplied offsets
return true;
} | @Test
public void testAlterOffsetsMultiplePartitions() {
MirrorHeartbeatConnector connector = new MirrorHeartbeatConnector();
Map<String, ?> partition1 = sourcePartition("primary", "backup");
Map<String, ?> partition2 = sourcePartition("backup", "primary");
Map<Map<String, ?>, Map<String, ?>> offsets = new HashMap<>();
offsets.put(partition1, SOURCE_OFFSET);
offsets.put(partition2, SOURCE_OFFSET);
assertTrue(connector.alterOffsets(null, offsets));
} |
@Override
public void deregisterInstance(String serviceName, String ip, int port) throws NacosException {
deregisterInstance(serviceName, ip, port, Constants.DEFAULT_CLUSTER_NAME);
} | @Test
void testDeregisterInstance5() throws NacosException {
//given
String serviceName = "service1";
Instance instance = new Instance();
//when
client.deregisterInstance(serviceName, instance);
//then
verify(proxy, times(1)).deregisterService(serviceName, Constants.DEFAULT_GROUP, instance);
} |
@Operation(summary = "queryClusterByCode", description = "QUERY_CLUSTER_BY_CODE_NOTES")
@Parameters({
@Parameter(name = "clusterCode", description = "CLUSTER_CODE", required = true, schema = @Schema(implementation = long.class, example = "100"))
})
@GetMapping(value = "/query-by-code")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_CLUSTER_BY_CODE_ERROR)
public Result<ClusterDto> queryClusterByCode(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("clusterCode") Long clusterCode) {
ClusterDto clusterDto = clusterService.queryClusterByCode(clusterCode);
return Result.success(clusterDto);
} | @Test
public void testQueryClusterByCode() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("clusterCode", clusterCode);
MvcResult mvcResult = mockMvc.perform(get("/cluster/query-by-code")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
logger.info(result.toString());
Assertions.assertTrue(result != null && result.isSuccess());
logger.info(mvcResult.getResponse().getContentAsString());
logger.info("query cluster by id :{}, return result:{}", clusterCode,
mvcResult.getResponse().getContentAsString());
} |
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
return api.send(request);
} | @Test
public void underlineStrikethroughMessageEntity() {
String cap = "<u>under1</u> <ins>under2</ins> <s>strike1</s> <strike>strike2</strike> <del>strike3</del>";
cap += " <u><del>nested-tag</del></u>";
ParseMode parseMode = ParseMode.HTML;
SendAudio sendAudio = new SendAudio(chatId, audioFileId).caption(cap).parseMode(parseMode);
Message message = bot.execute(sendAudio).message();
MessageTest.checkMessage(message);
String htmlCaption = cap
.replace("<u>", "").replace("</u>", "")
.replace("<ins>", "").replace("</ins>", "")
.replace("<s>", "").replace("</s>", "")
.replace("<strike>", "").replace("</strike>", "")
.replace("<del>", "").replace("</del>", "");
assertEquals(htmlCaption, message.caption());
assertEquals(7, message.captionEntities().length);
MessageEntity captionEntity = message.captionEntities()[0];
assertEquals(MessageEntity.Type.underline, captionEntity.type());
assertEquals((Integer) 0, captionEntity.offset());
assertEquals((Integer) 6, captionEntity.length());
captionEntity = message.captionEntities()[1];
assertEquals(MessageEntity.Type.underline, captionEntity.type());
assertEquals((Integer) 7, captionEntity.offset());
assertEquals((Integer) 6, captionEntity.length());
captionEntity = message.captionEntities()[2];
assertEquals(MessageEntity.Type.strikethrough, captionEntity.type());
assertEquals((Integer) 14, captionEntity.offset());
assertEquals((Integer) 7, captionEntity.length());
captionEntity = message.captionEntities()[3];
assertEquals(MessageEntity.Type.strikethrough, captionEntity.type());
assertEquals((Integer) 22, captionEntity.offset());
assertEquals((Integer) 7, captionEntity.length());
captionEntity = message.captionEntities()[4];
assertEquals(MessageEntity.Type.strikethrough, captionEntity.type());
assertEquals((Integer) 30, captionEntity.offset());
assertEquals((Integer) 7, captionEntity.length());
captionEntity = message.captionEntities()[5];
assertEquals(MessageEntity.Type.underline, captionEntity.type());
assertEquals((Integer) 38, captionEntity.offset());
assertEquals((Integer) 10, captionEntity.length());
captionEntity = message.captionEntities()[6];
assertEquals(MessageEntity.Type.strikethrough, captionEntity.type());
assertEquals((Integer) 38, captionEntity.offset());
assertEquals((Integer) 10, captionEntity.length());
} |
public String anonymize(final ParseTree tree) {
return build(tree);
} | @Test
public void shouldAnonymizeCreateTypeCorrectly() {
// simple statement
Assert.assertEquals("CREATE TYPE type AS INTEGER;",
anon.anonymize("CREATE TYPE ADDRESS AS INTEGER;"));
// more elaborate statement
final String output = anon.anonymize(
"CREATE TYPE ADDRESS AS STRUCT<number INTEGER, street VARCHAR, city VARCHAR>;");
Approvals.verify(output);
} |
public ConcurrentHashMap<String, ConcurrentHashMap<Channel, ClientChannelInfo>> getGroupChannelTable() {
return groupChannelTable;
} | @Test
public void testGetGroupChannelTable() throws Exception {
producerManager.registerProducer(group, clientInfo);
Map<Channel, ClientChannelInfo> oldMap = producerManager.getGroupChannelTable().get(group);
producerManager.unregisterProducer(group, clientInfo);
assertThat(oldMap.size()).isEqualTo(0);
} |
@Override
public void process(Exchange exchange) throws Exception {
Object payload = exchange.getMessage().getBody();
if (payload == null) {
return;
}
JsonNode answer = computeIfAbsent(exchange);
if (answer != null) {
exchange.setProperty(SchemaHelper.CONTENT_SCHEMA, answer);
exchange.setProperty(SchemaHelper.CONTENT_SCHEMA_TYPE, SchemaType.JSON.type());
exchange.setProperty(SchemaHelper.CONTENT_CLASS, SchemaHelper.resolveContentClass(exchange, this.contentClass));
}
} | @Test
void shouldReadSchemaFromSchema() throws Exception {
Exchange exchange = new DefaultExchange(camelContext);
exchange.setProperty(SchemaHelper.CONTENT_CLASS, Person.class.getName());
String schemaString = new String(this.getClass().getResourceAsStream("person.schema.json").readAllBytes());
exchange.setProperty(SchemaHelper.SCHEMA, schemaString);
exchange.getMessage().setBody(person);
JsonSchemaResolver schemaResolver = new JsonSchemaResolver();
schemaResolver.process(exchange);
Assertions.assertNotNull(exchange.getProperty(SchemaHelper.CONTENT_SCHEMA));
Assertions.assertEquals(ObjectNode.class, exchange.getProperty(SchemaHelper.CONTENT_SCHEMA).getClass());
Assertions.assertEquals(SchemaType.JSON.type(), exchange.getProperty(SchemaHelper.CONTENT_SCHEMA_TYPE));
Assertions.assertEquals(Person.class.getName(), exchange.getProperty(SchemaHelper.CONTENT_CLASS));
} |
@Override
Class<?> getReturnType() {
throw new IllegalArgumentException("Non applicable for PortableGetter");
} | @Test(expected = IllegalArgumentException.class)
public void getReturnType() {
new PortableGetter(null).getReturnType();
} |
@Override
public Response onApply(WriteRequest request) {
final Lock lock = readLock;
lock.lock();
try {
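// Deserialize the replicated request and dispatch on the requested data operation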
final InstanceStoreRequest instanceRequest = serializer.deserialize(request.getData().toByteArray());
final DataOperation operation = DataOperation.valueOf(request.getOperation());
switch (operation) {
case ADD:
onInstanceRegister(instanceRequest.service, instanceRequest.instance,
instanceRequest.getClientId());
break;
case DELETE:
onInstanceDeregister(instanceRequest.service, instanceRequest.getClientId());
break;
case CHANGE:
if (instanceAndServiceExist(instanceRequest)) {
onInstanceRegister(instanceRequest.service, instanceRequest.instance,
instanceRequest.getClientId());
}
break;
default:
return Response.newBuilder().setSuccess(false).setErrMsg("unsupport operation : " + operation)
.build();
}
return Response.newBuilder().setSuccess(true).build();
} catch (Exception e) {
Loggers.RAFT.warn("Persistent client operation failed. ", e);
return Response.newBuilder().setSuccess(false)
.setErrMsg("Persistent client operation failed. " + e.getMessage()).build();
} finally {
lock.unlock();
}
} | @Test
void testOnApply() {
PersistentClientOperationServiceImpl.InstanceStoreRequest request = new PersistentClientOperationServiceImpl.InstanceStoreRequest();
Service service1 = Service.newService("A", "B", "C");
request.setService(service1);
request.setClientId("xxxx");
request.setInstance(new Instance());
Mockito.when(serializer.deserialize(Mockito.any())).thenReturn(request);
Mockito.when(clientManager.contains(Mockito.anyString())).thenReturn(true);
IpPortBasedClient ipPortBasedClient = Mockito.mock(IpPortBasedClient.class);
Mockito.when(clientManager.getClient(Mockito.anyString())).thenReturn(ipPortBasedClient);
WriteRequest writeRequest = WriteRequest.newBuilder().setOperation(DataOperation.ADD.name()).build();
Response response = persistentClientOperationServiceImpl.onApply(writeRequest);
assertTrue(response.getSuccess());
assertTrue(ServiceManager.getInstance().containSingleton(service1));
writeRequest = WriteRequest.newBuilder().setOperation(DataOperation.DELETE.name()).build();
response = persistentClientOperationServiceImpl.onApply(writeRequest);
assertTrue(response.getSuccess());
ServiceManager.getInstance().removeSingleton(service1);
writeRequest = WriteRequest.newBuilder().setOperation(DataOperation.VERIFY.name()).build();
response = persistentClientOperationServiceImpl.onApply(writeRequest);
assertFalse(response.getSuccess());
writeRequest = WriteRequest.newBuilder().setOperation(DataOperation.CHANGE.name()).build();
response = persistentClientOperationServiceImpl.onApply(writeRequest);
assertTrue(response.getSuccess());
assertFalse(ServiceManager.getInstance().containSingleton(service1));
} |
public RemotingChannel removeProducerChannel(ProxyContext ctx, String group, Channel channel) {
return removeChannel(buildProducerKey(group), channel);
} | @Test
public void testRemoveProducerChannel() {
String group = "group";
String clientId = RandomStringUtils.randomAlphabetic(10);
{
Channel producerChannel = createMockChannel();
RemotingChannel producerRemotingChannel = this.remotingChannelManager.createProducerChannel(ctx, producerChannel, group, clientId);
assertSame(producerRemotingChannel, this.remotingChannelManager.removeProducerChannel(ctx, group, producerRemotingChannel));
assertTrue(this.remotingChannelManager.groupChannelMap.isEmpty());
}
{
Channel producerChannel = createMockChannel();
RemotingChannel producerRemotingChannel = this.remotingChannelManager.createProducerChannel(ctx, producerChannel, group, clientId);
assertSame(producerRemotingChannel, this.remotingChannelManager.removeProducerChannel(ctx, group, producerChannel));
assertTrue(this.remotingChannelManager.groupChannelMap.isEmpty());
}
} |
public void startAsync() {
try {
udfLoader.load();
ProcessingLogServerUtils.maybeCreateProcessingLogTopic(
serviceContext.getTopicClient(),
processingLogConfig,
ksqlConfig);
if (processingLogConfig.getBoolean(ProcessingLogConfig.STREAM_AUTO_CREATE)) {
log.warn("processing log auto-create is enabled, but this is not supported "
+ "for headless mode.");
}
rocksDBConfigSetterHandler.accept(ksqlConfig);
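// Execute every statement from the queries file, then print the welcome message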
processesQueryFile(readQueriesFile(queriesFile));
showWelcomeMessage();
final Properties properties = new Properties();
ksqlConfig.originals().forEach((key, value) -> {
if (nonNull(value)) {
properties.put(key, value.toString());
}
});
versionChecker.start(KsqlModuleType.SERVER, properties);
} catch (final Exception e) {
log.error("Failed to start KSQL Server with query file: " + queriesFile, e);
throw e;
}
} | @Test(expected = RuntimeException.class)
public void shouldThrowIfParseThrows() {
// Given:
when(ksqlEngine.parse(any())).thenThrow(new RuntimeException("Boom!"));
// When:
standaloneExecutor.startAsync();
} |
@Override
public void put(ExecutionGraphInfo serializableExecutionGraphInfo) throws IOException {
serializableExecutionGraphInfos.put(
serializableExecutionGraphInfo.getJobId(), serializableExecutionGraphInfo);
} | @Test
public void testPut() throws IOException {
assertPutJobGraphWithStatus(JobStatus.FINISHED);
} |
@Override
public boolean tryClaim(StreamProgress streamProgress) {
if (shouldStop) {
return false;
}
// We perform a copy instead of an assignment because we want to ensure all references to
// streamProgress get updated.
this.streamProgress = streamProgress;
return true;
} | @Test
public void testTryClaim() {
final StreamProgress streamProgress = new StreamProgress();
final ReadChangeStreamPartitionProgressTracker tracker =
new ReadChangeStreamPartitionProgressTracker(streamProgress);
assertEquals(streamProgress, tracker.currentRestriction());
ChangeStreamContinuationToken changeStreamContinuationToken =
ChangeStreamContinuationToken.create(Range.ByteStringRange.create("a", "b"), "1234");
final StreamProgress streamProgress2 =
new StreamProgress(
changeStreamContinuationToken, Instant.now(), BigDecimal.ONE, Instant.now(), false);
assertTrue(tracker.tryClaim(streamProgress2));
assertEquals(streamProgress2, tracker.currentRestriction());
assertEquals(
streamProgress2.getEstimatedLowWatermark(),
tracker.currentRestriction().getEstimatedLowWatermark());
assertNull(tracker.trySplit(0.5));
assertEquals(streamProgress2, tracker.currentRestriction());
assertEquals(
streamProgress2.getEstimatedLowWatermark(),
tracker.currentRestriction().getEstimatedLowWatermark());
try {
tracker.checkDone();
fail("Should not reach here because checkDone should have thrown an exception");
} catch (IllegalStateException e) {
assertTrue("There's more work to be done. CheckDone threw an exception", true);
}
final SplitResult<StreamProgress> splitResult = SplitResult.of(null, streamProgress2);
assertEquals(splitResult, tracker.trySplit(0));
assertFalse(tracker.tryClaim(streamProgress2));
// No exception thrown, it is done.
tracker.checkDone();
} |
public static double lchoose(int n, int k) {
if (k < 0 || k > n) {
throw new IllegalArgumentException(String.format("Invalid n = %d, k = %d", n, k));
}
return lfactorial(n) - lfactorial(k) - lfactorial(n - k);
} | @Test
public void testLogChoose() {
System.out.println("logChoose");
assertEquals(0.0, MathEx.lchoose(10, 0), 1E-6);
assertEquals(2.302585, MathEx.lchoose(10, 1), 1E-6);
assertEquals(3.806662, MathEx.lchoose(10, 2), 1E-6);
assertEquals(4.787492, MathEx.lchoose(10, 3), 1E-6);
assertEquals(5.347108, MathEx.lchoose(10, 4), 1E-6);
} |
public static boolean webSocketHostPathMatches(String hostPath, String targetPath) {
boolean exactPathMatch = true;
if (ObjectHelper.isEmpty(hostPath) || ObjectHelper.isEmpty(targetPath)) {
// This scenario should not really be possible as the input args come from the vertx-websocket consumer / producer URI
return false;
}
// Paths ending with '*' are Vert.x wildcard routes so match on the path prefix
if (hostPath.endsWith("*")) {
exactPathMatch = false;
hostPath = hostPath.substring(0, hostPath.lastIndexOf('*'));
}
String normalizedHostPath = HttpUtils.normalizePath(hostPath + "/");
String normalizedTargetPath = HttpUtils.normalizePath(targetPath + "/");
String[] hostPathElements = normalizedHostPath.split("/");
String[] targetPathElements = normalizedTargetPath.split("/");
if (exactPathMatch && hostPathElements.length != targetPathElements.length) {
return false;
}
if (exactPathMatch) {
return normalizedHostPath.equals(normalizedTargetPath);
} else {
return normalizedTargetPath.startsWith(normalizedHostPath);
}
} | @Test
void webSocketHostWildcardPathNotMatches() {
String hostPath = "/foo/bar/cheese/wine*";
String targetPath = "/foo/bar/cheese/win";
assertFalse(VertxWebsocketHelper.webSocketHostPathMatches(hostPath, targetPath));
} |
protected boolean shouldAnalyze() {
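// Only file type analyzers can opt out of analyzing a dependency; all other analyzers always run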
if (analyzer instanceof FileTypeAnalyzer) {
final FileTypeAnalyzer fileTypeAnalyzer = (FileTypeAnalyzer) analyzer;
return fileTypeAnalyzer.accept(dependency.getActualFile());
}
return true;
} | @Test
public void shouldAnalyzeReturnsFalseIfTheFileTypeAnalyzerDoesNotAcceptTheDependency() {
final File dependencyFile = new File("");
new Expectations() {{
dependency.getActualFile();
result = dependencyFile;
fileTypeAnalyzer.accept(dependencyFile);
result = false;
}};
AnalysisTask analysisTask = new AnalysisTask(fileTypeAnalyzer, dependency, null, null);
boolean shouldAnalyze = analysisTask.shouldAnalyze();
assertFalse(shouldAnalyze);
} |
public static <K, V> VersionedKeyQuery<K, V> withKey(final K key) {
Objects.requireNonNull(key, "key cannot be null.");
return new VersionedKeyQuery<>(key, Optional.empty());
} | @Test
public void shouldThrowNPEWithNullKey() {
final Exception exception = assertThrows(NullPointerException.class, () -> VersionedKeyQuery.withKey(null));
assertEquals("key cannot be null.", exception.getMessage());
} |
public static Map<String, String> toStringMap(String... pairs) {
Map<String, String> parameters = new HashMap<>();
if (ArrayUtils.isEmpty(pairs)) {
return parameters;
}
if (pairs.length > 0) {
if (pairs.length % 2 != 0) {
throw new IllegalArgumentException("pairs must be even.");
}
for (int i = 0; i < pairs.length; i = i + 2) {
parameters.put(pairs[i], pairs[i + 1]);
}
}
return parameters;
} | @Test
void testStringMap2() {
Assertions.assertThrows(IllegalArgumentException.class, () -> toStringMap("key", "value", "odd"));
} |
public boolean isFound() {
return found;
} | @Test
public void testCalcInstructionsForTurn() {
// The street turns left, but there is no turn
Weighting weighting = new SpeedWeighting(mixedCarSpeedEnc);
Path p = new Dijkstra(roundaboutGraph.g, weighting, TraversalMode.NODE_BASED)
.calcPath(11, 13);
assertTrue(p.isFound());
InstructionList wayList = InstructionsFromEdges.calcInstructions(p, p.graph, weighting, mixedEncodingManager, tr);
// Contain start, turn, and finish instruction
assertEquals(3, wayList.size());
// Assert turn right
assertEquals(2, wayList.get(1).getSign());
} |
public MethodBuilder name(String name) {
this.name = name;
return getThis();
} | @Test
void name() {
MethodBuilder builder = MethodBuilder.newBuilder();
builder.name("name");
Assertions.assertEquals("name", builder.build().getName());
} |
@Override
public Response call(Request req) {
if (!logger.isDebugEnabled()) {
return delegate.call(req);
}
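// Log the outgoing request at debug level before delegating the call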
logger
.atDebug()
.addKeyValue("url", () -> req.uri().toString())
.addKeyValue(
"headers",
() ->
req.headers().stream()
.map(h -> h.name() + ": " + h.value())
.collect(Collectors.joining("\n")))
.addKeyValue("method", req::method)
.addKeyValue(
"body", () -> req.body() != null ? new String(req.body(), StandardCharsets.UTF_8) : "")
.log("request: %s %s".formatted(req.method(), req.uri()));
var res = delegate.call(req);
logger
.atDebug()
.addKeyValue("url", () -> req.uri().toString())
.addKeyValue("status", () -> Integer.toString(res.status()))
.addKeyValue(
"headers",
() ->
res.headers().stream()
.map(h -> h.name() + ": " + h.value())
.collect(Collectors.joining("\n")))
.addKeyValue("method", req::method)
.addKeyValue(
"body", () -> res.body() != null ? new String(res.body(), StandardCharsets.UTF_8) : "")
.log("response: %s %s %d".formatted(req.method(), req.uri(), res.status()));
return res;
} | @Test
void infoLevel() {
var res = new HttpClient.Response(200, List.of(), null);
var delegate = mock(HttpClient.class);
when(delegate.call(any())).thenReturn(res);
var sut = new LoggingHttpClient(delegate);
logger.setLevel(Level.INFO);
// when
sut.call(
new HttpClient.Request(URI.create("http://localhost:1234/test"), "GET", List.of(), null));
// then
assertTrue(logs.list.isEmpty());
} |
@Override
public Optional<Track<T>> clean(Track<T> track) {
TreeSet<Point<T>> points = new TreeSet<>(track.points());
Iterator<Point<T>> iter = points.iterator();
Long tau = null;
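// tau holds the timestamp of the most recently kept point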
while (iter.hasNext()) {
Point point = iter.next();
//the 1st time through this loop set tau and ensure the 1st point isn't removed
if (tau == null) {
tau = point.time().toEpochMilli();
continue;
}
long t = point.time().toEpochMilli();
if ((t - tau) < minAllowableTimeDelta) {
iter.remove();
} else {
tau = t;
}
}
return Optional.of(Track.of(points));
} | @Test
public void testDownSampling() {
Duration maxTimeDelta = Duration.ofSeconds(5);
TimeDownSampler<String> smoother = new TimeDownSampler<>(maxTimeDelta);
Track<String> cleanedTrack = smoother.clean(testTrack()).get();
Point last = null;
for (Point point : cleanedTrack.points()) {
if (last != null) {
Duration timeDelta = durationBtw(last.time(), point.time());
assertTrue(timeDelta.toMillis() <= maxTimeDelta.toMillis());
}
// track the previous point so the time-delta check above actually runs
last = point;
}
assertEquals(2, cleanedTrack.size());
assertEquals(EPOCH, cleanedTrack.points().first().time());
assertEquals(EPOCH.plusSeconds(5), cleanedTrack.points().last().time());
} |
@Override
public Collection<SlotOffer> offerSlots(
TaskManagerLocation taskManagerLocation,
TaskManagerGateway taskManagerGateway,
Collection<SlotOffer> offers) {
assertHasBeenStarted();
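// Ignore slot offers from task managers that are not registered with this slot pool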
if (!isTaskManagerRegistered(taskManagerLocation.getResourceID())) {
log.debug(
"Ignoring offered slots from unknown task manager {}.",
taskManagerLocation.getResourceID());
return Collections.emptyList();
}
return declarativeSlotPool.offerSlots(
offers, taskManagerLocation, taskManagerGateway, clock.relativeTimeMillis());
} | @Test
void testSlotOfferingOfUnknownTaskManagerIsIgnored() throws Exception {
try (DeclarativeSlotPoolService declarativeSlotPoolService =
createDeclarativeSlotPoolService()) {
final Collection<SlotOffer> slotOffers =
Collections.singletonList(
new SlotOffer(new AllocationID(), 0, ResourceProfile.UNKNOWN));
final LocalTaskManagerLocation taskManagerLocation = new LocalTaskManagerLocation();
final Collection<SlotOffer> acceptedSlots =
declarativeSlotPoolService.offerSlots(
taskManagerLocation,
new RpcTaskManagerGateway(
new TestingTaskExecutorGatewayBuilder()
.createTestingTaskExecutorGateway(),
jobMasterId),
slotOffers);
assertThat(acceptedSlots).isEmpty();
}
} |
public List<R> scanForResourcesUri(URI classpathResourceUri) {
requireNonNull(classpathResourceUri, "classpathResourceUri must not be null");
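// classpath: URIs are resolved against the classpath; any other URI is scanned directly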
if (CLASSPATH_SCHEME.equals(classpathResourceUri.getScheme())) {
return scanForClasspathResource(resourceName(classpathResourceUri), NULL_FILTER);
}
return findResourcesForUri(classpathResourceUri, DEFAULT_PACKAGE_NAME, NULL_FILTER, createUriResource());
} | @Test
void scanForResourcesJarUri() {
URI jarFileUri = new File("src/test/resources/io/cucumber/core/resource/test/jar-resource.jar").toURI();
URI resourceUri = URI
.create("jar:file://" + jarFileUri.getSchemeSpecificPart() + "!/com/example/package-jar-resource.txt");
List<URI> resources = resourceScanner.scanForResourcesUri(resourceUri);
assertThat(resources, contains(resourceUri));
} |
@Override
public List<String> listTableNames(String dbName) {
try (Connection connection = getConnection()) {
try (ResultSet resultSet = schemaResolver.getTables(connection, dbName)) {
ImmutableList.Builder<String> list = ImmutableList.builder();
while (resultSet.next()) {
String tableName = resultSet.getString("TABLE_NAME");
list.add(tableName);
}
return list.build();
}
} catch (SQLException e) {
throw new StarRocksConnectorException("list table names for JDBC catalog fail!", e);
}
} | @Test
public void testListTableNames() {
try {
JDBCMetadata jdbcMetadata = new JDBCMetadata(properties, "catalog", dataSource);
List<String> result = jdbcMetadata.listTableNames("test");
List<String> expectResult = Lists.newArrayList("tbl1", "tbl2", "tbl3");
Assert.assertEquals(expectResult, result);
} catch (Exception e) {
Assert.fail();
}
} |
@Override
public ObjectNode encode(Criterion criterion, CodecContext context) {
EncodeCriterionCodecHelper encoder = new EncodeCriterionCodecHelper(criterion, context);
return encoder.encode();
} | @Test
public void matchOduSignalTypeTest() {
OduSignalType signalType = OduSignalType.ODU2;
Criterion criterion = Criteria.matchOduSignalType(signalType);
ObjectNode result = criterionCodec.encode(criterion, context);
assertThat(result, matchesCriterion(criterion));
} |
@Override
public Point<NopHit> next() {
Point<NopHit> returnMe = nextPoint;
this.nextPoint = getNext();
return returnMe;
} | @Test
public void testNext() throws Exception {
File testFile = buildTestFile("testNopFileB.txt");
PointIterator iter = new PointIterator(new NopParser(testFile));
int numPoints = 0;
while (iter.hasNext()) {
Point next = iter.next();
if (numPoints == 0) {
assertEquals(new LatLong(032.35068,-084.54589), next.latLong());
}
if (numPoints == 1) {
assertEquals(LatLong.of(032.72556, -082.78178), next.latLong());
}
numPoints++;
}
assertEquals(2, numPoints);
} |
public String table(TableIdentifier ident) {
return SLASH.join(
"v1",
prefix,
"namespaces",
RESTUtil.encodeNamespace(ident.namespace()),
"tables",
RESTUtil.encodeString(ident.name()));
} | @Test
public void testTableWithSlash() {
TableIdentifier ident = TableIdentifier.of("n/s", "tab/le");
assertThat(withPrefix.table(ident)).isEqualTo("v1/ws/catalog/namespaces/n%2Fs/tables/tab%2Fle");
assertThat(withoutPrefix.table(ident)).isEqualTo("v1/namespaces/n%2Fs/tables/tab%2Fle");
} |
public static Criterion matchSctpDst(TpPort sctpPort) {
return new SctpPortCriterion(sctpPort, Type.SCTP_DST);
} | @Test
public void testMatchSctpDstMethod() {
Criterion matchSctpDst = Criteria.matchSctpDst(tpPort1);
SctpPortCriterion sctpPortCriterion =
checkAndConvert(matchSctpDst,
Criterion.Type.SCTP_DST,
SctpPortCriterion.class);
assertThat(sctpPortCriterion.sctpPort(), is(equalTo(tpPort1)));
} |
@Override
public ManageSnapshots createBranch(String name) {
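// Branch from the current snapshot when the table already has one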
Snapshot currentSnapshot = transaction.currentMetadata().currentSnapshot();
if (currentSnapshot != null) {
return createBranch(name, currentSnapshot.snapshotId());
}
SnapshotRef existingRef = transaction.currentMetadata().ref(name);
Preconditions.checkArgument(existingRef == null, "Ref %s already exists", name);
// Create an empty snapshot for the branch
transaction.newFastAppend().toBranch(name).commit();
return this;
} | @TestTemplate
public void testCreateBranch() {
table.newAppend().appendFile(FILE_A).commit();
long snapshotId = table.currentSnapshot().snapshotId();
// Test a basic case of creating a branch
table.manageSnapshots().createBranch("branch1", snapshotId).commit();
SnapshotRef expectedBranch = table.ops().refresh().ref("branch1");
assertThat(expectedBranch).isNotNull().isEqualTo(SnapshotRef.branchBuilder(snapshotId).build());
} |