focal_method | test_case |
---|---|
public String toBaseMessageIdString(Object messageId) {
if (messageId == null) {
return null;
} else if (messageId instanceof String) {
String stringId = (String) messageId;
// If the given string has a type encoding prefix,
// we need to escape it as an encoded string (even if
// the existing encoding prefix was also for string)
if (hasTypeEncodingPrefix(stringId)) {
return AMQP_STRING_PREFIX + stringId;
} else {
return stringId;
}
} else if (messageId instanceof UUID) {
return AMQP_UUID_PREFIX + messageId.toString();
} else if (messageId instanceof UnsignedLong) {
return AMQP_ULONG_PREFIX + messageId.toString();
} else if (messageId instanceof Binary) {
ByteBuffer dup = ((Binary) messageId).asByteBuffer();
byte[] bytes = new byte[dup.remaining()];
dup.get(bytes);
String hex = convertBinaryToHexString(bytes);
return AMQP_BINARY_PREFIX + hex;
} else {
throw new IllegalArgumentException("Unsupported type provided: " + messageId.getClass());
}
} | @Test
public void testToBaseMessageIdStringWithStringBeginningWithEncodingPrefixForUUID() {
String uuidStringMessageId = AMQPMessageIdHelper.AMQP_UUID_PREFIX + UUID.randomUUID();
String expected = AMQPMessageIdHelper.AMQP_STRING_PREFIX + uuidStringMessageId;
String baseMessageIdString = messageIdHelper.toBaseMessageIdString(uuidStringMessageId);
assertNotNull("null string should not have been returned", baseMessageIdString);
assertEquals("expected base id string was not returned", expected, baseMessageIdString);
} |
@Override
public boolean isSharable() {
return decoder.isSharable();
} | @Test
public void testIsSharable() {
testIsSharable(true);
} |
public static FuryBuilder builder() {
return new FuryBuilder();
} | @Test(dataProvider = "enableCodegen")
public void testSerializeJDKObject(boolean enableCodegen) {
Fury fury =
Fury.builder()
.withLanguage(Language.JAVA)
.withJdkClassSerializableCheck(false)
.requireClassRegistration(false)
.withCodegen(enableCodegen)
.build();
StringTokenizer tokenizer = new StringTokenizer("abc,1,23", ",");
assertEquals(serDe(fury, tokenizer).countTokens(), tokenizer.countTokens());
} |
@Override
public void write(final PostgreSQLPacketPayload payload, final Object value) {
throw new UnsupportedSQLOperationException("PostgreSQLInt8ArrayBinaryProtocolValue.write()");
} | @Test
void assertWrite() {
assertThrows(UnsupportedSQLOperationException.class, () -> newInstance().write(new PostgreSQLPacketPayload(null, StandardCharsets.UTF_8), "val"));
} |
@Override
public <T> Future<T> background(final BackgroundAction<T> action) {
if(registry.contains(action)) {
log.warn(String.format("Skip duplicate background action %s found in registry", action));
return ConcurrentUtils.constantFuture(null);
}
return DefaultBackgroundExecutor.get().execute(this, registry, action);
} | @Test
public void testBackground() throws Exception {
final AbstractController controller = new AbstractController() {
@Override
public void invoke(final MainAction runnable, final boolean wait) {
runnable.run();
}
};
final AbstractBackgroundAction<Object> action = new RegistryBackgroundAction<Object>(controller, SessionPool.DISCONNECTED) {
@Override
public void init() {
assertEquals("main", Thread.currentThread().getName());
}
@Override
public Object run(final Session<?> session) {
return null;
}
@Override
public void cleanup() {
super.cleanup();
assertFalse(controller.getRegistry().contains(this));
}
};
controller.background(action).get();
} |
@Override
public ByteBuf copy() {
return copy(readerIndex, readableBytes());
} | @Test
public void copyBoundaryCheck4() {
assertThrows(IndexOutOfBoundsException.class, new Executable() {
@Override
public void execute() {
buffer.copy(buffer.capacity(), 1);
}
});
} |
@Override
public void flush() throws IOException {
mLocalOutputStream.flush();
} | @Test
@PrepareForTest(OBSOutputStream.class)
public void testFlush() throws Exception {
PowerMockito.whenNew(BufferedOutputStream.class)
.withArguments(any(DigestOutputStream.class)).thenReturn(mLocalOutputStream);
OBSOutputStream stream = new OBSOutputStream("testBucketName", "testKey", mObsClient,
sConf.getList(PropertyKey.TMP_DIRS));
stream.flush();
stream.close();
Mockito.verify(mLocalOutputStream).flush();
} |
static void warmField(Class<?> context, Field field, ExecutorService compilationService) {
Class<?> fieldRawType = field.getType();
if (fieldRawType.isPrimitive()
|| fieldRawType == String.class
|| fieldRawType == Object.class) {
return;
}
if (TypeUtils.isBoxed(fieldRawType)) {
return;
}
if (fieldRawType == context) {
// avoid duplicate build.
return;
}
if (!fieldRawType.getName().startsWith("java")) {
compilationService.submit(
() -> {
// use a flag to avoid blocking thread.
AtomicBoolean flag = flags.computeIfAbsent(fieldRawType, k -> new AtomicBoolean(false));
if (flag.compareAndSet(false, true)) {
getAllDescriptorsMap(fieldRawType);
}
});
} else if (TypeUtils.isCollection(fieldRawType) || TypeUtils.isMap(fieldRawType)) {
// warm up generic type, sun.reflect.generics.repository.FieldRepository
// is expensive.
compilationService.submit(() -> warmGenericTask(TypeRef.of(field.getGenericType())));
} else if (fieldRawType.isArray()) {
Class<?> componentType = fieldRawType.getComponentType();
if (!componentType.isPrimitive()) {
compilationService.submit(() -> warmGenericTask(TypeRef.of(field.getGenericType())));
}
}
} | @Test
public void testWarmField() throws Exception {
Assert.assertEquals(int.class.getName(), "int");
Assert.assertEquals(Integer.class.getName(), "java.lang.Integer");
Descriptor.warmField(
BeanA.class, BeanA.class.getDeclaredField("beanB"), CodeGenerator.getCompilationService());
Descriptor.getAllDescriptorsMap(BeanA.class);
Descriptor.clearDescriptorCache();
Descriptor.getAllDescriptorsMap(BeanA.class);
} |
public static DeltaLakeTable convertDeltaToSRTable(String catalog, String dbName, String tblName, String path,
Engine deltaEngine, long createTime) {
SnapshotImpl snapshot;
try (Timer ignored = Tracers.watchScope(EXTERNAL, "DeltaLake.getSnapshot")) {
Table deltaTable = Table.forPath(deltaEngine, path);
snapshot = (SnapshotImpl) deltaTable.getLatestSnapshot(deltaEngine);
} catch (TableNotFoundException e) {
LOG.error("Failed to find Delta table for {}.{}.{}, {}", catalog, dbName, tblName, e.getMessage());
throw new SemanticException("Failed to find Delta table for " + catalog + "." + dbName + "." + tblName);
} catch (Exception e) {
LOG.error("Failed to get latest snapshot for {}.{}.{}, {}", catalog, dbName, tblName, e.getMessage());
throw new SemanticException("Failed to get latest snapshot for " + catalog + "." + dbName + "." + tblName);
}
StructType deltaSchema = snapshot.getSchema(deltaEngine);
if (deltaSchema == null) {
throw new IllegalArgumentException(String.format("Unable to find Schema information in Delta log for " +
"%s.%s.%s", catalog, dbName, tblName));
}
List<Column> fullSchema = Lists.newArrayList();
for (StructField field : deltaSchema.fields()) {
DataType dataType = field.getDataType();
Type type;
try {
type = ColumnTypeConverter.fromDeltaLakeType(dataType);
} catch (InternalError | Exception e) {
LOG.error("Failed to convert delta type {} on {}.{}.{}", dataType.toString(), catalog, dbName, tblName, e);
type = Type.UNKNOWN_TYPE;
}
Column column = new Column(field.getName(), type, true);
fullSchema.add(column);
}
return new DeltaLakeTable(CONNECTOR_ID_GENERATOR.getNextId().asInt(), catalog, dbName, tblName,
fullSchema, Lists.newArrayList(snapshot.getMetadata().getPartitionColNames()), snapshot, path,
deltaEngine, createTime);
} | @Test
public void testConvertDeltaToSRTableWithException2() {
expectedEx.expect(SemanticException.class);
expectedEx.expectMessage("Failed to get latest snapshot for catalog.db.tbl");
Table table = new Table() {
public Table forPath(Engine engine, String path) {
return this;
}
@Override
public String getPath(Engine engine) {
return null;
}
@Override
public SnapshotImpl getLatestSnapshot(Engine engine) {
throw new RuntimeException("Failed to get latest snapshot");
}
@Override
public Snapshot getSnapshotAsOfVersion(Engine engine, long versionId) throws TableNotFoundException {
return null;
}
@Override
public Snapshot getSnapshotAsOfTimestamp(Engine engine, long millisSinceEpochUTC)
throws TableNotFoundException {
return null;
}
@Override
public TransactionBuilder createTransactionBuilder(Engine engine, String engineInfo, Operation operation) {
return null;
}
@Override
public void checkpoint(Engine engine, long version)
throws TableNotFoundException, CheckpointAlreadyExistsException, IOException {
}
};
new MockUp<TableImpl>() {
@Mock
public Table forPath(Engine engine, String path) {
return table;
}
};
DeltaUtils.convertDeltaToSRTable("catalog", "db", "tbl", "path",
DeltaLakeEngine.create(new Configuration()), 0);
} |
public static <T> T toObj(byte[] json, Class<T> cls) {
try {
return mapper.readValue(json, cls);
} catch (Exception e) {
throw new NacosDeserializationException(cls, e);
}
} | @Test
void testToObject8() {
assertThrows(Exception.class, () -> {
JacksonUtils.toObj(new ByteArrayInputStream("{not_A}Json:String}".getBytes()), Object.class);
});
} |
public static void convertAttachment(
DefaultHttp2Headers headers, Map<String, Object> attachments, boolean needConvertHeaderKey) {
if (attachments == null) {
return;
}
Map<String, String> needConvertKey = new HashMap<>();
for (Map.Entry<String, Object> entry : attachments.entrySet()) {
String key = lruHeaderMap.get(entry.getKey());
if (key == null) {
final String lowerCaseKey = entry.getKey().toLowerCase(Locale.ROOT);
lruHeaderMap.put(entry.getKey(), lowerCaseKey);
key = lowerCaseKey;
}
if (TripleHeaderEnum.containsExcludeAttachments(key)) {
continue;
}
final Object v = entry.getValue();
if (v == null) {
continue;
}
if (needConvertHeaderKey && !key.equals(entry.getKey())) {
needConvertKey.put(key, entry.getKey());
}
convertSingleAttachment(headers, key, v);
}
if (!needConvertKey.isEmpty()) {
String needConvertJson = JsonUtils.toJson(needConvertKey);
headers.add(TripleHeaderEnum.TRI_HEADER_CONVERT.getHeader(), TriRpcStatus.encodeMessage(needConvertJson));
}
} | @Test
void testConvertAttachment() throws InterruptedException {
ExecutorService executorService = Executors.newFixedThreadPool(10);
DefaultHttp2Headers headers = new DefaultHttp2Headers();
headers.add("key", "value");
Map<String, Object> attachments = new HashMap<>();
attachments.put(TripleHeaderEnum.PATH_KEY.getHeader(), "value");
attachments.put("key1111", "value");
attachments.put("Upper", "Upper");
attachments.put("obj", new Object());
StreamUtils.convertAttachment(headers, attachments, false);
Assertions.assertNull(headers.get(TripleHeaderEnum.PATH_KEY.getHeader()));
Assertions.assertNull(headers.get("Upper"));
Assertions.assertNull(headers.get("obj"));
headers = new DefaultHttp2Headers();
headers.add("key", "value");
StreamUtils.convertAttachment(headers, attachments, true);
Assertions.assertNull(headers.get(TripleHeaderEnum.PATH_KEY.getHeader()));
Assertions.assertNull(headers.get("Upper"));
Assertions.assertNull(headers.get("obj"));
String jsonRaw =
headers.get(TripleHeaderEnum.TRI_HEADER_CONVERT.getHeader()).toString();
String json = TriRpcStatus.decodeMessage(jsonRaw);
System.out.println(jsonRaw + "---" + json);
Map<String, String> upperMap = JsonUtils.toJavaObject(json, Map.class);
Assertions.assertArrayEquals(
"Upper".getBytes(StandardCharsets.UTF_8), upperMap.get("upper").getBytes(StandardCharsets.UTF_8));
int count = 10000;
CountDownLatch latch = new CountDownLatch(count);
for (int i = 0; i < count; i++) {
String randomKey = "key" + i;
String randomValue = "value" + i;
Map<String, Object> attachments2 = new HashMap<>();
attachments2.put(TripleHeaderEnum.PATH_KEY.getHeader(), "value");
attachments2.put("key1111", "value");
attachments2.put("Upper", "Upper");
attachments2.put("obj", new Object());
attachments2.put(randomKey, randomValue);
executorService.execute(() -> {
DefaultHttp2Headers headers2 = new DefaultHttp2Headers();
headers2.add("key", "value");
StreamUtils.convertAttachment(headers2, attachments2, true);
if (headers2.get(TripleHeaderEnum.PATH_KEY.getHeader()) != null) {
return;
}
if (headers2.get("Upper") != null) {
return;
}
if (headers2.get("obj") != null) {
return;
}
if (!headers2.get(randomKey).toString().equals(randomValue)) {
return;
}
latch.countDown();
});
}
latch.await(10, TimeUnit.SECONDS);
Assertions.assertEquals(0, latch.getCount());
executorService.shutdown();
} |
@Override
public List<String> listPartitionNames(String databaseName, String tableName, TableVersionRange version) {
return deltaOps.getPartitionKeys(databaseName, tableName);
} | @Test
public void testListPartitionNames(@Mocked SnapshotImpl snapshot, @Mocked ScanBuilder scanBuilder,
@Mocked Scan scan) {
new MockUp<DeltaUtils>() {
@Mock
public DeltaLakeTable convertDeltaToSRTable(String catalog, String dbName, String tblName, String path,
Engine deltaEngine, long createTime) {
return new DeltaLakeTable(1, "delta0", "db1", "table1",
Lists.newArrayList(), Lists.newArrayList("ts"), snapshot,
"s3://bucket/path/to/table", null, 0);
}
};
// mock schema:
// struct<add:struct<path:string,partitionValues:map<string,string>>>
List<FilteredColumnarBatch> filteredColumnarBatches = Lists.newArrayList();
ColumnVector[] addFileCols = new ColumnVector[2];
addFileCols[0] = new DefaultBinaryVector(BasePrimitiveType.createPrimitive("string"),
3, new byte[][] {new byte[] {'0', '0', '0', '0'},
new byte[] {'0', '0', '0', '1'}, new byte[] {'0', '0', '0', '2'}});
int[] offsets = new int[] {0, 1, 2, 3};
DataType mapType = new MapType(StringType.STRING, StringType.STRING, true);
addFileCols[1] = new DefaultMapVector(3, mapType, Optional.empty(), offsets,
new DefaultBinaryVector(BasePrimitiveType.createPrimitive("string"),
3, new byte[][] {new byte[] {'t', 's'}, new byte[] {'t', 's'}, new byte[] {'t', 's'}}),
new DefaultBinaryVector(BasePrimitiveType.createPrimitive("string"),
3, new byte[][] {new byte[] {'1', '9', '9', '9'}, new byte[] {'2', '0', '0', '0'},
new byte[] {'2', '0', '0', '1'}})
);
// addFile schema; here we only care about the partitionValues, so we don't use all the fields
StructType addFileSchema = new StructType(Lists.newArrayList(
new StructField("path", BasePrimitiveType.createPrimitive("string"), true, null),
new StructField("partitionValues", mapType, true, null)));
DefaultStructVector addFile = new DefaultStructVector(3, addFileSchema, Optional.empty(), addFileCols);
// construct a columnar batch which only contains addFile
ColumnarBatch columnarBatch = new DefaultColumnarBatch(3,
new StructType(Lists.newArrayList(new StructField("add", addFileSchema, true))),
new DefaultStructVector[] {addFile});
FilteredColumnarBatch filteredColumnarBatch = new FilteredColumnarBatch(columnarBatch, Optional.empty());
filteredColumnarBatches.add(filteredColumnarBatch);
CloseableIterator<FilteredColumnarBatch> scanFilesAsBatches = new CloseableIterator<FilteredColumnarBatch>() {
private int index = 0;
@Override
public boolean hasNext() {
return index < filteredColumnarBatches.size();
}
@Override
public FilteredColumnarBatch next() {
return filteredColumnarBatches.get(index++);
}
@Override
public void close() {
}
};
new Expectations() {
{
snapshot.getScanBuilder((Engine) any);
result = scanBuilder;
minTimes = 0;
scanBuilder.build();
result = scan;
minTimes = 0;
scan.getScanFiles((Engine) any);
result = scanFilesAsBatches;
minTimes = 0;
}
};
List<String> partitionNames = deltaLakeMetadata.listPartitionNames("db1", "table1",
TableVersionRange.empty());
Assert.assertEquals(3, partitionNames.size());
Assert.assertEquals("ts=1999", partitionNames.get(0));
Assert.assertEquals("ts=2000", partitionNames.get(1));
Assert.assertEquals("ts=2001", partitionNames.get(2));
} |
@SuppressWarnings("unused") // Part of required API.
public void execute(
final ConfiguredStatement<InsertValues> statement,
final SessionProperties sessionProperties,
final KsqlExecutionContext executionContext,
final ServiceContext serviceContext
) {
final InsertValues insertValues = statement.getStatement();
final MetaStore metaStore = executionContext.getMetaStore();
final KsqlConfig config = statement.getSessionConfig().getConfig(true);
final DataSource dataSource = getDataSource(config, metaStore, insertValues);
validateInsert(insertValues.getColumns(), dataSource);
final ProducerRecord<byte[], byte[]> record =
buildRecord(statement, metaStore, dataSource, serviceContext);
try {
producer.sendRecord(record, serviceContext, config.getProducerClientConfigProps());
} catch (final TopicAuthorizationException e) {
// TopicAuthorizationException does not give much detailed information about why it failed,
// except which topics are denied. Here we just add the ACL to make the error message
// consistent with other authorization error messages.
final Exception rootCause = new KsqlTopicAuthorizationException(
AclOperation.WRITE,
e.unauthorizedTopics()
);
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), rootCause);
} catch (final ClusterAuthorizationException e) {
// ClusterAuthorizationException is thrown when using idempotent producers
// and either a topic write permission or a cluster-level idempotent write
// permission (only applicable for broker versions no later than 2.8) is
// missing. In this case, we include additional context to help the user
// distinguish this type of failure from other permissions exceptions
// such as the ones thrown above when TopicAuthorizationException is caught.
throw new KsqlException(
createInsertFailedExceptionMessage(insertValues),
createClusterAuthorizationExceptionRootCause(dataSource)
);
} catch (final KafkaException e) {
if (e.getCause() != null && e.getCause() instanceof ClusterAuthorizationException) {
// The error message thrown when an idempotent producer is missing permissions
// is (nondeterministically) inconsistent: it is either a raw ClusterAuthorizationException,
// as checked for above, or a ClusterAuthorizationException wrapped inside a KafkaException.
// ksqlDB handles these two the same way, accordingly.
// See https://issues.apache.org/jira/browse/KAFKA-14138 for more.
throw new KsqlException(
createInsertFailedExceptionMessage(insertValues),
createClusterAuthorizationExceptionRootCause(dataSource)
);
} else {
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
}
} catch (final Exception e) {
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
}
} | @Test
public void shouldThrowWhenInsertingValuesOnSourceStream() {
// Given:
givenDataSourceWithSchema("source_stream_1", SCHEMA,
SerdeFeatures.of(), SerdeFeatures.of(), false, true);
final KsqlConfig ksqlConfig = new KsqlConfig(ImmutableMap.of());
final ConfiguredStatement<InsertValues> statement = ConfiguredStatement.of(
PreparedStatement.of(
"",
new InsertValues(SourceName.of("TOPIC"),
allAndPseudoColumnNames(SCHEMA),
ImmutableList.of(
new LongLiteral(1L),
new StringLiteral("str"),
new StringLiteral("str"),
new LongLiteral(2L)
))),
SessionConfig.of(ksqlConfig, ImmutableMap.of())
);
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> executor.execute(statement, mock(SessionProperties.class), engine, serviceContext)
);
// Then:
assertThat(e.getMessage(),
containsString("Cannot insert values into read-only stream: TOPIC"));
} |
public static DatabaseType get(final String url) {
Collection<DatabaseType> databaseTypes = ShardingSphereServiceLoader.getServiceInstances(DatabaseType.class).stream().filter(each -> matchURLs(url, each)).collect(Collectors.toList());
ShardingSpherePreconditions.checkNotEmpty(databaseTypes, () -> new UnsupportedStorageTypeException(url));
for (DatabaseType each : databaseTypes) {
if (!each.getTrunkDatabaseType().isPresent()) {
return each;
}
}
return databaseTypes.iterator().next();
} | @Test
void assertGetDatabaseTypeWithUnrecognizedURL() {
assertThrows(UnsupportedStorageTypeException.class, () -> DatabaseTypeFactory.get("jdbc:not-existed:test"));
} |
public CompletableFuture<Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData>> acknowledge(
String memberId,
String groupId,
Map<TopicIdPartition, List<ShareAcknowledgementBatch>> acknowledgeTopics
) {
log.trace("Acknowledge request for topicIdPartitions: {} with groupId: {}",
acknowledgeTopics.keySet(), groupId);
this.shareGroupMetrics.shareAcknowledgement();
Map<TopicIdPartition, CompletableFuture<Errors>> futures = new HashMap<>();
acknowledgeTopics.forEach((topicIdPartition, acknowledgePartitionBatches) -> {
SharePartition sharePartition = partitionCacheMap.get(sharePartitionKey(groupId, topicIdPartition));
if (sharePartition != null) {
CompletableFuture<Errors> future = sharePartition.acknowledge(memberId, acknowledgePartitionBatches).thenApply(throwable -> {
if (throwable.isPresent()) {
return Errors.forException(throwable.get());
}
acknowledgePartitionBatches.forEach(batch -> {
batch.acknowledgeTypes().forEach(this.shareGroupMetrics::recordAcknowledgement);
});
return Errors.NONE;
});
futures.put(topicIdPartition, future);
} else {
futures.put(topicIdPartition, CompletableFuture.completedFuture(Errors.UNKNOWN_TOPIC_OR_PARTITION));
}
});
CompletableFuture<Void> allFutures = CompletableFuture.allOf(
futures.values().toArray(new CompletableFuture[0]));
return allFutures.thenApply(v -> {
Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData> result = new HashMap<>();
futures.forEach((topicIdPartition, future) -> result.put(topicIdPartition, new ShareAcknowledgeResponseData.PartitionData()
.setPartitionIndex(topicIdPartition.partition())
.setErrorCode(future.join().code())));
return result;
});
} | @Test
public void testAcknowledgeIncorrectMemberId() {
String groupId = "grp";
String memberId = Uuid.randomUuid().toString();
TopicIdPartition tp = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0));
SharePartition sp = mock(SharePartition.class);
when(sp.acknowledge(ArgumentMatchers.eq(memberId), any())).thenReturn(CompletableFuture.completedFuture(
Optional.of(new InvalidRequestException("Member is not the owner of batch record"))
));
Map<SharePartitionManager.SharePartitionKey, SharePartition> partitionCacheMap = new HashMap<>();
partitionCacheMap.put(new SharePartitionManager.SharePartitionKey(groupId, tp), sp);
SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder()
.withPartitionCacheMap(partitionCacheMap).build();
Map<TopicIdPartition, List<ShareAcknowledgementBatch>> acknowledgeTopics = new HashMap<>();
acknowledgeTopics.put(tp, Arrays.asList(
new ShareAcknowledgementBatch(12, 20, Collections.singletonList((byte) 1)),
new ShareAcknowledgementBatch(24, 56, Collections.singletonList((byte) 1))
));
CompletableFuture<Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData>> resultFuture =
sharePartitionManager.acknowledge(memberId, groupId, acknowledgeTopics);
Map<TopicIdPartition, ShareAcknowledgeResponseData.PartitionData> result = resultFuture.join();
assertEquals(1, result.size());
assertTrue(result.containsKey(tp));
assertEquals(0, result.get(tp).partitionIndex());
assertEquals(Errors.INVALID_REQUEST.code(), result.get(tp).errorCode());
} |
public String lockName() {
return kind + "::" + namespace + "::" + name;
} | @Test
public void testLockName() {
SimplifiedReconciliation r1 = new SimplifiedReconciliation("kind", "my-namespace", "my-name", "watch");
String lockName = r1.lockName();
assertThat(lockName, is("kind::my-namespace::my-name"));
} |
public static <T> T readStaticFieldOrNull(String className, String fieldName) {
try {
Class<?> clazz = Class.forName(className);
return readStaticField(clazz, fieldName);
} catch (ClassNotFoundException | NoSuchFieldException | IllegalAccessException | SecurityException e) {
return null;
}
} | @Test
public void readStaticFieldOrNull_readFromPrivateField() {
String field = ReflectionUtils.readStaticFieldOrNull(MyClass.class.getName(), "staticPrivateField");
assertEquals("staticPrivateFieldContent", field);
} |
public String getCustomError(HttpRequestWrapper req, HttpResponseWrapper res) {
for (MatcherAndError m : matchersAndLogs) {
if (m.getMatcher().matchResponse(req, res)) {
return m.getCustomError().customError(req, res);
}
}
return null;
} | @Test
public void testNotMatchesCodeAndUrlContains() throws IOException {
HttpRequestWrapper request = createHttpRequest(BQ_TABLES_LIST_URL);
HttpResponseWrapper response = createHttpResponse(404);
CustomHttpErrors.Builder builder = new CustomHttpErrors.Builder();
builder.addErrorForCodeAndUrlContains(403, "/doesnotmatch?", "Custom Error Msg");
CustomHttpErrors customErrors = builder.build();
String errorMessage = customErrors.getCustomError(request, response);
assertNull(errorMessage);
} |
@Override
@Transactional(rollbackFor = Exception.class)
@LogRecord(type = SYSTEM_ROLE_TYPE, subType = SYSTEM_ROLE_CREATE_SUB_TYPE, bizNo = "{{#role.id}}",
success = SYSTEM_ROLE_CREATE_SUCCESS)
public Long createRole(RoleSaveReqVO createReqVO, Integer type) {
// 1. Validate that the role is not a duplicate
validateRoleDuplicate(createReqVO.getName(), createReqVO.getCode(), null);
// 2. Insert into the database
RoleDO role = BeanUtils.toBean(createReqVO, RoleDO.class)
.setType(ObjectUtil.defaultIfNull(type, RoleTypeEnum.CUSTOM.getType()))
.setStatus(CommonStatusEnum.ENABLE.getStatus())
.setDataScope(DataScopeEnum.ALL.getScope()); // Defaults to all-data visibility, since some projects may not need data-scope permissions
roleMapper.insert(role);
// 3. Record the operation-log context
LogRecordContext.putVariable("role", role);
return role.getId();
} | @Test
public void testCreateRole() {
// Prepare parameters
RoleSaveReqVO reqVO = randomPojo(RoleSaveReqVO.class)
.setId(null); // prevent the id from being pre-assigned
// Invoke
Long roleId = roleService.createRole(reqVO, null);
// Assert
RoleDO roleDO = roleMapper.selectById(roleId);
assertPojoEquals(reqVO, roleDO, "id");
assertEquals(RoleTypeEnum.CUSTOM.getType(), roleDO.getType());
assertEquals(CommonStatusEnum.ENABLE.getStatus(), roleDO.getStatus());
assertEquals(DataScopeEnum.ALL.getScope(), roleDO.getDataScope());
} |
public static String capitalize(String string) {
return string == null ? null : string.substring( 0, 1 ).toUpperCase( Locale.ROOT ) + string.substring( 1 );
} | @Test
@DefaultLocale("tr")
public void capitalizeTurkish() {
String international = Strings.capitalize( "international" );
assertThat( international ).isEqualTo( "International" );
} |
@Override
public boolean perform(AbstractBuild<?, ?> build, Launcher launcher, BuildListener listener) throws InterruptedException, IOException {
if (this instanceof SimpleBuildStep) {
// delegate to the overloaded version defined in SimpleBuildStep
final SimpleBuildStep step = (SimpleBuildStep) this;
final FilePath workspace = build.getWorkspace();
if (step.requiresWorkspace() && workspace == null) {
throw new AbortException("no workspace for " + build);
}
if (workspace != null) { // if we have one, provide it regardless of whether it's _required_
step.perform(build, workspace, build.getEnvironment(listener), launcher, listener);
} else {
step.perform(build, build.getEnvironment(listener), listener);
}
return true;
} else if (build instanceof Build) {
// delegate to the legacy signature deprecated in 1.312
return perform((Build) build, launcher, listener);
} else {
return true;
}
} | @Issue("JENKINS-18734")
@Test
@SuppressWarnings("deprecation") /* testing deprecated variant */
public void testPerformExpectAbstractMethodError() {
FreeStyleBuild mock = Mockito.mock(FreeStyleBuild.class, Mockito.CALLS_REAL_METHODS);
BuildStepCompatibilityLayer bscl = new BuildStepCompatibilityLayer() {};
assertThrows(AbstractMethodError.class, () -> bscl.perform(mock, null, null));
} |
public synchronized int sendFetches() {
final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
sendFetchesInternal(
fetchRequests,
(fetchTarget, data, clientResponse) -> {
synchronized (Fetcher.this) {
handleFetchSuccess(fetchTarget, data, clientResponse);
}
},
(fetchTarget, data, error) -> {
synchronized (Fetcher.this) {
handleFetchFailure(fetchTarget, data, error);
}
});
return fetchRequests.size();
} | @Test
public void testFetchWithNoTopicId() {
// Should work and default to using old request type.
buildFetcher();
TopicIdPartition noId = new TopicIdPartition(Uuid.ZERO_UUID, new TopicPartition("noId", 0));
assignFromUser(noId.topicPartition());
subscriptions.seek(noId.topicPartition(), 0);
assertEquals(1, sendFetches());
assertFalse(fetcher.hasCompletedFetches());
// Fetch should use request version 12
client.prepareResponse(
fetchRequestMatcher((short) 12, noId, 0, Optional.of(validLeaderEpoch)),
fullFetchResponse(noId, records, Errors.NONE, 100L, 0)
);
consumerClient.poll(time.timer(0));
assertTrue(fetcher.hasCompletedFetches());
Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords = fetchRecords();
assertTrue(partitionRecords.containsKey(noId.topicPartition()));
List<ConsumerRecord<byte[], byte[]>> records = partitionRecords.get(noId.topicPartition());
assertEquals(3, records.size());
assertEquals(4L, subscriptions.position(noId.topicPartition()).offset); // this is the next fetching position
long offset = 1;
for (ConsumerRecord<byte[], byte[]> record : records) {
assertEquals(offset, record.offset());
offset += 1;
}
} |
public int doWork()
{
final long nowNs = nanoClock.nanoTime();
trackTime(nowNs);
int workCount = 0;
workCount += processTimers(nowNs);
if (!asyncClientCommandInFlight)
{
workCount += clientCommandAdapter.receive();
}
workCount += drainCommandQueue();
workCount += trackStreamPositions(workCount, nowNs);
workCount += nameResolver.doWork(cachedEpochClock.time());
workCount += freeEndOfLifeResources(ctx.resourceFreeLimit());
return workCount;
} | @Test
void shouldErrorWhenConflictingUnreliableSubscriptionAdded()
{
driverProxy.addSubscription(CHANNEL_4000, STREAM_ID_1);
driverConductor.doWork();
final long id2 = driverProxy.addSubscription(CHANNEL_4000 + "|reliable=false", STREAM_ID_1);
driverConductor.doWork();
verify(mockClientProxy).onError(eq(id2), any(ErrorCode.class), anyString());
} |
public CardinalityEstimatorConfig setAsyncBackupCount(int asyncBackupCount) {
this.asyncBackupCount = checkAsyncBackupCount(backupCount, asyncBackupCount);
return this;
} | @Test(expected = IllegalArgumentException.class)
public void testSetAsyncBackupCount_withNegativeValue() {
config.setAsyncBackupCount(-1);
} |
public List<Integer> findLongestMatch(String key) {
TST current;
List<Integer> ordinals;
do {
current = prefixIndexVolatile;
long nodeIndex = current.findLongestMatch(key);
ordinals = current.getOrdinals(nodeIndex);
} while (current != this.prefixIndexVolatile);
return ordinals;
} | @Test
public void testLongestPrefixMatch() throws Exception {
// "The Matrix"
// "Blood Diamond"
// "Rush"
// "Rocky"
// "The Matrix Reloaded"
// "The Matrix Resurrections"
for (Movie movie : getSimpleList()) {
objectMapper.add(movie);
}
StateEngineRoundTripper.roundTripSnapshot(writeStateEngine, readStateEngine);
HollowTokenizedPrefixIndex tokenizedPrefixIndex = new HollowTokenizedPrefixIndex(readStateEngine, "SimpleMovie", "name.value", false);
List<Integer> match;
match = tokenizedPrefixIndex.findLongestMatch("rush");
Assert.assertTrue(match.get(0) > -1);
match = tokenizedPrefixIndex.findLongestMatch("rushing");
Assert.assertTrue(match.get(0) > -1);
match = tokenizedPrefixIndex.findLongestMatch("the");
Assert.assertTrue(match.get(0) > -1);
match = tokenizedPrefixIndex.findLongestMatch("doesnotexist");
Assert.assertTrue(match.size() == 0);
match = tokenizedPrefixIndex.findLongestMatch("resurrect");
Assert.assertTrue(match.size() == 0);
match = tokenizedPrefixIndex.findLongestMatch(""); // empty string is not indexed in prefix index but supported in hollow type state
Assert.assertTrue(match.size() == 0);
match = tokenizedPrefixIndex.findLongestMatch(null); // null value is not supported in hollow type state
Assert.assertTrue(match.size() == 0);
} |
static QueryId buildId(
final Statement statement,
final EngineContext engineContext,
final QueryIdGenerator idGenerator,
final OutputNode outputNode,
final boolean createOrReplaceEnabled,
final Optional<String> withQueryId) {
if (withQueryId.isPresent()) {
final String queryId = withQueryId.get().toUpperCase();
validateWithQueryId(queryId);
return new QueryId(queryId);
}
if (statement instanceof CreateTable && ((CreateTable) statement).isSource()) {
// Use the CST name as part of the QueryID
final String suffix = ((CreateTable) statement).getName().text().toUpperCase()
+ "_" + idGenerator.getNext().toUpperCase();
return new QueryId(ReservedQueryIdsPrefixes.CST + suffix);
}
if (!outputNode.getSinkName().isPresent()) {
final String prefix =
"transient_" + outputNode.getSource().getLeftmostSourceNode().getAlias().text() + "_";
return new QueryId(prefix + Math.abs(ThreadLocalRandom.current().nextLong()));
}
final KsqlStructuredDataOutputNode structured = (KsqlStructuredDataOutputNode) outputNode;
if (!structured.createInto()) {
return new QueryId(ReservedQueryIdsPrefixes.INSERT + idGenerator.getNext());
}
final SourceName sink = outputNode.getSinkName().get();
final Set<QueryId> queriesForSink = engineContext.getQueryRegistry().getQueriesWithSink(sink);
if (queriesForSink.size() > 1) {
throw new KsqlException("REPLACE for sink " + sink + " is not supported because there are "
+ "multiple queries writing into it: " + queriesForSink);
} else if (!queriesForSink.isEmpty()) {
if (!createOrReplaceEnabled) {
final String type = outputNode.getNodeOutputType().getKsqlType().toLowerCase();
throw new UnsupportedOperationException(
String.format(
"Cannot add %s '%s': A %s with the same name already exists",
type,
sink.text(),
type));
}
return Iterables.getOnlyElement(queriesForSink);
}
final String suffix = outputNode.getId().toString().toUpperCase()
+ "_" + idGenerator.getNext().toUpperCase();
return new QueryId(
outputNode.getNodeOutputType() == DataSourceType.KTABLE
? ReservedQueryIdsPrefixes.CTAS + suffix
: ReservedQueryIdsPrefixes.CSAS + suffix
);
} | @Test
public void shouldReuseExistingQueryId() {
// Given:
when(plan.getSinkName()).thenReturn(Optional.of(SINK));
when(plan.createInto()).thenReturn(true);
when(queryRegistry.getQueriesWithSink(SINK))
.thenReturn(ImmutableSet.of(new QueryId("CTAS_FOO_10")));
// When:
final QueryId queryId = QueryIdUtil.buildId(statement, engineContext, idGenerator, plan,
true, Optional.empty());
// Then:
assertThat(queryId, is(new QueryId("CTAS_FOO_10")));
} |
@Override
public boolean saveAndCheckUserLoginStatus(Long userId) throws Exception {
Long add = redisTemplate.opsForSet().add(LOGIN_STATUS_PREFIX, userId.toString());
if (add == 0) {
return false;
} else {
return true;
}
} | @Test
public void checkUserLoginStatus() throws Exception {
boolean status = userInfoCacheService.saveAndCheckUserLoginStatus(2000L);
log.info("status={}", status);
} |
@SuppressWarnings("unused") // Part of required API.
public void execute(
final ConfiguredStatement<InsertValues> statement,
final SessionProperties sessionProperties,
final KsqlExecutionContext executionContext,
final ServiceContext serviceContext
) {
final InsertValues insertValues = statement.getStatement();
final MetaStore metaStore = executionContext.getMetaStore();
final KsqlConfig config = statement.getSessionConfig().getConfig(true);
final DataSource dataSource = getDataSource(config, metaStore, insertValues);
validateInsert(insertValues.getColumns(), dataSource);
final ProducerRecord<byte[], byte[]> record =
buildRecord(statement, metaStore, dataSource, serviceContext);
try {
producer.sendRecord(record, serviceContext, config.getProducerClientConfigProps());
} catch (final TopicAuthorizationException e) {
// TopicAuthorizationException does not give much detailed information about why it failed,
// except which topics are denied. Here we just add the ACL to make the error message
// consistent with other authorization error messages.
final Exception rootCause = new KsqlTopicAuthorizationException(
AclOperation.WRITE,
e.unauthorizedTopics()
);
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), rootCause);
} catch (final ClusterAuthorizationException e) {
// ClusterAuthorizationException is thrown when using idempotent producers
// and either a topic write permission or a cluster-level idempotent write
// permission (only applicable for broker versions no later than 2.8) is
// missing. In this case, we include additional context to help the user
// distinguish this type of failure from other permissions exceptions
// such as the ones thrown above when TopicAuthorizationException is caught.
throw new KsqlException(
createInsertFailedExceptionMessage(insertValues),
createClusterAuthorizationExceptionRootCause(dataSource)
);
} catch (final KafkaException e) {
if (e.getCause() != null && e.getCause() instanceof ClusterAuthorizationException) {
// The error message thrown when an idempotent producer is missing permissions
// is (nondeterministically) inconsistent: it is either a raw ClusterAuthorizationException,
// as checked for above, or a ClusterAuthorizationException wrapped inside a KafkaException.
// ksqlDB handles these two the same way, accordingly.
// See https://issues.apache.org/jira/browse/KAFKA-14138 for more.
throw new KsqlException(
createInsertFailedExceptionMessage(insertValues),
createClusterAuthorizationExceptionRootCause(dataSource)
);
} else {
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
}
} catch (final Exception e) {
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
}
} | @Test
public void shouldHandleOutOfOrderSchema() {
// Given:
final ConfiguredStatement<InsertValues> statement = givenInsertValues(
ImmutableList.of(COL1, COL0, K0),
ImmutableList.of(
new LongLiteral(2L),
new StringLiteral("str"),
new StringLiteral("key")
)
);
// When:
executor.execute(statement, mock(SessionProperties.class), engine, serviceContext);
// Then:
verify(keySerializer).serialize(TOPIC_NAME, genericKey("key"));
verify(valueSerializer).serialize(TOPIC_NAME, genericRow("str", 2L));
verify(producer).send(new ProducerRecord<>(TOPIC_NAME, null, 1L, KEY, VALUE));
} |
public boolean isAdmin(Admin admin) {
return !isSecurityEnabled() || noAdminsConfigured() || adminsConfig.isAdmin(admin, rolesConfig.memberRoles(admin));
} | @Test
public void shouldKnowIfUserIsAdmin() throws Exception {
SecurityConfig security = security(null, admins(user("chris")));
assertThat(security.isAdmin(new AdminUser(new CaseInsensitiveString("chris"))), is(true));
assertThat(security.isAdmin(new AdminUser(new CaseInsensitiveString("evilHacker"))), is(true));
security = security(passwordFileAuthConfig(), admins(user("chris")));
assertThat(security.isAdmin(new AdminUser(new CaseInsensitiveString("chris"))), is(true));
assertThat(security.isAdmin(new AdminUser(new CaseInsensitiveString("evilHacker"))), is(false));
} |
@Override
public void loadAll(boolean replaceExistingValues) {
map.loadAll(replaceExistingValues);
} | @Test
public void testLoadAll() {
mapWithLoader.put(23, "value-23");
mapStore.setKeys(singleton(23));
adapterWithLoader.loadAll(true);
adapterWithLoader.waitUntilLoaded();
assertEquals("newValue-23", mapWithLoader.get(23));
} |
@Override
public void check(Model model) {
if (model == null)
return;
List<Model> secondPhaseModels = new ArrayList<>();
deepFindAllModelsOfType(AppenderModel.class, secondPhaseModels, model);
deepFindAllModelsOfType(LoggerModel.class, secondPhaseModels, model);
deepFindAllModelsOfType(RootLoggerModel.class, secondPhaseModels, model);
List<Pair<Model, Model>> nestedPairs = deepFindNestedSubModelsOfType(IfModel.class, secondPhaseModels);
if (nestedPairs.isEmpty())
return;
addWarn("<if> elements cannot be nested within an <appender>, <logger> or <root> element");
addWarn("See also " + NESTED_IF_WARNING_URL);
for (Pair<Model, Model> pair : nestedPairs) {
Model p = pair.first;
int pLine = p.getLineNumber();
Model s = pair.second;
int sLine = s.getLineNumber();
addWarn("Element <"+p.getTag()+"> at line " + pLine + " contains a nested <"+s.getTag()+"> element at line " +sLine);
}
} | @Test
public void singleAppender() {
ClassicTopModel topModel = new ClassicTopModel();
AppenderModel appenderModel0 = new AppenderModel();
appenderModel0.setLineNumber(1);
topModel.addSubModel(appenderModel0);
inwspeChecker.check(topModel);
statusChecker.assertIsWarningOrErrorFree();
} |
@Description("encode value as a big endian varbinary according to IEEE 754 single-precision floating-point format")
@ScalarFunction("to_ieee754_32")
@SqlType(StandardTypes.VARBINARY)
public static Slice toIEEE754Binary32(@SqlType(StandardTypes.REAL) long value)
{
Slice slice = Slices.allocate(Float.BYTES);
slice.setInt(0, Integer.reverseBytes((int) value));
return slice;
} | @Test
public void testToIEEE754Binary32()
{
assertFunction("to_ieee754_32(CAST(0.0 AS REAL))", VARBINARY, sqlVarbinaryHex("00000000"));
assertFunction("to_ieee754_32(CAST(1.0 AS REAL))", VARBINARY, sqlVarbinaryHex("3F800000"));
assertFunction("to_ieee754_32(CAST(3.14 AS REAL))", VARBINARY, sqlVarbinaryHex("4048F5C3"));
assertFunction("to_ieee754_32(CAST(NAN() AS REAL))", VARBINARY, sqlVarbinaryHex("7FC00000"));
assertFunction("to_ieee754_32(CAST(INFINITY() AS REAL))", VARBINARY, sqlVarbinaryHex("7F800000"));
assertFunction("to_ieee754_32(CAST(-INFINITY() AS REAL))", VARBINARY, sqlVarbinaryHex("FF800000"));
assertFunction("to_ieee754_32(CAST(3.4028235E38 AS REAL))", VARBINARY, sqlVarbinaryHex("7F7FFFFF"));
assertFunction("to_ieee754_32(CAST(-3.4028235E38 AS REAL))", VARBINARY, sqlVarbinaryHex("FF7FFFFF"));
assertFunction("to_ieee754_32(CAST(1.4E-45 AS REAL))", VARBINARY, sqlVarbinaryHex("00000001"));
assertFunction("to_ieee754_32(CAST(-1.4E-45 AS REAL))", VARBINARY, sqlVarbinaryHex("80000001"));
} |
@Nonnull
@Override
public CreatedAggregations<AggregationBuilder> doCreateAggregation(Direction direction, String name, Pivot pivot, Time timeSpec, OSGeneratedQueryContext queryContext, Query query) {
AggregationBuilder root = null;
AggregationBuilder leaf = null;
final Interval interval = timeSpec.interval();
final TimeRange timerange = query.timerange();
if (interval instanceof AutoInterval autoInterval
&& isAllMessages(timerange)) {
for (String timeField : timeSpec.fields()) {
final AutoDateHistogramAggregationBuilder builder = new AutoDateHistogramAggregationBuilder(name)
.field(timeField)
.setNumBuckets((int) (BASE_NUM_BUCKETS / autoInterval.scaling()))
.format(DATE_TIME_FORMAT);
if (root == null && leaf == null) {
root = builder;
leaf = builder;
} else {
leaf.subAggregation(builder);
leaf = builder;
}
}
} else {
for (String timeField : timeSpec.fields()) {
final DateHistogramInterval dateHistogramInterval = new DateHistogramInterval(interval.toDateInterval(query.effectiveTimeRange(pivot)).toString());
final List<BucketOrder> ordering = orderListForPivot(pivot, queryContext, defaultOrder);
final DateHistogramAggregationBuilder builder = AggregationBuilders.dateHistogram(name)
.field(timeField)
.order(ordering)
.format(DATE_TIME_FORMAT);
setInterval(builder, dateHistogramInterval);
if (root == null && leaf == null) {
root = builder;
leaf = builder;
} else {
leaf.subAggregation(builder);
leaf = builder;
}
}
}
return CreatedAggregations.create(root, leaf);
} | @Test
public void autoDateHistogramAggregationBuilderUsedForAutoIntervalAndAllMessages() {
final AutoInterval interval = mock(AutoInterval.class);
final RelativeRange allMessagesRange = RelativeRange.allTime();
doReturn(allMessagesRange).when(query).timerange();
when(time.interval()).thenReturn(interval);
final BucketSpecHandler.CreatedAggregations<AggregationBuilder> createdAggregations = this.osTimeHandler.doCreateAggregation(BucketSpecHandler.Direction.Row, "foobar", pivot, time, queryContext, query);
assertEquals(createdAggregations.root(), createdAggregations.leaf());
assertTrue(createdAggregations.root() instanceof AutoDateHistogramAggregationBuilder);
assertEquals("foobar", createdAggregations.root().getName());
assertEquals("foobar", ((AutoDateHistogramAggregationBuilder) createdAggregations.root()).field());
assertEquals(DATE_TIME_FORMAT, ((AutoDateHistogramAggregationBuilder) createdAggregations.root()).format());
} |
@Override
public <VO, VR> KStream<K, VR> leftJoin(final KStream<K, VO> otherStream,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
final JoinWindows windows) {
return leftJoin(otherStream, toValueJoinerWithKey(joiner), windows);
} | @Test
public void shouldNotAllowNullMapperOnLeftJoinWithGlobalTable() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.leftJoin(testGlobalTable, null, MockValueJoiner.TOSTRING_JOINER));
assertThat(exception.getMessage(), equalTo("keySelector can't be null"));
} |
public static CharSequence parseElement(XmlPullParser parser) throws XmlPullParserException, IOException {
return parseElement(parser, false);
} | @Test
public void parseElementMultipleNamespace()
throws ParserConfigurationException,
FactoryConfigurationError, XmlPullParserException,
IOException, TransformerException, SAXException {
// @formatter:off
final String stanza = XMLBuilder.create("outer", "outerNamespace").a("outerAttribute", "outerValue")
.element("inner", "innerNamespace").a("innerAttribute", "innerValue")
.element("innermost")
.t("some text")
.asString();
// @formatter:on
XmlPullParser parser = TestUtils.getParser(stanza, "outer");
CharSequence result = PacketParserUtils.parseElement(parser, true);
assertXmlSimilar(stanza, result.toString());
} |
@GET
@TreeResponse
public ExportedPipelineFunction[] doPipelineStepMetadata() {
List<ExportedPipelineFunction> pd = new ArrayList<>();
// POST to this with parameter names
// e.g. json:{"time": "1", "unit": "NANOSECONDS", "stapler-class": "org.jenkinsci.plugins.workflow.steps.TimeoutStep", "$class": "org.jenkinsci.plugins.workflow.steps.TimeoutStep"}
for (StepDescriptor d : StepDescriptor.all()) {
if (includeStep(d)) {
ExportedPipelineStep step = getStepMetadata(d);
if (step != null) {
pd.add(step);
}
}
}
List<Descriptor<?>> metaStepDescriptors = new ArrayList<Descriptor<?>>();
populateMetaSteps(metaStepDescriptors, Builder.class);
populateMetaSteps(metaStepDescriptors, Publisher.class);
populateMetaSteps(metaStepDescriptors, BuildWrapper.class);
for (Descriptor<?> d : metaStepDescriptors) {
ExportedPipelineFunction metaStep = getStepMetadata(d);
if (metaStep != null) {
pd.add(metaStep);
}
}
return pd.toArray(new ExportedPipelineFunction[pd.size()]);
} | @Test
public void verifyFunctionNames() throws Exception {
PipelineMetadataService svc = new PipelineMetadataService();
List<ExportedDescribableModel> steps = new ArrayList<>(Arrays.asList(svc.doPipelineStepMetadata()));
assertFalse(steps.isEmpty());
// Verify we have a Symbol-provided Builder or Publisher
assertThat(steps, hasItem(stepWithName("archiveArtifacts")));
// Verify that we don't have steps blacklisted by Declarative
assertThat(steps, not(hasItem(stepWithName("properties"))));
// Verify that we don't have advanced steps
assertThat(steps, not(hasItem(stepWithName("archive"))));
// Verify that we *do* have advanced steps that are explicitly whitelisted in.
assertThat(steps, hasItem(stepWithName("catchError")));
// Verify that we have a Symbol-provided SimpleBuildWrapper
assertThat(steps, hasItem(stepWithName("withSonarQubeEnv")));
} |
public static boolean isTableActiveVersionNode(final String path) {
return Pattern.compile(getMetaDataNode() + TABLES_PATTERN + ACTIVE_VERSION_SUFFIX, Pattern.CASE_INSENSITIVE).matcher(path).find();
} | @Test
void assertIsTableActiveVersionNode() {
assertTrue(TableMetaDataNode.isTableActiveVersionNode("/metadata/foo_db/schemas/foo_schema/tables/foo_table/active_version"));
} |
static String removeWhiteSpaceFromJson(String json) {
// reparse the JSON to ensure that all whitespace formatting is uniform
String flattened = FLAT_GSON.toJson(JsonParser.parseString(json));
return flattened;
} | @Test
public void removeWhiteSpaceFromJson_noOp() {
String input = "{\"a\":123,\"b\":456}";
String output = "{\"a\":123,\"b\":456}";
assertThat(
removeWhiteSpaceFromJson(input),
is(output)
);
} |
@Override
public String convertToDatabaseColumn(MonetaryAmount amount) {
return amount == null ? null
: String.format("%s %s", amount.getCurrency().toString(), amount.getNumber().toString());
} | @Test
void doesNotFormatLargeValues() {
assertThat(converter.convertToDatabaseColumn(Money.of(123456, "EUR"))).isEqualTo("EUR 123456");
} |
@Override
public Data getValue() {
return value;
} | @Test
public void testGetValue() {
assertEquals(VALUE, record.getValue());
assertEquals(VALUE, recordSameAttributes.getValue());
assertNotEquals(VALUE, recordOtherKeyAndValue.getValue());
} |
public void schedule(String eventDefinitionId) {
final EventDefinitionDto eventDefinition = getEventDefinitionOrThrowIAE(eventDefinitionId);
createJobDefinitionAndTriggerIfScheduledType(eventDefinition);
} | @Test
@MongoDBFixtures("event-processors-without-schedule.json")
public void schedule() {
assertThat(eventDefinitionService.get("54e3deadbeefdeadbeef0000")).isPresent();
assertThat(jobDefinitionService.streamAll().count()).isEqualTo(0);
assertThat(jobTriggerService.all()).isEmpty();
handler.schedule("54e3deadbeefdeadbeef0000");
assertThat(eventDefinitionService.get("54e3deadbeefdeadbeef0000")).isPresent();
assertThat(jobDefinitionService.getByConfigField("event_definition_id", "54e3deadbeefdeadbeef0000"))
.get()
.satisfies(definition -> {
assertThat(definition.title()).isEqualTo("Test");
assertThat(definition.description()).isEqualTo("A test event definition");
assertThat(definition.config()).isInstanceOf(EventProcessorExecutionJob.Config.class);
final EventProcessorExecutionJob.Config config = (EventProcessorExecutionJob.Config) definition.config();
assertThat(config.processingWindowSize()).isEqualTo(300000);
assertThat(config.processingHopSize()).isEqualTo(60000);
assertThat(jobTriggerService.nextRunnableTrigger()).get().satisfies(trigger -> {
assertThat(trigger.jobDefinitionId()).isEqualTo(definition.id());
assertThat(trigger.schedule()).isInstanceOf(IntervalJobSchedule.class);
final IntervalJobSchedule schedule = (IntervalJobSchedule) trigger.schedule();
assertThat(schedule.interval()).isEqualTo(60000);
assertThat(schedule.unit()).isEqualTo(TimeUnit.MILLISECONDS);
});
});
assertThat(jobDefinitionService.get("54e3deadbeefdeadbeef0001")).isNotPresent();
assertThat(jobTriggerService.get("54e3deadbeefdeadbeef0002")).isNotPresent();
} |
public String forDisplay(List<ConfigurationProperty> propertiesToDisplay) {
ArrayList<String> list = new ArrayList<>();
for (ConfigurationProperty property : propertiesToDisplay) {
if (!property.isSecure()) {
list.add(format("%s=%s", property.getConfigurationKey().getName().toLowerCase(), property.getConfigurationValue().getValue()));
}
}
return format("[%s]", StringUtils.join(list, ", "));
} | @Test
void shouldGetConfigForDisplay() {
ConfigurationProperty property1 = new ConfigurationProperty(new ConfigurationKey("key1"), new ConfigurationValue("value1"), null, null);
ConfigurationProperty property2 = new ConfigurationProperty(new ConfigurationKey("key2"), new ConfigurationValue("value2"), null, null);
Configuration config = new Configuration(property1, property2);
assertThat(config.forDisplay(List.of(property1))).isEqualTo("[key1=value1]");
assertThat(config.forDisplay(List.of(property1, property2))).isEqualTo("[key1=value1, key2=value2]");
} |
@VisibleForTesting
State getState() {
return state;
} | @Test
void testInitialState() throws Exception {
final AdaptiveScheduler scheduler =
new AdaptiveSchedulerBuilder(
createJobGraph(),
mainThreadExecutor,
EXECUTOR_RESOURCE.getExecutor())
.build();
assertThat(scheduler.getState()).isInstanceOf(Created.class);
} |
public static Expression generateFilterExpression(SearchArgument sarg) {
return translate(sarg.getExpression(), sarg.getLeaves());
} | @Test
public void testBooleanType() {
SearchArgument.Builder builder = SearchArgumentFactory.newBuilder();
SearchArgument arg =
builder.startAnd().equals("boolean", PredicateLeaf.Type.BOOLEAN, true).end().build();
UnboundPredicate expected = Expressions.equal("boolean", true);
UnboundPredicate actual =
(UnboundPredicate) HiveIcebergFilterFactory.generateFilterExpression(arg);
assertPredicatesMatch(expected, actual);
} |
public Stream<Hit> stream() {
if (nPostingLists == 0) {
return Stream.empty();
}
return StreamSupport.stream(new PredicateSpliterator(), false);
} | @Test
void requireThatIntervalSortingWorksAsUnsigned() {
PredicateSearch search = createPredicateSearch(
new byte[]{1},
postingList(SubqueryBitmap.ALL_SUBQUERIES,
entry(0, 0x00010001)),
postingList(SubqueryBitmap.ALL_SUBQUERIES,
entry(0, 0x00fe0001, 0x00ff00fe)));
assertEquals(List.of(new Hit(0)).toString(), search.stream().toList().toString());
} |
@Override
protected TableRecords getUndoRows() {
return sqlUndoLog.getAfterImage();
} | @Test
public void getUndoRows() {
Assertions.assertEquals(executor.getUndoRows(), executor.getSqlUndoLog().getAfterImage());
} |
@Override
public GroupAssignment assign(
GroupSpec groupSpec,
SubscribedTopicDescriber subscribedTopicDescriber
) throws PartitionAssignorException {
if (groupSpec.memberIds().isEmpty())
return new GroupAssignment(Collections.emptyMap());
if (groupSpec.subscriptionType().equals(HOMOGENEOUS)) {
return assignHomogenous(groupSpec, subscribedTopicDescriber);
} else {
return assignHeterogeneous(groupSpec, subscribedTopicDescriber);
}
} | @Test
public void testAssignWithTwoMembersAndTwoTopicsHomogeneous() {
Map<Uuid, TopicMetadata> topicMetadata = new HashMap<>();
topicMetadata.put(TOPIC_1_UUID, new TopicMetadata(
TOPIC_1_UUID,
TOPIC_1_NAME,
3,
Collections.emptyMap()
));
topicMetadata.put(TOPIC_3_UUID, new TopicMetadata(
TOPIC_3_UUID,
TOPIC_3_NAME,
2,
Collections.emptyMap()
));
Map<String, MemberSubscriptionAndAssignmentImpl> members = new TreeMap<>();
members.put(MEMBER_A, new MemberSubscriptionAndAssignmentImpl(
Optional.empty(),
Optional.empty(),
mkSet(TOPIC_1_UUID, TOPIC_3_UUID),
Assignment.EMPTY
));
members.put(MEMBER_B, new MemberSubscriptionAndAssignmentImpl(
Optional.empty(),
Optional.empty(),
mkSet(TOPIC_1_UUID, TOPIC_3_UUID),
Assignment.EMPTY
));
GroupSpec groupSpec = new GroupSpecImpl(
members,
HOMOGENEOUS,
Collections.emptyMap()
);
SubscribedTopicDescriberImpl subscribedTopicMetadata = new SubscribedTopicDescriberImpl(topicMetadata);
GroupAssignment computedAssignment = assignor.assign(
groupSpec,
subscribedTopicMetadata
);
Map<String, Map<Uuid, Set<Integer>>> expectedAssignment = new HashMap<>();
expectedAssignment.put(MEMBER_A, mkAssignment(
mkTopicAssignment(TOPIC_1_UUID, 0, 1, 2),
mkTopicAssignment(TOPIC_3_UUID, 0, 1)
));
expectedAssignment.put(MEMBER_B, mkAssignment(
mkTopicAssignment(TOPIC_1_UUID, 0, 1, 2),
mkTopicAssignment(TOPIC_3_UUID, 0, 1)
));
assertAssignment(expectedAssignment, computedAssignment);
} |
public void verifyPropertyParam(String param, String name)
throws BadParam {
verifyParam(param, name);
Matcher m = PROPERTY_ID.matcher(param);
if (!m.matches()) {
throw new BadParam("Invalid property name " + name);
}
} | @Test
public void testVerifyPropertyParam() {
// HIVE-15410: Though there are no restrictions on a Hive table property key and it could be any
// combination of letters, digits and even punctuation, we support conventional property
// names in WebHCat (e.g. a property name starting with a letter or digit, possibly with period (.),
// underscore (_) and hyphen (-) only in the middle, like auto.purge, last_modified_by etc.)
String [] validTblProperties = {"abcd", "Abcd", "1Abcd", "abc1d", "Abcd.efgh", "Abcd-efgh",
"Abcd_efgh", "A", "b", "1"};
for (String propertyKey : validTblProperties) {
try {
server.verifyPropertyParam(propertyKey, ":property");
} catch (Exception e) {
fail(propertyKey + " should be a valid table property name in WebHCat.");
}
}
String [] invalidTblProperties = {".abcd", "-Abcd", "_1Abcd", "abc1d.", "Abcd_", "Abcd-",
"Abcd ", " Abcd", ".", "-", "_", " ", "$"};
for (String propertyKey : invalidTblProperties) {
boolean throwException = false;
try {
server.verifyPropertyParam(propertyKey, ":property");
} catch (Exception e) {
throwException = true;
}
if (!throwException) {
fail(propertyKey + " should not be a valid table property name in WebHCat.");
}
}
} |
public ResT receive(long timeoutMs) throws IOException {
if (mCompleted) {
return null;
}
if (mCanceled) {
throw new CancelledException(formatErrorMessage("Stream is already canceled."));
}
long startMs = System.currentTimeMillis();
while (true) {
long waitedForMs = System.currentTimeMillis() - startMs;
if (waitedForMs >= timeoutMs) {
throw new DeadlineExceededException(formatErrorMessage(
"Timeout waiting for response after %dms. clientClosed: %s clientCancelled: %s "
+ "serverClosed: %s", timeoutMs, mClosed, mCanceled, mClosedFromRemote));
}
// Wait for a minute max
long waitMs = Math.min(timeoutMs - waitedForMs, Constants.MINUTE_MS);
try {
Object response = mResponses.poll(waitMs, TimeUnit.MILLISECONDS);
if (response == null) {
checkError(); // The stream could have errored while we were waiting
// Log a warning before looping again
LOG.warn("Client did not receive message from stream, will wait again. totalWaitMs: {} "
+ "clientClosed: {} clientCancelled: {} serverClosed: {} description: {}",
System.currentTimeMillis() - startMs, mClosed, mCanceled, mClosedFromRemote,
mDescription);
continue;
}
if (response == mResponseObserver) {
mCompleted = true;
return null;
}
checkError();
return (ResT) response;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new CancelledException(
formatErrorMessage("Interrupted while waiting for response."), e);
}
}
} | @Test
public void receiveError() throws Exception {
mResponseObserver.onError(Status.UNAUTHENTICATED.asRuntimeException());
Exception e = assertThrows(UnauthenticatedException.class, () ->
mStream.receive(TIMEOUT));
assertTrue(e.getMessage().contains(TEST_MESSAGE));
} |
public Map<String, Object> getKsqlStreamConfigProps(final String applicationId) {
final Map<String, Object> map = new HashMap<>(getKsqlStreamConfigProps());
map.put(
MetricCollectors.RESOURCE_LABEL_PREFIX
+ StreamsConfig.APPLICATION_ID_CONFIG,
applicationId
);
// Streams client metrics aren't used in Confluent deployment
possiblyConfigureConfluentTelemetry(map);
return Collections.unmodifiableMap(map);
} | @Test
public void shouldSetLogAndContinueExceptionHandlerByDefault() {
final KsqlConfig ksqlConfig = new KsqlConfig(Collections.emptyMap());
final Object result = ksqlConfig.getKsqlStreamConfigProps().get(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG);
assertThat(result, equalTo(LogMetricAndContinueExceptionHandler.class));
} |
boolean hasProjectionMaskApi(JClass definedClass, ClassTemplateSpec templateSpec)
{
return _hasProjectionMaskCache.computeIfAbsent(definedClass, (jClass) ->
{
try
{
final Class<?> clazz = _classLoader.loadClass(jClass.fullName());
return Arrays.stream(clazz.getClasses()).anyMatch(
c -> c.getSimpleName().equals(JavaDataTemplateGenerator.PROJECTION_MASK_CLASSNAME));
}
catch (ClassNotFoundException e)
{
// Ignore, and check if the class will be generated from a source PDL
}
return isGeneratedFromSource(templateSpec);
});
} | @Test
public void testHasProjectionMaskApiGeneratedFromSource() throws Exception
{
ProjectionMaskApiChecker projectionMaskApiChecker = new ProjectionMaskApiChecker(
_templateSpecGenerator, _sourceFiles, _mockClassLoader);
Mockito.when(_nestedTypeSource.getAbsolutePath()).thenReturn(pegasusDir + FS + "Bar.pdl");
Mockito.when(_nestedType.fullName()).thenReturn("com.linkedin.common.AuditStamp");
Mockito.when(_mockClassLoader.loadClass("com.linkedin.common.AuditStamp")).thenThrow(
new ClassNotFoundException());
Assert.assertTrue(projectionMaskApiChecker.hasProjectionMaskApi(_nestedType, _templateSpec));
Mockito.verify(_mockClassLoader, Mockito.times(1)).loadClass(Mockito.anyString());
Mockito.verify(_nestedType, Mockito.times(1)).fullName();
Mockito.verify(_nestedTypeSource, Mockito.times(1)).getAbsolutePath();
// Check caching
Assert.assertTrue(projectionMaskApiChecker.hasProjectionMaskApi(_nestedType, _templateSpec));
Mockito.verifyNoMoreInteractions(_mockClassLoader);
} |
@Override
public MetricsCollector create(final MetricConfiguration metricConfig) {
switch (metricConfig.getType()) {
case COUNTER:
return new PrometheusMetricsCounterCollector(metricConfig);
case GAUGE:
return new PrometheusMetricsGaugeCollector(metricConfig);
case HISTOGRAM:
return new PrometheusMetricsHistogramCollector(metricConfig);
case SUMMARY:
return new PrometheusMetricsSummaryCollector(metricConfig);
case GAUGE_METRIC_FAMILY:
return new PrometheusMetricsGaugeMetricFamilyCollector(metricConfig);
default:
throw new UnsupportedOperationException(String.format("Can not support type `%s`.", metricConfig.getType()));
}
} | @Test
void assertCreateHistogramCollector() {
MetricConfiguration config = new MetricConfiguration("test_histogram", MetricCollectorType.HISTOGRAM, null, Collections.emptyList(), Collections.emptyMap());
assertThat(new PrometheusMetricsCollectorFactory().create(config), instanceOf(PrometheusMetricsHistogramCollector.class));
} |
@Override
public ObjectNode encode(MappingInstruction instruction, CodecContext context) {
checkNotNull(instruction, "Mapping instruction cannot be null");
return new EncodeMappingInstructionCodecHelper(instruction, context).encode();
} | @Test
public void unicastWeightInstrutionTest() {
final UnicastMappingInstruction.WeightMappingInstruction instruction =
(UnicastMappingInstruction.WeightMappingInstruction)
MappingInstructions.unicastWeight(UNICAST_WEIGHT);
final ObjectNode instructionJson =
instructionCodec.encode(instruction, context);
assertThat(instructionJson, matchesInstruction(instruction));
} |
@PostMapping("/server/leave")
@Secured(resource = Commons.NACOS_CORE_CONTEXT + "/cluster", action = ActionTypes.WRITE, signType = SignType.CONSOLE)
public RestResult<String> leave(@RequestBody Collection<String> params,
@RequestParam(defaultValue = "true") Boolean notifyOtherMembers) throws Exception {
return RestResultUtils.failed(405, null, "/v1/core/cluster/server/leave API not allow to use temporarily.");
} | @Test
void testLeave() throws Exception {
RestResult<String> result = nacosClusterController.leave(Collections.singletonList("1.1.1.1"), true);
assertFalse(result.ok());
assertEquals(405, result.getCode());
} |
@Override
public Mono<UserNotificationPreference> getByUser(String username) {
var configName = buildUserPreferenceConfigMapName(username);
return client.fetch(ConfigMap.class, configName)
.map(config -> {
if (config.getData() == null) {
return new UserNotificationPreference();
}
String s = config.getData().get(NOTIFICATION_PREFERENCE);
if (StringUtils.isNotBlank(s)) {
return JsonUtils.jsonToObject(s, UserNotificationPreference.class);
}
return new UserNotificationPreference();
})
.defaultIfEmpty(new UserNotificationPreference());
} | @Test
void getByUserWhenNotFound() {
when(client.fetch(ConfigMap.class, "user-preferences-guqing"))
.thenReturn(Mono.empty());
userNotificationPreferenceService.getByUser("guqing")
.as(StepVerifier::create)
.consumeNextWith(preference ->
assertThat(preference.getReasonTypeNotifier()).isNotNull()
)
.verifyComplete();
verify(client).fetch(ConfigMap.class, "user-preferences-guqing");
} |
public int calculateBufferSize(long totalBufferSizeInBytes, int totalBuffers) {
checkArgument(totalBufferSizeInBytes >= 0, "Size of buffer should be non negative");
checkArgument(totalBuffers > 0, "Number of buffers should be positive");
// Since the result value is always limited by the max buffer size while the instant value is
// potentially unlimited, this can lead to an instant jump from the min to the max value in the
// case when the instant value is significantly larger than the possible max value.
// The solution is to limit the instant buffer size to twice the current buffer size in order
// to have the same growth and shrink speeds. For example, if the instant value is equal to 0
// and the current value is 16000, we can decrease it by at most 1600 (assuming alpha = 0.1).
// The idea is to allow increasing and decreasing the size by the same amount. So even if the instant
// value were large (for example 100000), it would only be possible to increase the current
// value by 1600 (the same as decreasing) because the limit will be 2 * currentValue = 32000.
// Example of change speed:
// growing = 32768, 29647, 26823, 24268, 21956, 19864
// shrinking = 19864, 21755, 23826, 26095, 28580, 31301, 32768
long desirableBufferSize =
Math.min(totalBufferSizeInBytes / totalBuffers, 2L * lastBufferSize);
lastBufferSize += alpha * (desirableBufferSize - lastBufferSize);
return lastBufferSize = Math.max(minBufferSize, Math.min(lastBufferSize, maxBufferSize));
} | @Test
void testSizeGreaterThanMaxSize() {
BufferSizeEMA calculator = new BufferSizeEMA(200, 10, 3);
// Decrease value to less than max.
assertThat(calculator.calculateBufferSize(0, 1)).isEqualTo(100);
// Impossible to exceed maximum.
assertThat(calculator.calculateBufferSize(1000, 1)).isEqualTo(150);
assertThat(calculator.calculateBufferSize(1000, 1)).isEqualTo(200);
assertThat(calculator.calculateBufferSize(1000, 1)).isEqualTo(200);
} |
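// A minimal standalone sketch of the capped EMA update explained in the comment inside
// calculateBufferSize above; the class and method names here are illustrative, not the real ones.
// It reproduces the worked numbers from that comment: with alpha = 0.1 and a current size of 16000,
// an instant value of 0 shrinks the size by 1600, while a huge instant value (100000) is first
// capped at 2 * 16000 = 32000 and therefore grows the size by the same 1600.
public class CappedEmaSketch {

    static int update(int current, long instant, double alpha, int min, int max) {
        // Cap the instant value at twice the current value so growth and shrink speeds match.
        long desirable = Math.min(instant, 2L * current);
        int next = (int) (current + alpha * (desirable - current));
        // Keep the result within the configured bounds.
        return Math.max(min, Math.min(next, max));
    }

    public static void main(String[] args) {
        System.out.println(update(16000, 0, 0.1, 10, 32768));      // 14400 (shrunk by 1600)
        System.out.println(update(16000, 100000, 0.1, 10, 32768)); // 17600 (grown by 1600)
    }
}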
public void setType( int type ) {
this.type = type;
} | @Test
public void setType() {
DragAndDropContainer dnd = new DragAndDropContainer( DragAndDropContainer.TYPE_BASE_JOB_ENTRY, "Step Name" );
dnd.setType( DragAndDropContainer.TYPE_BASE_STEP_TYPE );
assertEquals( DragAndDropContainer.TYPE_BASE_STEP_TYPE, dnd.getType() );
} |
public Number evaluate(final List<KiePMMLDefineFunction> defineFunctions,
final List<KiePMMLDerivedField> derivedFields,
final List<KiePMMLOutputField> outputFields,
final Map<String, Object> inputData) {
final List<KiePMMLNameValue> kiePMMLNameValues = getKiePMMLNameValuesFromInputDataMap(inputData);
ProcessingDTO processingDTO = new ProcessingDTO(defineFunctions, derivedFields, outputFields, Collections.emptyList(), kiePMMLNameValues, Collections.emptyList(), Collections.emptyList());
Object toReturn = expression.evaluate(processingDTO);
return toReturn != null ? (Number) toReturn : null;
} | @Test
void evaluateFromFieldRef() {
// <ComplexPartialScore>
// <FieldRef field="PARAM_1"/>
// </ComplexPartialScore>
final KiePMMLFieldRef kiePMMLFieldRef = new KiePMMLFieldRef(PARAM_1, Collections.emptyList(), null);
final KiePMMLComplexPartialScore complexPartialScore = new KiePMMLComplexPartialScore(CUSTOM_FIELD, Collections.emptyList(),
kiePMMLFieldRef);
final Map<String, Object> inputData = new HashMap<>();
inputData.put(PARAM_1, value1);
Object retrieved = complexPartialScore.evaluate(Collections.emptyList(), Collections.emptyList(), Collections.emptyList(),
inputData);
assertThat(retrieved).isEqualTo(value1);
} |
public boolean isSuppressed(Device device) {
if (suppressedDeviceType.contains(device.type())) {
return true;
}
final Annotations annotations = device.annotations();
if (containsSuppressionAnnotation(annotations)) {
return true;
}
return false;
} | @Test
public void testSuppressedDeviceAnnotation() {
Annotations annotation = DefaultAnnotations.builder()
.set("no-lldp", "random")
.build();
Device device = new DefaultDevice(PID,
NON_SUPPRESSED_DID,
Device.Type.SWITCH,
MFR, HW, SW1, SN, CID, annotation);
assertTrue(rules.isSuppressed(device));
} |
public static boolean isNullOrEmpty(final Collection<?> list) {
return list == null || list.size() == 0;
} | @Test
public void testIsNullOrEmptyCollection() {
assertTrue(Utils.isNullOrEmpty((Collection<Void>) null));
assertTrue(Utils.isNullOrEmpty(Collections.EMPTY_SET));
assertFalse(Utils.isNullOrEmpty(Collections.singletonList(null)));
assertFalse(Utils.isNullOrEmpty(Arrays.asList(new Object())));
Collection<Object> l = new ArrayList<Object>();
l.add(new Object());
assertFalse(Utils.isNullOrEmpty(l));
} |
@Description("returns index of first occurrence of a substring (or 0 if not found)")
@ScalarFunction("strpos")
@LiteralParameters({"x", "y"})
@SqlType(StandardTypes.BIGINT)
public static long stringPosition(@SqlType("varchar(x)") Slice string, @SqlType("varchar(y)") Slice substring)
{
return stringPositionFromStart(string, substring, 1);
} | @Test
public void testStringPosition()
{
testStrPosAndPosition("high", "ig", 2L);
testStrPosAndPosition("high", "igx", 0L);
testStrPosAndPosition("Quadratically", "a", 3L);
testStrPosAndPosition("foobar", "foobar", 1L);
testStrPosAndPosition("foobar", "obar", 3L);
testStrPosAndPosition("zoo!", "!", 4L);
testStrPosAndPosition("x", "", 1L);
testStrPosAndPosition("", "", 1L);
testStrPosAndPosition("\u4FE1\u5FF5,\u7231,\u5E0C\u671B", "\u7231", 4L);
testStrPosAndPosition("\u4FE1\u5FF5,\u7231,\u5E0C\u671B", "\u5E0C\u671B", 6L);
testStrPosAndPosition("\u4FE1\u5FF5,\u7231,\u5E0C\u671B", "nice", 0L);
testStrPosAndPosition(null, "", null);
testStrPosAndPosition("", null, null);
testStrPosAndPosition(null, null, null);
assertFunction("STRPOS('abc/xyz/foo/bar', '/', 1)", BIGINT, 4L);
assertFunction("STRPOS('abc/xyz/foo/bar', '/', 2)", BIGINT, 8L);
assertFunction("STRPOS('abc/xyz/foo/bar', '/', 3)", BIGINT, 12L);
assertFunction("STRPOS('abc/xyz/foo/bar', '/', 4)", BIGINT, 0L);
assertFunction("STRPOS('highhigh', 'ig', 1)", BIGINT, 2L);
assertFunction("STRPOS('foobarfoo', 'fb', 1)", BIGINT, 0L);
assertFunction("STRPOS('foobarfoo', 'oo', 1)", BIGINT, 2L);
// Assert invalid instance argument
assertInvalidFunction("STRPOS('abc/xyz/foo/bar', '/', 0)", "'instance' must be a positive number.");
assertInvalidFunction("STRPOS('', '', 0)", "'instance' must be a positive number.");
assertInvalidFunction("STRPOS('highhigh', 'ig', -1)", "'instance' must be a positive number.");
assertInvalidFunction("STRPOS('foobarfoo', 'oo', -2)", "'instance' must be a positive number.");
} |
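// A plain-Java sketch of the 1-based STRPOS semantics exercised in the test above (first occurrence
// by default, 0 when the substring is absent, optional Nth-occurrence argument). This is only an
// illustration of the documented behavior, not the actual Presto/Slice-based implementation.
public final class StrposSketch {

    // 1-based index of the instance-th occurrence of substring in string, or 0 if there is none.
    static long strpos(String string, String substring, int instance) {
        if (instance <= 0) {
            throw new IllegalArgumentException("'instance' must be a positive number.");
        }
        if (substring.isEmpty()) {
            return 1; // the empty pattern is reported at position 1, as in the test
        }
        int from = 0;
        for (int i = 0; i < instance; i++) {
            int found = string.indexOf(substring, from);
            if (found < 0) {
                return 0;
            }
            if (i == instance - 1) {
                return found + 1L;
            }
            from = found + 1;
        }
        return 0;
    }

    public static void main(String[] args) {
        System.out.println(strpos("high", "ig", 1));           // 2
        System.out.println(strpos("abc/xyz/foo/bar", "/", 3)); // 12
        System.out.println(strpos("foobar", "xyz", 1));        // 0
    }
}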
@Override
public void add(final TimerTask timerTask) {
if (Objects.isNull(timerTask)) {
throw new NullPointerException("timer task null");
}
this.readLock.lock();
try {
start();
long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
this.addTimerTaskEntry(new TimerTaskList.TimerTaskEntry(this, timerTask, timerTask.getDelayMs() + millis));
} finally {
this.readLock.unlock();
}
} | @Test
public void testListForeach() {
TimerTask timerTask = new TimerTask(100000) {
@Override
public void run(final TaskEntity taskEntity) {
}
};
timerTaskList.add(new TimerTaskList.TimerTaskEntry(timer, timerTask, -1L));
assertEquals(taskCount.get(), 1);
timerTaskList.foreach(timerTask1 -> assertSame(timerTask, timerTask1));
} |
public static String formatChineseDate(Date date, boolean isUppercase, boolean withTime) {
if (null == date) {
return null;
}
if (false == isUppercase) {
return (withTime ? DatePattern.CHINESE_DATE_TIME_FORMAT : DatePattern.CHINESE_DATE_FORMAT).format(date);
}
return CalendarUtil.formatChineseDate(CalendarUtil.calendar(date), withTime);
} | @Test
public void formatChineseDateTest() {
String formatChineseDate = DateUtil.formatChineseDate(DateUtil.parse("2018-02-24"), true, false);
assertEquals("二〇一八年二月二十四日", formatChineseDate);
formatChineseDate = DateUtil.formatChineseDate(DateUtil.parse("2018-02-14"), true, false);
assertEquals("二〇一八年二月十四日", formatChineseDate);
} |
int getMaxLevel(int maxLevel) {
return (maxLevel <= 0 || maxLevel > this.maxLevelAllowed) ? this.maxLevelAllowed : maxLevel;
} | @Test
public void givenMaxLevelPositive_whenGetMaxLevel_thenValueTheSame() {
assertThat(repo.getMaxLevel(1), equalTo(1));
assertThat(repo.getMaxLevel(2), equalTo(2));
assertThat(repo.getMaxLevel(repo.getMaxLevelAllowed()), equalTo(repo.getMaxLevelAllowed()));
assertThat(repo.getMaxLevel(repo.getMaxLevelAllowed() + 1), equalTo(repo.getMaxLevelAllowed()));
assertThat(repo.getMaxLevel(Integer.MAX_VALUE), equalTo(repo.getMaxLevelAllowed()));
} |
@Override
public BitMask resetAll(BitMask mask) {
if (mask instanceof LongBitMask) {
this.mask &= (-1L - ((LongBitMask) mask).asLong());
} else if (mask instanceof AllSetBitMask) {
this.mask &= 0;
} else if (mask instanceof AllSetButLastBitMask) {
this.mask &= Long.MIN_VALUE;
} else if (mask instanceof EmptyButLastBitMask) {
reset(0);
} else if (!(mask instanceof EmptyBitMask)) {
throw new RuntimeException("Cannot resetAll a LongBitMask with a " + mask.getClass().getSimpleName());
}
return this;
} | @Test
public void testResetAll() {
assertThat(new LongBitMask().resetAll(new LongBitMask()).toString()).isEqualTo("0");
assertThat(new LongBitMask().resetAll(AllSetBitMask.get()).toString()).isEqualTo("0");
assertThat(new LongBitMask().resetAll(AllSetButLastBitMask.get()).toString()).isEqualTo("0");
assertThat(new LongBitMask().resetAll(EmptyButLastBitMask.get()).toString()).isEqualTo("0");
assertThat(new LongBitMask().resetAll(EmptyBitMask.get()).toString()).isEqualTo("0");
thrown.expect(RuntimeException.class);
new LongBitMask().resetAll(new OpenBitSet()).toString();
} |
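// The "mask &= (-1L - other)" step in resetAll above relies on the two's-complement identity
// ~x == -x - 1, so -1L - x is simply ~x: the step clears exactly the bits that are set in the
// other mask. A tiny self-contained check of the identity (illustrative only):
public class ResetAllIdentitySketch {
    public static void main(String[] args) {
        long current = 0b1011L;
        long other = 0b0011L;
        // -1L - other is the same as ~other, so this clears the bits set in 'other'.
        System.out.println((current & (-1L - other)) == (current & ~other)); // true
        System.out.println(Long.toBinaryString(current & (-1L - other)));    // 1000
    }
}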
public static Collection<String> getTrimmedStringCollection(String str,
String delim) {
List<String> values = new ArrayList<String>();
if (str == null)
return values;
StringTokenizer tokenizer = new StringTokenizer(str, delim);
while (tokenizer.hasMoreTokens()) {
String next = tokenizer.nextToken();
if (next == null || next.trim().isEmpty()) {
continue;
}
values.add(next.trim());
}
return values;
} | @Test
public void testGetUniqueNonEmptyTrimmedStrings (){
final String TO_SPLIT = ",foo, bar,baz,,blah,blah,bar,";
Collection<String> col = StringUtils.getTrimmedStringCollection(TO_SPLIT);
assertEquals(4, col.size());
assertTrue(col.containsAll(Arrays.asList(new String[]{"foo","bar","baz","blah"})));
} |
@Implementation
protected HttpResponse execute(
HttpHost httpHost, HttpRequest httpRequest, HttpContext httpContext)
throws HttpException, IOException {
if (FakeHttp.getFakeHttpLayer().isInterceptingHttpRequests()) {
return FakeHttp.getFakeHttpLayer()
.emulateRequest(httpHost, httpRequest, httpContext, realObject);
} else {
FakeHttp.getFakeHttpLayer()
.addRequestInfo(new HttpRequestInfo(httpRequest, httpHost, httpContext, redirector));
HttpResponse response = redirector.execute(httpHost, httpRequest, httpContext);
if (FakeHttp.getFakeHttpLayer().isInterceptingResponseContent()) {
interceptResponseContent(response);
}
FakeHttp.getFakeHttpLayer().addHttpResponse(response);
return response;
}
} | @Test
public void shouldReturnRequestsByRule() throws Exception {
FakeHttp.addHttpResponseRule(
HttpGet.METHOD_NAME,
"http://some.uri",
new TestHttpResponse(200, "a cheery response body"));
HttpResponse response = requestDirector.execute(null, new HttpGet("http://some.uri"), null);
assertNotNull(response);
assertThat(response.getStatusLine().getStatusCode()).isEqualTo(200);
assertThat(getStringContent(response)).isEqualTo("a cheery response body");
} |
public static List<TypeRef<?>> getTypeArguments(TypeRef typeRef) {
if (typeRef.getType() instanceof ParameterizedType) {
ParameterizedType parameterizedType = (ParameterizedType) typeRef.getType();
return Arrays.stream(parameterizedType.getActualTypeArguments())
.map(TypeRef::of)
.collect(Collectors.toList());
} else {
return new ArrayList<>();
}
} | @Test
public void getTypeArguments() {
TypeRef<Tuple2<String, Map<String, Integer>>> typeRef =
new TypeRef<Tuple2<String, Map<String, Integer>>>() {};
assertEquals(TypeUtils.getTypeArguments(typeRef).size(), 2);
} |
public static BigDecimal convertToDecimal(Schema schema, Object value, int scale) {
if (value == null) {
throw new DataException("Unable to convert a null value to a schema that requires a value");
}
return convertToDecimal(Decimal.schema(scale), value);
} | @Test
public void shouldFailToConvertNullToDecimal() {
assertThrows(DataException.class, () -> Values.convertToDecimal(null, null, 1));
} |
int getMinimumTokens(String languageKey) {
return settings.getInt("sonar.cpd." + languageKey + ".minimumTokens").orElse(100);
} | @Test
public void defaultMinimumTokens() {
when(configuration.getInt(anyString())).thenReturn(Optional.empty());
assertThat(cpdSettings.getMinimumTokens("java")).isEqualTo(100);
} |
public static boolean createFile(final Path filePath) {
try {
final Path parent = filePath.getParent();
if (parent == null) {
return false;
}
if (Files.notExists(parent)) {
Files.createDirectories(parent);
}
if (Files.notExists(filePath)) {
Files.createFile(filePath);
}
return true;
} catch (final Exception e) {
return false;
}
} | @Test
void testCreateFileLinkDir() throws IOException {
Path link = Paths.get(linkFolder.toFile().getPath(), "link");
Files.createSymbolicLink(link, realFolder);
Path linkDirHistoryFile = Paths.get(link.toAbsolutePath().toString(), "history.file");
Path realLinkDirHistoryFile = Paths.get(realFolder.toFile().getPath(), "history.file");
CliUtils.createFile(linkDirHistoryFile);
assertThat(Files.exists(linkDirHistoryFile)).isTrue();
assertThat(Files.exists(realLinkDirHistoryFile)).isTrue();
} |
public static RawTransaction decode(final String hexTransaction) {
final byte[] transaction = Numeric.hexStringToByteArray(hexTransaction);
TransactionType transactionType = getTransactionType(transaction);
switch (transactionType) {
case EIP1559:
return decodeEIP1559Transaction(transaction);
case EIP4844:
return decodeEIP4844Transaction(transaction);
case EIP2930:
return decodeEIP2930Transaction(transaction);
default:
return decodeLegacyTransaction(transaction);
}
} | @Test
public void testDecoding1559AccessList() {
final RawTransaction rawTransaction = createEip1559RawTransactionAccessList();
final Transaction1559 transaction1559 = (Transaction1559) rawTransaction.getTransaction();
final byte[] encodedMessage = TransactionEncoder.encode(rawTransaction);
final String hexMessage = Numeric.toHexString(encodedMessage);
final RawTransaction result = TransactionDecoder.decode(hexMessage);
assertTrue(result.getTransaction() instanceof Transaction1559);
final Transaction1559 resultTransaction1559 = (Transaction1559) result.getTransaction();
assertNotNull(result);
assertEquals(transaction1559.getChainId(), resultTransaction1559.getChainId());
assertEquals(transaction1559.getNonce(), resultTransaction1559.getNonce());
assertEquals(transaction1559.getMaxFeePerGas(), resultTransaction1559.getMaxFeePerGas());
assertEquals(
transaction1559.getMaxPriorityFeePerGas(),
resultTransaction1559.getMaxPriorityFeePerGas());
assertEquals(transaction1559.getGasLimit(), resultTransaction1559.getGasLimit());
assertEquals(transaction1559.getTo(), resultTransaction1559.getTo());
assertEquals(transaction1559.getValue(), resultTransaction1559.getValue());
assertEquals(transaction1559.getData(), resultTransaction1559.getData());
assertEquals(transaction1559.getAccessList(), resultTransaction1559.getAccessList());
} |
public static Connection OpenConnection( String serveur, int port, String username, String password,
boolean useKey, String keyFilename, String passPhrase, int timeOut, VariableSpace space, String proxyhost,
int proxyport, String proxyusername, String proxypassword ) throws KettleException {
Connection conn = null;
char[] content = null;
boolean isAuthenticated = false;
try {
// perform some checks
if ( useKey ) {
if ( Utils.isEmpty( keyFilename ) ) {
throw new KettleException( BaseMessages.getString( SSHMeta.PKG, "SSH.Error.PrivateKeyFileMissing" ) );
}
FileObject keyFileObject = KettleVFS.getFileObject( keyFilename );
if ( !keyFileObject.exists() ) {
throw new KettleException( BaseMessages.getString( SSHMeta.PKG, "SSH.Error.PrivateKeyNotExist", keyFilename ) );
}
FileContent keyFileContent = keyFileObject.getContent();
CharArrayWriter charArrayWriter = new CharArrayWriter( (int) keyFileContent.getSize() );
try ( InputStream in = keyFileContent.getInputStream() ) {
IOUtils.copy( in, charArrayWriter );
}
content = charArrayWriter.toCharArray();
}
// Create a new connection
conn = createConnection( serveur, port );
/* We want to connect through a HTTP proxy */
if ( !Utils.isEmpty( proxyhost ) ) {
/* Now connect */
// if the proxy requires basic authentication:
if ( !Utils.isEmpty( proxyusername ) ) {
conn.setProxyData( new HTTPProxyData( proxyhost, proxyport, proxyusername, proxypassword ) );
} else {
conn.setProxyData( new HTTPProxyData( proxyhost, proxyport ) );
}
}
// and connect
if ( timeOut == 0 ) {
conn.connect();
} else {
conn.connect( null, 0, timeOut * 1000 );
}
// authenticate
if ( useKey ) {
isAuthenticated =
conn.authenticateWithPublicKey( username, content, space.environmentSubstitute( passPhrase ) );
} else {
isAuthenticated = conn.authenticateWithPassword( username, password );
}
if ( isAuthenticated == false ) {
throw new KettleException( BaseMessages.getString( SSHMeta.PKG, "SSH.Error.AuthenticationFailed", username ) );
}
} catch ( Exception e ) {
// Something went wrong
// do not forget to disconnect if we are connected
if ( conn != null ) {
conn.close();
}
throw new KettleException( BaseMessages.getString( SSHMeta.PKG, "SSH.Error.ErrorConnecting", serveur, username ), e );
}
return conn;
} | @Test
public void testOpenConnectionUseKey_2() throws Exception {
when( fileObject.exists() ).thenReturn( true );
when( fileObject.getContent() ).thenReturn( fileContent );
when( fileContent.getSize() ).thenReturn( 1000L );
when( fileContent.getInputStream() ).thenReturn( new ByteArrayInputStream( new byte[] { 1, 2, 3, 4, 5 } ) );
when( variableSpace.environmentSubstitute( passPhrase ) ).thenReturn( passPhrase );
when( connection.authenticateWithPublicKey( eq( username ), any( char[].class ), eq( passPhrase ) ) ).thenReturn( true );
SSHData.OpenConnection( server, port, username, null, true, keyFilePath,
passPhrase, 0, variableSpace, null, 0, null, null );
verify( connection ).connect();
verify( connection ).authenticateWithPublicKey( eq( username ), any( char[].class ), eq( passPhrase ) );
} |
@Override
public List<byte[]> clusterGetKeysInSlot(int slot, Integer count) {
RFuture<List<byte[]>> f = executorService.readAsync((String)null, ByteArrayCodec.INSTANCE, CLUSTER_GETKEYSINSLOT, slot, count);
return syncFuture(f);
} | @Test
public void testClusterGetKeysInSlot() {
List<byte[]> keys = connection.clusterGetKeysInSlot(12, 10);
assertThat(keys).isEmpty();
} |
@Override
public void invoke() throws Exception {
// --------------------------------------------------------------------
// Initialize
// --------------------------------------------------------------------
LOG.debug(getLogString("Start registering input and output"));
// initialize OutputFormat
initOutputFormat();
// initialize input readers
try {
initInputReaders();
} catch (Exception e) {
throw new RuntimeException(
"Initializing the input streams failed"
+ (e.getMessage() == null ? "." : ": " + e.getMessage()),
e);
}
LOG.debug(getLogString("Finished registering input and output"));
// --------------------------------------------------------------------
// Invoke
// --------------------------------------------------------------------
LOG.debug(getLogString("Starting data sink operator"));
RuntimeContext ctx = createRuntimeContext();
final Counter numRecordsIn;
{
Counter tmpNumRecordsIn;
try {
InternalOperatorIOMetricGroup ioMetricGroup =
((InternalOperatorMetricGroup) ctx.getMetricGroup()).getIOMetricGroup();
ioMetricGroup.reuseInputMetricsForTask();
ioMetricGroup.reuseOutputMetricsForTask();
tmpNumRecordsIn = ioMetricGroup.getNumRecordsInCounter();
} catch (Exception e) {
LOG.warn("An exception occurred during the metrics setup.", e);
tmpNumRecordsIn = new SimpleCounter();
}
numRecordsIn = tmpNumRecordsIn;
}
if (RichOutputFormat.class.isAssignableFrom(this.format.getClass())) {
((RichOutputFormat) this.format).setRuntimeContext(ctx);
LOG.debug(getLogString("Rich Sink detected. Initializing runtime context."));
}
ExecutionConfig executionConfig = getExecutionConfig();
boolean objectReuseEnabled = executionConfig.isObjectReuseEnabled();
try {
// initialize local strategies
MutableObjectIterator<IT> input1;
switch (this.config.getInputLocalStrategy(0)) {
case NONE:
// nothing to do
localStrategy = null;
input1 = reader;
break;
case SORT:
// initialize sort local strategy
try {
// get type comparator
TypeComparatorFactory<IT> compFact =
this.config.getInputComparator(0, getUserCodeClassLoader());
if (compFact == null) {
throw new Exception(
"Missing comparator factory for local strategy on input " + 0);
}
// initialize sorter
Sorter<IT> sorter =
ExternalSorter.newBuilder(
getEnvironment().getMemoryManager(),
this,
this.inputTypeSerializerFactory.getSerializer(),
compFact.createComparator())
.maxNumFileHandles(this.config.getFilehandlesInput(0))
.enableSpilling(
getEnvironment().getIOManager(),
this.config.getSpillingThresholdInput(0))
.memoryFraction(this.config.getRelativeMemoryInput(0))
.objectReuse(
this.getExecutionConfig().isObjectReuseEnabled())
.largeRecords(this.config.getUseLargeRecordHandler())
.build(this.reader);
this.localStrategy = sorter;
input1 = sorter.getIterator();
} catch (Exception e) {
throw new RuntimeException(
"Initializing the input processing failed"
+ (e.getMessage() == null ? "." : ": " + e.getMessage()),
e);
}
break;
default:
throw new RuntimeException("Invalid local strategy for DataSinkTask");
}
// read the reader and write it to the output
final TypeSerializer<IT> serializer = this.inputTypeSerializerFactory.getSerializer();
final MutableObjectIterator<IT> input = input1;
final OutputFormat<IT> format = this.format;
// check if task has been canceled
if (this.taskCanceled) {
return;
}
LOG.debug(getLogString("Starting to produce output"));
// open
format.open(
new InitializationContext() {
@Override
public int getNumTasks() {
return getEnvironment().getTaskInfo().getNumberOfParallelSubtasks();
}
@Override
public int getTaskNumber() {
return getEnvironment().getTaskInfo().getIndexOfThisSubtask();
}
@Override
public int getAttemptNumber() {
return getEnvironment().getTaskInfo().getAttemptNumber();
}
});
if (objectReuseEnabled) {
IT record = serializer.createInstance();
// work!
while (!this.taskCanceled && ((record = input.next(record)) != null)) {
numRecordsIn.inc();
format.writeRecord(record);
}
} else {
IT record;
// work!
while (!this.taskCanceled && ((record = input.next()) != null)) {
numRecordsIn.inc();
format.writeRecord(record);
}
}
// close. We close here such that a regular close throwing an exception marks a task as
// failed.
if (!this.taskCanceled) {
this.format.close();
this.format = null;
}
} catch (Exception ex) {
// make a best effort to clean up
try {
if (!cleanupCalled && format instanceof CleanupWhenUnsuccessful) {
cleanupCalled = true;
((CleanupWhenUnsuccessful) format).tryCleanupOnError();
}
} catch (Throwable t) {
LOG.error("Cleanup on error failed.", t);
}
ex = ExceptionInChainedStubException.exceptionUnwrap(ex);
if (ex instanceof CancelTaskException) {
// forward canceling exception
throw ex;
}
// drop, if the task was canceled
else if (!this.taskCanceled) {
if (LOG.isErrorEnabled()) {
LOG.error(getLogString("Error in user code: " + ex.getMessage()), ex);
}
throw ex;
}
} finally {
if (this.format != null) {
// close format, if it has not been closed, yet.
// This should only be the case if we had a previous error, or were canceled.
try {
this.format.close();
} catch (Throwable t) {
if (LOG.isWarnEnabled()) {
LOG.warn(getLogString("Error closing the output format"), t);
}
}
}
// close local strategy if necessary
if (localStrategy != null) {
try {
this.localStrategy.close();
} catch (Throwable t) {
LOG.error("Error closing local strategy", t);
}
}
BatchTask.clearReaders(new MutableReader<?>[] {inputReader});
}
if (!this.taskCanceled) {
LOG.debug(getLogString("Finished data sink operator"));
} else {
LOG.debug(getLogString("Data sink operator cancelled"));
}
} | @Test
void testFailingDataSinkTask() {
int keyCnt = 100;
int valCnt = 20;
super.initEnvironment(MEMORY_MANAGER_SIZE, NETWORK_BUFFER_SIZE);
super.addInput(new UniformRecordGenerator(keyCnt, valCnt, false), 0);
DataSinkTask<Record> testTask = new DataSinkTask<>(this.mockEnv);
Configuration stubParams = new Configuration();
File tempTestFile = new File(tempFolder.toFile(), UUID.randomUUID().toString());
super.registerFileOutputTask(
MockFailingOutputFormat.class, tempTestFile.toURI().toString(), stubParams);
boolean stubFailed = false;
try {
testTask.invoke();
} catch (Exception e) {
stubFailed = true;
}
assertThat(stubFailed).withFailMessage("Function exception was not forwarded.").isTrue();
// assert that temp file was removed
assertThat(tempTestFile)
.withFailMessage("Temp output file has not been removed")
.doesNotExist();
} |
public T evolve(int generation) {
return evolve(generation, Double.POSITIVE_INFINITY);
} | @Test
public void test() {
System.out.println("Genetic Algorithm");
BitString[] seeds = new BitString[100];
// The mutation parameters are set higher than usual to prevent premature convergence.
for (int i = 0; i < seeds.length; i++) {
seeds[i] = new BitString(15, new Knapnack(), Crossover.UNIFORM, 1.0, 0.2);
}
GeneticAlgorithm<BitString> instance = new GeneticAlgorithm<>(seeds, Selection.Tournament(3, 0.95), 2);
BitString result = instance.evolve(1000, 18);
assertEquals(18, result.fitness(), 1E-7);
int[] best = {1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
for (int i = 0; i < best.length; i++) {
assertEquals(best[i], result.bits()[i]);
}
} |
@Override
public GenericAvroRecord read(byte[] bytes, int offset, int length) {
try {
if (offset == 0 && this.offset > 0) {
offset = this.offset;
}
Decoder decoder = DecoderFactory.get().binaryDecoder(bytes, offset, length - offset, null);
org.apache.avro.generic.GenericRecord avroRecord =
(org.apache.avro.generic.GenericRecord) reader.read(
null,
decoder);
return new GenericAvroRecord(schemaVersion, schema, fields, avroRecord);
} catch (IOException | IndexOutOfBoundsException e) {
throw new SchemaSerializationException(e);
}
} | @Test
public void testGenericAvroReaderByReaderSchema() {
byte[] fooV2Bytes = fooV2Schema.encode(fooV2);
GenericAvroReader genericAvroSchemaByReaderSchema = new GenericAvroReader(fooV2Schema.getAvroSchema(), fooSchemaNotNull.getAvroSchema(), new byte[10]);
GenericRecord genericRecordByReaderSchema = genericAvroSchemaByReaderSchema.read(fooV2Bytes);
assertEquals(genericRecordByReaderSchema.getField("fieldUnableNull"), "defaultValue");
assertEquals(genericRecordByReaderSchema.getField("field1"), "foo1");
assertEquals(genericRecordByReaderSchema.getField("field3"), 10);
} |
public URL getInterNodeListener(
final Function<URL, Integer> portResolver
) {
return getInterNodeListener(portResolver, LOGGER);
} | @Test
public void shouldResolveInterNodeListenerToFirstListenerSetToIpv6Loopback() {
// Given:
final URL expected = url("https://[::1]:12345");
final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.<String, Object>builder()
.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
.put(LISTENERS_CONFIG, expected.toString() + ",http://localhost:2589")
.build()
);
// When:
final URL actual = config.getInterNodeListener(portResolver, logger);
// Then:
assertThat(actual, is(expected));
verifyLogsInterNodeListener(expected, QUOTED_FIRST_LISTENER_CONFIG);
verifyLogsLoopBackWarning(expected, QUOTED_FIRST_LISTENER_CONFIG);
verifyNoMoreInteractions(logger);
} |
public static File unzip(String zipFilePath) throws UtilException {
return unzip(zipFilePath, DEFAULT_CHARSET);
} | @Test
@Disabled
public void issue3018Test() {
ZipUtil.unzip(
FileUtil.getInputStream("d:/test/default.zip")
, FileUtil.file("d:/test/"), CharsetUtil.CHARSET_UTF_8);
} |
@Override
public <T extends State> T state(StateNamespace namespace, StateTag<T> address) {
return workItemState.get(namespace, address, StateContexts.nullContext());
} | @Test
public void testOrderedListMergePendingAdds() {
SettableFuture<Map<Range<Instant>, RangeSet<Long>>> orderedListFuture = SettableFuture.create();
orderedListFuture.set(null);
SettableFuture<Map<Range<Instant>, RangeSet<Instant>>> deletionsFuture =
SettableFuture.create();
deletionsFuture.set(null);
when(mockReader.valueFuture(
systemKey(NAMESPACE, "orderedList" + IdTracker.IDS_AVAILABLE_STR),
STATE_FAMILY,
IdTracker.IDS_AVAILABLE_CODER))
.thenReturn(orderedListFuture);
when(mockReader.valueFuture(
systemKey(NAMESPACE, "orderedList" + IdTracker.DELETIONS_STR),
STATE_FAMILY,
IdTracker.SUBRANGE_DELETIONS_CODER))
.thenReturn(deletionsFuture);
SettableFuture<Iterable<TimestampedValue<String>>> fromStorage = SettableFuture.create();
when(mockReader.orderedListFuture(
FULL_ORDERED_LIST_RANGE,
key(NAMESPACE, "orderedList"),
STATE_FAMILY,
StringUtf8Coder.of()))
.thenReturn(fromStorage);
StateTag<OrderedListState<String>> addr =
StateTags.orderedList("orderedList", StringUtf8Coder.of());
OrderedListState<String> orderedListState = underTest.state(NAMESPACE, addr);
orderedListState.add(TimestampedValue.of("second", Instant.ofEpochMilli(1)));
orderedListState.add(TimestampedValue.of("third", Instant.ofEpochMilli(2)));
orderedListState.add(TimestampedValue.of("fourth", Instant.ofEpochMilli(2)));
orderedListState.add(TimestampedValue.of("eighth", Instant.ofEpochMilli(10)));
orderedListState.add(TimestampedValue.of("ninth", Instant.ofEpochMilli(15)));
fromStorage.set(
ImmutableList.of(
TimestampedValue.of("first", Instant.ofEpochMilli(-1)),
TimestampedValue.of("fifth", Instant.ofEpochMilli(5)),
TimestampedValue.of("sixth", Instant.ofEpochMilli(5)),
TimestampedValue.of("seventh", Instant.ofEpochMilli(5)),
TimestampedValue.of("tenth", Instant.ofEpochMilli(20))));
TimestampedValue[] expected =
Iterables.toArray(
ImmutableList.of(
TimestampedValue.of("first", Instant.ofEpochMilli(-1)),
TimestampedValue.of("second", Instant.ofEpochMilli(1)),
TimestampedValue.of("third", Instant.ofEpochMilli(2)),
TimestampedValue.of("fourth", Instant.ofEpochMilli(2)),
TimestampedValue.of("fifth", Instant.ofEpochMilli(5)),
TimestampedValue.of("sixth", Instant.ofEpochMilli(5)),
TimestampedValue.of("seventh", Instant.ofEpochMilli(5)),
TimestampedValue.of("eighth", Instant.ofEpochMilli(10)),
TimestampedValue.of("ninth", Instant.ofEpochMilli(15)),
TimestampedValue.of("tenth", Instant.ofEpochMilli(20))),
TimestampedValue.class);
TimestampedValue[] read = Iterables.toArray(orderedListState.read(), TimestampedValue.class);
assertArrayEquals(expected, read);
} |
public static Schema getOutputSchema(
Schema inputSchema, FieldAccessDescriptor fieldAccessDescriptor) {
return getOutputSchemaTrackingNullable(inputSchema, fieldAccessDescriptor, false);
} | @Test
public void testNullableSchemaMap() {
FieldAccessDescriptor fieldAccessDescriptor1 =
FieldAccessDescriptor.withFieldNames("nestedMap.field1").resolve(NESTED_NULLABLE_SCHEMA);
Schema schema1 = SelectHelpers.getOutputSchema(NESTED_NULLABLE_SCHEMA, fieldAccessDescriptor1);
Schema expectedSchema1 =
Schema.builder()
.addNullableField("field1", FieldType.map(FieldType.STRING, FieldType.STRING))
.build();
assertEquals(expectedSchema1, schema1);
FieldAccessDescriptor fieldAccessDescriptor2 =
FieldAccessDescriptor.withFieldNames("nestedMap.*").resolve(NESTED_NULLABLE_SCHEMA);
Schema schema2 = SelectHelpers.getOutputSchema(NESTED_NULLABLE_SCHEMA, fieldAccessDescriptor2);
Schema expectedSchema2 =
Schema.builder()
.addNullableField("field1", FieldType.map(FieldType.STRING, FieldType.STRING))
.addNullableField("field2", FieldType.map(FieldType.STRING, FieldType.INT32))
.addNullableField("field3", FieldType.map(FieldType.STRING, FieldType.DOUBLE))
.addNullableField("field_extra", FieldType.map(FieldType.STRING, FieldType.STRING))
.build();
assertEquals(expectedSchema2, schema2);
} |
public static Class<?> getLiteral(String className, String literal) {
LiteralAnalyzer analyzer = ANALYZERS.get( className );
Class result = null;
if ( analyzer != null ) {
analyzer.validate( literal );
result = analyzer.getLiteral();
}
return result;
} | @Test
public void testNonSupportedPrimitiveType() {
assertThat( getLiteral( void.class.getCanonicalName(), "0xFFFF_FFFF_FFFF" ) ).isNull();
} |
@Override
public boolean hasConflict(ConcurrentOperation thisOperation, ConcurrentOperation otherOperation) {
// TODO : UUID's can clash even for insert/insert, handle that case.
Set<String> partitionBucketIdSetForFirstInstant = thisOperation
.getMutatedPartitionAndFileIds()
.stream()
.map(partitionAndFileId ->
BucketIdentifier.partitionBucketIdStr(partitionAndFileId.getLeft(), BucketIdentifier.bucketIdFromFileId(partitionAndFileId.getRight()))
).collect(Collectors.toSet());
Set<String> partitionBucketIdSetForSecondInstant = otherOperation
.getMutatedPartitionAndFileIds()
.stream()
.map(partitionAndFileId ->
BucketIdentifier.partitionBucketIdStr(partitionAndFileId.getLeft(), BucketIdentifier.bucketIdFromFileId(partitionAndFileId.getRight()))
).collect(Collectors.toSet());
Set<String> intersection = new HashSet<>(partitionBucketIdSetForFirstInstant);
intersection.retainAll(partitionBucketIdSetForSecondInstant);
if (!intersection.isEmpty()) {
LOG.info("Found conflicting writes between first operation = " + thisOperation
+ ", second operation = " + otherOperation + " , intersecting bucket ids " + intersection);
return true;
}
return false;
} | @Test
public void testConcurrentWritesWithInterleavingSuccessfulCommit() throws Exception {
createCommit(metaClient.createNewInstantTime());
HoodieActiveTimeline timeline = metaClient.getActiveTimeline();
// consider commits before this are all successful
Option<HoodieInstant> lastSuccessfulInstant = timeline.getCommitsTimeline().filterCompletedInstants().lastInstant();
// writer 1 starts
String currentWriterInstant = metaClient.createNewInstantTime();
createInflightCommit(currentWriterInstant, HoodieTestDataGenerator.DEFAULT_FIRST_PARTITION_PATH);
// writer 2 starts and finishes
String newInstantTime = metaClient.createNewInstantTime();
createCommit(newInstantTime);
Option<HoodieInstant> currentInstant = Option.of(new HoodieInstant(State.INFLIGHT, HoodieTimeline.COMMIT_ACTION, currentWriterInstant));
SimpleConcurrentFileWritesConflictResolutionStrategy strategy = new BucketIndexConcurrentFileWritesConflictResolutionStrategy();
HoodieCommitMetadata currentMetadata = createCommitMetadata(currentWriterInstant, HoodieTestDataGenerator.DEFAULT_FIRST_PARTITION_PATH);
metaClient.reloadActiveTimeline();
List<HoodieInstant> candidateInstants = strategy.getCandidateInstants(metaClient, currentInstant.get(), lastSuccessfulInstant).collect(
Collectors.toList());
// writer 1 conflicts with writer 2
Assertions.assertEquals(1, candidateInstants.size());
ConcurrentOperation thatCommitOperation = new ConcurrentOperation(candidateInstants.get(0), metaClient);
ConcurrentOperation thisCommitOperation = new ConcurrentOperation(currentInstant.get(), currentMetadata);
Assertions.assertTrue(strategy.hasConflict(thisCommitOperation, thatCommitOperation));
try {
strategy.resolveConflict(null, thisCommitOperation, thatCommitOperation);
Assertions.fail("Cannot reach here, writer 1 and writer 2 should have thrown a conflict");
} catch (HoodieWriteConflictException e) {
// expected
}
} |
public StringSubject factValue(String key) {
return doFactValue(key, null);
} | @Test
public void factValueIntFailNegative() {
try {
assertThat(fact("foo", "the foo")).factValue("foo", -1);
fail();
} catch (IllegalArgumentException expected) {
}
} |
@Nonnull
public static List<Future<?>> getAllDone(Collection<Future<?>> futures) {
List<Future<?>> doneFutures = new ArrayList<>();
for (Future<?> f : futures) {
if (f.isDone()) {
doneFutures.add(f);
}
}
return doneFutures;
} | @Test
public void testGetAllDone_whenSomeFuturesAreCompleted() {
Future<?> completedFuture = InternalCompletableFuture.newCompletedFuture(null);
Collection<Future<?>> futures =
asList(new UncancellableFuture<>(), completedFuture, new UncancellableFuture<>());
assertEquals(1, FutureUtil.getAllDone(futures).size());
assertEquals(completedFuture, FutureUtil.getAllDone(futures).get(0));
} |
@Override
protected int rsv(WebSocketFrame msg) {
return msg.rsv() | WebSocketExtension.RSV1;
} | @Test
public void testFragmentedFrame() {
EmbeddedChannel encoderChannel = new EmbeddedChannel(new PerFrameDeflateEncoder(9, 15, false));
EmbeddedChannel decoderChannel = new EmbeddedChannel(
ZlibCodecFactory.newZlibDecoder(ZlibWrapper.NONE));
// initialize
byte[] payload1 = new byte[100];
random.nextBytes(payload1);
byte[] payload2 = new byte[100];
random.nextBytes(payload2);
byte[] payload3 = new byte[100];
random.nextBytes(payload3);
BinaryWebSocketFrame frame1 = new BinaryWebSocketFrame(false,
WebSocketExtension.RSV3, Unpooled.wrappedBuffer(payload1));
ContinuationWebSocketFrame frame2 = new ContinuationWebSocketFrame(false,
WebSocketExtension.RSV3, Unpooled.wrappedBuffer(payload2));
ContinuationWebSocketFrame frame3 = new ContinuationWebSocketFrame(true,
WebSocketExtension.RSV3, Unpooled.wrappedBuffer(payload3));
// execute
assertTrue(encoderChannel.writeOutbound(frame1));
assertTrue(encoderChannel.writeOutbound(frame2));
assertTrue(encoderChannel.writeOutbound(frame3));
BinaryWebSocketFrame compressedFrame1 = encoderChannel.readOutbound();
ContinuationWebSocketFrame compressedFrame2 = encoderChannel.readOutbound();
ContinuationWebSocketFrame compressedFrame3 = encoderChannel.readOutbound();
// test
assertNotNull(compressedFrame1);
assertNotNull(compressedFrame2);
assertNotNull(compressedFrame3);
assertEquals(WebSocketExtension.RSV1 | WebSocketExtension.RSV3, compressedFrame1.rsv());
assertEquals(WebSocketExtension.RSV1 | WebSocketExtension.RSV3, compressedFrame2.rsv());
assertEquals(WebSocketExtension.RSV1 | WebSocketExtension.RSV3, compressedFrame3.rsv());
assertFalse(compressedFrame1.isFinalFragment());
assertFalse(compressedFrame2.isFinalFragment());
assertTrue(compressedFrame3.isFinalFragment());
assertTrue(decoderChannel.writeInbound(compressedFrame1.content()));
assertTrue(decoderChannel.writeInbound(DeflateDecoder.FRAME_TAIL.duplicate()));
ByteBuf uncompressedPayload1 = decoderChannel.readInbound();
byte[] finalPayload1 = new byte[100];
uncompressedPayload1.readBytes(finalPayload1);
assertArrayEquals(finalPayload1, payload1);
uncompressedPayload1.release();
assertTrue(decoderChannel.writeInbound(compressedFrame2.content()));
assertTrue(decoderChannel.writeInbound(DeflateDecoder.FRAME_TAIL.duplicate()));
ByteBuf uncompressedPayload2 = decoderChannel.readInbound();
byte[] finalPayload2 = new byte[100];
uncompressedPayload2.readBytes(finalPayload2);
assertArrayEquals(finalPayload2, payload2);
uncompressedPayload2.release();
assertTrue(decoderChannel.writeInbound(compressedFrame3.content()));
assertTrue(decoderChannel.writeInbound(DeflateDecoder.FRAME_TAIL.duplicate()));
ByteBuf uncompressedPayload3 = decoderChannel.readInbound();
byte[] finalPayload3 = new byte[100];
uncompressedPayload3.readBytes(finalPayload3);
assertArrayEquals(finalPayload3, payload3);
uncompressedPayload3.release();
} |
@Override
public MapSettings setProperty(String key, String value) {
return (MapSettings) super.setProperty(key, value);
} | @Test
public void ignore_case_of_boolean_values() {
Settings settings = new MapSettings();
settings.setProperty("foo", "true");
settings.setProperty("bar", "TRUE");
// labels in UI
settings.setProperty("baz", "True");
assertThat(settings.getBoolean("foo")).isTrue();
assertThat(settings.getBoolean("bar")).isTrue();
assertThat(settings.getBoolean("baz")).isTrue();
} |
public static TableSchema protoTableSchemaFromAvroSchema(Schema schema) {
Preconditions.checkState(!schema.getFields().isEmpty());
TableSchema.Builder builder = TableSchema.newBuilder();
for (Schema.Field field : schema.getFields()) {
builder.addFields(fieldDescriptorFromAvroField(field));
}
return builder.build();
} | @Test
public void testNestedFromSchema() {
DescriptorProto descriptor =
TableRowToStorageApiProto.descriptorSchemaFromTableSchema(
AvroGenericRecordToStorageApiProto.protoTableSchemaFromAvroSchema(NESTED_SCHEMA),
true,
false);
Map<String, Type> expectedBaseTypes =
BASE_SCHEMA_PROTO.getFieldList().stream()
.collect(
Collectors.toMap(FieldDescriptorProto::getName, FieldDescriptorProto::getType));
Map<String, Type> types =
descriptor.getFieldList().stream()
.collect(
Collectors.toMap(FieldDescriptorProto::getName, FieldDescriptorProto::getType));
Map<String, String> typeNames =
descriptor.getFieldList().stream()
.collect(
Collectors.toMap(FieldDescriptorProto::getName, FieldDescriptorProto::getTypeName));
Map<String, Label> typeLabels =
descriptor.getFieldList().stream()
.collect(
Collectors.toMap(FieldDescriptorProto::getName, FieldDescriptorProto::getLabel));
assertEquals(2, types.size());
Map<String, DescriptorProto> nestedTypes =
descriptor.getNestedTypeList().stream()
.collect(Collectors.toMap(DescriptorProto::getName, Functions.identity()));
assertEquals(2, nestedTypes.size());
assertEquals(Type.TYPE_MESSAGE, types.get("nested"));
assertEquals(Label.LABEL_OPTIONAL, typeLabels.get("nested"));
String nestedTypeName1 = typeNames.get("nested");
Map<String, Type> nestedTypes1 =
nestedTypes.get(nestedTypeName1).getFieldList().stream()
.collect(
Collectors.toMap(FieldDescriptorProto::getName, FieldDescriptorProto::getType));
assertEquals(expectedBaseTypes, nestedTypes1);
assertEquals(Type.TYPE_MESSAGE, types.get("nestedarray"));
assertEquals(Label.LABEL_REPEATED, typeLabels.get("nestedarray"));
String nestedTypeName2 = typeNames.get("nestedarray");
Map<String, Type> nestedTypes2 =
nestedTypes.get(nestedTypeName2).getFieldList().stream()
.collect(
Collectors.toMap(FieldDescriptorProto::getName, FieldDescriptorProto::getType));
assertEquals(expectedBaseTypes, nestedTypes2);
} |
public HollowHashIndexResult findMatches(Object... query) {
if (hashStateVolatile == null) {
throw new IllegalStateException(this + " wasn't initialized");
}
int hashCode = 0;
for(int i=0;i<query.length;i++) {
if(query[i] == null)
throw new IllegalArgumentException("querying by null unsupported; i=" + i);
hashCode ^= HashCodes.hashInt(keyHashCode(query[i], i));
}
HollowHashIndexResult result;
HollowHashIndexState hashState;
do {
result = null;
hashState = hashStateVolatile;
long bucket = hashCode & hashState.getMatchHashMask();
long hashBucketBit = bucket * hashState.getBitsPerMatchHashEntry();
boolean bucketIsEmpty = hashState.getMatchHashTable().getElementValue(hashBucketBit, hashState.getBitsPerTraverserField()[0]) == 0;
while (!bucketIsEmpty) {
if (matchIsEqual(hashState.getMatchHashTable(), hashBucketBit, query)) {
int selectSize = (int) hashState.getMatchHashTable().getElementValue(hashBucketBit + hashState.getBitsPerMatchHashKey(), hashState.getBitsPerSelectTableSize());
long selectBucketPointer = hashState.getMatchHashTable().getElementValue(hashBucketBit + hashState.getBitsPerMatchHashKey() + hashState.getBitsPerSelectTableSize(), hashState.getBitsPerSelectTablePointer());
result = new HollowHashIndexResult(hashState, selectBucketPointer, selectSize);
break;
}
bucket = (bucket + 1) & hashState.getMatchHashMask();
hashBucketBit = bucket * hashState.getBitsPerMatchHashEntry();
bucketIsEmpty = hashState.getMatchHashTable().getElementValue(hashBucketBit, hashState.getBitsPerTraverserField()[0]) == 0;
}
} while (hashState != hashStateVolatile);
return result;
} | @Test
public void testIndexingListOfIntTypeField() throws Exception {
mapper.add(new TypeListOfTypeString(10, 20, 30, 40, 10, 12));
mapper.add(new TypeListOfTypeString(10, 20, 30));
mapper.add(new TypeListOfTypeString(50, 51, 52));
roundTripSnapshot();
HollowHashIndex index = new HollowHashIndex(readStateEngine, "TypeListOfTypeString", "", "data.element.data.value");
Assert.assertNull(index.findMatches(10000));
Assert.assertNull(index.findMatches(-1));
assertIteratorContainsAll(index.findMatches(40).iterator(), 0);
assertIteratorContainsAll(index.findMatches(10).iterator(), 0, 1);
assertIteratorContainsAll(index.findMatches(50).iterator(), 2);
} |
@Override
public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
// will throw UnsupportedOperationException; delegate anyway for testability
return underlying().merge(key, value, remappingFunction);
} | @Test
public void testDelegationOfUnsupportedFunctionMerge() {
final BiFunction<Object, Object, Object> mockBiFunction = mock(BiFunction.class);
new PCollectionsHashMapWrapperDelegationChecker<>()
.defineMockConfigurationForUnsupportedFunction(mock -> mock.merge(eq(this), eq(this), eq(mockBiFunction)))
.defineWrapperUnsupportedFunctionInvocation(wrapper -> wrapper.merge(this, this, mockBiFunction))
.doUnsupportedFunctionDelegationCheck();
} |
public static void sqlExecute(final Object[] newRow, final PreparedStatement statement) throws SQLException {
if (ObjectUtils.isEmpty(newRow[0])) {
statement.setObject(1, UUIDUtils.getInstance().generateShortUuid());
for (int i = 1; i < newRow.length - 2; i++) {
statement.setObject(i + 1, newRow[i]);
}
statement.executeUpdate();
}
} | @Test
public void sqlExecute() {
final Object[] newRow = new Object[4];
newRow[0] = null;
newRow[1] = new Object();
newRow[2] = new Object();
newRow[3] = new Object();
Assertions.assertDoesNotThrow(() -> BaseTrigger.sqlExecute(newRow, mock(PreparedStatement.class)));
} |
@Override
public Stream<MappingField> resolveAndValidateFields(
boolean isKey,
List<MappingField> userFields,
Map<String, String> options,
InternalSerializationService serializationService
) {
Map<QueryPath, MappingField> fieldsByPath = extractFields(userFields, isKey);
PortableId portableId = getPortableId(fieldsByPath, options, isKey);
ClassDefinition classDefinition = serializationService.getPortableContext()
.lookupClassDefinition(portableId);
// Fallback option for the case when the portable objects have not been de/serialized yet
// and user fields were not provided explicitly. In this case we try to
// manually create a Portable instance and register its ClassDefinition.
if (userFields.isEmpty() && classDefinition == null) {
SerializationServiceV1 ss = (SerializationServiceV1) serializationService;
// Try to create a Portable instance with the default constructor,
// register its ClassDefinition, and throw object away.
var tempPortableObj = ss.getPortableSerializer()
.createNewPortableInstance(portableId.getFactoryId(), portableId.getClassId());
if (tempPortableObj != null) {
try {
ss.getPortableContext().lookupOrRegisterClassDefinition(tempPortableObj);
} catch (Exception e) {
// If the default constructor doesn't make the Portable fields non-null, registration fails:
// we can't register the class, so we interrupt the execution with an exception.
throw QueryException.error("Cannot create mapping for Portable type. "
+ "Please, provide the explicit definition for all columns.");
}
classDefinition = serializationService.getPortableContext().lookupClassDefinition(portableId);
}
}
return userFields.isEmpty()
? resolveFields(isKey, classDefinition)
: resolveAndValidateFields(isKey, fieldsByPath, classDefinition);
} | @Test
@Parameters({
"true, __key",
"false, this"
})
public void when_duplicateExternalName_then_throws(boolean key, String prefix) {
InternalSerializationService ss = new DefaultSerializationServiceBuilder().build();
ClassDefinition classDefinition =
new ClassDefinitionBuilder(1, 2, 3)
.addIntField("field")
.build();
ss.getPortableContext().registerClassDefinition(classDefinition);
Map<String, String> options = ImmutableMap.of(
(key ? OPTION_KEY_FACTORY_ID : OPTION_VALUE_FACTORY_ID), String.valueOf(classDefinition.getFactoryId()),
(key ? OPTION_KEY_CLASS_ID : OPTION_VALUE_CLASS_ID), String.valueOf(classDefinition.getClassId()),
(key ? OPTION_KEY_CLASS_VERSION : OPTION_VALUE_CLASS_VERSION), String.valueOf(classDefinition.getVersion())
);
assertThatThrownBy(() -> MetadataPortableResolver.INSTANCE.resolveAndValidateFields(
key,
asList(
field("field1", QueryDataType.INT, prefix + ".field"),
field("field2", QueryDataType.VARCHAR, prefix + ".field")
),
options,
ss
)).isInstanceOf(QueryException.class)
.hasMessageMatching("Duplicate external name: (__key|this).field");
} |
public static boolean matchesScope(String actualScope, Set<String> scopes) {
if (scopes.isEmpty() || scopes.contains(actualScope)) {
return true;
}
// If there is no perfect match, a stage name-level match is tried.
// This is done by a substring search over the levels of the scope.
// e.g. a scope "A/B/C/D" is matched by "A/B", but not by "A/C".
for (String scope : scopes) {
if (subPathMatches(actualScope, scope)) {
return true;
}
}
return false;
} | @Test
public void testMatchesScope() {
assertTrue(matchesScopeWithSingleFilter("Top1/Outer1/Inner1/Bottom1", "Top1"));
assertTrue(
matchesScopeWithSingleFilter("Top1/Outer1/Inner1/Bottom1", "Top1/Outer1/Inner1/Bottom1"));
assertTrue(matchesScopeWithSingleFilter("Top1/Outer1/Inner1/Bottom1", "Top1/Outer1"));
assertTrue(matchesScopeWithSingleFilter("Top1/Outer1/Inner1/Bottom1", "Top1/Outer1/Inner1"));
assertFalse(matchesScopeWithSingleFilter("Top1/Outer1/Inner1/Bottom1", "Top1/Inner1"));
assertFalse(matchesScopeWithSingleFilter("Top1/Outer1/Inner1/Bottom1", "Top1/Outer1/Inn"));
} |
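// subPathMatches is not shown in this excerpt, so the helper below is an assumption: a level-aware
// sketch consistent with the comment in matchesScope and with testMatchesScope (whole levels must
// match consecutively, so "Top1/Outer1" matches while "Top1/Inner1" and "Top1/Outer1/Inn" do not).
// It is illustrative only, not the actual Beam helper.
public class SubPathMatchSketch {

    // Returns true if 'filter', split on '/', occurs as a run of consecutive whole levels
    // inside 'actualScope'; partial level names and non-consecutive levels do not match.
    static boolean subPathMatches(String actualScope, String filter) {
        String[] scopeLevels = actualScope.split("/");
        String[] filterLevels = filter.split("/");
        for (int start = 0; start + filterLevels.length <= scopeLevels.length; start++) {
            boolean allMatch = true;
            for (int i = 0; i < filterLevels.length; i++) {
                if (!scopeLevels[start + i].equals(filterLevels[i])) {
                    allMatch = false;
                    break;
                }
            }
            if (allMatch) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        System.out.println(subPathMatches("Top1/Outer1/Inner1/Bottom1", "Top1/Outer1"));     // true
        System.out.println(subPathMatches("Top1/Outer1/Inner1/Bottom1", "Top1/Inner1"));     // false
        System.out.println(subPathMatches("Top1/Outer1/Inner1/Bottom1", "Top1/Outer1/Inn")); // false
    }
}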
@Override
public OverlayData createOverlayData(ComponentName remoteApp) {
if (!OS_SUPPORT_FOR_ACCENT) {
return EMPTY;
}
try {
final ActivityInfo activityInfo =
mLocalContext
.getPackageManager()
.getActivityInfo(remoteApp, PackageManager.GET_META_DATA);
final Context context =
mLocalContext.createPackageContext(remoteApp.getPackageName(), CONTEXT_IGNORE_SECURITY);
context.setTheme(activityInfo.getThemeResource());
fetchRemoteColors(mCurrentOverlayData, context);
Logger.d(
"OverlyDataCreatorForAndroid",
"For component %s we fetched %s",
remoteApp,
mCurrentOverlayData);
return mCurrentOverlayData;
} catch (Exception e) {
Logger.w("OverlyDataCreatorForAndroid", e, "Failed to fetch colors for %s", remoteApp);
return EMPTY;
}
} | @Test
@Config(sdk = Build.VERSION_CODES.KITKAT)
public void testAlwaysInvalidWhenPriorToLollipop() {
setupReturnedColors(R.style.HappyPathRawColors);
Assert.assertFalse(mUnderTest.createOverlayData(mComponentName).isValid());
} |
public int terminateQueuedInstances(
String workflowId, int limit, WorkflowInstance.Status status, String reason) {
TimelineEvent timelineEvent =
TimelineLogEvent.warn(TERMINATION_MESSAGE_TEMPLATE, status.name(), reason);
String timelineEventStr = toJson(timelineEvent);
return withMetricLogError(
() ->
withRetryableTransaction(
conn -> {
List<WorkflowInstance> stoppedInstances = new ArrayList<>();
try (PreparedStatement stmt =
conn.prepareStatement(TERMINATE_QUEUED_INSTANCES_QUERY)) {
int idx = 0;
stmt.setString(++idx, status.name());
stmt.setString(++idx, timelineEventStr);
stmt.setString(++idx, workflowId);
stmt.setInt(++idx, limit);
try (ResultSet result = stmt.executeQuery()) {
while (result.next()) {
WorkflowInstance instance =
fromJson(result.getString(1), WorkflowInstance.class);
stoppedInstances.add(instance);
}
}
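                    // Publish a single update job event covering all instances stopped by this call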
if (!stoppedInstances.isEmpty()) {
WorkflowInstanceUpdateJobEvent jobEvent =
WorkflowInstanceUpdateJobEvent.create(
stoppedInstances, status, System.currentTimeMillis());
publisher.publishOrThrow(
jobEvent,
"Failed sending an update job event to notify stopping workflow instances.");
}
}
return stoppedInstances.size();
}),
"terminateQueuedInstances",
"Failed to terminate the queued workflow instances for workflow {}",
workflowId);
} | @Test
public void testTerminateQueuedInstances() throws Exception {
WorkflowInstance wfi1 = loadObject(TEST_WORKFLOW_INSTANCE, WorkflowInstance.class);
wfi1.setWorkflowUuid("wfi1-uuid");
wfi1.setWorkflowInstanceId(100L);
WorkflowInstance wfi2 = loadObject(TEST_WORKFLOW_INSTANCE, WorkflowInstance.class);
wfi2.setWorkflowUuid("wfi2-uuid");
wfi2.setWorkflowInstanceId(101L);
WorkflowInstance wfi3 = loadObject(TEST_WORKFLOW_INSTANCE, WorkflowInstance.class);
wfi3.setWorkflowUuid("wfi3-uuid");
wfi3.setWorkflowInstanceId(102L);
Optional<Details> res =
instanceDao.runWorkflowInstances(TEST_WORKFLOW_ID, Arrays.asList(wfi1, wfi2, wfi3), 2);
assertFalse(res.isPresent());
int cnt =
instanceDao.terminateQueuedInstances(
TEST_WORKFLOW_ID, 3, WorkflowInstance.Status.STOPPED, "test-reason");
assertEquals(3, cnt);
verify(publisher, times(1)).publishOrThrow(any(WorkflowInstanceUpdateJobEvent.class), any());
cnt =
instanceDao.terminateQueuedInstances(
TEST_WORKFLOW_ID, 2, WorkflowInstance.Status.FAILED, "test-reason");
assertEquals(1, cnt);
verify(publisher, times(2)).publishOrThrow(any(WorkflowInstanceUpdateJobEvent.class), any());
cnt =
instanceDao.terminateQueuedInstances(
TEST_WORKFLOW_ID, 1, WorkflowInstance.Status.STOPPED, "test-reason");
assertEquals(0, cnt);
verify(publisher, times(2)).publishOrThrow(any(WorkflowInstanceUpdateJobEvent.class), any());
MaestroTestHelper.removeWorkflowInstance(dataSource, TEST_WORKFLOW_ID, 100);
MaestroTestHelper.removeWorkflowInstance(dataSource, TEST_WORKFLOW_ID, 101);
MaestroTestHelper.removeWorkflowInstance(dataSource, TEST_WORKFLOW_ID, 102);
} |
public T send() throws IOException {
return web3jService.send(this, responseType);
} | @Test
public void testEthCompileSolidity() throws Exception {
web3j.ethCompileSolidity(
"contract test { function multiply(uint a) returns(uint d) { return a * 7; } }")
.send();
verifyResult(
"{\"jsonrpc\":\"2.0\",\"method\":\"eth_compileSolidity\","
+ "\"params\":[\"contract test { function multiply(uint a) returns(uint d) {"
+ " return a * 7; } }\"],\"id\":1}");
} |
public static String getCheckJobResultPath(final String jobId, final String checkJobId) {
return String.join("/", getCheckJobIdsRootPath(jobId), checkJobId);
} | @Test
void assertGetCheckJobResultPath() {
assertThat(PipelineMetaDataNode.getCheckJobResultPath(jobId, "j02fx123"), is(jobCheckRootPath + "/job_ids/j02fx123"));
} |
public void stop() {
if (stopped.compareAndSet(false, true)) {
LOG.debug("Stopping the PooledConnectionFactory, number of connections in cache: {}",
connectionsPool != null ? connectionsPool.getNumActive() : 0);
try {
if (connectionsPool != null) {
connectionsPool.close();
connectionsPool = null;
}
} catch (Exception e) {
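                // Ignore failures while closing the pool; the factory is shutting down anyway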
}
}
} | @Test(timeout = 60000)
public void testInstanceOf() throws Exception {
PooledConnectionFactory pcf = new PooledConnectionFactory();
assertTrue(pcf instanceof QueueConnectionFactory);
assertTrue(pcf instanceof TopicConnectionFactory);
pcf.stop();
} |
@Override
public Set<K8sNode> nodes() {
return nodeStore.nodes();
} | @Test
public void testGetNodesByType() {
assertEquals(ERR_SIZE, 2, target.nodes(MINION).size());
assertTrue(ERR_NOT_FOUND, target.nodes(MINION).contains(MINION_2));
assertTrue(ERR_NOT_FOUND, target.nodes(MINION).contains(MINION_3));
} |
@Nullable
public static String getValueFromStaticMapping(String mapping, String key) {
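    // Mapping is expected in the form "key1=value1; key2=value2"; entries are trimmed and empty ones skipped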
Map<String, String> m = Splitter.on(";")
.omitEmptyStrings()
.trimResults()
.withKeyValueSeparator("=")
.split(mapping);
return m.get(key);
} | @Test
public void getValueFromStaticMapping() throws Exception {
String mapping = "k=v; a=a; alice=bob; id1=userA; foo=bar";
assertEquals("v", CommonUtils.getValueFromStaticMapping(mapping, "k"));
assertEquals("a", CommonUtils.getValueFromStaticMapping(mapping, "a"));
assertEquals("bob", CommonUtils.getValueFromStaticMapping(mapping, "alice"));
assertEquals("userA", CommonUtils.getValueFromStaticMapping(mapping, "id1"));
assertEquals("bar", CommonUtils.getValueFromStaticMapping(mapping, "foo"));
assertEquals(null, CommonUtils.getValueFromStaticMapping(mapping, ""));
assertEquals(null, CommonUtils.getValueFromStaticMapping(mapping, "/"));
assertEquals(null, CommonUtils.getValueFromStaticMapping(mapping, "v"));
assertEquals(null, CommonUtils.getValueFromStaticMapping(mapping, "nonexist"));
} |