focal_method | test_case |
---|---|
public void parse(InputStream stream, ContentHandler handler, Metadata metadata,
ParseContext context) throws IOException, SAXException, TikaException {
// Get the mime4j configuration, or use a default one
MimeConfig config =
new MimeConfig.Builder().setMaxLineLen(100000).setMaxHeaderLen(100000).build();
config = context.get(MimeConfig.class, config);
Detector localDetector = context.get(Detector.class);
if (localDetector == null) {
//lazily load this if necessary
if (detector == null) {
EmbeddedDocumentUtil embeddedDocumentUtil = new EmbeddedDocumentUtil(context);
detector = embeddedDocumentUtil.getDetector();
}
localDetector = detector;
}
MimeStreamParser parser =
new MimeStreamParser(config, null, new DefaultBodyDescriptorBuilder());
XHTMLContentHandler xhtml = new XHTMLContentHandler(handler, metadata);
MailContentHandler mch = new MailContentHandler(xhtml, localDetector, metadata, context,
config.isStrictParsing(), extractAllAlternatives);
parser.setContentHandler(mch);
parser.setContentDecoding(true);
parser.setNoRecurse();
xhtml.startDocument();
TikaInputStream tstream = TikaInputStream.get(stream);
try {
parser.parse(tstream);
} catch (IOException e) {
tstream.throwIfCauseOf(e);
throw new TikaException("Failed to parse an email message", e);
} catch (MimeException e) {
// Unwrap the exception in case it was not thrown by mime4j
Throwable cause = e.getCause();
if (cause instanceof TikaException) {
throw (TikaException) cause;
} else if (cause instanceof SAXException) {
throw (SAXException) cause;
} else {
throw new TikaException("Failed to parse an email message", e);
}
}
xhtml.endDocument();
} | @Test
public void testLongHeader() throws Exception {
StringBuilder inputBuilder = new StringBuilder();
for (int i = 0; i < 2000; ++i) {
inputBuilder.append( //len > 50
"really really really really really really long name ");
}
String name = inputBuilder.toString();
byte[] data = ("Status: 520\r\nFrom: " + name + "\r\n\r\n").getBytes(US_ASCII);
ContentHandler handler = new DefaultHandler();
Metadata metadata = new Metadata();
ParseContext context = new ParseContext();
try {
EXTRACT_ALL_ALTERNATIVES_PARSER
.parse(new ByteArrayInputStream(data), handler, metadata, context);
fail();
} catch (TikaException expected) {
}
MimeConfig config = new MimeConfig.Builder().setMaxHeaderLen(-1).setMaxLineLen(-1).build();
context.set(MimeConfig.class, config);
EXTRACT_ALL_ALTERNATIVES_PARSER
.parse(new ByteArrayInputStream(data), handler, metadata, context);
assertEquals(name.trim(), metadata.get(TikaCoreProperties.CREATOR));
} |
public T send() throws IOException {
return web3jService.send(this, responseType);
} | @Test
public void testEthSubmitWork() throws Exception {
web3j.ethSubmitWork(
"0x0000000000000001",
"0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
"0xD1FE5700000000000000000000000000D1FE5700000000000000000000000000")
.send();
verifyResult(
"{\"jsonrpc\":\"2.0\",\"method\":\"eth_submitWork\","
+ "\"params\":[\"0x0000000000000001\","
+ "\"0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef\","
+ "\"0xD1FE5700000000000000000000000000D1FE5700000000000000000000000000\"],"
+ "\"id\":1}");
} |
@Override
public long size() throws IOException {
return delegate.size();
} | @Test
public void testSize() throws IOException {
assertEquals(delegate.size(), channelUnderTest.size());
} |
static String consumerGroupJoinKey(String groupId, String memberId) {
return "join-" + groupId + "-" + memberId;
} | @Test
public void testConsumerGroupMemberUsingClassicProtocolFencedWhenJoinTimeout() {
String groupId = "group-id";
String memberId = Uuid.randomUuid().toString();
int rebalanceTimeout = 500;
List<ConsumerGroupMemberMetadataValue.ClassicProtocol> protocols = Collections.singletonList(
new ConsumerGroupMemberMetadataValue.ClassicProtocol()
.setName("range")
.setMetadata(Utils.toArray(ConsumerProtocol.serializeSubscription(
new ConsumerPartitionAssignor.Subscription(
Collections.singletonList("foo"),
null,
Collections.emptyList()
)
)))
);
// Consumer group with a member using the classic protocol whose member epoch is smaller than the group epoch.
GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
.withConsumerGroupAssignors(Collections.singletonList(new MockPartitionAssignor("range")))
.withConsumerGroup(new ConsumerGroupBuilder(groupId, 10)
.withMember(new ConsumerGroupMember.Builder(memberId)
.setRebalanceTimeoutMs(rebalanceTimeout)
.setClassicMemberMetadata(
new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata()
.setSessionTimeoutMs(5000)
.setSupportedProtocols(protocols)
)
.setMemberEpoch(9)
.build()))
.build();
// Heartbeat to schedule the join timeout.
HeartbeatRequestData request = new HeartbeatRequestData()
.setGroupId(groupId)
.setMemberId(memberId)
.setGenerationId(9);
assertEquals(
Errors.REBALANCE_IN_PROGRESS.code(),
context.sendClassicGroupHeartbeat(request).response().errorCode()
);
context.assertSessionTimeout(groupId, memberId, 5000);
context.assertJoinTimeout(groupId, memberId, rebalanceTimeout);
// Advance clock by rebalance timeout + 1.
List<ExpiredTimeout<Void, CoordinatorRecord>> timeouts = context.sleep(rebalanceTimeout + 1);
// The member is fenced from the group.
assertEquals(1, timeouts.size());
ExpiredTimeout<Void, CoordinatorRecord> timeout = timeouts.get(0);
assertEquals(consumerGroupJoinKey(groupId, memberId), timeout.key);
assertRecordsEquals(
Arrays.asList(
// The member is removed.
GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId),
GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId),
GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId),
// The group epoch is bumped.
GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11)
),
timeout.result.records()
);
} |
@Override
@CacheEvict(cacheNames = RedisKeyConstants.NOTIFY_TEMPLATE,
allEntries = true) // allEntries evicts the whole cache, because the code field may change, which makes targeted eviction impractical
public void updateNotifyTemplate(NotifyTemplateSaveReqVO updateReqVO) {
// Validate that the template exists
validateNotifyTemplateExists(updateReqVO.getId());
// Validate that the in-app notification code is not duplicated
validateNotifyTemplateCodeDuplicate(updateReqVO.getId(), updateReqVO.getCode());
// Update
NotifyTemplateDO updateObj = BeanUtils.toBean(updateReqVO, NotifyTemplateDO.class);
updateObj.setParams(parseTemplateContentParams(updateObj.getContent()));
notifyTemplateMapper.updateById(updateObj);
} | @Test
public void testUpdateNotifyTemplate_success() {
// mock data
NotifyTemplateDO dbNotifyTemplate = randomPojo(NotifyTemplateDO.class);
notifyTemplateMapper.insert(dbNotifyTemplate); // @Sql: insert an existing record first
// Prepare the request
NotifyTemplateSaveReqVO reqVO = randomPojo(NotifyTemplateSaveReqVO.class, o -> {
o.setId(dbNotifyTemplate.getId()); // set the ID of the record to update
o.setStatus(randomCommonStatus());
});
// Invoke
notifyTemplateService.updateNotifyTemplate(reqVO);
// Verify the update is correct
NotifyTemplateDO notifyTemplate = notifyTemplateMapper.selectById(reqVO.getId()); // fetch the latest record
assertPojoEquals(reqVO, notifyTemplate);
} |
Map<String, File> scanExistingUsers() throws IOException {
Map<String, File> users = new HashMap<>();
File[] userDirectories = listUserDirectories();
if (userDirectories != null) {
for (File directory : userDirectories) {
String userId = idStrategy.idFromFilename(directory.getName());
users.put(userId, directory);
}
}
addEmptyUsernameIfExists(users);
return users;
} | @Test
public void scanExistingUsersNoUsersDirectory() throws IOException {
UserIdMigrator migrator = createUserIdMigrator();
Map<String, File> userMappings = migrator.scanExistingUsers();
assertThat(userMappings.keySet(), empty());
} |
@Override
public int hashCode() {
return Objects.hash(targetImage, imageDigest, imageId, tags, imagePushed);
} | @Test
public void testEquality_differentTargetImage() {
JibContainer container1 = new JibContainer(targetImage1, digest1, digest2, tags1, true);
JibContainer container2 = new JibContainer(targetImage2, digest1, digest2, tags1, true);
Assert.assertNotEquals(container1, container2);
Assert.assertNotEquals(container1.hashCode(), container2.hashCode());
} |
public String getGetterName(String propertyName, JType type, JsonNode node) {
propertyName = getPropertyNameForAccessor(propertyName, node);
String prefix = type.equals(type.owner()._ref(boolean.class)) ? "is" : "get";
String getterName;
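// JavaBeans convention: if the second character is uppercase, the property name keeps its
// original casing (e.g. "oAuth2State" -> "getoAuth2State"); otherwise the first letter is capitalized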
if (propertyName.length() > 1 && Character.isUpperCase(propertyName.charAt(1))) {
getterName = prefix + propertyName;
} else {
getterName = prefix + capitalize(propertyName);
}
if (getterName.equals("getClass")) {
getterName = "getClass_";
}
return getterName;
} | @Test
public void testGetterNamedCorrectly() {
assertThat(nameHelper.getGetterName("foo", new JCodeModel().BOOLEAN, NODE), is("isFoo"));
assertThat(nameHelper.getGetterName("foo", new JCodeModel().INT, NODE), is("getFoo"));
assertThat(nameHelper.getGetterName("oAuth2State", new JCodeModel().INT, NODE), is("getoAuth2State"));
assertThat(nameHelper.getGetterName("URL", new JCodeModel().INT, NODE), is("getUrl"));
} |
@Override
public ExportResult<MediaContainerResource> export(UUID jobId, AD authData,
Optional<ExportInformation> exportInfo) throws Exception {
ExportResult<PhotosContainerResource> per = exportPhotos(jobId, authData, exportInfo);
if (per.getThrowable().isPresent()) {
return new ExportResult<>(per.getThrowable().get());
}
ExportResult<VideosContainerResource> ver = exportVideos(jobId, authData, exportInfo);
if (ver.getThrowable().isPresent()) {
return new ExportResult<>(ver.getThrowable().get());
}
return mergeResults(per, ver);
} | @Test
public void shouldMergePhotoAndVideoResults() throws Exception {
MediaContainerResource mcr = new MediaContainerResource(albums, photos, videos);
ExportResult<MediaContainerResource> exp = new ExportResult<>(ResultType.END, mcr);
Optional<ExportInformation> ei = Optional.of(new ExportInformation(null, mcr));
ExportResult<MediaContainerResource> res = mediaExporter.export(null, null, ei);
assertEquals(exp, res);
} |
public boolean isValid(String value) {
if (value == null) {
return false;
}
URI uri; // ensure value is a valid URI
try {
uri = new URI(value);
} catch (URISyntaxException e) {
return false;
}
// OK, perform additional validation
String scheme = uri.getScheme();
if (!isValidScheme(scheme)) {
return false;
}
String authority = uri.getRawAuthority();
if ("file".equals(scheme) && (authority == null || "".equals(authority))) { // Special case - file: allows an empty authority
return true; // this is a local file - nothing more to do here
} else if ("file".equals(scheme) && authority != null && authority.contains(":")) {
return false;
} else {
// Validate the authority
if (!isValidAuthority(authority)) {
return false;
}
}
if (!isValidPath(uri.getRawPath())) {
return false;
}
if (!isValidQuery(uri.getRawQuery())) {
return false;
}
if (!isValidFragment(uri.getRawFragment())) {
return false;
}
return true;
} | @Test
public void testValidator380() {
UrlValidator validator = new UrlValidator();
assertTrue(validator.isValid("http://www.apache.org:80/path"));
assertTrue(validator.isValid("http://www.apache.org:8/path"));
assertTrue(validator.isValid("http://www.apache.org:/path"));
} |
@Override
public ConfigOperateResult insertOrUpdateTagCas(final ConfigInfo configInfo, final String tag, final String srcIp,
final String srcUser) {
if (findConfigInfo4TagState(configInfo.getDataId(), configInfo.getGroup(), configInfo.getTenant(), tag) == null) {
return addConfigInfo4Tag(configInfo, tag, srcIp, srcUser);
} else {
return updateConfigInfo4TagCas(configInfo, tag, srcIp, srcUser);
}
} | @Test
void testInsertOrUpdateTagCasOfAdd() {
String dataId = "dataId111222";
String group = "group";
String tenant = "tenant";
String appName = "appname1234";
String content = "c12345";
ConfigInfo configInfo = new ConfigInfo(dataId, group, tenant, appName, content);
configInfo.setEncryptedDataKey("key23456");
configInfo.setMd5("casMd5");
// mock query: config state is empty at first, then returns the object after the insert
ConfigInfoStateWrapper configInfoStateWrapper = new ConfigInfoStateWrapper();
configInfoStateWrapper.setLastModified(System.currentTimeMillis());
configInfoStateWrapper.setId(234567890L);
String tag = "tag123";
Mockito.when(databaseOperate.queryOne(anyString(), eq(new Object[] {dataId, group, tenant, tag}),
eq(CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER))).thenReturn(null).thenReturn(configInfoStateWrapper);
String srcIp = "ip345678";
String srcUser = "user1234567";
ConfigOperateResult configOperateResult = embeddedConfigInfoTagPersistService.insertOrUpdateTagCas(configInfo, tag, srcIp, srcUser);
// verify the insert SQL context was added exactly once
embeddedStorageContextHolderMockedStatic.verify(
() -> EmbeddedStorageContextHolder.addSqlContext(anyString(), eq(dataId), eq(group), eq(tenant), eq(tag), eq(appName),
eq(content), eq(MD5Utils.md5Hex(content, Constants.PERSIST_ENCODE)), eq(srcIp), eq(srcUser), any(Timestamp.class),
any(Timestamp.class)), times(1));
assertEquals(configInfoStateWrapper.getId(), configOperateResult.getId());
assertEquals(configInfoStateWrapper.getLastModified(), configOperateResult.getLastModified());
} |
public static List<Annotation> scanMethod(Method method) {
return AnnotationScanner.TYPE_HIERARCHY.getAnnotationsIfSupport(method);
} | @Test
public void scanMethodTest() {
// TargetClass -> TargetSuperClass
// -> TargetSuperInterface
final Method method = ReflectUtil.getMethod(TargetClass.class, "testMethod");
assertNotNull(method);
final List<Annotation> annotations = AnnotationUtil.scanMethod(method);
assertEquals(3, annotations.size());
assertEquals("TargetClass", ((AnnotationForTest)annotations.get(0)).value());
assertEquals("TargetSuperClass", ((AnnotationForTest)annotations.get(1)).value());
assertEquals("TargetSuperInterface", ((AnnotationForTest)annotations.get(2)).value());
} |
public static OptExpression bind(Pattern pattern, GroupExpression groupExpression) {
Binder binder = new Binder(pattern, groupExpression);
return binder.next();
} | @Test
public void testBinder3() {
OptExpression expr = OptExpression.create(new MockOperator(OperatorType.LOGICAL_OLAP_SCAN),
new OptExpression(new MockOperator(OperatorType.LOGICAL_JOIN)),
new OptExpression(new MockOperator(OperatorType.LOGICAL_OLAP_SCAN)));
Pattern pattern = Pattern.create(OperatorType.PATTERN_LEAF)
.addChildren(Pattern.create(OperatorType.LOGICAL_JOIN))
.addChildren(Pattern.create(OperatorType.PATTERN_LEAF));
Memo memo = new Memo();
OptExpression result = Binder.bind(pattern, memo.init(expr));
assertEquals(OperatorType.LOGICAL_OLAP_SCAN, result.getOp().getOpType());
assertEquals(OperatorType.LOGICAL_JOIN, result.inputAt(0).getOp().getOpType());
assertEquals(OperatorType.LOGICAL_OLAP_SCAN, result.inputAt(1).getOp().getOpType());
} |
@CanIgnoreReturnValue
public final Ordered containsExactly(@Nullable Object @Nullable ... varargs) {
List<@Nullable Object> expected =
(varargs == null) ? newArrayList((@Nullable Object) null) : asList(varargs);
return containsExactlyElementsIn(
expected, varargs != null && varargs.length == 1 && varargs[0] instanceof Iterable);
} | @Test
public void iterableContainsExactlyWithDuplicates() {
assertThat(asList(1, 2, 2, 2, 3)).containsExactly(1, 2, 2, 2, 3);
} |
public void setBaseResource(Resource baseResource) {
handler.setBaseResource(baseResource);
} | @Test
void setsBaseResource(@TempDir Path tempDir) throws Exception {
final Resource testResource = Resource.newResource(tempDir.resolve("dir").toUri());
environment.setBaseResource(testResource);
assertThat(handler.getBaseResource()).isEqualTo(testResource);
} |
@Override
public void transform(Message message, DataType fromType, DataType toType) {
if (message.getHeaders().containsKey(Ddb2Constants.ITEM) ||
message.getHeaders().containsKey(Ddb2Constants.KEY)) {
return;
}
JsonNode jsonBody = getBodyAsJsonNode(message);
String operation
= Optional.ofNullable(jsonBody.get("operation")).map(JsonNode::asText).orElse(Ddb2Operations.PutItem.name());
if (message.getExchange().hasProperties() && message.getExchange().getProperty("operation", String.class) != null) {
operation = message.getExchange().getProperty("operation", String.class);
}
if (message.getHeaders().containsKey(Ddb2Constants.OPERATION)) {
operation = message.getHeader(Ddb2Constants.OPERATION, Ddb2Operations.class).name();
}
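// operation precedence: message header > exchange property > "operation" field in the JSON body (which defaults to PutItem)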
JsonNode key = jsonBody.get("key");
JsonNode item = jsonBody.get("item");
Map<String, Object> keyProps;
if (key != null) {
keyProps = dataFormat.getObjectMapper().convertValue(key, new TypeReference<>() {
});
} else {
keyProps = dataFormat.getObjectMapper().convertValue(jsonBody, new TypeReference<>() {
});
}
Map<String, Object> itemProps;
if (item != null) {
itemProps = dataFormat.getObjectMapper().convertValue(item, new TypeReference<>() {
});
} else {
itemProps = keyProps;
}
final Map<String, AttributeValue> keyMap = getAttributeValueMap(keyProps);
switch (Ddb2Operations.valueOf(operation)) {
case PutItem:
message.setHeader(Ddb2Constants.OPERATION, Ddb2Operations.PutItem);
message.setHeader(Ddb2Constants.ITEM, getAttributeValueMap(itemProps));
setHeaderIfNotPresent(Ddb2Constants.RETURN_VALUES, ReturnValue.ALL_OLD.toString(), message);
break;
case UpdateItem:
message.setHeader(Ddb2Constants.OPERATION, Ddb2Operations.UpdateItem);
message.setHeader(Ddb2Constants.KEY, keyMap);
message.setHeader(Ddb2Constants.UPDATE_VALUES, getAttributeValueUpdateMap(itemProps));
setHeaderIfNotPresent(Ddb2Constants.RETURN_VALUES, ReturnValue.ALL_NEW.toString(), message);
break;
case DeleteItem:
message.setHeader(Ddb2Constants.OPERATION, Ddb2Operations.DeleteItem);
message.setHeader(Ddb2Constants.KEY, keyMap);
setHeaderIfNotPresent(Ddb2Constants.RETURN_VALUES, ReturnValue.ALL_OLD.toString(), message);
break;
default:
throw new UnsupportedOperationException(String.format("Unsupported operation '%s'", operation));
}
} | @Test
@SuppressWarnings("unchecked")
void shouldMapPutItemHeaders() throws Exception {
Exchange exchange = new DefaultExchange(camelContext);
exchange.getMessage().setBody(Json.mapper().readTree(itemJson));
exchange.setProperty("operation", Ddb2Operations.PutItem.name());
transformer.transform(exchange.getMessage(), DataType.ANY, new DataType(AWS_2_DDB_APPLICATION_JSON_TRANSFORMER));
Assertions.assertTrue(exchange.getMessage().hasHeaders());
Assertions.assertEquals(Ddb2Operations.PutItem, exchange.getMessage().getHeader(Ddb2Constants.OPERATION));
Assertions.assertEquals(ReturnValue.ALL_OLD.toString(), exchange.getMessage().getHeader(Ddb2Constants.RETURN_VALUES));
assertAttributeValueMap(exchange.getMessage().getHeader(Ddb2Constants.ITEM, Map.class));
} |
@Description("count number of set bits in 2's complement representation")
@ScalarFunction
@SqlType(StandardTypes.BIGINT)
public static long bitCount(@SqlType(StandardTypes.BIGINT) long num, @SqlType(StandardTypes.BIGINT) long bits)
{
if (bits == MAX_BITS) {
return Long.bitCount(num);
}
if (bits <= 1 || bits > MAX_BITS) {
throw new PrestoException(INVALID_FUNCTION_ARGUMENT, "Bits specified in bit_count must be between 2 and 64, got " + bits);
}
long lowBitsMask = (1L << (bits - 1)) - 1; // set the least (bits - 1) bits
if (num > lowBitsMask || num < ~lowBitsMask) {
throw new PrestoException(INVALID_FUNCTION_ARGUMENT, "Number must be representable with the bits specified. " + num + " can not be represented with " + bits + " bits");
}
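// keep only the lowest 'bits' bits so that negative values are counted in their bits-wide two's complement form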
long mask = (1L << bits) - 1;
return Long.bitCount(num & mask);
} | @Test
public void testBitCount()
{
assertFunction("bit_count(0, 64)", BIGINT, 0L);
assertFunction("bit_count(7, 64)", BIGINT, 3L);
assertFunction("bit_count(24, 64)", BIGINT, 2L);
assertFunction("bit_count(-8, 64)", BIGINT, 61L);
assertFunction("bit_count(" + Integer.MAX_VALUE + ", 64)", BIGINT, 31L);
assertFunction("bit_count(" + Integer.MIN_VALUE + ", 64)", BIGINT, 33L);
assertFunction("bit_count(" + Long.MAX_VALUE + ", 64)", BIGINT, 63L);
assertFunction("bit_count(-" + Long.MAX_VALUE + "-1, 64)", BIGINT, 1L); // bit_count(MIN_VALUE, 64)
assertFunction("bit_count(0, 32)", BIGINT, 0L);
assertFunction("bit_count(CAST (-8 AS SMALLINT), 6)", BIGINT, 3L);
assertFunction("bit_count(7, 32)", BIGINT, 3L);
assertFunction("bit_count(24, 32)", BIGINT, 2L);
assertFunction("bit_count(-8, 32)", BIGINT, 29L);
assertFunction("bit_count(" + Integer.MAX_VALUE + ", 32)", BIGINT, 31L);
assertFunction("bit_count(" + Integer.MIN_VALUE + ", 32)", BIGINT, 1L);
assertInvalidFunction("bit_count(" + (Integer.MAX_VALUE + 1L) + ", 32)", "Number must be representable with the bits specified. 2147483648 can not be represented with 32 bits");
assertInvalidFunction("bit_count(" + (Integer.MIN_VALUE - 1L) + ", 32)", "Number must be representable with the bits specified. -2147483649 can not be represented with 32 bits");
assertFunction("bit_count(1152921504598458367, 62)", BIGINT, 59L);
assertFunction("bit_count(-1, 62)", BIGINT, 62L);
assertFunction("bit_count(33554132, 26)", BIGINT, 20L);
assertFunction("bit_count(-1, 26)", BIGINT, 26L);
assertInvalidFunction("bit_count(1152921504598458367, 60)", "Number must be representable with the bits specified. 1152921504598458367 can not be represented with 60 bits");
assertInvalidFunction("bit_count(33554132, 25)", "Number must be representable with the bits specified. 33554132 can not be represented with 25 bits");
assertInvalidFunction("bit_count(0, -1)", "Bits specified in bit_count must be between 2 and 64, got -1");
assertInvalidFunction("bit_count(0, 1)", "Bits specified in bit_count must be between 2 and 64, got 1");
assertInvalidFunction("bit_count(0, 65)", "Bits specified in bit_count must be between 2 and 64, got 65");
} |
protected void removeAllModels() {
int numModelsRemoved = models.size();
pauseModelListNotifications();
models.clear();
resumeModelListNotifications();
notifyItemRangeRemoved(0, numModelsRemoved);
} | @Test
public void testRemoveAllModels() {
for (int i = 0; i < 10; i++) {
TestModel model = new TestModel();
testAdapter.addModels(model);
}
testAdapter.removeAllModels();
verify(observer).onItemRangeRemoved(0, 10);
assertEquals(0, testAdapter.models.size());
checkDifferState();
} |
Record deserialize(Object data) {
return (Record) fieldDeserializer.value(data);
} | @Test
public void testListDeserialize() {
Schema schema =
new Schema(optional(1, "list_type", Types.ListType.ofOptional(2, Types.LongType.get())));
StructObjectInspector inspector =
ObjectInspectorFactory.getStandardStructObjectInspector(
Arrays.asList("list_type"),
Arrays.asList(
ObjectInspectorFactory.getStandardListObjectInspector(
PrimitiveObjectInspectorFactory.writableLongObjectInspector)));
Deserializer deserializer =
new Deserializer.Builder()
.schema(schema)
.writerInspector((StructObjectInspector) IcebergObjectInspector.create(schema))
.sourceInspector(inspector)
.build();
Record expected = GenericRecord.create(schema);
expected.set(0, Collections.singletonList(1L));
Object[] data = new Object[] {new Object[] {new LongWritable(1L)}};
Record actual = deserializer.deserialize(data);
assertThat(actual).isEqualTo(expected);
} |
public static SchemaAndValue parseString(String value) {
if (value == null) {
return NULL_SCHEMA_AND_VALUE;
}
if (value.isEmpty()) {
return new SchemaAndValue(Schema.STRING_SCHEMA, value);
}
ValueParser parser = new ValueParser(new Parser(value));
return parser.parse(false);
} | @Test
public void shouldParseDateStringAsDateInArray() throws Exception {
String dateStr = "2019-08-23";
String arrayStr = "[" + dateStr + "]";
SchemaAndValue result = Values.parseString(arrayStr);
assertEquals(Type.ARRAY, result.schema().type());
Schema elementSchema = result.schema().valueSchema();
assertEquals(Type.INT32, elementSchema.type());
assertEquals(Date.LOGICAL_NAME, elementSchema.name());
java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_DATE_FORMAT_PATTERN).parse(dateStr);
assertEquals(Collections.singletonList(expected), result.value());
} |
public void addIndexes(int maxIndex, int[] dictionaryIndexes, int indexCount)
{
if (indexCount == 0 && indexRetainedBytes > 0) {
// Ignore empty segment, since there are other segments present.
return;
}
checkState(maxIndex >= lastMaxIndex, "LastMax is greater than the current max");
lastMaxIndex = maxIndex;
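// choose the narrowest index width (byte/short/int) that can still represent maxIndex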
if (maxIndex <= Byte.MAX_VALUE) {
byte[] byteIndexes = new byte[indexCount];
for (int i = 0; i < indexCount; i++) {
byteIndexes[i] = (byte) dictionaryIndexes[i];
}
appendByteIndexes(byteIndexes);
}
else if (maxIndex <= Short.MAX_VALUE) {
short[] shortIndexes = new short[indexCount];
for (int i = 0; i < indexCount; i++) {
shortIndexes[i] = (short) dictionaryIndexes[i];
}
appendShortIndexes(shortIndexes);
}
else {
int[] intIndexes = Arrays.copyOf(dictionaryIndexes, indexCount);
appendIntegerIndexes(intIndexes);
}
} | @Test
public void testByteIndexes()
{
int[] dictionaryIndexes = createIndexArray(Byte.MAX_VALUE + 1, MAX_DICTIONARY_INDEX);
for (int length : ImmutableList.of(0, 10, dictionaryIndexes.length)) {
DictionaryRowGroupBuilder rowGroupBuilder = new DictionaryRowGroupBuilder();
rowGroupBuilder.addIndexes(Byte.MAX_VALUE, dictionaryIndexes, length);
byte[] byteIndexes = getByteIndexes(rowGroupBuilder);
assertEquals(length, byteIndexes.length);
for (int i = 0; i < length; i++) {
assertEquals(dictionaryIndexes[i], byteIndexes[i]);
}
}
} |
@Override
public Flux<BooleanResponse<RenameCommand>> rename(Publisher<RenameCommand> commands) {
return execute(commands, command -> {
Assert.notNull(command.getKey(), "Key must not be null!");
Assert.notNull(command.getNewKey(), "New name must not be null!");
byte[] keyBuf = toByteArray(command.getKey());
byte[] newKeyBuf = toByteArray(command.getNewKey());
if (executorService.getConnectionManager().calcSlot(keyBuf) == executorService.getConnectionManager().calcSlot(newKeyBuf)) {
return super.rename(commands);
}
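// keys hash to different slots, so RENAME cannot run server-side:
// emulate it with DUMP + RESTORE (preserving the TTL) and delete the original key on success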
return read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.DUMP, keyBuf)
.filter(Objects::nonNull)
.zipWith(
Mono.defer(() -> pTtl(command.getKey())
.filter(Objects::nonNull)
.map(ttl -> Math.max(0, ttl))
.switchIfEmpty(Mono.just(0L))
)
)
.flatMap(valueAndTtl -> {
return write(newKeyBuf, StringCodec.INSTANCE, RedisCommands.RESTORE, newKeyBuf, valueAndTtl.getT2(), valueAndTtl.getT1());
})
.thenReturn(new BooleanResponse<>(command, true))
.doOnSuccess((ignored) -> del(command.getKey()));
});
} | @Test
public void testRename() {
testInClusterReactive(connection -> {
connection.stringCommands().set(originalKey, value).block();
if (hasTtl) {
connection.keyCommands().expire(originalKey, Duration.ofSeconds(1000)).block();
}
Integer originalSlot = getSlotForKey(originalKey, (RedissonReactiveRedisClusterConnection) connection);
newKey = getNewKeyForSlot(new String(originalKey.array()), getTargetSlot(originalSlot), connection);
Boolean response = connection.keyCommands().rename(originalKey, newKey).block();
assertThat(response).isTrue();
final ByteBuffer newKeyValue = connection.stringCommands().get(newKey).block();
assertThat(newKeyValue).isEqualTo(value);
if (hasTtl) {
assertThat(connection.keyCommands().ttl(newKey).block()).isGreaterThan(0);
} else {
assertThat(connection.keyCommands().ttl(newKey).block()).isEqualTo(-1);
}
});
} |
public static void trace(Logger logger, String msg) {
if (logger == null) {
return;
}
if (logger.isTraceEnabled()) {
logger.trace(msg);
}
} | @Test
void testTrace() {
Logger logger = Mockito.mock(Logger.class);
when(logger.isTraceEnabled()).thenReturn(true);
LogHelper.trace(logger, "trace");
verify(logger).trace("trace");
Throwable t = new RuntimeException();
LogHelper.trace(logger, t);
verify(logger).trace(t);
LogHelper.trace(logger, "trace", t);
verify(logger).trace("trace", t);
} |
@Override
public Collection<LocalDataQueryResultRow> getRows(final ShowComputeNodeInfoStatement sqlStatement, final ContextManager contextManager) {
ComputeNodeInstance instance = contextManager.getComputeNodeInstanceContext().getInstance();
InstanceMetaData instanceMetaData = instance.getMetaData();
String modeType = contextManager.getComputeNodeInstanceContext().getModeConfiguration().getType();
return Collections.singletonList(new LocalDataQueryResultRow(instanceMetaData.getId(), instanceMetaData.getIp(),
instanceMetaData instanceof ProxyInstanceMetaData ? ((ProxyInstanceMetaData) instanceMetaData).getPort() : -1,
instance.getState().getCurrentState(), modeType, instance.getWorkerId(), String.join(",", instance.getLabels()),
instanceMetaData.getVersion()));
} | @Test
void assertExecute() {
ShowComputeNodeInfoExecutor executor = new ShowComputeNodeInfoExecutor();
ContextManager contextManager = mock(ContextManager.class);
ComputeNodeInstanceContext computeNodeInstanceContext = createInstanceContext();
when(contextManager.getComputeNodeInstanceContext()).thenReturn(computeNodeInstanceContext);
Collection<LocalDataQueryResultRow> actual = executor.getRows(mock(ShowComputeNodeInfoStatement.class), contextManager);
assertThat(actual.size(), is(1));
LocalDataQueryResultRow row = actual.iterator().next();
assertThat(row.getCell(1), is("foo"));
assertThat(row.getCell(2), is("127.0.0.1"));
assertThat(row.getCell(3), is("3309"));
assertThat(row.getCell(4), is("OK"));
assertThat(row.getCell(5), is("Standalone"));
assertThat(row.getCell(6), is("0"));
assertThat(row.getCell(7), is(""));
assertThat(row.getCell(8), is("foo_version"));
} |
@Override
public List<BlockWorkerInfo> getPreferredWorkers(WorkerClusterView workerClusterView,
String fileId, int count) throws ResourceExhaustedException {
if (workerClusterView.size() < count) {
throw new ResourceExhaustedException(String.format(
"Not enough workers in the cluster %d workers in the cluster but %d required",
workerClusterView.size(), count));
}
Set<WorkerIdentity> workerIdentities = workerClusterView.workerIds();
mHashProvider.refresh(workerIdentities);
List<WorkerIdentity> workers = mHashProvider.getMultiple(fileId, count);
if (workers.size() != count) {
throw new ResourceExhaustedException(String.format(
"Found %d workers from the hash ring but %d required", workers.size(), count));
}
ImmutableList.Builder<BlockWorkerInfo> builder = ImmutableList.builder();
for (WorkerIdentity worker : workers) {
Optional<WorkerInfo> optionalWorkerInfo = workerClusterView.getWorkerById(worker);
final WorkerInfo workerInfo;
if (optionalWorkerInfo.isPresent()) {
workerInfo = optionalWorkerInfo.get();
} else {
// the worker returned by the policy does not exist in the cluster view
// supplied by the client.
// this can happen when the membership changes and some callers fail to update
// to the latest worker cluster view.
// in this case, just skip this worker
LOG.debug("Inconsistency between caller's view of cluster and that of "
+ "the consistent hash policy's: worker {} selected by policy does not exist in "
+ "caller's view {}. Skipping this worker.",
worker, workerClusterView);
continue;
}
BlockWorkerInfo blockWorkerInfo = new BlockWorkerInfo(
worker, workerInfo.getAddress(), workerInfo.getCapacityBytes(),
workerInfo.getUsedBytes(), workerInfo.getState() == WorkerState.LIVE
);
builder.add(blockWorkerInfo);
}
List<BlockWorkerInfo> infos = builder.build();
return infos;
} | @Test
public void workerAddrUpdateWithIdUnchanged() throws Exception {
KetamaHashPolicy policy = new KetamaHashPolicy(mConf);
List<WorkerInfo> workers = new ArrayList<>();
workers.add(new WorkerInfo().setIdentity(WorkerIdentityTestUtils.ofLegacyId(1L))
.setAddress(new WorkerNetAddress().setHost("host1"))
.setCapacityBytes(0)
.setUsedBytes(0)
.setState(WorkerState.LIVE));
workers.add(new WorkerInfo().setIdentity(WorkerIdentityTestUtils.ofLegacyId(2L))
.setAddress(new WorkerNetAddress().setHost("host2"))
.setCapacityBytes(0)
.setUsedBytes(0)
.setState(WorkerState.LIVE));
List<BlockWorkerInfo> selectedWorkers =
policy.getPreferredWorkers(new WorkerClusterView(workers), "fileId", 2);
assertEquals("host1",
selectedWorkers.stream()
.filter(w -> w.getIdentity().equals(WorkerIdentityTestUtils.ofLegacyId(1L)))
.findFirst()
.get()
.getNetAddress()
.getHost());
// now worker 1 has migrated to host3
workers.set(0, new WorkerInfo().setIdentity(WorkerIdentityTestUtils.ofLegacyId(1L))
.setAddress(new WorkerNetAddress().setHost("host3"))
.setCapacityBytes(0)
.setUsedBytes(0)
.setState(WorkerState.LIVE));
List<BlockWorkerInfo> updatedWorkers =
policy.getPreferredWorkers(new WorkerClusterView(workers), "fileId", 2);
assertEquals(
selectedWorkers.stream().map(BlockWorkerInfo::getIdentity).collect(Collectors.toList()),
updatedWorkers.stream().map(BlockWorkerInfo::getIdentity).collect(Collectors.toList()));
assertEquals("host3",
updatedWorkers.stream()
.filter(w -> w.getIdentity().equals(WorkerIdentityTestUtils.ofLegacyId(1L)))
.findFirst()
.get()
.getNetAddress()
.getHost());
} |
protected void declareConstraintNotIn(final String patternType, final List<Object> values) {
String constraints = getInNotInConstraint(values);
builder.not().pattern(patternType).constraint(constraints);
} | @Test
void declareConstraintNotIn() {
List<Object> values = Arrays.asList("3", "8.5");
String patternType = "INPUT2";
KiePMMLDescrLhsFactory.factory(lhsBuilder).declareConstraintNotIn(patternType, values);
final List<BaseDescr> descrs = lhsBuilder.getDescr().getDescrs();
assertThat(descrs).isNotNull();
assertThat(descrs).hasSize(1);
assertThat(descrs.get(0)).isInstanceOf(NotDescr.class);
NotDescr notDescr = (NotDescr) descrs.get(0);
assertThat(notDescr.getDescrs()).hasSize(1);
assertThat(notDescr.getDescrs().get(0)).isInstanceOf(PatternDescr.class);
PatternDescr patternDescr = (PatternDescr) notDescr.getDescrs().get(0);
assertThat(patternDescr.getObjectType()).isEqualTo(patternType);
assertThat(patternDescr.getIdentifier()).isNull();
assertThat(patternDescr.getConstraint()).isInstanceOf(AndDescr.class);
AndDescr andDescr = (AndDescr) patternDescr.getConstraint();
assertThat(andDescr.getDescrs()).hasSize(1);
assertThat(andDescr.getDescrs().get(0)).isInstanceOf(ExprConstraintDescr.class);
ExprConstraintDescr exprConstraintDescr = (ExprConstraintDescr) andDescr.getDescrs().get(0);
assertThat(exprConstraintDescr.isNegated()).isFalse();
assertThat(exprConstraintDescr.getType()).isEqualTo(ExprConstraintDescr.Type.NAMED);
String expected = "value in (3, 8.5)";
assertThat(exprConstraintDescr.getExpression()).isEqualTo(expected);
} |
static <T> int[] getValidIndexes(T[] inputs) {
int[] validIndexes = new int[inputs.length];
int idx = 0;
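// compact the positions of the non-null inputs into the front of the array, then trim to the actual count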
for (int i = 0; i < inputs.length; i++) {
if (inputs[i] != null) {
validIndexes[idx++] = i;
}
}
return Arrays.copyOf(validIndexes, idx);
} | @Test
public void testGetValidIndexes() {
byte[][] inputs = new byte[numInputs][];
inputs[0] = new byte[chunkSize];
inputs[1] = new byte[chunkSize];
inputs[7] = new byte[chunkSize];
inputs[8] = new byte[chunkSize];
int[] validIndexes = CoderUtil.getValidIndexes(inputs);
assertEquals(4, validIndexes.length);
// Check valid indexes
assertEquals(0, validIndexes[0]);
assertEquals(1, validIndexes[1]);
assertEquals(7, validIndexes[2]);
assertEquals(8, validIndexes[3]);
} |
@Override
public FileStatus[] listStatus(Path path) throws IOException {
LOG.debug("listStatus({})", path);
if (mStatistics != null) {
mStatistics.incrementReadOps(1);
}
AlluxioURI uri = getAlluxioPath(path);
List<URIStatus> statuses;
try {
ListStatusPOptions listStatusPOptions = ListStatusPOptions.getDefaultInstance().toBuilder()
.setExcludeMountInfo(mExcludeMountInfoOnListStatus).build();
statuses = mFileSystem.listStatus(uri, listStatusPOptions);
} catch (FileDoesNotExistException e) {
throw new FileNotFoundException(getAlluxioPath(path).toString());
} catch (InvalidArgumentRuntimeException e) {
throw new IllegalArgumentException(e);
} catch (AlluxioRuntimeException e) {
throw toHdfsIOException(e);
} catch (AlluxioException e) {
throw new IOException(e);
}
FileStatus[] ret = new FileStatus[statuses.size()];
for (int k = 0; k < statuses.size(); k++) {
URIStatus status = statuses.get(k);
ret[k] = new AlluxioFileStatus(status, getFsPath(mAlluxioHeader, status));
}
return ret;
} | @Test
public void listStatus() throws Exception {
FileInfo fileInfo1 = new FileInfo()
.setLastModificationTimeMs(111L)
.setLastAccessTimeMs(123L)
.setFolder(false)
.setOwner("user1")
.setGroup("group1")
.setMode(00755);
FileInfo fileInfo2 = new FileInfo()
.setLastModificationTimeMs(222L)
.setLastAccessTimeMs(234L)
.setFolder(true)
.setOwner("user2")
.setGroup("group2")
.setMode(00644);
Path path = new Path("/dir");
alluxio.client.file.FileSystem alluxioFs =
mock(alluxio.client.file.FileSystem.class);
FileSystem alluxioHadoopFs = new FileSystem(alluxioFs);
URI uri = URI.create(Constants.HEADER + "host:1");
alluxioHadoopFs.initialize(uri, getConf());
ListStatusPOptions listStatusPOptions = ListStatusPOptions.getDefaultInstance().toBuilder()
.setExcludeMountInfo(alluxioHadoopFs.mAlluxioConf.getBoolean(
PropertyKey.USER_HDFS_CLIENT_EXCLUDE_MOUNT_INFO_ON_LIST_STATUS)).build();
when(alluxioFs.listStatus(new AlluxioURI(HadoopUtils.getPathWithoutScheme(path)),
listStatusPOptions))
.thenReturn(Lists.newArrayList(new URIStatus(fileInfo1), new URIStatus(fileInfo2)));
FileStatus[] fileStatuses = alluxioHadoopFs.listStatus(path);
assertFileInfoEqualsFileStatus(fileInfo1, fileStatuses[0]);
assertFileInfoEqualsFileStatus(fileInfo2, fileStatuses[1]);
alluxioHadoopFs.close();
} |
public List<PermissionInfo> getPermissions(String role) {
List<PermissionInfo> permissionInfoList = permissionInfoMap.get(role);
if (!authConfigs.isCachingEnabled() || permissionInfoList == null) {
Page<PermissionInfo> permissionInfoPage = getPermissionsFromDatabase(role, DEFAULT_PAGE_NO,
Integer.MAX_VALUE);
if (permissionInfoPage != null) {
permissionInfoList = permissionInfoPage.getPageItems();
if (!CollectionUtils.isEmpty(permissionInfoList)) {
permissionInfoMap.put(role, permissionInfoList);
}
}
}
return permissionInfoList;
} | @Test
void getPermissions() {
boolean cachingEnabled = authConfigs.isCachingEnabled();
assertFalse(cachingEnabled);
List<PermissionInfo> permissions = nacosRoleService.getPermissions("role-admin");
assertEquals(permissions, Collections.emptyList());
} |
public static <T> T[] clone(T[] array) {
if (array == null) {
return null;
}
return array.clone();
} | @Test
public void cloneTest() {
Integer[] b = {1, 2, 3};
Integer[] cloneB = ArrayUtil.clone(b);
assertArrayEquals(b, cloneB);
int[] a = {1, 2, 3};
int[] clone = ArrayUtil.clone(a);
assertArrayEquals(a, clone);
} |
@Override
public Map<String, String> generationCodes(Long tableId) {
// Validate that the table exists
CodegenTableDO table = codegenTableMapper.selectById(tableId);
if (table == null) {
throw exception(CODEGEN_TABLE_NOT_EXISTS);
}
List<CodegenColumnDO> columns = codegenColumnMapper.selectListByTableId(tableId);
if (CollUtil.isEmpty(columns)) {
throw exception(CODEGEN_COLUMN_NOT_EXISTS);
}
// If this is a master table, load the corresponding sub-table information
List<CodegenTableDO> subTables = null;
List<List<CodegenColumnDO>> subColumnsList = null;
if (CodegenTemplateTypeEnum.isMaster(table.getTemplateType())) {
// Validate that the sub-tables exist
subTables = codegenTableMapper.selectListByTemplateTypeAndMasterTableId(
CodegenTemplateTypeEnum.SUB.getType(), tableId);
if (CollUtil.isEmpty(subTables)) {
throw exception(CODEGEN_MASTER_GENERATION_FAIL_NO_SUB_TABLE);
}
// Validate that each sub-table's join column exists
subColumnsList = new ArrayList<>();
for (CodegenTableDO subTable : subTables) {
List<CodegenColumnDO> subColumns = codegenColumnMapper.selectListByTableId(subTable.getId());
if (CollUtil.findOne(subColumns, column -> column.getId().equals(subTable.getSubJoinColumnId())) == null) {
throw exception(CODEGEN_SUB_COLUMN_NOT_EXISTS, subTable.getId());
}
subColumnsList.add(subColumns);
}
}
// Execute the generation
return codegenEngine.execute(table, columns, subTables, subColumnsList);
} | @Test
public void testGenerationCodes_one_success() {
// mock data (CodegenTableDO)
CodegenTableDO table = randomPojo(CodegenTableDO.class,
o -> o.setScene(CodegenSceneEnum.ADMIN.getScene())
.setTemplateType(CodegenTemplateTypeEnum.ONE.getType()));
codegenTableMapper.insert(table);
// mock data (CodegenColumnDO)
CodegenColumnDO column01 = randomPojo(CodegenColumnDO.class, o -> o.setTableId(table.getId()));
codegenColumnMapper.insert(column01);
CodegenColumnDO column02 = randomPojo(CodegenColumnDO.class, o -> o.setTableId(table.getId()));
codegenColumnMapper.insert(column02);
// mock the generation execution
Map<String, String> codes = MapUtil.of(randomString(), randomString());
when(codegenEngine.execute(eq(table), argThat(columns -> {
assertEquals(2, columns.size());
assertEquals(column01, columns.get(0));
assertEquals(column02, columns.get(1));
return true;
}), isNull(), isNull())).thenReturn(codes);
// Prepare the parameter
Long tableId = table.getId();
// Invoke
Map<String, String> result = codegenService.generationCodes(tableId);
// Assert
assertSame(codes, result);
} |
public static <T> T convert(Class<T> type, Object value) throws ConvertException {
return convert((Type) type, value);
} | @Test
public void localDateTimeToLocalDateTest() {
final LocalDateTime localDateTime = LocalDateTime.now();
final LocalDate convert = Convert.convert(LocalDate.class, localDateTime);
assertEquals(localDateTime.toLocalDate(), convert);
} |
protected boolean needFiltering(Exchange exchange) {
// exchange property takes precedence over data format property
return exchange == null
? filterNonXmlChars : exchange.getProperty(Exchange.FILTER_NON_XML_CHARS, filterNonXmlChars, Boolean.class);
} | @Test
public void testNeedFilteringTruePropagates() {
Exchange exchange = new DefaultExchange(camelContext);
exchange.setProperty(Exchange.FILTER_NON_XML_CHARS, Boolean.TRUE);
assertTrue(jaxbDataFormat.needFiltering(exchange));
} |
public NetworkId networkId() {
return networkId;
} | @Test
public void testEquality() {
DefaultVirtualDevice device1 =
new DefaultVirtualDevice(NetworkId.networkId(0), DID1);
DefaultVirtualDevice device2 =
new DefaultVirtualDevice(NetworkId.networkId(0), DID2);
ConnectPoint cpA = new ConnectPoint(device1.id(), PortNumber.portNumber(1));
ConnectPoint cpB = new ConnectPoint(device1.id(), PortNumber.portNumber(2));
ConnectPoint cpC = new ConnectPoint(device2.id(), PortNumber.portNumber(2));
DefaultVirtualPort port1 =
new DefaultVirtualPort(NetworkId.networkId(0), device1,
PortNumber.portNumber(1), cpA);
DefaultVirtualPort port2 =
new DefaultVirtualPort(NetworkId.networkId(0), device1,
PortNumber.portNumber(1), cpA);
DefaultVirtualPort port3 =
new DefaultVirtualPort(NetworkId.networkId(0), device1,
PortNumber.portNumber(2), cpB);
DefaultVirtualPort port4 =
new DefaultVirtualPort(NetworkId.networkId(1), device2,
PortNumber.portNumber(2), cpC);
new EqualsTester().addEqualityGroup(port1, port2).addEqualityGroup(port3)
.addEqualityGroup(port4).testEquals();
} |
boolean empty() {
return empty;
} | @Test
public void testEmpty() {
LogReplayTracker tracker = new LogReplayTracker.Builder().build();
assertTrue(tracker.empty());
tracker.replay(new NoOpRecord());
assertFalse(tracker.empty());
} |
@Override
public long getPeriod() {
return config.getLong(PERIOD_IN_MILISECONDS_PROPERTY).orElse(10_000L);
} | @Test
public void getPeriod_returnNumberFromConfig() {
config.put("sonar.server.monitoring.other.period", "100000");
long delay = underTest.getPeriod();
assertThat(delay).isEqualTo(100_000L);
} |
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source, Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables, Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles, FEELTypeRegistry typeRegistry) {
CharStream input = CharStreams.fromString(source);
FEEL_1_1Lexer lexer = new FEEL_1_1Lexer( input );
CommonTokenStream tokens = new CommonTokenStream( lexer );
FEEL_1_1Parser parser = new FEEL_1_1Parser( tokens );
ParserHelper parserHelper = new ParserHelper(eventsManager);
additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol()));
parser.setHelper(parserHelper);
parser.setErrorHandler( new FEELErrorHandler() );
parser.removeErrorListeners(); // removes the error listener that prints to the console
parser.addErrorListener( new FEELParserErrorListener( eventsManager ) );
// pre-loads the parser with symbols
defineVariables( inputVariableTypes, inputVariables, parser );
if (typeRegistry != null) {
parserHelper.setTypeRegistry(typeRegistry);
}
return parser;
} | @Test
void functionInvocationEmptyParams() {
String inputExpression = "my.test.Function()";
BaseNode functionBase = parse( inputExpression );
assertThat( functionBase).isInstanceOf(FunctionInvocationNode.class);
assertThat( functionBase.getText()).isEqualTo(inputExpression);
FunctionInvocationNode function = (FunctionInvocationNode) functionBase;
assertThat( function.getName()).isInstanceOf(QualifiedNameNode.class);
assertThat( function.getName().getText()).isEqualTo("my.test.Function");
assertThat( function.getParams()).isInstanceOf(ListNode.class);
assertThat( function.getParams().getElements()).isEmpty();
} |
@Override
public JobDetails postProcess(JobDetails jobDetails) {
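// detect a Spring CGLIB proxy marker (e.g. "Foo$$EnhancerBySpringCGLIB$$6aee664d")
// and strip it so the job references the original class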
if (isNotNullOrEmpty(substringBetween(jobDetails.getClassName(), "$$", "$$"))) {
return new JobDetails(
substringBefore(jobDetails.getClassName(), "$$"),
jobDetails.getStaticFieldName(),
jobDetails.getMethodName(),
jobDetails.getJobParameters()
);
}
return jobDetails;
} | @Test
void postProcessWithSpringCGLibReturnsUpdatedJobDetails() {
// GIVEN
final JobDetails jobDetails = defaultJobDetails().withClassName(TestService.class.getName() + "$$EnhancerBySpringCGLIB$$6aee664d").build();
// WHEN
final JobDetails result = cgLibPostProcessor.postProcess(jobDetails);
// THEN
assertThat(result)
.isNotSameAs(jobDetails)
.hasClass(TestService.class);
} |
public static <V> Read<V> read() {
return new AutoValue_SparkReceiverIO_Read.Builder<V>().build();
} | @Test
public void testReadObjectCreationFailsIfStartPollTimeoutSecIsNull() {
assertThrows(
IllegalArgumentException.class,
() -> SparkReceiverIO.<String>read().withStartPollTimeoutSec(null));
} |
@Override
public <T extends State> T state(StateNamespace namespace, StateTag<T> address) {
return workItemState.get(namespace, address, StateContexts.nullContext());
} | @Test
public void testWatermarkAddBeforeReadEarliest() throws Exception {
StateTag<WatermarkHoldState> addr =
StateTags.watermarkStateInternal("watermark", TimestampCombiner.EARLIEST);
WatermarkHoldState bag = underTest.state(NAMESPACE, addr);
SettableFuture<Instant> future = SettableFuture.create();
when(mockReader.watermarkFuture(key(NAMESPACE, "watermark"), STATE_FAMILY)).thenReturn(future);
bag.readLater();
bag.add(new Instant(3000));
waitAndSet(future, new Instant(2000), 200);
assertThat(bag.read(), Matchers.equalTo(new Instant(2000)));
Mockito.verify(mockReader, times(2)).watermarkFuture(key(NAMESPACE, "watermark"), STATE_FAMILY);
Mockito.verifyNoMoreInteractions(mockReader);
// Adding another value doesn't create another future, but does update the result.
bag.add(new Instant(1000));
assertThat(bag.read(), Matchers.equalTo(new Instant(1000)));
Mockito.verifyNoMoreInteractions(mockReader);
} |
public static AssertionResult getResult(SMIMEAssertionTestElement testElement, SampleResult response, String name) {
checkForBouncycastle();
AssertionResult res = new AssertionResult(name);
try {
MimeMessage msg;
final int msgPos = testElement.getSpecificMessagePositionAsInt();
if (msgPos < 0){ // means counting from end
SampleResult[] subResults = response.getSubResults();
final int pos = subResults.length + msgPos;
log.debug("Getting message number: {} of {}", pos, subResults.length);
msg = getMessageFromResponse(response,pos);
} else {
log.debug("Getting message number: {}", msgPos);
msg = getMessageFromResponse(response, msgPos);
}
SMIMESignedParser signedParser = null;
if(log.isDebugEnabled()) {
log.debug("Content-type: {}", msg.getContentType());
}
if (msg.isMimeType("multipart/signed")) { // $NON-NLS-1$
MimeMultipart multipart = (MimeMultipart) msg.getContent();
signedParser = new SMIMESignedParser(new BcDigestCalculatorProvider(), multipart);
} else if (msg.isMimeType("application/pkcs7-mime") // $NON-NLS-1$
|| msg.isMimeType("application/x-pkcs7-mime")) { // $NON-NLS-1$
signedParser = new SMIMESignedParser(new BcDigestCalculatorProvider(), msg);
}
if (null != signedParser) {
log.debug("Found signature");
if (testElement.isNotSigned()) {
res.setFailure(true);
res.setFailureMessage("Mime message is signed");
} else if (testElement.isVerifySignature() || !testElement.isSignerNoCheck()) {
res = verifySignature(testElement, signedParser, name);
}
} else {
log.debug("Did not find signature");
if (!testElement.isNotSigned()) {
res.setFailure(true);
res.setFailureMessage("Mime message is not signed");
}
}
} catch (MessagingException e) {
String msg = "Cannot parse mime msg: " + e.getMessage();
log.warn(msg, e);
res.setFailure(true);
res.setFailureMessage(msg);
} catch (CMSException e) {
res.setFailure(true);
res.setFailureMessage("Error reading the signature: "
+ e.getMessage());
} catch (SMIMEException e) {
res.setFailure(true);
res.setFailureMessage("Cannot extract signed body part from signature: "
+ e.getMessage());
} catch (IOException e) { // should never happen
log.error("Cannot read mime message content: {}", e.getMessage(), e);
res.setError(true);
res.setFailureMessage(e.getMessage());
}
return res;
} | @Test
public void testSignerSerial() {
SMIMEAssertionTestElement testElement = new SMIMEAssertionTestElement();
testElement.setSignerCheckConstraints(true);
testElement.setSignerSerial("0xc8c46f8fbf9ebea4");
AssertionResult result = SMIMEAssertion.getResult(testElement, parent,
"Test");
assertFalse(result.isError(), "Result should not be an error");
assertFalse(result.isFailure(), "Result should not fail: " + result.getFailureMessage());
} |
public static Config fromMap(Map<String, ?> map) {
return fromMap("Map@" + System.identityHashCode(map), map);
} | @Test
void testFromMap() {
var config = MapConfigFactory.fromMap(Map.of(
"field1", "value1",
"field2", 2,
"field3", List.of(1, "2"),
"field4", Map.of(
"f1", 1
)
));
assertThat(config.get(ConfigValuePath.root().child("field1")))
.isInstanceOf(ConfigValue.StringValue.class)
.hasFieldOrPropertyWithValue("value", "value1");
assertThat(config.get(ConfigValuePath.root().child("field2")))
.isInstanceOf(ConfigValue.NumberValue.class)
.hasFieldOrPropertyWithValue("value", 2);
assertThat(config.get(ConfigValuePath.root().child("field3")))
.isInstanceOf(ConfigValue.ArrayValue.class);
assertThat(config.get(ConfigValuePath.root().child("field3").child(0)))
.isInstanceOf(ConfigValue.NumberValue.class)
.hasFieldOrPropertyWithValue("value", 1);
assertThat(config.get(ConfigValuePath.root().child("field3").child(1)))
.isInstanceOf(ConfigValue.StringValue.class)
.hasFieldOrPropertyWithValue("value", "2");
assertThat(config.get(ConfigValuePath.root().child("field4")))
.isInstanceOf(ConfigValue.ObjectValue.class);
assertThat(config.get(ConfigValuePath.root().child("field4").child("f1")))
.isInstanceOf(ConfigValue.NumberValue.class)
.hasFieldOrPropertyWithValue("value", 1);
} |
@Override
public void handleExecutionsTermination(Collection<ExecutionState> terminatedExecutionStates) {
final Set<ExecutionState> notFinishedExecutionStates =
checkNotNull(terminatedExecutionStates).stream()
.filter(state -> state != ExecutionState.FINISHED)
.collect(Collectors.toSet());
if (notFinishedExecutionStates.isEmpty()) {
handleExecutionsFinished();
} else {
handleAnyExecutionNotFinished(notFinishedExecutionStates);
}
} | @Test
void testSavepointCreationFailureWithFailingExecutions() {
// no global fail-over is expected to be triggered by the stop-with-savepoint despite the
// execution failure
assertSavepointCreationFailure(
testInstance ->
testInstance.handleExecutionsTermination(
Collections.singletonList(ExecutionState.FAILED)));
} |
@VisibleForTesting
void removeQueues(String args, SchedConfUpdateInfo updateInfo) {
if (args == null) {
return;
}
List<String> queuesToRemove = Arrays.asList(args.split(";"));
updateInfo.setRemoveQueueInfo(new ArrayList<>(queuesToRemove));
} | @Test(timeout = 10000)
public void testRemoveQueues() {
SchedConfUpdateInfo schedUpdateInfo = new SchedConfUpdateInfo();
cli.removeQueues("root.a;root.b;root.c.c1", schedUpdateInfo);
List<String> removeInfo = schedUpdateInfo.getRemoveQueueInfo();
assertEquals(3, removeInfo.size());
assertEquals("root.a", removeInfo.get(0));
assertEquals("root.b", removeInfo.get(1));
assertEquals("root.c.c1", removeInfo.get(2));
} |
@Override
public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) {
LOG.debug("Merging statistics: [aggregateColStats:{}, newColStats: {}]", aggregateColStats, newColStats);
LongColumnStatsDataInspector aggregateData = longInspectorFromStats(aggregateColStats);
LongColumnStatsDataInspector newData = longInspectorFromStats(newColStats);
Long lowValue = mergeLowValue(getLowValue(aggregateData), getLowValue(newData));
if (lowValue != null) {
aggregateData.setLowValue(lowValue);
}
Long highValue = mergeHighValue(getHighValue(aggregateData), getHighValue(newData));
if (highValue != null) {
aggregateData.setHighValue(highValue);
}
aggregateData.setNumNulls(mergeNumNulls(aggregateData.getNumNulls(), newData.getNumNulls()));
NumDistinctValueEstimator oldNDVEst = aggregateData.getNdvEstimator();
NumDistinctValueEstimator newNDVEst = newData.getNdvEstimator();
List<NumDistinctValueEstimator> ndvEstimatorsList = Arrays.asList(oldNDVEst, newNDVEst);
aggregateData.setNumDVs(mergeNumDistinctValueEstimator(aggregateColStats.getColName(),
ndvEstimatorsList, aggregateData.getNumDVs(), newData.getNumDVs()));
aggregateData.setNdvEstimator(ndvEstimatorsList.get(0));
KllHistogramEstimator oldKllEst = aggregateData.getHistogramEstimator();
KllHistogramEstimator newKllEst = newData.getHistogramEstimator();
aggregateData.setHistogramEstimator(mergeHistogramEstimator(aggregateColStats.getColName(), oldKllEst, newKllEst));
aggregateColStats.getStatsData().setLongStats(aggregateData);
} | @Test
public void testMergeNonNullWithNullValues() {
ColumnStatisticsObj aggrObj = createColumnStatisticsObj(new ColStatsBuilder<>(long.class)
.low(1L)
.high(3L)
.numNulls(4)
.numDVs(2)
.hll(1, 3, 3)
.kll(1, 3, 3)
.build());
ColumnStatisticsObj newObj = createColumnStatisticsObj(new ColStatsBuilder<>(long.class)
.low(null)
.high(null)
.numNulls(2)
.numDVs(0)
.build());
merger.merge(aggrObj, newObj);
ColumnStatisticsData expectedColumnStatisticsData = new ColStatsBuilder<>(long.class)
.low(1L)
.high(3L)
.numNulls(6)
.numDVs(2)
.hll(1, 3, 3)
.kll(1, 3, 3)
.build();
assertEquals(expectedColumnStatisticsData, aggrObj.getStatsData());
} |
@Override
public void reload(AppSettings settings) throws IOException {
if (ClusterSettings.isClusterEnabled(settings)) {
throw new IllegalStateException("Restart is not possible with cluster mode");
}
AppSettings reloaded = settingsLoader.load();
ensureUnchangedConfiguration(settings.getProps(), reloaded.getProps());
settings.reload(reloaded.getProps());
fileSystem.reset();
logging.configure();
appState.reset();
} | @Test
public void throw_ISE_if_cluster_is_enabled() throws IOException {
AppSettings settings = new TestAppSettings(ImmutableMap.of(CLUSTER_ENABLED.getKey(), "true"));
assertThatThrownBy(() -> {
underTest.reload(settings);
verifyNoInteractions(logging);
verifyNoInteractions(state);
verifyNoInteractions(fs);
})
.isInstanceOf(IllegalStateException.class)
.hasMessage("Restart is not possible with cluster mode");
} |
public static void writeAndFlushWithClosePromise(ChannelOutboundInvoker ctx, ByteBuf msg) {
ctx.writeAndFlush(msg).addListener(ChannelFutureListener.CLOSE);
} | @Test
public void testWriteAndFlushWithClosePromise() {
final ChannelOutboundInvoker ctx = mock(ChannelOutboundInvoker.class);
final ChannelPromise promise = mock(ChannelPromise.class);
final byte[] data = "test".getBytes(StandardCharsets.UTF_8);
final ByteBuf byteBuf = Unpooled.wrappedBuffer(data, 0, data.length);
when(ctx.writeAndFlush(same(byteBuf))).thenReturn(promise);
try {
NettyChannelUtil.writeAndFlushWithClosePromise(ctx, byteBuf);
verify(ctx).writeAndFlush(same(byteBuf));
verify(promise).addListener(same(ChannelFutureListener.CLOSE));
} finally {
byteBuf.release();
}
} |
public static SubqueryTableSegment bind(final SubqueryTableSegment segment, final SQLStatementBinderContext binderContext,
final Map<String, TableSegmentBinderContext> tableBinderContexts, final Map<String, TableSegmentBinderContext> outerTableBinderContexts) {
fillPivotColumnNamesInBinderContext(segment, binderContext);
SQLStatementBinderContext subqueryBinderContext = new SQLStatementBinderContext(segment.getSubquery().getSelect(), binderContext.getMetaData(), binderContext.getCurrentDatabaseName());
subqueryBinderContext.getExternalTableBinderContexts().putAll(binderContext.getExternalTableBinderContexts());
SelectStatement boundSubSelect = new SelectStatementBinder(outerTableBinderContexts).bind(segment.getSubquery().getSelect(), subqueryBinderContext);
SubquerySegment boundSubquerySegment = new SubquerySegment(segment.getSubquery().getStartIndex(), segment.getSubquery().getStopIndex(), boundSubSelect, segment.getSubquery().getText());
boundSubquerySegment.setSubqueryType(segment.getSubquery().getSubqueryType());
IdentifierValue subqueryTableName = segment.getAliasSegment().map(AliasSegment::getIdentifier).orElseGet(() -> new IdentifierValue(""));
SubqueryTableSegment result = new SubqueryTableSegment(segment.getStartIndex(), segment.getStopIndex(), boundSubquerySegment);
segment.getAliasSegment().ifPresent(result::setAlias);
tableBinderContexts.put(subqueryTableName.getValue().toLowerCase(), new SimpleTableSegmentBinderContext(
SubqueryTableBindUtils.createSubqueryProjections(boundSubSelect.getProjections().getProjections(), subqueryTableName, binderContext.getDatabaseType())));
return result;
} | @Test
void assertBindWithSubqueryTableAlias() {
MySQLSelectStatement selectStatement = mock(MySQLSelectStatement.class);
when(selectStatement.getDatabaseType()).thenReturn(databaseType);
when(selectStatement.getFrom()).thenReturn(Optional.of(new SimpleTableSegment(new TableNameSegment(0, 0, new IdentifierValue("t_order")))));
ProjectionsSegment projectionsSegment = new ProjectionsSegment(0, 0);
projectionsSegment.getProjections().add(new ShorthandProjectionSegment(0, 0));
when(selectStatement.getProjections()).thenReturn(projectionsSegment);
SubqueryTableSegment subqueryTableSegment = new SubqueryTableSegment(0, 0, new SubquerySegment(0, 0, selectStatement, ""));
subqueryTableSegment.setAlias(new AliasSegment(0, 0, new IdentifierValue("temp")));
ShardingSphereMetaData metaData = createMetaData();
Map<String, TableSegmentBinderContext> tableBinderContexts = new LinkedHashMap<>();
SubqueryTableSegment actual = SubqueryTableSegmentBinder.bind(subqueryTableSegment, new SQLStatementBinderContext(metaData, DefaultDatabase.LOGIC_NAME, databaseType, Collections.emptySet()),
tableBinderContexts, Collections.emptyMap());
assertTrue(actual.getAlias().isPresent());
assertTrue(tableBinderContexts.containsKey("temp"));
List<ProjectionSegment> projectionSegments = new ArrayList<>(tableBinderContexts.get("temp").getProjectionSegments());
assertThat(projectionSegments.size(), is(3));
assertThat(projectionSegments.get(0), instanceOf(ColumnProjectionSegment.class));
assertTrue(((ColumnProjectionSegment) projectionSegments.get(0)).getColumn().getOwner().isPresent());
assertThat(((ColumnProjectionSegment) projectionSegments.get(0)).getColumn().getOwner().get().getIdentifier().getValue(), is("temp"));
assertThat(((ColumnProjectionSegment) projectionSegments.get(0)).getColumn().getIdentifier().getValue(), is("order_id"));
assertThat(projectionSegments.get(1), instanceOf(ColumnProjectionSegment.class));
assertTrue(((ColumnProjectionSegment) projectionSegments.get(1)).getColumn().getOwner().isPresent());
assertThat(((ColumnProjectionSegment) projectionSegments.get(1)).getColumn().getOwner().get().getIdentifier().getValue(), is("temp"));
assertThat(((ColumnProjectionSegment) projectionSegments.get(1)).getColumn().getIdentifier().getValue(), is("user_id"));
assertThat(projectionSegments.get(2), instanceOf(ColumnProjectionSegment.class));
assertTrue(((ColumnProjectionSegment) projectionSegments.get(2)).getColumn().getOwner().isPresent());
assertThat(((ColumnProjectionSegment) projectionSegments.get(2)).getColumn().getOwner().get().getIdentifier().getValue(), is("temp"));
assertThat(((ColumnProjectionSegment) projectionSegments.get(2)).getColumn().getIdentifier().getValue(), is("status"));
} |
public static AwsCredentialsProvider create(boolean isCloud,
@Nullable String stsRegion,
@Nullable String accessKey,
@Nullable String secretKey,
@Nullable String assumeRoleArn) {
AwsCredentialsProvider awsCredentials = isCloud ? getCloudAwsCredentialsProvider(accessKey, secretKey) :
getAwsCredentialsProvider(accessKey, secretKey);
// Apply the Assume Role ARN Authorization if specified. All AWSCredentialsProviders support this.
if (!isNullOrEmpty(assumeRoleArn) && !isNullOrEmpty(stsRegion)) {
LOG.debug("Creating cross account assume role credentials");
return buildStsCredentialsProvider(awsCredentials, stsRegion, assumeRoleArn, accessKey);
}
return awsCredentials;
} | @Test
public void testAutomaticAuthIsFailingInCloudWithInvalidSecretKey() {
assertThatExceptionOfType(IllegalArgumentException.class).isThrownBy(() ->
AWSAuthFactory.create(true, null, "key", null, null))
.withMessageContaining("Secret key");
} |
@Override
public Path move(final Path source, final Path renamed, final TransferStatus status, final Delete.Callback callback, final ConnectionCallback connectionCallback) throws BackgroundException {
Path target;
if(source.attributes().getCustom().containsKey(KEY_DELETE_MARKER)) {
// Delete markers cannot be copied; retain the delete marker at the target instead
target = new Path(renamed);
target.attributes().setVersionId(null);
delete.delete(Collections.singletonMap(target, status), connectionCallback, callback);
try {
// Find version id of moved delete marker
final Path bucket = containerService.getContainer(renamed);
final VersionOrDeleteMarkersChunk marker = session.getClient().listVersionedObjectsChunked(
bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(renamed),
String.valueOf(Path.DELIMITER), 1, null, null, false);
if(marker.getItems().length == 1) {
final BaseVersionOrDeleteMarker markerObject = marker.getItems()[0];
target.attributes().withVersionId(markerObject.getVersionId()).setCustom(Collections.singletonMap(KEY_DELETE_MARKER, Boolean.TRUE.toString()));
delete.delete(Collections.singletonMap(source, status), connectionCallback, callback);
}
else {
throw new NotfoundException(String.format("Unable to find delete marker %s", renamed.getName()));
}
}
catch(ServiceException e) {
throw new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, renamed);
}
}
else {
try {
target = proxy.copy(source, renamed, status.withLength(source.attributes().getSize()), connectionCallback, new DisabledStreamListener());
// Copy source path and nullify version id to add a delete marker
delete.delete(Collections.singletonMap(new Path(source).withAttributes(new PathAttributes(source.attributes()).withVersionId(null)), status),
connectionCallback, callback);
}
catch(NotfoundException e) {
if(source.getType().contains(Path.Type.placeholder)) {
// No placeholder object to copy, create a new one at the target
target = session.getFeature(Directory.class).mkdir(renamed, new TransferStatus().withRegion(source.attributes().getRegion()));
}
else {
throw e;
}
}
}
return target;
} | @Test
public void testMoveVersioned() throws Exception {
final Path container = new Path("versioning-test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
Path test = new Path(container, new AsciiRandomStringService().random(), EnumSet.of(Path.Type.file));
final S3AccessControlListFeature acl = new S3AccessControlListFeature(session);
assertNotNull(new S3TouchFeature(session, acl).touch(test, new TransferStatus()).attributes().getVersionId());
assertTrue(new S3FindFeature(session, acl).find(test));
// Write some data to add a new version
final S3WriteFeature feature = new S3WriteFeature(session, acl);
final byte[] content = RandomUtils.nextBytes(10);
final TransferStatus status = new TransferStatus();
status.setLength(content.length);
status.setChecksum(new SHA256ChecksumCompute().compute(new ByteArrayInputStream(content), status));
final HttpResponseOutputStream<StorageObject> out = feature.write(test, status, new DisabledConnectionCallback());
new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out);
out.close();
// Get new path with updated version id
final AttributedList<Path> list = new S3ListService(session, acl).list(container, new DisabledListProgressListener());
for(Path path : list) {
if(new SimplePathPredicate(test).test(path)) {
test = path;
break;
}
}
final Path renamed = new Path(container, String.format("%s-renamed", test.getName()), EnumSet.of(Path.Type.file));
new S3MoveFeature(session, acl).move(test, renamed, new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback());
assertTrue(new S3FindFeature(session, acl).find(test));
assertTrue(new S3FindFeature(session, acl).find(renamed));
// Ensure that the latest version of the source file is a delete marker
for(Path path : new S3ListService(session, acl).list(container, new DisabledListProgressListener())) {
if(new SimplePathPredicate(test).test(path)) {
assertTrue(path.attributes().isDuplicate());
assertTrue(new S3AttributesFinderFeature(session, acl).find(path).isDuplicate());
assertTrue(new S3AttributesFinderFeature(session, acl).find(path).isDuplicate());
break;
}
}
new S3DefaultDeleteFeature(session).delete(Collections.singletonList(renamed), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
static PublicationParams getPublicationParams(
final ChannelUri channelUri,
final MediaDriver.Context ctx,
final DriverConductor driverConductor,
final boolean isIpc)
{
final PublicationParams params = new PublicationParams(ctx, isIpc);
params.getEntityTag(channelUri, driverConductor);
params.getSessionId(channelUri, driverConductor);
params.getTermBufferLength(channelUri);
params.getMtuLength(channelUri);
params.getLingerTimeoutNs(channelUri);
params.getEos(channelUri);
params.getSparse(channelUri, ctx);
params.getSpiesSimulateConnection(channelUri, ctx);
params.getUntetheredWindowLimitTimeout(channelUri, ctx);
params.getUntetheredRestingTimeout(channelUri, ctx);
params.getMaxResend(channelUri);
int count = 0;
final String initialTermIdStr = channelUri.get(INITIAL_TERM_ID_PARAM_NAME);
count = initialTermIdStr != null ? count + 1 : count;
final String termIdStr = channelUri.get(TERM_ID_PARAM_NAME);
count = termIdStr != null ? count + 1 : count;
final String termOffsetStr = channelUri.get(TERM_OFFSET_PARAM_NAME);
count = termOffsetStr != null ? count + 1 : count;
if (count > 0)
{
if (count < 3)
{
throw new IllegalArgumentException("params must be used as a complete set: " +
INITIAL_TERM_ID_PARAM_NAME + " " + TERM_ID_PARAM_NAME + " " + TERM_OFFSET_PARAM_NAME + " channel=" +
channelUri);
}
params.initialTermId = Integer.parseInt(initialTermIdStr);
params.termId = Integer.parseInt(termIdStr);
params.termOffset = Integer.parseInt(termOffsetStr);
if (params.termOffset > params.termLength)
{
throw new IllegalArgumentException(
TERM_OFFSET_PARAM_NAME + "=" + params.termOffset + " > " +
TERM_LENGTH_PARAM_NAME + "=" + params.termLength + ": channel=" + channelUri);
}
if (params.termOffset < 0 || params.termOffset > LogBufferDescriptor.TERM_MAX_LENGTH)
{
throw new IllegalArgumentException(
TERM_OFFSET_PARAM_NAME + "=" + params.termOffset + " out of range: channel=" + channelUri);
}
if ((params.termOffset & (FrameDescriptor.FRAME_ALIGNMENT - 1)) != 0)
{
throw new IllegalArgumentException(
TERM_OFFSET_PARAM_NAME + "=" + params.termOffset +
" must be a multiple of FRAME_ALIGNMENT: channel=" + channelUri);
}
if (params.termId - params.initialTermId < 0)
{
throw new IllegalStateException(
"difference greater than 2^31 - 1: " + INITIAL_TERM_ID_PARAM_NAME + "=" +
params.initialTermId + " when " + TERM_ID_PARAM_NAME + "=" + params.termId + " channel=" +
channelUri);
}
params.hasPosition = true;
}
params.isResponse = CONTROL_MODE_RESPONSE.equals(channelUri.get(MDC_CONTROL_MODE_PARAM_NAME));
params.responseCorrelationId = Long.parseLong(channelUri.get(RESPONSE_CORRELATION_ID_PARAM_NAME, "-1"));
return params;
} | @Test
void hasInvalidMaxRetransmits()
{
final ChannelUri uri = ChannelUri.parse("aeron:udp?endpoint=localhost:1010|" +
CommonContext.MAX_RESEND_PARAM_NAME + "=notanumber");
final IllegalArgumentException exception = assertThrows(
IllegalArgumentException.class,
() -> PublicationParams.getPublicationParams(uri, ctx, conductor, false));
assertTrue(exception.getMessage().contains("must be a number"));
} |
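// A hedged sketch (param names assumed from Aeron's CommonContext): init-term-id,
// term-id and term-offset must be supplied as a complete set; a partial set makes
// getPublicationParams throw the IllegalArgumentException seen above.
import io.aeron.ChannelUri;

final class PositionParamsExample {
    static ChannelUri completePositionSet() {
        // All three position parameters present -> passes the count check.
        return ChannelUri.parse(
                "aeron:udp?endpoint=localhost:1010|init-term-id=1|term-id=1|term-offset=0");
    }
}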
void decode(int streamId, ByteBuf in, Http2Headers headers, boolean validateHeaders) throws Http2Exception {
Http2HeadersSink sink = new Http2HeadersSink(
streamId, headers, maxHeaderListSize, validateHeaders);
// Check for dynamic table size updates, which must occur at the beginning:
// https://www.rfc-editor.org/rfc/rfc7541.html#section-4.2
decodeDynamicTableSizeUpdates(in);
decode(in, sink);
// Now that we've read all of our headers we can perform the validation steps. We must
// delay throwing until this point to prevent dynamic table corruption.
sink.finish();
} | @Test
public void testIncompleteIndex() throws Http2Exception {
byte[] compressed = StringUtil.decodeHexDump("FFF0");
final ByteBuf in = Unpooled.wrappedBuffer(compressed);
try {
assertEquals(2, in.readableBytes());
assertThrows(Http2Exception.class, new Executable() {
@Override
public void execute() throws Throwable {
hpackDecoder.decode(0, in, mockHeaders, true);
}
});
} finally {
in.release();
}
} |
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Tuple2<?, ?> tuple2 = (Tuple2<?, ?>) o;
return Objects.equals(f0, tuple2.f0) && Objects.equals(f1, tuple2.f1);
} | @Test
public void testEquals() {
assertEquals(Tuple2.of(1, "a"), Tuple2.of(1, "a"));
assertEquals(Tuple2.of(1, "a").hashCode(), Tuple2.of(1, "a").hashCode());
} |
Set<SourceName> analyzeExpression(
final Expression expression,
final String clauseType
) {
final Validator extractor = new Validator(clauseType);
extractor.process(expression, null);
return extractor.referencedSources;
} | @Test
public void shouldThrowOnNoSources() {
// Given:
final Expression expression = new UnqualifiedColumnReferenceExp(
ColumnName.of("just-name")
);
when(sourceSchemas.sourcesWithField(any(), any()))
.thenReturn(ImmutableSet.of());
// When:
final Exception e = assertThrows(
UnknownColumnException.class,
() -> analyzer.analyzeExpression(expression, CLAUSE_TYPE)
);
// Then:
assertThat(e.getMessage(), containsString(
CLAUSE_TYPE + " column 'just-name' cannot be resolved."));
} |
@Override
public void releaseAll() throws Exception {
Collection<String> children = getAllHandles();
Exception exception = null;
for (String child : children) {
try {
release(child);
} catch (Exception e) {
exception = ExceptionUtils.firstOrSuppressed(e, exception);
}
}
if (exception != null) {
throw new Exception("Could not properly release all state nodes.", exception);
}
} | @Test
void testReleaseAll() throws Exception {
final TestingLongStateHandleHelper longStateStorage = new TestingLongStateHandleHelper();
ZooKeeperStateHandleStore<TestingLongStateHandleHelper.LongStateHandle> zkStore =
new ZooKeeperStateHandleStore<>(getZooKeeperClient(), longStateStorage);
final Collection<String> paths = Arrays.asList("/state1", "/state2", "/state3");
for (String path : paths) {
zkStore.addAndLock(path, new TestingLongStateHandleHelper.LongStateHandle(42L));
}
for (String path : paths) {
Stat stat =
getZooKeeperClient().checkExists().forPath(zkStore.getInstanceLockPath(path));
assertThat(stat).as("Expected an existing lock.").isNotNull();
}
zkStore.releaseAll();
for (String path : paths) {
Stat stat =
getZooKeeperClient()
.checkExists()
.forPath(ZooKeeperStateHandleStore.getRootLockPath(path));
assertThat(stat.getNumChildren()).isZero();
}
} |
public FEELFnResult<List<BigDecimal>> invoke(@ParameterName( "list" ) List list, @ParameterName( "match" ) Object match) {
if ( list == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null"));
}
final List<BigDecimal> result = new ArrayList<>();
for( int i = 0; i < list.size(); i++ ) {
Object o = list.get( i );
if ( o == null && match == null) {
result.add( BigDecimal.valueOf( i+1L ) );
} else if ( o != null && match != null ) {
if ( equalsAsBigDecimals(o, match) || o.equals(match) ) {
result.add( BigDecimal.valueOf( i+1L ) );
}
}
}
return FEELFnResult.ofResult( result );
} | @Test
void invokeBigDecimal() {
FunctionTestUtil.assertResult(indexOfFunction.invoke(Arrays.asList("test", null, 12), BigDecimal.valueOf(12))
, Collections.emptyList());
FunctionTestUtil.assertResult(
indexOfFunction.invoke(Arrays.asList("test", null, BigDecimal.valueOf(12)), BigDecimal.valueOf(12)),
Collections.singletonList(BigDecimal.valueOf(3)));
FunctionTestUtil.assertResult(
indexOfFunction.invoke(
Arrays.asList("test", null, BigDecimal.valueOf(12)),
BigDecimal.valueOf(12).setScale(4, BigDecimal.ROUND_HALF_UP)),
Collections.singletonList(BigDecimal.valueOf(3)));
FunctionTestUtil.assertResult(
indexOfFunction.invoke(
Arrays.asList(BigDecimal.valueOf(12.00), "test", null, BigDecimal.valueOf(12)),
BigDecimal.valueOf(12)),
Arrays.asList(BigDecimal.valueOf(1), BigDecimal.valueOf(4)));
} |
@Override
public boolean alterOffsets(Map<String, String> config, Map<Map<String, ?>, Map<String, ?>> offsets) {
for (Map.Entry<Map<String, ?>, Map<String, ?>> offsetEntry : offsets.entrySet()) {
Map<String, ?> sourceOffset = offsetEntry.getValue();
if (sourceOffset == null) {
// We allow tombstones for anything; if there's garbage in the offsets for the connector, we don't
// want to prevent users from being able to clean it up using the REST API
continue;
}
Map<String, ?> sourcePartition = offsetEntry.getKey();
if (sourcePartition == null) {
throw new ConnectException("Source partitions may not be null");
}
MirrorUtils.validateSourcePartitionString(sourcePartition, SOURCE_CLUSTER_ALIAS_KEY);
MirrorUtils.validateSourcePartitionString(sourcePartition, TARGET_CLUSTER_ALIAS_KEY);
MirrorUtils.validateSourceOffset(sourcePartition, sourceOffset, true);
}
// We don't actually use these offsets in the task class, so no additional effort is required beyond just validating
// the format of the user-supplied offsets
return true;
} | @Test
public void testAlterOffsetsIncorrectPartitionKey() {
MirrorHeartbeatConnector connector = new MirrorHeartbeatConnector();
assertThrows(ConnectException.class, () -> connector.alterOffsets(null, Collections.singletonMap(
Collections.singletonMap("unused_partition_key", "unused_partition_value"),
SOURCE_OFFSET
)));
// null partitions are invalid
assertThrows(ConnectException.class, () -> connector.alterOffsets(null, Collections.singletonMap(
null,
SOURCE_OFFSET
)));
} |
public static String limitSizeTo1KB(String desc) {
if (desc.length() < 1024) {
return desc;
} else {
return desc.substring(0, 1024);
}
} | @Test
void limitSizeTo1KB() {
String a = "a";
for (int i = 0; i < 11; i++) {
a += a;
}
Assertions.assertEquals(1024, TriRpcStatus.limitSizeTo1KB(a).length());
Assertions.assertEquals(1, TriRpcStatus.limitSizeTo1KB("a").length());
} |
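// A minimal sketch grounded in the method above: descriptions shorter than 1024
// characters pass through unchanged; anything at or above that length is cut to
// exactly 1024 characters.
final class LimitExample {
    static void demo() {
        String longDesc = String.join("", java.util.Collections.nCopies(5000, "x"));
        // 5000 >= 1024, so the result is truncated to exactly 1024 characters.
        String capped = TriRpcStatus.limitSizeTo1KB(longDesc);
        assert capped.length() == 1024;
    }
}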
public static Map<String, String> getStreamConfigMap(TableConfig tableConfig) {
String tableNameWithType = tableConfig.getTableName();
Preconditions.checkState(tableConfig.getTableType() == TableType.REALTIME,
"Cannot fetch streamConfigs for OFFLINE table: %s", tableNameWithType);
Map<String, String> streamConfigMap = null;
if (tableConfig.getIngestionConfig() != null
&& tableConfig.getIngestionConfig().getStreamIngestionConfig() != null) {
List<Map<String, String>> streamConfigMaps =
tableConfig.getIngestionConfig().getStreamIngestionConfig().getStreamConfigMaps();
Preconditions.checkState(streamConfigMaps.size() == 1, "Only 1 stream supported per table");
streamConfigMap = streamConfigMaps.get(0);
}
if (streamConfigMap == null && tableConfig.getIndexingConfig() != null) {
streamConfigMap = tableConfig.getIndexingConfig().getStreamConfigs();
}
if (streamConfigMap == null) {
throw new IllegalStateException("Could not find streamConfigs for REALTIME table: " + tableNameWithType);
}
return streamConfigMap;
} | @Test
public void testGetStreamConfigMap() {
TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName("myTable").build();
try {
IngestionConfigUtils.getStreamConfigMap(tableConfig);
Assert.fail("Should fail for OFFLINE table");
} catch (IllegalStateException e) {
// expected
}
tableConfig =
new TableConfigBuilder(TableType.REALTIME).setTableName("myTable").setTimeColumnName("timeColumn").build();
// get from ingestion config (when not present in indexing config)
Map<String, String> streamConfigMap = Collections.singletonMap("streamType", "kafka");
IngestionConfig ingestionConfig = new IngestionConfig();
ingestionConfig.setStreamIngestionConfig(new StreamIngestionConfig(Collections.singletonList(streamConfigMap)));
tableConfig.setIngestionConfig(ingestionConfig);
Map<String, String> actualStreamConfigsMap = IngestionConfigUtils.getStreamConfigMap(tableConfig);
Assert.assertEquals(actualStreamConfigsMap.size(), 1);
Assert.assertEquals(actualStreamConfigsMap.get("streamType"), "kafka");
// get from ingestion config (even if present in indexing config)
Map<String, String> deprecatedStreamConfigMap = new HashMap<>();
deprecatedStreamConfigMap.put("streamType", "foo");
deprecatedStreamConfigMap.put("customProp", "foo");
IndexingConfig indexingConfig = new IndexingConfig();
indexingConfig.setStreamConfigs(deprecatedStreamConfigMap);
tableConfig.setIndexingConfig(indexingConfig);
actualStreamConfigsMap = IngestionConfigUtils.getStreamConfigMap(tableConfig);
Assert.assertEquals(actualStreamConfigsMap.size(), 1);
Assert.assertEquals(actualStreamConfigsMap.get("streamType"), "kafka");
// fail if multiple found
ingestionConfig.setStreamIngestionConfig(
new StreamIngestionConfig(Arrays.asList(streamConfigMap, deprecatedStreamConfigMap)));
try {
IngestionConfigUtils.getStreamConfigMap(tableConfig);
Assert.fail("Should fail for multiple stream configs");
} catch (IllegalStateException e) {
// expected
}
// get from indexing config
tableConfig.setIngestionConfig(null);
actualStreamConfigsMap = IngestionConfigUtils.getStreamConfigMap(tableConfig);
Assert.assertEquals(actualStreamConfigsMap.size(), 2);
Assert.assertEquals(actualStreamConfigsMap.get("streamType"), "foo");
// fail if found nowhere
tableConfig.setIndexingConfig(null);
try {
IngestionConfigUtils.getStreamConfigMap(tableConfig);
Assert.fail("Should fail for no stream config found");
} catch (IllegalStateException e) {
// expected
}
} |
public static Bech32Data decode(final String str) throws AddressFormatException {
boolean lower = false, upper = false;
if (str.length() < 8)
throw new AddressFormatException.InvalidDataLength("Input too short: " + str.length());
if (str.length() > 90)
throw new AddressFormatException.InvalidDataLength("Input too long: " + str.length());
for (int i = 0; i < str.length(); ++i) {
char c = str.charAt(i);
if (c < 33 || c > 126) throw new AddressFormatException.InvalidCharacter(c, i);
if (c >= 'a' && c <= 'z') {
if (upper)
throw new AddressFormatException.InvalidCharacter(c, i);
lower = true;
}
if (c >= 'A' && c <= 'Z') {
if (lower)
throw new AddressFormatException.InvalidCharacter(c, i);
upper = true;
}
}
final int pos = str.lastIndexOf('1');
if (pos < 1) throw new AddressFormatException.InvalidPrefix("Missing human-readable part");
final int dataPartLength = str.length() - 1 - pos;
if (dataPartLength < 6) throw new AddressFormatException.InvalidDataLength("Data part too short: " + dataPartLength);
byte[] values = new byte[dataPartLength];
for (int i = 0; i < dataPartLength; ++i) {
char c = str.charAt(i + pos + 1);
if (CHARSET_REV[c] == -1) throw new AddressFormatException.InvalidCharacter(c, i + pos + 1);
values[i] = CHARSET_REV[c];
}
String hrp = str.substring(0, pos).toLowerCase(Locale.ROOT);
Encoding encoding = verifyChecksum(hrp, values);
if (encoding == null) throw new AddressFormatException.InvalidChecksum();
return new Bech32Data(encoding, hrp, Arrays.copyOfRange(values, 0, values.length - 6));
} | @Test(expected = AddressFormatException.InvalidCharacter.class)
public void decode_invalidCharacter_upperLowerMix() {
Bech32.decode("A12UeL5X");
} |
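// A hedged happy-path sketch (field names assumed from the Bech32Data return type
// above): decoding a valid BIP-173 test vector yields the lower-cased HRP and the
// data part with the 6-character checksum stripped.
final class Bech32DecodeExample {
    static void demo() {
        Bech32.Bech32Data decoded =
                Bech32.decode("bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t4");
        // decoded.hrp == "bc"; decoded.data holds the 5-bit groups minus the checksum.
    }
}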
String buildCustomMessage(EventNotificationContext ctx, SlackEventNotificationConfig config, String template) throws PermanentEventNotificationException {
final List<MessageSummary> backlog = getMessageBacklog(ctx, config);
Map<String, Object> model = getCustomMessageModel(ctx, config.type(), backlog, config.timeZone());
try {
LOG.debug("customMessage: template = {} model = {}", template, model);
return templateEngine.transform(template, model);
} catch (Exception e) {
String error = "Invalid Custom Message template.";
LOG.error("{} [{}]", error, e.toString());
throw new PermanentEventNotificationException(error + " " + e, e.getCause());
}
} | @Test
public void testCustomMessage() throws PermanentEventNotificationException {
SlackEventNotificationConfig slackConfig = SlackEventNotificationConfig.builder()
.backlogSize(5)
.build();
String message = slackEventNotification.buildCustomMessage(eventNotificationContext, slackConfig, "Ich spreche Deutsch");
assertThat(message).isEqualTo("Ich spreche Deutsch");
} |
public Optional<DispatchEvent> build(final String databaseName, final DataChangedEvent event) {
for (RuleNodePathProvider each : ShardingSphereServiceLoader.getServiceInstances(RuleNodePathProvider.class)) {
Optional<DispatchEvent> result = build(each.getRuleNodePath(), databaseName, event);
if (result.isPresent()) {
return result;
}
}
return Optional.empty();
} | @Test
void assertBuildWithoutRuleNodePathProvider() {
when(ShardingSphereServiceLoader.getServiceInstances(RuleNodePathProvider.class)).thenReturn(Collections.emptyList());
assertFalse(new RuleConfigurationEventBuilder().build("foo_db", new DataChangedEvent("k", "v", Type.IGNORED)).isPresent());
} |
@Override
protected EnumDeclaration create(CompilationUnit compilationUnit) {
EnumDeclaration lambdaClass = super.create(compilationUnit);
boolean hasDroolsParameter = lambdaParameters.stream().anyMatch(this::isDroolsParameter);
if (hasDroolsParameter) {
bitMaskVariables.forEach(vd -> vd.generateBitMaskField(lambdaClass));
}
return lambdaClass;
} | @Test
public void createConsequenceWithMultipleFactUpdate() {
ArrayList<String> personFields = new ArrayList<>();
personFields.add("\"name\"");
MaterializedLambda.BitMaskVariable bitMaskPerson = new MaterializedLambda.BitMaskVariableWithFields("DomainClassesMetadataB45236F6195B110E0FA3A5447BC53274.org_drools_modelcompiler_domain_Person_Metadata_INSTANCE", personFields, "mask_$person");
ArrayList<String> petFields = new ArrayList<>();
petFields.add("\"age\"");
MaterializedLambda.BitMaskVariable bitMaskPet = new MaterializedLambda.BitMaskVariableWithFields("DomainClassesMetadataB45236F6195B110E0FA3A5447BC53274.org_drools_modelcompiler_domain_Pet_Metadata_INSTANCE", petFields, "mask_$pet");
String consequenceBlock = "(org.drools.model.Drools drools, org.drools.model.codegen.execmodel.domain.Pet $pet, org.drools.model.codegen.execmodel.domain.Person $person) -> {{ ($person).setName(\"George\");drools.update($person, mask_$person); ($pet).setAge($pet.getAge() + 1); drools.update($pet, mask_$pet); }}";
CreatedClass aClass = new MaterializedLambdaConsequence("defaultpkg",
"defaultpkg.RulesB45236F6195B110E0FA3A5447BC53274",
Arrays.asList(bitMaskPerson, bitMaskPet))
.create(consequenceBlock, new ArrayList<>(), new ArrayList<>());
String classNameWithPackage = aClass.getClassNameWithPackage();
// There is no easy way to retrieve the originally created "hashcode" because it is calculated over a CompilationUnit that soon after is modified;
// so current "CreatedClass" contains a CompilationUnit that is different from the one used to calculate the hashcode
String expectedPackageName = classNameWithPackage.substring(0, classNameWithPackage.lastIndexOf('.'));
String expectedClassName = classNameWithPackage.substring(classNameWithPackage.lastIndexOf('.')+1);
//language=JAVA
String expectedResult = "" +
"package PACKAGE_TOREPLACE;\n" +
"import static defaultpkg.RulesB45236F6195B110E0FA3A5447BC53274.*; " +
"import org.drools.modelcompiler.dsl.pattern.D; " +
"" +
" \n"+
"@org.drools.compiler.kie.builder.MaterializedLambda() " +
"public enum CLASS_TOREPLACE implements org.drools.model.functions.Block3<org.drools.model.Drools, org.drools.model.codegen.execmodel.domain.Pet, org.drools.model.codegen.execmodel.domain.Person>, org.drools.model.functions.HashedExpression {\n" +
"\n" +
" INSTANCE;\n" +
" public static final String EXPRESSION_HASH = \"2070FC5A885B4BE208D2534D37796F3F\";\n" +
" public java.lang.String getExpressionHash() {\n" +
" return EXPRESSION_HASH;\n" +
" }" +
" private final org.drools.model.BitMask mask_$person = org.drools.model.BitMask.getPatternMask(DomainClassesMetadataB45236F6195B110E0FA3A5447BC53274.org_drools_modelcompiler_domain_Person_Metadata_INSTANCE, \"name\");\n" +
"\n" +
" private final org.drools.model.BitMask mask_$pet = org.drools.model.BitMask.getPatternMask(DomainClassesMetadataB45236F6195B110E0FA3A5447BC53274.org_drools_modelcompiler_domain_Pet_Metadata_INSTANCE, \"age\");\n" +
"\n" +
" @Override()\n" +
" public void execute(org.drools.model.Drools drools, org.drools.model.codegen.execmodel.domain.Pet $pet, org.drools.model.codegen.execmodel.domain.Person $person) throws java.lang.Exception {\n" +
" {\n" +
" ($person).setName(\"George\");\n" +
" drools.update($person, mask_$person);\n" +
" ($pet).setAge($pet.getAge() + 1);\n" +
" drools.update($pet, mask_$pet);\n" +
" }\n" +
" }\n" +
"}";
// Workaround to keep the "//language=JAVA" working
expectedResult = expectedResult
.replace("PACKAGE_TOREPLACE", expectedPackageName)
.replace("CLASS_TOREPLACE", expectedClassName);
verifyCreatedClass(aClass, expectedResult);
} |
public void upgrade() {
viewService.streamAll().forEach(view -> {
final Optional<User> user = view.owner().map(userService::load);
if (user.isPresent() && !user.get().isLocalAdmin()) {
final GRNType grnType = ViewDTO.Type.DASHBOARD.equals(view.type()) ? GRNTypes.DASHBOARD : GRNTypes.SEARCH;
final GRN target = grnType.toGRN(view.id());
ensureGrant(user.get(), target);
}
});
} | @Test
@DisplayName("don't migrate non-existing owner")
void dontMigrateNonExistingOwner() {
final GRN testuserGRN = GRNTypes.USER.toGRN("olduser");
final GRN dashboard = GRNTypes.DASHBOARD.toGRN("54e3deadbeefdeadbeef0003");
when(userService.load(anyString())).thenReturn(null);
migration.upgrade();
assertThat(grantService.hasGrantFor(testuserGRN, Capability.OWN, dashboard)).isFalse();
} |
public static ShenyuAdminResult error(final String msg) {
return error(CommonErrorCode.ERROR, msg);
} | @Test
public void testErrorWithMsg() {
final ShenyuAdminResult result = ShenyuAdminResult.error(0, "msg");
assertEquals(0, result.getCode().intValue());
assertEquals("msg", result.getMessage());
assertNull(result.getData());
assertEquals(3390718, result.hashCode());
assertEquals("ShenyuAdminResult{code=0, message='msg', data=null}", result.toString());
} |
public List<ShardingCondition> createShardingConditions(final InsertStatementContext sqlStatementContext, final List<Object> params) {
List<ShardingCondition> result = null == sqlStatementContext.getInsertSelectContext()
? createShardingConditionsWithInsertValues(sqlStatementContext, params)
: createShardingConditionsWithInsertSelect(sqlStatementContext, params);
appendGeneratedKeyConditions(sqlStatementContext, result);
return result;
} | @Test
void assertCreateShardingConditionsSelectStatementWithGeneratedKeyContext() {
when(insertStatementContext.getGeneratedKeyContext()).thenReturn(Optional.of(mock(GeneratedKeyContext.class)));
when(insertStatementContext.getInsertSelectContext()).thenReturn(mock(InsertSelectContext.class));
assertTrue(shardingConditionEngine.createShardingConditions(insertStatementContext, Collections.emptyList()).isEmpty());
} |
public static <K, V> V getOrPutSynchronized(ConcurrentMap<K, V> map, K key, final Object mutex,
ConstructorFunction<K, V> func) {
if (mutex == null) {
throw new NullPointerException();
}
V value = map.get(key);
if (value == null) {
synchronized (mutex) {
value = map.get(key);
if (value == null) {
value = func.createNew(key);
map.put(key, value);
}
}
}
return value;
} | @SuppressWarnings("ConstantConditions")
@Test(expected = NullPointerException.class)
public void testGetOrPutSynchronized_whenMutexIsNull_thenThrowException() {
ConcurrencyUtil.getOrPutSynchronized(map, 5, (Object) null, constructorFunction);
} |
public static DeleteGroupsResponseData.DeletableGroupResultCollection getErrorResultCollection(
List<String> groupIds,
Errors error
) {
DeleteGroupsResponseData.DeletableGroupResultCollection resultCollection =
new DeleteGroupsResponseData.DeletableGroupResultCollection();
groupIds.forEach(groupId -> resultCollection.add(
new DeleteGroupsResponseData.DeletableGroupResult()
.setGroupId(groupId)
.setErrorCode(error.code())
));
return resultCollection;
} | @Test
public void testGetErrorResultCollection() {
String groupId1 = "group-id-1";
String groupId2 = "group-id-2";
DeleteGroupsRequestData data = new DeleteGroupsRequestData()
.setGroupsNames(Arrays.asList(groupId1, groupId2));
DeleteGroupsResponseData.DeletableGroupResultCollection expectedResultCollection =
new DeleteGroupsResponseData.DeletableGroupResultCollection(Arrays.asList(
new DeleteGroupsResponseData.DeletableGroupResult()
.setGroupId(groupId1)
.setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code()),
new DeleteGroupsResponseData.DeletableGroupResult()
.setGroupId(groupId2)
.setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code())
).iterator());
assertEquals(expectedResultCollection, getErrorResultCollection(data.groupsNames(), Errors.COORDINATOR_LOAD_IN_PROGRESS));
} |
public static boolean canDrop(
FilterPredicate pred, List<ColumnChunkMetaData> columns, DictionaryPageReadStore dictionaries) {
Objects.requireNonNull(pred, "pred cannot be null");
Objects.requireNonNull(columns, "columns cannot be null");
return pred.accept(new DictionaryFilter(columns, dictionaries));
} | @Test
public void testInFixed() throws Exception {
BinaryColumn b = binaryColumn("fixed_field");
// Only V2 supports dictionary encoding for FIXED_LEN_BYTE_ARRAY values
if (version == PARQUET_2_0) {
Set<Binary> set1 = new HashSet<>();
set1.add(toBinary("-2", 17));
set1.add(toBinary("-22", 17));
set1.add(toBinary("12345", 17));
FilterPredicate predIn1 = in(b, set1);
FilterPredicate predNotIn1 = notIn(b, set1);
assertTrue("Should drop block for in (-2, -22, 12345)", canDrop(predIn1, ccmd, dictionaries));
assertFalse("Should not drop block for notIn (-2, -22, 12345)", canDrop(predNotIn1, ccmd, dictionaries));
Set<Binary> set2 = new HashSet<>();
set2.add(toBinary("-1", 17));
set2.add(toBinary("0", 17));
set2.add(toBinary("12345", 17));
assertFalse("Should not drop block for in (-1, 0, 12345)", canDrop(in(b, set2), ccmd, dictionaries));
assertFalse("Should not drop block for in (-1, 0, 12345)", canDrop(notIn(b, set2), ccmd, dictionaries));
}
Set<Binary> set3 = new HashSet<>();
set3.add(null);
FilterPredicate predIn3 = in(b, set3);
FilterPredicate predNotIn3 = notIn(b, set3);
assertFalse("Should not drop block for null", canDrop(predIn3, ccmd, dictionaries));
assertFalse("Should not drop block for null", canDrop(predNotIn3, ccmd, dictionaries));
} |
public long put(final K key, final V value, final long timestamp) {
if (timestampedStore != null) {
timestampedStore.put(key, ValueAndTimestamp.make(value, timestamp));
return PUT_RETURN_CODE_IS_LATEST;
}
if (versionedStore != null) {
return versionedStore.put(key, value, timestamp);
}
throw new IllegalStateException("KeyValueStoreWrapper must be initialized with either timestamped or versioned store");
} | @Test
public void shouldPutToVersionedStore() {
givenWrapperWithVersionedStore();
when(versionedStore.put(KEY, VALUE_AND_TIMESTAMP.value(), VALUE_AND_TIMESTAMP.timestamp())).thenReturn(12L);
final long putReturnCode = wrapper.put(KEY, VALUE_AND_TIMESTAMP.value(), VALUE_AND_TIMESTAMP.timestamp());
assertThat(putReturnCode, equalTo(12L));
} |
public AstNode rewrite(final AstNode node, final C context) {
return rewriter.process(node, context);
} | @Test
public void shouldRewriteInsertInto() {
// Given:
final InsertInto ii = new InsertInto(location, sourceName, query, insertIntoProperties);
when(mockRewriter.apply(query, context)).thenReturn(rewrittenQuery);
// When:
final AstNode rewritten = rewriter.rewrite(ii, context);
// Then:
assertThat(
rewritten,
equalTo(new InsertInto(location, sourceName, rewrittenQuery, insertIntoProperties))
);
} |
@Override
public ClusterInfo clusterGetClusterInfo() {
RFuture<Map<String, String>> f = executorService.readAsync((String)null, StringCodec.INSTANCE, RedisCommands.CLUSTER_INFO);
Map<String, String> entries = syncFuture(f);
Properties props = new Properties();
for (Entry<String, String> entry : entries.entrySet()) {
props.setProperty(entry.getKey(), entry.getValue());
}
return new ClusterInfo(props);
} | @Test
public void testClusterGetClusterInfo() {
ClusterInfo info = connection.clusterGetClusterInfo();
assertThat(info.getSlotsFail()).isEqualTo(0);
assertThat(info.getSlotsOk()).isEqualTo(MasterSlaveConnectionManager.MAX_SLOT);
assertThat(info.getSlotsAssigned()).isEqualTo(MasterSlaveConnectionManager.MAX_SLOT);
} |
public void shutdown() {
DefaultMetricsSystem.shutdown();
} | @Test
public void testResourceCheck() throws Exception {
HdfsConfiguration conf = new HdfsConfiguration();
File basedir = new File(MiniDFSCluster.getBaseDirectory(),
GenericTestUtils.getMethodName());
MiniDFSCluster tmpCluster = new MiniDFSCluster.Builder(conf, basedir)
.numDataNodes(0)
.nnTopology(MiniDFSNNTopology.simpleHATopology())
.build();
try {
MockNameNodeResourceChecker mockResourceChecker =
new MockNameNodeResourceChecker(conf);
tmpCluster.getNameNode(0).getNamesystem()
.setNNResourceChecker(mockResourceChecker);
NNHAServiceTarget haTarget = new NNHAServiceTarget(conf,
DFSUtil.getNamenodeNameServiceId(
new HdfsConfiguration()), "nn1");
HAServiceProtocol rpc = haTarget.getHealthMonitorProxy(conf, conf.getInt(
HA_HM_RPC_TIMEOUT_KEY, HA_HM_RPC_TIMEOUT_DEFAULT));
MetricsRecordBuilder rb = getMetrics(NN_METRICS);
for (long i = 0; i < 10; i++) {
rpc.monitorHealth();
assertQuantileGauges("ResourceCheckTime1s", rb);
}
} finally {
if (tmpCluster != null) {
tmpCluster.shutdown();
}
}
} |
public List<MetricsKey> getMetricsKeys() {
return metricsKeys;
} | @Override
@Test
public void testSerialize() throws JsonProcessingException {
ClientConfigMetricRequest clientMetrics = new ClientConfigMetricRequest();
clientMetrics.putAllHeader(HEADERS);
clientMetrics.getMetricsKeys()
.add(ClientConfigMetricRequest.MetricsKey.build(CACHE_DATA, String.join("+", KEY)));
clientMetrics.getMetricsKeys()
.add(ClientConfigMetricRequest.MetricsKey.build(SNAPSHOT_DATA, String.join("+", KEY)));
final String requestId = injectRequestUuId(clientMetrics);
String json = mapper.writeValueAsString(clientMetrics);
assertTrue(json.contains("\"type\":\"" + "cacheData" + "\""));
assertTrue(json.contains("\"type\":\"" + "snapshotData" + "\""));
assertTrue(json.contains("\"key\":\"" + String.join("+", KEY) + "\""));
assertTrue(json.contains("\"module\":\"" + Constants.Config.CONFIG_MODULE));
assertTrue(json.contains("\"requestId\":\"" + requestId));
} |
@Operation(summary = "list", description = "List components")
@GetMapping
public ResponseEntity<List<ComponentVO>> list(@PathVariable Long clusterId) {
return ResponseEntity.success(componentService.list(clusterId));
} | @Test
void listReturnsEmptyForNoComponents() {
Long clusterId = 1L;
when(componentService.list(clusterId)).thenReturn(List.of());
ResponseEntity<List<ComponentVO>> response = componentController.list(clusterId);
assertTrue(response.isSuccess());
assertTrue(response.getData().isEmpty());
} |
public LinkedHashMap<String, String> getKeyPropertyList(ObjectName mbeanName) {
LinkedHashMap<String, String> keyProperties = keyPropertiesPerBean.get(mbeanName);
if (keyProperties == null) {
keyProperties = new LinkedHashMap<>();
String properties = mbeanName.getKeyPropertyListString();
Matcher match = PROPERTY_PATTERN.matcher(properties);
while (match.lookingAt()) {
keyProperties.put(match.group(1), match.group(2));
properties = properties.substring(match.end());
if (properties.startsWith(",")) {
properties = properties.substring(1);
}
match.reset(properties);
}
keyPropertiesPerBean.put(mbeanName, keyProperties);
}
return keyProperties;
} | @Test
public void testQuotedObjectNameWithQuote() throws Throwable {
JmxMBeanPropertyCache testCache = new JmxMBeanPropertyCache();
LinkedHashMap<String, String> parameterList =
testCache.getKeyPropertyList(
new ObjectName("com.organisation:name=\"value\\\"more\",name2=value2"));
assertSameElementsAndOrder(parameterList, "name", "\"value\\\"more\"", "name2", "value2");
} |
static void populateMissingOutputFieldDataType(final Model model, final List<DataField> dataFields) {
if (model.getOutput() != null &&
model.getOutput().getOutputFields() != null) {
populateMissingOutputFieldDataType(model.getOutput().getOutputFields(),
model.getMiningSchema().getMiningFields(),
dataFields);
}
} | @Test
void populateMissingOutputFieldDataType() {
Random random = new Random();
List<String> fieldNames = IntStream.range(0, 6)
.mapToObj(i -> RandomStringUtils.random(6, true, false))
.collect(Collectors.toList());
List<DataField> dataFields = fieldNames.stream()
.map(fieldName -> {
DataField toReturn = new DataField();
toReturn.setName(fieldName);
DataType dataType = DataType.values()[random.nextInt(DataType.values().length)];
toReturn.setDataType(dataType);
return toReturn;
})
.collect(Collectors.toList());
List<MiningField> miningFields = IntStream.range(0, dataFields.size() - 1)
.mapToObj(dataFields::get)
.map(dataField -> {
MiningField toReturn = new MiningField();
toReturn.setName(dataField.getName());
toReturn.setUsageType(MiningField.UsageType.ACTIVE);
return toReturn;
})
.collect(Collectors.toList());
DataField lastDataField = dataFields.get(dataFields.size() - 1);
MiningField targetMiningField = new MiningField();
targetMiningField.setName(lastDataField.getName());
targetMiningField.setUsageType(MiningField.UsageType.TARGET);
miningFields.add(targetMiningField);
// Following OutputFields should be populated based on "ResultFeature.PROBABILITY"
List<OutputField> outputFields = IntStream.range(0, 3)
.mapToObj(i -> {
OutputField toReturn = new OutputField();
toReturn.setName(RandomStringUtils.random(6, true, false));
toReturn.setResultFeature(ResultFeature.PROBABILITY);
return toReturn;
})
.collect(Collectors.toList());
// Following OutputField should be populated based on "ResultFeature.PREDICTED_VALUE"
OutputField targetOutputField = new OutputField();
targetOutputField.setName(RandomStringUtils.random(6, true, false));
targetOutputField.setResultFeature(ResultFeature.PREDICTED_VALUE);
outputFields.add(targetOutputField);
// Following OutputField should be populated based on "TargetField" property
OutputField targetingOutputField = new OutputField();
targetingOutputField.setName(RandomStringUtils.random(6, true, false));
targetingOutputField.setTargetField(targetMiningField.getName());
outputFields.add(targetingOutputField);
outputFields.forEach(outputField -> assertThat(outputField.getDataType()).isNull());
IntStream.range(0, 2)
.forEach(i -> {
OutputField toAdd = new OutputField();
toAdd.setName(RandomStringUtils.random(6, true, false));
DataType dataType = DataType.values()[random.nextInt(DataType.values().length)];
toAdd.setDataType(dataType);
outputFields.add(toAdd);
});
KiePMMLUtil.populateMissingOutputFieldDataType(outputFields, miningFields, dataFields);
outputFields.forEach(outputField -> assertThat(outputField.getDataType()).isNotNull());
} |
public synchronized void addLocation(URI location, TaskId remoteSourceTaskId)
{
requireNonNull(location, "location is null");
// Ignore new locations after close.
// NOTE: this MUST happen before the no-more-locations check below.
if (closed.get()) {
return;
}
// ignore duplicate locations
if (allClients.containsKey(location)) {
return;
}
// already removed
if (removedRemoteSourceTaskIds.contains(remoteSourceTaskId)) {
return;
}
checkState(!noMoreLocations, "No more locations already set");
RpcShuffleClient resultClient;
switch (location.getScheme().toLowerCase(Locale.ENGLISH)) {
case "http":
case "https":
resultClient = new HttpRpcShuffleClient(httpClient, location);
break;
case "thrift":
resultClient = new ThriftRpcShuffleClient(driftClient, location);
break;
default:
throw new PrestoException(GENERIC_INTERNAL_ERROR, "unsupported task result client scheme " + location.getScheme());
}
PageBufferClient client = new PageBufferClient(
resultClient,
maxErrorDuration,
acknowledgePages,
location,
new ExchangeClientCallback(),
scheduler,
pageBufferClientCallbackExecutor);
allClients.put(location, client);
checkState(taskIdToLocationMap.put(remoteSourceTaskId, location) == null, "Duplicate remoteSourceTaskId: " + remoteSourceTaskId);
queuedClients.add(client);
scheduleRequestIfNecessary();
} | @Test(timeOut = 10000)
public void testAddLocation()
throws Exception
{
DataSize bufferCapacity = new DataSize(32, MEGABYTE);
DataSize maxResponseSize = new DataSize(10, MEGABYTE);
MockExchangeRequestProcessor processor = new MockExchangeRequestProcessor(maxResponseSize);
ExchangeClient exchangeClient = createExchangeClient(processor, bufferCapacity, maxResponseSize);
URI location1 = URI.create("http://localhost:8081/foo");
processor.addPage(location1, createPage(1));
processor.addPage(location1, createPage(2));
processor.addPage(location1, createPage(3));
processor.setComplete(location1);
exchangeClient.addLocation(location1, TaskId.valueOf("foo.0.0.0.0"));
assertFalse(exchangeClient.isClosed());
assertPageEquals(getNextPage(exchangeClient), createPage(1));
assertFalse(exchangeClient.isClosed());
assertPageEquals(getNextPage(exchangeClient), createPage(2));
assertFalse(exchangeClient.isClosed());
assertPageEquals(getNextPage(exchangeClient), createPage(3));
assertFalse(tryGetFutureValue(exchangeClient.isBlocked(), 10, MILLISECONDS).isPresent());
assertFalse(exchangeClient.isClosed());
URI location2 = URI.create("http://localhost:8082/bar");
processor.addPage(location2, createPage(4));
processor.addPage(location2, createPage(5));
processor.addPage(location2, createPage(6));
processor.setComplete(location2);
exchangeClient.addLocation(location2, TaskId.valueOf("bar.0.0.0.0"));
assertFalse(exchangeClient.isClosed());
assertPageEquals(getNextPage(exchangeClient), createPage(4));
assertFalse(exchangeClient.isClosed());
assertPageEquals(getNextPage(exchangeClient), createPage(5));
assertFalse(exchangeClient.isClosed());
assertPageEquals(getNextPage(exchangeClient), createPage(6));
assertFalse(tryGetFutureValue(exchangeClient.isBlocked(), 10, MILLISECONDS).isPresent());
assertFalse(exchangeClient.isClosed());
exchangeClient.noMoreLocations();
// The transition to closed may happen asynchronously, since it requires that all the HTTP clients
// receive a final GONE response, so just spin until it's closed or the test times out.
while (!exchangeClient.isClosed()) {
Thread.sleep(1);
}
ImmutableMap<URI, PageBufferClientStatus> statuses = uniqueIndex(exchangeClient.getStatus().getPageBufferClientStatuses(), PageBufferClientStatus::getUri);
assertStatus(statuses.get(location1), location1, "closed", 3, 3, 3, "not scheduled");
assertStatus(statuses.get(location2), location2, "closed", 3, 3, 3, "not scheduled");
} |
public static UnifiedDiff parseUnifiedDiff(InputStream stream) throws IOException, UnifiedDiffParserException {
UnifiedDiffReader parser = new UnifiedDiffReader(new BufferedReader(new InputStreamReader(stream)));
return parser.parse();
} | @Test
public void testParseIssue107_2() throws IOException {
UnifiedDiff diff = UnifiedDiffReader.parseUnifiedDiff(
UnifiedDiffReaderTest.class.getResourceAsStream("problem_diff_issue107.diff"));
assertThat(diff.getFiles().size()).isEqualTo(2);
UnifiedDiffFile file1 = diff.getFiles().get(0);
assertThat(file1.getFromFile()).isEqualTo("Main.java");
assertThat(file1.getPatch().getDeltas().size()).isEqualTo(1);
} |
public static void ensureTopic(
final String name,
final KsqlConfig ksqlConfig,
final KafkaTopicClient topicClient
) {
if (topicClient.isTopicExists(name)) {
validateTopicConfig(name, ksqlConfig, topicClient);
return;
}
final short replicationFactor = ksqlConfig
.getShort(KsqlConfig.KSQL_INTERNAL_TOPIC_REPLICAS_PROPERTY);
if (replicationFactor < 2) {
log.warn("Creating topic {} with replication factor of {} which is less than 2. "
+ "This is not advisable in a production environment. ",
name, replicationFactor);
}
final short minInSyncReplicas = ksqlConfig
.getShort(KsqlConfig.KSQL_INTERNAL_TOPIC_MIN_INSYNC_REPLICAS_PROPERTY);
topicClient.createTopic(
name,
INTERNAL_TOPIC_PARTITION_COUNT,
replicationFactor,
ImmutableMap.<String, Object>builder()
.putAll(INTERNAL_TOPIC_CONFIG)
.put(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, minInSyncReplicas)
.build()
);
} | @Test
public void shouldCreateInternalTopicIfItDoesNotExist() {
// When:
KsqlInternalTopicUtils.ensureTopic(TOPIC_NAME, ksqlConfig, topicClient);
// Then:
verify(topicClient).createTopic(TOPIC_NAME, 1, NREPLICAS, commandTopicConfig);
} |
@Override
public boolean enableSendingOldValues(final boolean forceMaterialization) {
if (queryableName != null) {
sendOldValues = true;
return true;
}
if (parent.enableSendingOldValues(forceMaterialization)) {
sendOldValues = true;
}
return sendOldValues;
} | @Test
public void shouldEnableSendOldValuesWhenNotMaterializedAlreadyButForcedToMaterialize() {
final StreamsBuilder builder = new StreamsBuilder();
final String topic1 = "topic1";
final KTableImpl<String, Integer, Integer> table1 =
(KTableImpl<String, Integer, Integer>) builder.table(topic1, consumed);
final KTableImpl<String, Integer, Integer> table2 =
(KTableImpl<String, Integer, Integer>) table1.filter(predicate);
table2.enableSendingOldValues(true);
assertThat(table1.sendingOldValueEnabled(), is(true));
assertThat(table2.sendingOldValueEnabled(), is(true));
doTestSendingOldValue(builder, table1, table2, topic1);
} |
public long brokerEpoch(int brokerId) {
BrokerRegistration brokerRegistration = broker(brokerId);
if (brokerRegistration == null) {
return -1L;
}
return brokerRegistration.epoch();
} | @Test
public void testBrokerEpoch() {
assertEquals(123L, IMAGE1.brokerEpoch(2));
} |
public static Format of(final FormatInfo formatInfo) {
final Format format = fromName(formatInfo.getFormat().toUpperCase());
format.validateProperties(formatInfo.getProperties());
return format;
} | @Test
public void shouldNotThrowWhenCreatingFromSupportedProperty() {
// Given:
final FormatInfo avroFormatInfo = FormatInfo.of("AVRO",
ImmutableMap.of("schemaId", "1", "fullSchemaName", "avroName"));
final FormatInfo protobufFormatInfo = FormatInfo.of("PROTOBUF",
ImmutableMap.of("schemaId", "1", "fullSchemaName", "protoName"));
final FormatInfo jsonSRFormatInfo = FormatInfo.of("JSON_SR",
ImmutableMap.of("schemaId", "123", "fullSchemaName", "jsonName"));
// When: Then:
assertThat(FormatFactory.of(avroFormatInfo), is(FormatFactory.AVRO));
assertThat(FormatFactory.of(protobufFormatInfo), is(FormatFactory.PROTOBUF));
assertThat(FormatFactory.of(jsonSRFormatInfo), is(FormatFactory.JSON_SR));
} |
@Override
public long extract(final Object key, final GenericRow value) {
final String colValue = (String) extractor.extract(key, value);
try {
return timestampParser.parse(colValue);
} catch (final KsqlException e) {
throw new KsqlException("Unable to parse string timestamp."
+ " timestamp=" + value
+ " timestamp_format=" + format,
e);
}
} | @SuppressFBWarnings("RV_RETURN_VALUE_IGNORED_NO_SIDE_EFFECT")
@Test
public void shouldPassRecordToColumnExtractor() {
// When:
extractor.extract(key, value);
// Then:
verify(columnExtractor).extract(key, value);
} |
@Nullable public String remoteServiceName() {
return remoteServiceName;
} | @Test void remoteServiceNameCoercesEmptyToNull() {
MutableSpan span = new MutableSpan();
span.remoteServiceName("FavStar");
span.remoteServiceName("");
assertThat(span.remoteServiceName()).isNull();
} |
public static CoderProvider fromStaticMethods(Class<?> rawType, Class<?> coderClazz) {
checkArgument(
Coder.class.isAssignableFrom(coderClazz),
"%s is not a subtype of %s",
coderClazz.getName(),
Coder.class.getSimpleName());
return new CoderProviderFromStaticMethods(rawType, coderClazz);
} | @Test
public void testCoderProvidersFromStaticMethodsForParameterlessTypes() throws Exception {
CoderProvider factory = CoderProviders.fromStaticMethods(String.class, StringUtf8Coder.class);
assertEquals(
StringUtf8Coder.of(), factory.coderFor(TypeDescriptors.strings(), Collections.emptyList()));
factory = CoderProviders.fromStaticMethods(Double.class, DoubleCoder.class);
assertEquals(
DoubleCoder.of(), factory.coderFor(TypeDescriptors.doubles(), Collections.emptyList()));
factory = CoderProviders.fromStaticMethods(byte[].class, ByteArrayCoder.class);
assertEquals(
ByteArrayCoder.of(),
factory.coderFor(TypeDescriptor.of(byte[].class), Collections.emptyList()));
} |
static void checkManifestPlatform(
BuildContext buildContext, ContainerConfigurationTemplate containerConfig)
throws PlatformNotFoundInBaseImageException {
Optional<Path> path = buildContext.getBaseImageConfiguration().getTarPath();
String baseImageName =
path.map(Path::toString)
.orElse(buildContext.getBaseImageConfiguration().getImage().toString());
Set<Platform> platforms = buildContext.getContainerConfiguration().getPlatforms();
Verify.verify(!platforms.isEmpty());
if (platforms.size() != 1) {
String msg =
String.format(
"cannot build for multiple platforms since the base image '%s' is not a manifest list.",
baseImageName);
throw new PlatformNotFoundInBaseImageException(msg);
} else {
Platform platform = platforms.iterator().next();
if (!platform.getArchitecture().equals(containerConfig.getArchitecture())
|| !platform.getOs().equals(containerConfig.getOs())) {
// Unfortunately, "platforms" has amd64/linux by default even if the user didn't explicitly
// configure it. Skip reporting to suppress false alarm.
if (!(platform.getArchitecture().equals("amd64") && platform.getOs().equals("linux"))) {
String msg =
String.format(
"the configured platform (%s/%s) doesn't match the platform (%s/%s) of the base image (%s)",
platform.getArchitecture(),
platform.getOs(),
containerConfig.getArchitecture(),
containerConfig.getOs(),
baseImageName);
throw new PlatformNotFoundInBaseImageException(msg);
}
}
}
} | @Test
public void testCheckManifestPlatform_tarBaseImage() {
Path tar = Paths.get("/foo/bar.tar");
Mockito.when(buildContext.getBaseImageConfiguration())
.thenReturn(ImageConfiguration.builder(ImageReference.scratch()).setTarPath(tar).build());
Mockito.when(containerConfig.getPlatforms())
.thenReturn(ImmutableSet.of(new Platform("amd64", "linux"), new Platform("arch", "os")));
Exception ex =
assertThrows(
PlatformNotFoundInBaseImageException.class,
() ->
PlatformChecker.checkManifestPlatform(
buildContext, new ContainerConfigurationTemplate()));
assertThat(ex)
.hasMessageThat()
.isEqualTo(
"cannot build for multiple platforms since the base image '"
+ tar
+ "' is not a manifest list.");
} |
public static void removeMostServicingBrokersForNamespace(
final String assignedBundleName,
final Set<String> candidates,
final ConcurrentOpenHashMap<String, ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>>>
brokerToNamespaceToBundleRange) {
if (candidates.isEmpty()) {
return;
}
final String namespaceName = getNamespaceNameFromBundleName(assignedBundleName);
int leastBundles = Integer.MAX_VALUE;
for (final String broker : candidates) {
int bundles = (int) brokerToNamespaceToBundleRange
.computeIfAbsent(broker,
k -> ConcurrentOpenHashMap.<String,
ConcurrentOpenHashSet<String>>newBuilder().build())
.computeIfAbsent(namespaceName,
k -> ConcurrentOpenHashSet.<String>newBuilder().build())
.size();
leastBundles = Math.min(leastBundles, bundles);
if (leastBundles == 0) {
break;
}
}
// Since `brokerToNamespaceToBundleRange` can be updated by other threads,
// `leastBundles` may differ from the actual value.
final int finalLeastBundles = leastBundles;
candidates.removeIf(
broker -> brokerToNamespaceToBundleRange.computeIfAbsent(broker,
k -> ConcurrentOpenHashMap.<String,
ConcurrentOpenHashSet<String>>newBuilder().build())
.computeIfAbsent(namespaceName,
k -> ConcurrentOpenHashSet.<String>newBuilder().build())
.size() > finalLeastBundles);
} | @Test
public void testRemoveMostServicingBrokersForNamespace() {
String namespace = "tenant1/ns1";
String assignedBundle = namespace + "/0x00000000_0x40000000";
Set<String> candidates = new HashSet<>();
ConcurrentOpenHashMap<String, ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>>> map =
ConcurrentOpenHashMap.<String,
ConcurrentOpenHashMap<String, ConcurrentOpenHashSet<String>>>newBuilder()
.build();
LoadManagerShared.removeMostServicingBrokersForNamespace(assignedBundle, candidates, map);
Assert.assertEquals(candidates.size(), 0);
candidates = Sets.newHashSet("broker1");
LoadManagerShared.removeMostServicingBrokersForNamespace(assignedBundle, candidates, map);
Assert.assertEquals(candidates.size(), 1);
Assert.assertTrue(candidates.contains("broker1"));
candidates = Sets.newHashSet("broker1");
fillBrokerToNamespaceToBundleMap(map, "broker1", namespace, "0x40000000_0x80000000");
LoadManagerShared.removeMostServicingBrokersForNamespace(assignedBundle, candidates, map);
Assert.assertEquals(candidates.size(), 1);
Assert.assertTrue(candidates.contains("broker1"));
candidates = Sets.newHashSet("broker1", "broker2");
LoadManagerShared.removeMostServicingBrokersForNamespace(assignedBundle, candidates, map);
Assert.assertEquals(candidates.size(), 1);
Assert.assertTrue(candidates.contains("broker2"));
candidates = Sets.newHashSet("broker1", "broker2");
fillBrokerToNamespaceToBundleMap(map, "broker2", namespace, "0x80000000_0xc0000000");
LoadManagerShared.removeMostServicingBrokersForNamespace(assignedBundle, candidates, map);
Assert.assertEquals(candidates.size(), 2);
Assert.assertTrue(candidates.contains("broker1"));
Assert.assertTrue(candidates.contains("broker2"));
candidates = Sets.newHashSet("broker1", "broker2");
fillBrokerToNamespaceToBundleMap(map, "broker2", namespace, "0xc0000000_0xd0000000");
LoadManagerShared.removeMostServicingBrokersForNamespace(assignedBundle, candidates, map);
Assert.assertEquals(candidates.size(), 1);
Assert.assertTrue(candidates.contains("broker1"));
candidates = Sets.newHashSet("broker1", "broker2", "broker3");
fillBrokerToNamespaceToBundleMap(map, "broker3", namespace, "0xd0000000_0xffffffff");
LoadManagerShared.removeMostServicingBrokersForNamespace(assignedBundle, candidates, map);
Assert.assertEquals(candidates.size(), 2);
Assert.assertTrue(candidates.contains("broker1"));
Assert.assertTrue(candidates.contains("broker3"));
} |
@Override
public void execute(Exchange exchange) throws SmppException {
SubmitMulti[] submitMulties = createSubmitMulti(exchange);
List<SubmitMultiResult> results = new ArrayList<>(submitMulties.length);
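        // createSubmitMulti may split the exchange into several PDUs (e.g. for long messages); submit each in turn.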
for (SubmitMulti submitMulti : submitMulties) {
SubmitMultiResult result;
if (log.isDebugEnabled()) {
log.debug("Sending multiple short messages for exchange id '{}'...", exchange.getExchangeId());
}
try {
result = session.submitMultiple(
submitMulti.getServiceType(),
TypeOfNumber.valueOf(submitMulti.getSourceAddrTon()),
NumberingPlanIndicator.valueOf(submitMulti.getSourceAddrNpi()),
submitMulti.getSourceAddr(),
(Address[]) submitMulti.getDestAddresses(),
new ESMClass(submitMulti.getEsmClass()),
submitMulti.getProtocolId(),
submitMulti.getPriorityFlag(),
submitMulti.getScheduleDeliveryTime(),
submitMulti.getValidityPeriod(),
new RegisteredDelivery(submitMulti.getRegisteredDelivery()),
new ReplaceIfPresentFlag(submitMulti.getReplaceIfPresentFlag()),
DataCodings.newInstance(submitMulti.getDataCoding()),
submitMulti.getSmDefaultMsgId(),
submitMulti.getShortMessage(),
submitMulti.getOptionalParameters());
results.add(result);
} catch (Exception e) {
throw new SmppException(e);
}
}
if (log.isDebugEnabled()) {
log.debug("Sent multiple short messages for exchange id '{}' and received results '{}'", exchange.getExchangeId(),
results);
}
List<String> messageIDs = new ArrayList<>(results.size());
// {messageID : [{destAddr : address, error : errorCode}]}
Map<String, List<Map<String, Object>>> errors = new HashMap<>();
for (SubmitMultiResult result : results) {
UnsuccessDelivery[] deliveries = result.getUnsuccessDeliveries();
if (deliveries != null) {
List<Map<String, Object>> undelivered = new ArrayList<>();
for (UnsuccessDelivery delivery : deliveries) {
Map<String, Object> error = new HashMap<>();
error.put(SmppConstants.DEST_ADDR, delivery.getDestinationAddress().getAddress());
error.put(SmppConstants.ERROR, delivery.getErrorStatusCode());
undelivered.add(error);
}
if (!undelivered.isEmpty()) {
errors.put(result.getMessageId(), undelivered);
}
}
messageIDs.add(result.getMessageId());
}
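        // Expose the message ids and any per-destination delivery failures as headers on the out message.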
Message message = ExchangeHelper.getResultMessage(exchange);
message.setHeader(SmppConstants.ID, messageIDs);
message.setHeader(SmppConstants.SENT_MESSAGE_COUNT, messageIDs.size());
if (!errors.isEmpty()) {
message.setHeader(SmppConstants.ERROR, errors);
}
} | @Test
public void execute() throws Exception {
Exchange exchange = new DefaultExchange(new DefaultCamelContext(), ExchangePattern.InOut);
exchange.getIn().setHeader(SmppConstants.COMMAND, "SubmitMulti");
exchange.getIn().setHeader(SmppConstants.ID, "1");
exchange.getIn().setHeader(SmppConstants.SOURCE_ADDR_TON, TypeOfNumber.NATIONAL.value());
exchange.getIn().setHeader(SmppConstants.SOURCE_ADDR_NPI, NumberingPlanIndicator.NATIONAL.value());
exchange.getIn().setHeader(SmppConstants.SOURCE_ADDR, "1818");
exchange.getIn().setHeader(SmppConstants.DEST_ADDR_TON, TypeOfNumber.INTERNATIONAL.value());
exchange.getIn().setHeader(SmppConstants.DEST_ADDR_NPI, NumberingPlanIndicator.INTERNET.value());
exchange.getIn().setHeader(SmppConstants.DEST_ADDR, Arrays.asList("1919"));
exchange.getIn().setHeader(SmppConstants.SCHEDULE_DELIVERY_TIME, new Date(1111111));
exchange.getIn().setHeader(SmppConstants.VALIDITY_PERIOD, new Date(2222222));
exchange.getIn().setHeader(SmppConstants.PROTOCOL_ID, (byte) 1);
exchange.getIn().setHeader(SmppConstants.PRIORITY_FLAG, (byte) 2);
exchange.getIn().setHeader(SmppConstants.REGISTERED_DELIVERY,
new RegisteredDelivery(SMSCDeliveryReceipt.FAILURE).value());
exchange.getIn().setHeader(SmppConstants.REPLACE_IF_PRESENT_FLAG, ReplaceIfPresentFlag.REPLACE.value());
exchange.getIn().setBody("short message body");
when(session.submitMultiple(eq("CMT"), eq(TypeOfNumber.NATIONAL), eq(NumberingPlanIndicator.NATIONAL), eq("1818"),
eq(new Address[] { new Address(TypeOfNumber.INTERNATIONAL, NumberingPlanIndicator.INTERNET, "1919") }),
eq(new ESMClass()), eq((byte) 1), eq((byte) 2), eq("-300101001831100+"), eq("-300101003702200+"),
eq(new RegisteredDelivery(SMSCDeliveryReceipt.FAILURE)),
eq(ReplaceIfPresentFlag.REPLACE), eq(DataCodings.newInstance((byte) 0)), eq((byte) 0),
eq("short message body".getBytes())))
.thenReturn(new SubmitMultiResult("1", null, null));
command.execute(exchange);
assertEquals(Collections.singletonList("1"), exchange.getMessage().getHeader(SmppConstants.ID));
assertEquals(1, exchange.getMessage().getHeader(SmppConstants.SENT_MESSAGE_COUNT));
assertNull(exchange.getMessage().getHeader(SmppConstants.ERROR));
} |
@Override
public void accept(ServerWebExchange exchange, CachedResponse cachedResponse) {
ServerHttpResponse response = exchange.getResponse();
long calculatedMaxAgeInSeconds = calculateMaxAgeInSeconds(exchange.getRequest(), cachedResponse,
configuredTimeToLive);
rewriteCacheControlMaxAge(response.getHeaders(), calculatedMaxAgeInSeconds);
} | @Test
void maxAgeIsZero_whenTimeToLiveIsNegative() {
inputExchange.getResponse().getHeaders().setCacheControl((String) null);
Duration timeToLive = Duration.ofSeconds(-1);
CachedResponse inputCachedResponse = CachedResponse.create(HttpStatus.OK).timestamp(clock.instant()).build();
SetMaxAgeHeaderAfterCacheExchangeMutator toTest = new SetMaxAgeHeaderAfterCacheExchangeMutator(timeToLive,
clock, false);
toTest.accept(inputExchange, inputCachedResponse);
assertThat(parseMaxAge(inputExchange.getResponse()).get()).isEqualTo(0L);
} |
static InMemorySymbolTable generate(SymbolTableNameHandler symbolTableNameHandler, Set<DataSchema> resourceSchemas)
{
Set<String> symbols = new HashSet<>();
addFrameworkSymbols(symbols);
Set<DataSchema> frameworkSchemas = new HashSet<>();
collectFrameworkSchemas(frameworkSchemas);
Set<DataSchema> processedSchemas = new HashSet<>();
frameworkSchemas.forEach(schema -> expandAndCollectSymbols(schema, processedSchemas, symbols));
resourceSchemas.forEach(schema -> expandAndCollectSymbols(schema, processedSchemas, symbols));
// Sort symbols to ensure stable ordering across invocations for the same input.
List<String> symbolList = new ArrayList<>(symbols);
Collections.sort(symbolList);
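    // The handler derives the table name from the sorted symbol list, so equal inputs map to the same name.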
String symbolTableName = symbolTableNameHandler.generateName(symbolList);
return new InMemorySymbolTable(symbolTableName, symbolList);
} | @Test
public void testSymbolTableGenerator()
{
DataSchema schema = DataTemplateUtil.getSchema(SimpleGreeting.class);
SymbolTableNameHandler handler = new SymbolTableNameHandler("Haha", "https://localhost:1000/service");
InMemorySymbolTable symbolTable = RuntimeSymbolTableGenerator.generate(handler, Collections.singleton(schema));
Assert.assertEquals(37, symbolTable.size());
} |
public static DynamicThreadPoolExecutor buildDynamicPool(ThreadPoolInitParam initParam) {
Assert.notNull(initParam);
DynamicThreadPoolExecutor dynamicThreadPoolExecutor;
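        // ThreadPoolExecutor validates its constructor arguments; rewrap failures with the pool id for easier diagnosis.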
try {
dynamicThreadPoolExecutor = new DynamicThreadPoolExecutor(
initParam.getCorePoolNum(),
initParam.getMaximumPoolSize(),
initParam.getKeepAliveTime(),
initParam.getTimeUnit(),
initParam.getExecuteTimeOut(),
initParam.getWaitForTasksToCompleteOnShutdown(),
initParam.getAwaitTerminationMillis(),
initParam.getWorkQueue(),
initParam.getThreadPoolId(),
initParam.getThreadFactory(),
initParam.getRejectedExecutionHandler());
} catch (IllegalArgumentException ex) {
throw new IllegalArgumentException(String.format("Error creating thread pool parameter. threadPool id: %s", initParam.getThreadPoolId()), ex);
}
dynamicThreadPoolExecutor.setTaskDecorator(initParam.getTaskDecorator());
dynamicThreadPoolExecutor.allowCoreThreadTimeOut(initParam.allowCoreThreadTimeOut);
return dynamicThreadPoolExecutor;
} | @Test
public void testBuildDynamicPool() {
initParam.setWaitForTasksToCompleteOnShutdown(true);
initParam.setAwaitTerminationMillis(5000L);
ThreadPoolExecutor executor = AbstractBuildThreadPoolTemplate.buildDynamicPool(initParam);
AtomicInteger count = new AtomicInteger(0);
executor.submit(() -> {
ThreadUtil.sleep(100L);
return count.incrementAndGet();
});
executor.submit(() -> {
ThreadUtil.sleep(100L);
count.incrementAndGet();
});
// waiting for shutdown
executor.shutdown();
while (!executor.isTerminated()) {
}
Assert.assertEquals(2, count.get());
} |
static boolean isValidIfPresent(@Nullable String email) {
return isEmpty(email) || isValidEmail(email);
} | @Test
public void various_examples_of_unusual_but_valid_emails() {
assertThat(isValidIfPresent("info@sonarsource.com")).isTrue();
assertThat(isValidIfPresent("guillaume.jambet+sonarsource-emailvalidatortest@gmail.com")).isTrue();
assertThat(isValidIfPresent("webmaster@kiné-beauté.fr")).isTrue();
assertThat(isValidIfPresent("Chuck Norris <coup-de-pied-retourné@chucknorris.com>")).isTrue();
assertThat(isValidIfPresent("\"Fred Bloggs\"@example.com")).isTrue();
assertThat(isValidIfPresent("pipo@127.0.0.1")).isTrue();
assertThat(isValidIfPresent("admin@admin")).isTrue();
} |
@Nonnull
public List<String> value() {
return parsedValue;
} | @Test
void valueWithHttpProtocol() {
final RemoteReindexAllowlist allowlist = new RemoteReindexAllowlist(URI.create("http://example.com:9200"), "http://example.com:9200");
Assertions.assertThat(allowlist.value())
.hasSize(1)
.contains("example.com:9200");
} |
@Override
public String load(ImageTarball imageTarball, Consumer<Long> writtenByteCountListener)
throws InterruptedException, IOException {
// Runs 'docker load'.
Process dockerProcess = docker("load");
try (NotifyingOutputStream stdin =
new NotifyingOutputStream(dockerProcess.getOutputStream(), writtenByteCountListener)) {
imageTarball.writeTo(stdin);
} catch (IOException ex) {
// Tries to read from stderr. Not using getStderrOutput(), as we want to show the error
// message from the tarball I/O write failure when reading from stderr fails.
String error;
try (InputStreamReader stderr =
new InputStreamReader(dockerProcess.getErrorStream(), StandardCharsets.UTF_8)) {
error = CharStreams.toString(stderr);
} catch (IOException ignored) {
// This ignores exceptions from reading stderr and uses the original exception from
// writing to stdin.
error = ex.getMessage();
}
throw new IOException("'docker load' command failed with error: " + error, ex);
}
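    // On success, 'docker load' prints a summary to stdout; a non-zero exit is reported via stderr.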
try (InputStreamReader stdout =
new InputStreamReader(dockerProcess.getInputStream(), StandardCharsets.UTF_8)) {
String output = CharStreams.toString(stdout);
if (dockerProcess.waitFor() != 0) {
throw new IOException(
"'docker load' command failed with error: " + getStderrOutput(dockerProcess));
}
return output;
}
} | @Test
public void testLoad_stdinFail() throws InterruptedException {
DockerClient testDockerClient = new CliDockerClient(ignored -> mockProcessBuilder);
Mockito.when(mockProcess.getOutputStream())
.thenReturn(
new OutputStream() {
@Override
public void write(int b) throws IOException {
throw new IOException();
}
});
Mockito.when(mockProcess.getErrorStream())
.thenReturn(new ByteArrayInputStream("error".getBytes(StandardCharsets.UTF_8)));
try {
testDockerClient.load(imageTarball, ignored -> {});
Assert.fail("Write should have failed");
} catch (IOException ex) {
Assert.assertEquals("'docker load' command failed with error: error", ex.getMessage());
}
} |
@Override
public DeviceEvent markOffline(DeviceId deviceId) {
return markOffline(deviceId, deviceClockService.getTimestamp(deviceId));
} | @Test
public final void testMarkOffline() {
putDevice(DID1, SW1);
assertTrue(deviceStore.isAvailable(DID1));
Capture<InternalDeviceEvent> message = Capture.newInstance();
Capture<MessageSubject> subject = Capture.newInstance();
Capture<Function<InternalDeviceEvent, byte[]>> encoder = Capture.newInstance();
resetCommunicatorExpectingSingleBroadcast(message, subject, encoder);
DeviceEvent event = deviceStore.markOffline(DID1);
assertEquals(DEVICE_AVAILABILITY_CHANGED, event.type());
assertDevice(DID1, SW1, event.subject());
assertFalse(deviceStore.isAvailable(DID1));
verify(clusterCommunicator);
// TODO: verify broadcast message
assertTrue(message.hasCaptured());
resetCommunicatorExpectingNoBroadcast(message, subject, encoder);
DeviceEvent event2 = deviceStore.markOffline(DID1);
assertNull("No change, no event", event2);
verify(clusterCommunicator);
assertFalse(message.hasCaptured());
} |
@Override
public AttributedList<Path> search(final Path workdir, final Filter<Path> filter, final ListProgressListener listener) throws BackgroundException {
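        // Reuse the plain directory listing, applying the filter both per chunk and to the final result.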
return session.getFeature(ListService.class).list(workdir, new SearchListProgressListener(filter, listener)).filter(filter);
} | @Test
public void testSearch() throws Exception {
final Path workdir = new Path("/", EnumSet.of(Path.Type.directory));
final Path f1 = new Path(workdir, "f1", EnumSet.of(Path.Type.file));
final Path f2 = new Path(workdir, "f2", EnumSet.of(Path.Type.file));
final DefaultSearchFeature feature = new DefaultSearchFeature(new NullSession(new Host(new TestProtocol())) {
@Override
public AttributedList<Path> list(final Path folder, final ListProgressListener listener) throws BackgroundException {
if(folder.equals(workdir)) {
final AttributedList<Path> list = new AttributedList<>(Arrays.asList(f1, f2));
listener.chunk(folder, list);
return list;
}
return AttributedList.emptyList();
}
});
final Filter<Path> filter = new NullFilter<Path>() {
@Override
public boolean accept(final Path file) {
if(file.isDirectory()) {
return true;
}
return file.getName().equals("f1");
}
};
final AttributedList<Path> search = feature.search(workdir, filter, new DisabledListProgressListener());
assertTrue(search.contains(f1));
assertFalse(search.contains(f2));
assertEquals(1, search.size());
} |
public static String toLowerHex(long v) {
char[] data = RecyclableBuffers.parseBuffer();
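    // writeHexLong fills all 16 positions, so shorter values come back left-padded with zeros.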
writeHexLong(data, 0, v);
return new String(data, 0, 16);
@Test
void toLowerHex_midValue() {
assertThat(toLowerHex(3405691582L)).isEqualTo("00000000cafebabe");
} |
@Override
public HealthStatus getStatus() {
if (cr.getStatus() == ComponentStatus.INITIALIZING) return HealthStatus.INITIALIZING;
PartitionHandlingManager partitionHandlingManager = cr.getComponent(PartitionHandlingManager.class);
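      // The cache is degraded when any component is unhealthy or partition handling has entered degraded mode.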
if (!isComponentHealthy() || partitionHandlingManager.getAvailabilityMode() == AvailabilityMode.DEGRADED_MODE) {
return HealthStatus.DEGRADED;
}
DistributionManager distributionManager = cr.getDistributionManager();
if (distributionManager != null && distributionManager.isRehashInProgress()) {
return HealthStatus.HEALTHY_REBALANCING;
}
return HealthStatus.HEALTHY;
} | @Test
public void testUnhealthyStatusWithDegradedPartition() {
//given
ComponentRegistry componentRegistryMock = mock(ComponentRegistry.class);
doReturn(ComponentStatus.RUNNING).when(componentRegistryMock).getStatus();
PartitionHandlingManager partitionHandlingManagerMock = mock(PartitionHandlingManager.class);
doReturn(AvailabilityMode.DEGRADED_MODE).when(partitionHandlingManagerMock).getAvailabilityMode();
doReturn(partitionHandlingManagerMock).when(componentRegistryMock).getComponent(eq(PartitionHandlingManager.class));
CacheHealth cacheHealth = new CacheHealthImpl(componentRegistryMock);
//when
HealthStatus status = cacheHealth.getStatus();
//then
assertEquals(status, HealthStatus.DEGRADED);
} |
@Override
public String stem(String word) {
b = word.toCharArray();
k = word.length() - 1;
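        // Words of one or two characters are returned unchanged; longer words run through Porter steps 1-6.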
if (k > 1) {
step1();
step2();
step3();
step4();
step5();
step6();
}
return new String(b, 0, k+1);
} | @Test
public void testStem() {
System.out.println("stem");
String[] words = {"consign", "consigned", "consigning", "consignment",
"consist", "consisted", "consistency", "consistent", "consistently",
"consisting", "consists", "consolation", "consolations", "consolatory",
"console", "consoled", "consoles", "consolidate", "consolidated",
"consolidating", "consoling", "consolingly", "consols", "consonant",
"consort", "consorted", "consorting", "conspicuous", "conspicuously",
"conspiracy", "conspirator", "conspirators", "conspire", "conspired",
"conspiring", "constable", "constables", "constance", "constancy",
"constant", "knack", "knackeries", "knacks", "knag", "knave",
"knaves", "knavish", "kneaded", "kneading", "knee", "kneel",
"kneeled", "kneeling", "kneels", "knees", "knell", "knelt", "knew",
"knick", "knif", "knife", "knight", "knightly", "knights", "knit",
"knits", "knitted", "knitting", "knives", "knob", "knobs", "knock",
"knocked", "knocker", "knockers", "knocking", "knocks", "knopp",
"knot", "knots"
};
String[] expResult = {"consign", "consign", "consign", "consign",
"consist", "consist", "consist", "consist", "consist", "consist",
"consist", "consol", "consol", "consolatori", "consol", "consol",
"consol", "consolid", "consolid", "consolid", "consol", "consolingli",
"consol", "conson", "consort", "consort", "consort", "conspicu",
"conspicu", "conspiraci", "conspir", "conspir", "conspir", "conspir",
"conspir", "constabl", "constabl", "constanc", "constanc", "constant",
"knack", "knackeri", "knack", "knag", "knave", "knave", "knavish",
"knead", "knead", "knee", "kneel", "kneel", "kneel", "kneel", "knee",
"knell", "knelt", "knew", "knick", "knif", "knife", "knight",
"knightli", "knight", "knit", "knit", "knit", "knit", "knive", "knob",
"knob", "knock", "knock", "knocker", "knocker", "knock", "knock",
"knopp", "knot", "knot"
};
PorterStemmer instance = new PorterStemmer();
for (int i = 0; i < words.length; i++) {
String result = instance.stem(words[i]);
assertEquals(expResult[i], result);
}
} |