focal_method | test_case
---|---
public boolean matches(String matchUrl) {
if (url.equals(matchUrl)) return true;
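// Fall back to comparing the URLs part-by-part; UrlPathPart handles path variables such as ":state".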
Iterator<UrlPathPart> iter1 = new MatchUrl(matchUrl).pathParts.iterator();
Iterator<UrlPathPart> iter2 = pathParts.iterator();
while (iter1.hasNext() && iter2.hasNext())
if (!iter1.next().matches(iter2.next())) return false;
return !iter1.hasNext() && !iter2.hasNext();
} | @Test
void testNoMatchWithParams() {
boolean matches = new MatchUrl("/api/jobs/enqueued/wrong").matches("/api/jobs/:state/test");
assertThat(matches).isFalse();
} |
@Override
public List<String> getGroups(String userName) throws IOException {
return new ArrayList<>(getUnixGroups(userName));
} | @Test
public void testGetGroupsNotResolvable() throws Exception {
TestGroupNotResolvable mapping = new TestGroupNotResolvable();
List<String> groups = mapping.getGroups("user");
assertTrue(groups.size() == 2);
assertTrue(groups.contains("abc"));
assertTrue(groups.contains("def"));
} |
@Override
public boolean containsSlot(AllocationID allocationId) {
return registeredSlots.containsKey(allocationId);
} | @Test
void testContainsSlot() {
final DefaultAllocatedSlotPool slotPool = new DefaultAllocatedSlotPool();
final AllocatedSlot allocatedSlot = createAllocatedSlot(null);
slotPool.addSlots(Collections.singleton(allocatedSlot), 0);
assertThat(slotPool.containsSlot(allocatedSlot.getAllocationId())).isTrue();
assertThat(slotPool.containsSlot(new AllocationID())).isFalse();
} |
@Override
public RemoteData.Builder serialize() {
final RemoteData.Builder remoteBuilder = RemoteData.newBuilder();
remoteBuilder.addDataObjectStrings(value.toStorageData());
remoteBuilder.addDataLongs(getTimeBucket());
remoteBuilder.addDataStrings(entityId);
remoteBuilder.addDataStrings(serviceId);
return remoteBuilder;
} | @Test
public void testSerialize() {
function.accept(MeterEntity.newService("service-test", Layer.GENERAL), HTTP_CODE_COUNT_1);
MinLabeledFunction function2 = new MinLabeledFunctionInst();
function2.deserialize(function.serialize().build());
assertThat(function2.getEntityId()).isEqualTo(function.getEntityId());
assertThat(function2.getTimeBucket()).isEqualTo(function.getTimeBucket());
assertThat(function2.getServiceId()).isEqualTo(function.getServiceId());
assertThat(function2.getValue()).isEqualTo(function.getValue());
} |
public Optional<Long> validateAndGetTimestamp(final ExternalServiceCredentials credentials) {
final String[] parts = requireNonNull(credentials).password().split(DELIMITER);
final String timestampSeconds;
final String actualSignature;
// making sure password format matches our expectations based on the generator configuration
if (parts.length == 3 && prependUsername) {
final String username = usernameIsTimestamp() ? parts[0] + DELIMITER + parts[1] : parts[0];
// username has to match the one from `credentials`
if (!credentials.username().equals(username)) {
return Optional.empty();
}
timestampSeconds = parts[1];
actualSignature = parts[2];
} else if (parts.length == 2 && !prependUsername) {
timestampSeconds = parts[0];
actualSignature = parts[1];
} else {
// unexpected password format
return Optional.empty();
}
final String signedData = usernameIsTimestamp() ? credentials.username() : credentials.username() + DELIMITER + timestampSeconds;
final String expectedSignature = truncateSignature
? hmac256TruncatedToHexString(key, signedData, TRUNCATED_SIGNATURE_LENGTH)
: hmac256ToHexString(key, signedData);
// if the signature is valid it's safe to parse the `timestampSeconds` string into Long
return hmacHexStringsEqual(expectedSignature, actualSignature)
? Optional.of(Long.valueOf(timestampSeconds))
: Optional.empty();
} | @Test
public void testValidateValid() throws Exception {
assertEquals(standardGenerator.validateAndGetTimestamp(standardCredentials).orElseThrow(), TIME_SECONDS);
} |
@Override
public boolean betterThan(Num criterionValue1, Num criterionValue2) {
return criterionValue1.isGreaterThan(criterionValue2);
} | @Test
public void betterThan() {
AnalysisCriterion criterion = getCriterion();
assertTrue(criterion.betterThan(numOf(6), numOf(3)));
assertFalse(criterion.betterThan(numOf(4), numOf(7)));
} |
@Override
public void export(RegisterTypeEnum registerType) {
if (this.exported) {
return;
}
if (getScopeModel().isLifeCycleManagedExternally()) {
// prepare model for reference
getScopeModel().getDeployer().prepare();
} else {
// ensure the module is started; compatible with old API usage
getScopeModel().getDeployer().start();
}
synchronized (this) {
if (this.exported) {
return;
}
if (!this.isRefreshed()) {
this.refresh();
}
if (this.shouldExport()) {
this.init();
if (shouldDelay()) {
// should register if delay export
doDelayExport();
} else if (Integer.valueOf(-1).equals(getDelay())
&& Boolean.parseBoolean(ConfigurationUtils.getProperty(
getScopeModel(), CommonConstants.DUBBO_MANUAL_REGISTER_KEY, "false"))) {
// should not register by default
doExport(RegisterTypeEnum.MANUAL_REGISTER);
} else {
doExport(registerType);
}
}
}
} | @Test
void testMethodConfigWithConfiguredArgumentIndex() {
ServiceConfig<DemoServiceImpl> service = new ServiceConfig<>();
service.setInterface(DemoService.class);
service.setRef(new DemoServiceImpl());
service.setProtocol(new ProtocolConfig() {
{
setName("dubbo");
}
});
MethodConfig methodConfig = new MethodConfig();
methodConfig.setName("sayName");
// argument with an explicitly configured index.
methodConfig.setArguments(Lists.newArrayList(new ArgumentConfig() {
{
setIndex(0);
setCallback(false);
}
}));
service.setMethods(Lists.newArrayList(methodConfig));
service.export();
assertFalse(service.getExportedUrls().isEmpty());
assertEquals("false", service.getExportedUrls().get(0).getParameters().get("sayName.0.callback"));
} |
public static String extractArgumentsFromAttributeName(String attributeNameWithArguments) {
int start = StringUtil.lastIndexOf(attributeNameWithArguments, '[');
int end = StringUtil.lastIndexOf(attributeNameWithArguments, ']');
if (start > 0 && end > 0 && end > start) {
return attributeNameWithArguments.substring(start + 1, end);
}
if (start < 0 && end < 0) {
return null;
}
throw new IllegalArgumentException("Wrong argument input passed " + attributeNameWithArguments);
} | @Test(expected = IllegalArgumentException.class)
public void extractArgument_wrongArguments_noArgument() {
extractArgumentsFromAttributeName("car.wheel[");
} |
public Result waitForCondition(Config config, Supplier<Boolean>... conditionCheck) {
return finishOrTimeout(
config,
conditionCheck,
() -> jobIsDoneOrFinishing(config.project(), config.region(), config.jobId()));
} | @Test
public void testWaitForConditionJobFinished() throws IOException {
when(client.getJobStatus(any(), any(), any()))
.thenReturn(JobState.RUNNING)
.thenReturn(JobState.CANCELLED);
Result result = new PipelineOperator(client).waitForCondition(DEFAULT_CONFIG, () -> false);
assertThat(result).isEqualTo(Result.LAUNCH_FINISHED);
} |
@Override
public ServiceInstance doChoose(String serviceName, List<ServiceInstance> instances) {
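// Note: Math.abs(Integer.MIN_VALUE) is itself negative, so a wrapped-around counter can still yield a negative index here.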
final int index = Math.abs(position.incrementAndGet());
return instances.get(index % instances.size());
} | @Test
public void doChoose() {
int port1 = 9999;
int port2 = 8888;
String serviceName = "round";
final List<ServiceInstance> serviceInstances = Arrays
.asList(CommonUtils.buildInstance(serviceName, port1), CommonUtils.buildInstance(serviceName, port2));
final RoundRobinLoadbalancer roundRobinLoadbalancer = new RoundRobinLoadbalancer();
// Simulate two invocations; each instance should be selected exactly once
int count = 2;
for (int i = 0; i < count; i++) {
final Optional<ServiceInstance> choose = roundRobinLoadbalancer.choose(serviceName, serviceInstances);
Assert.assertTrue(choose.isPresent());
if (port1 == choose.get().getPort()) {
port1--;
}
if (port2 == choose.get().getPort()) {
port2--;
}
}
Assert.assertTrue(port1 == 9998 && port2 == 8887);
} |
@Bean
@ConditionalOnBean(ShenyuThreadPoolExecutor.class)
public ShenyuThreadPoolExecutorDestructor shenyuThreadPoolExecutorDestructor() {
return new ShenyuThreadPoolExecutorDestructor();
} | @Test
public void testShenyuThreadPoolExecutorDestructor() {
ShenyuThreadPoolConfiguration.ShenyuThreadPoolExecutorDestructor shenyuThreadPoolExecutorDestructor =
shenyuThreadPoolConfiguration.shenyuThreadPoolExecutorDestructor();
SpringBeanUtils.getInstance().setApplicationContext(mock(ConfigurableApplicationContext.class));
when(SpringBeanUtils.getInstance().getBean(ShenyuThreadPoolExecutor.class))
.thenReturn(mock(ShenyuThreadPoolExecutor.class));
shenyuThreadPoolExecutorDestructor.onApplicationEvent(mock(ContextClosedEvent.class));
assertNotNull(shenyuThreadPoolExecutorDestructor);
} |
@Override
public List<ServiceDefinition> getServices(String name) {
final Lookup lookup = cache.computeIfAbsent(name, this::createLookup);
final Record[] records = lookup.run();
List<ServiceDefinition> services;
if (Objects.nonNull(records) && lookup.getResult() == Lookup.SUCCESSFUL) {
services = Arrays.stream(records)
.filter(SRVRecord.class::isInstance)
.map(SRVRecord.class::cast)
.sorted(COMPARATOR)
.map(srvRecord -> asService(name, srvRecord))
.collect(Collectors.toList());
} else {
services = Collections.emptyList();
}
return services;
} | @Test
void testServiceDiscovery() throws IOException {
DnsConfiguration configuration = new DnsConfiguration();
try (DnsServiceDiscovery discovery = new DnsServiceDiscovery(configuration)) {
configuration.setDomain("jabber.com");
configuration.setProto("_tcp");
List<ServiceDefinition> services = discovery.getServices("_xmpp-server");
assertNotNull(services);
assertFalse(services.isEmpty());
for (ServiceDefinition service : services) {
assertFalse(service.getMetadata().isEmpty());
assertNotNull(service.getMetadata().get("priority"));
assertNotNull(service.getMetadata().get("weight"));
}
}
} |
public TurnServerOptions getRoutingFor(
@Nonnull final UUID aci,
@Nonnull final Optional<InetAddress> clientAddress,
final int instanceLimit
) {
try {
return getRoutingForInner(aci, clientAddress, instanceLimit);
} catch(Exception e) {
logger.error("Failed to perform routing", e);
return new TurnServerOptions(this.configTurnRouter.getHostname(), null, this.configTurnRouter.randomUrls());
}
} | @Test
public void testLimitReturnsPreferredProtocolAndPrioritizesPerformance() throws UnknownHostException {
when(performanceTable.getDatacentersFor(any(), any(), any(), any()))
.thenReturn(List.of("dc-performance3", "dc-performance2", "dc-performance1"));
assertThat(router().getRoutingFor(aci, Optional.of(InetAddress.getByName("0.0.0.1")), 3))
.isEqualTo(optionsWithUrls(List.of(
"turn:9.9.9.4",
"turn:9.9.9.4:80?transport=tcp",
"turns:9.9.9.4:443?transport=tcp",
"turn:9.9.9.3",
"turn:9.9.9.3:80?transport=tcp",
"turns:9.9.9.3:443?transport=tcp",
"turn:[2222:1111:0:abc3:0:0:0:0]",
"turn:[2222:1111:0:abc3:0:0:0:0]:80?transport=tcp",
"turns:[2222:1111:0:abc3:0:0:0:0]:443?transport=tcp"
)));
assertThat(router().getRoutingFor(aci, Optional.of(InetAddress.getByName("2222:1111:0:abc2:0:0:0:1")), 3))
.isEqualTo(optionsWithUrls(List.of(
"turn:9.9.9.4",
"turn:9.9.9.4:80?transport=tcp",
"turns:9.9.9.4:443?transport=tcp",
"turn:[2222:1111:0:abc3:0:0:0:0]",
"turn:[2222:1111:0:abc3:0:0:0:0]:80?transport=tcp",
"turns:[2222:1111:0:abc3:0:0:0:0]:443?transport=tcp",
"turn:[2222:1111:0:abc2:0:0:0:0]",
"turn:[2222:1111:0:abc2:0:0:0:0]:80?transport=tcp",
"turns:[2222:1111:0:abc2:0:0:0:0]:443?transport=tcp"
)));
} |
@Override
public ShadowRule build(final ShadowRuleConfiguration ruleConfig, final String databaseName, final DatabaseType protocolType,
final ResourceMetaData resourceMetaData, final Collection<ShardingSphereRule> builtRules, final ComputeNodeInstanceContext computeNodeInstanceContext) {
return new ShadowRule(ruleConfig);
} | @SuppressWarnings({"rawtypes", "unchecked"})
@Test
void assertBuild() {
ShadowRuleConfiguration ruleConfig = new ShadowRuleConfiguration();
DatabaseRuleBuilder builder = OrderedSPILoader.getServices(DatabaseRuleBuilder.class, Collections.singleton(ruleConfig)).get(ruleConfig);
assertThat(builder.build(ruleConfig, "", new MySQLDatabaseType(), mock(ResourceMetaData.class), Collections.emptyList(),
mock(ComputeNodeInstanceContext.class)), instanceOf(ShadowRule.class));
} |
@Override
public FilterScope getFilterScope() {
return filterScope;
} | @Test
public void getFilterScope_always_returns_the_same_instance() {
String fieldName = randomAlphabetic(12);
boolean sticky = RANDOM.nextBoolean();
SimpleFieldTopAggregationDefinition underTest = new SimpleFieldTopAggregationDefinition(fieldName, sticky);
Set<TopAggregationDefinition.FilterScope> filterScopes = IntStream.range(0, 2 + RANDOM.nextInt(200))
.mapToObj(i -> underTest.getFilterScope())
.collect(Collectors.toSet());
assertThat(filterScopes).hasSize(1);
} |
public static boolean isKeepAlive(HttpMessage message) {
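// A message is keep-alive unless "Connection: close" is present; HTTP/1.1 is
// keep-alive by default, while HTTP/1.0 requires an explicit "Connection: keep-alive".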
return !message.headers().containsValue(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE, true) &&
(message.protocolVersion().isKeepAliveDefault() ||
message.headers().containsValue(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE, true));
} | @Test
public void testKeepAliveIfConnectionHeaderAbsent() {
HttpMessage http11Message = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET,
"http:localhost/http_1_1");
assertTrue(HttpUtil.isKeepAlive(http11Message));
HttpMessage http10Message = new DefaultHttpRequest(HttpVersion.HTTP_1_0, HttpMethod.GET,
"http:localhost/http_1_0");
assertFalse(HttpUtil.isKeepAlive(http10Message));
} |
public static String decoratorPathWithSlash(final String contextPath) {
return StringUtils.endsWith(contextPath, AdminConstants.URI_SLASH_SUFFIX) ? contextPath : contextPath + AdminConstants.URI_SLASH_SUFFIX;
} | @Test
public void testDecoratorPathWithSlash() {
String uri = PathUtils.decoratorPathWithSlash(URI);
assertThat(uri, is(URI + AdminConstants.URI_SLASH_SUFFIX));
uri = PathUtils.decoratorPathWithSlash(URI_SLASH);
assertThat(uri, is(URI + AdminConstants.URI_SLASH_SUFFIX));
} |
public static ObjectInspector create(@Nullable Schema schema) {
if (schema == null) {
return IcebergRecordObjectInspector.empty();
}
return TypeUtil.visit(schema, new IcebergObjectInspector());
} | @Test
public void testIcebergObjectInspector() {
ObjectInspector oi = IcebergObjectInspector.create(schema);
Assert.assertNotNull(oi);
Assert.assertEquals(ObjectInspector.Category.STRUCT, oi.getCategory());
StructObjectInspector soi = (StructObjectInspector) oi;
// binary
StructField binaryField = soi.getStructFieldRef("binary_field");
Assert.assertEquals(1, binaryField.getFieldID());
Assert.assertEquals("binary_field", binaryField.getFieldName());
Assert.assertEquals("binary comment", binaryField.getFieldComment());
Assert.assertEquals(IcebergBinaryObjectInspector.get(), binaryField.getFieldObjectInspector());
// boolean
StructField booleanField = soi.getStructFieldRef("boolean_field");
Assert.assertEquals(2, booleanField.getFieldID());
Assert.assertEquals("boolean_field", booleanField.getFieldName());
Assert.assertEquals("boolean comment", booleanField.getFieldComment());
Assert.assertEquals(getPrimitiveObjectInspector(boolean.class), booleanField.getFieldObjectInspector());
// date
StructField dateField = soi.getStructFieldRef("date_field");
Assert.assertEquals(3, dateField.getFieldID());
Assert.assertEquals("date_field", dateField.getFieldName());
Assert.assertEquals("date comment", dateField.getFieldComment());
if (HiveVersion.min(HiveVersion.HIVE_3)) {
Assert.assertEquals(
"org.apache.iceberg.mr.hive.serde.objectinspector.IcebergDateObjectInspectorHive3",
dateField.getFieldObjectInspector().getClass().getName());
} else {
Assert.assertEquals(
"org.apache.iceberg.mr.hive.serde.objectinspector.IcebergDateObjectInspector",
dateField.getFieldObjectInspector().getClass().getName());
}
// decimal
StructField decimalField = soi.getStructFieldRef("decimal_field");
Assert.assertEquals(4, decimalField.getFieldID());
Assert.assertEquals("decimal_field", decimalField.getFieldName());
Assert.assertEquals("decimal comment", decimalField.getFieldComment());
Assert.assertEquals(IcebergDecimalObjectInspector.get(38, 18), decimalField.getFieldObjectInspector());
// double
StructField doubleField = soi.getStructFieldRef("double_field");
Assert.assertEquals(5, doubleField.getFieldID());
Assert.assertEquals("double_field", doubleField.getFieldName());
Assert.assertEquals("double comment", doubleField.getFieldComment());
Assert.assertEquals(getPrimitiveObjectInspector(double.class), doubleField.getFieldObjectInspector());
// fixed
StructField fixedField = soi.getStructFieldRef("fixed_field");
Assert.assertEquals(6, fixedField.getFieldID());
Assert.assertEquals("fixed_field", fixedField.getFieldName());
Assert.assertEquals("fixed comment", fixedField.getFieldComment());
Assert.assertEquals(IcebergFixedObjectInspector.get(), fixedField.getFieldObjectInspector());
// float
StructField floatField = soi.getStructFieldRef("float_field");
Assert.assertEquals(7, floatField.getFieldID());
Assert.assertEquals("float_field", floatField.getFieldName());
Assert.assertEquals("float comment", floatField.getFieldComment());
Assert.assertEquals(getPrimitiveObjectInspector(float.class), floatField.getFieldObjectInspector());
// integer
StructField integerField = soi.getStructFieldRef("integer_field");
Assert.assertEquals(8, integerField.getFieldID());
Assert.assertEquals("integer_field", integerField.getFieldName());
Assert.assertEquals("integer comment", integerField.getFieldComment());
Assert.assertEquals(getPrimitiveObjectInspector(int.class), integerField.getFieldObjectInspector());
// long
StructField longField = soi.getStructFieldRef("long_field");
Assert.assertEquals(9, longField.getFieldID());
Assert.assertEquals("long_field", longField.getFieldName());
Assert.assertEquals("long comment", longField.getFieldComment());
Assert.assertEquals(getPrimitiveObjectInspector(long.class), longField.getFieldObjectInspector());
// string
StructField stringField = soi.getStructFieldRef("string_field");
Assert.assertEquals(10, stringField.getFieldID());
Assert.assertEquals("string_field", stringField.getFieldName());
Assert.assertEquals("string comment", stringField.getFieldComment());
Assert.assertEquals(getPrimitiveObjectInspector(String.class), stringField.getFieldObjectInspector());
// timestamp without tz
StructField timestampField = soi.getStructFieldRef("timestamp_field");
Assert.assertEquals(11, timestampField.getFieldID());
Assert.assertEquals("timestamp_field", timestampField.getFieldName());
Assert.assertEquals("timestamp comment", timestampField.getFieldComment());
Assert.assertEquals(IcebergTimestampObjectInspectorHive3.get(), timestampField.getFieldObjectInspector());
// timestamp with tz
StructField timestampTzField = soi.getStructFieldRef("timestamptz_field");
Assert.assertEquals(12, timestampTzField.getFieldID());
Assert.assertEquals("timestamptz_field", timestampTzField.getFieldName());
Assert.assertEquals("timestamptz comment", timestampTzField.getFieldComment());
Assert.assertEquals(IcebergTimestampWithZoneObjectInspectorHive3.get(), timestampTzField.getFieldObjectInspector());
// UUID
StructField uuidField = soi.getStructFieldRef("uuid_field");
Assert.assertEquals(13, uuidField.getFieldID());
Assert.assertEquals("uuid_field", uuidField.getFieldName());
Assert.assertEquals("uuid comment", uuidField.getFieldComment());
Assert.assertEquals(IcebergUUIDObjectInspector.get(), uuidField.getFieldObjectInspector());
// list
StructField listField = soi.getStructFieldRef("list_field");
Assert.assertEquals(14, listField.getFieldID());
Assert.assertEquals("list_field", listField.getFieldName());
Assert.assertEquals("list comment", listField.getFieldComment());
Assert.assertEquals(getListObjectInspector(String.class), listField.getFieldObjectInspector());
// map
StructField mapField = soi.getStructFieldRef("map_field");
Assert.assertEquals(16, mapField.getFieldID());
Assert.assertEquals("map_field", mapField.getFieldName());
Assert.assertEquals("map comment", mapField.getFieldComment());
Assert.assertEquals(getMapObjectInspector(String.class, int.class), mapField.getFieldObjectInspector());
// struct
StructField structField = soi.getStructFieldRef("struct_field");
Assert.assertEquals(19, structField.getFieldID());
Assert.assertEquals("struct_field", structField.getFieldName());
Assert.assertEquals("struct comment", structField.getFieldComment());
ObjectInspector expectedObjectInspector = new IcebergRecordObjectInspector(
(Types.StructType) schema.findType(19), ImmutableList.of(getPrimitiveObjectInspector(String.class)));
Assert.assertEquals(expectedObjectInspector, structField.getFieldObjectInspector());
// time
StructField timeField = soi.getStructFieldRef("time_field");
Assert.assertEquals(21, timeField.getFieldID());
Assert.assertEquals("time_field", timeField.getFieldName());
Assert.assertEquals("time comment", timeField.getFieldComment());
Assert.assertEquals(IcebergTimeObjectInspector.get(), timeField.getFieldObjectInspector());
} |
@CanIgnoreReturnValue
public final Ordered containsAtLeastEntriesIn(Multimap<?, ?> expectedMultimap) {
checkNotNull(expectedMultimap, "expectedMultimap");
checkNotNull(actual);
ListMultimap<?, ?> missing = difference(expectedMultimap, actual);
// TODO(kak): Possible enhancement: Include "[1 copy]" if the element does appear in
// the subject but not enough times. Similarly for unexpected extra items.
if (!missing.isEmpty()) {
failWithActual(
fact("missing", countDuplicatesMultimap(annotateEmptyStringsMultimap(missing))),
simpleFact("---"),
fact("expected to contain at least", annotateEmptyStringsMultimap(expectedMultimap)));
return ALREADY_FAILED;
}
return new MultimapInOrder(/* allowUnexpected = */ true, expectedMultimap);
} | @Test
public void containsAtLeastEntriesIn() {
ImmutableListMultimap<Integer, String> actual =
ImmutableListMultimap.of(3, "one", 3, "six", 3, "two", 4, "five", 4, "four");
ImmutableSetMultimap<Integer, String> expected =
ImmutableSetMultimap.of(3, "one", 3, "six", 3, "two", 4, "five");
assertThat(actual).containsAtLeastEntriesIn(expected);
} |
protected <T> T initialize(T proxy) throws HibernateException {
if (!Hibernate.isInitialized(proxy)) {
Hibernate.initialize(proxy);
}
return proxy;
} | @Test
void initializesProxies() throws Exception {
final LazyInitializer initializer = mock(LazyInitializer.class);
when(initializer.isUninitialized()).thenReturn(true);
final HibernateProxy proxy = mock(HibernateProxy.class);
doCallRealMethod().when(proxy).asHibernateProxy();
when(proxy.getHibernateLazyInitializer()).thenReturn(initializer);
dao.initialize(proxy);
verify(initializer).initialize();
} |
@Override
public void processElement(StreamRecord<T> element) throws Exception {
writer.write(element.getValue());
} | @TestTemplate
public void testTableWithoutSnapshot() throws Exception {
try (OneInputStreamOperatorTestHarness<RowData, WriteResult> testHarness =
createIcebergStreamWriter()) {
assertThat(testHarness.extractOutputValues()).isEmpty();
}
// Even after closing the Iceberg stream writer, there is no orphan data file.
assertThat(scanDataFiles()).isEmpty();
try (OneInputStreamOperatorTestHarness<RowData, WriteResult> testHarness =
createIcebergStreamWriter()) {
testHarness.processElement(SimpleDataUtil.createRowData(1, "hello"), 1);
// Still no data file emitted yet, because there has been no checkpoint.
assertThat(testHarness.extractOutputValues()).isEmpty();
}
// Once we close the Iceberg stream writer, an orphan data file will be left behind.
assertThat(scanDataFiles()).hasSize(1);
} |
@Override
public void define(WebService.NewController controller) {
controller.createAction(VALIDATE_ACTION)
.setDescription("Check credentials.")
.setSince("3.3")
.setHandler(ServletFilterHandler.INSTANCE)
.setResponseExample(Resources.getResource(this.getClass(), "example-validate.json"));
} | @Test
public void define_shouldDefineWS() {
String controllerKey = "foo";
WebService.Context context = new WebService.Context();
WebService.NewController newController = context.createController(controllerKey);
underTest.define(newController);
newController.done();
WebService.Action validate = context.controller(controllerKey).action("validate");
assertThat(validate).isNotNull();
assertThat(validate.handler()).isInstanceOf(ServletFilterHandler.class);
assertThat(validate.responseExampleAsString()).isNotEmpty();
assertThat(validate.params()).isEmpty();
} |
public ValidationResult validate(final Map<String, InternalTopicConfig> topicConfigs) {
log.info("Starting to validate internal topics {}.", topicConfigs.keySet());
final long now = time.milliseconds();
final long deadline = now + retryTimeoutMs;
final ValidationResult validationResult = new ValidationResult();
final Set<String> topicDescriptionsStillToValidate = new HashSet<>(topicConfigs.keySet());
final Set<String> topicConfigsStillToValidate = new HashSet<>(topicConfigs.keySet());
while (!topicDescriptionsStillToValidate.isEmpty() || !topicConfigsStillToValidate.isEmpty()) {
Map<String, KafkaFuture<TopicDescription>> descriptionsForTopic = Collections.emptyMap();
if (!topicDescriptionsStillToValidate.isEmpty()) {
final DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(topicDescriptionsStillToValidate);
descriptionsForTopic = describeTopicsResult.topicNameValues();
}
Map<String, KafkaFuture<Config>> configsForTopic = Collections.emptyMap();
if (!topicConfigsStillToValidate.isEmpty()) {
final DescribeConfigsResult describeConfigsResult = adminClient.describeConfigs(
topicConfigsStillToValidate.stream()
.map(topic -> new ConfigResource(Type.TOPIC, topic))
.collect(Collectors.toSet())
);
configsForTopic = describeConfigsResult.values().entrySet().stream()
.collect(Collectors.toMap(entry -> entry.getKey().name(), Map.Entry::getValue));
}
while (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
if (!descriptionsForTopic.isEmpty()) {
doValidateTopic(
validationResult,
descriptionsForTopic,
topicConfigs,
topicDescriptionsStillToValidate,
(streamsSide, brokerSide) -> validatePartitionCount(validationResult, streamsSide, brokerSide)
);
}
if (!configsForTopic.isEmpty()) {
doValidateTopic(
validationResult,
configsForTopic,
topicConfigs,
topicConfigsStillToValidate,
(streamsSide, brokerSide) -> validateCleanupPolicy(validationResult, streamsSide, brokerSide)
);
}
maybeThrowTimeoutException(
Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate),
deadline,
String.format("Could not validate internal topics within %d milliseconds. " +
"This can happen if the Kafka cluster is temporarily not available.", retryTimeoutMs)
);
if (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
Utils.sleep(100);
}
}
maybeSleep(
Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate),
deadline,
"validated"
);
}
log.info("Completed validation of internal topics {}.", topicConfigs.keySet());
return validationResult;
} | @Test
public void shouldReportMultipleMisconfigurationsForSameTopic() {
final long retentionMs = 1000;
final long shorterRetentionMs = 900;
final Map<String, String> windowedChangelogConfig = windowedChangelogConfig(shorterRetentionMs);
windowedChangelogConfig.put(TopicConfig.RETENTION_BYTES_CONFIG, "1024");
setupTopicInMockAdminClient(topic1, windowedChangelogConfig);
final InternalTopicConfig internalTopicConfig1 = setupWindowedChangelogTopicConfig(topic1, 1, retentionMs);
final ValidationResult validationResult = internalTopicManager.validate(mkMap(
mkEntry(topic1, internalTopicConfig1)
));
final Map<String, List<String>> misconfigurationsForTopics = validationResult.misconfigurationsForTopics();
assertThat(validationResult.missingTopics(), empty());
assertThat(misconfigurationsForTopics.size(), is(1));
assertThat(misconfigurationsForTopics, hasKey(topic1));
assertThat(misconfigurationsForTopics.get(topic1).size(), is(2));
assertThat(
misconfigurationsForTopics.get(topic1).get(0),
is("Retention time (" + TopicConfig.RETENTION_MS_CONFIG + ") of existing internal topic " +
topic1 + " is " + shorterRetentionMs + " but should be " + retentionMs + " or larger.")
);
assertThat(
misconfigurationsForTopics.get(topic1).get(1),
is("Retention byte (" + TopicConfig.RETENTION_BYTES_CONFIG + ") of existing internal topic " +
topic1 + " is set but it should be unset.")
);
} |
public static Validator parses(final Function<String, ?> parser) {
return (name, val) -> {
if (val != null && !(val instanceof String)) {
throw new IllegalArgumentException("validator should only be used with STRING defs");
}
try {
parser.apply((String)val);
} catch (final Exception e) {
throw new ConfigException("Configuration " + name + " is invalid: " + e.getMessage());
}
};
} | @Test(expected = IllegalArgumentException.class)
public void shouldThrowOnNonStringFromParse() {
// Given:
final Validator validator = ConfigValidators.parses(parser);
// When:
validator.ensureValid("propName", 10);
} |
void removeFactMapping(FactMapping toRemove) {
factMappings.remove(toRemove);
} | @Test
public void removeFactMapping() {
FactMapping retrieved = modelDescriptor.addFactMapping(factIdentifier, expressionIdentifier);
modelDescriptor.removeFactMapping(retrieved);
assertThat(modelDescriptor.getUnmodifiableFactMappings()).doesNotContain(retrieved);
} |
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
return this.list(directory, listener, String.valueOf(Path.DELIMITER));
} | @Test
public void testDirectory() throws Exception {
final Path bucket = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
final S3AccessControlListFeature acl = new S3AccessControlListFeature(session);
final Path directory = new S3DirectoryFeature(session, new S3WriteFeature(session, acl), acl).mkdir(new Path(bucket, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
final S3ObjectListService feature = new S3ObjectListService(session, acl);
assertTrue(feature.list(bucket, new DisabledListProgressListener()).contains(directory));
final AtomicBoolean callback = new AtomicBoolean();
assertTrue(feature.list(directory, new DisabledListProgressListener() {
@Override
public void chunk(final Path parent, final AttributedList<Path> list) {
assertNotSame(AttributedList.EMPTY, list);
callback.set(true);
}
}).isEmpty());
assertTrue(callback.get());
new S3DefaultDeleteFeature(session).delete(Collections.singletonList(directory), new DisabledLoginCallback(), new Delete.DisabledCallback());
assertFalse(feature.list(bucket, new DisabledListProgressListener()).contains(directory));
try {
feature.list(directory, new DisabledListProgressListener());
fail();
}
catch(NotfoundException e) {
}
} |
@Override
public ListenableFuture<?> spill(Iterator<Page> pageIterator)
{
requireNonNull(pageIterator, "pageIterator is null");
checkNoSpillInProgress();
spillInProgress = executor.submit(() -> writePages(pageIterator));
return spillInProgress;
} | @Test
public void testSpill()
throws Exception
{
assertSpill(false, false);
} |
@Override
public List<String> getMountPoints(final String str) throws IOException {
verifyMountTable();
String path = RouterAdmin.normalizeFileSystemPath(str);
if (isTrashPath(path)) {
path = subtractTrashCurrentPath(path);
}
readLock.lock();
try {
String from = path;
String to = path + Character.MAX_VALUE;
SortedMap<String, MountTable> subMap = this.tree.subMap(from, to);
return FileSubclusterResolver.getMountPoints(path, subMap.keySet());
} finally {
readLock.unlock();
}
} | @Test
public void testTrailingSlashInInputPath() throws IOException {
// Check mount points beneath the path with trailing slash.
getMountPoints(true);
} |
@Override
public boolean dropNamespace(Namespace namespace) {
if (!isValidateNamespace(namespace)) {
return false;
}
try {
clients.run(
client -> {
client.dropDatabase(
namespace.level(0),
false /* deleteData */,
false /* ignoreUnknownDb */,
false /* cascade */);
return null;
});
LOG.info("Dropped namespace: {}", namespace);
return true;
} catch (InvalidOperationException e) {
throw new NamespaceNotEmptyException(
e, "Namespace %s is not empty. One or more tables exist.", namespace);
} catch (NoSuchObjectException e) {
return false;
} catch (TException e) {
throw new RuntimeException("Failed to drop namespace " + namespace + " in Hive Metastore", e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(
"Interrupted in call to drop dropDatabase(name) " + namespace + " in Hive Metastore", e);
}
} | @Test
public void dropNamespace() {
Namespace namespace = Namespace.of("dbname_drop");
TableIdentifier identifier = TableIdentifier.of(namespace, "table");
Schema schema = getTestSchema();
catalog.createNamespace(namespace, META);
catalog.createTable(identifier, schema);
Map<String, String> nameMeta = catalog.loadNamespaceMetadata(namespace);
assertThat(nameMeta).containsEntry("owner", "apache");
assertThat(nameMeta).containsEntry("group", "iceberg");
assertThatThrownBy(() -> catalog.dropNamespace(namespace))
.isInstanceOf(NamespaceNotEmptyException.class)
.hasMessage("Namespace dbname_drop is not empty. One or more tables exist.");
assertThat(catalog.dropTable(identifier, true)).isTrue();
assertThat(catalog.dropNamespace(namespace))
.as("Should drop the namespace once it is empty")
.isTrue();
assertThat(catalog.dropNamespace(Namespace.of("db.ns1")))
.as("Should return false when the namespace doesn't exist")
.isFalse();
assertThatThrownBy(() -> catalog.loadNamespaceMetadata(namespace))
.isInstanceOf(NoSuchNamespaceException.class)
.hasMessage("Namespace does not exist: dbname_drop");
} |
public static List<AclEntry> mergeAclEntries(List<AclEntry> existingAcl,
List<AclEntry> inAclSpec) throws AclException {
ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec);
ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES);
List<AclEntry> foundAclSpecEntries =
Lists.newArrayListWithCapacity(MAX_ENTRIES);
EnumMap<AclEntryScope, AclEntry> providedMask =
Maps.newEnumMap(AclEntryScope.class);
EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class);
EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class);
for (AclEntry existingEntry: existingAcl) {
AclEntry aclSpecEntry = aclSpec.findByKey(existingEntry);
if (aclSpecEntry != null) {
foundAclSpecEntries.add(aclSpecEntry);
scopeDirty.add(aclSpecEntry.getScope());
if (aclSpecEntry.getType() == MASK) {
providedMask.put(aclSpecEntry.getScope(), aclSpecEntry);
maskDirty.add(aclSpecEntry.getScope());
} else {
aclBuilder.add(aclSpecEntry);
}
} else {
if (existingEntry.getType() == MASK) {
providedMask.put(existingEntry.getScope(), existingEntry);
} else {
aclBuilder.add(existingEntry);
}
}
}
// ACL spec entries that were not replacements are new additions.
for (AclEntry newEntry: aclSpec) {
if (Collections.binarySearch(foundAclSpecEntries, newEntry,
ACL_ENTRY_COMPARATOR) < 0) {
scopeDirty.add(newEntry.getScope());
if (newEntry.getType() == MASK) {
providedMask.put(newEntry.getScope(), newEntry);
maskDirty.add(newEntry.getScope());
} else {
aclBuilder.add(newEntry);
}
}
}
copyDefaultsIfNeeded(aclBuilder);
calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty);
return buildAndValidateAcl(aclBuilder);
} | @Test(expected=AclException.class)
public void testMergeAclEntriesInputTooLarge() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
mergeAclEntries(existing, ACL_SPEC_TOO_LARGE);
} |
@Override public boolean remove(long key1, int key2) {
return super.remove0(key1, key2);
} | @Test
public void testRemove() {
final long key1 = randomKey();
final int key2 = randomKey();
insert(key1, key2);
assertTrue(hsa.remove(key1, key2));
assertFalse(hsa.remove(key1, key2));
} |
public EventTimer getEventTimer(String eventName) {
EventTimer eventTimer;
synchronized (mTrackTimer) {
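// Get-and-remove must happen atomically so each timer is consumed exactly once.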
eventTimer = mTrackTimer.get(eventName);
mTrackTimer.remove(eventName);
}
return eventTimer;
} | @Test
public void getEventTimer() {
mInstance.addEventTimer("EventTimer", new EventTimer(TimeUnit.SECONDS, 10000L));
Assert.assertNotNull(mInstance.getEventTimer("EventTimer"));
} |
public long getWorkerTabletNum(String workerIpPort) {
try {
WorkerInfo workerInfo = client.getWorkerInfo(serviceId, workerIpPort);
return workerInfo.getTabletNum();
} catch (StarClientException e) {
LOG.info("Failed to get worker tablet num from starMgr, Error: {}.", e.getMessage());
}
return 0;
} | @Test
public void testGetWorkerTabletNum() throws StarClientException {
String serviceId = "1";
String workerIpPort = "127.0.0.1:8093";
long workerId = 20000L;
int expectedTabletNum = 10086;
Deencapsulation.setField(starosAgent, "serviceId", serviceId);
WorkerInfo worker = newWorkerInfo(workerId, workerIpPort, 9050, 9060, 8040, 8060, expectedTabletNum);
new Expectations() {
{
client.getWorkerInfo(serviceId, workerIpPort);
minTimes = 1;
result = worker;
}
};
long tabletNum = starosAgent.getWorkerTabletNum(workerIpPort);
Assert.assertEquals(expectedTabletNum, worker.getTabletNum());
Assert.assertEquals(expectedTabletNum, tabletNum);
} |
@Override
public Map<String, Collection<String>> getDataSourceMapper() {
Map<String, Collection<String>> result = new HashMap<>(dataSourceGroupRules.size(), 1F);
for (ReadwriteSplittingDataSourceGroupRule each : dataSourceGroupRules) {
result.put(each.getName(), each.getReadwriteSplittingGroup().getAllDataSources());
}
return result;
} | @Test
void assertGetDataSourceMapper() {
Map<String, Collection<String>> actual = new ReadwriteSplittingDataSourceMapperRuleAttribute(Collections.singleton(createDataSourceGroupRule())).getDataSourceMapper();
Map<String, Collection<String>> expected = Collections.singletonMap("readwrite", Arrays.asList("write_ds", "read_ds_0", "read_ds_1"));
assertThat(actual, is(expected));
} |
public static void validateImageInDaemonConf(Map<String, Object> conf) {
List<String> allowedImages = getAllowedImages(conf, true);
if (allowedImages.isEmpty()) {
LOG.debug("{} is not configured; skip image validation", DaemonConfig.STORM_OCI_ALLOWED_IMAGES);
} else {
String defaultImage = (String) conf.get(DaemonConfig.STORM_OCI_IMAGE);
validateImage(allowedImages, defaultImage, DaemonConfig.STORM_OCI_IMAGE);
}
} | @Test
public void validateImageInDaemonConfWrongPattern() {
assertThrows(IllegalArgumentException.class, () -> {
Map<String, Object> conf = new HashMap<>();
List<String> allowedImages = new ArrayList<>();
allowedImages.add("*");
conf.put(DaemonConfig.STORM_OCI_ALLOWED_IMAGES, allowedImages);
conf.put(DaemonConfig.STORM_OCI_IMAGE, "a-strange@image-name");
OciUtils.validateImageInDaemonConf(conf);
});
} |
public static String nameOf(String group, String plural, String name) {
if (StringUtils.isBlank(group)) {
return String.join("/", plural, name);
}
return String.join(".", plural, group) + "/" + name;
} | @Test
void testNameOf() {
String s = MeterUtils.nameOf("content.halo.run", "posts", "fake-post");
assertThat(s).isEqualTo("posts.content.halo.run/fake-post");
} |
@Override
public void execute(ComputationStep.Context context) {
DuplicationVisitor visitor = new DuplicationVisitor();
new DepthTraversalTypeAwareCrawler(visitor).visit(treeRootHolder.getReportTreeRoot());
context.getStatistics().add("duplications", visitor.count);
} | @Test
public void loads_duplication_with_otherFileRef_as_inProject_duplication() {
reportReader.putDuplications(FILE_1_REF, createDuplication(singleLineTextRange(LINE), createInProjectDuplicate(FILE_2_REF, LINE + 1)));
TestComputationStepContext context = new TestComputationStepContext();
underTest.execute(context);
assertDuplications(FILE_1_REF, singleLineDetailedTextBlock(1, LINE), new InProjectDuplicate(treeRootHolder.getComponentByRef(FILE_2_REF), singleLineTextBlock(LINE + 1)));
assertNoDuplication(FILE_2_REF);
assertNbOfDuplications(context, 1);
} |
@Override
public Set<String> getNames() {
return unmodifiableSet(probeInstances.values().stream()
.map(probeInstance -> probeInstance.descriptor.metricString())
.collect(Collectors.toSet()));
} | @Test
public void getNames() {
Set<String> metrics = new HashSet<>();
metrics.add("first");
metrics.add("second");
metrics.add("third");
Set<String> expected = new HashSet<>();
expected.add("[metric=first]");
expected.add("[metric=second]");
expected.add("[metric=third]");
for (String name : metrics) {
metricsRegistry.registerStaticProbe(this, name, ProbeLevel.MANDATORY,
(LongProbeFunction<Object>) obj -> 0);
}
Set<String> names = metricsRegistry.getNames();
for (String name : expected) {
assertContains(names, name);
}
} |
@Override
public Long time(RedisClusterNode node) {
RedisClient entry = getEntry(node);
RFuture<Long> f = executorService.readAsync(entry, LongCodec.INSTANCE, RedisCommands.TIME_LONG);
return syncFuture(f);
} | @Test
public void testTime() {
RedisClusterNode master = getFirstMaster();
Long time = connection.time(master);
assertThat(time).isGreaterThan(1000);
} |
@Override public String method() {
return DubboParser.method(invocation);
} | @Test void method() {
when(invocation.getMethodName()).thenReturn("sayHello");
assertThat(request.method()).isEqualTo("sayHello");
} |
@Override
public AppendFiles appendManifest(ManifestFile manifest) {
Preconditions.checkArgument(
!manifest.hasExistingFiles(), "Cannot append manifest with existing files");
Preconditions.checkArgument(
!manifest.hasDeletedFiles(), "Cannot append manifest with deleted files");
Preconditions.checkArgument(
manifest.snapshotId() == null || manifest.snapshotId() == -1,
"Snapshot id must be assigned during commit");
Preconditions.checkArgument(
manifest.sequenceNumber() == -1, "Sequence must be assigned during commit");
add(manifest);
return this;
} | @TestTemplate
public void testAppendManifestCleanup() throws IOException {
// inject 5 failures
TestTables.TestTableOperations ops = table.ops();
ops.failCommits(5);
ManifestFile manifest = writeManifest(FILE_A, FILE_B);
AppendFiles append = table.newAppend().appendManifest(manifest);
Snapshot pending = apply(append, branch);
ManifestFile newManifest = pending.allManifests(table.io()).get(0);
assertThat(new File(newManifest.path())).exists();
if (formatVersion == 1) {
assertThat(newManifest.path()).isNotEqualTo(manifest.path());
} else {
assertThat(newManifest.path()).isEqualTo(manifest.path());
}
assertThatThrownBy(() -> commit(table, append, branch))
.isInstanceOf(CommitFailedException.class)
.hasMessage("Injected failure");
V2Assert.assertEquals(
"Last sequence number should be 0", 0, readMetadata().lastSequenceNumber());
V1Assert.assertEquals(
"Table should end with last-sequence-number 0", 0, readMetadata().lastSequenceNumber());
if (formatVersion == 1) {
assertThat(new File(newManifest.path())).doesNotExist();
} else {
assertThat(new File(newManifest.path())).exists();
}
} |
public int startWithRunStrategy(
@NotNull WorkflowInstance instance, @NotNull RunStrategy runStrategy) {
return withMetricLogError(
() ->
withRetryableTransaction(
conn -> {
final long nextInstanceId =
getLatestInstanceId(conn, instance.getWorkflowId()) + 1;
if (isDuplicated(conn, instance)) {
return 0;
}
completeInstanceInit(conn, nextInstanceId, instance);
int res;
if (instance.getStatus().isTerminal()) {
// Save it directly and send a terminate event
res = addTerminatedInstance(conn, instance);
} else {
switch (runStrategy.getRule()) {
case SEQUENTIAL:
case PARALLEL:
case STRICT_SEQUENTIAL:
res = insertInstance(conn, instance, true, null);
break;
case FIRST_ONLY:
res = startFirstOnlyInstance(conn, instance);
break;
case LAST_ONLY:
res = startLastOnlyInstance(conn, instance);
break;
default:
throw new MaestroInternalError(
"When start, run strategy [%s] is not supported.", runStrategy);
}
}
if (instance.getWorkflowInstanceId() == nextInstanceId) {
updateLatestInstanceId(conn, instance.getWorkflowId(), nextInstanceId);
}
return res;
}),
"startWithRunStrategy",
"Failed to start a workflow [{}][{}] with run strategy [{}]",
instance.getWorkflowId(),
instance.getWorkflowUuid(),
runStrategy);
} | @Test
public void testStartWorkflowInstanceWithSameUuid() {
wfi.setWorkflowUuid("8a0bd56f-745f-4a2c-b81b-1b2f89127e73");
int res = runStrategyDao.startWithRunStrategy(wfi, Defaults.DEFAULT_RUN_STRATEGY);
assertEquals(0, res);
assertEquals(1, wfi.getWorkflowInstanceId());
assertEquals(1, wfi.getWorkflowRunId());
assertEquals("8a0bd56f-745f-4a2c-b81b-1b2f89127e73", wfi.getWorkflowUuid());
verifyPublish(0, 0, 0, 0, 0);
} |
@Override
public List<Operation> parse(String statement) {
CalciteParser parser = calciteParserSupplier.get();
FlinkPlannerImpl planner = validatorSupplier.get();
Optional<Operation> command = EXTENDED_PARSER.parse(statement);
if (command.isPresent()) {
return Collections.singletonList(command.get());
}
// parse the sql query
// use parseSqlList here because we need to support statement end with ';' in sql client.
SqlNodeList sqlNodeList = parser.parseSqlList(statement);
List<SqlNode> parsed = sqlNodeList.getList();
Preconditions.checkArgument(parsed.size() == 1, "only single statement supported");
return Collections.singletonList(
SqlNodeToOperationConversion.convert(planner, catalogManager, parsed.get(0))
.orElseThrow(() -> new TableException("Unsupported query: " + statement)));
} | @Test
void testPartialParseWithStatementSet() {
assertThatThrownBy(
() ->
parser.parse(
"Execute Statement Set Begin insert into A select * from B"))
.isInstanceOf(SqlParserEOFException.class);
} |
public static MaterializedDataPredicates differenceDataPredicates(
MaterializedDataPredicates baseTablePredicatesInfo,
MaterializedDataPredicates viewPredicatesInfo,
Map<String, String> viewToBaseTablePredicatesKeyMap)
{
return differenceDataPredicates(baseTablePredicatesInfo, viewPredicatesInfo, viewToBaseTablePredicatesKeyMap, ImmutableMap.of());
} | @Test
public void testDifferenceDataPredicates()
{
TestingTypeManager typeManager = new TestingTypeManager();
TestingSemiTransactionalHiveMetastore testMetastore = TestingSemiTransactionalHiveMetastore.create();
List<String> keys = ImmutableList.of("ds");
Column dsColumn = new Column("ds", HIVE_STRING, Optional.empty(), Optional.empty());
List<Column> partitionColumns = ImmutableList.of(dsColumn);
List<String> partitions = ImmutableList.of(
"ds=2020-01-01",
"ds=2020-01-02",
"ds=2020-01-03",
"ds=2020-01-04",
"ds=2020-01-05",
"ds=2020-01-06");
testMetastore.setPartitionNames(partitions);
MaterializedDataPredicates baseDataPredicates =
getMaterializedDataPredicates(testMetastore, metastoreContext, typeManager, getTable(partitionColumns), DateTimeZone.UTC);
List<String> viewPartitions = ImmutableList.of(
"ds=2020-01-02",
"ds=2020-01-03",
"ds=2020-01-05");
testMetastore.setPartitionNames(viewPartitions);
MaterializedDataPredicates materializedDataPredicates =
getMaterializedDataPredicates(testMetastore, metastoreContext, typeManager, getTable(partitionColumns), DateTimeZone.UTC);
Map<String, String> materializedViewToBaseColumnMap = ImmutableMap.of("ds", "ds");
ImmutableList.Builder<List<TestingPartitionResult>> partitionResults = ImmutableList.builder();
partitionResults.add(ImmutableList.of(
new TestingPartitionResult("ds", VARCHAR, "CAST('2020-01-01' AS varchar)")));
partitionResults.add(ImmutableList.of(
new TestingPartitionResult("ds", VARCHAR, "CAST('2020-01-04' AS varchar)")));
partitionResults.add(ImmutableList.of(
new TestingPartitionResult("ds", VARCHAR, "CAST('2020-01-06' AS varchar)")));
MaterializedDataPredicates diffDataPredicates = differenceDataPredicates(baseDataPredicates, materializedDataPredicates, materializedViewToBaseColumnMap);
comparePredicates(diffDataPredicates, keys, partitionResults.build());
} |
public static <T> T retry(Callable<T> callable, int retries) {
return retry(callable, retries, Collections.emptyList());
} | @Test(expected = HazelcastException.class)
public void retryNonRetryableKeywords()
throws Exception {
// given
given(callable.call()).willThrow(new HazelcastException(NON_RETRYABLE_KEYWORDS)).willThrow(new RuntimeException());
// when
String result = RetryUtils.retry(callable, RETRIES, Collections.singletonList(NON_RETRYABLE_KEYWORDS));
// then
// throws exception
} |
public boolean deleteTenantCapacity(final String tenant) {
try {
TenantCapacityMapper tenantCapacityMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
TableConstant.TENANT_CAPACITY);
PreparedStatementCreator preparedStatementCreator = connection -> {
PreparedStatement ps = connection.prepareStatement(
tenantCapacityMapper.delete(Collections.singletonList("tenant_id")));
ps.setString(1, tenant);
return ps;
};
return jdbcTemplate.update(preparedStatementCreator) == 1;
} catch (CannotGetJdbcConnectionException e) {
FATAL_LOG.error("[db-error]", e);
throw e;
}
} | @Test
void testDeleteTenantCapacity() {
when(jdbcTemplate.update(any(PreparedStatementCreator.class))).thenReturn(1);
assertTrue(service.deleteTenantCapacity("test"));
// mock a connection acquisition failure
when(jdbcTemplate.update(any(PreparedStatementCreator.class))).thenThrow(new CannotGetJdbcConnectionException("conn fail"));
try {
service.deleteTenantCapacity("test");
assertTrue(false);
} catch (Exception e) {
assertEquals("conn fail", e.getMessage());
}
} |
@Override
public Object evaluateUnsafe(EvaluationContext context) {
final Object idxObj = this.index.evaluateUnsafe(context);
final Object indexable = indexableObject.evaluateUnsafe(context);
if (idxObj == null || indexable == null) {
return null;
}
if (idxObj instanceof Long) {
int idx = Ints.saturatedCast((long) idxObj);
if (indexable.getClass().isArray()) {
return Array.get(indexable, idx);
} else if (indexable instanceof List) {
return ((List) indexable).get(idx);
} else if (indexable instanceof Iterable) {
return Iterables.get((Iterable) indexable, idx);
}
throw new IllegalArgumentException(
context.pipelineErrorMessage("Object '" + indexable + "' is not an Array, List or Iterable."));
} else if (idxObj instanceof String) {
final String idx = idxObj.toString();
if (indexable instanceof Map) {
return ((Map) indexable).get(idx);
}
throw new IllegalArgumentException(
context.pipelineErrorMessage("Object '" + indexable + "' is not a Map."));
}
throw new IllegalArgumentException(
context.pipelineErrorMessage("Index '" + idxObj + "' is not a Long or String."));
} | @Test
public void accessIterable() {
final Iterable<Integer> iterable = () -> new AbstractIterator<Integer>() {
private boolean done = false;
@Override
protected Integer computeNext() {
if (done) {
return endOfData();
}
done = true;
return 23;
}
};
final IndexedAccessExpression idxExpr = new IndexedAccessExpression(START, obj(iterable), num(0));
final Object evaluate = idxExpr.evaluateUnsafe(context);
assertThat(evaluate).isOfAnyClassIn(Integer.class);
assertThat(evaluate).isEqualTo(23);
} |
@Udf
public <T extends Comparable<? super T>> List<T> arraySortDefault(@UdfParameter(
description = "The array to sort") final List<T> input) {
return arraySortWithDirection(input, "ASC");
} | @Test
public void shouldSortDoubles() {
final List<Double> input =
Arrays.asList(Double.valueOf(1.1), Double.valueOf(3.1), Double.valueOf(-1.1));
final List<Double> output = udf.arraySortDefault(input);
assertThat(output, contains(Double.valueOf(-1.1), Double.valueOf(1.1), Double.valueOf(3.1)));
} |
@Override
public void close() throws IOException {
IOException ioeFromFlush = null;
try {
flush();
} catch (IOException e) {
ioeFromFlush = e;
throw e;
} finally {
try {
this.out.close();
} catch (IOException e) {
// If there was an Exception during flush(), the Azure SDK will throw back the
// same when we call close on the same stream. When try and finally both throw
// Exception, Java will use Throwable#addSuppressed for one of the Exception so
// that the caller will get one exception back. When within this, if both
// Exceptions are equal, it will throw back IllegalStateException. This makes us
// throw back a non-IOE. The special handling below avoids this.
if (ioeFromFlush == e) {
// Do nothing..
// The close() call gave back the same IOE which flush() gave. Just swallow it
LOG.debug("flush() and close() throwing back same Exception. Just swallowing the latter", e);
} else {
// Let Java handle 2 different Exceptions been thrown from try and finally.
throw e;
}
}
}
} | @Test
public void testCloseWhenFlushThrowingIOException() throws Exception {
MockOutputStream out = new MockOutputStream();
SyncableDataOutputStream sdos = new SyncableDataOutputStream(out);
out.flushThrowIOE = true;
LambdaTestUtils.intercept(IOException.class, "An IOE from flush", () -> sdos.close());
MockOutputStream out2 = new MockOutputStream();
out2.flushThrowIOE = true;
LambdaTestUtils.intercept(IOException.class, "An IOE from flush", () -> {
try (SyncableDataOutputStream sdos2 = new SyncableDataOutputStream(out2)) {
}
});
} |
public String buildRealData(final ConditionData condition, final ServerWebExchange exchange) {
return ParameterDataFactory.builderData(condition.getParamType(), condition.getParamName(), exchange);
} | @Test
public void testBuildRealDataHostBranch() {
conditionData.setParamType(ParamTypeEnum.HOST.getName());
assertEquals("localhost", abstractMatchStrategy.buildRealData(conditionData, exchange));
} |
protected JavaRDD<GenericRecord> maybeAppendKafkaOffsets(JavaRDD<ConsumerRecord<Object, Object>> kafkaRDD) {
if (this.shouldAddOffsets) {
AvroConvertor convertor = new AvroConvertor(schemaProvider.getSourceSchema());
return kafkaRDD.map(convertor::withKafkaFieldsAppended);
} else {
return kafkaRDD.map(consumerRecord -> (GenericRecord) consumerRecord.value());
}
} | @Test
public void testAppendKafkaOffsets() throws IOException {
UtilitiesTestBase.Helpers.saveStringsToDFS(
new String[] {dataGen.generateGenericRecord().getSchema().toString()}, hoodieStorage(),
SCHEMA_PATH);
ConsumerRecord<Object, Object> recordConsumerRecord =
new ConsumerRecord<Object, Object>("test", 0, 1L,
"test", dataGen.generateGenericRecord());
JavaRDD<ConsumerRecord<Object, Object>> rdd =
jsc().parallelize(Arrays.asList(recordConsumerRecord));
TypedProperties props = new TypedProperties();
props.put("hoodie.streamer.source.kafka.topic", "test");
props.put("hoodie.streamer.schemaprovider.source.schema.file", SCHEMA_PATH);
SchemaProvider schemaProvider = UtilHelpers.wrapSchemaProviderWithPostProcessor(
UtilHelpers.createSchemaProvider(FilebasedSchemaProvider.class.getName(), props, jsc()),
props, jsc(), new ArrayList<>());
AvroKafkaSource avroKafkaSource =
new AvroKafkaSource(props, jsc(), spark(), schemaProvider, null);
GenericRecord withoutKafkaOffsets =
avroKafkaSource.maybeAppendKafkaOffsets(rdd).collect().get(0);
props.put(HoodieStreamerConfig.KAFKA_APPEND_OFFSETS.key(), "true");
schemaProvider = UtilHelpers.wrapSchemaProviderWithPostProcessor(
UtilHelpers.createSchemaProvider(FilebasedSchemaProvider.class.getName(), props, jsc()),
props, jsc(), new ArrayList<>());
avroKafkaSource = new AvroKafkaSource(props, jsc(), spark(), schemaProvider, null);
GenericRecord withKafkaOffsets = avroKafkaSource.maybeAppendKafkaOffsets(rdd).collect().get(0);
assertEquals(4, withKafkaOffsets.getSchema().getFields().size()
- withoutKafkaOffsets.getSchema().getFields().size());
assertEquals("test", withKafkaOffsets.get("_hoodie_kafka_source_key").toString());
// scenario with null kafka key
ConsumerRecord<Object, Object> recordConsumerRecordNullKafkaKey =
new ConsumerRecord<Object, Object>("test", 0, 1L,
null, dataGen.generateGenericRecord());
JavaRDD<ConsumerRecord<Object, Object>> rddNullKafkaKey =
jsc().parallelize(Arrays.asList(recordConsumerRecordNullKafkaKey));
avroKafkaSource = new AvroKafkaSource(props, jsc(), spark(), schemaProvider, null);
GenericRecord withKafkaOffsetsAndNullKafkaKey =
avroKafkaSource.maybeAppendKafkaOffsets(rddNullKafkaKey).collect().get(0);
assertNull(withKafkaOffsetsAndNullKafkaKey.get("_hoodie_kafka_source_key"));
} |
@Override
public Path mkdir(final Path folder, final TransferStatus status) throws BackgroundException {
try {
session.getClient().putDirectory(folder.getAbsolute());
return folder;
}
catch(MantaException e) {
throw new MantaExceptionMappingService().map("Cannot create folder {0}", e, folder);
}
catch(MantaClientHttpResponseException e) {
throw new MantaHttpExceptionMappingService().map("Cannot create folder {0}", e, folder);
}
catch(IOException e) {
throw new DefaultIOExceptionMappingService().map("Cannot create folder {0}", e, folder);
}
} | @Test
public void testMkdir() throws Exception {
final Path target = new MantaDirectoryFeature(session).mkdir(randomDirectory(), null);
final PathAttributes found = new MantaAttributesFinderFeature(session).find(target);
assertNotEquals(Permission.EMPTY, found.getPermission());
new MantaDeleteFeature(session).delete(Collections.singletonList(target), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
public static <T extends Serializable> SerializableCoder<T> of(TypeDescriptor<T> type) {
@SuppressWarnings("unchecked")
Class<T> clazz = (Class<T>) type.getRawType();
return new SerializableCoder<>(clazz, type);
} | @Test
public void coderChecksForEquals() throws Exception {
SerializableCoder.of(ProperEquals.class);
expectedLogs.verifyNotLogged("Can't verify serialized elements of type");
} |
@Override
public synchronized void addAggregateFunctionFactory(
final AggregateFunctionFactory aggregateFunctionFactory) {
final String functionName = aggregateFunctionFactory.getName().toUpperCase();
validateFunctionName(functionName);
if (udfs.containsKey(functionName)) {
throw new KsqlException(
"Aggregate function already registered as non-aggregate: " + functionName);
}
if (udtfs.containsKey(functionName)) {
throw new KsqlException(
"Aggregate function already registered as table function: " + functionName);
}
if (udafs.putIfAbsent(functionName, aggregateFunctionFactory) != null) {
throw new KsqlException("Aggregate function already registered: " + functionName);
}
} | @Test
public void shouldThrowOnInvalidUdafFunctionName() {
// Given:
when(udafFactory.getName()).thenReturn("i am invalid");
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> functionRegistry.addAggregateFunctionFactory(udafFactory)
);
// Then:
assertThat(e.getMessage(), containsString("is not a valid function name"));
} |
public static <
EventTypeT,
EventKeyTypeT,
ResultTypeT,
StateTypeT extends MutableState<EventTypeT, ResultTypeT>>
OrderedEventProcessor<EventTypeT, EventKeyTypeT, ResultTypeT, StateTypeT> create(
OrderedProcessingHandler<EventTypeT, EventKeyTypeT, StateTypeT, ResultTypeT> handler) {
return new AutoValue_OrderedEventProcessor<>(handler);
} | @Test
public void testProcessingOfTheLastInput() throws CannotProvideCoderException {
Event[] events = {
Event.create(0, "id-1", "a"),
Event.create(1, "id-1", "b"),
Event.create(2, "id-1", StringEventExaminer.LAST_INPUT)
};
Collection<KV<String, OrderedProcessingStatus>> expectedStatuses = new ArrayList<>();
expectedStatuses.add(
KV.of(
"id-1",
OrderedProcessingStatus.create(
2L, 0, null, null, events.length, events.length, 0, LAST_EVENT_RECEIVED)));
Collection<KV<String, String>> expectedOutput = new ArrayList<>();
expectedOutput.add(KV.of("id-1", "a"));
expectedOutput.add(KV.of("id-1", "ab"));
expectedOutput.add(KV.of("id-1", "ab" + StringEventExaminer.LAST_INPUT));
testProcessing(
events,
expectedStatuses,
expectedOutput,
EMISSION_FREQUENCY_ON_EVERY_ELEMENT,
INITIAL_SEQUENCE_OF_0,
LARGE_MAX_RESULTS_PER_OUTPUT,
DONT_PRODUCE_STATUS_ON_EVERY_EVENT);
} |
@Override
public void configure(final Map<String, ?> config) {
configure(
config,
new Options(),
org.rocksdb.LRUCache::new,
org.rocksdb.WriteBufferManager::new
);
} | @Test
public void shouldFailIfConfiguredTwiceFromSameInstance() {
// Given:
rocksDBConfig.configure(CONFIG_PROPS);
// When:
final Exception e = assertThrows(
IllegalStateException.class,
() -> rocksDBConfig.configure(CONFIG_PROPS)
);
// Then:
assertThat(e.getMessage(), containsString("KsqlBoundedMemoryRocksDBConfigSetter has already been configured. Cannot re-configure."));
} |
@Override
public String generateSqlType(Dialect dialect) {
return switch (dialect.getId()) {
case PostgreSql.ID, H2.ID -> "INTEGER";
case MsSql.ID -> "INT";
case Oracle.ID -> "NUMBER(38,0)";
default -> throw new IllegalArgumentException("Unsupported dialect id " + dialect.getId());
};
} | @Test
public void generateSqlType_for_MsSql() {
assertThat(underTest.generateSqlType(new MsSql())).isEqualTo("INT");
} |
static void finish(TracingFilter filter,
DubboRequest request, @Nullable Result result, @Nullable Throwable error, Span span) {
if (request instanceof RpcClientRequest) {
filter.clientHandler.handleReceive(
new DubboClientResponse((DubboClientRequest) request, result, error), span);
} else {
filter.serverHandler.handleSend(
new DubboServerResponse((DubboServerRequest) request, result, error), span);
}
} | @Test void finish_result_but_null_error_DubboServerRequest() {
Span span = tracing.tracer().nextSpan().kind(SERVER).start();
FinishSpan.finish(filter, serverRequest, mock(Result.class), null, span);
testSpanHandler.takeRemoteSpan(SERVER);
} |
@VisibleForTesting
WxMpService getWxMpService(Integer userType) {
// Step 1: look up the client config in the DB to get the corresponding WxMpService instance
SocialClientDO client = socialClientMapper.selectBySocialTypeAndUserType(
SocialTypeEnum.WECHAT_MP.getType(), userType);
if (client != null && Objects.equals(client.getStatus(), CommonStatusEnum.ENABLE.getStatus())) {
return wxMpServiceCache.getUnchecked(client.getClientId() + ":" + client.getClientSecret());
}
// Step 2: if no DB config entry exists, fall back to the WxMpService configured via application-*.yaml
return wxMpService;
} | @Test
public void testGetWxMpService_clientDisable() {
// prepare parameters
Integer userType = randomPojo(UserTypeEnum.class).getValue();
// mock data
SocialClientDO client = randomPojo(SocialClientDO.class, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus())
.setUserType(userType).setSocialType(SocialTypeEnum.WECHAT_MP.getType()));
socialClientMapper.insert(client);
// invoke
WxMpService result = socialClientService.getWxMpService(userType);
// assert
assertSame(wxMpService, result);
} |
public void setFilePath(String filePath) {
if (filePath == null) {
throw new IllegalArgumentException("File path cannot be null.");
}
// TODO The job-submission web interface passes empty args (and thus empty
// paths) to compute the preview graph. The following is a workaround for
// this situation and we should fix this.
// comment (Stephan Ewen) this should no longer be relevant with the current Java/Scala
// APIs.
if (filePath.isEmpty()) {
setFilePath(new Path());
return;
}
try {
this.setFilePath(new Path(filePath));
} catch (RuntimeException rex) {
throw new RuntimeException(
"Could not create a valid URI from the given file path name: "
+ rex.getMessage());
}
} | @Test
void testSetPathNullPath() {
assertThatThrownBy(() -> new DummyFileInputFormat().setFilePath((Path) null))
.isInstanceOf(IllegalArgumentException.class);
} |
@Override
@SneakyThrows
public <T> T parseObject(String text, Class<T> clazz) {
JavaType javaType = MAPPER.getTypeFactory().constructType(clazz);
return MAPPER.readValue(text, javaType);
} | @Test
public void testParseObject() {
// normal json to boolean
Assertions.assertEquals(true, JACKSON_HANDLER.parseObject("true", Boolean.class));
// normal json to double
Assertions.assertEquals(0.01, JACKSON_HANDLER.parseObject("0.01", Double.class));
// normal json to integer
Assertions.assertEquals(1, JACKSON_HANDLER.parseObject("1", Integer.class));
// normal json to string
Assertions.assertEquals("hello world",
JACKSON_HANDLER.parseObject("\"hello world\"", String.class));
// normal json to object
Assertions.assertEquals(EXPECTED_ENTITY, JACKSON_HANDLER.parseObject(EXPECTED_ENTITY_JSON, Entity.class));
Assertions.assertEquals(
EXPECTED_ENTITY,
JACKSON_HANDLER.parseObject(EXPECTED_ENTITY_JSON, new TypeReference<Entity>() {
}));
// illegal json
Assertions.assertThrows(MismatchedInputException.class,
() -> JACKSON_HANDLER.parseObject(" ", Entity.class));
// null json
Assertions.assertThrows(IllegalArgumentException.class,
() -> JACKSON_HANDLER.parseObject(null, Entity.class));
// illegal type
Assertions.assertThrows(MismatchedInputException.class,
() -> JACKSON_HANDLER.parseObject(EXPECTED_ENTITY_JSON, String.class));
} |
public VersionRange<T> intersectionWith(VersionRange<T> that) {
if (this.isAll())
return that;
if (that.isAll())
return this;
if (!isOverlappedBy(that))
return empty();
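// a null bound means "unbounded"; the intersection takes the larger minimum and the smaller maximum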
T newMinimum;
if (this.minimum == null)
newMinimum = that.minimum;
else if (that.minimum == null)
newMinimum = this.minimum;
else
newMinimum = this.minimum.compareTo(that.minimum) >= 0 ? this.minimum : that.minimum;
T newMaximum;
if (this.maximum == null)
newMaximum = that.maximum;
else if (that.maximum == null)
newMaximum = this.maximum;
else
newMaximum = this.maximum.compareTo(that.maximum) <= 0 ? this.maximum : that.maximum;
return new VersionRange<>(newMinimum, newMaximum);
} | @Test
public void testIntersectionWith() {
assertIntersectionWith(all(), all(), all());
assertIntersectionWith(all(), empty(), empty());
assertIntersectionWith(all(), VersionNumber.between("10", "20"), VersionNumber.between("10", "20"));
assertIntersectionWith(all(), VersionNumber.atLeast("10"), VersionNumber.atLeast("10"));
assertIntersectionWith(all(), VersionNumber.atMost("10"), VersionNumber.atMost("10"));
assertIntersectionWith(empty(), empty(), empty());
assertIntersectionWith(empty(), VersionNumber.between("10", "20"), empty());
assertIntersectionWith(empty(), VersionNumber.atLeast("10"), empty());
assertIntersectionWith(empty(), VersionNumber.atMost("10"), empty());
assertIntersectionWith(VersionNumber.between("10", "20"), VersionNumber.between("10", "20"), VersionNumber.between("10", "20"));
assertIntersectionWith(VersionNumber.between("10", "20"), VersionNumber.between("5", "20"), VersionNumber.between("10", "20"));
assertIntersectionWith(VersionNumber.between("10", "20"), VersionNumber.between("10", "25"), VersionNumber.between("10", "20"));
assertIntersectionWith(VersionNumber.between("10", "20"), VersionNumber.between("5", "25"), VersionNumber.between("10", "20"));
assertIntersectionWith(VersionNumber.between("10", "20"), VersionNumber.between("15", "20"), VersionNumber.between("15", "20"));
assertIntersectionWith(VersionNumber.between("10", "20"), VersionNumber.between("10", "15"), VersionNumber.between("10", "15"));
assertIntersectionWith(VersionNumber.between("10", "20"), VersionNumber.between("14", "16"), VersionNumber.between("14", "16"));
assertIntersectionWith(VersionNumber.between("10", "20"), VersionNumber.atLeast("5"), VersionNumber.between("10", "20"));
assertIntersectionWith(VersionNumber.between("10", "20"), VersionNumber.atLeast("10"), VersionNumber.between("10", "20"));
assertIntersectionWith(VersionNumber.between("10", "20"), VersionNumber.atLeast("15"), VersionNumber.between("15", "20"));
assertIntersectionWith(VersionNumber.between("10", "20"), VersionNumber.atLeast("20"), VersionNumber.between("20", "20"));
assertIntersectionWith(VersionNumber.between("10", "20"), VersionNumber.atLeast("25"), empty());
assertIntersectionWith(VersionNumber.between("10", "20"), VersionNumber.atMost("25"), VersionNumber.between("10", "20"));
assertIntersectionWith(VersionNumber.between("10", "20"), VersionNumber.atMost("20"), VersionNumber.between("10", "20"));
assertIntersectionWith(VersionNumber.between("10", "20"), VersionNumber.atMost("15"), VersionNumber.between("10", "15"));
assertIntersectionWith(VersionNumber.between("10", "20"), VersionNumber.atMost("10"), VersionNumber.between("10", "10"));
assertIntersectionWith(VersionNumber.between("10", "20"), VersionNumber.atMost("5"), empty());
assertIntersectionWith(VersionNumber.atLeast("10"), VersionNumber.atMost("10"), VersionNumber.between("10", "10"));
assertIntersectionWith(VersionNumber.atLeast("10"), VersionNumber.atMost("20"), VersionNumber.between("10", "20"));
assertIntersectionWith(VersionNumber.atLeast("10"), VersionNumber.atMost("5"), empty());
} |
public boolean test(final IndexRange indexRange,
final Set<Stream> validStreams) {
// If index range is incomplete, check the prefix against the valid index sets.
if (indexRange.streamIds() == null) {
return validStreams.stream()
.map(Stream::getIndexSet)
.anyMatch(indexSet -> indexSet.isManagedIndex(indexRange.indexName()));
}
final Set<String> validStreamIds = validStreams.stream()
.map(Stream::getId)
.collect(Collectors.toSet());
// Otherwise check if the index range contains any of the valid stream ids.
return !Collections.disjoint(indexRange.streamIds(), validStreamIds);
} | @Test
public void emptyStreamsShouldNotMatchAnything() {
final IndexRange indexRange = mock(IndexRange.class);
assertThat(toTest.test(indexRange, Collections.emptySet())).isFalse();
} |
public Optional<Set<String>> subscribedTopics() {
return subscribedTopics;
} | @Test
public void testSubscribedTopics() {
// not able to compute it for a newly created group
assertEquals(Optional.empty(), group.subscribedTopics());
JoinGroupRequestProtocolCollection protocols = new JoinGroupRequestProtocolCollection();
protocols.add(new JoinGroupRequestProtocol()
.setName("range")
.setMetadata(ConsumerProtocol.serializeSubscription(
new ConsumerPartitionAssignor.Subscription(
Collections.singletonList("foo")
)).array()));
ClassicGroupMember member = new ClassicGroupMember(
memberId,
Optional.empty(),
clientId,
clientHost,
rebalanceTimeoutMs,
sessionTimeoutMs,
protocolType,
protocols
);
group.transitionTo(PREPARING_REBALANCE);
group.add(member);
group.initNextGeneration();
Set<String> expectedTopics = new HashSet<>(Collections.singleton("foo"));
assertEquals(expectedTopics, group.subscribedTopics().get());
group.transitionTo(PREPARING_REBALANCE);
group.remove(memberId);
group.initNextGeneration();
assertEquals(Optional.of(Collections.emptySet()), group.subscribedTopics());
protocols = new JoinGroupRequestProtocolCollection();
protocols.add(new JoinGroupRequestProtocol()
.setName("range")
.setMetadata(new byte[0]));
ClassicGroupMember memberWithFaultyProtocol = new ClassicGroupMember(
memberId,
Optional.empty(),
clientId,
clientHost,
rebalanceTimeoutMs,
sessionTimeoutMs,
protocolType,
protocols
);
group.transitionTo(PREPARING_REBALANCE);
group.add(memberWithFaultyProtocol);
group.initNextGeneration();
assertEquals(Optional.empty(), group.subscribedTopics());
} |
public Result runExtractor(String value) {
final Matcher matcher = pattern.matcher(value);
final boolean found = matcher.find();
if (!found) {
return null;
}
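// when the pattern defines a capture group, report the boundaries of group 1; otherwise -1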
final int start = matcher.groupCount() > 0 ? matcher.start(1) : -1;
final int end = matcher.groupCount() > 0 ? matcher.end(1) : -1;
final String s;
try {
s = replaceAll ? matcher.replaceAll(replacement) : matcher.replaceFirst(replacement);
} catch (Exception e) {
throw new RuntimeException("Error while trying to replace string", e);
}
return new Result(s, start, end);
} | @Test
public void testReplacementWithNoMatchAndDefaultReplacement() throws Exception {
final Message message = messageFactory.createMessage("Test", "source", Tools.nowUTC());
final RegexReplaceExtractor extractor = new RegexReplaceExtractor(
metricRegistry,
"id",
"title",
0L,
Extractor.CursorStrategy.COPY,
"message",
"message",
ImmutableMap.<String, Object>of("regex", "NO-MATCH"),
"user",
Collections.<Converter>emptyList(),
Extractor.ConditionType.NONE,
null);
extractor.runExtractor(message);
assertThat(message.getMessage()).isEqualTo("Test");
} |
@SuppressWarnings("unchecked")
@Override
public <S extends StateStore> S getStateStore(final String name) {
final StateStore store = stateManager.getGlobalStore(name);
return (S) getReadWriteStore(store);
} | @Test
public void shouldNotAllowCloseForKeyValueStore() {
when(stateManager.getGlobalStore(GLOBAL_KEY_VALUE_STORE_NAME)).thenReturn(mock(KeyValueStore.class));
final StateStore store = globalContext.getStateStore(GLOBAL_KEY_VALUE_STORE_NAME);
try {
store.close();
fail("Should have thrown UnsupportedOperationException.");
} catch (final UnsupportedOperationException expected) { }
} |
@Override
public NamespacedMetric namespace(final String... key) {
final IRubyObject[] rubyfiedKeys = Stream.of(key)
.map(this::getSymbol)
.toArray(IRubyObject[]::new);
return new NamespacedMetricImpl(
this.threadContext,
this.metrics.namespace(this.threadContext, RubyArray.newArray(this.threadContext.getRuntime(), rubyfiedKeys))
);
} | @Test
public void testNamespace() {
final NamespacedMetric metrics = this.getInstance().namespace("test");
final NamespacedMetric namespaced = metrics.namespace("abcdef");
assertThat(namespaced.namespaceName()).containsExactly("test", "abcdef");
final NamespacedMetric namespaced2 = namespaced.namespace("12345", "qwerty");
assertThat(namespaced2.namespaceName()).containsExactly("test", "abcdef", "12345", "qwerty");
} |
@Override
public void write(T record) {
recordConsumer.startMessage();
try {
messageWriter.writeTopLevelMessage(record);
} catch (RuntimeException e) {
Message m = (record instanceof Message.Builder) ? ((Message.Builder) record).build() : (Message) record;
LOG.error("Cannot write message {}: {}", e.getMessage(), m);
throw e;
}
recordConsumer.endMessage();
} | @Test
public void testSimplestMessage() throws Exception {
RecordConsumer readConsumerMock = Mockito.mock(RecordConsumer.class);
ProtoWriteSupport<TestProtobuf.InnerMessage> instance =
createReadConsumerInstance(TestProtobuf.InnerMessage.class, readConsumerMock);
TestProtobuf.InnerMessage.Builder msg = TestProtobuf.InnerMessage.newBuilder();
msg.setOne("oneValue");
instance.write(msg.build());
InOrder inOrder = Mockito.inOrder(readConsumerMock);
inOrder.verify(readConsumerMock).startMessage();
inOrder.verify(readConsumerMock).startField("one", 0);
inOrder.verify(readConsumerMock).addBinary(Binary.fromString("oneValue"));
inOrder.verify(readConsumerMock).endField("one", 0);
inOrder.verify(readConsumerMock).endMessage();
Mockito.verifyNoMoreInteractions(readConsumerMock);
} |
public static boolean loadRules(List<DegradeRule> rules) {
try {
return currentProperty.updateValue(rules);
} catch (Throwable e) {
RecordLog.error("[DefaultCircuitBreakerRuleManager] Unexpected error when loading default rules", e);
return false;
}
} | @Test
public void testLoadRules() {
DegradeRule rule = mock(DegradeRule.class);
List<DegradeRule> ruleList = new ArrayList<DegradeRule>();
ruleList.add(rule);
assertTrue(DefaultCircuitBreakerRuleManager.loadRules(ruleList));
assertFalse(DefaultCircuitBreakerRuleManager.loadRules(ruleList));
} |
@Override
public void onChange(Job job) {
sendObject(job);
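// SUCCEEDED, FAILED and DELETED are terminal job states, so the SSE connection can be closed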
if (job.hasState(SUCCEEDED) || job.hasState(FAILED) || job.hasState(DELETED)) {
close();
}
} | @Test
void sseConnectionIsClosedIfJobStateIsDeleted() throws IOException {
JobSseExchange jobSseExchange = new JobSseExchange(httpExchange, storageProvider, new JacksonJsonMapper());
jobSseExchange.onChange(aDeletedJob().build());
verify(storageProvider).removeJobStorageOnChangeListener(jobSseExchange);
} |
public Searcher searcher() {
return new Searcher();
} | @Test
void require_that_multi_term_queries_are_supported() {
ConjunctionIndexBuilder builder = new ConjunctionIndexBuilder();
IndexableFeatureConjunction c1 = indexableConj(
conj(
feature("a").inSet("1"),
feature("b").inSet("3")));
builder.indexConjunction(c1);
ConjunctionIndex index = builder.build();
ConjunctionIndex.Searcher searcher = index.searcher();
PredicateQuery query = new PredicateQuery();
query.addFeature("a", "1");
query.addFeature("a", "2");
assertTrue(searcher.search(query).isEmpty());
query.addFeature("b", "3");
assertHitsEquals(searcher.search(query), c1);
} |
@Override
public <OUT> ProcessConfigurableAndNonKeyedPartitionStream<OUT> process(
OneInputStreamProcessFunction<T, OUT> processFunction) {
validateStates(
processFunction.usesStates(),
new HashSet<>(
Arrays.asList(
StateDeclaration.RedistributionMode.NONE,
StateDeclaration.RedistributionMode.IDENTICAL)));
TypeInformation<OUT> outType =
StreamUtils.getOutputTypeForOneInputProcessFunction(processFunction, getType());
ProcessOperator<T, OUT> operator = new ProcessOperator<>(processFunction);
OneInputTransformation<T, OUT> outputTransform =
StreamUtils.getOneInputTransformation("Process", this, outType, operator);
environment.addOperator(outputTransform);
return StreamUtils.wrapWithConfigureHandle(
new NonKeyedPartitionStreamImpl<>(environment, outputTransform));
} | @Test
void testProcess() throws Exception {
ExecutionEnvironmentImpl env = StreamTestUtils.getEnv();
NonKeyedPartitionStreamImpl<Integer> stream =
new NonKeyedPartitionStreamImpl<>(
env, new TestingTransformation<>("t1", Types.INT, 1));
stream.process(new StreamTestUtils.NoOpOneInputStreamProcessFunction());
List<Transformation<?>> transformations = env.getTransformations();
assertThat(transformations).hasSize(1);
assertProcessType(transformations.get(0), OneInputTransformation.class, Types.LONG);
} |
@Override
public int getOrder() {
return PluginEnum.WEB_SOCKET.getCode();
} | @Test
public void getOrderTest() {
assertEquals(PluginEnum.WEB_SOCKET.getCode(), webSocketPlugin.getOrder());
} |
public static RDA fit(double[][] x, int[] y, Properties params) {
double alpha = Double.parseDouble(params.getProperty("smile.rda.alpha", "0.9"));
double[] priori = Strings.parseDoubleArray(params.getProperty("smile.rda.priori"));
double tol = Double.parseDouble(params.getProperty("smile.rda.tolerance", "1E-4"));
return fit(x, y, alpha, priori, tol);
} | @Test
public void testPenDigits() {
System.out.println("Pen Digits");
MathEx.setSeed(19650218); // to get repeatable results.
ClassificationValidations<RDA> result = CrossValidation.classification(10, PenDigits.x, PenDigits.y,
(x, y) -> RDA.fit(x, y, 0.9));
System.out.println(result);
assertEquals(0.9863, result.avg.accuracy, 1E-4);
} |
public static BlockLease newLease(Block block, Closer... closers)
{
return new ClosingBlockLease(block, closers);
} | @Test
public void testArrayAllocations()
{
CountingArrayAllocator allocator = new CountingArrayAllocator();
assertEquals(allocator.getBorrowedArrayCount(), 0);
int[] array = allocator.borrowIntArray(1);
try (BlockLease ignored = newLease(dummyBlock, () -> allocator.returnArray(array))) {
assertEquals(allocator.getBorrowedArrayCount(), 1);
}
assertEquals(allocator.getBorrowedArrayCount(), 0);
} |
@Override
public Iterable<TimerData> timersIterable() {
FluentIterable<Timer> allTimers = FluentIterable.from(workItem.getTimers().getTimersList());
FluentIterable<Timer> eventTimers = allTimers.filter(IS_WATERMARK);
FluentIterable<Timer> nonEventTimers = allTimers.filter(Predicates.not(IS_WATERMARK));
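// event-time (watermark) timers are deliberately emitted before processing-time timers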
return eventTimers
.append(nonEventTimers)
.transform(
timer ->
WindmillTimerInternals.windmillTimerToTimerData(
WindmillNamespacePrefix.SYSTEM_NAMESPACE_PREFIX, timer, windowCoder));
} | @Test
public void testTimerOrdering() throws Exception {
Windmill.WorkItem workItem =
Windmill.WorkItem.newBuilder()
.setKey(SERIALIZED_KEY)
.setWorkToken(17)
.setTimers(
Windmill.TimerBundle.newBuilder()
.addTimers(
makeSerializedTimer(STATE_NAMESPACE_1, 0, Windmill.Timer.Type.REALTIME))
.addTimers(
makeSerializedTimer(STATE_NAMESPACE_1, 1, Windmill.Timer.Type.WATERMARK))
.addTimers(
makeSerializedTimer(STATE_NAMESPACE_1, 2, Windmill.Timer.Type.REALTIME))
.addTimers(
makeSerializedTimer(STATE_NAMESPACE_2, 3, Windmill.Timer.Type.WATERMARK))
.build())
.build();
KeyedWorkItem<String, String> keyedWorkItem =
new WindmillKeyedWorkItem<>(KEY, workItem, WINDOW_CODER, WINDOWS_CODER, VALUE_CODER);
assertThat(
keyedWorkItem.timersIterable(),
Matchers.contains(
makeTimer(STATE_NAMESPACE_1, 1, TimeDomain.EVENT_TIME),
makeTimer(STATE_NAMESPACE_2, 3, TimeDomain.EVENT_TIME),
makeTimer(STATE_NAMESPACE_1, 0, TimeDomain.PROCESSING_TIME),
makeTimer(STATE_NAMESPACE_1, 2, TimeDomain.PROCESSING_TIME)));
} |
public static String getSchemasPath(final String databaseName) {
return String.join("/", getDatabaseNamePath(databaseName), SCHEMAS_NODE);
} | @Test
void assertGetSchemasPath() {
assertThat(ShardingSphereDataNode.getSchemasPath("db_name"), is("/statistics/databases/db_name/schemas"));
} |
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source, Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables, Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles, FEELTypeRegistry typeRegistry) {
CharStream input = CharStreams.fromString(source);
FEEL_1_1Lexer lexer = new FEEL_1_1Lexer( input );
CommonTokenStream tokens = new CommonTokenStream( lexer );
FEEL_1_1Parser parser = new FEEL_1_1Parser( tokens );
ParserHelper parserHelper = new ParserHelper(eventsManager);
additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol()));
parser.setHelper(parserHelper);
parser.setErrorHandler( new FEELErrorHandler() );
parser.removeErrorListeners(); // removes the error listener that prints to the console
parser.addErrorListener( new FEELParserErrorListener( eventsManager ) );
// pre-loads the parser with symbols
defineVariables( inputVariableTypes, inputVariables, parser );
if (typeRegistry != null) {
parserHelper.setTypeRegistry(typeRegistry);
}
return parser;
} | @Test
void forExpression() {
String inputExpression = "for item in order.items return item.price * item.quantity";
BaseNode forbase = parse( inputExpression );
assertThat( forbase).isInstanceOf(ForExpressionNode.class);
assertThat( forbase.getText()).isEqualTo(inputExpression);
assertThat( forbase.getResultType()).isEqualTo(BuiltInType.LIST);
ForExpressionNode forExpr = (ForExpressionNode) forbase;
assertThat( forExpr.getIterationContexts()).hasSize(1);
assertThat( forExpr.getExpression()).isInstanceOf(InfixOpNode.class);
assertThat( forExpr.getExpression().getText()).isEqualTo( "item.price * item.quantity");
IterationContextNode ic = forExpr.getIterationContexts().get( 0 );
assertThat( ic.getName().getText()).isEqualTo("item");
assertThat( ic.getExpression()).isInstanceOf(QualifiedNameNode.class);
assertThat( ic.getExpression().getText()).isEqualTo("order.items");
} |
public static void rewind(Buffer buffer) {
buffer.rewind();
} | @Test
public void testRewind() {
ByteBuffer byteBuffer = ByteBuffer.allocate(4);
byteBuffer.putInt(1);
Assertions.assertDoesNotThrow(() -> BufferUtils.rewind(byteBuffer));
} |
public synchronized V get(final K key, final Supplier<V> valueSupplier, final Consumer<V> expireCallback) {
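// a cache hit refreshes the entry's deadline; a miss creates the value and registers the expire callback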
final var value = cache.get(key);
if (value != null) {
value.updateDeadline();
return value.value;
}
final var newValue = new ExpirableValue<>(valueSupplier.get(), expireCallback);
newValue.updateDeadline();
cache.put(key, newValue);
return newValue.value;
} | @Test
public void testExpire() throws InterruptedException {
final var cache = new SimpleCache<Integer, Integer>(executor, 500L, 5);
final var expiredValues = Collections.synchronizedSet(new HashSet<Integer>());
final var allKeys = IntStream.range(0, 5).boxed().collect(Collectors.toSet());
allKeys.forEach(key -> cache.get(key, () -> key + 100, expiredValues::add));
Thread.sleep(400L);
final var recentAccessedKey = Set.of(1, 2);
recentAccessedKey.forEach(key -> cache.get(key, () -> -1, expiredValues::add)); // access these keys
Thread.sleep(300L);
recentAccessedKey.forEach(key -> Assert.assertEquals(key + 100, cache.get(key, () -> -1, __ -> {})));
allKeys.stream().filter(key -> !recentAccessedKey.contains(key))
.forEach(key -> Assert.assertEquals(-1, cache.get(key, () -> -1, __ -> {})));
} |
public List<MavenArtifact> searchSha1(String sha1) throws IOException, TooManyRequestsException {
if (null == sha1 || !sha1.matches("^[0-9A-Fa-f]{40}$")) {
throw new IllegalArgumentException("Invalid SHA1 format");
}
if (cache != null) {
final List<MavenArtifact> cached = cache.get(sha1);
if (cached != null) {
LOGGER.debug("cache hit for Central: " + sha1);
if (cached.isEmpty()) {
throw new FileNotFoundException("Artifact not found in Central");
}
return cached;
}
}
final List<MavenArtifact> result = new ArrayList<>();
final URL url = new URL(String.format(query, rootURL, sha1));
LOGGER.trace("Searching Central url {}", url);
// Determine if we need to use a proxy. The rules:
// 1) If the proxy is set, AND the setting is set to true, use the proxy
// 2) Otherwise, don't use the proxy (either the proxy isn't configured,
// or proxy is specifically set to false)
final URLConnectionFactory factory = new URLConnectionFactory(settings);
final HttpURLConnection conn = factory.createHttpURLConnection(url, useProxy);
conn.setDoOutput(true);
// JSON would be more elegant, but there's not currently a dependency
// on JSON, so don't want to add one just for this
conn.addRequestProperty("Accept", "application/xml");
conn.connect();
if (conn.getResponseCode() == 200) {
boolean missing = false;
try {
final DocumentBuilder builder = XmlUtils.buildSecureDocumentBuilder();
final Document doc = builder.parse(conn.getInputStream());
final XPath xpath = XPathFactory.newInstance().newXPath();
final String numFound = xpath.evaluate("/response/result/@numFound", doc);
if ("0".equals(numFound)) {
missing = true;
} else {
final NodeList docs = (NodeList) xpath.evaluate("/response/result/doc", doc, XPathConstants.NODESET);
for (int i = 0; i < docs.getLength(); i++) {
final String g = xpath.evaluate("./str[@name='g']", docs.item(i));
LOGGER.trace("GroupId: {}", g);
final String a = xpath.evaluate("./str[@name='a']", docs.item(i));
LOGGER.trace("ArtifactId: {}", a);
final String v = xpath.evaluate("./str[@name='v']", docs.item(i));
final NodeList attributes = (NodeList) xpath.evaluate("./arr[@name='ec']/str", docs.item(i), XPathConstants.NODESET);
boolean pomAvailable = false;
boolean jarAvailable = false;
for (int x = 0; x < attributes.getLength(); x++) {
final String tmp = xpath.evaluate(".", attributes.item(x));
if (".pom".equals(tmp)) {
pomAvailable = true;
} else if (".jar".equals(tmp)) {
jarAvailable = true;
}
}
final String centralContentUrl = settings.getString(Settings.KEYS.CENTRAL_CONTENT_URL);
String artifactUrl = null;
String pomUrl = null;
if (jarAvailable) {
//org/springframework/spring-core/3.2.0.RELEASE/spring-core-3.2.0.RELEASE.jar
artifactUrl = centralContentUrl + g.replace('.', '/') + '/' + a + '/'
+ v + '/' + a + '-' + v + ".jar";
}
if (pomAvailable) {
//org/springframework/spring-core/3.2.0.RELEASE/spring-core-3.2.0.RELEASE.pom
pomUrl = centralContentUrl + g.replace('.', '/') + '/' + a + '/'
+ v + '/' + a + '-' + v + ".pom";
}
result.add(new MavenArtifact(g, a, v, artifactUrl, pomUrl));
}
}
} catch (ParserConfigurationException | IOException | SAXException | XPathExpressionException e) {
// Anything else is jacked up XML stuff that we really can't recover from well
final String errorMessage = "Failed to parse MavenCentral XML Response: " + e.getMessage();
throw new IOException(errorMessage, e);
}
if (missing) {
if (cache != null) {
cache.put(sha1, result);
}
throw new FileNotFoundException("Artifact not found in Central");
}
} else if (conn.getResponseCode() == 429) {
final String errorMessage = "Too many requests sent to MavenCentral; additional requests are being rejected.";
throw new TooManyRequestsException(errorMessage);
} else {
final String errorMessage = "Could not connect to MavenCentral (" + conn.getResponseCode() + "): " + conn.getResponseMessage();
throw new IOException(errorMessage);
}
if (cache != null) {
cache.put(sha1, result);
}
return result;
} | @Test(expected = IOException.class)
public void testMissingSha1() throws Exception {
try {
searcher.searchSha1("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA");
} catch (IOException ex) {
//we hit a failure state on the CI
Assume.assumeFalse(StringUtils.contains(ex.getMessage(), "Could not connect to MavenCentral"));
throw ex;
}
} |
@Nullable
DockerCredentialHelper getCredentialHelperFor(String registry) {
List<Predicate<String>> registryMatchers = getRegistryMatchersFor(registry);
Map.Entry<String, String> firstCredHelperMatch =
findFirstInMapByKey(dockerConfigTemplate.getCredHelpers(), registryMatchers);
if (firstCredHelperMatch != null) {
return new DockerCredentialHelper(
firstCredHelperMatch.getKey(),
Paths.get("docker-credential-" + firstCredHelperMatch.getValue()));
}
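// no per-registry credHelper matched; fall back to the global credsStore if configured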
if (dockerConfigTemplate.getCredsStore() != null) {
return new DockerCredentialHelper(
registry, Paths.get("docker-credential-" + dockerConfigTemplate.getCredsStore()));
}
return null;
} | @Test
public void testGetCredentialHelperFor_withSuffix() throws URISyntaxException, IOException {
Path json = Paths.get(Resources.getResource("core/json/dockerconfig.json").toURI());
DockerConfig dockerConfig =
new DockerConfig(JsonTemplateMapper.readJsonFromFile(json, DockerConfigTemplate.class));
Assert.assertEquals(
Paths.get("docker-credential-credHelper for with.suffix.in.helpers/v2/"),
dockerConfig.getCredentialHelperFor("with.suffix.in.helpers").getCredentialHelper());
} |
@Override
public void handleWayTags(int edgeId, EdgeIntAccess edgeIntAccess, ReaderWay way, IntsRef relationFlags) {
String value = way.getTag("hgv:conditional", "");
int index = value.indexOf("@");
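// use the value before '@' only when the condition resolves to the default 3.5t threshold; otherwise fall back to the plain "hgv" tag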
Hgv hgvValue = index > 0 && conditionalWeightToTons(value) == 3.5 ? Hgv.find(value.substring(0, index).trim()) : Hgv.find(way.getTag("hgv"));
hgvEnc.setEnum(false, edgeId, edgeIntAccess, hgvValue);
} | @Test
public void testConditionalTags() {
EdgeIntAccess edgeIntAccess = new ArrayEdgeIntAccess(1);
ReaderWay readerWay = new ReaderWay(1);
readerWay.setTag("highway", "primary");
readerWay.setTag("hgv:conditional", "no @ (weight > 3.5)");
parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags);
assertEquals(Hgv.NO, hgvEnc.getEnum(false, edgeId, edgeIntAccess));
edgeIntAccess = new ArrayEdgeIntAccess(1);
// for now we assume "hgv" restrictions apply only to vehicles above 3.5 tons
readerWay.setTag("hgv:conditional", "delivery @ (weight > 7.5)");
parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags);
assertEquals(Hgv.MISSING, hgvEnc.getEnum(false, edgeId, edgeIntAccess));
} |
public void removeSelectData(final SelectorData selectorData) {
Optional.ofNullable(selectorData).ifPresent(data -> {
final List<SelectorData> selectorDataList = SELECTOR_MAP.get(data.getPluginName());
synchronized (SELECTOR_MAP) {
Optional.ofNullable(selectorDataList).ifPresent(list -> list.removeIf(e -> e.getId().equals(data.getId())));
}
});
} | @Test
public void testRemoveSelectData() throws NoSuchFieldException, IllegalAccessException {
SelectorData selectorData = SelectorData.builder().id("1").pluginName(mockPluginName1).build();
ConcurrentHashMap<String, List<SelectorData>> selectorMap = getFieldByName(selectorMapStr);
selectorMap.put(mockPluginName1, Lists.newArrayList(selectorData));
BaseDataCache.getInstance().removeSelectData(selectorData);
assertEquals(Lists.newArrayList(), selectorMap.get(mockPluginName1));
} |
public Optional<Long> validateAndGetTimestamp(final ExternalServiceCredentials credentials) {
final String[] parts = requireNonNull(credentials).password().split(DELIMITER);
final String timestampSeconds;
final String actualSignature;
// making sure password format matches our expectations based on the generator configuration
if (parts.length == 3 && prependUsername) {
final String username = usernameIsTimestamp() ? parts[0] + DELIMITER + parts[1] : parts[0];
// username has to match the one from `credentials`
if (!credentials.username().equals(username)) {
return Optional.empty();
}
timestampSeconds = parts[1];
actualSignature = parts[2];
} else if (parts.length == 2 && !prependUsername) {
timestampSeconds = parts[0];
actualSignature = parts[1];
} else {
// unexpected password format
return Optional.empty();
}
final String signedData = usernameIsTimestamp() ? credentials.username() : credentials.username() + DELIMITER + timestampSeconds;
final String expectedSignature = truncateSignature
? hmac256TruncatedToHexString(key, signedData, TRUNCATED_SIGNATURE_LENGTH)
: hmac256ToHexString(key, signedData);
// if the signature is valid it's safe to parse the `timestampSeconds` string into Long
return hmacHexStringsEqual(expectedSignature, actualSignature)
? Optional.of(Long.valueOf(timestampSeconds))
: Optional.empty();
} | @Test
public void testValidateWithExpiration() throws Exception {
final long elapsedSeconds = 10000;
clock.incrementSeconds(elapsedSeconds);
assertEquals(standardGenerator.validateAndGetTimestamp(standardCredentials, elapsedSeconds + 1).orElseThrow(), TIME_SECONDS);
assertTrue(standardGenerator.validateAndGetTimestamp(standardCredentials, elapsedSeconds - 1).isEmpty());
} |
void downloadInterpreter(
InterpreterInstallationRequest request,
DependencyResolver dependencyResolver,
Path interpreterDir,
ServiceCallback<String> serviceCallback) {
try {
LOGGER.info("Start to download a dependency: {}", request.getName());
if (null != serviceCallback) {
serviceCallback.onStart("Starting to download " + request.getName() + " interpreter", null);
}
dependencyResolver.load(request.getArtifact(), interpreterDir.toFile());
interpreterSettingManager.refreshInterpreterTemplates();
LOGGER.info(
"Finish downloading a dependency {} into {}",
request.getName(),
interpreterDir);
if (null != serviceCallback) {
serviceCallback.onSuccess(request.getName() + " downloaded", null);
}
} catch (RepositoryException | IOException e) {
LOGGER.error("Error while downloading dependencies", e);
try {
FileUtils.deleteDirectory(interpreterDir.toFile());
} catch (IOException e1) {
LOGGER.error(
"Error while removing directory. You should handle it manually: {}",
interpreterDir,
e1);
}
if (null != serviceCallback) {
try {
serviceCallback.onFailure(
new Exception("Error while downloading " + request.getName() + " as " +
e.getMessage()), null);
} catch (IOException e1) {
LOGGER.error("ServiceCallback failure", e1);
}
}
}
} | @Test
void downloadInterpreter() throws IOException {
final String interpreterName = "test-interpreter";
String artifactName = "junit:junit:4.11";
Path specificInterpreterPath =
Files.createDirectory(Paths.get(interpreterDir.toString(), interpreterName));
DependencyResolver dependencyResolver =
new DependencyResolver(localRepoDir.toString(), ZeppelinConfiguration.load());
doNothing().when(mockInterpreterSettingManager).refreshInterpreterTemplates();
interpreterService.downloadInterpreter(
new InterpreterInstallationRequest(interpreterName, artifactName),
dependencyResolver,
specificInterpreterPath,
new SimpleServiceCallback<String>() {
@Override
public void onStart(String message, ServiceContext context) {
assertEquals("Starting to download " + interpreterName + " interpreter", message);
}
@Override
public void onSuccess(String message, ServiceContext context) {
assertEquals(interpreterName + " downloaded", message);
}
@Override
public void onFailure(Exception ex, ServiceContext context) {
fail(ex);
}
});
verify(mockInterpreterSettingManager, times(1)).refreshInterpreterTemplates();
} |
public <T> void resolve(T resolvable) {
ParamResolver resolver = this;
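// a resolvable that defines its own scope layers its parameters over this resolver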
if (ParamScope.class.isAssignableFrom(resolvable.getClass())) {
ParamScope newScope = (ParamScope) resolvable;
resolver = newScope.applyOver(resolver);
}
resolveStringLeaves(resolvable, resolver);
resolveNonStringLeaves(resolvable, resolver);
resolveNodes(resolvable, resolver);
} | @Test
public void shouldResolveCollections() {
PipelineConfig pipelineConfig = PipelineConfigMother.createPipelineConfig("cruise", "dev", "ant");
pipelineConfig.setLabelTemplate("2.1-${COUNT}-#{foo}-bar-#{bar}");
HgMaterialConfig materialConfig = MaterialConfigsMother.hgMaterialConfig("http://#{foo}.com/#{bar}");
pipelineConfig.addMaterialConfig(materialConfig);
new ParamResolver(new ParamSubstitutionHandlerFactory(params(param("foo", "pavan"), param("bar", "jj"))), fieldCache).resolve(pipelineConfig);
assertThat(pipelineConfig.getLabelTemplate(), is("2.1-${COUNT}-pavan-bar-jj"));
assertThat(pipelineConfig.materialConfigs().get(1).getUriForDisplay(), is("http://pavan.com/jj"));
} |
@Override
public void process() {
if (realVideoObject == null) {
realVideoObject = new RealVideoObject();
}
realVideoObject.process();
} | @Test
void processDoesNotThrowException() {
assertDoesNotThrow(() -> new VideoObjectProxy().process(), "Process method should not throw any exception");
} |
@Override
public void createTable(ObjectPath tablePath, CatalogBaseTable table, boolean ignoreIfExists)
throws TableAlreadyExistException, DatabaseNotExistException, CatalogException {
checkNotNull(tablePath, "Table path cannot be null");
checkNotNull(table, "Table cannot be null");
if (!databaseExists(tablePath.getDatabaseName())) {
throw new DatabaseNotExistException(getName(), tablePath.getDatabaseName());
}
if (!table.getOptions().getOrDefault(CONNECTOR.key(), "").equalsIgnoreCase(HoodieTableFactory.FACTORY_ID)) {
throw new HoodieCatalogException(String.format("Unsupported connector identity %s, supported identity is %s",
table.getOptions().getOrDefault(CONNECTOR.key(), ""), HoodieTableFactory.FACTORY_ID));
}
if (table instanceof CatalogView) {
throw new HoodieCatalogException("CREATE VIEW is not supported.");
}
try {
boolean isMorTable = OptionsResolver.isMorTable(table.getOptions());
Table hiveTable = instantiateHiveTable(tablePath, table, inferTablePath(tablePath, table), isMorTable);
//create hive table
client.createTable(hiveTable);
//init hoodie metaClient
initTableIfNotExists(tablePath, (CatalogTable) table);
} catch (AlreadyExistsException e) {
if (!ignoreIfExists) {
throw new TableAlreadyExistException(getName(), tablePath, e);
}
} catch (Exception e) {
throw new HoodieCatalogException(
String.format("Failed to create table %s", tablePath.getFullName()), e);
}
} | @Test
public void testCreateHoodieTableWithWrongTableType() {
HashMap<String,String> properties = new HashMap<>();
properties.put(FactoryUtil.CONNECTOR.key(), "hudi");
properties.put("table.type","wrong type");
CatalogTable table =
new CatalogTableImpl(schema, properties, "hudi table");
assertThrows(HoodieCatalogException.class, () -> hoodieCatalog.createTable(tablePath, table, false));
} |
@Udf
public <T> List<T> remove(
@UdfParameter(description = "Array of values") final List<T> array,
@UdfParameter(description = "Value to remove") final T victim) {
if (array == null) {
return null;
}
return array.stream()
.filter(el -> !Objects.equals(el, victim))
.collect(Collectors.toList());
} | @Test
public void shouldReturnNullForNullInputs() {
final List<Long> input1 = null;
final Long input2 = null;
final List<Long> result = udf.remove(input1, input2);
assertThat(result, is(nullValue()));
} |
public FEELFnResult<Boolean> invoke(@ParameterName( "point1" ) Comparable point1, @ParameterName( "point2" ) Comparable point2) {
if ( point1 == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point1", "cannot be null"));
}
if ( point2 == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point2", "cannot be null"));
}
try {
boolean result = point1.compareTo( point2 ) == 0;
return FEELFnResult.ofResult( result );
} catch( Exception e ) {
// points are not comparable
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point1", "cannot be compared to point2"));
}
} | @Test
void invokeParamsCantBeCompared() {
FunctionTestUtil.assertResultError( coincidesFunction.invoke("a", BigDecimal.valueOf(2) ), InvalidParametersEvent.class );
} |
@Override
public Thread newThread(Runnable runnable) {
String name = mPrefix + mThreadNum.getAndIncrement();
Thread ret = new Thread(mGroup, runnable, name, 0);
ret.setDaemon(mDaemon);
return ret;
} | @Test
void testPrefixAndDaemon() {
NamedThreadFactory factory = new NamedThreadFactory("prefix", true);
Thread t = factory.newThread(Mockito.mock(Runnable.class));
assertThat(t.getName(), allOf(containsString("prefix-"), containsString("-thread-")));
assertTrue(t.isDaemon());
} |
@VisibleForTesting
static PhotoModel toCommonPhoto(Photo p, String albumId) {
Preconditions.checkArgument(
!Strings.isNullOrEmpty(p.getOriginalSize().getSource()),
"Photo [" + p.getId() + "] has a null authUrl");
return new PhotoModel(
p.getTitle(),
p.getOriginalSize().getSource(),
p.getDescription(),
toMimeType(p.getOriginalFormat()),
p.getId(),
albumId,
false);
} | @Test
public void toCommonPhoto() {
Photo photo =
FlickrTestUtils.initializePhoto(PHOTO_TITLE, FETCHABLE_URL, PHOTO_DESCRIPTION, MEDIA_TYPE);
PhotoModel photoModel = FlickrPhotosExporter.toCommonPhoto(photo, ALBUM_ID);
assertThat(photoModel.getAlbumId()).isEqualTo(ALBUM_ID);
assertThat(photoModel.getFetchableUrl()).isEqualTo(FETCHABLE_URL);
assertThat(photoModel.getTitle()).isEqualTo(PHOTO_TITLE);
assertThat(photoModel.getDescription()).isEqualTo(PHOTO_DESCRIPTION);
assertThat(photoModel.getMediaType()).isEqualTo("image/jpeg");
} |
@Override
public void symlink(final Path file, final String target) throws BackgroundException {
try {
Files.createSymbolicLink(session.toPath(file), session.toPath(target));
}
catch(IOException e) {
throw new LocalExceptionMappingService().map("Cannot create {0}", e, file);
}
} | @Test
public void testSymlink() throws Exception {
final LocalSession session = new LocalSession(new Host(new LocalProtocol(), new LocalProtocol().getDefaultHostname()));
assumeTrue(session.isPosixFilesystem());
session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback());
session.login(new DisabledLoginCallback(), new DisabledCancelCallback());
final Path workdir = new LocalHomeFinderFeature().find();
final Path target = new Path(workdir, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
new LocalTouchFeature(session).touch(target, new TransferStatus());
final Path link = new Path(workdir, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file, AbstractPath.Type.symboliclink));
new LocalSymlinkFeature(session).symlink(link, target.getName());
assertTrue(new LocalFindFeature(session).find(link));
assertEquals(EnumSet.of(Path.Type.file, AbstractPath.Type.symboliclink),
new LocalListService(session).list(workdir, new DisabledListProgressListener()).get(link).getType());
new LocalDeleteFeature(session).delete(Collections.singletonList(link), new DisabledLoginCallback(), new Delete.DisabledCallback());
assertFalse(new LocalFindFeature(session).find(link));
assertTrue(new LocalFindFeature(session).find(target));
new LocalDeleteFeature(session).delete(Collections.singletonList(target), new DisabledLoginCallback(), new Delete.DisabledCallback());
session.close();
} |
public static synchronized void registerProvider(ZuulBlockFallbackProvider provider) {
AssertUtil.notNull(provider, "fallback provider cannot be null");
String route = provider.getRoute();
if ("*".equals(route) || route == null) {
defaultFallbackProvider = provider;
} else {
fallbackProviderCache.put(route, provider);
}
} | @Test
public void testRegisterProvider() throws Exception {
MyNullResponseFallBackProvider myNullResponseFallBackProvider = new MyNullResponseFallBackProvider();
ZuulBlockFallbackManager.registerProvider(myNullResponseFallBackProvider);
Assert.assertEquals(myNullResponseFallBackProvider.getRoute(), ROUTE);
Assert.assertNull(myNullResponseFallBackProvider.fallbackResponse(ROUTE, new FlowException("flow ex")));
} |
static File resolveTempDir(Properties p) {
return new File(Optional.ofNullable(p.getProperty("sonar.path.temp")).orElse("temp"));
} | @Test
public void resolveTempDir_reads_relative_temp_dir_location_from_sonar_path_temp() {
String tempDirPath = "blablabl";
Properties properties = new Properties();
properties.put("sonar.path.temp", tempDirPath);
File file = Shutdowner.resolveTempDir(properties);
assertThat(file).isEqualTo(new File(tempDirPath));
} |
public void start() {
this.lock.lock();
try {
if (this.destroyed) {
return;
}
if (!this.stopped) {
return;
}
this.stopped = false;
if (this.running) {
return;
}
this.running = true;
schedule();
} finally {
this.lock.unlock();
}
} | @Test
public void testStartTrigger() throws Exception {
assertEquals(0, this.timer.counter.get());
this.timer.start();
Thread.sleep(1000);
assertEquals(20, this.timer.counter.get(), 3);
} |
@SuppressWarnings("unused") // Required for automatic type inference
public static <K> Builder0<K> forClass(final Class<K> type) {
return new Builder0<>();
} | @Test(expected = IllegalArgumentException.class)
public void shouldThrowIfHandlerSupplierThrows2() {
HandlerMaps.forClass(BaseType.class).withArgTypes(String.class, Integer.class)
.put(LeafTypeA.class, () -> {
throw new RuntimeException("Boom");
})
.build();
} |
public static <T> T waitWithLogging(
Logger log,
String prefix,
String action,
CompletableFuture<T> future,
Deadline deadline,
Time time
) throws Throwable {
log.info("{}Waiting for {}", prefix, action);
try {
T result = time.waitForFuture(future, deadline.nanoseconds());
log.info("{}Finished waiting for {}", prefix, action);
return result;
} catch (TimeoutException t) {
log.error("{}Timed out while waiting for {}", prefix, action, t);
TimeoutException timeout = new TimeoutException("Timed out while waiting for " + action);
timeout.setStackTrace(t.getStackTrace());
throw timeout;
} catch (Throwable t) {
if (t instanceof ExecutionException) {
ExecutionException executionException = (ExecutionException) t;
t = executionException.getCause();
}
log.error("{}Received a fatal error while waiting for {}", prefix, action, t);
throw new RuntimeException("Received a fatal error while waiting for " + action, t);
}
} | @Test
public void testWaitWithLogging() throws Throwable {
ScheduledExecutorService executorService = new ScheduledThreadPoolExecutor(1);
CompletableFuture<Integer> future = new CompletableFuture<>();
executorService.schedule(() -> future.complete(123), 1000, TimeUnit.NANOSECONDS);
assertEquals(123, FutureUtils.waitWithLogging(log,
"[FutureUtilsTest] ",
"the future to be completed",
future,
Deadline.fromDelay(Time.SYSTEM, 30, TimeUnit.SECONDS),
Time.SYSTEM));
executorService.shutdownNow();
executorService.awaitTermination(1, TimeUnit.MINUTES);
} |
@Override
public void visit(final File f, final String _relativePath) throws IOException {
int mode = IOUtils.mode(f);
// On Windows, the elements of relativePath are separated by
// back-slashes (\), but Zip files need to have their path elements separated
// by forward-slashes (/)
String relativePath = _relativePath.replace('\\', '/');
BasicFileAttributes basicFileAttributes = Files.readAttributes(Util.fileToPath(f), BasicFileAttributes.class);
if (basicFileAttributes.isDirectory()) {
ZipEntry dirZipEntry = new ZipEntry(this.prefix + relativePath + '/');
// Setting this bit explicitly is needed by some unzipping applications (see JENKINS-3294).
dirZipEntry.setExternalAttributes(BITMASK_IS_DIRECTORY);
if (mode != -1) dirZipEntry.setUnixMode(mode);
dirZipEntry.setTime(basicFileAttributes.lastModifiedTime().toMillis());
zip.putNextEntry(dirZipEntry);
zip.closeEntry();
} else {
ZipEntry fileZipEntry = new ZipEntry(this.prefix + relativePath);
if (mode != -1) fileZipEntry.setUnixMode(mode);
fileZipEntry.setTime(basicFileAttributes.lastModifiedTime().toMillis());
fileZipEntry.setSize(basicFileAttributes.size());
zip.putNextEntry(fileZipEntry);
try (InputStream in = FilePath.openInputStream(f, openOptions)) {
int len;
while ((len = in.read(buf)) >= 0)
zip.write(buf, 0, len);
} catch (InvalidPathException e) {
throw new IOException(e);
}
zip.closeEntry();
}
entriesWritten++;
} | @Issue("JENKINS-9942")
@Test
public void backwardsSlashesOnWindows() throws IOException {
// create foo/bar/baz/Test.txt
Path baz = tmp.newFolder().toPath().resolve("foo").resolve("bar").resolve("baz");
Files.createDirectories(baz);
Path tmpFile = baz.resolve("Test.txt");
Files.createFile(tmpFile);
// a file to store the zip archive in
Path zipFile = Files.createTempFile(tmp.getRoot().toPath(), "test", ".zip");
// create zip from tmpDir
try (ZipArchiver archiver = new ZipArchiver(Files.newOutputStream(zipFile))) {
archiver.visit(tmpFile.toFile(), "foo\\bar\\baz\\Test.txt");
}
// examine zip contents and assert that none of the entry names (paths) have
// back-slashes ("\")
try (ZipFile zipFileVerify = new ZipFile(zipFile.toFile())) {
assertEquals(1, zipFileVerify.size());
ZipEntry zipEntry = zipFileVerify.entries().nextElement();
assertEquals("foo/bar/baz/Test.txt", zipEntry.getName());
}
} |