focal_method | test_case
---|---
@Override
public List<HasMetadata> buildAccompanyingKubernetesResources() throws IOException {
final List<HasMetadata> resources = new ArrayList<>();
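// Ship the keytab as a Kubernetes Secret when both keytab path and principal are configured.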
if (!StringUtils.isNullOrWhitespaceOnly(securityConfig.getKeytab())
&& !StringUtils.isNullOrWhitespaceOnly(securityConfig.getPrincipal())) {
final File keytab = new File(securityConfig.getKeytab());
if (!keytab.exists()) {
LOG.warn(
"Could not find the kerberos keytab file in {}.",
keytab.getAbsolutePath());
} else {
resources.add(
new SecretBuilder()
.withNewMetadata()
.withName(
getKerberosKeytabSecretName(
kubernetesParameters.getClusterId()))
.endMetadata()
.addToData(
keytab.getName(),
Base64.getEncoder()
.encodeToString(Files.toByteArray(keytab)))
.build());
// Set keytab path in the container. One should make sure this decorator is
// triggered before FlinkConfMountDecorator.
kubernetesParameters
.getFlinkConfiguration()
.set(
SecurityOptions.KERBEROS_LOGIN_KEYTAB,
String.format(
"%s/%s",
Constants.KERBEROS_KEYTAB_MOUNT_POINT, keytab.getName()));
}
}
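// Ship krb5.conf as a ConfigMap when a custom config path is set in the Flink configuration.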
if (!StringUtils.isNullOrWhitespaceOnly(
kubernetesParameters
.getFlinkConfiguration()
.get(SecurityOptions.KERBEROS_KRB5_PATH))) {
final File krb5Conf =
new File(
kubernetesParameters
.getFlinkConfiguration()
.get(SecurityOptions.KERBEROS_KRB5_PATH));
if (!krb5Conf.exists()) {
LOG.warn(
"Could not find the kerberos config file in {}.",
krb5Conf.getAbsolutePath());
} else {
resources.add(
new ConfigMapBuilder()
.withNewMetadata()
.withName(
getKerberosKrb5confConfigMapName(
kubernetesParameters.getClusterId()))
.endMetadata()
.addToData(
krb5Conf.getName(),
Files.toString(krb5Conf, StandardCharsets.UTF_8))
.build());
}
}
return resources;
} | @Test
void testConfEditWhenBuildAccompanyingKubernetesResources() throws IOException {
kerberosMountDecorator.buildAccompanyingKubernetesResources();
assertThat(
this.testingKubernetesParameters
.getFlinkConfiguration()
.get(SecurityOptions.KERBEROS_LOGIN_KEYTAB))
.isEqualTo(
String.format("%s/%s", Constants.KERBEROS_KEYTAB_MOUNT_POINT, KEYTAB_FILE));
} |
@Override
protected boolean isAddOnEnabledByDefault(@NonNull String addOnId) {
final KeyboardAddOnAndBuilder addOnById = getAddOnById(addOnId);
return super.isAddOnEnabledByDefault(addOnId)
|| (addOnById != null && addOnById.getKeyboardDefaultEnabled());
} | @Test
public void testDefaultKeyboardId() {
final List<KeyboardAddOnAndBuilder> allAddOns = mKeyboardFactory.getAllAddOns();
Assert.assertEquals(13, allAddOns.size());
KeyboardAddOnAndBuilder addon = mKeyboardFactory.getEnabledAddOn();
Assert.assertNotNull(addon);
Assert.assertEquals("c7535083-4fe6-49dc-81aa-c5438a1a343a", addon.getId());
Assert.assertTrue(
mKeyboardFactory.isAddOnEnabledByDefault("c7535083-4fe6-49dc-81aa-c5438a1a343a"));
Assert.assertFalse(
mKeyboardFactory.isAddOnEnabledByDefault("c7535083-4fe6-49dc-81aa-c5438a1a343b"));
} |
@POST
@ApiOperation("Get all views that match given parameter value")
@NoAuditEvent("Only returning matching views, not changing any data")
public Collection<ViewParameterSummaryDTO> forParameter(@Context SearchUser searchUser) {
return qualifyingViewsService.forValue()
.stream()
.filter(searchUser::canReadView)
.collect(Collectors.toSet());
} | @Test
public void returnsSomeViewsIfSomeArePermitted() {
final SearchUser searchUser = TestSearchUser.builder()
.denyView("view1")
.allowView("view2")
.build();
final QualifyingViewsService service = mockViewsService("view1", "view2");
final QualifyingViewsResource resource = new QualifyingViewsResource(service);
final Collection<ViewParameterSummaryDTO> result = resource.forParameter(searchUser);
assertThat(result)
.hasSize(1)
.extracting(ViewParameterSummaryDTO::id)
.containsOnly("view2");
} |
@Override
public void register(String path, ServiceRecord record) throws IOException {
op(path, record, addRecordCommand);
} | @Test
public void testReverseLookup() throws Exception {
ServiceRecord record = getMarshal().fromBytes("somepath",
CONTAINER_RECORD.getBytes());
getRegistryDNS().register(
"/registry/users/root/services/org-apache-slider/test1/components/"
+ "ctr-e50-1451931954322-0016-01-000002",
record);
// start assessing whether correct records are available
List<Record> recs = assertDNSQuery(
"19.0.17.172.in-addr.arpa.", Type.PTR, 1);
assertEquals("wrong result",
"httpd-1.test1.root.dev.test.",
((PTRRecord) recs.get(0)).getTarget().toString());
} |
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
final Map<Path, List<ObjectKeyAndVersion>> map = new HashMap<>();
final List<Path> containers = new ArrayList<>();
for(Path file : files.keySet()) {
if(containerService.isContainer(file)) {
containers.add(file);
continue;
}
callback.delete(file);
final Path bucket = containerService.getContainer(file);
if(file.getType().contains(Path.Type.upload)) {
// In-progress multipart upload
try {
multipartService.delete(new MultipartUpload(file.attributes().getVersionId(),
bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file)));
}
catch(NotfoundException ignored) {
log.warn(String.format("Ignore failure deleting multipart upload %s", file));
}
}
else {
final List<ObjectKeyAndVersion> keys = new ArrayList<>();
// Always returning 204 even if the key does not exist. Does not return 404 for non-existing keys
keys.add(new ObjectKeyAndVersion(containerService.getKey(file), file.attributes().getVersionId()));
if(map.containsKey(bucket)) {
map.get(bucket).addAll(keys);
}
else {
map.put(bucket, keys);
}
}
}
// Iterate over all containers and delete list of keys
for(Map.Entry<Path, List<ObjectKeyAndVersion>> entry : map.entrySet()) {
final Path container = entry.getKey();
final List<ObjectKeyAndVersion> keys = entry.getValue();
this.delete(container, keys, prompt);
}
for(Path file : containers) {
callback.delete(file);
// Finally delete bucket itself
try {
final String bucket = containerService.getContainer(file).getName();
session.getClient().deleteBucket(bucket);
session.getClient().getRegionEndpointCache().removeRegionForBucketName(bucket);
}
catch(ServiceException e) {
throw new S3ExceptionMappingService().map("Cannot delete {0}", e, file);
}
}
} | @Test
public void testDeleteNotFoundKey() throws Exception {
final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
final List<ObjectKeyAndVersion> keys = new ArrayList<>();
for(int i = 0; i < 1010; i++) {
keys.add(new ObjectKeyAndVersion(new AlphanumericRandomStringService().random()));
}
new S3MultipleDeleteFeature(session, new S3AccessControlListFeature(session)).delete(container, keys, new DisabledLoginCallback());
} |
@GET
@Path("/{entityType}")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8
/* , MediaType.APPLICATION_XML */})
public TimelineEntities getEntities(
@Context HttpServletRequest req,
@Context HttpServletResponse res,
@PathParam("entityType") String entityType,
@QueryParam("primaryFilter") String primaryFilter,
@QueryParam("secondaryFilter") String secondaryFilter,
@QueryParam("windowStart") String windowStart,
@QueryParam("windowEnd") String windowEnd,
@QueryParam("fromId") String fromId,
@QueryParam("fromTs") String fromTs,
@QueryParam("limit") String limit,
@QueryParam("fields") String fields) {
init(res);
try {
return timelineDataManager.getEntities(
parseStr(entityType),
parsePairStr(primaryFilter, ":"),
parsePairsStr(secondaryFilter, ",", ":"),
parseLongStr(windowStart),
parseLongStr(windowEnd),
parseStr(fromId),
parseLongStr(fromTs),
parseLongStr(limit),
parseFieldsStr(fields, ","),
getUser(req));
} catch (NumberFormatException e) {
throw new BadRequestException(
"windowStart, windowEnd, fromTs or limit is not a numeric value: " + e);
} catch (IllegalArgumentException e) {
throw new BadRequestException("requested invalid field: " + e);
} catch (Exception e) {
LOG.error("Error getting entities", e);
throw new WebApplicationException(e,
Response.Status.INTERNAL_SERVER_ERROR);
}
} | @Test
void testGetEntities() throws Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("timeline")
.path("type_1")
.accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
response.getType().toString());
verifyEntities(response.getEntity(TimelineEntities.class));
} |
public static StepRuntimeState retrieveStepRuntimeState(
Map<String, Object> data, ObjectMapper objectMapper) {
Object runtimeSummary =
data.getOrDefault(Constants.STEP_RUNTIME_SUMMARY_FIELD, Collections.emptyMap());
if (runtimeSummary instanceof StepRuntimeSummary) {
return ((StepRuntimeSummary) runtimeSummary).getRuntimeState();
}
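// Otherwise the summary is a raw map; extract and deserialize the runtime state field if present.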
Object state = ((Map<String, Object>) runtimeSummary).getOrDefault(RUNTIME_STATE_FIELD, null);
if (state != null) {
return objectMapper.convertValue(state, StepRuntimeState.class);
}
return new StepRuntimeState();
} | @Test
public void testRetrieveStepRuntimeStateJson() {
StepRuntimeState expected = new StepRuntimeState();
expected.setStatus(StepInstance.Status.RUNNING);
Assert.assertEquals(
expected,
StepHelper.retrieveStepRuntimeState(
singletonMap(
Constants.STEP_RUNTIME_SUMMARY_FIELD,
singletonMap("runtime_state", singletonMap("status", "RUNNING"))),
MAPPER));
} |
@Override
public void execute() {
ddbClient.updateTable(UpdateTableRequest.builder().tableName(determineTableName())
.provisionedThroughput(ProvisionedThroughput.builder().readCapacityUnits(determineReadCapacity())
.writeCapacityUnits(determineWriteCapacity()).build())
.build());
} | @Test
public void testExecute() {
command.execute();
assertEquals("DOMAIN1", ddbClient.updateTableRequest.tableName());
assertEquals(Long.valueOf(20), ddbClient.updateTableRequest.provisionedThroughput().readCapacityUnits());
assertEquals(Long.valueOf(30), ddbClient.updateTableRequest.provisionedThroughput().writeCapacityUnits());
} |
public static String getPartitionNameFromPartitionType(MetadataPartitionType partitionType, HoodieTableMetaClient metaClient, String indexName) {
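// Functional indexes resolve their partition name from the table's index definition; all other types use their fixed partition path.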
if (MetadataPartitionType.FUNCTIONAL_INDEX.equals(partitionType)) {
checkArgument(metaClient.getIndexMetadata().isPresent(), "Index definition is not present");
return metaClient.getIndexMetadata().get().getIndexDefinitions().get(indexName).getIndexName();
}
return partitionType.getPartitionPath();
} | @Test
public void testGetNonFunctionalIndexPath() {
MetadataPartitionType partitionType = MetadataPartitionType.COLUMN_STATS;
HoodieTableMetaClient metaClient = mock(HoodieTableMetaClient.class);
String result = HoodieIndexUtils.getPartitionNameFromPartitionType(partitionType, metaClient, null);
assertEquals(partitionType.getPartitionPath(), result);
} |
public int getSequenceNumber() {
return this.sequenceNumber;
} | @Test
public void testGetSequenceNumber() throws Exception {
assertEquals(3, buildChunk().getSequenceNumber());
} |
public static SFCertificateTrustPanel sharedCertificateTrustPanel() {
return Rococoa.createClass("SFCertificateTrustPanel", SFCertificateTrustPanel._Class.class).sharedCertificateTrustPanel();
} | @Test
public void sharedCertificateTrustPanel() {
assertNotNull(SFCertificateTrustPanel.sharedCertificateTrustPanel());
} |
@Override
public void commitJob(JobContext originalContext) throws IOException {
commitJobs(Collections.singletonList(originalContext), Operation.OTHER);
} | @Test
public void testSuccessfulUnpartitionedWrite() throws IOException {
HiveIcebergOutputCommitter committer = new HiveIcebergOutputCommitter();
Table table = table(temp.getRoot().getPath(), false);
JobConf conf = jobConf(table, 1);
List<Record> expected = writeRecords(table.name(), 1, 0, true, false, conf);
committer.commitJob(new JobContextImpl(conf, JOB_ID));
HiveIcebergTestUtils.validateFiles(table, conf, JOB_ID, 1);
HiveIcebergTestUtils.validateData(table, expected, 0);
} |
public static String buildRulePath(final String pluginName, final String selectorId, final String ruleId) {
return String.join(PATH_SEPARATOR, buildRuleParentPath(pluginName), String.join(SELECTOR_JOIN_RULE, selectorId, ruleId));
} | @Test
public void testBuildRulePath() {
String pluginName = RandomStringUtils.randomAlphanumeric(10);
String selectorId = RandomStringUtils.randomAlphanumeric(10);
String ruleId = RandomStringUtils.randomAlphanumeric(10);
String rulePath = DefaultPathConstants.buildRulePath(pluginName, selectorId, ruleId);
assertThat(rulePath, notNullValue());
assertThat(String.join(SEPARATOR, RULE_PARENT, pluginName, String.join(SELECTOR_JOIN_RULE, selectorId, ruleId)), equalTo(rulePath));
assertThat(String.join(SEPARATOR, DefaultPathConstants.buildRuleParentPath(pluginName), String.join(SELECTOR_JOIN_RULE, selectorId, ruleId)), equalTo(rulePath));
} |
public static char randomNumber() {
return randomChar(BASE_NUMBER);
} | @Test
public void randomNumberTest() {
final char c = RandomUtil.randomNumber();
assertTrue(c <= '9');
} |
@VisibleForTesting
static void initAddrUseFqdn(List<InetAddress> addrs) {
useFqdn = true;
analyzePriorityCidrs();
String fqdn = null;
if (PRIORITY_CIDRS.isEmpty()) {
// Get FQDN from local host by default.
try {
InetAddress localHost = InetAddress.getLocalHost();
fqdn = localHost.getCanonicalHostName();
String ip = localHost.getHostAddress();
LOG.info("Get FQDN from local host by default, FQDN: {}, ip: {}, v6: {}", fqdn, ip,
localHost instanceof Inet6Address);
} catch (UnknownHostException e) {
LOG.error("failed to get FQDN from local host, will exit", e);
System.exit(-1);
}
if (fqdn == null) {
LOG.error("priority_networks is not set and we cannot get FQDN from local host");
System.exit(-1);
}
// Try to resolve addr from FQDN
InetAddress uncheckedInetAddress = null;
try {
uncheckedInetAddress = InetAddress.getByName(fqdn);
} catch (UnknownHostException e) {
LOG.error("failed to parse FQDN: {}, message: {}", fqdn, e.getMessage(), e);
System.exit(-1);
}
if (null == uncheckedInetAddress) {
LOG.error("failed to parse FQDN: {}", fqdn);
System.exit(-1);
}
// Check whether the InetAddress obtained via FQDN is bound to some network interface
boolean hasInetAddr = false;
for (InetAddress addr : addrs) {
LOG.info("Try to match addr in fqdn mode, ip: {}, FQDN: {}",
addr.getHostAddress(), addr.getCanonicalHostName());
if (addr.getCanonicalHostName()
.equals(uncheckedInetAddress.getCanonicalHostName())) {
hasInetAddr = true;
break;
}
}
if (hasInetAddr) {
localAddr = uncheckedInetAddress;
LOG.info("Using FQDN from local host by default, FQDN: {}, ip: {}, v6: {}",
localAddr.getCanonicalHostName(),
localAddr.getHostAddress(),
localAddr instanceof Inet6Address);
} else {
LOG.error("Cannot find a network interface matching FQDN: {}", fqdn);
System.exit(-1);
}
} else {
LOG.info("using priority_networks in fqdn mode to decide whether ipv6 or ipv4 is preferred");
for (InetAddress addr : addrs) {
String hostAddr = addr.getHostAddress();
String canonicalHostName = addr.getCanonicalHostName();
LOG.info("Try to match addr in fqdn mode, ip: {}, FQDN: {}", hostAddr, canonicalHostName);
if (isInPriorNetwork(hostAddr)) {
localAddr = addr;
fqdn = canonicalHostName;
LOG.info("Using FQDN from matched addr, FQDN: {}, ip: {}, v6: {}",
fqdn, hostAddr, addr instanceof Inet6Address);
break;
}
LOG.info("skip addr {} not belonged to priority networks in FQDN mode", addr);
}
if (fqdn == null) {
LOG.error("priority_networks has been set and we cannot find matched addr, will exit");
System.exit(-1);
}
}
// double-check the reverse resolution
String canonicalHostName = localAddr.getCanonicalHostName();
if (!canonicalHostName.equals(fqdn)) {
LOG.error("The FQDN of the parsed address [{}] is not the same as " +
"the FQDN obtained from the host [{}]", canonicalHostName, fqdn);
System.exit(-1);
}
} | @Test(expected = IllegalAccessException.class)
public void testGetStartWithFQDNGetNameGetNull() {
testInitAddrUseFqdnCommonMock();
List<InetAddress> hosts = NetUtils.getHosts();
new MockUp<InetAddress>() {
@Mock
public InetAddress getLocalHost() throws UnknownHostException {
return addr;
}
@Mock
public String getHostAddress() {
return "127.0.0.10";
}
@Mock
public String getCanonicalHostName() {
return "sandbox";
}
@Mock
public InetAddress getByName(String host) throws UnknownHostException {
return null;
}
};
FrontendOptions.initAddrUseFqdn(hosts);
} |
@SuppressWarnings("unchecked")
public static List<Object> asList(final Object key) {
final Optional<Windowed<Object>> windowed = key instanceof Windowed
? Optional.of((Windowed<Object>) key)
: Optional.empty();
final Object naturalKey = windowed
.map(Windowed::key)
.orElse(key);
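// Only GenericKey (or null) natural keys can be flattened into a list.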
if (naturalKey != null && !(naturalKey instanceof GenericKey)) {
throw new IllegalArgumentException("Non generic key: " + key);
}
final Optional<GenericKey> genericKey = Optional.ofNullable((GenericKey) naturalKey);
final List<Object> data = new ArrayList<>(
genericKey.map(GenericKey::size).orElse(0)
+ (windowed.isPresent() ? 2 : 0)
);
genericKey.ifPresent(k -> data.addAll(k.values()));
windowed
.map(Windowed::window)
.ifPresent(wnd -> {
data.add(wnd.start());
data.add(wnd.end());
});
return data;
} | @Test
public void shouldConvertNullKeyToList() {
// Given:
final GenericKey key = GenericKey.genericKey((Object)null);
// When:
final List<?> result = KeyUtil.asList(key);
// Then:
assertThat(result, is(Collections.singletonList((null))));
} |
@Override
public void setConf(Configuration conf) {
if (conf != null) {
conf = addSecurityConfiguration(conf);
}
super.setConf(conf);
} | @Test
public void testMonitoringOperationsWithAutoHaEnabled() throws Exception {
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
// Turn on auto-HA
HdfsConfiguration conf = getHAConf();
conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);
tool.setConf(conf);
assertEquals(0, runTool("-checkHealth", "nn1"));
Mockito.verify(mockProtocol).monitorHealth();
assertEquals(0, runTool("-getServiceState", "nn1"));
Mockito.verify(mockProtocol).getServiceStatus();
} |
public URI next() {
URI result = _uris.get(_index);
_index = (_index + 1) % _uris.size();
return result;
} | @Test
public void testHostAddressRoundRobin()
throws URISyntaxException, UnknownHostException {
InetAddress[] testWebAddresses = new InetAddress[]{
InetAddress.getByAddress("testweb.com", InetAddresses.forString("192.168.3.1").getAddress()),
InetAddress.getByAddress("testweb.com", InetAddresses.forString("192.168.3.2").getAddress()),
InetAddress.getByAddress("testweb.com", InetAddresses.forString("192.168.3.3").getAddress())
};
InetAddress[] localHostAddresses = new InetAddress[]{
InetAddress.getByAddress("localhost", InetAddresses.forString("127.0.0.1").getAddress()),
InetAddress.getByAddress("localhost", InetAddresses.forString("0:0:0:0:0:0:0:1").getAddress())
};
MockedStatic<InetAddress> mock = Mockito.mockStatic(InetAddress.class);
mock.when(() -> InetAddress.getAllByName("localhost")).thenReturn(localHostAddresses);
mock.when(() -> InetAddress.getAllByName("testweb.com")).thenReturn(testWebAddresses);
TestCase[] testCases = new TestCase[]{
new TestCase("http://127.0.0.1", Collections.singletonList("http://127.0.0.1")),
new TestCase("http://127.0.0.1/", Collections.singletonList("http://127.0.0.1/")),
new TestCase("http://127.0.0.1/?", Collections.singletonList("http://127.0.0.1/?")),
new TestCase("http://127.0.0.1/?it=5", Collections.singletonList("http://127.0.0.1/?it=5")),
new TestCase("http://127.0.0.1/me/out?it=5", Collections.singletonList("http://127.0.0.1/me/out?it=5")),
new TestCase("http://127.0.0.1:20000", Collections.singletonList("http://127.0.0.1:20000")),
new TestCase("http://127.0.0.1:20000/", Collections.singletonList("http://127.0.0.1:20000/")),
new TestCase("http://127.0.0.1:20000/?", Collections.singletonList("http://127.0.0.1:20000/?")),
new TestCase("http://127.0.0.1:20000/?it=5", Collections.singletonList("http://127.0.0.1:20000/?it=5")),
new TestCase("http://127.0.0.1:20000/me/out?it=5",
Collections.singletonList("http://127.0.0.1:20000/me/out?it=5")),
new TestCase("http://localhost", Arrays.asList("http://127.0.0.1", "http://[0:0:0:0:0:0:0:1]")),
new TestCase("http://localhost/", Arrays.asList("http://127.0.0.1/", "http://[0:0:0:0:0:0:0:1]/")),
new TestCase("http://localhost/?", Arrays.asList("http://127.0.0.1/?", "http://[0:0:0:0:0:0:0:1]/?")),
new TestCase("http://localhost/?it=5",
Arrays.asList("http://127.0.0.1/?it=5", "http://[0:0:0:0:0:0:0:1]/?it=5")),
new TestCase("http://localhost/me/out?it=5",
Arrays.asList("http://127.0.0.1/me/out?it=5", "http://[0:0:0:0:0:0:0:1]/me/out?it=5")),
new TestCase("http://localhost:20000",
Arrays.asList("http://127.0.0.1:20000", "http://[0:0:0:0:0:0:0:1]:20000")),
new TestCase("http://localhost:20000/",
Arrays.asList("http://127.0.0.1:20000/", "http://[0:0:0:0:0:0:0:1]:20000/")),
new TestCase("http://localhost:20000/?",
Arrays.asList("http://127.0.0.1:20000/?", "http://[0:0:0:0:0:0:0:1]:20000/?")),
new TestCase("http://localhost:20000/?it=5",
Arrays.asList("http://127.0.0.1:20000/?it=5", "http://[0:0:0:0:0:0:0:1]:20000/?it=5")),
new TestCase("http://localhost:20000/me/out?it=5",
Arrays.asList("http://127.0.0.1:20000/me/out?it=5", "http://[0:0:0:0:0:0:0:1]:20000/me/out?it=5")),
new TestCase("http://testweb.com",
Arrays.asList("http://192.168.3.1", "http://192.168.3.2", "http://192.168.3.3")),
new TestCase("http://testweb.com/",
Arrays.asList("http://192.168.3.1/", "http://192.168.3.2/", "http://192.168.3.3/")),
new TestCase("http://testweb.com/?",
Arrays.asList("http://192.168.3.1/?", "http://192.168.3.2/?", "http://192.168.3.3/?")),
new TestCase("http://testweb.com/?it=5",
Arrays.asList("http://192.168.3.1/?it=5", "http://192.168.3.2/?it=5", "http://192.168.3.3/?it=5")),
new TestCase("http://testweb.com/me/out?it=5",
Arrays.asList("http://192.168.3.1/me/out?it=5", "http://192.168.3.2/me/out?it=5",
"http://192.168.3.3/me/out?it=5")),
new TestCase("http://testweb.com:20000",
Arrays.asList("http://192.168.3.1:20000", "http://192.168.3.2:20000", "http://192.168.3.3:20000")),
new TestCase("http://testweb.com:20000/",
Arrays.asList("http://192.168.3.1:20000/", "http://192.168.3.2:20000/", "http://192.168.3.3:20000/")),
new TestCase("http://testweb.com:20000/?",
Arrays.asList("http://192.168.3.1:20000/?", "http://192.168.3.2:20000/?", "http://192.168.3.3:20000/?")),
new TestCase("http://testweb.com:20000/?it=5",
Arrays.asList("http://192.168.3.1:20000/?it=5", "http://192.168.3.2:20000/?it=5",
"http://192.168.3.3:20000/?it=5")),
new TestCase("http://testweb.com:20000/me/out?it=5",
Arrays.asList("http://192.168.3.1:20000/me/out?it=5", "http://192.168.3.2:20000/me/out?it=5",
"http://192.168.3.3:20000/me/out?it=5")),
new TestCase("https://127.0.0.1", Collections.singletonList("https://127.0.0.1")),
new TestCase("https://127.0.0.1/", Collections.singletonList("https://127.0.0.1/")),
new TestCase("https://127.0.0.1/?", Collections.singletonList("https://127.0.0.1/?")),
new TestCase("https://127.0.0.1/?it=5", Collections.singletonList("https://127.0.0.1/?it=5")),
new TestCase("https://127.0.0.1/me/out?it=5", Collections.singletonList("https://127.0.0.1/me/out?it=5")),
new TestCase("https://127.0.0.1:20000", Collections.singletonList("https://127.0.0.1:20000")),
new TestCase("https://127.0.0.1:20000/", Collections.singletonList("https://127.0.0.1:20000/")),
new TestCase("https://127.0.0.1:20000/?", Collections.singletonList("https://127.0.0.1:20000/?")),
new TestCase("https://127.0.0.1:20000/?it=5", Collections.singletonList("https://127.0.0.1:20000/?it=5")),
new TestCase("https://127.0.0.1:20000/me/out?it=5",
Collections.singletonList("https://127.0.0.1:20000/me/out?it=5")),
new TestCase("https://localhost", Arrays.asList("https://127.0.0.1", "https://[0:0:0:0:0:0:0:1]")),
new TestCase("https://localhost/", Arrays.asList("https://127.0.0.1/", "https://[0:0:0:0:0:0:0:1]/")),
new TestCase("https://localhost/?", Arrays.asList("https://127.0.0.1/?", "https://[0:0:0:0:0:0:0:1]/?")),
new TestCase("https://localhost/?it=5",
Arrays.asList("https://127.0.0.1/?it=5", "https://[0:0:0:0:0:0:0:1]/?it=5")),
new TestCase("https://localhost/me/out?it=5",
Arrays.asList("https://127.0.0.1/me/out?it=5", "https://[0:0:0:0:0:0:0:1]/me/out?it=5")),
new TestCase("https://localhost:20000",
Arrays.asList("https://127.0.0.1:20000", "https://[0:0:0:0:0:0:0:1]:20000")),
new TestCase("https://localhost:20000/",
Arrays.asList("https://127.0.0.1:20000/", "https://[0:0:0:0:0:0:0:1]:20000/")),
new TestCase("https://localhost:20000/?",
Arrays.asList("https://127.0.0.1:20000/?", "https://[0:0:0:0:0:0:0:1]:20000/?")),
new TestCase("https://localhost:20000/?it=5",
Arrays.asList("https://127.0.0.1:20000/?it=5", "https://[0:0:0:0:0:0:0:1]:20000/?it=5")),
new TestCase("https://testweb.com",
Arrays.asList("https://192.168.3.1", "https://192.168.3.2", "https://192.168.3.3")),
new TestCase("https://testweb.com/",
Arrays.asList("https://192.168.3.1/", "https://192.168.3.2/", "https://192.168.3.3/")),
new TestCase("https://testweb.com/?",
Arrays.asList("https://192.168.3.1/?", "https://192.168.3.2/?", "https://192.168.3.3/?")),
new TestCase("https://testweb.com/?it=5",
Arrays.asList("https://192.168.3.1/?it=5", "https://192.168.3.2/?it=5", "https://192.168.3.3/?it=5")),
new TestCase("https://testweb.com/me/out?it=5",
Arrays.asList("https://192.168.3.1/me/out?it=5", "https://192.168.3.2/me/out?it=5",
"https://192.168.3.3/me/out?it=5")),
new TestCase("https://testweb.com:20000",
Arrays.asList("https://192.168.3.1:20000", "https://192.168.3.2:20000", "https://192.168.3.3:20000")),
new TestCase("https://testweb.com:20000/",
Arrays.asList("https://192.168.3.1:20000/", "https://192.168.3.2:20000/", "https://192.168.3.3:20000/")),
new TestCase("https://testweb.com:20000/?",
Arrays.asList("https://192.168.3.1:20000/?", "https://192.168.3.2:20000/?", "https://192.168.3.3:20000/?")),
new TestCase("https://testweb.com:20000/?it=5",
Arrays.asList("https://192.168.3.1:20000/?it=5", "https://192.168.3.2:20000/?it=5",
"https://192.168.3.3:20000/?it=5")),
new TestCase("https://testweb.com:20000/me/out?it=5",
Arrays.asList("https://192.168.3.1:20000/me/out?it=5", "https://192.168.3.2:20000/me/out?it=5",
"https://192.168.3.3:20000/me/out?it=5")),
};
for (TestCase testCase : testCases) {
String uri = testCase._originalUri;
RoundRobinURIProvider uriProvider = new RoundRobinURIProvider(List.of(new URI(uri)), true);
int n = testCase._expectedUris.size();
int previousIndex = -1;
int currentIndex;
for (int i = 0; i < 2 * n; i++) {
String actualUri = uriProvider.next().toString();
currentIndex = testCase._expectedUris.indexOf(actualUri);
Assert.assertTrue(currentIndex != -1);
if (previousIndex != -1) {
Assert.assertEquals((previousIndex + 1) % n, currentIndex);
}
previousIndex = currentIndex;
}
}
} |
@Override
@Transactional(rollbackFor = Exception.class)
public void deleteCodegen(Long tableId) {
// Verify the table exists
if (codegenTableMapper.selectById(tableId) == null) {
throw exception(CODEGEN_TABLE_NOT_EXISTS);
}
// Delete the table definition
codegenTableMapper.deleteById(tableId);
// Delete the column field definitions
codegenColumnMapper.deleteListByTableId(tableId);
} | @Test
public void testDeleteCodegen_notExists() {
assertServiceException(() -> codegenService.deleteCodegen(randomLongId()),
CODEGEN_TABLE_NOT_EXISTS);
} |
@Override
public NativeReader<?> create(
CloudObject spec,
@Nullable Coder<?> coder,
@Nullable PipelineOptions options,
@Nullable DataflowExecutionContext executionContext,
DataflowOperationContext operationContext)
throws Exception {
@SuppressWarnings("unchecked")
Coder<Object> typedCoder = (Coder<Object>) coder;
return createTyped(spec, typedCoder, options, executionContext, operationContext);
} | @Test
public void testCreateConcatReaderWithManySubSources() throws Exception {
List<List<String>> allData = createInMemorySourceData(15, 10);
Source source = createSourcesWithInMemorySources(allData);
@SuppressWarnings("unchecked")
NativeReader<String> reader =
(NativeReader<String>) ReaderRegistry.defaultRegistry().create(source, null, null, null);
assertNotNull(reader);
List<String> expected = new ArrayList<>();
for (List<String> data : allData) {
expected.addAll(data);
}
assertThat(readAllFromReader(reader), containsInAnyOrder(expected.toArray()));
} |
@Override
public void preAction(WebService.Action action, Request request) {
Level logLevel = getLogLevel();
String deprecatedSinceEndpoint = action.deprecatedSince();
if (deprecatedSinceEndpoint != null) {
logWebServiceMessage(logLevel, deprecatedSinceEndpoint);
}
action.params().forEach(param -> logParamMessage(request, logLevel, param));
} | @Test
@UseDataProvider("userSessions")
public void preAction_whenParameterIsDeprecatedAndNoReplacementAndBrowserSession_shouldLogWarning(boolean isLoggedIn, boolean isAuthenticatedBrowserSession, Level expectedLogLevel) {
when(userSession.hasSession()).thenReturn(true);
when(userSession.isLoggedIn()).thenReturn(isLoggedIn);
when(userSession.isAuthenticatedBrowserSession()).thenReturn(isAuthenticatedBrowserSession);
WebService.Action action = mock(WebService.Action.class);
when(action.path()).thenReturn("api/issues/search");
when(action.deprecatedSince()).thenReturn(null);
WebService.Param mockParam = mock(WebService.Param.class);
when(mockParam.key()).thenReturn("sansTop25");
when(mockParam.deprecatedSince()).thenReturn("9.7");
when(action.params()).thenReturn(List.of(mockParam));
when(action.param("sansTop25")).thenReturn(mockParam);
Request request = mock(Request.class);
Request.StringParam stringParam = mock(Request.StringParam.class);
when(stringParam.isPresent()).thenReturn(true);
when(request.hasParam("sansTop25")).thenReturn(true);
when(request.getParams()).thenReturn(Map.of("sansTop25", new String[]{}));
underTest.preAction(action, request);
assertThat(logTester.logs(expectedLogLevel))
.contains("Parameter 'sansTop25' is deprecated since 9.7 and will be removed in a future version.");
} |
public Sensor topicLevelSensor(final String threadId,
final String taskId,
final String processorNodeName,
final String topicName,
final String sensorSuffix,
final Sensor.RecordingLevel recordingLevel,
final Sensor... parents) {
final String sensorPrefix = topicSensorPrefix(threadId, taskId, processorNodeName, topicName);
synchronized (topicLevelSensors) {
return getSensors(topicLevelSensors, sensorSuffix, sensorPrefix, recordingLevel, parents);
}
} | @Test
public void shouldGetNewTopicLevelSensor() {
final Metrics metrics = mock(Metrics.class);
final RecordingLevel recordingLevel = RecordingLevel.INFO;
setupGetNewSensorTest(metrics, recordingLevel);
final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, CLIENT_ID, VERSION, time);
final Sensor actualSensor = streamsMetrics.topicLevelSensor(
THREAD_ID1,
TASK_ID1,
NODE_ID1,
TOPIC_ID1,
SENSOR_NAME_1,
recordingLevel
);
assertThat(actualSensor, is(equalToObject(sensor)));
} |
@Override
public MapperResult findAllConfigInfoBetaForDumpAllFetchRows(MapperContext context) {
Integer startRow = context.getStartRow();
int pageSize = context.getPageSize();
String sql = "SELECT t.id,data_id,group_id,tenant_id,app_name,content,md5,gmt_modified,beta_ips "
+ " FROM ( SELECT id FROM config_info_beta ORDER BY id OFFSET " + startRow + " ROWS FETCH NEXT "
+ pageSize + " ROWS ONLY )" + " g, config_info_beta t WHERE g.id = t.id";
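// Pagination values are inlined into the Derby OFFSET/FETCH clause above and also echoed into the parameter list.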
List<Object> paramList = new ArrayList<>();
paramList.add(startRow);
paramList.add(pageSize);
return new MapperResult(sql, paramList);
} | @Test
void testFindAllConfigInfoBetaForDumpAllFetchRows() {
MapperResult result = configInfoBetaMapperByDerby.findAllConfigInfoBetaForDumpAllFetchRows(context);
String sql = result.getSql();
List<Object> paramList = result.getParamList();
assertEquals(sql, "SELECT t.id,data_id,group_id,tenant_id,app_name,content,md5,gmt_modified,beta_ips FROM "
+ "( SELECT id FROM config_info_beta ORDER BY id OFFSET " + startRow + " ROWS FETCH NEXT " + pageSize + " ROWS ONLY ) g, "
+ "config_info_beta t WHERE g.id = t.id");
assertEquals(paramList, Arrays.asList(startRow, pageSize));
} |
public static <V> SetOnceReference<V> unset() {
return new SetOnceReference<>();
} | @Test
public void testFromUnset() {
checkUnsetReference(SetOnceReference.unset());
} |
public <T> Map<String, Object> schemas(Class<? extends T> cls) {
return this.schemas(cls, false);
} | @SuppressWarnings("unchecked")
@Test
void returnTask() throws URISyntaxException {
Helpers.runApplicationContext((applicationContext) -> {
JsonSchemaGenerator jsonSchemaGenerator = applicationContext.getBean(JsonSchemaGenerator.class);
Map<String, Object> returnSchema = jsonSchemaGenerator.schemas(Return.class);
var definitions = (Map<String, Map<String, Object>>) returnSchema.get("definitions");
var returnTask = definitions.get(Return.class.getName());
var metrics = (List<Object>) returnTask.get("$metrics");
assertThat(metrics.size(), is(2));
var firstMetric = (Map<String, Object>) metrics.getFirst();
assertThat(firstMetric.get("name"), is("length"));
assertThat(firstMetric.get("type"), is("counter"));
var secondMetric = (Map<String, Object>) metrics.get(1);
assertThat(secondMetric.get("name"), is("duration"));
assertThat(secondMetric.get("type"), is("timer"));
});
} |
static Set<PipelineOptionSpec> getOptionSpecs(
Class<? extends PipelineOptions> optionsInterface, boolean skipHidden) {
Iterable<Method> methods = ReflectHelpers.getClosureOfMethodsOnInterface(optionsInterface);
Multimap<String, Method> propsToGetters = getPropertyNamesToGetters(methods);
ImmutableSet.Builder<PipelineOptionSpec> setBuilder = ImmutableSet.builder();
for (Map.Entry<String, Method> propAndGetter : propsToGetters.entries()) {
String prop = propAndGetter.getKey();
Method getter = propAndGetter.getValue();
@SuppressWarnings("unchecked")
Class<? extends PipelineOptions> declaringClass =
(Class<? extends PipelineOptions>) getter.getDeclaringClass();
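// Skip getters declared outside the PipelineOptions hierarchy.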
if (!PipelineOptions.class.isAssignableFrom(declaringClass)) {
continue;
}
if (skipHidden && declaringClass.isAnnotationPresent(Hidden.class)) {
continue;
}
setBuilder.add(PipelineOptionSpec.of(declaringClass, prop, getter));
}
return setBuilder.build();
} | @Test
public void testIncludesHiddenInterfaces() {
Set<PipelineOptionSpec> properties =
PipelineOptionsReflector.getOptionSpecs(HiddenOptions.class, false);
assertThat(properties, hasItem(hasName("foo")));
} |
@Override
public Iterator<QueryableEntry> iterator() {
return new It();
} | @Test
public void contains_matchingPredicate_notInResult() {
Set<QueryableEntry> entries = generateEntries(100000);
List<Set<QueryableEntry>> otherIndexedResults = new ArrayList<>();
otherIndexedResults.add(Collections.emptySet());
AndResultSet resultSet = new AndResultSet(entries, otherIndexedResults, asList(Predicates.alwaysTrue()));
assertNotContains(resultSet, entries.iterator().next());
} |
@Override
public alluxio.grpc.JobInfo toProto() throws IOException {
List<alluxio.grpc.JobInfo> taskInfos = new ArrayList<>();
for (JobInfo taskInfo : mChildren) {
taskInfos.add(taskInfo.toProto());
}
alluxio.grpc.JobInfo.Builder jobInfoBuilder = alluxio.grpc.JobInfo.newBuilder().setId(mId)
.setErrorMessage(mErrorMessage).addAllChildren(taskInfos).setStatus(mStatus.toProto())
.setName(mName).setDescription(mDescription).addAllAffectedPaths(mAffectedPaths)
.setErrorType(mErrorType).setType(JobType.PLAN);
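// Attach the serialized job result only when one is present.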
if (mResult != null && !mResult.isEmpty()) {
ByteBuffer result = ByteBuffer.wrap(SerializationUtils.serialize(mResult));
jobInfoBuilder.setResult(ByteString.copyFrom(result));
}
jobInfoBuilder.setLastUpdated(mLastUpdated);
return jobInfoBuilder.build();
} | @Test
public void testToProto() throws IOException {
PlanInfo planInfo = new PlanInfo(1, "test", Status.COMPLETED, 10, null);
PlanInfo otherPlanInfo = new PlanInfo(planInfo.toProto());
assertEquals(planInfo, otherPlanInfo);
} |
@Override
public void updateDictData(DictDataSaveReqVO updateReqVO) {
// Verify the record itself exists
validateDictDataExists(updateReqVO.getId());
// Verify the dict type is valid
validateDictTypeExists(updateReqVO.getDictType());
// Verify the uniqueness of the dict data value
validateDictDataValueUnique(updateReqVO.getId(), updateReqVO.getDictType(), updateReqVO.getValue());
// Update the dict data
DictDataDO updateObj = BeanUtils.toBean(updateReqVO, DictDataDO.class);
dictDataMapper.updateById(updateObj);
} | @Test
public void testUpdateDictData_success() {
// Mock data
DictDataDO dbDictData = randomDictDataDO();
dictDataMapper.insert(dbDictData); // @Sql: insert an existing record first
// Prepare parameters
DictDataSaveReqVO reqVO = randomPojo(DictDataSaveReqVO.class, o -> {
o.setId(dbDictData.getId()); // Set the ID to update
o.setStatus(randomCommonStatus());
});
// Mock the dict type lookup
when(dictTypeService.getDictType(eq(reqVO.getDictType()))).thenReturn(randomDictTypeDO(reqVO.getDictType()));
// Invoke
dictDataService.updateDictData(reqVO);
// Verify the update is correct
DictDataDO dictData = dictDataMapper.selectById(reqVO.getId()); // Fetch the latest record
assertPojoEquals(reqVO, dictData);
} |
public RuntimeOptionsBuilder parse(String... args) {
return parse(Arrays.asList(args));
} | @Test
void combines_tag_filters_from_env_if_rerun_file_specified_in_cli() {
RuntimeOptions runtimeOptions = parser
.parse("@src/test/resources/io/cucumber/core/options/runtime-options-rerun.txt")
.build();
RuntimeOptions options = new CucumberPropertiesParser()
.parse(singletonMap(FILTER_TAGS_PROPERTY_NAME, "@should_not_be_clobbered"))
.build(runtimeOptions);
List<String> actual = options.getTagExpressions().stream()
.map(e -> e.toString())
.collect(toList());
assertAll(
() -> assertThat(actual, contains("@should_not_be_clobbered")),
() -> assertThat(options.getLineFilters(),
hasEntry(new File("this/should/be/rerun.feature").toURI(), singleton(12))));
} |
public void checkExecutePrerequisites(final ExecutionContext executionContext) {
ShardingSpherePreconditions.checkState(isValidExecutePrerequisites(executionContext), () -> new TableModifyInTransactionException(getTableName(executionContext)));
} | @Test
void assertCheckExecutePrerequisitesWhenExecuteDMLInXATransaction() {
ExecutionContext executionContext = new ExecutionContext(
new QueryContext(createMySQLInsertStatementContext(), "", Collections.emptyList(), new HintValueContext(), mockConnectionContext(), mock(ShardingSphereMetaData.class)),
Collections.emptyList(), mock(RouteContext.class));
new ProxySQLExecutor(JDBCDriverType.STATEMENT, databaseConnectionManager, mock(DatabaseConnector.class), mockQueryContext()).checkExecutePrerequisites(executionContext);
} |
public boolean hasEmptyPart() {
// iterator will not contain an empty leaf queue, so check directly
if (leaf.isEmpty()) {
return true;
}
for (String part : this) {
if (part.isEmpty()) {
return true;
}
}
return false;
} | @Test
public void testEmptyPart() {
Assert.assertTrue(QUEUE_PATH_WITH_EMPTY_PART.hasEmptyPart());
Assert.assertTrue(QUEUE_PATH_WITH_EMPTY_LEAF.hasEmptyPart());
Assert.assertFalse(TEST_QUEUE_PATH.hasEmptyPart());
} |
@Override
public InterpreterResult interpret(String st, InterpreterContext context) {
return helper.interpret(session, st, context);
} | @Test
void should_execute_statement_with_consistency_option() {
// Given
String statement = "@consistency=THREE\n" +
"SELECT * FROM zeppelin.artists LIMIT 1;";
// When
final InterpreterResult actual = interpreter.interpret(statement, intrContext);
// Then
assertEquals(Code.ERROR, actual.code());
assertTrue(actual.message().get(0).getData()
.contains("Not enough replicas available for query at consistency THREE (3 required " +
"but only 1 alive)"),
actual.message().get(0).getData());
} |
public static String toJson(UpdateRequirement updateRequirement) {
return toJson(updateRequirement, false);
} | @Test
public void testAssertLastAssignedFieldIdToJson() {
String requirementType = UpdateRequirementParser.ASSERT_LAST_ASSIGNED_FIELD_ID;
int lastAssignedFieldId = 12;
String expected =
String.format(
"{\"type\":\"%s\",\"last-assigned-field-id\":%d}",
requirementType, lastAssignedFieldId);
UpdateRequirement actual = new UpdateRequirement.AssertLastAssignedFieldId(lastAssignedFieldId);
assertThat(UpdateRequirementParser.toJson(actual))
.as("AssertLastAssignedFieldId should convert to the correct JSON value")
.isEqualTo(expected);
} |
public void submitLoggingTask(Collection<Member> connectedMembers, Collection<Member> allMembers) {
if (delayPeriodSeconds <= 0) {
return;
}
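// Cancel any in-flight logging task before scheduling a new one.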
if (submittedLoggingTask != null && !submittedLoggingTask.isDone()) {
submittedLoggingTask.cancel(true);
}
submittedLoggingTask = executor.schedule(()
-> logger.info(connectivityLog(connectedMembers, allMembers)),
delayPeriodSeconds, SECONDS);
} | @Test
void assertTaskCancelledIfSubmittedDuringRunningTask() {
ScheduledFuture mockFuture = mock(ScheduledFuture.class);
List<Member> mockMembers = createMockMembers(2);
HazelcastProperties hzProps = createMockProperties(10);
ClientConnectivityLogger clientConnectivityLogger = new ClientConnectivityLogger(loggingService, executor, hzProps);
when(executor.schedule((Runnable) any(), anyLong(), any())).thenReturn(mockFuture);
clientConnectivityLogger.submitLoggingTask(List.of(mockMembers.get(0)), mockMembers);
// Submit another task while the first one is still running
clientConnectivityLogger.submitLoggingTask(List.of(mockMembers.get(0)), mockMembers);
verify(mockFuture, times(1)).cancel(true);
verify(executor, times(2)).schedule((Runnable) any(), anyLong(), any());
} |
@Override
protected Optional<ErrorResponse> filter(DiscFilterRequest request) {
String method = request.getMethod();
URI uri = request.getUri();
for (Rule rule : rules) {
if (rule.matches(method, uri)) {
log.log(Level.FINE, () ->
String.format("Request '%h' with method '%s' and uri '%s' matched rule '%s'", request, method, uri, rule.name));
return responseFor(request, rule.name, rule.response);
}
}
return responseFor(request, "default", defaultResponse);
} | @Test
void includes_rule_response_headers_in_response_for_blocked_request() throws IOException {
RuleBasedFilterConfig config = new RuleBasedFilterConfig.Builder()
.dryrun(false)
.defaultRule(new DefaultRule.Builder()
.action(DefaultRule.Action.Enum.ALLOW))
.rule(new Rule.Builder()
.name("rule")
.pathExpressions("/path-to-resource")
.action(Rule.Action.Enum.BLOCK)
.blockResponseHeaders(new Rule.BlockResponseHeaders.Builder()
.name("Response-Header-1").value("first-header")))
.build();
Metric metric = mock(Metric.class);
RuleBasedRequestFilter filter = new RuleBasedRequestFilter(metric, config);
MockResponseHandler responseHandler = new MockResponseHandler();
filter.filter(request("GET", "http://myserver/path-to-resource"), responseHandler);
assertBlocked(responseHandler, metric, 403, "");
Response response = responseHandler.getResponse();
assertResponseHeader(response, "Response-Header-1", "first-header");
} |
public LoggerContext apply(LogLevelConfig logLevelConfig, Props props) {
if (!ROOT_LOGGER_NAME.equals(logLevelConfig.getRootLoggerName())) {
throw new IllegalArgumentException("Value of LogLevelConfig#rootLoggerName must be \"" + ROOT_LOGGER_NAME + "\"");
}
LoggerContext rootContext = getRootContext();
logLevelConfig.getConfiguredByProperties().forEach((key, value) -> applyLevelByProperty(props, rootContext.getLogger(key), value));
logLevelConfig.getConfiguredByHardcodedLevel().forEach((key, value) -> applyHardcodedLevel(rootContext, key, value));
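// Loggers configured as off-unless-trace are only enabled when the global level is TRACE.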
Level propertyValueAsLevel = getPropertyValueAsLevel(props, LOG_LEVEL.getKey());
boolean traceGloballyEnabled = propertyValueAsLevel == Level.TRACE;
logLevelConfig.getOffUnlessTrace().forEach(logger -> applyHardUnlessTrace(rootContext, logger, traceGloballyEnabled));
return rootContext;
} | @Test
public void apply_sets_domain_property_over_process_and_global_property_if_all_set() {
LogLevelConfig config = newLogLevelConfig().levelByDomain("foo", WEB_SERVER, LogDomain.ES).build();
props.set("sonar.log.level", "DEBUG");
props.set("sonar.log.level.web", "DEBUG");
props.set("sonar.log.level.web.es", "TRACE");
LoggerContext context = underTest.apply(config, props);
assertThat(context.getLogger("foo").getLevel()).isEqualTo(Level.TRACE);
} |
@Override
public OkHttpClient get() {
final OkHttpClient.Builder clientBuilder = new OkHttpClient.Builder()
.retryOnConnectionFailure(true)
.connectTimeout(connectTimeout.getQuantity(), connectTimeout.getUnit())
.writeTimeout(writeTimeout.getQuantity(), writeTimeout.getUnit())
.readTimeout(readTimeout.getQuantity(), readTimeout.getUnit());
if (trustManagerAndSocketFactoryProvider != null) {
// always set our own CA; it might be overridden in later code
clientBuilder.sslSocketFactory(trustManagerAndSocketFactoryProvider.getSslSocketFactory(), trustManagerAndSocketFactoryProvider.getTrustManager());
}
if (httpProxyUri != null) {
clientBuilder.proxySelector(proxySelectorProvider.get());
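// Configure proxy authentication from user:password credentials embedded in the proxy URI, if any.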
if (!isNullOrEmpty(httpProxyUri.getUserInfo())) {
final List<String> list = Splitter.on(":")
.limit(2)
.splitToList(httpProxyUri.getUserInfo());
if (list.size() == 2) {
clientBuilder.proxyAuthenticator(new ProxyAuthenticator(list.get(0), list.get(1)));
}
}
}
return clientBuilder.build();
} | @Test
public void testDynamicProxy() throws URISyntaxException {
final String TEST_PROXY = "http://proxy.dummy.org";
final InetSocketAddress testProxyAddress = new InetSocketAddress(TEST_PROXY, 59001);
final Proxy testProxy = new Proxy(Proxy.Type.HTTP, testProxyAddress);
final ProxySelectorProvider proxyProvider = new ProxySelectorProvider(server.url("/").uri(), null);
ProxySelectorProvider spyProxyProvider = Mockito.spy(proxyProvider);
final OkHttpClientProvider provider = new OkHttpClientProvider(
Duration.milliseconds(100L),
Duration.milliseconds(100L),
Duration.milliseconds(100L),
server.url("/").uri(),
null, spyProxyProvider);
OkHttpClientProvider spyClientProvider = Mockito.spy(provider);
final OkHttpClient client = spyClientProvider.get();
assertThat(client.proxySelector().select(URI.create("http://www.example.com/")))
.hasSize(1)
.first()
.matches(proxy -> proxy.equals(server.toProxyAddress()));
Mockito.doReturn(testProxyAddress).when(spyProxyProvider).getProxyAddress();
assertThat(client.proxySelector().select(URI.create("http://www.example.com/")))
.hasSize(1)
.first()
.matches(proxy -> proxy.equals(testProxy));
} |
@Override
protected Map<String, Object> getOutputFieldValues(PMML4Result pmml4Result, Map<String, Object> resultVariables,
DMNResult dmnr) {
Map<String, Object> toReturn = new HashMap<>();
for (Map.Entry<String, Object> kv : resultVariables.entrySet()) {
String resultName = kv.getKey();
if (resultName == null || resultName.isEmpty()) {
continue;
}
Object r = kv.getValue();
populateWithObject(toReturn, kv.getKey(), r, dmnr);
}
return toReturn;
} | @Test
void getOutputFieldValues() {
List<Object> values = getValues();
Map<String, Object> resultVariables = new HashMap<>();
for (int i = 0; i < values.size(); i++) {
resultVariables.put("Element-" + i, values.get(i));
}
Map<String, Object> retrieved = dmnKiePMMLTrustyInvocationEvaluator.getOutputFieldValues(new PMML4Result(),
resultVariables, null);
resultVariables.forEach((s, value) -> {
assertThat(retrieved).containsKey(s);
Object retObject = retrieved.get(s);
Object expected = NumberEvalHelper.coerceNumber(value);
assertThat(retObject).isEqualTo(expected);
});
} |
public void setIncludedProtocols(String protocols) {
this.includedProtocols = protocols;
} | @Test
public void testSetIncludedProtocols() throws Exception {
configurable.setSupportedProtocols(new String[] { "A", "B", "C", "D" });
configuration.setIncludedProtocols("A,B ,C, D");
configuration.configure(configurable);
assertTrue(Arrays.equals(new String[] { "A", "B", "C", "D" }, configurable.getEnabledProtocols()));
} |
public static Optional<String> extractOrganizationName(String groupName) {
return extractRegexGroupIfMatches(groupName, 1);
} | @Test
public void extractOrganizationName_whenNameIsCorrect_extractsOrganizationName() {
assertThat(GithubTeamConverter.extractOrganizationName("Org1/team1")).isEqualTo(Optional.of("Org1"));
assertThat(GithubTeamConverter.extractOrganizationName("Org1/team1/team2")).isEqualTo(Optional.of("Org1"));
} |
static URI cleanUrl(String originalUrl, String host) {
return URI.create(originalUrl.replaceFirst(host, ""));
} | @Test
void cleanUrlWithMatchingHostAndPart() throws IOException {
URI uri = RibbonClient.cleanUrl("http://questions/questions/answer/123", "questions");
assertThat(uri.toString()).isEqualTo("http:///questions/answer/123");
} |
@Override
public ClusterInfo clusterGetClusterInfo() {
RFuture<Map<String, String>> f = executorService.readAsync((String)null, StringCodec.INSTANCE, RedisCommands.CLUSTER_INFO);
Map<String, String> entries = syncFuture(f);
Properties props = new Properties();
for (Entry<String, String> entry : entries.entrySet()) {
props.setProperty(entry.getKey(), entry.getValue());
}
return new ClusterInfo(props);
} | @Test
public void testClusterGetClusterInfo() {
ClusterInfo info = connection.clusterGetClusterInfo();
assertThat(info.getSlotsFail()).isEqualTo(0);
assertThat(info.getSlotsOk()).isEqualTo(MasterSlaveConnectionManager.MAX_SLOT);
assertThat(info.getSlotsAssigned()).isEqualTo(MasterSlaveConnectionManager.MAX_SLOT);
} |
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
try {
final DownloadBuilder builder = new DbxUserFilesRequests(session.getClient(file))
.downloadBuilder(containerService.getKey(file)).withRev(file.attributes().getVersionId());
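// For resumed transfers, request only the remaining byte range.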
if(status.isAppend()) {
final HttpRange range = HttpRange.withStatus(status);
builder.range(range.getStart());
}
final DbxDownloader<FileMetadata> downloader = builder.start();
return downloader.getInputStream();
}
catch(DbxException e) {
throw new DropboxExceptionMappingService().map("Download {0} failed", e, file);
}
} | @Test
public void testReadCloseReleaseEntity() throws Exception {
final TransferStatus status = new TransferStatus();
final byte[] content = RandomUtils.nextBytes(32769);
final TransferStatus writeStatus = new TransferStatus();
writeStatus.setLength(content.length);
final Path directory = new DropboxDirectoryFeature(session).mkdir(
new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final Path test = new Path(directory, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final DropboxWriteFeature writer = new DropboxWriteFeature(session);
final HttpResponseOutputStream<Metadata> out = writer.write(test, writeStatus, new DisabledConnectionCallback());
assertNotNull(out);
new StreamCopier(writeStatus, writeStatus).transfer(new ByteArrayInputStream(content), out);
final CountingInputStream in = new CountingInputStream(new DropboxReadFeature(session).read(test, status, new DisabledConnectionCallback()));
in.close();
assertEquals(0L, in.getByteCount(), 0L);
new DropboxDeleteFeature(session).delete(Arrays.asList(test, directory), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
@Override
public UpsertTarget create(ExpressionEvalContext evalContext) {
return new HazelcastJsonUpsertTarget();
} | @Test
public void test_create() {
HazelcastJsonUpsertTargetDescriptor descriptor = HazelcastJsonUpsertTargetDescriptor.INSTANCE;
// when
UpsertTarget target = descriptor.create(mock());
// then
assertThat(target).isInstanceOf(HazelcastJsonUpsertTarget.class);
} |
@Override
public int run(String launcherVersion, String launcherMd5, ServerUrlGenerator urlGenerator, Map<String, String> env, Map<String, String> context) {
int exitValue = 0;
LOG.info("Agent launcher is version: {}", CurrentGoCDVersion.getInstance().fullVersion());
String[] command = new String[]{};
try {
AgentBootstrapperArgs bootstrapperArgs = AgentBootstrapperArgs.fromProperties(context);
ServerBinaryDownloader agentDownloader = new ServerBinaryDownloader(urlGenerator, bootstrapperArgs);
agentDownloader.downloadIfNecessary(DownloadableFile.AGENT);
ServerBinaryDownloader pluginZipDownloader = new ServerBinaryDownloader(urlGenerator, bootstrapperArgs);
pluginZipDownloader.downloadIfNecessary(DownloadableFile.AGENT_PLUGINS);
ServerBinaryDownloader tfsImplDownloader = new ServerBinaryDownloader(urlGenerator, bootstrapperArgs);
tfsImplDownloader.downloadIfNecessary(DownloadableFile.TFS_IMPL);
command = agentInvocationCommand(agentDownloader.getMd5(), launcherMd5, pluginZipDownloader.getMd5(), tfsImplDownloader.getMd5(),
env, context, agentDownloader.getExtraProperties());
LOG.info("Launching Agent with command: {}", join(command, " "));
Process agent = invoke(command);
// The next lines prevent the child process from blocking on Windows
AgentOutputAppender agentOutputAppenderForStdErr = new AgentOutputAppender(GO_AGENT_STDERR_LOG);
AgentOutputAppender agentOutputAppenderForStdOut = new AgentOutputAppender(GO_AGENT_STDOUT_LOG);
if (new SystemEnvironment().consoleOutToStdout()) {
agentOutputAppenderForStdErr.writeTo(AgentOutputAppender.Outstream.STDERR);
agentOutputAppenderForStdOut.writeTo(AgentOutputAppender.Outstream.STDOUT);
}
agent.getOutputStream().close();
AgentConsoleLogThread stdErrThd = new AgentConsoleLogThread(agent.getErrorStream(), agentOutputAppenderForStdErr);
stdErrThd.start();
AgentConsoleLogThread stdOutThd = new AgentConsoleLogThread(agent.getInputStream(), agentOutputAppenderForStdOut);
stdOutThd.start();
Shutdown shutdownHook = new Shutdown(agent);
Runtime.getRuntime().addShutdownHook(shutdownHook);
try {
exitValue = agent.waitFor();
} catch (InterruptedException ie) {
LOG.error("Agent was interrupted. Terminating agent and respawning. {}", ie.toString());
agent.destroy();
} finally {
removeShutdownHook(shutdownHook);
stdErrThd.stopAndJoin();
stdOutThd.stopAndJoin();
}
} catch (Exception e) {
LOG.error("Exception while executing command: {} - {}", join(command, " "), e.toString());
exitValue = EXCEPTION_OCCURRED;
}
return exitValue;
} | @Test
@DisabledOnOs(OS.WINDOWS)
public void shouldNotDownloadPluginsZipIfPresent() throws Exception {
TEST_AGENT_PLUGINS.copyTo(AGENT_PLUGINS_ZIP);
AGENT_PLUGINS_ZIP.setLastModified(System.currentTimeMillis() - 10 * 1000);
long expectedModifiedDate = AGENT_PLUGINS_ZIP.lastModified();
AgentProcessParentImpl bootstrapper = createBootstrapper(new ArrayList<>());
bootstrapper.run("launcher_version", "bar", getURLGenerator(), Map.of(AgentProcessParentImpl.AGENT_STARTUP_ARGS, "foo bar baz with%20some%20space"), context());
assertThat(Downloader.AGENT_PLUGINS_ZIP.lastModified(), is(expectedModifiedDate));
} |
@Override
public boolean match(final String rule) {
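// Accept rules of the form list|[...] whose bracket contents are not blank.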
boolean matches = rule.matches("^list\\|\\[.+]$");
if (matches) {
String candidateData = rule.substring(6, rule.length() - 1);
return !candidateData.matches("^\\s+$");
}
return false;
} | @Test
public void match() {
assertTrue(generator.match("list|[shen\\,yu,gate\\,way]"));
assertTrue(generator.match("list|[shenyu,gateway]"));
assertFalse(generator.match("list|[shenyu,gateway"));
assertFalse(generator.match("list|[]"));
assertFalse(generator.match("list|[ ]"));
} |
@Override
public void acknowledge(OutputBufferId bufferId, long sequenceId)
{
checkState(!Thread.holdsLock(this), "Can not acknowledge pages while holding a lock on this");
requireNonNull(bufferId, "bufferId is null");
getBuffer(bufferId).acknowledgePages(sequenceId);
} | @Test
public void testAcknowledge()
{
OutputBuffers outputBuffers = createInitialEmptyOutputBuffers(ARBITRARY);
ArbitraryOutputBuffer buffer = createArbitraryBuffer(outputBuffers, sizeOfPages(10));
// add three items
for (int i = 0; i < 3; i++) {
addPage(buffer, createPage(i));
}
outputBuffers = createInitialEmptyOutputBuffers(ARBITRARY).withBuffer(FIRST, BROADCAST_PARTITION_ID);
// add a queue
buffer.setOutputBuffers(outputBuffers);
assertQueueState(buffer, 3, FIRST, 0, 0);
// get the three elements
assertBufferResultEquals(TYPES, getBufferResult(buffer, FIRST, 0, sizeOfPages(10), NO_WAIT), bufferResult(0, createPage(0), createPage(1), createPage(2)));
// acknowledge pages 0 and 1
acknowledgeBufferResult(buffer, FIRST, 2);
// only page 2 is not removed
assertQueueState(buffer, 0, FIRST, 1, 2);
// acknowledge page 2
acknowledgeBufferResult(buffer, FIRST, 3);
// nothing left
assertQueueState(buffer, 0, FIRST, 0, 3);
// acknowledging more pages will fail
try {
acknowledgeBufferResult(buffer, FIRST, 4);
}
catch (IllegalArgumentException e) {
assertEquals(e.getMessage(), "Invalid sequence id");
}
// fill the buffer
for (int i = 3; i < 6; i++) {
addPage(buffer, createPage(i));
}
assertQueueState(buffer, 3, FIRST, 0, 3);
// getting new pages will again acknowledge the previously acknowledged pages but this is ok
buffer.get(FIRST, 3, sizeOfPages(1)).cancel(true);
assertQueueState(buffer, 2, FIRST, 1, 3);
} |
@Override
public boolean isSatisfied(int index, TradingRecord tradingRecord) {
final boolean satisfied = cross.getValue(index);
traceIsSatisfied(index, satisfied);
return satisfied;
} | @Test
public void repeatedlyHittingThresholdAfterCrossUp() {
Indicator<Num> evaluatedIndicator = new FixedDecimalIndicator(series, 9, 10, 11, 10, 11, 10, 11);
CrossedUpIndicatorRule rule = new CrossedUpIndicatorRule(evaluatedIndicator, 10);
assertFalse(rule.isSatisfied(0));
assertFalse(rule.isSatisfied(1));
assertTrue("first cross up", rule.isSatisfied(2));
assertFalse(rule.isSatisfied(3));
assertFalse(rule.isSatisfied(4));
assertFalse(rule.isSatisfied(5));
assertFalse(rule.isSatisfied(6));
} |
public void putUserProperty(final String name, final String value) {
if (MessageConst.STRING_HASH_SET.contains(name)) {
throw new RuntimeException(String.format(
"The Property<%s> is used by system, input another please", name));
}
if (value == null || value.trim().isEmpty()
|| name == null || name.trim().isEmpty()) {
throw new IllegalArgumentException(
"The name or value of property can not be null or blank string!"
);
}
this.putProperty(name, value);
} | @Test(expected = IllegalArgumentException.class)
public void putUserNullValuePropertyWithException() throws Exception {
Message m = new Message();
m.putUserProperty("prop1", null);
} |
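As a usage sketch: user properties accept any non-blank custom key/value pair, while reserved names are rejected ("TAGS" is used here on the assumption that it is among the system names in MessageConst.STRING_HASH_SET, as in current RocketMQ):

Message msg = new Message();
msg.putUserProperty("color", "red");    // ok: custom key and non-blank value
// msg.putUserProperty("color", " ");   // IllegalArgumentException: blank value
// msg.putUserProperty("TAGS", "red");  // RuntimeException: property name reserved by the system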
@Override
public void run() {
while (!schedulerState.isShuttingDown()) {
if (!schedulerState.isPaused()) {
try {
toRun.run();
} catch (Throwable e) {
LOG.error("Unhandled exception. Will keep running.", e);
schedulerListeners.onSchedulerEvent(SchedulerEventType.UNEXPECTED_ERROR);
}
}
try {
waitBetweenRuns.doWait();
} catch (InterruptedException interruptedException) {
if (schedulerState.isShuttingDown()) {
LOG.debug("Thread '{}' interrupted due to shutdown.", Thread.currentThread().getName());
} else {
LOG.error("Unexpected interruption of thread. Will keep running.", interruptedException);
schedulerListeners.onSchedulerEvent(SchedulerEventType.UNEXPECTED_ERROR);
}
}
}
} | @Test
public void should_wait_on_ok_execution() {
Assertions.assertTimeoutPreemptively(
Duration.ofSeconds(1),
() -> {
runUntilShutdown.run();
assertThat(countingWaiter.counter, is(2));
});
} |
NewExternalIssue mapResult(String driverName, @Nullable Result.Level ruleSeverity, @Nullable Result.Level ruleSeverityForNewTaxonomy, Result result) {
NewExternalIssue newExternalIssue = sensorContext.newExternalIssue();
newExternalIssue.type(DEFAULT_TYPE);
newExternalIssue.engineId(driverName);
newExternalIssue.severity(toSonarQubeSeverity(ruleSeverity));
newExternalIssue.ruleId(requireNonNull(result.getRuleId(), "No ruleId found for issue thrown by driver " + driverName));
newExternalIssue.cleanCodeAttribute(DEFAULT_CLEAN_CODE_ATTRIBUTE);
newExternalIssue.addImpact(DEFAULT_SOFTWARE_QUALITY, toSonarQubeImpactSeverity(ruleSeverityForNewTaxonomy));
mapLocations(result, newExternalIssue);
return newExternalIssue;
} | @Test
public void mapResult_mapsSimpleFieldsCorrectly() {
NewExternalIssue newExternalIssue = resultMapper.mapResult(DRIVER_NAME, WARNING, WARNING, result);
verify(newExternalIssue).type(RuleType.VULNERABILITY);
verify(newExternalIssue).engineId(DRIVER_NAME);
verify(newExternalIssue).severity(DEFAULT_SEVERITY);
verify(newExternalIssue).ruleId(RULE_ID);
} |
public static TableFactoryHelper createTableFactoryHelper(
DynamicTableFactory factory, DynamicTableFactory.Context context) {
return new TableFactoryHelper(factory, context);
} | @Test
void testFactoryHelperWithDeprecatedOptions() {
final Map<String, String> options = new HashMap<>();
options.put("deprecated-target", "MyTarget");
options.put("fallback-buffer-size", "1000");
options.put("value.format", "test-format");
options.put("value.test-format.deprecated-delimiter", "|");
options.put("value.test-format.fallback-fail-on-missing", "true");
final FactoryUtil.TableFactoryHelper helper =
FactoryUtil.createTableFactoryHelper(
new TestDynamicTableFactory(),
FactoryMocks.createTableContext(SCHEMA, options));
helper.discoverDecodingFormat(
DeserializationFormatFactory.class, TestDynamicTableFactory.VALUE_FORMAT);
helper.validate();
} |
@Override
public ObjectNode encode(Criterion criterion, CodecContext context) {
EncodeCriterionCodecHelper encoder = new EncodeCriterionCodecHelper(criterion, context);
return encoder.encode();
} | @Test
public void matchIPv6NDTargetAddressTest() {
Criterion criterion =
Criteria.matchIPv6NDTargetAddress(
Ip6Address.valueOf("1111:2222::"));
ObjectNode result = criterionCodec.encode(criterion, context);
assertThat(result, matchesCriterion(criterion));
} |
@Override
@DSTransactional // multiple data sources: use @DSTransactional to guarantee the local transaction and the data-source switching
public void updateTenant(TenantSaveReqVO updateReqVO) {
// validate that the tenant exists
TenantDO tenant = validateUpdateTenant(updateReqVO.getId());
// validate that the tenant name is not duplicated
validTenantNameDuplicate(updateReqVO.getName(), updateReqVO.getId());
// validate that the tenant website is not duplicated
validTenantWebsiteDuplicate(updateReqVO.getWebsite(), updateReqVO.getId());
// validate that the tenant package is not disabled
TenantPackageDO tenantPackage = tenantPackageService.validTenantPackage(updateReqVO.getPackageId());
// update the tenant
TenantDO updateObj = BeanUtils.toBean(updateReqVO, TenantDO.class);
tenantMapper.updateById(updateObj);
// if the package changed, update the permissions of its roles
if (ObjectUtil.notEqual(tenant.getPackageId(), updateReqVO.getPackageId())) {
updateTenantRoleMenu(tenant.getId(), tenantPackage.getMenuIds());
}
} | @Test
public void testUpdateTenant_success() {
// mock data
TenantDO dbTenant = randomPojo(TenantDO.class, o -> o.setStatus(randomCommonStatus()));
tenantMapper.insert(dbTenant); // @Sql: insert an existing record first
// prepare parameters
TenantSaveReqVO reqVO = randomPojo(TenantSaveReqVO.class, o -> {
o.setId(dbTenant.getId()); // set the ID being updated
o.setStatus(randomCommonStatus());
o.setWebsite(randomString());
});
// mock the tenant package
TenantPackageDO tenantPackage = randomPojo(TenantPackageDO.class,
o -> o.setMenuIds(asSet(200L, 201L)));
when(tenantPackageService.validTenantPackage(eq(reqVO.getPackageId()))).thenReturn(tenantPackage);
// mock all roles
RoleDO role100 = randomPojo(RoleDO.class, o -> o.setId(100L).setCode(RoleCodeEnum.TENANT_ADMIN.getCode()));
role100.setTenantId(dbTenant.getId());
RoleDO role101 = randomPojo(RoleDO.class, o -> o.setId(101L));
role101.setTenantId(dbTenant.getId());
when(roleService.getRoleList()).thenReturn(asList(role100, role101));
// mock the permissions of each role
when(permissionService.getRoleMenuListByRoleId(eq(101L))).thenReturn(asSet(201L, 202L));
// invoke
tenantService.updateTenant(reqVO);
// verify the update is correct
TenantDO tenant = tenantMapper.selectById(reqVO.getId()); // fetch the latest record
assertPojoEquals(reqVO, tenant);
// verify the role-menu assignment
verify(permissionService).assignRoleMenu(eq(100L), eq(asSet(200L, 201L)));
verify(permissionService).assignRoleMenu(eq(101L), eq(asSet(201L)));
} |
public static DynamicVoter parse(String input) {
input = input.trim();
int atIndex = input.indexOf("@");
if (atIndex < 0) {
throw new IllegalArgumentException("No @ found in dynamic voter string.");
}
if (atIndex == 0) {
throw new IllegalArgumentException("Invalid @ at beginning of dynamic voter string.");
}
String idString = input.substring(0, atIndex);
int nodeId;
try {
nodeId = Integer.parseInt(idString);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Failed to parse node id in dynamic voter string.", e);
}
if (nodeId < 0) {
throw new IllegalArgumentException("Invalid negative node id " + nodeId +
" in dynamic voter string.");
}
input = input.substring(atIndex + 1);
if (input.isEmpty()) {
throw new IllegalArgumentException("No hostname found after node id.");
}
String host;
if (input.startsWith("[")) {
int endBracketIndex = input.indexOf("]");
if (endBracketIndex < 0) {
throw new IllegalArgumentException("Hostname began with left bracket, but no right " +
"bracket was found.");
}
host = input.substring(1, endBracketIndex);
input = input.substring(endBracketIndex + 1);
} else {
int endColonIndex = input.indexOf(":");
if (endColonIndex < 0) {
throw new IllegalArgumentException("No colon following hostname could be found.");
}
host = input.substring(0, endColonIndex);
input = input.substring(endColonIndex);
}
if (!input.startsWith(":")) {
throw new IllegalArgumentException("Port section must start with a colon.");
}
input = input.substring(1);
int endColonIndex = input.indexOf(":");
if (endColonIndex < 0) {
throw new IllegalArgumentException("No colon following port could be found.");
}
String portString = input.substring(0, endColonIndex);
int port;
try {
port = Integer.parseInt(portString);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Failed to parse port in dynamic voter string.", e);
}
if (port < 0 || port > 65535) {
throw new IllegalArgumentException("Invalid port " + port + " in dynamic voter string.");
}
String directoryIdString = input.substring(endColonIndex + 1);
Uuid directoryId;
try {
directoryId = Uuid.fromString(directoryIdString);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Failed to parse directory ID in dynamic voter string.", e);
}
return new DynamicVoter(directoryId, nodeId, host, port);
} | @Test
public void testParseDynamicVoterWithNoColonFollowingPort() {
assertEquals("No colon following port could be found.",
assertThrows(IllegalArgumentException.class,
() -> DynamicVoter.parse("5@[2001:4860:4860::8888]:8020__0IZ-0DRNazJ49kCZ1EMQ")).
getMessage());
} |
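Putting the branches together: a well-formed input follows nodeId@host:port:directoryId, where an IPv6 host must be bracketed and the directory ID is a base64-encoded Uuid. A sketch using the string from the negative test above with the missing colon restored:

DynamicVoter voter = DynamicVoter.parse("5@[2001:4860:4860::8888]:8020:__0IZ-0DRNazJ49kCZ1EMQ");
// nodeId = 5, host = "2001:4860:4860::8888", port = 8020, directoryId = __0IZ-0DRNazJ49kCZ1EMQ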
public static <T> Global<T> globally() {
return new Global<>();
} | @Test
@Category(NeedsRunner.class)
public void testGloballyWithSchemaAggregateFnNestedFields() {
Collection<OuterAggregate> elements =
ImmutableList.of(
OuterAggregate.of(Aggregate.of(1, 1, 2)),
OuterAggregate.of(Aggregate.of(2, 1, 3)),
OuterAggregate.of(Aggregate.of(3, 2, 4)),
OuterAggregate.of(Aggregate.of(4, 2, 5)));
PCollection<Row> aggregate =
pipeline
.apply(Create.of(elements))
.apply(
Group.<OuterAggregate>globally()
.aggregateField("inner.field1", Sum.ofLongs(), "field1_sum")
.aggregateField("inner.field3", Sum.ofIntegers(), "field3_sum")
.aggregateField("inner.field1", Top.largestLongsFn(1), "field1_top"));
Schema aggregateSchema =
Schema.builder()
.addInt64Field("field1_sum")
.addInt32Field("field3_sum")
.addArrayField("field1_top", FieldType.INT64)
.build();
Row expectedRow = Row.withSchema(aggregateSchema).addValues(10L, 14).addArray(4L).build();
PAssert.that(aggregate).containsInAnyOrder(expectedRow);
pipeline.run();
} |
@Override public V get(Object o) {
if (o == null) return null; // null keys are not allowed
int i = arrayIndexOfKey(o);
return i != -1 ? value(i + 1) : null;
} | @Test void someNullValues() {
array[0] = "1";
array[1] = "one";
array[2] = "2";
array[3] = "two";
array[4] = "3";
Map<String, String> map = builder.build(array);
assertSize(map, 2);
assertBaseCase(map);
assertThat(map).containsOnly(
entry("1", "one"),
entry("2", "two")
);
assertThat(map).hasToString(
"UnsafeArrayMap{1=one,2=two}"
);
assertThat(map.get("1")).isEqualTo("one");
assertThat(map.get("2")).isEqualTo("two");
assertThat(map.get("3")).isNull();
} |
protected Set<MediaType> getSupportedMediaTypesForInput() {
return mSupportedMediaTypesUnmodifiable;
} | @Test
public void testReportsMediaTypesAndClearsOnFinish() {
simulateFinishInputFlow();
EditorInfo info = createEditorInfoTextWithSuggestionsForSetUp();
simulateOnStartInputFlow(false, info);
Assert.assertTrue(mPackageScope.getSupportedMediaTypesForInput().isEmpty());
simulateFinishInputFlow();
Assert.assertTrue(mPackageScope.getSupportedMediaTypesForInput().isEmpty());
EditorInfoCompat.setContentMimeTypes(info, new String[] {"image/jpg"});
simulateOnStartInputFlow(false, info);
Assert.assertTrue(mPackageScope.getSupportedMediaTypesForInput().contains(MediaType.Image));
Assert.assertFalse(mPackageScope.getSupportedMediaTypesForInput().contains(MediaType.Gif));
simulateFinishInputFlow();
Assert.assertTrue(mPackageScope.getSupportedMediaTypesForInput().isEmpty());
EditorInfoCompat.setContentMimeTypes(info, new String[] {"image/gif"});
simulateOnStartInputFlow(false, info);
Assert.assertTrue(mPackageScope.getSupportedMediaTypesForInput().contains(MediaType.Image));
Assert.assertTrue(mPackageScope.getSupportedMediaTypesForInput().contains(MediaType.Gif));
simulateFinishInputFlow();
Assert.assertTrue(mPackageScope.getSupportedMediaTypesForInput().isEmpty());
EditorInfoCompat.setContentMimeTypes(info, new String[] {"image/menny_image"});
simulateOnStartInputFlow(false, info);
Assert.assertTrue(mPackageScope.getSupportedMediaTypesForInput().contains(MediaType.Image));
Assert.assertFalse(mPackageScope.getSupportedMediaTypesForInput().contains(MediaType.Gif));
simulateFinishInputFlow();
Assert.assertTrue(mPackageScope.getSupportedMediaTypesForInput().isEmpty());
} |
@Override
public int read() throws IOException {
int read = input.read();
if (read != -1) fp++;
return read;
} | @Test
public void read() throws IOException {
ss.open();
byte[] buff = new byte[10];
int n = ss.read(buff);
byte[] temp = Arrays.copyOfRange(text, 0, buff.length);
assertArrayEquals(temp, buff);
assertEquals(buff.length, n);
} |
@SuppressWarnings("squid:S1181")
// Yes we really do want to catch Throwable
@Override
public V apply(U input) {
int retryAttempts = 0;
while (true) {
try {
return baseFunction.apply(input);
} catch (Throwable t) {
if (!exceptionClass.isAssignableFrom(t.getClass()) || retryAttempts == maxRetries) {
Throwables.throwIfUnchecked(t);
throw new RetriesExceededException(t);
}
Tools.randomDelay(maxDelayBetweenRetries);
retryAttempts++;
}
}
} | @Test(expected = RetryableException.class)
public void testFailureAfterOneRetry() {
new RetryingFunction<>(this::succeedAfterTwoFailures, RetryableException.class, 1, 10).apply(null);
} |
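Conversely, the call succeeds once the retry budget covers the failures — a sketch assuming the test helper fails exactly twice before returning, as its name suggests:

// two retries (three attempts in total) are enough for a function that fails twice
new RetryingFunction<>(this::succeedAfterTwoFailures, RetryableException.class, 2, 10).apply(null);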
@Override
public ScheduledReporter build(MetricRegistry registry) {
final File directory = requireNonNull(getFile(), "File is not set");
final boolean creation = directory.mkdirs();
if (!creation && !directory.exists()) {
throw new RuntimeException("Failed to create" + directory.getAbsolutePath());
}
return CsvReporter.forRegistry(registry)
.convertDurationsTo(getDurationUnit())
.convertRatesTo(getRateUnit())
.filter(getFilter())
.formatFor(getLocale())
.build(directory);
} | @Test
void directoryCreatedOnStartup() throws Exception {
File dir = new File("metrics");
dir.delete();
assertThat(dir).doesNotExist();
MetricsFactory config = factory.build(new ResourceConfigurationSourceProvider(), "yaml/metrics.yml");
MetricRegistry metricRegistry = new MetricRegistry();
config.configure(new LifecycleEnvironment(metricRegistry), metricRegistry);
assertThat(dir).exists();
} |
public static DeploymentDescriptor merge(List<DeploymentDescriptor> descriptorHierarchy, MergeMode mode) {
if (descriptorHierarchy == null || descriptorHierarchy.isEmpty()) {
throw new IllegalArgumentException("Descriptor hierarchy list cannot be empty");
}
if (descriptorHierarchy.size() == 1) {
return descriptorHierarchy.get(0);
}
Deque<DeploymentDescriptor> stack = new ArrayDeque<>();
descriptorHierarchy.forEach(stack::push);
while (stack.size() > 1) {
stack.push(merge(stack.pop(), stack.pop(), mode));
}
// last element from the stack is the one that contains all merged descriptors
return stack.pop();
} | @Test
public void testDeploymentDesciptorMergeOverrideEmpty() {
DeploymentDescriptor primary = new DeploymentDescriptorImpl("org.jbpm.domain");
primary.getBuilder()
.addMarshalingStrategy(new ObjectModel("org.jbpm.test.CustomStrategy", new Object[]{"param2"}))
.setLimitSerializationClasses(true);
assertThat(primary).isNotNull();
assertThat(primary.getPersistenceUnit()).isEqualTo("org.jbpm.domain");
assertThat(primary.getAuditPersistenceUnit()).isEqualTo("org.jbpm.domain");
assertThat(primary.getAuditMode()).isEqualTo(AuditMode.JPA);
assertThat(primary.getPersistenceMode()).isEqualTo(PersistenceMode.JPA);
assertThat(primary.getRuntimeStrategy()).isEqualTo(RuntimeStrategy.SINGLETON);
assertThat(primary.getMarshallingStrategies().size()).isEqualTo(1);
assertThat(primary.getConfiguration().size()).isEqualTo(0);
assertThat(primary.getEnvironmentEntries().size()).isEqualTo(0);
assertThat(primary.getEventListeners().size()).isEqualTo(0);
assertThat(primary.getGlobals().size()).isEqualTo(0);
assertThat(primary.getTaskEventListeners().size()).isEqualTo(0);
assertThat(primary.getWorkItemHandlers().size()).isEqualTo(0);
assertThat(primary.getLimitSerializationClasses()).isTrue();
DeploymentDescriptorImpl secondary = new DeploymentDescriptorImpl("org.jbpm.domain");
secondary.getBuilder()
.auditMode(AuditMode.JMS)
.persistenceMode(PersistenceMode.JPA)
.persistenceUnit(null)
.auditPersistenceUnit("");
assertThat(secondary).isNotNull();
assertThat(secondary.getPersistenceUnit()).isEqualTo(null);
assertThat(secondary.getAuditPersistenceUnit()).isEqualTo("");
assertThat(secondary.getAuditMode()).isEqualTo(AuditMode.JMS);
assertThat(secondary.getPersistenceMode()).isEqualTo(PersistenceMode.JPA);
assertThat(secondary.getRuntimeStrategy()).isEqualTo(RuntimeStrategy.SINGLETON);
assertThat(secondary.getMarshallingStrategies().size()).isEqualTo(0);
assertThat(secondary.getConfiguration().size()).isEqualTo(0);
assertThat(secondary.getEnvironmentEntries().size()).isEqualTo(0);
assertThat(secondary.getEventListeners().size()).isEqualTo(0);
assertThat(secondary.getGlobals().size()).isEqualTo(0);
assertThat(secondary.getTaskEventListeners().size()).isEqualTo(0);
assertThat(secondary.getWorkItemHandlers().size()).isEqualTo(0);
secondary.setLimitSerializationClasses(null);
assertThat(secondary.getLimitSerializationClasses()).isNull();
// and now let's merge them
DeploymentDescriptor outcome = DeploymentDescriptorMerger.merge(primary, secondary, MergeMode.OVERRIDE_EMPTY);
assertThat(outcome).isNotNull();
assertThat(outcome.getPersistenceUnit()).isEqualTo("org.jbpm.domain");
assertThat(outcome.getAuditPersistenceUnit()).isEqualTo("org.jbpm.domain");
assertThat(outcome.getAuditMode()).isEqualTo(AuditMode.JMS);
assertThat(outcome.getPersistenceMode()).isEqualTo(PersistenceMode.JPA);
assertThat(outcome.getRuntimeStrategy()).isEqualTo(RuntimeStrategy.SINGLETON);
assertThat(outcome.getMarshallingStrategies().size()).isEqualTo(1);
assertThat(outcome.getConfiguration().size()).isEqualTo(0);
assertThat(outcome.getEnvironmentEntries().size()).isEqualTo(0);
assertThat(outcome.getEventListeners().size()).isEqualTo(0);
assertThat(outcome.getGlobals().size()).isEqualTo(0);
assertThat(outcome.getTaskEventListeners().size()).isEqualTo(0);
assertThat(outcome.getWorkItemHandlers().size()).isEqualTo(0);
assertThat(outcome.getLimitSerializationClasses()).isTrue();
} |
public List<String> toBatchTaskArgumentString() {
List<String> res = new ArrayList<>(Arrays.asList(
CLUSTER_LIMIT_FLAG, String.valueOf(mClusterLimit),
CLUSTER_START_DELAY_FLAG, mClusterStartDelay,
BENCH_TIMEOUT, mBenchTimeout,
START_MS_FLAG, String.valueOf(mStartMs)));
if (!mProfileAgent.isEmpty()) {
res.add(PROFILE_AGENT);
res.add(mProfileAgent);
}
if (!mId.equals(DEFAULT_TASK_ID)) {
res.add(ID_FLAG);
res.add(mId);
}
if (!mIndex.equals(DEFAULT_TASK_ID)) {
res.add(INDEX_FLAG);
res.add(mIndex);
}
if (!mJavaOpts.isEmpty()) {
for (String s : mJavaOpts) {
res.add(JAVA_OPT_FLAG);
res.add(s);
}
}
if (mCluster) {
res.add(CLUSTER_FLAG);
}
if (mDistributed) {
res.add(DISTRIBUTED_FLAG);
}
if (mInProcess) {
res.add(IN_PROCESS_FLAG);
}
if (mHelp) {
res.add(HELP_FLAG);
}
return res;
} | @Test
public void parseParameterToArgumentWithoutJavaOPT() {
String[] inputArgs = new String[]{
// keys with values
"--cluster-limit", "4",
"--cluster-start-delay", "5s",
"--id", "TestID",
// keys with no values
"--cluster",
};
JCommander jc = new JCommander(this);
jc.parse(inputArgs);
List<String> outputArgs = mBaseParameter.toBatchTaskArgumentString();
// validate the --java-opt
assertFalse(outputArgs.contains(JAVA_OPT_FLAG));
} |
@Override
public final void getSize(@NonNull SizeReadyCallback cb) {
sizeDeterminer.getSize(cb);
} | @Test
public void testDoesNotAddMultipleListenersIfMultipleCallbacksAreAdded() {
SizeReadyCallback cb1 = mock(SizeReadyCallback.class);
SizeReadyCallback cb2 = mock(SizeReadyCallback.class);
target.getSize(cb1);
target.getSize(cb2);
view.getViewTreeObserver().dispatchOnPreDraw();
// assertThat(shadowObserver.getPreDrawListeners()).hasSize(1);
} |
@Override
public boolean trySplitAtPosition(Long splitOffset) {
return trySplitAtPosition(splitOffset.longValue());
} | @Test
public void testSplitAtOffsetFailsIfUnstarted() throws Exception {
OffsetRangeTracker tracker = new OffsetRangeTracker(100, 200);
assertFalse(tracker.trySplitAtPosition(150));
} |
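The refusal above is purely because the tracker is unstarted; after the first record is claimed, the same split succeeds. A sketch assuming Beam's tryReturnRecordAt(isAtSplitPoint, recordStart) to start the tracker:

OffsetRangeTracker tracker = new OffsetRangeTracker(100, 200);
tracker.tryReturnRecordAt(true, 110); // starts the tracker at offset 110
tracker.trySplitAtPosition(150);      // now returns true: 110 < 150 < 200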
private String serializeDelete(SeaTunnelRow row) {
String key = keyExtractor.apply(row);
Map<String, String> deleteMetadata = createMetadata(row, key);
String deleteMetadataStr;
try {
deleteMetadataStr = objectMapper.writeValueAsString(deleteMetadata);
} catch (JsonProcessingException e) {
throw CommonError.jsonOperationError(
"Elasticsearch", "deleteMetadata:" + deleteMetadata.toString(), e);
}
/**
 * format example: { "delete" : {"_index" : "${your_index}", "_id" : "${your_document_id}"} }
 */
return new StringBuilder()
.append("{ \"delete\" :")
.append(deleteMetadataStr)
.append(" }")
.toString();
} | @Test
public void testSerializeDelete() {
String index = "st_index";
String primaryKey = "id";
Map<String, Object> confMap = new HashMap<>();
confMap.put(SinkConfig.INDEX.key(), index);
confMap.put(SinkConfig.PRIMARY_KEYS.key(), Arrays.asList(primaryKey));
ReadonlyConfig pluginConf = ReadonlyConfig.fromMap(confMap);
ElasticsearchClusterInfo clusterInfo =
ElasticsearchClusterInfo.builder().clusterVersion("8.0.0").build();
IndexInfo indexInfo = new IndexInfo(index, pluginConf);
SeaTunnelRowType schema =
new SeaTunnelRowType(
new String[] {primaryKey, "name"},
new SeaTunnelDataType[] {STRING_TYPE, STRING_TYPE});
final ElasticsearchRowSerializer serializer =
new ElasticsearchRowSerializer(clusterInfo, indexInfo, schema);
String id = "0001";
String name = "jack";
SeaTunnelRow row = new SeaTunnelRow(new Object[] {id, name});
row.setRowKind(RowKind.DELETE);
String expected = "{ \"delete\" :{\"_index\":\"" + index + "\",\"_id\":\"" + id + "\"} }";
String upsertStr = serializer.serializeRow(row);
Assertions.assertEquals(expected, upsertStr);
} |
static <T> Supplier<T> decorateSupplier(Observation observation, Supplier<T> supplier) {
return () -> observation.observe(supplier);
} | @Test
public void shouldDecorateSupplier() throws Throwable {
given(helloWorldService.returnHelloWorld()).willReturn("Hello world");
Supplier<String> timedSupplier = Observations
.decorateSupplier(observation, helloWorldService::returnHelloWorld);
timedSupplier.get();
assertThatObservationWasStartedAndFinishedWithoutErrors();
then(helloWorldService).should().returnHelloWorld();
} |
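As a usage sketch (Micrometer's Observation API; the observation name "hello.call" is illustrative):

ObservationRegistry registry = ObservationRegistry.create();
Observation observation = Observation.createNotStarted("hello.call", registry);
Supplier<String> decorated = Observations.decorateSupplier(observation, () -> "Hello world");
String result = decorated.get(); // the supplier runs inside observation.observe(...), so it is timed start-to-stop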
@Override
public String getMethod() {
return PATH;
} | @Test
public void testSetMyCommandsWithEmptyCommands() {
SetMyCommands setMyCommands = SetMyCommands
.builder()
.languageCode("en")
.scope(BotCommandScopeDefault.builder().build())
.build();
assertEquals("setMyCommands", setMyCommands.getMethod());
Throwable thrown = assertThrows(TelegramApiValidationException.class, setMyCommands::validate);
assertEquals("Commands parameter can't be empty", thrown.getMessage());
} |
@Override
public int size(Version version) {
if (version == Version.INET) {
return ipv4Tree.size();
}
if (version == Version.INET6) {
return ipv6Tree.size();
}
return 0;
} | @Test
public void testSize() {
assertThat("Incorrect size of radix tree for IPv4 maps",
radixTree.size(IpAddress.Version.INET), is(4));
assertThat("Incorrect size of radix tree for IPv6 maps",
radixTree.size(IpAddress.Version.INET6), is(6));
} |
@PostMapping("/batchEnabled")
@RequiresPermissions("system:dict:disable")
public ShenyuAdminResult batchEnabled(@Valid @RequestBody final BatchCommonDTO batchCommonDTO) {
final Integer result = shenyuDictService.enabled(batchCommonDTO.getIds(), batchCommonDTO.getEnabled());
return ShenyuAdminResult.success("batch enable success", result);
} | @Test
public void testBatchEnabled() throws Exception {
BatchCommonDTO batchCommonDTO = new BatchCommonDTO();
batchCommonDTO.setEnabled(false);
batchCommonDTO.setIds(Collections.singletonList("123"));
given(this.shenyuDictService.enabled(batchCommonDTO.getIds(), batchCommonDTO.getEnabled())).willReturn(1);
this.mockMvc.perform(MockMvcRequestBuilders.post("/shenyu-dict/batchEnabled")
.contentType(MediaType.APPLICATION_JSON)
.content(GsonUtils.getInstance().toJson(batchCommonDTO)))
.andExpect(status().isOk())
.andExpect(jsonPath("$.message", is("batch enable success")))
.andReturn();
} |
@Override
public PackageMaterialPluginInfo pluginInfoFor(GoPluginDescriptor descriptor) {
RepositoryConfiguration repositoryConfiguration = extension.getRepositoryConfiguration(descriptor.id());
com.thoughtworks.go.plugin.api.material.packagerepository.PackageConfiguration packageConfiguration = extension.getPackageConfiguration(descriptor.id());
if (repositoryConfiguration == null) {
throw new RuntimeException(format("Plugin[%s] returned null repository configuration", descriptor.id()));
}
if (packageConfiguration == null) {
throw new RuntimeException(format("Plugin[%s] returned null package configuration", descriptor.id()));
}
PluggableInstanceSettings pluginSettingsAndView = getPluginSettingsAndView(descriptor, extension);
return new PackageMaterialPluginInfo(descriptor, new PluggableInstanceSettings(packageRepoConfigurations(repositoryConfiguration)), new PluggableInstanceSettings(packageRepoConfigurations(packageConfiguration)), pluginSettingsAndView);
} | @Test
public void shouldThrowAnExceptionWhenRepoConfigProvidedByPluginIsNull() {
GoPluginDescriptor descriptor = GoPluginDescriptor.builder().id("plugin1").build();
when(extension.getRepositoryConfiguration("plugin1")).thenReturn(null);
assertThatThrownBy(() -> new PackageMaterialPluginInfoBuilder(extension).pluginInfoFor(descriptor))
.hasMessageContaining("Plugin[plugin1] returned null repository configuration");
} |
public static DateTime convertToDateTime(@Nonnull Object value) {
if (value instanceof DateTime) {
return (DateTime) value;
}
if (value instanceof Date) {
return new DateTime(value, DateTimeZone.UTC);
} else if (value instanceof ZonedDateTime) {
final DateTimeZone dateTimeZone = DateTimeZone.forTimeZone(TimeZone.getTimeZone(((ZonedDateTime) value).getZone()));
return new DateTime(Date.from(((ZonedDateTime) value).toInstant()), dateTimeZone);
} else if (value instanceof OffsetDateTime) {
return new DateTime(Date.from(((OffsetDateTime) value).toInstant()), DateTimeZone.UTC);
} else if (value instanceof LocalDateTime) {
final LocalDateTime localDateTime = (LocalDateTime) value;
final ZoneId defaultZoneId = ZoneId.systemDefault();
final ZoneOffset offset = defaultZoneId.getRules().getOffset(localDateTime);
return new DateTime(Date.from(localDateTime.toInstant(offset)));
} else if (value instanceof LocalDate) {
final LocalDate localDate = (LocalDate) value;
final LocalDateTime localDateTime = localDate.atStartOfDay();
final ZoneId defaultZoneId = ZoneId.systemDefault();
final ZoneOffset offset = defaultZoneId.getRules().getOffset(localDateTime);
return new DateTime(Date.from(localDateTime.toInstant(offset)));
} else if (value instanceof Instant) {
return new DateTime(Date.from((Instant) value), DateTimeZone.UTC);
} else if (value instanceof String) {
return ES_DATE_FORMAT_FORMATTER.parseDateTime((String) value);
} else {
throw new IllegalArgumentException("Value of invalid type <" + value.getClass().getSimpleName() + "> provided");
}
} | @Test
void convertFromDateTime() {
final DateTime input = new DateTime(2021, 8, 19, 12, 0, DateTimeZone.UTC);
final DateTime output = DateTimeConverter.convertToDateTime(input);
assertThat(output).isEqualTo(input);
} |
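The converter accepts the java.time types as well; Instant values, for example, are pinned to UTC per the branch above:

DateTime fromInstant = DateTimeConverter.convertToDateTime(Instant.ofEpochMilli(0L));
// 1970-01-01T00:00:00.000Z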
@Override
public ClientDetailsEntity saveNewClient(ClientDetailsEntity client) {
if (client.getId() != null) { // if it's not null, it's already been saved, this is an error
throw new IllegalArgumentException("Tried to save a new client with an existing ID: " + client.getId());
}
if (client.getRegisteredRedirectUri() != null) {
for (String uri : client.getRegisteredRedirectUri()) {
if (blacklistedSiteService.isBlacklisted(uri)) {
throw new IllegalArgumentException("Client URI is blacklisted: " + uri);
}
}
}
// assign a random clientid if it's empty
// NOTE: don't assign a random client secret without asking, since public clients have no secret
if (Strings.isNullOrEmpty(client.getClientId())) {
client = generateClientId(client);
}
// make sure that clients with the "refresh_token" grant type have the "offline_access" scope, and vice versa
ensureRefreshTokenConsistency(client);
// make sure we don't have both a JWKS and a JWKS URI
ensureKeyConsistency(client);
// check consistency when using HEART mode
checkHeartMode(client);
// timestamp this to right now
client.setCreatedAt(new Date());
// check the sector URI
checkSectorIdentifierUri(client);
ensureNoReservedScopes(client);
ClientDetailsEntity c = clientRepository.saveClient(client);
statsService.resetCache();
return c;
} | @Test(expected = IllegalArgumentException.class)
public void heartMode_nonLocalHttpRedirect() {
Mockito.when(config.isHeartMode()).thenReturn(true);
ClientDetailsEntity client = new ClientDetailsEntity();
Set<String> grantTypes = new LinkedHashSet<>();
grantTypes.add("authorization_code");
grantTypes.add("refresh_token");
client.setGrantTypes(grantTypes);
client.setTokenEndpointAuthMethod(AuthMethod.PRIVATE_KEY);
client.setRedirectUris(Sets.newHashSet("http://foo.bar/"));
client.setJwksUri("https://foo.bar/jwks");
service.saveNewClient(client);
} |
public void copy(final IntHashSet that)
{
if (values.length != that.values.length)
{
throw new IllegalArgumentException("cannot copy object: masks not equal");
}
System.arraycopy(that.values, 0, values, 0, values.length);
this.sizeOfArrayValues = that.sizeOfArrayValues;
this.containsMissingValue = that.containsMissingValue;
} | @Test
void copiesOtherIntHashSet()
{
addTwoElements(testSet);
final IntHashSet other = new IntHashSet(100);
other.copy(testSet);
assertContainsElements(other);
} |
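The guard at the top of copy only accepts sets whose backing arrays match in length — a sketch, assuming the two capacities below round to different power-of-two table sizes:

IntHashSet small = new IntHashSet(100);
IntHashSet large = new IntHashSet(2048);
small.copy(large); // throws IllegalArgumentException: cannot copy object: masks not equal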
@SuppressWarnings("unchecked")
@Override
public void punctuate(final ProcessorNode<?, ?, ?, ?> node,
final long timestamp,
final PunctuationType type,
final Punctuator punctuator) {
if (processorContext.currentNode() != null) {
throw new IllegalStateException(String.format("%sCurrent node is not null", logPrefix));
}
// when punctuating, we need to preserve the timestamp (this can be either system time or event time)
// while the other record context fields are set to dummy values: null topic, -1 partition, -1 offset and empty headers
final ProcessorRecordContext recordContext = new ProcessorRecordContext(
timestamp,
-1L,
-1,
null,
new RecordHeaders()
);
updateProcessorContext(node, time.milliseconds(), recordContext);
if (log.isTraceEnabled()) {
log.trace("Punctuating processor {} with timestamp {} and punctuation type {}", node.name(), timestamp, type);
}
try {
maybeMeasureLatency(() -> punctuator.punctuate(timestamp), time, punctuateLatencySensor);
} catch (final TimeoutException timeoutException) {
if (!eosEnabled) {
throw timeoutException;
} else {
record = null;
throw new TaskCorruptedException(Collections.singleton(id));
}
} catch (final FailedProcessingException e) {
throw createStreamsException(node.name(), e.getCause());
} catch (final TaskCorruptedException | TaskMigratedException e) {
throw e;
} catch (final RuntimeException processingException) {
final ErrorHandlerContext errorHandlerContext = new DefaultErrorHandlerContext(
null,
recordContext.topic(),
recordContext.partition(),
recordContext.offset(),
recordContext.headers(),
node.name(),
id()
);
final ProcessingExceptionHandler.ProcessingHandlerResponse response;
try {
response = Objects.requireNonNull(
processingExceptionHandler.handle(errorHandlerContext, null, processingException),
"Invalid ProcessingExceptionHandler response."
);
} catch (final RuntimeException fatalUserException) {
log.error(
"Processing error callback failed after processing error for record: {}",
errorHandlerContext,
processingException
);
throw new FailedProcessingException("Fatal user code error in processing error callback", fatalUserException);
}
if (response == ProcessingExceptionHandler.ProcessingHandlerResponse.FAIL) {
log.error("Processing exception handler is set to fail upon" +
" a processing error. If you would rather have the streaming pipeline" +
" continue after a processing error, please set the " +
PROCESSING_EXCEPTION_HANDLER_CLASS_CONFIG + " appropriately.");
throw createStreamsException(node.name(), processingException);
} else {
droppedRecordsSensor.record();
}
} finally {
processorContext.setCurrentNode(null);
}
} | @Test
public void punctuateShouldNotHandleTaskCorruptedExceptionAndThrowItAsIs() {
when(stateManager.taskId()).thenReturn(taskId);
when(stateManager.taskType()).thenReturn(TaskType.ACTIVE);
task = createStatelessTask(createConfig(
AT_LEAST_ONCE,
"100",
LogAndFailExceptionHandler.class.getName(),
LogAndContinueProcessingExceptionHandler.class.getName()
));
final Set<TaskId> tasksIds = new HashSet<>();
tasksIds.add(new TaskId(0, 0));
final TaskCorruptedException expectedException = new TaskCorruptedException(tasksIds, new InvalidOffsetException("Invalid offset") {
@Override
public Set<TopicPartition> partitions() {
return new HashSet<>(Collections.singletonList(new TopicPartition("topic", 0)));
}
});
final TaskCorruptedException taskCorruptedException = assertThrows(
TaskCorruptedException.class,
() -> task.punctuate(processorStreamTime, 1, PunctuationType.STREAM_TIME, timestamp -> {
throw expectedException;
})
);
assertEquals(expectedException, taskCorruptedException);
} |
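For context, application code reaches this path by registering a punctuator through the public Processor API; the internal punctuate above is what eventually invokes the callback:

context.schedule(Duration.ofSeconds(10), PunctuationType.STREAM_TIME,
        timestamp -> System.out.println("punctuate at " + timestamp));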
@VisibleForTesting
public void validateDictDataValueUnique(Long id, String dictType, String value) {
DictDataDO dictData = dictDataMapper.selectByDictTypeAndValue(dictType, value);
if (dictData == null) {
return;
}
// if id is null, there is no need to check whether the duplicate belongs to the same dict data record
if (id == null) {
throw exception(DICT_DATA_VALUE_DUPLICATE);
}
if (!dictData.getId().equals(id)) {
throw exception(DICT_DATA_VALUE_DUPLICATE);
}
} | @Test
public void testValidateDictDataValueUnique_valueDuplicateForCreate() {
// prepare parameters
String dictType = randomString();
String value = randomString();
// mock data
dictDataMapper.insert(randomDictDataDO(o -> {
o.setDictType(dictType);
o.setValue(value);
}));
// invoke and assert the exception
assertServiceException(() -> dictDataService.validateDictDataValueUnique(null, dictType, value),
DICT_DATA_VALUE_DUPLICATE);
} |
@VisibleForTesting
void validateRoleDuplicate(String name, String code, Long id) {
// 0. the super-admin role may not be created
if (RoleCodeEnum.isSuperAdmin(code)) {
throw exception(ROLE_ADMIN_CODE_ERROR, code);
}
// 1. check whether the name is already used by another role
RoleDO role = roleMapper.selectByName(name);
if (role != null && !role.getId().equals(id)) {
throw exception(ROLE_NAME_DUPLICATE, name);
}
// 2. check whether a role with the same code exists
if (!StringUtils.hasText(code)) {
return;
}
// check whether the code is already used by another role
role = roleMapper.selectByCode(code);
if (role != null && !role.getId().equals(id)) {
throw exception(ROLE_CODE_DUPLICATE, code);
}
} | @Test
public void testValidateRoleDuplicate_codeDuplicate() {
// mock data
RoleDO roleDO = randomPojo(RoleDO.class, o -> o.setCode("code"));
roleMapper.insert(roleDO);
// prepare parameters
String code = "code";
// invoke and assert the exception
assertServiceException(() -> roleService.validateRoleDuplicate(randomString(), code, null),
ROLE_CODE_DUPLICATE, code);
} |
@Override
public <VR> KTable<Windowed<K>, VR> aggregate(final Initializer<VR> initializer,
final Aggregator<? super K, ? super V, VR> aggregator) {
return aggregate(initializer, aggregator, Materialized.with(keySerde, null));
} | @Test
public void shouldThrowNullPointerOnMaterializedAggregateIfInitializerIsNull() {
assertThrows(NullPointerException.class, () -> windowedStream.aggregate(null, MockAggregator.TOSTRING_ADDER, Materialized.as("store")));
} |
public static Set<Result> anaylze(String log) {
Set<Result> results = new HashSet<>();
for (Rule rule : Rule.values()) {
Matcher matcher = rule.pattern.matcher(log);
if (matcher.find()) {
results.add(new Result(rule, log, matcher));
}
}
return results;
} | @Test
public void securityException() throws IOException {
CrashReportAnalyzer.Result result = findResultByRule(
CrashReportAnalyzer.anaylze(loadLog("/crash-report/security.txt")),
CrashReportAnalyzer.Rule.FILE_CHANGED);
assertEquals("assets/minecraft/texts/splashes.txt", result.getMatcher().group("file"));
} |
public HollowOrdinalIterator findKeysWithPrefix(String prefix) {
TST current;
HollowOrdinalIterator it;
do {
current = prefixIndexVolatile;
it = current.findKeysWithPrefix(prefix);
} while (current != this.prefixIndexVolatile);
return it;
} | @Test
public void testMovieMapReference() throws Exception {
Map<Integer, String> idActorMap = new HashMap<>();
idActorMap.put(1, "Keanu Reeves");
idActorMap.put(2, "Laurence Fishburne");
idActorMap.put(3, "Carrie-Anne Moss");
MovieMapReference movieMapReference = new MovieMapReference(1, 1999, "The Matrix", idActorMap);
objectMapper.add(movieMapReference);
StateEngineRoundTripper.roundTripSnapshot(writeStateEngine, readStateEngine);
HollowPrefixIndex prefixIndex = new HollowPrefixIndex(readStateEngine, "MovieMapReference", "idActorNameMap.value");
Set<Integer> ordinals = toSet(prefixIndex.findKeysWithPrefix("kea"));
Assert.assertTrue(ordinals.size() == 1);
} |
@Deprecated
public static <T> Task<T> withSideEffect(final Task<T> parent, final Task<?> sideEffect) {
return parent.withSideEffect(t -> sideEffect);
} | @Test
public void testSideEffectCancelled() throws InterruptedException {
// this task will not complete.
Task<String> settableTask = new BaseTask<String>() {
@Override
protected Promise<? extends String> run(Context context) throws Exception {
return Promises.settable();
}
};
Task<String> fastTask = new BaseTask<String>() {
@Override
protected Promise<? extends String> run(Context context) throws Exception {
return Promises.value("fast");
}
};
Task<String> withSideEffect = settableTask.withSideEffect(x -> fastTask);
// add 10 ms delay so that we can cancel settableTask reliably
getEngine().run(delayedValue("value", 10, TimeUnit.MILLISECONDS).andThen(withSideEffect));
assertTrue(settableTask.cancel(new Exception("task cancelled")));
withSideEffect.await();
fastTask.await(10, TimeUnit.MILLISECONDS);
assertTrue(withSideEffect.isDone());
assertFalse(fastTask.isDone());
} |
@Override
public ExecuteContext doAfter(ExecuteContext context) {
if (isHasMethodLoadSpringFactories()) {
// only inject once when the newer version goes through loadSpringFactories; that path is cached and therefore more efficient, so a single injection suffices
if (IS_INJECTED.compareAndSet(false, true)) {
injectConfigurations(context.getResult());
}
} else {
final Object rawFactoryType = context.getArguments()[0];
if (rawFactoryType instanceof Class) {
final Class<?> factoryType = (Class<?>) rawFactoryType;
injectConfigurationsWithLowVersion(context.getResult(), factoryType.getName());
}
}
return context;
} | @Test
public void doAfterHighVersion() throws NoSuchMethodException, IllegalAccessException {
final SpringFactoriesInterceptor interceptor = new SpringFactoriesInterceptor();
hasMethodLoadSpringFactoriesFiled.set(interceptor, Boolean.TRUE);
ExecuteContext executeContext = ExecuteContext.forMemberMethod(this, this.getClass().getMethod("doAfterHighVersion"),
null, null, null);
// highVersionTesting
final Map<String, List<String>> cache = new HashMap<>();
cache.put(BOOTSTRAP_FACTORY_NAME, new ArrayList<>());
cache.put(ENABLE_AUTO_CONFIGURATION_FACTORY_NAME, new ArrayList<>());
executeContext.changeResult(cache);
executeContext = interceptor.doAfter(executeContext);
final Map<String, List<String>> result = (Map<String, List<String>>) executeContext.getResult();
Assert.assertTrue(result.get(BOOTSTRAP_FACTORY_NAME).contains(PROPERTY_LOCATOR_CLASS)
&& result.get(ENABLE_AUTO_CONFIGURATION_FACTORY_NAME).contains(EVENT_PUBLISHER_CLASS));
} |
public void setSortOrder(@Nullable SortOrder sortOrder) {
if (sortOrder != null && sortOrder.scope != SortOrder.Scope.INTRA_FEED) {
throw new IllegalArgumentException("The specified sortOrder " + sortOrder
+ " is invalid. Only those with INTRA_FEED scope are allowed.");
}
this.sortOrder = sortOrder;
} | @Test
public void testSetSortOrder_NullAllowed() {
original.setSortOrder(null); // should be okay
} |
public static Serializer getDefault() {
return SERIALIZER_MAP.get(defaultSerializer);
} | @Test
void testListSerialize() {
Serializer serializer = SerializeFactory.getDefault();
List<Integer> logsList = new ArrayList<>();
for (int i = 0; i < 4; i++) {
logsList.add(i);
}
byte[] data = serializer.serialize(logsList);
assertNotEquals(0, data.length);
ArrayList<Integer> list = serializer.deserialize(data, ArrayList.class);
System.out.println(list);
} |
public static Builder builder() {
return new Builder();
} | @Test
public void testBuilderDoesNotCreateInvalidObjects() {
List<String> listContainingNull = Lists.newArrayList("a", null, null);
// updated
assertThatThrownBy(
() -> UpdateNamespacePropertiesResponse.builder().addUpdated((String) null).build())
.isInstanceOf(NullPointerException.class)
.hasMessage("Invalid updated property: null");
assertThatThrownBy(
() ->
UpdateNamespacePropertiesResponse.builder().addUpdated((List<String>) null).build())
.isInstanceOf(NullPointerException.class)
.hasMessage("Invalid updated property list: null");
assertThatThrownBy(
() ->
UpdateNamespacePropertiesResponse.builder().addUpdated(listContainingNull).build())
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Invalid updated property: null");
// removed
assertThatThrownBy(
() -> UpdateNamespacePropertiesResponse.builder().addRemoved((String) null).build())
.isInstanceOf(NullPointerException.class)
.hasMessage("Invalid removed property: null");
assertThatThrownBy(
() ->
UpdateNamespacePropertiesResponse.builder().addRemoved((List<String>) null).build())
.isInstanceOf(NullPointerException.class)
.hasMessage("Invalid removed property list: null");
assertThatThrownBy(
() ->
UpdateNamespacePropertiesResponse.builder().addRemoved(listContainingNull).build())
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Invalid removed property: null");
// missing
assertThatThrownBy(
() -> UpdateNamespacePropertiesResponse.builder().addMissing((String) null).build())
.isInstanceOf(NullPointerException.class)
.hasMessage("Invalid missing property: null");
assertThatThrownBy(
() ->
UpdateNamespacePropertiesResponse.builder().addMissing((List<String>) null).build())
.isInstanceOf(NullPointerException.class)
.hasMessage("Invalid missing property list: null");
assertThatThrownBy(
() ->
UpdateNamespacePropertiesResponse.builder().addMissing(listContainingNull).build())
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Invalid missing property: null");
} |
static String removeWhiteSpaceFromJson(String json) {
//reparse the JSON to ensure that all whitespace formatting is uniform
String flattened = FLAT_GSON.toJson(JsonParser.parseString(json));
return flattened;
} | @Test
public void removeWhiteSpaceFromJson_removesNewLines() {
String input = "{\n\"a\":123,\n\"b\":456\n}";
String output = "{\"a\":123,\"b\":456}";
assertThat(
removeWhiteSpaceFromJson(input),
is(output)
);
} |
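Only structural whitespace is affected, because the string is parsed and re-serialized; whitespace inside string values survives:

removeWhiteSpaceFromJson("{ \"msg\" : \"hello world\" }"); // {"msg":"hello world"}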
public static <E, K, D> Map<K, D> groupBy(Collection<E> collection, Function<E, K> key, Collector<E, ?, D> downstream) {
if (CollUtil.isEmpty(collection)) {
return MapUtil.newHashMap(0);
}
return groupBy(collection, key, downstream, false);
} | @Test
public void testGroupBy() {
// groupBy is extracted as the common part of all the earlier group functions; it stays closer to the native JDK collectors and is more flexible
// null-argument test
Map<Long, List<Student>> map = CollStreamUtil.groupBy(null, Student::getTermId, Collectors.toList());
assertEquals(map, Collections.EMPTY_MAP);
// empty-collection test
List<Student> list = new ArrayList<>();
map = CollStreamUtil.groupBy(list, Student::getTermId, Collectors.toList());
assertEquals(map, Collections.EMPTY_MAP);
// add elements
list.add(new Student(1, 1, 1, "张三"));
list.add(new Student(1, 2, 1, "李四"));
list.add(new Student(2, 2, 1, "王五"));
// group by termId first, then compare by classId to find the Student with the largest classId; an Optional is returned
Map<Long, Optional<Student>> longOptionalMap = CollStreamUtil.groupBy(list, Student::getTermId, Collectors.maxBy(Comparator.comparing(Student::getClassId)));
//noinspection OptionalGetWithoutIsPresent
assertEquals("李四", longOptionalMap.get(1L).get().getName());
// group by termId first, then collect into a Map<studentId, name>
Map<Long, HashMap<Long, String>> groupThen = CollStreamUtil.groupBy(list, Student::getTermId, Collector.of(HashMap::new, (m, v) -> m.put(v.getStudentId(), v.getName()), (l, r) -> l));
assertEquals(
MapUtil.builder()
.put(1L, MapUtil.builder().put(1L, "李四").build())
.put(2L, MapUtil.builder().put(1L, "王五").build())
.build(),
groupThen);
// in short, if you need further operations after grouping, this is the method to reach for
// it also handles null values gracefully, for example:
List<Student> students = Arrays.asList(null, null, new Student(1, 1, 1, "张三"),
new Student(1, 2, 1, "李四"));
Map<Long, List<Student>> termIdStudentsMap = CollStreamUtil.groupBy(students, Student::getTermId, Collectors.toList());
Map<Long, List<Student>> termIdStudentsCompareMap = new HashMap<>();
termIdStudentsCompareMap.put(null, Arrays.asList(null, null));
termIdStudentsCompareMap.put(1L, Arrays.asList(new Student(1L, 1, 1, "张三"), new Student(1L, 2, 1, "李四")));
assertEquals(termIdStudentsCompareMap, termIdStudentsMap);
Map<Long, Long> termIdCountMap = CollStreamUtil.groupBy(students, Student::getTermId, Collectors.counting());
Map<Long, Long> termIdCountCompareMap = new HashMap<>();
termIdCountCompareMap.put(null, 2L);
termIdCountCompareMap.put(1L, 2L);
assertEquals(termIdCountCompareMap, termIdCountMap);
} |
public static <T> Iterator<T> iterator(Class<T> expectedType, String factoryId, ClassLoader classLoader) throws Exception {
Iterator<Class<T>> classIterator = classIterator(expectedType, factoryId, classLoader);
return new NewInstanceIterator<>(classIterator);
} | @Test
public void loadServicesSimpleDifferentThreadContextClassLoader() throws Exception {
Class<ServiceLoaderTestInterface> type = ServiceLoaderTestInterface.class;
String factoryId = "com.hazelcast.ServiceLoaderTestInterface";
Thread current = Thread.currentThread();
ClassLoader tccl = current.getContextClassLoader();
current.setContextClassLoader(new URLClassLoader(new URL[0]));
Set<ServiceLoaderTestInterface> implementations = new HashSet<>();
Iterator<ServiceLoaderTestInterface> iterator = ServiceLoader.iterator(type, factoryId, null);
while (iterator.hasNext()) {
implementations.add(iterator.next());
}
current.setContextClassLoader(tccl);
assertEquals(1, implementations.size());
} |
@Override
public boolean isDataNodeAvailable(long dataNodeId) {
// DataNode and ComputeNode is exchangeable in SHARED_DATA mode
return availableID2ComputeNode.containsKey(dataNodeId);
} | @Test
public void testCollocationBackendSelectorWithSharedDataWorkerProvider() {
HostBlacklist blockList = SimpleScheduler.getHostBlacklist();
blockList.hostBlacklist.clear();
SystemInfoService sysInfo = GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo();
List<Long> availList = prepareNodeAliveAndBlock(sysInfo, blockList);
int bucketNum = 6;
OlapScanNode scanNode = newOlapScanNode(10, bucketNum);
final Map<Integer, List<Long>> bucketSeqToBackends = ImmutableMap.of(
0, ImmutableList.of(1L),
1, ImmutableList.of(2L),
2, ImmutableList.of(3L),
3, ImmutableList.of(4L),
4, ImmutableList.of(5L),
5, ImmutableList.of(6L)
);
scanNode.bucketSeq2locations = genBucketSeq2Locations(bucketSeqToBackends, 3);
List<TScanRangeLocations> scanLocations = generateScanRangeLocations(id2AllNodes, 10, bucketNum);
WorkerProvider provider = newWorkerProvider();
int nonAvailNum = 0;
for (TScanRangeLocations locations : scanLocations) {
for (TScanRangeLocation location : locations.getLocations()) {
if (!provider.isDataNodeAvailable(location.getBackend_id())) {
++nonAvailNum;
}
}
}
// the scanRangeLocations contains non-avail locations
Assert.assertTrue(nonAvailNum > 0);
{ // normal case
FragmentScanRangeAssignment assignment = new FragmentScanRangeAssignment();
ColocatedBackendSelector.Assignment colAssignment = new ColocatedBackendSelector.Assignment(scanNode, 1);
ColocatedBackendSelector selector =
new ColocatedBackendSelector(scanNode, assignment, colAssignment, false, provider, 1);
// the computation will not fail even though there are non-available locations
ExceptionChecker.expectThrowsNoException(selector::computeScanRangeAssignment);
// check the assignment, should be all in the availList
for (long id : assignment.keySet()) {
Assert.assertTrue(availList.contains(id));
}
}
{ // make only one node available, the final assignment will be all on the single available node
ComputeNode availNode = id2AllNodes.get(availList.get(0));
WorkerProvider provider1 = new DefaultSharedDataWorkerProvider(ImmutableMap.copyOf(id2AllNodes),
ImmutableMap.of(availNode.getId(), availNode));
FragmentScanRangeAssignment assignment = new FragmentScanRangeAssignment();
ColocatedBackendSelector.Assignment colAssignment = new ColocatedBackendSelector.Assignment(scanNode, 1);
ColocatedBackendSelector selector =
new ColocatedBackendSelector(scanNode, assignment, colAssignment, false, provider1, 1);
// the computation will not fail even though there are non-available locations
ExceptionChecker.expectThrowsNoException(selector::computeScanRangeAssignment);
Assert.assertEquals(1, assignment.size());
// check the assignment, should be all in the availList
for (long id : assignment.keySet()) {
Assert.assertEquals(availNode.getId(), id);
}
}
{ // make no node available. Exception throws
WorkerProvider providerNoAvailNode = new DefaultSharedDataWorkerProvider(ImmutableMap.copyOf(id2AllNodes),
ImmutableMap.of());
FragmentScanRangeAssignment assignment = new FragmentScanRangeAssignment();
ColocatedBackendSelector.Assignment colAssignment = new ColocatedBackendSelector.Assignment(scanNode, 1);
ColocatedBackendSelector selector =
new ColocatedBackendSelector(scanNode, assignment, colAssignment, false, providerNoAvailNode, 1);
Assert.assertThrows(NonRecoverableException.class, selector::computeScanRangeAssignment);
}
} |
public static String convertToCamelCase(String str) {
StringBuilder result = new StringBuilder();
if (isNullOrWhitespaceOnly(str)) {
return "";
} else if (!str.contains("_") && !str.contains(" ")) {
return str.toLowerCase();
}
String[] camels = str.split("[_| ]");
for (String camel : camels) {
if (camel.isEmpty()) {
continue;
}
result.append(camel.substring(0, 1).toUpperCase());
result.append(camel.substring(1).toLowerCase());
}
StringBuilder ret = new StringBuilder(result.substring(0, 1).toLowerCase());
ret.append(result.substring(1, result.toString().length()));
return ret.toString();
} | @Test
public void testConvertToCamelCase() {
String str = "AA_BB CC";
String camelCaseStr = StringUtils.convertToCamelCase(str);
Assert.assertEquals("aaBbCc", camelCaseStr);
} |
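Two edge branches are worth keeping in mind: input without any separator is merely lower-cased, and blank input yields the empty string:

StringUtils.convertToCamelCase("MyColumn");  // "mycolumn" — no '_' or ' ', so only lower-cased
StringUtils.convertToCamelCase("my_column"); // "myColumn"
StringUtils.convertToCamelCase("");          // ""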
@Override
public DbSession openSession(boolean batch) {
if (!CACHING_ENABLED.get()) {
return myBatis.openSession(batch);
}
if (batch) {
return new NonClosingDbSession(batchDbSession.get().get());
}
return new NonClosingDbSession(regularDbSession.get().get());
} | @Test
void openSession_without_caching_always_returns_a_new_batch_session_when_parameter_is_true() {
DbSession[] expected = {mock(DbSession.class), mock(DbSession.class), mock(DbSession.class), mock(DbSession.class)};
when(myBatis.openSession(true))
.thenReturn(expected[0])
.thenReturn(expected[1])
.thenReturn(expected[2])
.thenReturn(expected[3])
.thenThrow(oneCallTooMuch());
assertThat(Arrays.stream(expected).map(ignored -> underTest.openSession(true)).toList())
.containsExactly(expected);
} |
public ClusterStateBundle.FeedBlock inferContentClusterFeedBlockOrNull(ContentCluster cluster) {
if (!feedBlockEnabled) {
return null;
}
var nodeInfos = cluster.getNodeInfos();
var exhaustions = enumerateNodeResourceExhaustionsAcrossAllNodes(nodeInfos);
if (exhaustions.isEmpty()) {
return null;
}
int maxDescriptions = 3;
String description = exhaustions.stream()
.limit(maxDescriptions)
.map(NodeResourceExhaustion::toExhaustionAddedDescription)
.collect(Collectors.joining(", "));
if (exhaustions.size() > maxDescriptions) {
description += String.format(" (... and %d more)", exhaustions.size() - maxDescriptions);
}
description = decoratedMessage(cluster, description);
// FIXME we currently will trigger a cluster state recomputation even if the number of
// exhaustions is greater than what is returned as part of the description. Though at
// that point, cluster state recomputations will be the least of your worries...!
return ClusterStateBundle.FeedBlock.blockedWith(description, exhaustions);
} | @Test
void node_must_be_available_in_reported_state_to_trigger_feed_block() {
var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8)));
var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.79)),
forNode(2, usage("disk", 0.6), usage("memory", 0.6)));
cf.reportStorageNodeState(1, State.DOWN);
cf.reportStorageNodeState(2, State.DOWN);
var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster());
assertNull(feedBlock);
} |
public static void addNumImmutableMemTableMetric(final StreamsMetricsImpl streamsMetrics,
final RocksDBMetricContext metricContext,
final Gauge<BigInteger> valueProvider) {
addMutableMetric(
streamsMetrics,
metricContext,
valueProvider,
NUMBER_OF_IMMUTABLE_MEMTABLES,
NUMBER_OF_IMMUTABLE_MEMTABLES_DESCRIPTION
);
} | @Test
public void shouldAddNumImmutableMemTablesMetric() {
final String name = "num-immutable-mem-table";
final String description = "Number of immutable memtables that have not yet been flushed";
runAndVerifyMutableMetric(
name,
description,
() -> RocksDBMetrics.addNumImmutableMemTableMetric(streamsMetrics, ROCKSDB_METRIC_CONTEXT, VALUE_PROVIDER)
);
} |
public CreateStreamCommand createStreamCommand(final KsqlStructuredDataOutputNode outputNode) {
return new CreateStreamCommand(
outputNode.getSinkName().get(),
outputNode.getSchema(),
outputNode.getTimestampColumn(),
outputNode.getKsqlTopic().getKafkaTopicName(),
Formats.from(outputNode.getKsqlTopic()),
outputNode.getKsqlTopic().getKeyFormat().getWindowInfo(),
Optional.of(outputNode.getOrReplace()),
Optional.of(false)
);
} | @Test
public void shouldThrowIfStreamExists() {
// Given:
final CreateStream ddlStatement =
new CreateStream(SOME_NAME, STREAM_ELEMENTS, false, false, withProperties, false);
// When:
final Exception e = assertThrows(
KsqlException.class, () -> createSourceFactory
.createStreamCommand(ddlStatement, ksqlConfig));
// Then:
assertThat(e.getMessage(),
containsString("Cannot add stream 'bob': A stream with the same name already exists"));
} |
@Override
public ByteBuf readBytes(int length) {
checkReadableBytes(length);
if (length == 0) {
return Unpooled.EMPTY_BUFFER;
}
ByteBuf buf = alloc().buffer(length, maxCapacity);
buf.writeBytes(this, readerIndex, length);
readerIndex += length;
return buf;
} | @Test
public void testReadBytesAfterRelease4() {
final ByteBuf buffer = buffer(8);
try {
assertThrows(IllegalReferenceCountException.class, new Executable() {
@Override
public void execute() {
releasedBuffer().readBytes(buffer, 0, 1);
}
});
} finally {
buffer.release();
}
} |
public static boolean isGE(String base, String other) {
return isGE(base, other, false);
} | @Test
public void testGE() throws Exception {
assertTrue(isGE("2.15.0", "2.15.0"));
assertTrue(isGE("2.15.0", "2.15.1"));
assertTrue(isGE("2.15.0", "2.16.0"));
assertTrue(isGE("2.15.0", "2.16-SNAPSHOT"));
assertTrue(isGE("2.15.0", "2.16-foo"));
assertFalse(isGE("2.15.0", "2.14.3"));
assertFalse(isGE("2.15.0", "2.13.0"));
assertFalse(isGE("2.15.0", "2.13.1"));
assertFalse(isGE("2.15.0", "2.14-SNAPSHOT"));
assertFalse(isGE("2.15.0", "2.14-foo"));
assertTrue(isGE("3.0.0", "3.0.0"));
assertTrue(isGE("3.0.0", "3.1.1"));
assertTrue(isGE("3.1.0", "3.2.0"));
assertTrue(isGE("3.3.0", "3.3.0-SNAPSHOT"));
assertTrue(isGE("3.4.0", "3.4.0-SNAPSHOT"));
assertTrue(isGE("3.3.0", "3.3.0.jdk11-800001-0000001"));
} |
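Note the argument order: isGE(base, other) asks whether other is at least base, so the base version comes first:

isGE("3.20.0", "3.21.0"); // true: 3.21.0 >= 3.20.0
isGE("3.20.0", "3.19.0"); // false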
public static Set<Result> anaylze(String log) {
Set<Result> results = new HashSet<>();
for (Rule rule : Rule.values()) {
Matcher matcher = rule.pattern.matcher(log);
if (matcher.find()) {
results.add(new Result(rule, log, matcher));
}
}
return results;
} | @Test
public void fabricConflicts() throws IOException {
CrashReportAnalyzer.Result result = findResultByRule(
CrashReportAnalyzer.anaylze(loadLog("/logs/fabric-mod-conflict.txt")),
CrashReportAnalyzer.Rule.MOD_RESOLUTION_CONFLICT);
assertEquals("phosphor", result.getMatcher().group("sourcemod"));
assertEquals("{starlight @ [*]}", result.getMatcher().group("destmod"));
} |
@Override
public void stop() throws Exception {
LOG.info("Stopping DefaultLeaderRetrievalService.");
synchronized (lock) {
if (!running) {
return;
}
running = false;
}
leaderRetrievalDriver.close();
} | @Test
void testErrorIsIgnoredAfterBeingStop() throws Exception {
new Context() {
{
runTest(
() -> {
final Exception testException = new Exception("test exception");
leaderRetrievalService.stop();
testingLeaderRetrievalDriver.onFatalError(testException);
assertThat(testingListener.getError()).isNull();
});
}
};
} |
@Override
public void write(DataOutput out) throws IOException {
throw new UnsupportedOperationException("LazyHCatRecord is intended to wrap"
+ " an object/object inspector as a HCatRecord "
+ "- it does not need to be written to a DataOutput.");
} | @Test
public void testWrite() throws Exception {
HCatRecord r = new LazyHCatRecord(getHCatRecord(), getObjectInspector());
boolean sawException = false;
try {
r.write(null);
} catch (UnsupportedOperationException uoe) {
sawException = true;
}
Assert.assertTrue(sawException);
} |