focal_method | test_case |
---|---|
public static String readFile(String path, String fileName) {
File file = openFile(path, fileName);
if (file.exists()) {
return readFile(file);
}
return null;
} | @Test
void testReadFile() {
assertNotNull(DiskUtils.readFile(testFile));
} |
@Override
public RangeBoundary getHighBoundary() {
return highBoundary;
} | @Test
void getHighBoundary() {
final Range.RangeBoundary highBoundary = Range.RangeBoundary.CLOSED;
final RangeImpl rangeImpl = new RangeImpl(Range.RangeBoundary.OPEN, 10, 15, highBoundary);
assertThat(rangeImpl.getHighBoundary()).isEqualTo(highBoundary);
} |
@Override
protected Collection<Address> getPossibleAddresses() {
Iterable<DiscoveryNode> discoveredNodes = checkNotNull(discoveryService.discoverNodes(),
"Discovered nodes cannot be null!");
MemberImpl localMember = node.nodeEngine.getLocalMember();
Set<Address> localAddresses = node.getLocalAddressRegistry().getLocalAddresses();
Collection<Address> possibleMembers = new ArrayList<>();
for (DiscoveryNode discoveryNode : discoveredNodes) {
Address discoveredAddress = usePublicAddress ? discoveryNode.getPublicAddress() : discoveryNode.getPrivateAddress();
if (localAddresses.contains(discoveredAddress)) {
if (!usePublicAddress && discoveryNode.getPublicAddress() != null) {
// enrich member with client public address
localMember.getAddressMap().put(EndpointQualifier.resolve(ProtocolType.CLIENT, "public"),
publicAddress(localMember, discoveryNode));
}
continue;
}
possibleMembers.add(discoveredAddress);
}
return possibleMembers;
} | @Test
public void test_DiscoveryJoiner_returns_private_address_and_enrich_member_with_public_address() {
DiscoveryJoiner joiner = new DiscoveryJoiner(getNode(hz), service, false);
doReturn(discoveryNodes).when(service).discoverNodes();
Collection<Address> addresses = joiner.getPossibleAddresses();
assertContains(addresses, Address.createUnresolvedAddress("127.0.0.1", 5702));
Address clientPublicAddress =
getNode(hz).getLocalMember().getAddressMap().get(CLIENT_PUBLIC_ENDPOINT_QUALIFIER);
assertEquals(Address.createUnresolvedAddress("127.0.0.2", 6701), clientPublicAddress);
} |
public void configure(SSLConfigurable socket) {
socket.setEnabledProtocols(enabledProtocols(
socket.getSupportedProtocols(), socket.getDefaultProtocols()));
socket.setEnabledCipherSuites(enabledCipherSuites(
socket.getSupportedCipherSuites(), socket.getDefaultCipherSuites()));
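// Client-auth settings are tri-state (Boolean): apply them only when explicitly configured.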
if (isNeedClientAuth() != null) {
socket.setNeedClientAuth(isNeedClientAuth());
}
if (isWantClientAuth() != null) {
socket.setWantClientAuth(isWantClientAuth());
}
if (hostnameVerification != null) {
addInfo("hostnameVerification="+hostnameVerification);
socket.setHostnameVerification(hostnameVerification);
}
} | @Test
public void testPassDefaultProtocols() throws Exception {
final String[] protocols = new String[] { "A" };
configurable.setDefaultProtocols(protocols);
configuration.configure(configurable);
assertTrue(Arrays.equals(protocols, configurable.getEnabledProtocols()));
} |
public void close() {
runWithLock(
() -> {
closed = true;
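// If the UDF is still running, wake any thread blocked on the cache and wait for it to finish before shutdown.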
if (!udfFinished) {
cacheNotEmpty.signalAll();
waitUDFFinished();
}
udfExecutor.shutdown();
});
} | @Test
void testInitialize() throws ExecutionException, InterruptedException {
CompletableFuture<Object> result = new CompletableFuture<>();
MapPartitionIterator<String> iterator =
new MapPartitionIterator<>(stringIterator -> result.complete(null));
result.get();
assertThat(result).isCompleted();
iterator.close();
} |
public static TableElements parse(final String schema, final TypeRegistry typeRegistry) {
return new SchemaParser(typeRegistry).parse(schema);
} | @Test
public void shouldParseQuotedSchema() {
// Given:
final String schema = "`END` VARCHAR";
// When:
final TableElements elements = parser.parse(schema);
// Then:
assertThat(elements, hasItem(
new TableElement(ColumnName.of("END"), new Type(SqlTypes.STRING))
));
} |
@Override
public Tuple zPopMin(byte[] key) {
Assert.notNull(key, "Key must not be null!");
return write(key, ByteArrayCodec.INSTANCE, ZPOPMIN, (Object) key);
} | @Test
public void testZPopMin() {
connection.zAdd("key".getBytes(), 1, "value1".getBytes());
connection.zAdd("key".getBytes(), 2, "value2".getBytes());
RedisZSetCommands.Tuple r = connection.zPopMin("key".getBytes());
assertThat(r.getValue()).isEqualTo("value1".getBytes());
assertThat(r.getScore()).isEqualTo(1);
} |
public static Collection<Inet6Address> getPossibleInetAddressesFor(final Inet6Address inet6Address) {
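// Only unscoped site-local or link-local addresses are expanded; anything else is returned as-is.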
if ((!inet6Address.isSiteLocalAddress() && !inet6Address.isLinkLocalAddress())
|| inet6Address.getScopeId() > 0 || inet6Address.getScopedInterface() != null) {
return Collections.singleton(inet6Address);
}
LinkedList<Inet6Address> possibleAddresses = new LinkedList<>();
try {
List<NetworkInterfaceInfo> interfaces = networkInterfacesEnumerator.getNetworkInterfaces();
for (NetworkInterfaceInfo ni : interfaces) {
addPossibleAddress(inet6Address, possibleAddresses, ni);
}
} catch (IOException ignored) {
ignore(ignored);
}
if (possibleAddresses.isEmpty()) {
throw new IllegalArgumentException("Could not find a proper network interface"
+ " to connect to "
+ inet6Address);
}
return possibleAddresses;
} | @Test
public void testGetPossibleInetAddressesFor_whenNotLocalAddress() throws UnknownHostException {
Inet6Address inet6Address = (Inet6Address) Inet6Address.getByName(SOME_NOT_LOCAL_ADDRESS);
assertThat(inet6Address.isSiteLocalAddress()).isFalse();
assertThat(inet6Address.isLinkLocalAddress()).isFalse();
Collection<Inet6Address> actual = AddressUtil.getPossibleInetAddressesFor(inet6Address);
assertEquals(1, actual.size());
assertTrue(actual.contains(inet6Address));
} |
@Override
public byte[] echo(byte[] message) {
return read(null, ByteArrayCodec.INSTANCE, ECHO, message);
} | @Test
public void testEcho() {
assertThat(connection.echo("test".getBytes())).isEqualTo("test".getBytes());
} |
@Override
public boolean verify(final Host host, final PublicKey key) throws BackgroundException {
if(null == database) {
log.warn(String.format("Missing database %s", database));
return super.verify(host, key);
}
final KeyType type = KeyType.fromKey(key);
if(type == KeyType.UNKNOWN) {
return false;
}
boolean foundApplicableHostEntry = false;
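// A matching entry that verifies the key means success; a matching entry that fails verification means the key changed.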
for(OpenSSHKnownHosts.KnownHostEntry entry : database.entries()) {
try {
if(entry.appliesTo(type, format(host))) {
foundApplicableHostEntry = true;
if(entry.verify(key)) {
return true;
}
}
}
catch(IOException e) {
log.error(String.format("Failure verifying host key entry %s. %s", entry, e.getMessage()));
return false;
}
}
if(foundApplicableHostEntry) {
try {
return this.isChangedKeyAccepted(host, key);
}
catch(ConnectionCanceledException | ChecksumException e) {
return false;
}
}
try {
return this.isUnknownKeyAccepted(host, key);
}
catch(ConnectionCanceledException | ChecksumException e) {
return false;
}
} | @Test
public void testReadFailure() throws Exception {
final AtomicBoolean unknown = new AtomicBoolean();
final OpenSSHHostKeyVerifier v = new OpenSSHHostKeyVerifier(new Local("/t") {
@Override
public InputStream getInputStream() throws AccessDeniedException {
throw new AccessDeniedException("t");
}
}) {
@Override
protected boolean isUnknownKeyAccepted(final Host hostname, final PublicKey key) {
unknown.set(true);
return true;
}
@Override
protected boolean isChangedKeyAccepted(final Host hostname, final PublicKey key) {
return false;
}
};
final PublicKey key = SecurityUtils.getKeyFactory("RSA").generatePublic(new RSAPublicKeySpec(new BigInteger("a19f65e93926d9a2f5b52072db2c38c54e6cf0113d31fa92ff827b0f3bec609c45ea84264c88e64adba11ff093ed48ee0ed297757654b0884ab5a7e28b3c463bc9074b32837a2b69b61d914abf1d74ccd92b20fa44db3b31fb208c0dd44edaeb4ab097118e8ee374b6727b89ad6ce43f1b70c5a437ccebc36d2dad8ae973caad15cd89ae840fdae02cae42d241baef8fda8aa6bbaa54fd507a23338da6f06f61b34fb07d560e63fbce4a39c073e28573c2962cedb292b14b80d1b4e67b0465f2be0e38526232d0a7f88ce91a055fde082038a87ed91f3ef5ff971e30ea6cccf70d38498b186621c08f8fdceb8632992b480bf57fc218e91f2ca5936770fe9469", 16),
new BigInteger("23", 16)));
assertTrue(v.verify(new Host(new SFTPProtocol(), "h", 22), key));
assertTrue(unknown.get());
} |
public static String replaceAllChars(String source, char search, String replace) {
int indexOf = source.indexOf(search);
if (indexOf == -1) {
return source;
}
int offset = 0;
char[] chars = source.toCharArray();
StringBuilder sb = new StringBuilder(source.length() + 20);
while (indexOf != -1) {
sb.append(chars, offset, indexOf - offset);
sb.append(replace);
offset = indexOf + 1;
indexOf = source.indexOf(search, offset);
}
sb.append(chars, offset, chars.length - offset);
return sb.toString();
} | @Test
public void testreplaceAllChars() {
assertEquals("", JOrphanUtils.replaceAllChars("", ' ', "+"));
assertEquals("source", JOrphanUtils.replaceAllChars("source", ' ', "+"));
assertEquals("so+rce", JOrphanUtils.replaceAllChars("source", 'u', "+"));
assertEquals("+so+urc+", JOrphanUtils.replaceAllChars("esoeurce", 'e', "+"));
assertEquals("AZAZsoAZurcAZ", JOrphanUtils.replaceAllChars("eesoeurce", 'e', "AZ"));
assertEquals("A+B++C+", JOrphanUtils.replaceAllChars("A B C ", ' ', "+"));
assertEquals("A%20B%20%20C%20", JOrphanUtils.replaceAllChars("A B C ", ' ', "%20"));
} |
public boolean readable(final SelectableChannel channel)
{
return readable((Object) channel);
} | @Test(timeout = 5000)
public void testReadable()
{
ZContext ctx = new ZContext();
ZPoller poller = new ZPoller(ctx);
try {
Socket socket = ctx.createSocket(SocketType.XPUB);
poller.register(socket, new EventsHandlerAdapter());
boolean rc = poller.readable(socket);
assertThat(rc, is(false));
rc = poller.isReadable(socket);
assertThat(rc, is(false));
rc = poller.pollin(socket);
assertThat(rc, is(false));
}
finally {
poller.close();
ctx.close();
}
} |
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
super.onDataReceived(device, data);
final DSTOffset offset = readDSTOffset(data, 0);
if (offset == null) {
onInvalidDataReceived(device, data);
return;
}
onDSTOffsetReceived(device, offset);
} | @Test
public void onDSTOffsetReceived_daylight() {
final Data data = new Data(new byte[] { 4 });
callback.onDataReceived(null, data);
assertTrue(success);
assertSame(DSTOffsetCallback.DSTOffset.DAYLIGHT_TIME, result);
} |
public static EnsemblePlacementPolicyConfig decode(byte[] data) throws ParseEnsemblePlacementPolicyConfigException {
try {
return ENSEMBLE_PLACEMENT_CONFIG_READER.readValue(data);
} catch (IOException e) {
throw new ParseEnsemblePlacementPolicyConfigException("Failed to decode from json", e);
}
} | @Test
public void testDecodeFailed() {
byte[] configBytes = new byte[0];
try {
EnsemblePlacementPolicyConfig.decode(configBytes);
Assert.fail("should failed parse the config from bytes");
} catch (EnsemblePlacementPolicyConfig.ParseEnsemblePlacementPolicyConfigException e) {
// expected error
}
} |
public static Read<JmsRecord> read() {
return new AutoValue_JmsIO_Read.Builder<JmsRecord>()
.setMaxNumRecords(Long.MAX_VALUE)
.setCoder(SerializableCoder.of(JmsRecord.class))
.setCloseTimeout(DEFAULT_CLOSE_TIMEOUT)
.setRequiresDeduping(false)
.setMessageMapper(
new MessageMapper<JmsRecord>() {
@Override
public JmsRecord mapMessage(Message message) throws Exception {
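// The default mapper assumes TextMessage payloads and copies all JMS properties into the record.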
TextMessage textMessage = (TextMessage) message;
Map<String, Object> properties = new HashMap<>();
@SuppressWarnings("rawtypes")
Enumeration propertyNames = textMessage.getPropertyNames();
while (propertyNames.hasMoreElements()) {
String propertyName = (String) propertyNames.nextElement();
properties.put(propertyName, textMessage.getObjectProperty(propertyName));
}
return new JmsRecord(
textMessage.getJMSMessageID(),
textMessage.getJMSTimestamp(),
textMessage.getJMSCorrelationID(),
textMessage.getJMSReplyTo(),
textMessage.getJMSDestination(),
textMessage.getJMSDeliveryMode(),
textMessage.getJMSRedelivered(),
textMessage.getJMSType(),
textMessage.getJMSExpiration(),
textMessage.getJMSPriority(),
properties,
textMessage.getText());
}
})
.build();
} | @Test
public void testDefaultAutoscaler() throws IOException {
JmsIO.Read spec =
JmsIO.read()
.withConnectionFactory(connectionFactory)
.withUsername(USERNAME)
.withPassword(PASSWORD)
.withQueue(QUEUE);
JmsIO.UnboundedJmsSource source = new JmsIO.UnboundedJmsSource(spec);
JmsIO.UnboundedJmsReader reader = source.createReader(PipelineOptionsFactory.create(), null);
// start the reader and check getSplitBacklogBytes and getTotalBacklogBytes values
reader.start();
assertEquals(BACKLOG_UNKNOWN, reader.getSplitBacklogBytes());
assertEquals(BACKLOG_UNKNOWN, reader.getTotalBacklogBytes());
reader.close();
} |
public static AvroGenericCoder of(Schema schema) {
return AvroGenericCoder.of(schema);
} | @Test
public void testAvroCoderEncoding() throws Exception {
AvroCoder<Pojo> coder = AvroCoder.of(Pojo.class);
CoderProperties.coderSerializable(coder);
AvroCoder<Pojo> copy = SerializableUtils.clone(coder);
Pojo pojo = new Pojo("foo", 3, DATETIME_A);
Pojo equalPojo = new Pojo("foo", 3, DATETIME_A);
Pojo otherPojo = new Pojo("bar", -19, DATETIME_B);
CoderProperties.coderConsistentWithEquals(coder, pojo, equalPojo);
CoderProperties.coderConsistentWithEquals(copy, pojo, equalPojo);
CoderProperties.coderConsistentWithEquals(coder, pojo, otherPojo);
CoderProperties.coderConsistentWithEquals(copy, pojo, otherPojo);
} |
public Map<String, String> gitTags(Map<String, String> tags) {
return MetricsSupport.gitTags(tags);
} | @Test
void gitTags() {
ApplicationModel applicationModel = ApplicationModel.defaultModel();
String mockMetrics = "MockMetrics";
applicationModel
.getApplicationConfigManager()
.setApplication(new org.apache.dubbo.config.ApplicationConfig(mockMetrics));
ApplicationMetric applicationMetric = new ApplicationMetric(applicationModel);
Map<String, String> tags = applicationMetric.getTags();
Assertions.assertEquals(tags.get(METADATA_GIT_COMMITID_METRIC.getName()), Version.getLastCommitId());
} |
@Override
public void execute(ComputationStep.Context context) {
new PathAwareCrawler<>(
FormulaExecutorComponentVisitor.newBuilder(metricRepository, measureRepository)
.buildFor(List.of(duplicationFormula)))
.visit(treeRootHolder.getRoot());
} | @Test
public void compute_duplicated_lines_counts_lines_from_original_and_ignores_InProjectDuplicate() {
TextBlock original = new TextBlock(1, 1);
duplicationRepository.addDuplication(FILE_1_REF, original, FILE_2_REF, new TextBlock(2, 2));
setNewLines(FILE_1);
underTest.execute(new TestComputationStepContext());
assertRawMeasureValue(FILE_1_REF, NEW_DUPLICATED_LINES_KEY, 1);
} |
public static UserAgent parse(String userAgentString) {
return UserAgentParser.parse(userAgentString);
} | @Test
public void parseEdgATest(){
// https://gitee.com/dromara/hutool/issues/I4MCBP
final String uaStr = "userAgent: Mozilla/5.0 (Linux; Android 11; MI 9 Transparent Edition) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.55 Mobile Safari/537.36 EdgA/96.0.1054.36";
final UserAgent ua = UserAgentUtil.parse(uaStr);
assertEquals("MSEdge", ua.getBrowser().toString());
assertEquals("96.0.1054.36", ua.getVersion());
assertEquals("Webkit", ua.getEngine().toString());
assertEquals("537.36", ua.getEngineVersion());
assertEquals("Android", ua.getOs().toString());
assertEquals("11", ua.getOsVersion());
assertEquals("Android", ua.getPlatform().toString());
assertTrue(ua.isMobile());
} |
@Override
public boolean find(final Path file, final ListProgressListener listener) throws BackgroundException {
final boolean exists = Files.exists(session.toPath(file), LinkOption.NOFOLLOW_LINKS);
if(exists) {
if(Files.isSymbolicLink(session.toPath(file))) {
return true;
}
if(!file.isRoot()) {
try {
if(!StringUtils.equals(session.toPath(file).toFile().getCanonicalFile().getName(), file.getName())) {
return false;
}
}
catch(IOException e) {
log.warn(String.format("Failure obtaining canonical file reference for %s", file));
}
}
}
return exists;
} | @Test
public void testFindDirectory() throws Exception {
final LocalSession session = new LocalSession(new Host(new LocalProtocol(), new LocalProtocol().getDefaultHostname()));
session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback());
session.login(new DisabledLoginCallback(), new DisabledCancelCallback());
final Path home = new LocalHomeFinderFeature().find();
assertTrue(new LocalFindFeature(session).find(home));
session.close();
} |
@Override
public URL mergeUrl(URL remoteUrl, Map<String, String> localParametersMap) {
Map<String, String> map = new HashMap<>();
Map<String, String> remoteMap = remoteUrl.getParameters();
if (remoteMap != null && remoteMap.size() > 0) {
map.putAll(remoteMap);
// Remove thread pool related configurations from the provider URL; these provider-side settings must not apply to the consumer.
map.remove(THREAD_NAME_KEY);
map.remove(DEFAULT_KEY_PREFIX + THREAD_NAME_KEY);
map.remove(THREADPOOL_KEY);
map.remove(DEFAULT_KEY_PREFIX + THREADPOOL_KEY);
map.remove(CORE_THREADS_KEY);
map.remove(DEFAULT_KEY_PREFIX + CORE_THREADS_KEY);
map.remove(THREADS_KEY);
map.remove(DEFAULT_KEY_PREFIX + THREADS_KEY);
map.remove(QUEUES_KEY);
map.remove(DEFAULT_KEY_PREFIX + QUEUES_KEY);
map.remove(ALIVE_KEY);
map.remove(DEFAULT_KEY_PREFIX + ALIVE_KEY);
map.remove(Constants.TRANSPORTER_KEY);
map.remove(DEFAULT_KEY_PREFIX + Constants.TRANSPORTER_KEY);
}
if (localParametersMap != null && localParametersMap.size() > 0) {
Map<String, String> copyOfLocalMap = new HashMap<>(localParametersMap);
if (map.containsKey(GROUP_KEY)) {
copyOfLocalMap.remove(GROUP_KEY);
}
if (map.containsKey(VERSION_KEY)) {
copyOfLocalMap.remove(VERSION_KEY);
}
if (map.containsKey(GENERIC_KEY)) {
copyOfLocalMap.remove(GENERIC_KEY);
}
copyOfLocalMap.remove(RELEASE_KEY);
copyOfLocalMap.remove(DUBBO_VERSION_KEY);
copyOfLocalMap.remove(METHODS_KEY);
copyOfLocalMap.remove(TIMESTAMP_KEY);
copyOfLocalMap.remove(TAG_KEY);
map.putAll(copyOfLocalMap);
if (remoteMap != null) {
map.put(REMOTE_APPLICATION_KEY, remoteMap.get(APPLICATION_KEY));
// Combine filters and listeners on Provider and Consumer
String remoteFilter = remoteMap.get(REFERENCE_FILTER_KEY);
String localFilter = copyOfLocalMap.get(REFERENCE_FILTER_KEY);
if (remoteFilter != null
&& remoteFilter.length() > 0
&& localFilter != null
&& localFilter.length() > 0) {
map.put(REFERENCE_FILTER_KEY, remoteFilter + "," + localFilter);
}
String remoteListener = remoteMap.get(INVOKER_LISTENER_KEY);
String localListener = copyOfLocalMap.get(INVOKER_LISTENER_KEY);
if (remoteListener != null
&& remoteListener.length() > 0
&& localListener != null
&& localListener.length() > 0) {
map.put(INVOKER_LISTENER_KEY, remoteListener + "," + localListener);
}
}
}
return remoteUrl.clearParameters().addParameters(map);
} | @Test
void testMergeUrl() {
URL providerURL = URL.valueOf("dubbo://localhost:55555");
providerURL = providerURL.setPath("path").setUsername("username").setPassword("password");
providerURL = URLBuilder.from(providerURL)
.addParameter(GROUP_KEY, "dubbo")
.addParameter(VERSION_KEY, "1.2.3")
.addParameter(DUBBO_VERSION_KEY, "2.3.7")
.addParameter(THREADPOOL_KEY, "fixed")
.addParameter(THREADS_KEY, Integer.MAX_VALUE)
.addParameter(THREAD_NAME_KEY, "test")
.addParameter(CORE_THREADS_KEY, Integer.MAX_VALUE)
.addParameter(QUEUES_KEY, Integer.MAX_VALUE)
.addParameter(ALIVE_KEY, Integer.MAX_VALUE)
.addParameter(DEFAULT_KEY_PREFIX + THREADS_KEY, Integer.MAX_VALUE)
.addParameter(DEFAULT_KEY_PREFIX + THREADPOOL_KEY, "fixed")
.addParameter(DEFAULT_KEY_PREFIX + CORE_THREADS_KEY, Integer.MAX_VALUE)
.addParameter(DEFAULT_KEY_PREFIX + QUEUES_KEY, Integer.MAX_VALUE)
.addParameter(DEFAULT_KEY_PREFIX + ALIVE_KEY, Integer.MAX_VALUE)
.addParameter(DEFAULT_KEY_PREFIX + THREAD_NAME_KEY, "test")
.addParameter(APPLICATION_KEY, "provider")
.addParameter(REFERENCE_FILTER_KEY, "filter1,filter2")
.addParameter(TAG_KEY, "TTT")
.build();
URL consumerURL = new URLBuilder(DUBBO_PROTOCOL, "localhost", 55555)
.addParameter(PID_KEY, "1234")
.addParameter(THREADPOOL_KEY, "foo")
.addParameter(APPLICATION_KEY, "consumer")
.addParameter(REFERENCE_FILTER_KEY, "filter3")
.addParameter(TAG_KEY, "UUU")
.build();
URL url = providerURLMergeProcessor.mergeUrl(providerURL, consumerURL.getParameters());
Assertions.assertFalse(url.hasParameter(THREADS_KEY));
Assertions.assertFalse(url.hasParameter(DEFAULT_KEY_PREFIX + THREADS_KEY));
Assertions.assertFalse(url.hasParameter(DEFAULT_KEY_PREFIX + THREADPOOL_KEY));
Assertions.assertFalse(url.hasParameter(CORE_THREADS_KEY));
Assertions.assertFalse(url.hasParameter(DEFAULT_KEY_PREFIX + CORE_THREADS_KEY));
Assertions.assertFalse(url.hasParameter(QUEUES_KEY));
Assertions.assertFalse(url.hasParameter(DEFAULT_KEY_PREFIX + QUEUES_KEY));
Assertions.assertFalse(url.hasParameter(ALIVE_KEY));
Assertions.assertFalse(url.hasParameter(DEFAULT_KEY_PREFIX + ALIVE_KEY));
Assertions.assertFalse(url.hasParameter(THREAD_NAME_KEY));
Assertions.assertFalse(url.hasParameter(DEFAULT_KEY_PREFIX + THREAD_NAME_KEY));
Assertions.assertEquals("path", url.getPath());
Assertions.assertEquals("username", url.getUsername());
Assertions.assertEquals("password", url.getPassword());
Assertions.assertEquals("1234", url.getParameter(PID_KEY));
Assertions.assertEquals("foo", url.getParameter(THREADPOOL_KEY));
Assertions.assertEquals("consumer", url.getApplication());
Assertions.assertEquals("provider", url.getRemoteApplication());
Assertions.assertEquals("filter1,filter2,filter3", url.getParameter(REFERENCE_FILTER_KEY));
Assertions.assertEquals("TTT", url.getParameter(TAG_KEY));
} |
@SuppressWarnings({
"nullness" // TODO(https://github.com/apache/beam/issues/20497)
})
@Override
protected SchemaTransform from(KafkaReadSchemaTransformConfiguration configuration) {
return new KafkaReadSchemaTransform(configuration);
} | @Test
public void testBuildTransformWithJsonSchema() throws IOException {
ServiceLoader<SchemaTransformProvider> serviceLoader =
ServiceLoader.load(SchemaTransformProvider.class);
List<SchemaTransformProvider> providers =
StreamSupport.stream(serviceLoader.spliterator(), false)
.filter(provider -> provider.getClass() == KafkaReadSchemaTransformProvider.class)
.collect(Collectors.toList());
KafkaReadSchemaTransformProvider kafkaProvider =
(KafkaReadSchemaTransformProvider) providers.get(0);
kafkaProvider.from(
KafkaReadSchemaTransformConfiguration.builder()
.setTopic("anytopic")
.setBootstrapServers("anybootstrap")
.setFormat("JSON")
.setSchema(
new String(
ByteStreams.toByteArray(
Objects.requireNonNull(
getClass().getResourceAsStream("/json-schema/basic_json_schema.json"))),
StandardCharsets.UTF_8))
.build());
} |
public static String optimizeErrorMessage(String msg) {
if (msg == null) {
return null;
}
if (SERVER_ID_CONFLICT.matcher(msg).matches()) {
// Optimize the error msg when server id conflict
msg +=
"\nThe 'server-id' in the mysql cdc connector should be globally unique, but conflicts happen now.\n"
+ "The server id conflict may happen in the following situations: \n"
+ "1. The server id has been used by other mysql cdc table in the current job.\n"
+ "2. The server id has been used by the mysql cdc table in other jobs.\n"
+ "3. The server id has been used by other sync tools like canal, debezium and so on.\n";
} else if (MISSING_BINLOG_POSITION_WHEN_BINLOG_EXPIRE.matcher(msg).matches()
|| MISSING_TRANSACTION_WHEN_BINLOG_EXPIRE.matcher(msg).matches()) {
// Optimize the error msg when binlog is unavailable
msg +=
"\nThe required binary logs are no longer available on the server. This may happen in following situations:\n"
+ "1. The speed of CDC source reading is too slow to exceed the binlog expired period. You can consider increasing the binary log expiration period, you can also to check whether there is back pressure in the job and optimize your job.\n"
+ "2. The job runs normally, but something happens in the database and lead to the binlog cleanup. You can try to check why this cleanup happens from MySQL side.";
}
return msg;
} | @Test
public void testOptimizeErrorMessageWhenMissingBinlogPositionInSource() {
assertEquals(
"Cannot replicate because the source purged required binary logs. Replicate the missing transactions from elsewhere, or provision a new slave from backup. Consider increasing the master's binary log expiration period. The GTID set sent by the slave is 'b9d6f3df-79e7-11ed-9a81-0242ac110004:1-33', and the missing transactions are 'b9d6f3df-79e7-11ed-9a81-0242ac110004:34'"
+ "\nThe required binary logs are no longer available on the server. This may happen in following situations:\n"
+ "1. The speed of CDC source reading is too slow to exceed the binlog expired period. You can consider increasing the binary log expiration period, you can also to check whether there is back pressure in the job and optimize your job.\n"
+ "2. The job runs normally, but something happens in the database and lead to the binlog cleanup. You can try to check why this cleanup happens from MySQL side.",
ErrorMessageUtils.optimizeErrorMessage(
"Cannot replicate because the source purged required binary logs. Replicate the missing transactions from elsewhere, or provision a new slave from backup. Consider increasing the master's binary log expiration period. The GTID set sent by the slave is 'b9d6f3df-79e7-11ed-9a81-0242ac110004:1-33', and the missing transactions are 'b9d6f3df-79e7-11ed-9a81-0242ac110004:34'"));
} |
public static String normalize(final String name) {
if (name == null) {
throw new IllegalArgumentException("name cannot be null");
}
StringBuilder sb = new StringBuilder();
for (char c : name.toCharArray()) {
if (RESERVED.contains(c)) {
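// Percent-encode reserved characters, zero-padding single-digit hex values.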
sb.append('%').append((c < 16) ? "0" : "")
.append(Integer.toHexString(c).toUpperCase(Locale.ROOT));
} else {
sb.append(c);
}
}
return sb.toString();
} | @Test
public void normalizeWithNotPrintableChars() throws Exception {
final String TEST_NAME = new String(
new char[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, '.', 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31});
final String EXPECTED_NAME = "%00%01%02%03%04%05%06%07%08%09%0A%0B%0C%0D%0E%0F" + "." +
"%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E%1F";
assertEquals(EXPECTED_NAME, FilenameUtils.normalize(TEST_NAME));
} |
@Override
protected String transform(ILoggingEvent event, String in) {
AnsiElement element = ELEMENTS.get(getFirstOption());
List<Marker> markers = event.getMarkerList();
if ((markers != null && !markers.isEmpty() && markers.get(0).contains(CRLF_SAFE_MARKER)) || isLoggerSafe(event)) {
return in;
}
String replacement = element == null ? "_" : toAnsiString("_", element);
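// Replace CR, LF and tab so user-controlled input cannot forge additional log lines.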
return in.replaceAll("[\n\r\t]", replacement);
} | @Test
void transformShouldReplaceNewlinesAndCarriageReturnsWithAnsiStringWhenMarkersDoNotContainCRLFSafeMarkerAndLoggerIsNotSafeAndAnsiElementIsNotNull() {
ILoggingEvent event = mock(ILoggingEvent.class);
List<Marker> markers = Collections.emptyList();
when(event.getMarkerList()).thenReturn(markers);
when(event.getLoggerName()).thenReturn("com.mycompany.myapp.example.Logger");
String input = "Test\ninput\rstring";
CRLFLogConverter converter = new CRLFLogConverter();
converter.setOptionList(List.of("red"));
String result = converter.transform(event, input);
assertEquals("Test_input_string", result);
} |
@Override
public Num calculate(BarSeries series, Position position) {
int beginIndex = position.getEntry().getIndex();
int endIndex = series.getEndIndex();
return criterion.calculate(series, createEnterAndHoldTrade(series, beginIndex, endIndex));
} | @Test
public void calculateWithOnePositions() {
MockBarSeries series = new MockBarSeries(numFunction, 100, 105);
Position position = new Position(Trade.buyAt(0, series), Trade.sellAt(1, series));
// buy and hold of ReturnCriterion
AnalysisCriterion buyAndHoldReturn = getCriterion(new ReturnCriterion());
assertNumEquals(105d / 100, buyAndHoldReturn.calculate(series, position));
// sell and hold of ReturnCriterion
AnalysisCriterion sellAndHoldReturn = getCriterion(TradeType.SELL, new ReturnCriterion());
assertNumEquals(0.95, sellAndHoldReturn.calculate(series, position));
// buy and hold of PnlPercentageCriterion
AnalysisCriterion buyAndHoldPnlPercentage = getCriterion(new ProfitLossPercentageCriterion());
assertNumEquals(5, buyAndHoldPnlPercentage.calculate(series, position));
// sell and hold of PnlPercentageCriterion
AnalysisCriterion sellAndHoldPnlPercentage = getCriterion(TradeType.SELL, new ProfitLossPercentageCriterion());
assertNumEquals(-5, sellAndHoldPnlPercentage.calculate(series, position));
} |
public static int getInt(String key, int def) {
String value = get(key);
if (value == null) {
return def;
}
value = value.trim();
try {
return Integer.parseInt(value);
} catch (Exception e) {
// Ignore
}
logger.warn(
"Unable to parse the integer system property '{}':{} - using the default value: {}",
key, value, def
);
return def;
} | @Test
public void getIntWithPropertValueIsInt() {
System.setProperty("key", "123");
assertEquals(123, SystemPropertyUtil.getInt("key", 1));
} |
public Future<KafkaVersionChange> reconcile() {
return getPods()
.compose(this::detectToAndFromVersions)
.compose(i -> prepareVersionChange());
} | @Test
public void testExistingClusterWithRemovedMetadataVersion(VertxTestContext context) {
VersionChangeCreator vcc = mockVersionChangeCreator(
mockKafka(VERSIONS.defaultVersion().version(), "3.4-IV2", null),
mockRos(mockUniformPods(VERSIONS.defaultVersion().version()))
);
Checkpoint async = context.checkpoint();
vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> {
assertThat(c.from(), is(VERSIONS.defaultVersion()));
assertThat(c.to(), is(VERSIONS.defaultVersion()));
assertThat(c.metadataVersion(), is(VERSIONS.defaultVersion().metadataVersion()));
async.flag();
})));
} |
public static WriteStreams writeStreams() {
return new AutoValue_RedisIO_WriteStreams.Builder()
.setConnectionConfiguration(RedisConnectionConfiguration.create())
.setMaxLen(0L)
.setApproximateTrim(true)
.build();
} | @Test
public void testWriteStreams() {
/* test data is 10 keys (stream IDs), each with two entries, each entry having one k/v pair of data */
List<String> redisKeys =
IntStream.range(0, 10).boxed().map(idx -> UUID.randomUUID().toString()).collect(toList());
Map<String, String> fooValues = ImmutableMap.of("sensor-id", "1234", "temperature", "19.8");
Map<String, String> barValues = ImmutableMap.of("sensor-id", "9999", "temperature", "18.2");
List<KV<String, Map<String, String>>> allData =
redisKeys.stream()
.flatMap(id -> Stream.of(KV.of(id, fooValues), KV.of(id, barValues)))
.collect(toList());
PCollection<KV<String, Map<String, String>>> write =
p.apply(
Create.of(allData)
.withCoder(
KvCoder.of(
StringUtf8Coder.of(),
MapCoder.of(StringUtf8Coder.of(), StringUtf8Coder.of()))));
write.apply(RedisIO.writeStreams().withEndpoint(REDIS_HOST, port));
p.run();
for (String key : redisKeys) {
List<StreamEntry> streamEntries =
client.xrange(key, (StreamEntryID) null, (StreamEntryID) null, Integer.MAX_VALUE);
assertEquals(2, streamEntries.size());
assertThat(transform(streamEntries, StreamEntry::getFields), hasItems(fooValues, barValues));
}
} |
@VisibleForTesting
void checkRemoteFoldernameField( String remoteFoldernameFieldName, SFTPPutData data ) throws KettleStepException {
// Remote folder fieldname
remoteFoldernameFieldName = environmentSubstitute( remoteFoldernameFieldName );
if ( Utils.isEmpty( remoteFoldernameFieldName ) ) {
// remote folder field is missing
throw new KettleStepException( BaseMessages.getString( PKG, "SFTPPut.Error.RemoteFolderNameFieldMissing" ) );
}
data.indexOfRemoteDirectory = getInputRowMeta().indexOfValue( remoteFoldernameFieldName );
if ( data.indexOfRemoteDirectory == -1 ) {
// remote foldername field is missing
throw new KettleStepException( BaseMessages.getString(
PKG, "SFTPPut.Error.CanNotFindField", remoteFoldernameFieldName ) );
}
} | @Test( expected = KettleStepException.class )
public void checkRemoteFoldernameField_NameIsBlank() throws Exception {
SFTPPutData data = new SFTPPutData();
step.checkRemoteFoldernameField( "", data );
} |
public Canvas canvas() {
Canvas canvas = new Canvas(getLowerBound(), getUpperBound());
canvas.add(this);
if (name != null) {
canvas.setTitle(name);
}
return canvas;
} | @Test
public void testSparseMatrix() throws Exception {
System.out.println("Sparse Matrix");
var sparse = SparseMatrix.text(Paths.getTestData("matrix/mesh2em5.txt"));
var canvas = SparseMatrixPlot.of(sparse).canvas();
canvas.setTitle("mesh2em5");
canvas.window();
} |
@Override
public void onProjectsDeleted(Set<DeletedProject> projects) {
checkNotNull(projects, "projects can't be null");
if (projects.isEmpty()) {
return;
}
Arrays.stream(listeners)
.forEach(safelyCallListener(listener -> listener.onProjectsDeleted(projects)));
} | @Test
@UseDataProvider("oneOrManyDeletedProjects")
public void onProjectsDeleted_calls_all_listeners_even_if_one_throws_an_Error(Set<DeletedProject> projects) {
InOrder inOrder = Mockito.inOrder(listener1, listener2, listener3);
doThrow(new Error("Faking listener2 throwing an Error"))
.when(listener2)
.onProjectsDeleted(any());
underTestWithListeners.onProjectsDeleted(projects);
inOrder.verify(listener1).onProjectsDeleted(same(projects));
inOrder.verify(listener2).onProjectsDeleted(same(projects));
inOrder.verify(listener3).onProjectsDeleted(same(projects));
inOrder.verifyNoMoreInteractions();
} |
@Override
public byte[] decrypt(byte[] bytes, KeyType keyType) {
// When the Bouncy Castle provider is not in use, fall back to the default block size algorithm
if (this.decryptBlockSize < 0 && null == GlobalBouncyCastleProvider.INSTANCE.getProvider()) {
// Encrypted data length <= modulus length (in bytes) - 11
this.decryptBlockSize = ((RSAKey) getKeyByType(keyType)).getModulus().bitLength() / 8;
}
return super.decrypt(bytes, keyType);
} | @Test
public void rsaBase64Test() {
final String textBase = "我是一段特别长的测试";
final StringBuilder text = new StringBuilder();
for (int i = 0; i < 10; i++) {
text.append(textBase);
}
final RSA rsa = new RSA();
// Encrypt with the public key, decrypt with the private key
final String encryptStr = rsa.encryptBase64(text.toString(), KeyType.PublicKey);
final String decryptStr = StrUtil.utf8Str(rsa.decrypt(encryptStr, KeyType.PrivateKey));
assertEquals(text.toString(), decryptStr);
// Encrypt with the private key, decrypt with the public key
final String encrypt2 = rsa.encryptBase64(text.toString(), KeyType.PrivateKey);
final String decrypt2 = StrUtil.utf8Str(rsa.decrypt(encrypt2, KeyType.PublicKey));
assertEquals(text.toString(), decrypt2);
} |
protected FileSystem createFileSystem(Configuration namenodeConf)
throws IOException {
String user = UserGroupInformation.getCurrentUser().getShortUserName();
CachedFileSystem newCachedFS = new CachedFileSystem(purgeTimeout);
CachedFileSystem cachedFS = fsCache.putIfAbsent(user, newCachedFS);
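// putIfAbsent returns null when our entry won the race; otherwise reuse the existing cache entry.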
if (cachedFS == null) {
cachedFS = newCachedFS;
}
Configuration conf = new Configuration(namenodeConf);
conf.set(HTTPFS_FS_USER, user);
return cachedFS.getFileSystem(conf);
} | @Test
@TestDir
@TestHdfs
public void createFileSystem() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
String services = StringUtils.join(",",
Arrays.asList(InstrumentationService.class.getName(),
SchedulerService.class.getName(),
FileSystemAccessService.class.getName()));
Configuration hadoopConf = new Configuration(false);
hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
createHadoopConf(hadoopConf);
Configuration conf = new Configuration(false);
conf.set("server.services", services);
conf.set("server.hadoop.filesystem.cache.purge.timeout", "0");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
FileSystemAccess hadoop = server.get(FileSystemAccess.class);
FileSystem fs = hadoop.createFileSystem("u", hadoop.getFileSystemConfiguration());
Assert.assertNotNull(fs);
fs.mkdirs(new Path("/tmp/foo"));
hadoop.releaseFileSystem(fs);
try {
fs.mkdirs(new Path("/tmp/foo"));
Assert.fail();
} catch (IOException ex) {
} catch (Exception ex) {
Assert.fail();
}
server.destroy();
} |
@Override
protected void write(final PostgreSQLPacketPayload payload) {
payload.getByteBuf().writeBytes(PREFIX);
payload.getByteBuf().writeByte(status);
} | @Test
void assertReadWriteWithInTransaction() {
ByteBuf byteBuf = ByteBufTestUtils.createByteBuf(6);
PostgreSQLPacketPayload payload = new PostgreSQLPacketPayload(byteBuf, StandardCharsets.UTF_8);
PostgreSQLReadyForQueryPacket packet = PostgreSQLReadyForQueryPacket.IN_TRANSACTION;
packet.write(payload);
assertThat(byteBuf.writerIndex(), is(6));
assertThat(byteBuf.getByte(5), is((byte) 'T'));
} |
@Override
public boolean createReservation(ReservationId reservationId, String user,
Plan plan, ReservationDefinition contract) throws PlanningException {
LOG.info("placing the following ReservationRequest: " + contract);
try {
boolean res =
planner.createReservation(reservationId, user, plan, contract);
if (res) {
LOG.info("OUTCOME: SUCCESS, Reservation ID: "
+ reservationId.toString() + ", Contract: " + contract.toString());
} else {
LOG.info("OUTCOME: FAILURE, Reservation ID: "
+ reservationId.toString() + ", Contract: " + contract.toString());
}
return res;
} catch (PlanningException e) {
LOG.info("OUTCOME: FAILURE, Reservation ID: " + reservationId.toString()
+ ", Contract: " + contract.toString());
throw e;
}
} | @Test
public void testOrderNoGapImpossible() throws PlanningException {
prepareBasicPlan();
// create a completely utilized segment at time 30
int[] f = { 100, 100 };
ReservationDefinition rDef = ReservationSystemTestUtil
.createSimpleReservationDefinition(30, 30 * step + f.length * step,
f.length * step, 1, recurrenceExpression);
assertTrue(
plan.toString(),
plan.addReservation(new InMemoryReservationAllocation(
ReservationSystemTestUtil.getNewReservationId(), rDef, "u1",
"dedicated", 30 * step, 30 * step + f.length * step,
ReservationSystemTestUtil.generateAllocation(30 * step, step, f),
res, minAlloc), false));
// create a chain of 4 RR, mixing gang and non-gang
ReservationDefinition rr = new ReservationDefinitionPBImpl();
rr.setArrival(0L);
rr.setDeadline(70L);
ReservationRequests reqs = new ReservationRequestsPBImpl();
reqs.setInterpreter(ReservationRequestInterpreter.R_ORDER_NO_GAP);
ReservationRequest r = ReservationRequest.newInstance(
Resource.newInstance(2048, 2), 10, 1, 10);
ReservationRequest r2 = ReservationRequest.newInstance(
Resource.newInstance(1024, 1), 10, 10, 20);
List<ReservationRequest> list = new ArrayList<ReservationRequest>();
list.add(r);
list.add(r2);
list.add(r);
list.add(r2);
reqs.setReservationResources(list);
rr.setReservationRequests(reqs);
ReservationId reservationID = ReservationSystemTestUtil
.getNewReservationId();
boolean result = false;
try {
// submit to agent
result = agent.createReservation(reservationID, "u1", plan, rr);
fail();
} catch (PlanningException p) {
// expected
}
// validate
assertFalse("Agent-based allocation should have failed", result);
assertTrue("Agent-based allocation should have failed", plan
.getAllReservations().size() == 3);
System.out
.println("--------AFTER ORDER_NO_GAP IMPOSSIBLE ALLOCATION (queue: "
+ reservationID + ")----------");
System.out.println(plan.toString());
System.out.println(plan.toCumulativeString());
} |
private boolean concurrentSQL() {
return Boolean.parseBoolean(getProperty("zeppelin.spark.concurrentSQL"));
} | @Test
void testConcurrentSQL() throws InterpreterException, InterruptedException {
sparkInterpreter.interpret("spark.udf.register(\"sleep\", (e:Int) => {Thread.sleep(e*1000); e})", context);
Thread thread1 = new Thread() {
@Override
public void run() {
try {
InterpreterResult result = sqlInterpreter.interpret("select sleep(10)", context);
assertEquals(InterpreterResult.Code.SUCCESS, result.code());
} catch (InterpreterException e) {
e.printStackTrace();
}
}
};
Thread thread2 = new Thread() {
@Override
public void run() {
try {
InterpreterResult result = sqlInterpreter.interpret("select sleep(10)", context);
assertEquals(InterpreterResult.Code.SUCCESS, result.code());
} catch (InterpreterException e) {
e.printStackTrace();
}
}
};
// Start 2 Spark SQL queries, each sleeping 10 seconds; the total running time should
// be less than 20 seconds, which means they run concurrently.
long start = System.currentTimeMillis();
thread1.start();
thread2.start();
thread1.join();
thread2.join();
long end = System.currentTimeMillis();
assertTrue(((end - start) / 1000) < 20, "running time must be less than 20 seconds");
} |
public FEELFnResult<List<Object>> invoke(@ParameterName("list") Object[] lists) {
if ( lists == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null"));
}
// spec requires us to return a new list
final List<Object> result = new ArrayList<>();
for ( Object list : lists ) {
if ( list == null ) {
// TODO review according to spec; original behavior was: return null;
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "lists", "one of the elements in the list is null"));
} else if ( list instanceof Collection ) {
result.addAll( (Collection) list );
} else {
result.add( list );
}
}
return FEELFnResult.ofResult( result );
} | @Test
void invokeArrayWithoutList() {
FunctionTestUtil.assertResultList(concatenateFunction.invoke(new Object[]{"test", 2,
BigDecimal.valueOf(25.3)}), Arrays.asList("test", 2, BigDecimal.valueOf(25.3)));
} |
public boolean overlap(final Window other) throws IllegalArgumentException {
if (getClass() != other.getClass()) {
throw new IllegalArgumentException("Cannot compare windows of different type. Other window has type "
+ other.getClass() + ".");
}
final SessionWindow otherWindow = (SessionWindow) other;
return !(otherWindow.endMs < startMs || endMs < otherWindow.startMs);
} | @Test
public void shouldNotOverlapIfOtherWindowIsBeforeThisWindow() {
/*
* This: [-------]
* Other: [---]
*/
assertFalse(window.overlap(new SessionWindow(0, 25)));
assertFalse(window.overlap(new SessionWindow(0, start - 1)));
assertFalse(window.overlap(new SessionWindow(start - 1, start - 1)));
} |
public static String getAllExceptionMsg(Throwable e) {
Throwable cause = e;
StringBuilder strBuilder = new StringBuilder();
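// Walk the cause chain and concatenate every non-empty message.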
while (cause != null && !StringUtils.isEmpty(cause.getMessage())) {
strBuilder.append("caused: ").append(cause.getMessage()).append(';');
cause = cause.getCause();
}
return strBuilder.toString();
} | @Test
void testGetAllExceptionMsg() {
String msg = ExceptionUtil.getAllExceptionMsg(nacosRuntimeException);
assertEquals("caused: errCode: 500, errMsg: Test ;caused: I'm caused exception.;", msg);
} |
@SuppressWarnings({"SimplifyBooleanReturn"})
public static Map<String, ParamDefinition> cleanupParams(Map<String, ParamDefinition> params) {
if (params == null || params.isEmpty()) {
return params;
}
Map<String, ParamDefinition> mapped =
params.entrySet().stream()
.collect(
MapHelper.toListMap(
Map.Entry::getKey,
p -> {
ParamDefinition param = p.getValue();
if (param.getType() == ParamType.MAP) {
MapParamDefinition mapParamDef = param.asMapParamDef();
if (mapParamDef.getValue() == null
&& (mapParamDef.getInternalMode() == InternalParamMode.OPTIONAL)) {
return mapParamDef;
}
return MapParamDefinition.builder()
.name(mapParamDef.getName())
.value(cleanupParams(mapParamDef.getValue()))
.expression(mapParamDef.getExpression())
.name(mapParamDef.getName())
.validator(mapParamDef.getValidator())
.tags(mapParamDef.getTags())
.mode(mapParamDef.getMode())
.meta(mapParamDef.getMeta())
.build();
} else {
return param;
}
}));
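// Drop optional params that carry neither a value nor an expression (or only an empty map value); required params must define one of the two.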
Map<String, ParamDefinition> filtered =
mapped.entrySet().stream()
.filter(
p -> {
ParamDefinition param = p.getValue();
if (param.getInternalMode() == InternalParamMode.OPTIONAL) {
if (param.getValue() == null && param.getExpression() == null) {
return false;
} else if (param.getType() == ParamType.MAP
&& param.asMapParamDef().getValue() != null
&& param.asMapParamDef().getValue().isEmpty()) {
return false;
} else {
return true;
}
} else {
Checks.checkTrue(
param.getValue() != null || param.getExpression() != null,
String.format(
"[%s] is a required parameter (type=[%s])",
p.getKey(), param.getType()));
return true;
}
})
.collect(MapHelper.toListMap(Map.Entry::getKey, Map.Entry::getValue));
return cleanIntermediateMetadata(filtered);
} | @Test
public void testParameterConversionRemoveInternalModeNestedMap() throws JsonProcessingException {
Map<String, ParamDefinition> allParams =
parseParamDefMap(
"{'tomergemap1': {'type': 'MAP', 'internal_mode': 'RESERVED', 'value': {'tomerge1': {'type': 'STRING','value': 'hello', 'internal_mode': 'RESERVED'}}}}");
Map<String, ParamDefinition> convertedParams = ParamsMergeHelper.cleanupParams(allParams);
assertNull(convertedParams.get("tomergemap1").asMapParamDef().getInternalMode());
assertNull(
convertedParams
.get("tomergemap1")
.asMapParamDef()
.getValue()
.get("tomerge1")
.asStringParamDef()
.getInternalMode());
} |
@PostConstruct
public void synchronize() throws IOException {
try {
synchronizeTask();
} catch (NonTransientDataAccessException | StaleStateException e) {
logger.error("Database exception while synchronizing tasks", e);
}
} | @Test
public void synchronizeTasksBetweenYamlAndDatabase() throws IOException {
Task conf1 = new Task();
conf1.setName("1");
conf1.setCron("1");
Task conf2 = new Task();
conf2.setName("2");
conf2.setCron("200");
Task conf3 = new Task();
conf3.setName("3");
conf3.setCron("3");
Task conf4 = new Task();
conf4.setName("4");
conf4.setCron("4");
List<Task> db = new ArrayList<>(); // removed conf
db.add(conf1);
db.add(conf2);
db.add(conf3);
db.add(conf4);
Mockito.when(repo.findAll()).thenReturn(db);
Mockito.when(repo.save(Mockito.isA(Task.class))).thenReturn(null);
service.synchronize();
Mockito.verify(repo, times(1)).save(any(Task.class)); // save 5
Mockito.verify(repo, times(2)).delete(any(Task.class)); // 3 + 4
} |
@Override
public String getName() {
if (_distinctResult == 1) {
return TransformFunctionType.IS_DISTINCT_FROM.getName();
}
return TransformFunctionType.IS_NOT_DISTINCT_FROM.getName();
} | @Test
public void testDistinctFromLeftNull()
throws Exception {
ExpressionContext expression =
RequestContextUtils.getExpression(String.format(_expression, INT_SV_NULL_COLUMN, INT_SV_COLUMN));
TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
Assert.assertEquals(transformFunction.getName(), _isDistinctFrom ? "is_distinct_from" : "is_not_distinct_from");
boolean[] expectedIntValues = new boolean[NUM_ROWS];
for (int i = 0; i < NUM_ROWS; i++) {
if (isEqualRow(i)) {
expectedIntValues[i] = !_isDistinctFrom;
} else if (isNotEqualRow(i)) {
expectedIntValues[i] = _isDistinctFrom;
} else if (isNullRow(i)) {
expectedIntValues[i] = _isDistinctFrom;
}
}
testTransformFunction(expression, expectedIntValues, _projectionBlock, _dataSourceMap);
} |
@Override
public List<ImportValidationFeedback> verifyRule( Object subject ) {
List<ImportValidationFeedback> feedback = new ArrayList<>();
if ( !isEnabled() || !( subject instanceof TransMeta ) ) {
return feedback;
}
TransMeta transMeta = (TransMeta) subject;
String description = transMeta.getDescription();
if ( null != description && minLength <= description.length() ) {
feedback.add( new ImportValidationFeedback(
this, ImportValidationResultType.APPROVAL, "A description is present" ) );
} else {
feedback.add( new ImportValidationFeedback(
this, ImportValidationResultType.ERROR, "A description is not present or is too short." ) );
}
return feedback;
} | @Test
public void testVerifyRule_NullParameter_DisabledRule() {
TransformationHasDescriptionImportRule importRule = getImportRule( 10, false );
List<ImportValidationFeedback> feedbackList = importRule.verifyRule( null );
assertNotNull( feedbackList );
assertTrue( feedbackList.isEmpty() );
} |
@Override
public boolean contains(PipelineConfig o) {
for (PipelineConfigs part : this.parts) {
if (part.contains(o))
return true;
}
return false;
} | @Test
public void shouldReturnTrueWhenContainsPipeline() {
PipelineConfig pipe1 = PipelineConfigMother.pipelineConfig("pipeline1");
PipelineConfigs group = new MergePipelineConfigs(
new BasicPipelineConfigs(pipe1),
new BasicPipelineConfigs());
assertThat(group.contains(pipe1), is(true));
} |
@Override
public boolean equals(final Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
final GenericRow that = (GenericRow) o;
return Objects.equals(this.values, that.values);
} | @SuppressWarnings("UnstableApiUsage")
@Test
public void testEquals() {
new EqualsTester().
addEqualityGroup(
new GenericRow(),
new GenericRow(10)
)
.addEqualityGroup(
GenericRow.genericRow(new Object())
)
.addEqualityGroup(
genericRow("nr"),
genericRow("nr")
)
.addEqualityGroup(
genericRow(1.0, 94.9238, 1.2550, 0.13242, -1.0285235),
genericRow(1.0, 94.9238, 1.2550, 0.13242, -1.0285235)
)
.testEquals();
} |
public boolean isEmpty() {
return numBytes == 0;
} | @Test
public void testEmpty() {
GapEncodedVariableLengthIntegerReader reader = reader(20);
Assert.assertFalse(reader.isEmpty());
reader = reader();
Assert.assertTrue(reader.isEmpty());
Assert.assertTrue(EMPTY_READER.isEmpty());
} |
public long createSessionFromExisting(ApplicationId applicationId,
boolean internalRedeploy,
TimeoutBudget timeoutBudget,
DeployLogger deployLogger) {
Tenant tenant = getTenant(applicationId);
SessionRepository sessionRepository = tenant.getSessionRepository();
Session fromSession = requireActiveSession(tenant, applicationId);
return sessionRepository.createSessionFromExisting(fromSession, internalRedeploy, timeoutBudget, deployLogger).getSessionId();
} | @Test
public void createFromActiveSession() {
long originalSessionId = deployApp(testApp).sessionId();
long sessionId = createSessionFromExisting(applicationId(), timeoutBudget);
ApplicationMetaData originalApplicationMetaData = getApplicationMetaData(applicationId(), originalSessionId);
ApplicationMetaData applicationMetaData = getApplicationMetaData(applicationId(), sessionId);
assertNotEquals(sessionId, originalSessionId);
assertEquals(originalApplicationMetaData.getApplicationId(), applicationMetaData.getApplicationId());
assertEquals(originalApplicationMetaData.getGeneration().longValue(), applicationMetaData.getPreviousActiveGeneration());
assertNotEquals(originalApplicationMetaData.getGeneration(), applicationMetaData.getGeneration());
} |
public TopicList getTopicsByCluster(String cluster) {
TopicList topicList = new TopicList();
try {
try {
this.lock.readLock().lockInterruptibly();
Set<String> brokerNameSet = this.clusterAddrTable.get(cluster);
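// A topic belongs to the cluster if any of the cluster's brokers hosts queue data for it.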
for (String brokerName : brokerNameSet) {
for (Entry<String, Map<String, QueueData>> topicEntry : this.topicQueueTable.entrySet()) {
String topic = topicEntry.getKey();
Map<String, QueueData> queueDataMap = topicEntry.getValue();
final QueueData qd = queueDataMap.get(brokerName);
if (qd != null) {
topicList.getTopicList().add(topic);
}
}
}
} finally {
this.lock.readLock().unlock();
}
} catch (Exception e) {
log.error("getTopicsByCluster Exception", e);
}
return topicList;
} | @Test
public void testGetTopicsByCluster() {
byte[] topicList = routeInfoManager.getTopicsByCluster("default-cluster").encode();
assertThat(topicList).isNotNull();
} |
@Override
public void clearLastScreenUrl() {
} | @Test
public void clearLastScreenUrl() {
mSensorsAPI.clearLastScreenUrl();
Assert.assertNull(mSensorsAPI.getLastScreenUrl());
} |
String load() {
return loadQuery;
} | @Test
public void testLoadIsQuoted() {
Queries queries = new Queries(mapping, idColumn, columnMetadata);
String result = queries.load();
assertEquals("SELECT * FROM \"mymapping\" WHERE \"id\" = ?", result);
} |
@Override
public List<AdminUserDO> getUserListByStatus(Integer status) {
return userMapper.selectListByStatus(status);
} | @Test
public void testGetUserListByStatus() {
// mock data
AdminUserDO user = randomAdminUserDO(o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus()));
userMapper.insert(user);
// insert a record whose status does not match
userMapper.insert(randomAdminUserDO(o -> o.setStatus(CommonStatusEnum.ENABLE.getStatus())));
// prepare parameters
Integer status = CommonStatusEnum.DISABLE.getStatus();
// invoke
List<AdminUserDO> result = userService.getUserListByStatus(status);
// assert
assertEquals(1, result.size());
assertEquals(user, result.get(0));
} |
public static MetaDataContexts create(final MetaDataPersistService persistService, final ContextManagerBuilderParameter param,
final ComputeNodeInstanceContext computeNodeInstanceContext) throws SQLException {
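// Bootstrap from local configuration when the repository holds no databases; otherwise load the persisted metadata.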
return persistService.getDatabaseMetaDataService().loadAllDatabaseNames().isEmpty() ? createByLocal(persistService, param, computeNodeInstanceContext)
: createByRepository(persistService, param, computeNodeInstanceContext);
} | @Test
void assertCreateWithProxyInstanceMetaData() throws SQLException {
when(databaseMetaDataPersistService.loadAllDatabaseNames()).thenReturn(Collections.singletonList("foo_db"));
when(metaDataPersistService.getDatabaseMetaDataService()).thenReturn(databaseMetaDataPersistService);
try (MetaDataContexts actual = MetaDataContextsFactory.create(metaDataPersistService, createContextManagerBuilderParameter(), mock(ComputeNodeInstanceContext.class, RETURNS_DEEP_STUBS))) {
assertThat(actual.getMetaData().getGlobalRuleMetaData().getRules().size(), is(1));
assertThat(actual.getMetaData().getGlobalRuleMetaData().getRules().iterator().next(), instanceOf(MockedRule.class));
assertTrue(actual.getMetaData().getDatabases().containsKey("foo_db"));
assertThat(actual.getMetaData().getDatabases().size(), is(1));
}
} |
public Map<String, String> getAllConfigPropsWithSecretsObfuscated() {
final Map<String, String> allPropsCleaned = new HashMap<>();
// build a properties map with obfuscated values for sensitive configs.
// Obfuscation is handled by ConfigDef.convertToString
allPropsCleaned.putAll(getKsqlConfigPropsWithSecretsObfuscated());
allPropsCleaned.putAll(
getKsqlStreamConfigPropsWithSecretsObfuscated().entrySet().stream().collect(
Collectors.toMap(
e -> KSQL_STREAMS_PREFIX + e.getKey(), Map.Entry::getValue
)
)
);
return Collections.unmodifiableMap(allPropsCleaned);
} | @Test
public void shouldFilterPropertiesForWhichTypeUnknown() {
final KsqlConfig ksqlConfig = new KsqlConfig(Collections.singletonMap("you.shall.not.pass", "wizard"));
assertThat(
ksqlConfig.getAllConfigPropsWithSecretsObfuscated().keySet(),
not(hasItem("you.shall.not.pass")));
} |
@Override
public double sd() {
return Math.sqrt(1 - p) / p;
} | @Test
public void testSd() {
System.out.println("sd");
GeometricDistribution instance = new GeometricDistribution(0.3);
instance.rand();
assertEquals(2.788867, instance.sd(), 1E-6);
} |
public void subscribeRegistryConfig(String serviceName) {
ConfigSubscriber subscriber;
final RegisterConfig registerConfig = PluginConfigManager.getPluginConfig(RegisterConfig.class);
final RegisterServiceCommonConfig registerCommonConfig =
PluginConfigManager.getPluginConfig(RegisterServiceCommonConfig.class);
if (registerCommonConfig.getRegisterType() == RegisterType.SERVICE_COMB
&& registerConfig.isEnableSpringRegister()) {
// CSE was used
subscriber = new CseGroupConfigSubscriber(serviceName, new RegistryConfigListener(),
"SpringCloudRegistry");
} else {
// Other scenarios
subscriber = new DefaultGroupConfigSubscriber(serviceName, new RegistryConfigListener(),
"SpringCloudRegistry");
}
subscriber.subscribe();
fixGrace();
} | @Test
public void subscribeRegistryConfig() {
final RegistryConfigSubscribeServiceImpl mock = Mockito.mock(RegistryConfigSubscribeServiceImpl.class);
String serviceName = "test";
mock.subscribeRegistryConfig(serviceName);
Mockito.verify(mock, Mockito.times(1)).subscribeRegistryConfig(serviceName);
} |
public static InetSocketAddress getInetSocketAddressFromRpcURL(String rpcURL) throws Exception {
// Pekko URLs have the form schema://systemName@host:port/.... if it's a remote Pekko URL
try {
final Address address = getAddressFromRpcURL(rpcURL);
if (address.host().isDefined() && address.port().isDefined()) {
return new InetSocketAddress(address.host().get(), (int) address.port().get());
} else {
throw new MalformedURLException();
}
} catch (MalformedURLException e) {
throw new Exception("Could not retrieve InetSocketAddress from Pekko URL " + rpcURL);
}
} | @Test
void getHostFromRpcURLHandlesAkkaSslTcpProtocol() throws Exception {
final String url = "pekko.ssl.tcp://flink@localhost:1234/user/jobmanager";
final InetSocketAddress expected = new InetSocketAddress("localhost", 1234);
final InetSocketAddress result = PekkoUtils.getInetSocketAddressFromRpcURL(url);
assertThat(result).isEqualTo(expected);
} |
@Override
public void flush() throws IOException {
mLocalOutputStream.flush();
} | @Test
@PrepareForTest(COSOutputStream.class)
public void testFlush() throws Exception {
PowerMockito.whenNew(BufferedOutputStream.class)
.withArguments(any(DigestOutputStream.class)).thenReturn(mLocalOutputStream);
COSOutputStream stream = new COSOutputStream("testBucketName", "testKey", mCosClient, sConf
.getList(PropertyKey.TMP_DIRS));
stream.flush();
stream.close();
assertEquals(mEtag, stream.getContentHash().get());
Mockito.verify(mLocalOutputStream).flush();
} |
@Override
public ResultSet executeQuery(String sql)
throws SQLException {
validateState();
try {
if (!DriverUtils.queryContainsLimitStatement(sql)) {
sql += " " + LIMIT_STATEMENT + " " + _maxRows;
}
String enabledSql = DriverUtils.enableQueryOptions(sql, _connection.getQueryOptions());
ResultSetGroup resultSetGroup = _session.execute(enabledSql);
if (resultSetGroup.getResultSetCount() == 0) {
_resultSet = PinotResultSet.empty();
return _resultSet;
}
_resultSet = new PinotResultSet(resultSetGroup.getResultSet(0));
return _resultSet;
} catch (PinotClientException e) {
throw new SQLException(String.format("Failed to execute query : %s", sql), e);
}
} | @Test
public void testSetEnableNullHandling()
throws Exception {
Properties props = new Properties();
props.put(QueryOptionKey.ENABLE_NULL_HANDLING, "true");
PinotConnection pinotConnection =
new PinotConnection(props, "dummy", _dummyPinotClientTransport, "dummy", _dummyPinotControllerTransport);
Statement statement = pinotConnection.createStatement();
Assert.assertNotNull(statement);
statement.executeQuery(BASIC_TEST_QUERY);
String expectedSql =
DriverUtils.createSetQueryOptionString(QueryOptionKey.ENABLE_NULL_HANDLING, true) + BASIC_TEST_QUERY;
Assert.assertEquals(_dummyPinotClientTransport.getLastQuery().substring(0, expectedSql.length()), expectedSql);
} |
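Note the implicit row cap in executeQuery: a statement without a LIMIT clause gets one appended from _maxRows before query options are applied. The rewrite in isolation (a sketch; the 1000 cap is an assumed example value):

String sql = "SELECT * FROM t";
// "SELECT * FROM t"         -> "SELECT * FROM t LIMIT 1000"
// "SELECT * FROM t LIMIT 5" -> left unchanged
if (!DriverUtils.queryContainsLimitStatement(sql)) {
    sql += " LIMIT 1000";
}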
@Override
public Collection<Subscriber> getFuzzySubscribers(String namespaceId, String serviceName) {
Collection<Subscriber> result = new LinkedList<>(
subscriberServiceLocal.getFuzzySubscribers(namespaceId, serviceName));
if (memberManager.getServerList().size() > 1) {
getSubscribersFromRemotes(namespaceId, serviceName, result);
}
return result;
} | @Test
void testGetFuzzySubscribersByServiceWithLocal() {
Collection<Subscriber> actual = aggregation.getFuzzySubscribers(service);
assertEquals(1, actual.size());
assertEquals("local", actual.iterator().next().getAddrStr());
} |
private String mainHelp() {
final TTable tTable = new TTable(new TTable.ColumnDefine[] {
new TTable.ColumnDefine(TTable.Align.RIGHT), new TTable.ColumnDefine(80, false, TTable.Align.LEFT)
});
final List<Class<?>> classes = commandHelper.getAllCommandClass();
Collections.sort(classes, new Comparator<Class<?>>() {
@Override
public int compare(Class<?> o1, Class<?> o2) {
final Integer o1s = o1.getAnnotation(Cmd.class).sort();
final Integer o2s = o2.getAnnotation(Cmd.class).sort();
return o1s.compareTo(o2s);
}
});
for (Class<?> clazz : classes) {
if (clazz.isAnnotationPresent(Cmd.class)) {
final Cmd cmd = clazz.getAnnotation(Cmd.class);
tTable.addRow(cmd.name(), cmd.summary());
}
}
return tTable.padding(1).rendering();
} | @Test
void testMainHelp() {
Help help = new Help(FrameworkModel.defaultModel());
String output = help.execute(Mockito.mock(CommandContext.class), null);
assertThat(output, containsString("greeting"));
assertThat(output, containsString("help"));
assertThat(output, containsString("ls"));
assertThat(output, containsString("online"));
assertThat(output, containsString("offline"));
assertThat(output, containsString("quit"));
} |
public void reportCheckpointMetrics(
long id, ExecutionAttemptID attemptId, CheckpointMetrics metrics) {
statsTracker.reportIncompleteStats(id, attemptId, metrics);
} | @Test
void testAbortedCheckpointStatsUpdatedAfterFailure() throws Exception {
testReportStatsAfterFailure(
1L,
(coordinator, execution, metrics) -> {
coordinator.reportCheckpointMetrics(1L, execution.getAttemptId(), metrics);
return null;
});
} |
@Override // the mappedStatementId parameter is currently unused; later, results could be cached per mappedStatementId + DataPermission
public List<DataPermissionRule> getDataPermissionRule(String mappedStatementId) {
// 1. No data permission rules configured
if (CollUtil.isEmpty(rules)) {
return Collections.emptyList();
}
// 2. No annotation present: enabled by default
DataPermission dataPermission = DataPermissionContextHolder.get();
if (dataPermission == null) {
return rules;
}
// 3. Annotation present but disabled
if (!dataPermission.enable()) {
return Collections.emptyList();
}
// 4. Annotation present: include only the selected rules
if (ArrayUtil.isNotEmpty(dataPermission.includeRules())) {
return rules.stream().filter(rule -> ArrayUtil.contains(dataPermission.includeRules(), rule.getClass()))
.collect(Collectors.toList()); // rule lists are typically small, so a HashSet lookup is not worth it
}
// 5. Annotation present: exclude the selected rules
if (ArrayUtil.isNotEmpty(dataPermission.excludeRules())) {
return rules.stream().filter(rule -> !ArrayUtil.contains(dataPermission.excludeRules(), rule.getClass()))
.collect(Collectors.toList()); // rule lists are typically small, so a HashSet lookup is not worth it
}
// 6. Annotation present: apply all rules
return rules;
} | @Test
public void testGetDataPermissionRule_03() {
// prepare arguments
String mappedStatementId = randomString();
// mock the annotation context
DataPermissionContextHolder.add(AnnotationUtils.findAnnotation(TestClass03.class, DataPermission.class));
// invoke
List<DataPermissionRule> result = dataPermissionRuleFactory.getDataPermissionRule(mappedStatementId);
// assert
assertTrue(result.isEmpty());
} |
public static Node getDOM(String text) {
log.debug("Start : getDOM1");
Node node = getParser()
.parseDOM(
new ByteArrayInputStream(
text.getBytes(StandardCharsets.UTF_8)), null);
if (log.isDebugEnabled()) {
log.debug("node : {}", node);
}
log.debug("End : getDOM1");
return node;
} | @Test
public void testGetDom() throws Exception {
HtmlParsingUtils.getDOM("<HTML></HTML>");
HtmlParsingUtils.getDOM("");
} |
@Override
public void openExisting(final ProcessorContext context, final long streamTime) {
metricsRecorder.init(ProcessorContextUtils.metricsImpl(context), context.taskId());
super.openExisting(context, streamTime);
} | @Test
public void shouldUpdateSegmentFileNameFromOldColonFormatToNewFormat() throws Exception {
final String storeDirectoryPath = stateDirectory.getAbsolutePath() + File.separator + storeName;
final File storeDirectory = new File(storeDirectoryPath);
//noinspection ResultOfMethodCallIgnored
storeDirectory.mkdirs();
for (int segmentId = 0; segmentId < NUM_SEGMENTS; ++segmentId) {
final File oldSegment = new File(storeDirectoryPath + File.separator + storeName + ":" + segmentId * (RETENTION_PERIOD / (NUM_SEGMENTS - 1)));
//noinspection ResultOfMethodCallIgnored
Files.createFile(oldSegment.toPath());
}
segments.openExisting(context, -1L);
for (int segmentId = 0; segmentId < NUM_SEGMENTS; ++segmentId) {
final File newSegment = new File(storeDirectoryPath + File.separator + storeName + "." + segmentId * (RETENTION_PERIOD / (NUM_SEGMENTS - 1)));
assertTrue(newSegment.exists());
}
} |
public KiePMMLDroolsType declareType(Field field) {
String generatedType = getGeneratedClassName(field.getName());
String fieldName = field.getName();
String fieldType = field.getDataType().value();
fieldTypeMap.put(fieldName, new KiePMMLOriginalTypeGeneratedType(fieldType, generatedType));
return new KiePMMLDroolsType(generatedType, DATA_TYPE.byName(fieldType).getMappedClass().getSimpleName());
} | @Test
void declareType() {
DataField dataField = getTypeDataField();
final Map<String, KiePMMLOriginalTypeGeneratedType> fieldTypeMap = new HashMap<>();
KiePMMLDroolsType retrieved = KiePMMLDataDictionaryASTFactory.factory(fieldTypeMap).declareType(dataField);
assertThat(retrieved).isNotNull();
commonVerifyTypeDeclarationDescr(dataField, fieldTypeMap, retrieved);
} |
@Override
public Result apply(PathData item, int depth) throws IOException {
String name = getPath(item).getName();
if (!caseSensitive) {
name = StringUtils.toLowerCase(name);
}
if (globPattern.matches(name)) {
return Result.PASS;
} else {
return Result.FAIL;
}
} | @Test
public void applyGlob() throws IOException {
setup("n*e");
PathData item = new PathData("/directory/path/name", mockFs.getConf());
assertEquals(Result.PASS, name.apply(item, -1));
} |
public static Checksum crc32c()
{
return Crc32c.INSTANCE;
} | @EnabledForJreRange(min = JAVA_9)
@Test
void crc32c()
{
assertSame(Crc32c.INSTANCE, Checksums.crc32c());
} |
public static Config resolve(Config config) {
var resolveSystemProperty = System.getenv("KORA_SYSTEM_PROPERTIES_RESOLVE_ENABLED");
if (resolveSystemProperty == null) {
resolveSystemProperty = System.getProperty("kora.system.properties.resolve.enabled", "true");
}
var ctx = new ResolveContext(config, new ArrayDeque<>(), Boolean.parseBoolean(resolveSystemProperty));
var newRoot = resolve(ctx, config.root());
if (newRoot == config.root()) {
return config;
}
return new SimpleConfig(config.origin(), newRoot);
} | @Test
void testNullableReference() {
var config = fromMap(Map.of(
"reference", "${?object.field}"
)).resolve();
assertThat(config.get("reference")).isInstanceOf(ConfigValue.NullValue.class);
} |
public void add(QueryCacheEventData entry) {
events.add(entry);
} | @Test
public void testAdd() {
batchEventData.add(otherEventData);
assertEquals(2, batchEventData.size());
Collection<QueryCacheEventData> events = batchEventData.getEvents();
assertContains(events, eventData);
assertContains(events, otherEventData);
} |
public static String normalizeFilter(String projection, String filter) {
if (isNullOrWhitespaceOnly(projection) || isNullOrWhitespaceOnly(filter)) {
return filter;
}
SqlSelect sqlSelect = parseProjectionExpression(projection);
if (sqlSelect.getSelectList().isEmpty()) {
return filter;
}
Map<String, SqlNode> calculatedExpression = new HashMap<>();
for (SqlNode sqlNode : sqlSelect.getSelectList()) {
if (sqlNode instanceof SqlBasicCall) {
SqlBasicCall sqlBasicCall = (SqlBasicCall) sqlNode;
if (SqlKind.AS.equals(sqlBasicCall.getOperator().kind)) {
List<SqlNode> operandList = sqlBasicCall.getOperandList();
if (operandList.size() == 2) {
SqlIdentifier alias = (SqlIdentifier) operandList.get(1);
String name = alias.names.get(alias.names.size() - 1);
SqlNode expression = operandList.get(0);
calculatedExpression.put(name, expression);
}
}
}
}
SqlNode sqlFilter = parseFilterExpression(filter).getWhere();
sqlFilter = rewriteExpression(sqlFilter, calculatedExpression);
if (sqlFilter != null) {
return sqlFilter.toString();
} else {
return filter;
}
} | @Test
public void testNormalizeFilter() {
Assertions.assertThat(TransformParser.normalizeFilter("a, b, c, d", "a > 0 and b > 0"))
.isEqualTo("`a` > 0 AND `b` > 0");
Assertions.assertThat(TransformParser.normalizeFilter("a, b, c, d", null)).isEqualTo(null);
Assertions.assertThat(
TransformParser.normalizeFilter(
"abs(a) as cal_a, char_length(b) as cal_b, c, d",
"a > 4 and cal_a > 8 and cal_b < 17 and c != d"))
.isEqualTo("`a` > 4 AND ABS(`a`) > 8 AND CHAR_LENGTH(`b`) < 17 AND `c` <> `d`");
Assertions.assertThat(
TransformParser.normalizeFilter(
"x, y, z, 1 - x as u, 1 - y as v, 1 - z as w",
"concat(u, concat(v, concat(w, x), y), z) != 10"))
.isEqualTo(
"`concat`(1 - `x`, `concat`(1 - `y`, `concat`(1 - `z`, `x`), `y`), `z`) <> 10");
} |
public DoubleArrayAsIterable usingExactEquality() {
return new DoubleArrayAsIterable(EXACT_EQUALITY_CORRESPONDENCE, iterableSubject());
} | @Test
public void usingExactEquality_containsAnyOf_primitiveDoubleArray_success() {
assertThat(array(1.1, 2.2, 3.3)).usingExactEquality().containsAnyOf(array(99.99, 2.2));
} |
public Future<Set<Integer>> brokersInUse(Reconciliation reconciliation, Vertx vertx, TlsPemIdentity coTlsPemIdentity, AdminClientProvider adminClientProvider) {
try {
String bootstrapHostname = KafkaResources.bootstrapServiceName(reconciliation.name()) + "." + reconciliation.namespace() + ".svc:" + KafkaCluster.REPLICATION_PORT;
LOGGER.debugCr(reconciliation, "Creating AdminClient for Kafka cluster in namespace {}", reconciliation.namespace());
Admin kafkaAdmin = adminClientProvider.createAdminClient(bootstrapHostname, coTlsPemIdentity.pemTrustSet(), coTlsPemIdentity.pemAuthIdentity());
return topicNames(reconciliation, vertx, kafkaAdmin)
.compose(names -> describeTopics(reconciliation, vertx, kafkaAdmin, names))
.compose(topicDescriptions -> {
Set<Integer> brokersWithPartitionReplicas = new HashSet<>();
for (TopicDescription td : topicDescriptions.values()) {
for (TopicPartitionInfo pd : td.partitions()) {
for (org.apache.kafka.common.Node broker : pd.replicas()) {
brokersWithPartitionReplicas.add(broker.id());
}
}
}
kafkaAdmin.close();
return Future.succeededFuture(brokersWithPartitionReplicas);
}).recover(error -> {
LOGGER.warnCr(reconciliation, "Failed to get list of brokers in use", error);
kafkaAdmin.close();
return Future.failedFuture(error);
});
} catch (KafkaException e) {
LOGGER.warnCr(reconciliation, "Failed to check if broker contains any partition replicas", e);
return Future.failedFuture(e);
}
} | @Test
public void testKafkaClientFailure(VertxTestContext context) {
Admin admin = mock(Admin.class);
AdminClientProvider mock = mock(AdminClientProvider.class);
when(mock.createAdminClient(anyString(), any(), any())).thenReturn(admin);
// Mock list topics
when(admin.listTopics(any())).thenThrow(new KafkaException("Test error ..."));
// Get brokers in use
Checkpoint checkpoint = context.checkpoint();
BrokersInUseCheck operations = new BrokersInUseCheck();
operations.brokersInUse(RECONCILIATION, vertx, DUMMY_IDENTITY, mock)
.onComplete(context.failing(e -> {
assertThat(e.getMessage(), is("Test error ..."));
checkpoint.flag();
}));
} |
public static TypeRef<?> getElementType(TypeRef<?> typeRef) {
Type type = typeRef.getType();
if (type instanceof ParameterizedType) {
ParameterizedType parameterizedType = (ParameterizedType) type;
if (parameterizedType.getRawType() == List.class) { // fastpath
Type[] actualTypeArguments = (parameterizedType).getActualTypeArguments();
Preconditions.checkState(actualTypeArguments.length == 1);
Type t = actualTypeArguments[0];
if (t.getClass() == Class.class) { // if t is a wildcard type, its upper bound has to be resolved instead.
return TypeRef.of(t);
}
}
}
if (typeRef.getType().getTypeName().startsWith("scala.collection")) {
return ScalaTypes.getElementType(typeRef);
}
TypeRef<?> supertype = ((TypeRef<? extends Iterable<?>>) typeRef).getSupertype(Iterable.class);
return supertype.resolveType(ITERATOR_RETURN_TYPE).resolveType(NEXT_RETURN_TYPE);
} | @Test
public void getSubclassElementTypeTest() {
abstract class A implements Collection<List<String>> {}
Assert.assertEquals(
TypeUtils.getElementType(TypeRef.of(A.class)), new TypeRef<List<String>>() {});
} |
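For context, a small usage sketch of the resolution above (assumes the same TypeRef/TypeUtils API as the test; comments note which branch each call exercises):

// Fast path: a raw List with a concrete type argument resolves directly.
TypeRef<?> direct = TypeUtils.getElementType(new TypeRef<List<String>>() {});
// direct equals TypeRef.of(String.class)

// Fallback: other Iterable subtypes resolve through iterator()/next().
TypeRef<?> viaIterable = TypeUtils.getElementType(new TypeRef<Set<Integer>>() {});
// viaIterable equals TypeRef.of(Integer.class)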
public static void verifyHostnames(String[] names) throws UnknownHostException {
for (String name: names) {
if (name == null) {
throw new UnknownHostException("null hostname found");
}
// The first check supports URL formats (e.g. hdfs://, etc.).
// java.net.URI requires a schema, so we add a dummy one if it doesn't
// have one already.
URI uri = null;
try {
uri = new URI(name);
if (uri.getHost() == null) {
uri = new URI("http://" + name);
}
} catch (URISyntaxException e) {
uri = null;
}
if (uri == null || uri.getHost() == null) {
throw new UnknownHostException(name + " is not a valid Inet address");
}
}
} | @Test
public void testVerifyHostnamesNoException() throws UnknownHostException {
String[] names = {"valid.host.com", "1.com"};
NetUtils.verifyHostnames(names);
} |
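A minimal driver exercising both the accepting and rejecting paths (a sketch; assumes Hadoop's NetUtils as in the test above):

import java.net.UnknownHostException;
import org.apache.hadoop.net.NetUtils;

public class VerifyHostnamesDemo {
    public static void main(String[] args) throws UnknownHostException {
        // URL-formatted names pass because the host component is extracted from the URI
        NetUtils.verifyHostnames(new String[] {"hdfs://valid.host.com", "1.com"});
        System.out.println("all hostnames valid");

        try {
            NetUtils.verifyHostnames(new String[] {null});
        } catch (UnknownHostException e) {
            System.out.println("rejected: " + e.getMessage()); // "null hostname found"
        }
    }
}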
@Override
public Integer call() throws Exception {
return this.call(
Template.class,
yamlFlowParser,
modelValidator,
(Object object) -> {
Template template = (Template) object;
return template.getNamespace() + " / " + template.getId();
},
(Object object) -> Collections.emptyList()
);
} | @Test
void runLocal() {
URL directory = TemplateValidateCommandTest.class.getClassLoader().getResource("invalidsTemplates/template.yml");
ByteArrayOutputStream out = new ByteArrayOutputStream();
System.setErr(new PrintStream(out));
try (ApplicationContext ctx = ApplicationContext.run(Map.of("kestra.templates.enabled", "true"), Environment.CLI, Environment.TEST)) {
String[] args = {
"--local",
directory.getPath()
};
Integer call = PicocliRunner.call(TemplateValidateCommand.class, ctx, args);
assertThat(call, is(1));
assertThat(out.toString(), containsString("Unable to parse template"));
assertThat(out.toString(), containsString("must not be empty"));
}
} |
public static ReleaseInfo getReleaseInfo() {
return LazyInit.INSTANCE;
} | @Test
public void getReleaseInfo() throws Exception {
ReleaseInfo info = ReleaseInfo.getReleaseInfo();
// Validate name
assertThat(info.getName(), containsString("Beam"));
// Validate semantic version
String version = info.getVersion();
String pattern = "\\d+\\.\\d+\\.\\d+.*";
assertTrue(
String.format("%s does not match pattern %s", version, pattern), version.matches(pattern));
} |
public boolean isReadOnly(final String topicName) {
return readOnlyTopicsPattern.matcher(topicName).matches();
} | @Test
public void shouldReturnFalseOnNonReadOnlyTopics() {
// Given
final List<String> topicNames = ImmutableList.of(
"topic_prefix_", "_suffix_topic"
);
// When / Then
topicNames.forEach(topic -> {
// When
final boolean isReadOnly = internalTopics.isReadOnly(topic);
// Then
assertThat("Should return false on non read-only topic: " + topic,
isReadOnly, is(false));
});
} |
public void write(D datum, Encoder out) throws IOException {
Objects.requireNonNull(out, "Encoder cannot be null");
try {
write(root, datum, out);
} catch (TracingNullPointException | TracingClassCastException | TracingAvroTypeException e) {
throw e.summarize(root);
}
} | @Test
void allowWritingPrimitives() throws IOException {
Schema doubleType = Schema.create(Schema.Type.DOUBLE);
Schema.Field field = new Schema.Field("double", doubleType);
List<Schema.Field> fields = Collections.singletonList(field);
Schema schema = Schema.createRecord("test", "doc", "", false, fields);
GenericRecord record = new GenericData.Record(schema);
record.put("double", 456.4);
record.put("double", 100000L);
record.put("double", 444);
ByteArrayOutputStream bao = new ByteArrayOutputStream();
GenericDatumWriter<GenericRecord> writer = new GenericDatumWriter<>(schema);
Encoder encoder = EncoderFactory.get().jsonEncoder(schema, bao);
writer.write(record, encoder);
} |
@Override
public Result invoke(Invoker<?> invoker, Invocation invocation) throws RpcException {
return invoker.invoke(invocation);
} | @SuppressWarnings("unchecked")
@Test
void testJavaException() {
ExceptionFilter exceptionFilter = new ExceptionFilter();
RpcInvocation invocation = new RpcInvocation(
"sayHello", DemoService.class.getName(), "", new Class<?>[] {String.class}, new Object[] {"world"});
AppResponse appResponse = new AppResponse();
appResponse.setException(new IllegalArgumentException("java"));
Invoker<DemoService> invoker = mock(Invoker.class);
when(invoker.invoke(invocation)).thenReturn(appResponse);
when(invoker.getInterface()).thenReturn(DemoService.class);
Result newResult = exceptionFilter.invoke(invoker, invocation);
Assertions.assertEquals(appResponse.getException(), newResult.getException());
} |
@Deprecated
public PassiveCompletableFuture<TaskExecutionState> deployLocalTask(
@NonNull TaskGroup taskGroup) {
return deployLocalTask(
taskGroup, Thread.currentThread().getContextClassLoader(), emptyList());
} | @Test
public void testFinish() {
TaskExecutionService taskExecutionService = server.getTaskExecutionService();
long sleepTime = 300;
AtomicBoolean stop = new AtomicBoolean(false);
AtomicBoolean futureMark = new AtomicBoolean(false);
TestTask testTask1 = new TestTask(stop, LOGGER, sleepTime, true);
TestTask testTask2 = new TestTask(stop, LOGGER, sleepTime, false);
final CompletableFuture<TaskExecutionState> completableFuture =
taskExecutionService.deployLocalTask(
new TaskGroupDefaultImpl(
new TaskGroupLocation(
jobId, pipeLineId, FLAKE_ID_GENERATOR.newId()),
"ts",
Lists.newArrayList(testTask1, testTask2)));
completableFuture.whenComplete((unused, throwable) -> futureMark.set(true));
stop.set(true);
await().atMost(sleepTime + 10000, TimeUnit.MILLISECONDS)
.untilAsserted(
() -> {
assertEquals(FINISHED, completableFuture.get().getExecutionState());
});
assertTrue(futureMark.get());
} |
@Override
public InterpreterResult interpret(String st, InterpreterContext context)
throws InterpreterException {
LOGGER.info("Running SQL query: '{}' over Pandas DataFrame", st);
return pythonInterpreter.interpret(
"z.show(pysqldf('" + st.trim() + "'))", context);
} | @Test
public void badSqlSyntaxFails() throws InterpreterException {
// when
context = getInterpreterContext();
InterpreterResult ret = pandasSqlInterpreter.interpret("select wrong syntax", context);
// then
assertNotNull(ret, "Interpreter returned 'null'");
assertEquals(InterpreterResult.Code.ERROR, ret.code(), context.out.toString());
} |
public <T> ProducerBuilder<T> createProducerBuilder(String topic, Schema<T> schema, String producerName) {
ProducerBuilder<T> builder = client.newProducer(schema);
if (defaultConfigurer != null) {
defaultConfigurer.accept(builder);
}
builder.blockIfQueueFull(true)
.enableBatching(true)
.batchingMaxPublishDelay(10, TimeUnit.MILLISECONDS)
.hashingScheme(HashingScheme.Murmur3_32Hash) //
.messageRoutingMode(MessageRoutingMode.CustomPartition)
.messageRouter(FunctionResultRouter.of())
// set send timeout to be infinity to prevent potential deadlock with consumer
// that might happen when consumer is blocked due to unacked messages
.sendTimeout(0, TimeUnit.SECONDS)
.topic(topic);
if (producerName != null) {
builder.producerName(producerName);
}
if (producerConfig != null) {
if (producerConfig.getCompressionType() != null) {
builder.compressionType(producerConfig.getCompressionType());
} else {
// TODO: address this inconsistency.
// PR https://github.com/apache/pulsar/pull/19470 removed the default compression type of LZ4
// from the top level. This default is only used if producer config is provided.
builder.compressionType(CompressionType.LZ4);
}
if (producerConfig.getMaxPendingMessages() != null && producerConfig.getMaxPendingMessages() != 0) {
builder.maxPendingMessages(producerConfig.getMaxPendingMessages());
}
if (producerConfig.getMaxPendingMessagesAcrossPartitions() != null
&& producerConfig.getMaxPendingMessagesAcrossPartitions() != 0) {
builder.maxPendingMessagesAcrossPartitions(producerConfig.getMaxPendingMessagesAcrossPartitions());
}
if (producerConfig.getCryptoConfig() != null) {
builder.cryptoKeyReader(crypto.keyReader);
builder.cryptoFailureAction(crypto.failureAction);
for (String encryptionKeyName : crypto.getEncryptionKeys()) {
builder.addEncryptionKey(encryptionKeyName);
}
}
if (producerConfig.getBatchBuilder() != null) {
if (producerConfig.getBatchBuilder().equals("KEY_BASED")) {
builder.batcherBuilder(BatcherBuilder.KEY_BASED);
} else {
builder.batcherBuilder(BatcherBuilder.DEFAULT);
}
}
}
return builder;
} | @Test
public void testCreateProducerBuilderWithDefaultConfigurer() {
ProducerBuilderFactory builderFactory = new ProducerBuilderFactory(pulsarClient, null, null,
builder -> builder.property("key", "value"));
builderFactory.createProducerBuilder("topic", Schema.STRING, "producerName");
verifyCommon();
verify(producerBuilder).property("key", "value");
verifyNoMoreInteractions(producerBuilder);
} |
CallIdSequence newCallIdSequence(ConcurrencyDetection concurrencyDetection) {
return CallIdFactory.newCallIdSequence(maxConcurrentInvocations, backoffTimeoutMs, concurrencyDetection);
} | @Test
public void newCallIdSequence_whenBackPressureDisabled() {
Config config = new Config();
config.setProperty(BACKPRESSURE_ENABLED.getName(), "false");
HazelcastProperties hazelcastProperties = new HazelcastProperties(config);
BackpressureRegulator backpressureRegulator = new BackpressureRegulator(hazelcastProperties, logger);
CallIdSequence callIdSequence = backpressureRegulator.newCallIdSequence(ConcurrencyDetection.createDisabled());
assertInstanceOf(CallIdSequenceWithoutBackpressure.class, callIdSequence);
} |
@Override
public byte[] fromConnectData(final String topic, final Schema schema, final Object value) {
if (this.schema == null) {
throw new UnsupportedOperationException("ProtobufNoSRConverter is an internal "
+ "converter to ksqldb. It should not be instantiated via reflection through a no-arg "
+ "constructor.");
}
try {
final ProtobufSchemaAndValue schemaAndValue = protobufData.fromConnectData(schema, value);
final Object v = schemaAndValue.getValue();
if (v == null) {
return null;
} else if (v instanceof Message) {
return serializer.serialize((Message) v);
} else {
throw new DataException("Unsupported object of class " + v.getClass().getName());
}
} catch (SerializationException e) {
throw new DataException(String.format(
"Failed to serialize Protobuf data from topic %s :",
topic
), e);
} catch (InvalidConfigurationException e) {
throw new ConfigException(
String.format("Failed to access Protobuf data from topic %s : %s", topic, e.getMessage())
);
}
} | @Test(expected = UnsupportedOperationException.class)
public void shouldThrowExceptionWhenUsedWithNoArgConstructor2() {
// Given
final ProtobufNoSRConverter protobufNoSRConverter = new ProtobufNoSRConverter();
// When
protobufNoSRConverter.fromConnectData("topic", Schema.STRING_SCHEMA, "test");
} |
@VisibleForTesting
String validateMail(String mail) {
if (StrUtil.isEmpty(mail)) {
throw exception(MAIL_SEND_MAIL_NOT_EXISTS);
}
return mail;
} | @Test
public void testValidateMail_notExists() {
// prepare arguments
// mock methods
// invoke and assert the expected exception
assertServiceException(() -> mailSendService.validateMail(null),
MAIL_SEND_MAIL_NOT_EXISTS);
} |
public static TriRpcStatus getStatus(Throwable throwable) {
return getStatus(throwable, null);
} | @Test
void getStatus() {
StatusRpcException rpcException = new StatusRpcException(TriRpcStatus.INTERNAL);
Assertions.assertEquals(TriRpcStatus.INTERNAL.code, TriRpcStatus.getStatus(rpcException).code);
} |
public synchronized void setLevel(Level newLevel) {
if (level == newLevel) {
// nothing to do;
return;
}
if (newLevel == null && isRootLogger()) {
throw new IllegalArgumentException("The level of the root logger cannot be set to null");
}
level = newLevel;
if (newLevel == null) {
effectiveLevelInt = parent.effectiveLevelInt;
newLevel = parent.getEffectiveLevel();
} else {
effectiveLevelInt = newLevel.levelInt;
}
if (childrenList != null) {
int len = childrenList.size();
for (int i = 0; i < len; i++) {
Logger child = (Logger) childrenList.get(i);
// tell child to handle parent levelInt change
child.handleParentLevelChange(effectiveLevelInt);
}
}
// inform listeners
loggerContext.fireOnLevelChange(this, newLevel);
} | @Test
public void fluentAPIAtDisabledDebugLevelShouldReturnNOPLoggingEventBuilder() throws Exception {
root.setLevel(Level.INFO);
LoggingEventBuilder leb = loggerTest.atLevel(org.slf4j.event.Level.DEBUG);
assertEquals(NOPLoggingEventBuilder.class, leb.getClass());
} |
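A minimal sketch of the effective-level inheritance this method maintains (assumes a plain logback LoggerContext; logger names are illustrative):

import ch.qos.logback.classic.Level;
import ch.qos.logback.classic.Logger;
import ch.qos.logback.classic.LoggerContext;

public class LevelInheritanceDemo {
    public static void main(String[] args) {
        LoggerContext ctx = new LoggerContext();
        Logger root = ctx.getLogger("ROOT");
        Logger child = ctx.getLogger("com.example");

        root.setLevel(Level.INFO);
        System.out.println(child.getEffectiveLevel()); // INFO, inherited from root

        child.setLevel(Level.DEBUG);  // explicit level stops inheritance
        root.setLevel(Level.WARN);
        System.out.println(child.getEffectiveLevel()); // DEBUG

        child.setLevel(null);         // null reverts the child to inheriting
        System.out.println(child.getEffectiveLevel()); // WARN
    }
}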
public static SortOrder buildSortOrder(Table table) {
return buildSortOrder(table.schema(), table.spec(), table.sortOrder());
} | @Test
public void testSortOrderClusteringSomePartitionFields() {
PartitionSpec spec = PartitionSpec.builderFor(SCHEMA).identity("category").day("ts").build();
SortOrder order =
SortOrder.builderFor(SCHEMA).withOrderId(1).asc("category").desc("id").build();
SortOrder expected =
SortOrder.builderFor(SCHEMA)
.withOrderId(1)
.asc(Expressions.day("ts"))
.asc("category")
.desc("id")
.build();
assertThat(SortOrderUtil.buildSortOrder(SCHEMA, spec, order))
.as("Should add spec fields as prefix")
.isEqualTo(expected);
} |
public CompletableFuture<Void> reserveUsernameHash(
final Account account,
final byte[] reservedUsernameHash,
final Duration ttl) {
final Timer.Sample sample = Timer.start();
// if there is an existing old reservation it will be cleaned up via ttl. Save it so we can restore it to the local
// account if the update fails though.
final Optional<byte[]> maybeOriginalReservation = account.getReservedUsernameHash();
account.setReservedUsernameHash(reservedUsernameHash);
// Normally when a username is reserved for the first time we reserve it for the provided TTL. But if the
// reservation is for a username that we already have a reservation for (for example, if it's reclaimable, or there
// is a hold) we might own that reservation for longer anyways, so we should preserve the original TTL in that case.
// What we'd really like to do is set expirationTime = max(oldExpirationTime, now + ttl), but dynamodb doesn't
// support that. Instead, we'll set expiration if it's greater than the existing expiration, otherwise retry
final long expirationTime = clock.instant().plus(ttl).getEpochSecond();
return tryReserveUsernameHash(account, reservedUsernameHash, expirationTime)
.exceptionallyCompose(ExceptionUtils.exceptionallyHandler(TtlConflictException.class, ttlConflict ->
// retry (once) with the returned expiration time
tryReserveUsernameHash(account, reservedUsernameHash, ttlConflict.getExistingExpirationSeconds())))
.whenComplete((response, throwable) -> {
sample.stop(RESERVE_USERNAME_TIMER);
if (throwable == null) {
account.setVersion(account.getVersion() + 1);
} else {
account.setReservedUsernameHash(maybeOriginalReservation.orElse(null));
}
});
} | @Test
void switchBetweenReservedUsernameHashes() {
final Account account = generateAccount("+18005551111", UUID.randomUUID(), UUID.randomUUID());
createAccount(account);
accounts.reserveUsernameHash(account, USERNAME_HASH_1, Duration.ofDays(1)).join();
assertArrayEquals(account.getReservedUsernameHash().orElseThrow(), USERNAME_HASH_1);
assertThat(account.getUsernameHash()).isEmpty();
accounts.reserveUsernameHash(account, USERNAME_HASH_2, Duration.ofDays(1)).join();
assertArrayEquals(account.getReservedUsernameHash().orElseThrow(), USERNAME_HASH_2);
assertThat(account.getUsernameHash()).isEmpty();
final Map<String, AttributeValue> usernameConstraintRecord1 = getUsernameConstraintTableItem(USERNAME_HASH_1);
final Map<String, AttributeValue> usernameConstraintRecord2 = getUsernameConstraintTableItem(USERNAME_HASH_2);
assertThat(usernameConstraintRecord1).containsKey(Accounts.UsernameTable.KEY_USERNAME_HASH);
assertThat(usernameConstraintRecord2).containsKey(Accounts.UsernameTable.KEY_USERNAME_HASH);
assertThat(usernameConstraintRecord1).containsKey(Accounts.UsernameTable.ATTR_TTL);
assertThat(usernameConstraintRecord2).containsKey(Accounts.UsernameTable.ATTR_TTL);
clock.pin(Instant.EPOCH.plus(Duration.ofMinutes(1)));
accounts.reserveUsernameHash(account, USERNAME_HASH_1, Duration.ofDays(1)).join();
assertArrayEquals(account.getReservedUsernameHash().orElseThrow(), USERNAME_HASH_1);
assertThat(account.getUsernameHash()).isEmpty();
final Map<String, AttributeValue> newUsernameConstraintRecord1 = getUsernameConstraintTableItem(USERNAME_HASH_1);
assertThat(newUsernameConstraintRecord1).containsKey(Accounts.UsernameTable.KEY_USERNAME_HASH);
assertThat(newUsernameConstraintRecord1).containsKey(Accounts.UsernameTable.ATTR_TTL);
assertThat(usernameConstraintRecord1.get(Accounts.UsernameTable.ATTR_TTL))
.isNotEqualTo(newUsernameConstraintRecord1.get(Accounts.UsernameTable.ATTR_TTL));
} |
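The comment in reserveUsernameHash describes emulating expirationTime = max(oldExpirationTime, now + ttl) with a conditional write plus one retry, since DynamoDB cannot express the max directly. The pattern in isolation (hypothetical store interface; not the actual DynamoDB calls):

// Hypothetical sketch of the single-retry pattern:
// write with the preferred expiration; if the store rejects it because a later
// expiration already exists, retry once with that value so reservations never shrink.
long preferred = clock.instant().plus(ttl).getEpochSecond();
try {
    store.writeReservation(usernameHash, preferred); // fails when existing expiration > preferred
} catch (TtlConflictException conflict) {
    store.writeReservation(usernameHash, conflict.getExistingExpirationSeconds());
}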
public static List<Long> getLongListOrNull(String property, JsonNode node) {
if (!node.has(property) || node.get(property).isNull()) {
return null;
}
return ImmutableList.<Long>builder().addAll(new JsonLongArrayIterator(property, node)).build();
} | @Test
public void getLongListOrNull() throws JsonProcessingException {
assertThat(JsonUtil.getLongListOrNull("items", JsonUtil.mapper().readTree("{}"))).isNull();
assertThat(JsonUtil.getLongListOrNull("items", JsonUtil.mapper().readTree("{\"items\": null}")))
.isNull();
assertThatThrownBy(
() ->
JsonUtil.getLongListOrNull(
"items", JsonUtil.mapper().readTree("{\"items\": [13, \"23\"]}")))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Cannot parse long from non-long value in items: \"23\"");
assertThat(
JsonUtil.getLongListOrNull(
"items", JsonUtil.mapper().readTree("{\"items\": [23, 45]}")))
.containsExactlyElementsOf(Arrays.asList(23L, 45L));
} |
@Override
public String getNext() {
if (alreadyUsed) {
throw new KsqlServerException("QueryIdGenerator has not been updated with new offset");
}
alreadyUsed = true;
return String.valueOf(nextId);
} | @Test
public void shouldReturnZeroIdForFirstQuery() {
assertThat(generator.getNext(), is("0"));
} |
@Override
protected Future<ReconcileResult<Service>> internalUpdate(Reconciliation reconciliation, String namespace, String name, Service current, Service desired) {
try {
if (current.getSpec() != null && desired.getSpec() != null) {
if (("NodePort".equals(current.getSpec().getType()) && "NodePort".equals(desired.getSpec().getType()))
|| ("LoadBalancer".equals(current.getSpec().getType()) && "LoadBalancer".equals(desired.getSpec().getType()))) {
patchNodePorts(current, desired);
patchHealthCheckPorts(current, desired);
patchAnnotations(current, desired);
patchLoadBalancerClass(current, desired);
}
patchDualStackNetworking(current, desired);
}
return super.internalUpdate(reconciliation, namespace, name, current, desired);
} catch (Exception e) {
LOGGER.errorCr(reconciliation, "Caught exception while patching {} {} in namespace {}", resourceKind, name, namespace, e);
return Future.failedFuture(e);
}
} | @Test
void testCattleAnnotationPatching() {
KubernetesClient client = mock(KubernetesClient.class);
Map<String, String> currentAnnotations = Map.of(
"field.cattle.io~1publicEndpoints", "foo",
"cattle.io/test", "bar",
"some-other", "baz"
);
Service current = new ServiceBuilder()
.withNewMetadata()
.withNamespace(NAMESPACE)
.withName(RESOURCE_NAME)
.withAnnotations(currentAnnotations)
.endMetadata()
.withNewSpec()
.withType("LoadBalancer")
.endSpec()
.build();
Service desired = new ServiceBuilder()
.withNewMetadata()
.withNamespace(NAMESPACE)
.withName(RESOURCE_NAME)
.endMetadata()
.withNewSpec()
.withType("LoadBalancer")
.endSpec()
.build();
ServiceOperator op = new ServiceOperator(vertx, client);
op.internalUpdate(Reconciliation.DUMMY_RECONCILIATION, NAMESPACE, RESOURCE_NAME, current, desired);
assertThat(desired.getMetadata().getAnnotations().get("field.cattle.io~1publicEndpoints"), equalTo("foo"));
assertThat(desired.getMetadata().getAnnotations().get("cattle.io/test"), equalTo("bar"));
assertThat(desired.getMetadata().getAnnotations().containsKey("some-other"), is(false));
} |
@Override
public Iterator<T> iterator()
{
return new LinkedQueueIterator(Direction.ASCENDING);
} | @Test
public void testEarlyRemoveFails()
{
LinkedDeque<Integer> q = new LinkedDeque<>(Arrays.asList(1, 2, 3));
try
{
q.iterator().remove();
fail("remove() before next() should have thrown IllegalStateException");
}
catch (IllegalStateException e)
{
// Expected
}
} |
public ClusterAclVersionInfo getBrokerClusterAclInfo(final String addr,
final long timeoutMillis) throws RemotingCommandException, InterruptedException, RemotingTimeoutException,
RemotingSendRequestException, RemotingConnectException, MQBrokerException {
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_BROKER_CLUSTER_ACL_INFO, null);
RemotingCommand response = this.remotingClient.invokeSync(MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr), request, timeoutMillis);
assert response != null;
switch (response.getCode()) {
case ResponseCode.SUCCESS: {
GetBrokerAclConfigResponseHeader responseHeader =
(GetBrokerAclConfigResponseHeader) response.decodeCommandCustomHeader(GetBrokerAclConfigResponseHeader.class);
ClusterAclVersionInfo clusterAclVersionInfo = new ClusterAclVersionInfo();
clusterAclVersionInfo.setClusterName(responseHeader.getClusterName());
clusterAclVersionInfo.setBrokerName(responseHeader.getBrokerName());
clusterAclVersionInfo.setBrokerAddr(responseHeader.getBrokerAddr());
clusterAclVersionInfo.setAclConfigDataVersion(DataVersion.fromJson(responseHeader.getVersion(), DataVersion.class));
HashMap<String, Object> dataVersionMap = JSON.parseObject(responseHeader.getAllAclFileVersion(), HashMap.class);
Map<String, DataVersion> allAclConfigDataVersion = new HashMap<>(dataVersionMap.size(), 1);
for (Map.Entry<String, Object> entry : dataVersionMap.entrySet()) {
allAclConfigDataVersion.put(entry.getKey(), DataVersion.fromJson(JSON.toJSONString(entry.getValue()), DataVersion.class));
}
clusterAclVersionInfo.setAllAclConfigDataVersion(allAclConfigDataVersion);
return clusterAclVersionInfo;
}
default:
break;
}
throw new MQBrokerException(response.getCode(), response.getRemark(), addr);
} | @Test
public void assertGetBrokerClusterAclInfo() throws MQBrokerException, RemotingException, InterruptedException {
mockInvokeSync();
GetBrokerAclConfigResponseHeader responseHeader = mock(GetBrokerAclConfigResponseHeader.class);
when(responseHeader.getBrokerName()).thenReturn(brokerName);
when(responseHeader.getBrokerAddr()).thenReturn(defaultBrokerAddr);
when(responseHeader.getClusterName()).thenReturn(clusterName);
when(responseHeader.getAllAclFileVersion()).thenReturn("{\"key\":{\"stateVersion\":1}}");
setResponseHeader(responseHeader);
ClusterAclVersionInfo actual = mqClientAPI.getBrokerClusterAclInfo(defaultNsAddr, defaultTimeout);
assertNotNull(actual);
assertEquals(brokerName, actual.getBrokerName());
assertEquals(defaultBrokerAddr, actual.getBrokerAddr());
assertEquals(clusterName, actual.getClusterName());
assertEquals(1, actual.getAllAclConfigDataVersion().size());
assertNull(actual.getAclConfigDataVersion());
} |
public String getQuery() throws Exception {
return getQuery(weatherConfiguration.getLocation());
} | @Test
public void testFindInCircleQuery() throws Exception {
WeatherConfiguration weatherConfiguration = new WeatherConfiguration();
weatherConfiguration.setLat(LATITUDE);
weatherConfiguration.setLon(LONGITUDE);
weatherConfiguration.setCnt(25);
weatherConfiguration.setMode(WeatherMode.JSON);
weatherConfiguration.setLanguage(WeatherLanguage.nl);
weatherConfiguration.setAppid(APPID);
WeatherQuery weatherQuery = new WeatherQuery(weatherConfiguration);
weatherConfiguration.setGeoLocationProvider(geoLocationProvider);
String query = weatherQuery.getQuery();
assertThat(query, is(
"http://api.openweathermap.org/data/2.5/find?lat=51.98&lon=4.13&lang=nl&cnt=25&APPID=9162755b2efa555823cfe0451d7fff38"));
} |
@Override
public String getName() {
return "dijkstrabi|ch";
} | @Test
public void testBaseGraph() {
BaseGraph graph = createGHStorage();
RoutingAlgorithmTest.initDirectedAndDiffSpeed(graph, carSpeedEnc);
// do CH preparation for car
Weighting weighting = new SpeedWeighting(carSpeedEnc);
prepareCH(graph, CHConfig.nodeBased(weighting.getName(), weighting));
// use base graph for solving normal Dijkstra
Path p1 = new RoutingAlgorithmFactorySimple().createAlgo(graph, weighting, new AlgorithmOptions()).calcPath(0, 3);
assertEquals(IntArrayList.from(0, 4, 6, 7, 5, 3), p1.calcNodes());
assertEquals(1261.72, p1.getDistance(), 1e-2, p1.toString());
assertEquals(30953, p1.getTime(), p1.toString());
} |
@Override
public void handle(TaskEvent event) {
if (LOG.isDebugEnabled()) {
LOG.debug("Processing " + event.getTaskID() + " of type "
+ event.getType());
}
try {
writeLock.lock();
TaskStateInternal oldState = getInternalState();
try {
stateMachine.doTransition(event.getType(), event);
} catch (InvalidStateTransitionException e) {
LOG.error("Can't handle this event at current state for "
+ this.taskId, e);
internalError(event.getType());
}
if (oldState != getInternalState()) {
LOG.info(taskId + " Task Transitioned from " + oldState + " to "
+ getInternalState());
}
} finally {
writeLock.unlock();
}
} | @Test
public void testSpeculativeMapFetchFailure() {
// Setup a scenario where speculative task wins, first attempt killed
mockTask = createMockTask(TaskType.MAP);
runSpeculativeTaskAttemptSucceeds(TaskEventType.T_ATTEMPT_KILLED);
assertEquals(2, taskAttempts.size());
// speculative attempt retroactively fails from fetch failures
mockTask.handle(new TaskTAttemptFailedEvent(
taskAttempts.get(1).getAttemptId()));
assertTaskScheduledState();
assertEquals(3, taskAttempts.size());
} |
@Override
public boolean checkPassword(CharSequence password) {
Objects.requireNonNull(password);
checkState(keyCrypter != null, () ->
"key chain not encrypted");
return checkAESKey(keyCrypter.deriveKey(password));
} | @Test(expected = IllegalStateException.class)
public void checkPasswordNoKeys() {
chain.checkPassword("test");
} |
public void unsetMeta(String key)
{
metadata.remove(key);
} | @Test
public void testUnsetMeta()
{
ZCert cert = new ZCert();
cert.setMeta("version", "1");
cert.unsetMeta("version");
assertThat(cert.getMeta("version"), nullValue());
} |