focal_method | test_case |
---|---|
@ConstantFunction.List(list = {
@ConstantFunction(name = "day", argTypes = {DATETIME}, returnType = TINYINT),
@ConstantFunction(name = "day", argTypes = {DATE}, returnType = TINYINT)
})
public static ConstantOperator day(ConstantOperator arg) {
return ConstantOperator.createTinyInt((byte) arg.getDatetime().getDayOfMonth());
} | @Test
public void day() {
assertEquals(23, ScalarOperatorFunctions.day(O_DT_20150323_092355).getTinyInt());
} |
@Override
public Batch toBatch() {
return new SparkBatch(
sparkContext, table, readConf, groupingKeyType(), taskGroups(), expectedSchema, hashCode());
} | @TestTemplate
public void testUnpartitionedBucketString() throws Exception {
createUnpartitionedTable(spark, tableName);
SparkScanBuilder builder = scanBuilder();
BucketFunction.BucketString function = new BucketFunction.BucketString();
UserDefinedScalarFunc udf = toUDF(function, expressions(intLit(5), fieldRef("data")));
Predicate predicate = new Predicate("<=", expressions(udf, intLit(2)));
pushFilters(builder, predicate);
Batch scan = builder.build().toBatch();
assertThat(scan.planInputPartitions().length).isEqualTo(10);
// NOT LTEQ
builder = scanBuilder();
predicate = new Not(predicate);
pushFilters(builder, predicate);
scan = builder.build().toBatch();
assertThat(scan.planInputPartitions().length).isEqualTo(10);
} |
public static void clean(
Object func, ExecutionConfig.ClosureCleanerLevel level, boolean checkSerializable) {
clean(func, level, checkSerializable, Collections.newSetFromMap(new IdentityHashMap<>()));
} | @Test
void testWriteReplaceRecursive() {
WithWriteReplace writeReplace = new WithWriteReplace(new WithWriteReplace.Payload("text"));
assertThat(writeReplace.getPayload().getRaw()).isEqualTo("text");
ClosureCleaner.clean(writeReplace, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true);
} |
public static Schema reassignOrRefreshIds(Schema schema, Schema idSourceSchema) {
return reassignOrRefreshIds(schema, idSourceSchema, true);
} | @Test
public void testReassignOrRefreshIdsCaseInsensitive() {
Schema schema =
new Schema(
Lists.newArrayList(
required(1, "FIELD1", Types.IntegerType.get()),
required(2, "FIELD2", Types.IntegerType.get())));
Schema sourceSchema =
new Schema(
Lists.newArrayList(
required(1, "field1", Types.IntegerType.get()),
required(2, "field2", Types.IntegerType.get())));
final Schema actualSchema = TypeUtil.reassignOrRefreshIds(schema, sourceSchema, false);
final Schema expectedSchema =
new Schema(
Lists.newArrayList(
required(1, "FIELD1", Types.IntegerType.get()),
required(2, "FIELD2", Types.IntegerType.get())));
assertThat(actualSchema.asStruct()).isEqualTo(expectedSchema.asStruct());
} |
public CompletableFuture<Optional<Account>> getByAccountIdentifierAsync(final UUID uuid) {
return checkRedisThenAccountsAsync(
getByUuidTimer,
() -> redisGetByAccountIdentifierAsync(uuid),
() -> accounts.getByAccountIdentifierAsync(uuid)
);
} | @Test
void testGetAccountByUuidBrokenCacheAsync() {
UUID uuid = UUID.randomUUID();
UUID pni = UUID.randomUUID();
Account account = AccountsHelper.generateTestAccount("+14152222222", uuid, pni, new ArrayList<>(), new byte[UnidentifiedAccessUtil.UNIDENTIFIED_ACCESS_KEY_LENGTH]);
when(asyncCommands.get(eq("Account3::" + uuid)))
.thenReturn(MockRedisFuture.failedFuture(new RedisException("Connection lost!")));
when(asyncCommands.setex(any(), anyLong(), any())).thenReturn(MockRedisFuture.completedFuture("OK"));
when(accounts.getByAccountIdentifierAsync(eq(uuid)))
.thenReturn(CompletableFuture.completedFuture(Optional.of(account)));
Optional<Account> retrieved = accountsManager.getByAccountIdentifierAsync(uuid).join();
assertTrue(retrieved.isPresent());
assertSame(retrieved.get(), account);
verify(asyncCommands).get(eq("Account3::" + uuid));
verify(asyncCommands).setex(eq("AccountMap::" + pni), anyLong(), eq(uuid.toString()));
verify(asyncCommands).setex(eq("Account3::" + uuid), anyLong(), anyString());
verifyNoMoreInteractions(asyncCommands);
verify(accounts).getByAccountIdentifierAsync(eq(uuid));
verifyNoMoreInteractions(accounts);
} |
@Override
public void removeSensor(final Sensor sensor) {
Objects.requireNonNull(sensor, "Sensor is null");
metrics.removeSensor(sensor.name());
final Sensor parent = parentSensors.remove(sensor);
if (parent != null) {
metrics.removeSensor(parent.name());
}
} | @Test
public void testRemoveNullSensor() {
assertThrows(NullPointerException.class, () -> streamsMetrics.removeSensor(null));
} |
@Override
public int count(double startScore, boolean startScoreInclusive, double endScore, boolean endScoreInclusive) {
return get(countAsync(startScore, startScoreInclusive, endScore, endScoreInclusive));
} | @Test
public void testCount() {
RScoredSortedSet<String> set = redisson.getScoredSortedSet("simple");
set.add(0, "1");
set.add(1, "4");
set.add(2, "2");
set.add(3, "5");
set.add(4, "3");
assertThat(set.count(0, true, 3, false)).isEqualTo(3);
} |
@Override
public Stream<ColumnName> resolveSelectStar(
final Optional<SourceName> sourceName
) {
if (sourceName.isPresent() && !sourceName.equals(getSourceName())) {
throw new IllegalArgumentException("Expected alias of " + getAlias()
+ ", but was " + sourceName.get());
}
// Note: the 'value' columns include the key columns at this point:
return orderColumns(getSchema().value(), getSchema());
} | @Test
public void shouldResolveSelectStartToAllColumnsIncludingWindowBounds() {
// Given:
givenWindowedSource(true);
givenNodeWithMockSource();
// When:
final Stream<ColumnName> result = node.resolveSelectStar(Optional.empty());
// Then:
final List<ColumnName> columns = result.collect(Collectors.toList());
assertThat(
columns,
contains(K0, K1, WINDOWSTART_NAME, WINDOWEND_NAME, FIELD1, FIELD2, FIELD3, TIMESTAMP_FIELD, KEY)
);
} |
public void formatTo(DataTable table, StringBuilder appendable) {
try {
formatTo(table, (Appendable) appendable);
} catch (IOException e) {
throw new CucumberDataTableException(e.getMessage(), e);
}
} | @Test
void should_print_to_string_builder() {
DataTable table = tableOf("hello");
StringBuilder stringBuilder = new StringBuilder();
formatter.formatTo(table, stringBuilder);
assertEquals("| hello |\n", stringBuilder.toString());
} |
@Override
public void close() {
for (XATransactionDataSource each : cachedDataSources.values()) {
each.close();
}
cachedDataSources.clear();
if (null != xaTransactionManagerProvider) {
xaTransactionManagerProvider.close();
}
} | @Test
void assertClose() {
xaTransactionManager.close();
Map<String, XATransactionDataSource> cachedSingleXADataSourceMap = getCachedDataSources();
assertTrue(cachedSingleXADataSourceMap.isEmpty());
} |
@Override
public synchronized void setConf(Configuration conf) {
super.setConf(conf);
MetadataStore store;
try {
store = getMetadataStore(conf);
} catch (MetadataException e) {
throw new RuntimeException(METADATA_STORE_INSTANCE + " failed to init BookieId list");
}
bookieMappingCache = store.getMetadataCache(BookiesRackConfiguration.class);
store.registerListener(this::handleUpdates);
try {
var racksWithHost = bookieMappingCache.get(BOOKIE_INFO_ROOT_PATH)
.thenApply(optRes -> optRes.orElseGet(BookiesRackConfiguration::new))
.get();
for (var bookieMapping : racksWithHost.values()) {
for (String address : bookieMapping.keySet()) {
bookieAddressListLastTime.add(BookieId.parse(address));
}
if (LOG.isDebugEnabled()) {
LOG.debug("BookieRackAffinityMapping init, bookieAddressListLastTime {}",
bookieAddressListLastTime);
}
}
updateRacksWithHost(racksWithHost);
} catch (ExecutionException | InterruptedException e) {
LOG.error("Failed to update rack info. ", e);
throw new RuntimeException(e);
}
watchAvailableBookies();
} | @Test
public void testMultipleMetadataServiceUris() {
BookieRackAffinityMapping mapping1 = new BookieRackAffinityMapping();
ClientConfiguration bkClientConf1 = new ClientConfiguration();
bkClientConf1.setProperty("metadataServiceUri", "memory:local,memory:local");
bkClientConf1.setProperty("zkTimeout", "100000");
mapping1.setBookieAddressResolver(BookieSocketAddress.LEGACY_BOOKIEID_RESOLVER);
// This previously threw an exception when the metadataServiceUri was a comma delimited list.
mapping1.setConf(bkClientConf1);
} |
@Override
List<DiscoveryNode> resolveNodes() {
if (serviceName != null && !serviceName.isEmpty()) {
logger.fine("Using service name to discover nodes.");
return getSimpleDiscoveryNodes(client.endpointsByName(serviceName));
} else if (serviceLabel != null && !serviceLabel.isEmpty()) {
logger.fine("Using service label to discover nodes.");
return getSimpleDiscoveryNodes(client.endpointsByServiceLabel(serviceLabel, serviceLabelValue));
} else if (podLabel != null && !podLabel.isEmpty()) {
logger.fine("Using pod label to discover nodes.");
return getSimpleDiscoveryNodes(client.endpointsByPodLabel(podLabel, podLabelValue));
}
return getSimpleDiscoveryNodes(client.endpoints());
} | @Test
public void resolveWithPodLabelWhenNodeWithPodLabel() {
// given
List<Endpoint> endpoints = createEndpoints(2);
given(client.endpointsByPodLabel(POD_LABEL, POD_LABEL_VALUE)).willReturn(endpoints);
KubernetesApiEndpointResolver sut = new KubernetesApiEndpointResolver(LOGGER, null, 0, null, null,
POD_LABEL, POD_LABEL_VALUE, null, client);
// when
List<DiscoveryNode> nodes = sut.resolveNodes();
// then
assertEquals(1, nodes.size());
assertEquals(2, nodes.get(0).getPrivateAddress().getPort());
} |
@Override
public void register(@NonNull ThreadPoolPlugin plugin) {
mainLock.runWithWriteLock(() -> {
String id = plugin.getId();
Assert.isTrue(!isRegistered(id), "The plugin with id [" + id + "] has been registered");
registeredPlugins.put(id, plugin);
forQuickIndexes(quickIndex -> quickIndex.addIfPossible(plugin));
plugin.start();
});
} | @Test
public void testGetAllPluginsOfType() {
manager.register(new TestExecuteAwarePlugin());
manager.register(new TestRejectedAwarePlugin());
Assert.assertEquals(1, manager.getAllPluginsOfType(TestExecuteAwarePlugin.class).size());
Assert.assertEquals(1, manager.getAllPluginsOfType(TestRejectedAwarePlugin.class).size());
Assert.assertEquals(2, manager.getAllPluginsOfType(ThreadPoolPlugin.class).size());
} |
@Override
public void createSubnet(Subnet osSubnet) {
checkNotNull(osSubnet, ERR_NULL_SUBNET);
checkArgument(!Strings.isNullOrEmpty(osSubnet.getId()), ERR_NULL_SUBNET_ID);
checkArgument(!Strings.isNullOrEmpty(osSubnet.getNetworkId()), ERR_NULL_SUBNET_NET_ID);
checkArgument(!Strings.isNullOrEmpty(osSubnet.getCidr()), ERR_NULL_SUBNET_CIDR);
osNetworkStore.createSubnet(osSubnet);
log.info(String.format(MSG_SUBNET, osSubnet.getCidr(), MSG_CREATED));
} | @Test(expected = IllegalArgumentException.class)
public void testCreateSubnetWithNullNetworkId() {
final Subnet testSubnet = NeutronSubnet.builder()
.cidr("192.168.0.0/24")
.build();
testSubnet.setId(SUBNET_ID);
target.createSubnet(testSubnet);
} |
public Num getK() {
return k;
} | @Test
public void bollingerBandsUpperUsingSMAAndStandardDeviation() {
BollingerBandsMiddleIndicator bbmSMA = new BollingerBandsMiddleIndicator(sma);
StandardDeviationIndicator standardDeviation = new StandardDeviationIndicator(closePrice, barCount);
BollingerBandsUpperIndicator bbuSMA = new BollingerBandsUpperIndicator(bbmSMA, standardDeviation);
assertNumEquals(2, bbuSMA.getK());
assertNumEquals(1, bbuSMA.getValue(0));
assertNumEquals(2.5, bbuSMA.getValue(1));
assertNumEquals(3.633, bbuSMA.getValue(2));
assertNumEquals(4.633, bbuSMA.getValue(3));
assertNumEquals(4.2761, bbuSMA.getValue(4));
assertNumEquals(4.6094, bbuSMA.getValue(5));
assertNumEquals(5.633, bbuSMA.getValue(6));
assertNumEquals(5.2761, bbuSMA.getValue(7));
assertNumEquals(5.633, bbuSMA.getValue(8));
assertNumEquals(4.2761, bbuSMA.getValue(9));
BollingerBandsUpperIndicator bbuSMAwithK = new BollingerBandsUpperIndicator(bbmSMA, standardDeviation,
numFunction.apply(1.5));
assertNumEquals(1.5, bbuSMAwithK.getK());
assertNumEquals(1, bbuSMAwithK.getValue(0));
assertNumEquals(2.25, bbuSMAwithK.getValue(1));
assertNumEquals(3.2247, bbuSMAwithK.getValue(2));
assertNumEquals(4.2247, bbuSMAwithK.getValue(3));
assertNumEquals(4.0404, bbuSMAwithK.getValue(4));
assertNumEquals(4.3737, bbuSMAwithK.getValue(5));
assertNumEquals(5.2247, bbuSMAwithK.getValue(6));
assertNumEquals(5.0404, bbuSMAwithK.getValue(7));
assertNumEquals(5.2247, bbuSMAwithK.getValue(8));
assertNumEquals(4.0404, bbuSMAwithK.getValue(9));
} |
public void registerClass( String key, String className ) {
extendedClasses.put( key, className );
} | @Test
public void testRegisterClass() throws Exception {
assertTrue( dialog.extendedClasses.isEmpty() );
dialog.registerClass( "MyClass", "org.pentaho.test.MyClass" );
assertFalse( dialog.extendedClasses.isEmpty() );
assertEquals( "org.pentaho.test.MyClass", dialog.extendedClasses.get( "MyClass" ) );
} |
public String generate() {
return this.generate(false);
} | @Test
void testGenerate() throws IOException {
AdaptiveClassCodeGenerator generator = new AdaptiveClassCodeGenerator(HasAdaptiveExt.class, "adaptive");
String value = generator.generate();
URL url = getClass().getResource("/org/apache/dubbo/common/extension/adaptive/HasAdaptiveExt$Adaptive");
try (InputStream inputStream = url.openStream()) {
String content = IOUtils.read(new InputStreamReader(inputStream, StandardCharsets.UTF_8));
// On the Windows platform, content read from the resource contains \r delimiters
content = content.replaceAll("\r", "");
assertTrue(content.contains(value));
}
} |
@Override
public void setRole(Dpid dpid, RoleState role) {
final OpenFlowSwitch sw = getSwitch(dpid);
if (sw == null) {
log.debug("Switch not connected. Ignoring setRole({}, {})", dpid, role);
return;
}
sw.setRole(role);
} | @Test
public void testRoleSetting() {
agent.addConnectedSwitch(dpid2, switch2);
// check that state can be changed for a connected switch
assertThat(switch2.getRole(), is(RoleState.MASTER));
controller.setRole(dpid2, RoleState.EQUAL);
assertThat(switch2.getRole(), is(RoleState.EQUAL));
// check that changing state on an unconnected switch does not crash
controller.setRole(dpid3, RoleState.SLAVE);
} |
public String orderClause(AmountRequest amountRequest) {
return orderClause(amountRequest, ORDER_TERM_TO_SQL_STRING);
} | @Test
void mapWithSomeIllegalStuff2() {
final AmountRequest pageRequest = new AmountRequest("updatedAt:\"delete * from jobtable\",createdAt:DESC", 2);
assertThatThrownBy(() -> amountMapper.orderClause(pageRequest))
.isInstanceOf(IllegalArgumentException.class);
} |
@Override
protected Object getContent(ScmGetRequest request) {
GithubScm.validateUserHasPushPermission(request.getApiUrl(), request.getCredentials().getPassword().getPlainText(), request.getOwner(), request.getRepo());
String url = String.format("%s/repos/%s/%s/contents/%s",
request.getApiUrl(),
request.getOwner(),
request.getRepo(),
request.getPath());
if(request.getBranch() != null){ //if branch is present fetch this file from branch
url += "?ref="+request.getBranch();
}
try {
Map ghContent = HttpRequest.get(url)
.withAuthorizationToken(request.getCredentials().getPassword().getPlainText())
.to(Map.class);
if(ghContent == null){
throw new ServiceException.UnexpectedErrorException("Failed to load file: "+request.getPath());
}
String base64Data = (String)ghContent.get("content");
// JENKINS-47887 - this content contains \n which breaks IE11
base64Data = base64Data == null ? null : base64Data.replace("\n", "");
return new GithubFile(new GitContent.Builder()
.sha((String)ghContent.get("sha"))
.name((String)ghContent.get("name"))
.repo(request.getRepo())
.owner(request.getOwner())
.path(request.getPath())
.base64Data(base64Data)
.build());
} catch (IOException e) {
throw new ServiceException.UnexpectedErrorException(String.format("Failed to load file %s: %s", request.getPath(),e.getMessage()), e);
}
} | @Test
public void getContentForOrgFolderGHE() throws UnirestException {
String credentialId = createGithubEnterpriseCredential();
StaplerRequest staplerRequest = mockStapler(GithubEnterpriseScm.ID);
MultiBranchProject mbp = mockMbp(credentialId, user, GithubEnterpriseScm.DOMAIN_NAME);
GithubFile content = (GithubFile) new GithubScmContentProvider().getContent(staplerRequest, mbp);
assertEquals("Jenkinsfile", content.getContent().getName());
assertEquals("e23b8ef5c2c4244889bf94db6c05cc08ea138aef", content.getContent().getSha());
assertEquals("PR-demo", content.getContent().getRepo());
assertEquals("cloudbeers", content.getContent().getOwner());
} |
@SuppressWarnings("MagicConstant")
@Override
public int getTransactionIsolation() throws SQLException {
return databaseConnectionManager.getTransactionIsolation().orElseGet(() -> transactionIsolation);
} | @Test
void assertGetTransactionIsolationWithoutCachedConnections() throws SQLException {
try (ShardingSphereConnection connection = new ShardingSphereConnection(DefaultDatabase.LOGIC_NAME, mockContextManager())) {
assertThat(connection.getTransactionIsolation(), is(Connection.TRANSACTION_READ_UNCOMMITTED));
}
} |
@SneakyThrows
@Override
public Integer call() throws Exception {
super.call();
PicocliRunner.call(App.class, "sys", "database", "--help");
return 0;
} | @Test
void runWithNoParam() {
ByteArrayOutputStream out = new ByteArrayOutputStream();
System.setOut(new PrintStream(out));
try (ApplicationContext ctx = ApplicationContext.builder().deduceEnvironment(false).start()) {
String[] args = {};
Integer call = PicocliRunner.call(DatabaseCommand.class, ctx, args);
assertThat(call, is(0));
assertThat(out.toString(), containsString("Usage: kestra sys database"));
}
} |
public boolean accept(DefaultIssue issue, Component component) {
if (component.getType() != FILE || (exclusionPatterns.isEmpty() && inclusionPatterns.isEmpty())) {
return true;
}
if (isExclude(issue, component)) {
return false;
}
return isInclude(issue, component);
} | @Test
public void ignore_and_include_same_rule_and_component() {
IssueFilter underTest = newIssueFilter(newSettings(
asList("xoo:x1", "**/xoo/File1*"),
asList("xoo:x1", "**/xoo/File1*")));
assertThat(underTest.accept(ISSUE_1, COMPONENT_1)).isFalse();
assertThat(underTest.accept(ISSUE_1, COMPONENT_2)).isFalse();
// Issues on the other rule are accepted
assertThat(underTest.accept(ISSUE_2, COMPONENT_1)).isTrue();
assertThat(underTest.accept(ISSUE_2, COMPONENT_2)).isTrue();
} |
public SQLTranslatorContext translate(final String sql, final List<Object> parameters, final QueryContext queryContext,
final DatabaseType storageType, final ShardingSphereDatabase database, final RuleMetaData globalRuleMetaData) {
DatabaseType sqlParserType = queryContext.getSqlStatementContext().getDatabaseType();
if (sqlParserType.equals(storageType) || null == storageType) {
return new SQLTranslatorContext(sql, parameters);
}
try {
return translator.translate(sql, parameters, queryContext, storageType, database, globalRuleMetaData);
} catch (final SQLTranslationException ex) {
if (useOriginalSQLWhenTranslatingFailed) {
return new SQLTranslatorContext(sql, parameters);
}
throw ex;
}
} | @Test
void assertNotUseOriginalSQLWhenTranslatingFailed() {
QueryContext queryContext = mock(QueryContext.class, RETURNS_DEEP_STUBS);
DatabaseType sqlParserType = TypedSPILoader.getService(DatabaseType.class, "PostgreSQL");
when(queryContext.getSqlStatementContext().getDatabaseType()).thenReturn(sqlParserType);
DatabaseType storageType = TypedSPILoader.getService(DatabaseType.class, "MySQL");
assertThrows(UnsupportedTranslatedDatabaseException.class, () -> new SQLTranslatorRule(new SQLTranslatorRuleConfiguration("FIXTURE", new Properties(), false)).translate(
"ERROR: select 1", Collections.emptyList(), queryContext, storageType, mock(ShardingSphereDatabase.class), mock(RuleMetaData.class)));
} |
protected T executeAutoCommitFalse(Object[] args) throws Exception {
try {
TableRecords beforeImage = beforeImage();
T result = statementCallback.execute(statementProxy.getTargetStatement(), args);
TableRecords afterImage = afterImage(beforeImage);
prepareUndoLog(beforeImage, afterImage);
return result;
} catch (TableMetaException e) {
LOGGER.error("table meta will be refreshed later, due to TableMetaException, table:{}, column:{}",
e.getTableName(), e.getColumnName());
statementProxy.getConnectionProxy().getDataSourceProxy().tableMetaRefreshEvent();
throw e;
}
} | @Test
public void testExecuteAutoCommitFalse() throws Exception {
Mockito.when(connectionProxy.getContext())
.thenReturn(new ConnectionContext());
PreparedStatementProxy statementProxy = Mockito.mock(PreparedStatementProxy.class);
Mockito.when(statementProxy.getConnectionProxy())
.thenReturn(connectionProxy);
SQLInsertRecognizer sqlInsertRecognizer = Mockito.mock(SQLInsertRecognizer.class);
TableMeta tableMeta = Mockito.mock(TableMeta.class);
executor = Mockito.spy(new OracleInsertExecutor(statementProxy, (statement, args) -> {
return null;
}, sqlInsertRecognizer));
Mockito.when(executor.getDbType()).thenReturn(JdbcConstants.ORACLE);
Mockito.doReturn(tableMeta).when(executor).getTableMeta();
Mockito.when(tableMeta.getPrimaryKeyOnlyName()).thenReturn(Collections.singletonList("id"));
TableRecords tableRecords = Mockito.mock(TableRecords.class);
Mockito.doReturn(tableRecords).when(executor).afterImage(Mockito.any());
Assertions.assertNull(executor.executeAutoCommitFalse(null));
} |
public final void sendResponse(Object value) {
OperationResponseHandler responseHandler = getOperationResponseHandler();
if (responseHandler == null) {
if (value instanceof Throwable throwable) {
// in case of a throwable, we want the stacktrace.
getLogger().warning("Missing responseHandler for " + toString(), throwable);
} else {
getLogger().warning("Missing responseHandler for " + toString() + " value[" + value + "]");
}
} else {
responseHandler.sendResponse(this, value);
}
} | @Test
public void sendResponse_whenResponseHandlerIsNull_andThrowableValue_thenNoNPE() {
Operation op = new DummyOperation();
op.sendResponse(new Exception());
} |
public EndpointResponse streamQuery(
final KsqlSecurityContext securityContext,
final KsqlRequest request,
final CompletableFuture<Void> connectionClosedFuture,
final Optional<Boolean> isInternalRequest,
final MetricsCallbackHolder metricsCallbackHolder,
final Context context
) {
throwIfNotConfigured();
activenessRegistrar.updateLastRequestTime();
final PreparedStatement<?> statement = parseStatement(request);
CommandStoreUtil.httpWaitForCommandSequenceNumber(
commandQueue, request, commandQueueCatchupTimeout);
return handleStatement(securityContext, request, statement, connectionClosedFuture,
isInternalRequest, metricsCallbackHolder, context);
} | @Test
public void shouldSuggestAlternativesIfPrintTopicDoesNotExist() {
// Given:
final PrintTopic cmd = mock(PrintTopic.class);
when(cmd.getTopic()).thenReturn("TEST_TOPIC");
print = PreparedStatement.of("print", cmd);
when(mockStatementParser.<PrintTopic>parseSingleStatement(any()))
.thenReturn(print);
when(mockKafkaTopicClient.isTopicExists(any())).thenReturn(false);
when(mockKafkaTopicClient.listTopicNames()).thenReturn(ImmutableSet.of(
"aTopic",
"test_topic",
"Test_Topic"
));
// When:
final KsqlRestException e = assertThrows(
KsqlRestException.class,
() -> testResource.streamQuery(
securityContext,
new KsqlRequest(PRINT_TOPIC, Collections.emptyMap(), Collections.emptyMap(), null),
new CompletableFuture<>(),
Optional.empty(),
new MetricsCallbackHolder(),
context
)
);
// Then:
assertThat(e, exceptionStatusCode(is(BAD_REQUEST.code())));
assertThat(e, exceptionErrorMessage(errorMessage(containsString(
"Could not find topic 'TEST_TOPIC', "
+ "or the KSQL user does not have permissions to list the topic. "
+ "Topic names are case-sensitive."
+ System.lineSeparator()
+ "Did you mean:"
))));
assertThat(e, exceptionErrorMessage(errorMessage(containsString("\tprint test_topic;"
))));
assertThat(e, exceptionErrorMessage(errorMessage(containsString("\tprint Test_Topic;"
))));
} |
@JsonCreator
public static AuditEventType create(@JsonProperty(FIELD_NAMESPACE) String namespace,
@JsonProperty(FIELD_OBJECT) String object,
@JsonProperty(FIELD_ACTION) String action) {
return new AutoValue_AuditEventType(namespace, object, action);
} | @Test
public void testInvalid2() throws Exception {
expectedException.expect(IllegalArgumentException.class);
AuditEventType.create("");
} |
public static <T extends com.google.protobuf.GeneratedMessageV3> ProtobufSchema<T> of(Class<T> pojo) {
return of(pojo, new HashMap<>());
} | @Test
public void testEncodeAndDecode() {
Function.FunctionDetails functionDetails = Function.FunctionDetails.newBuilder().setName(NAME).build();
ProtobufSchema<Function.FunctionDetails> protobufSchema = ProtobufSchema.of(Function.FunctionDetails.class);
byte[] bytes = protobufSchema.encode(functionDetails);
Function.FunctionDetails message = protobufSchema.decode(bytes);
Assert.assertEquals(message.getName(), NAME);
} |
@Override
protected String processLink(IExpressionContext context, String link) {
if (link == null || !linkInSite(externalUrlSupplier.get(), link)) {
return link;
}
if (StringUtils.isBlank(link)) {
link = "/";
}
if (isAssetsRequest(link)) {
return PathUtils.combinePath(THEME_PREVIEW_PREFIX, theme.getName(), link);
}
// not assets link
if (theme.isActive()) {
return link;
}
return UriComponentsBuilder.fromUriString(link)
.queryParam(ThemeContext.THEME_PREVIEW_PARAM_NAME, theme.getName())
.build().toString();
} | @Test
void processTemplateLinkWithActive() {
ThemeLinkBuilder themeLinkBuilder =
new ThemeLinkBuilder(getTheme(true), externalUrlSupplier);
String link = "/post";
String processed = themeLinkBuilder.processLink(null, link);
assertThat(processed).isEqualTo("/post");
} |
@Override
public Stream<MappingField> resolveAndValidateFields(
boolean isKey,
List<MappingField> userFields,
Map<String, String> options,
InternalSerializationService serializationService
) {
Map<QueryPath, MappingField> fieldsByPath = extractFields(userFields, isKey);
Class<?> typeClass = getMetadata(fieldsByPath)
.<Class<?>>map(KvMetadataJavaResolver::loadClass)
.orElseGet(() -> loadClass(options, isKey));
QueryDataType type = QueryDataTypeUtils.resolveTypeForClass(typeClass);
if (type.getTypeFamily() != QueryDataTypeFamily.OBJECT || type.isCustomType()) {
return userFields.isEmpty()
? resolvePrimitiveField(isKey, type)
: resolveAndValidatePrimitiveField(isKey, fieldsByPath, type);
} else {
return userFields.isEmpty()
? resolveObjectFields(isKey, typeClass)
: resolveAndValidateObjectFields(isKey, fieldsByPath, typeClass);
}
} | @Test
@Parameters({
"true, __key",
"false, this"
})
public void test_resolveObjectFields(boolean key, String prefix) {
Map<String, String> options = Map.of(
(key ? OPTION_KEY_FORMAT : OPTION_VALUE_FORMAT), JAVA_FORMAT,
(key ? OPTION_KEY_CLASS : OPTION_VALUE_CLASS), Type.class.getName()
);
Stream<MappingField> fields = INSTANCE.resolveAndValidateFields(key, emptyList(), options, null);
assertThat(fields).containsExactly(field("field", QueryDataType.INT, prefix + ".field"));
} |
@Override
public void init(HazelcastInstance instance, Properties properties, String mapName) {
validateMapStoreConfig(instance, mapName);
logger = instance.getLoggingService().getLogger(GenericMapLoader.class);
this.instance = Util.getHazelcastInstanceImpl(instance);
this.genericMapStoreProperties = new GenericMapStoreProperties(properties, mapName);
this.sqlService = instance.getSql();
this.mappingHelper = new MappingHelper(this.sqlService);
this.mapName = mapName;
this.mappingName = MAPPING_PREFIX + mapName;
HazelcastProperties hzProperties = nodeEngine().getProperties();
this.initTimeoutMillis = hzProperties.getMillis(MAPSTORE_INIT_TIMEOUT);
ManagedExecutorService asyncExecutor = getMapStoreExecutor();
// Init can run on a partition thread; creating a mapping uses other maps, so it needs to run elsewhere
asyncExecutor.submit(this::createOrReadMapping);
} | @Test
public void givenMapStoreConfig_WithoutDataConnection_thenFail() {
MapStoreConfig mapStoreConfig = new MapStoreConfig()
.setClassName(GenericMapLoader.class.getName());
MapConfig mapConfig = new MapConfig(mapName);
mapConfig.setMapStoreConfig(mapStoreConfig);
instance().getConfig().addMapConfig(mapConfig);
mapLoader = new GenericMapLoader<>();
Properties properties = new Properties();
assertThatThrownBy(() -> mapLoader.init(hz, properties, mapName))
.isInstanceOf(HazelcastException.class)
.hasMessage("MapStoreConfig for " + mapName + " must have `data-connection-ref` property set");
} |
@VisibleForTesting
static ExternalResourceInfoProvider createStaticExternalResourceInfoProvider(
Map<String, Long> externalResourceAmountMap,
Map<String, ExternalResourceDriver> externalResourceDrivers) {
final Map<String, Set<? extends ExternalResourceInfo>> externalResources = new HashMap<>();
for (Map.Entry<String, ExternalResourceDriver> externalResourceDriverEntry :
externalResourceDrivers.entrySet()) {
final String resourceName = externalResourceDriverEntry.getKey();
final ExternalResourceDriver externalResourceDriver =
externalResourceDriverEntry.getValue();
if (externalResourceAmountMap.containsKey(resourceName)) {
try {
final Set<? extends ExternalResourceInfo> externalResourceInfos;
externalResourceInfos =
externalResourceDriver.retrieveResourceInfo(
externalResourceAmountMap.get(resourceName));
externalResources.put(resourceName, externalResourceInfos);
} catch (Exception e) {
LOG.warn(
"Failed to retrieve information of external resource {}.",
resourceName,
e);
}
} else {
LOG.warn("Could not found legal amount configuration for {}.", resourceName);
}
}
return new StaticExternalResourceInfoProvider(externalResources);
} | @Test
public void testGetExternalResourceInfoProviderWithoutAmount() {
final Map<String, Long> externalResourceAmountMap = new HashMap<>();
final Map<String, ExternalResourceDriver> externalResourceDrivers = new HashMap<>();
externalResourceDrivers.put(RESOURCE_NAME_1, new TestingExternalResourceDriver());
final StaticExternalResourceInfoProvider externalResourceInfoProvider =
(StaticExternalResourceInfoProvider)
ExternalResourceUtils.createStaticExternalResourceInfoProvider(
externalResourceAmountMap, externalResourceDrivers);
assertThat(externalResourceInfoProvider.getExternalResources().entrySet(), is(empty()));
} |
@SuppressWarnings("unchecked")
public DynamicDestinations<UserT, DestinationT, OutputT> getDynamicDestinations() {
return (DynamicDestinations<UserT, DestinationT, OutputT>) dynamicDestinations;
} | @Test
public void testCopyToOutputFiles() throws Exception {
SimpleSink.SimpleWriteOperation<Void> writeOp = buildWriteOperation();
List<String> inputFilenames = Arrays.asList("input-1", "input-2", "input-3");
List<String> inputContents = Arrays.asList("1", "2", "3");
List<String> expectedOutputFilenames =
Arrays.asList("file-00-of-03.test", "file-01-of-03.test", "file-02-of-03.test");
List<KV<FileResult<Void>, ResourceId>> resultsToFinalFilenames = Lists.newArrayList();
List<ResourceId> expectedOutputPaths = Lists.newArrayList();
for (int i = 0; i < inputFilenames.size(); i++) {
// Generate output paths.
expectedOutputPaths.add(
getBaseOutputDirectory()
.resolve(expectedOutputFilenames.get(i), StandardResolveOptions.RESOLVE_FILE));
// Generate and write to input paths.
File inputTmpFile = tmpFolder.newFile(inputFilenames.get(i));
List<String> lines = Collections.singletonList(inputContents.get(i));
writeFile(lines, inputTmpFile);
ResourceId finalFilename =
writeOp
.getSink()
.getDynamicDestinations()
.getFilenamePolicy(null)
.unwindowedFilename(i, inputFilenames.size(), CompressionType.UNCOMPRESSED);
resultsToFinalFilenames.add(
KV.of(
new FileResult<>(
LocalResources.fromFile(inputTmpFile, false),
UNKNOWN_SHARDNUM,
GlobalWindow.INSTANCE,
PaneInfo.ON_TIME_AND_ONLY_FIRING,
null),
finalFilename));
}
// Copy input files to output files.
writeOp.moveToOutputFiles(resultsToFinalFilenames);
// Assert that the contents were copied.
for (int i = 0; i < expectedOutputPaths.size(); i++) {
assertFileContains(
Collections.singletonList(inputContents.get(i)), expectedOutputPaths.get(i));
}
} |
public static Criterion matchTcpSrc(TpPort tcpPort) {
return new TcpPortCriterion(tcpPort, Type.TCP_SRC);
} | @Test
public void testMatchTcpSrcMethod() {
Criterion matchTcpSrc = Criteria.matchTcpSrc(tpPort1);
TcpPortCriterion tcpPortCriterion =
checkAndConvert(matchTcpSrc,
Criterion.Type.TCP_SRC,
TcpPortCriterion.class);
assertThat(tcpPortCriterion.tcpPort(), is(equalTo(tpPort1)));
} |
@Override
public ByteBuf slice() {
return slice(readerIndex, readableBytes());
} | @Test
public void testSliceRelease() {
ByteBuf buf = newBuffer(8);
assertEquals(1, buf.refCnt());
assertTrue(buf.slice().release());
assertEquals(0, buf.refCnt());
} |
@SuppressWarnings("unchecked")
RestartRequest recordToRestartRequest(ConsumerRecord<String, byte[]> record, SchemaAndValue value) {
String connectorName = record.key().substring(RESTART_PREFIX.length());
if (!(value.value() instanceof Map)) {
log.error("Ignoring restart request because the value is not a Map but is {}", className(value.value()));
return null;
}
Map<String, Object> valueAsMap = (Map<String, Object>) value.value();
Object failed = valueAsMap.get(ONLY_FAILED_FIELD_NAME);
boolean onlyFailed;
if (!(failed instanceof Boolean)) {
log.warn("Invalid data for restart request '{}' field should be a Boolean but is {}, defaulting to {}", ONLY_FAILED_FIELD_NAME, className(failed), ONLY_FAILED_DEFAULT);
onlyFailed = ONLY_FAILED_DEFAULT;
} else {
onlyFailed = (Boolean) failed;
}
Object withTasks = valueAsMap.get(INCLUDE_TASKS_FIELD_NAME);
boolean includeTasks;
if (!(withTasks instanceof Boolean)) {
log.warn("Invalid data for restart request '{}' field should be a Boolean but is {}, defaulting to {}", INCLUDE_TASKS_FIELD_NAME, className(withTasks), INCLUDE_TASKS_DEFAULT);
includeTasks = INCLUDE_TASKS_DEFAULT;
} else {
includeTasks = (Boolean) withTasks;
}
return new RestartRequest(connectorName, onlyFailed, includeTasks);
} | @Test
public void testRecordToRestartRequestIncludeTasksInconsistent() {
ConsumerRecord<String, byte[]> record = new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, RESTART_CONNECTOR_KEYS.get(0),
CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty());
Struct struct = INCLUDE_TASKS_MISSING_STRUCT;
SchemaAndValue schemaAndValue = new SchemaAndValue(struct.schema(), structToMap(struct));
RestartRequest restartRequest = configStorage.recordToRestartRequest(record, schemaAndValue);
assertEquals(CONNECTOR_1_NAME, restartRequest.connectorName());
assertFalse(restartRequest.includeTasks());
assertEquals(struct.getBoolean(ONLY_FAILED_FIELD_NAME), restartRequest.onlyFailed());
} |
public static Map<AbilityKey, Boolean> getStaticAbilities() {
return INSTANCE.getSupportedAbilities();
} | @Test
void testGetStaticAbilities() {
assertFalse(ServerAbilities.getStaticAbilities().isEmpty());
} |
@Override
public void addChildren(Deque<Expression> expressions) {
addChildren(expressions, 2);
} | @Test
public void testSetOptions() throws IOException {
And and = new And();
Expression first = mock(Expression.class);
Expression second = mock(Expression.class);
Deque<Expression> children = new LinkedList<Expression>();
children.add(second);
children.add(first);
and.addChildren(children);
FindOptions options = mock(FindOptions.class);
and.setOptions(options);
verify(first).setOptions(options);
verify(second).setOptions(options);
verifyNoMoreInteractions(first);
verifyNoMoreInteractions(second);
} |
public static ApplyPermissionTemplateQuery create(String templateUuid, List<String> componentKeys) {
return new ApplyPermissionTemplateQuery(templateUuid, componentKeys);
} | @Test
public void should_invalidate_query_with_empty_name() {
assertThatThrownBy(() -> create("", newArrayList("1", "2", "3")))
.isInstanceOf(BadRequestException.class)
.hasMessage("Permission template is mandatory");
} |
public List<File> process()
throws Exception {
try {
return doProcess();
} catch (Exception e) {
// Clean up the output dir as processing has failed. File managers left over from the map or reduce
// phases will be cleaned up in those phases.
FileUtils.deleteQuietly(_segmentsOutputDir);
throw e;
} finally {
FileUtils.deleteDirectory(_mapperOutputDir);
FileUtils.deleteDirectory(_reducerOutputDir);
}
} | @Test
public void testRecordReaderFileConfigInit() throws Exception {
File workingDir = new File(TEMP_DIR, "segmentOutput");
FileUtils.forceMkdir(workingDir);
ClassLoader classLoader = getClass().getClassLoader();
URL resource = classLoader.getResource("data/dimBaseballTeams.csv");
RecordReader recordReader = RecordReaderFactory.getRecordReader(FileFormat.CSV, new File(resource.toURI()),
null, null);
RecordReaderFileConfig recordReaderFileConfig = new RecordReaderFileConfig(FileFormat.CSV,
new File(resource.toURI()),
null, null, recordReader);
TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName("myTable").
setTimeColumnName("time").build();
Schema schema =
new Schema.SchemaBuilder().setSchemaName("mySchema").addSingleValueDimension("teamId",
DataType.STRING, "")
.addSingleValueDimension("teamName", DataType.STRING, "")
.addDateTime("time", DataType.LONG, "1:MILLISECONDS:EPOCH", "1:MILLISECONDS").build();
SegmentProcessorConfig config =
new SegmentProcessorConfig.Builder().setTableConfig(tableConfig).setSchema(schema).build();
SegmentProcessorFramework framework = new SegmentProcessorFramework(config, workingDir,
ImmutableList.of(recordReaderFileConfig), Collections.emptyList(), null);
List<File> outputSegments = framework.process();
assertEquals(outputSegments.size(), 1);
ImmutableSegment segment = ImmutableSegmentLoader.load(outputSegments.get(0), ReadMode.mmap);
SegmentMetadata segmentMetadata = segment.getSegmentMetadata();
assertEquals(segmentMetadata.getTotalDocs(), 52);
// Verify reader is closed
assertEquals(recordReaderFileConfig.isRecordReaderClosedFromRecordReaderFileConfig(), true);
} |
public synchronized void createTable(String tableId, Iterable<String> columnFamilies)
throws BigtableResourceManagerException {
createTable(tableId, columnFamilies, Duration.ofHours(1));
} | @Test
public void testCreateTableShouldWorkWhenBigtableDoesNotThrowAnyError() {
setupReadyTable();
when(bigtableResourceManagerClientFactory.bigtableTableAdminClient().exists(anyString()))
.thenReturn(false);
testManager.createTable(TABLE_ID, ImmutableList.of("cf1"));
verify(bigtableResourceManagerClientFactory.bigtableTableAdminClient()).createTable(any());
} |
@Override
public void configure(final KsqlConfig config) {
if (!config.getKsqlStreamConfigProps().containsKey(StreamsConfig.APPLICATION_SERVER_CONFIG)) {
throw new IllegalArgumentException("Need KS application server set");
}
final String applicationServer =
(String) config.getKsqlStreamConfigProps().get(StreamsConfig.APPLICATION_SERVER_CONFIG);
final HostInfo hostInfo = ServerUtil.parseHostInfo(applicationServer);
this.localHost = new KsqlHostInfo(hostInfo.host(), hostInfo.port());
try {
this.localUrl = new URL(applicationServer);
} catch (final Exception e) {
throw new IllegalStateException("Failed to convert remote host info to URL."
+ " remoteInfo: " + localHost.host() + ":"
+ localHost.port());
}
this.validator = new RequestValidator(
CustomValidators.VALIDATOR_MAP,
injectorFactory,
ksqlEngine::createSandbox,
new ValidatedCommandFactory()
);
this.handler = new RequestHandler(
CustomExecutors.EXECUTOR_MAP,
new DistributingExecutor(
config,
commandRunner.getCommandQueue(),
distributedCmdResponseTimeout,
injectorFactory,
authorizationValidator,
new ValidatedCommandFactory(),
errorHandler,
commandRunnerWarning
),
ksqlEngine,
new DefaultCommandQueueSync(
commandRunner.getCommandQueue(),
KsqlResource::shouldSynchronize,
distributedCmdResponseTimeout
)
);
} | @Test(expected = IllegalArgumentException.class)
public void shouldThrowOnConfigureIfAppServerNotSet() {
// Given:
final KsqlConfig configNoAppServer = new KsqlConfig(ImmutableMap.of());
// When:
ksqlResource.configure(configNoAppServer);
} |
@Operation(summary = "Finalize rda activation", tags = { SwaggerConfig.ACTIVATE_RDA}, operationId = "rdaActivationVerified",
parameters = {@Parameter(ref = "API-V"), @Parameter(ref = "OS-T"), @Parameter(ref = "APP-V"), @Parameter(ref = "OS-V"), @Parameter(ref = "REL-T")})
@PostMapping(value = "rda_activation_verified", produces = "application/json")
@ResponseBody
public AppResponse rdaFinalize(@Valid @RequestBody AppSessionRequest request) throws FlowNotDefinedException, NoSuchAlgorithmException, FlowStateNotDefinedException, IOException, SharedServiceClientException {
return service.processAction(ActivationFlowFactory.TYPE, Action.FINALIZE_RDA, request);
} | @Test
void validateIfCorrectProcessesAreCalledRdaFinalize() throws FlowNotDefinedException, NoSuchAlgorithmException, IOException, FlowStateNotDefinedException, SharedServiceClientException {
AppSessionRequest request = new AppSessionRequest();
activationController.rdaFinalize(request);
verify(flowService, times(1)).processAction(anyString(), any(Action.class), any(AppSessionRequest.class));
} |
@Override
public void run()
throws Exception {
// Get list of files to process.
List<String> filteredFiles = SegmentGenerationUtils.listMatchedFilesWithRecursiveOption(_inputDirFS, _inputDirURI,
_spec.getIncludeFileNamePattern(), _spec.getExcludeFileNamePattern(), _spec.isSearchRecursively());
if (_consistentPushEnabled) {
ConsistentDataPushUtils.configureSegmentPostfix(_spec);
}
File localTempDir = new File(FileUtils.getTempDirectory(), "pinot-" + UUID.randomUUID());
try {
int numInputFiles = filteredFiles.size();
_segmentCreationTaskCountDownLatch = new CountDownLatch(numInputFiles);
if (!SegmentGenerationJobUtils.useGlobalDirectorySequenceId(_spec.getSegmentNameGeneratorSpec())) {
Map<String, List<String>> localDirIndex = new HashMap<>();
for (String filteredFile : filteredFiles) {
java.nio.file.Path filteredParentPath = Paths.get(filteredFile).getParent();
localDirIndex.computeIfAbsent(filteredParentPath.toString(), k -> new ArrayList<>()).add(filteredFile);
}
for (String parentPath : localDirIndex.keySet()) {
List<String> siblingFiles = localDirIndex.get(parentPath);
Collections.sort(siblingFiles);
for (int i = 0; i < siblingFiles.size(); i++) {
URI inputFileURI = SegmentGenerationUtils
.getFileURI(siblingFiles.get(i), SegmentGenerationUtils.getDirectoryURI(parentPath));
submitSegmentGenTask(localTempDir, inputFileURI, i);
}
}
} else {
// Iterate over the file list and submit a segment generation task for each file.
for (int i = 0; i < numInputFiles; i++) {
final URI inputFileURI = SegmentGenerationUtils.getFileURI(filteredFiles.get(i), _inputDirURI);
submitSegmentGenTask(localTempDir, inputFileURI, i);
}
}
_segmentCreationTaskCountDownLatch.await();
if (_failure.get() != null) {
_executorService.shutdownNow();
throw _failure.get();
}
} finally {
//clean up
FileUtils.deleteQuietly(localTempDir);
_executorService.shutdown();
}
} | @Test
public void testInputFilesWithSameNameInDifferentDirectories()
throws Exception {
File testDir = makeTestDir();
File inputDir = new File(testDir, "input");
File inputSubDir1 = new File(inputDir, "2009");
File inputSubDir2 = new File(inputDir, "2010");
inputSubDir1.mkdirs();
inputSubDir2.mkdirs();
File inputFile1 = new File(inputSubDir1, "input.csv");
FileUtils.writeLines(inputFile1, Lists.newArrayList("col1,col2", "value1,1", "value2,2"));
File inputFile2 = new File(inputSubDir2, "input.csv");
FileUtils.writeLines(inputFile2, Lists.newArrayList("col1,col2", "value3,3", "value4,4"));
File outputDir = new File(testDir, "output");
final String schemaName = "mySchema";
File schemaFile = makeSchemaFile(testDir, schemaName);
File tableConfigFile = makeTableConfigFile(testDir, schemaName);
SegmentGenerationJobSpec jobSpec = makeJobSpec(inputDir, outputDir, schemaFile, tableConfigFile);
jobSpec.setSearchRecursively(true);
SegmentGenerationJobRunner jobRunner = new SegmentGenerationJobRunner(jobSpec);
jobRunner.run();
// Check that both segment files are created
File newSegmentFile2009 = new File(outputDir, "2009/myTable_OFFLINE_0.tar.gz");
Assert.assertTrue(newSegmentFile2009.exists());
Assert.assertTrue(newSegmentFile2009.isFile());
Assert.assertTrue(newSegmentFile2009.length() > 0);
File newSegmentFile2010 = new File(outputDir, "2010/myTable_OFFLINE_0.tar.gz");
Assert.assertTrue(newSegmentFile2010.exists());
Assert.assertTrue(newSegmentFile2010.isFile());
Assert.assertTrue(newSegmentFile2010.length() > 0);
} |
public void validate(DataConnectionConfig dataConnectionConfig) {
int numberOfSetItems = getNumberOfSetItems(dataConnectionConfig, CLIENT_XML_PATH, CLIENT_YML_PATH, CLIENT_XML,
CLIENT_YML);
if (numberOfSetItems != 1) {
throw new HazelcastException("HazelcastDataConnection with name '" + dataConnectionConfig.getName()
+ "' could not be created, "
+ "provide either a file path with one of "
+ "\"client_xml_path\" or \"client_yml_path\" properties "
+ "or a string content with one of \"client_xml\" or \"client_yml\" properties "
+ "for the client configuration.");
}
} | @Test
public void testValidateEitherStringOrFilePath() {
DataConnectionConfig dataConnectionConfig = new DataConnectionConfig();
dataConnectionConfig.setProperty(HazelcastDataConnection.CLIENT_XML_PATH, "xml_path");
dataConnectionConfig.setProperty(HazelcastDataConnection.CLIENT_XML, "xml");
HazelcastDataConnectionConfigValidator validator = new HazelcastDataConnectionConfigValidator();
assertThatThrownBy(() -> validator.validate(dataConnectionConfig))
.isInstanceOf(HazelcastException.class);
} |
@Override
public ChannelFuture writeData(final ChannelHandlerContext ctx, final int streamId, ByteBuf data, int padding,
final boolean endOfStream, ChannelPromise promise) {
promise = promise.unvoid();
final Http2Stream stream;
try {
stream = requireStream(streamId);
// Verify that the stream is in the appropriate state for sending DATA frames.
switch (stream.state()) {
case OPEN:
case HALF_CLOSED_REMOTE:
// Allowed sending DATA frames in these states.
break;
default:
throw new IllegalStateException("Stream " + stream.id() + " in unexpected state " + stream.state());
}
} catch (Throwable e) {
data.release();
return promise.setFailure(e);
}
// Hand control of the frame to the flow controller.
flowController().addFlowControlled(stream,
new FlowControlledData(stream, data, padding, endOfStream, promise));
return promise;
} | @Test
public void dataFramesShouldMergeUseVoidPromise() throws Exception {
createStream(STREAM_ID, false);
final ByteBuf data = dummyData().retain();
ChannelPromise promise1 = newVoidPromise(channel);
encoder.writeData(ctx, STREAM_ID, data, 0, true, promise1);
ChannelPromise promise2 = newVoidPromise(channel);
encoder.writeData(ctx, STREAM_ID, data, 0, true, promise2);
// Now merge the two payloads.
List<FlowControlled> capturedWrites = payloadCaptor.getAllValues();
FlowControlled mergedPayload = capturedWrites.get(0);
mergedPayload.merge(ctx, capturedWrites.get(1));
assertEquals(16, mergedPayload.size());
assertFalse(promise1.isSuccess());
assertFalse(promise2.isSuccess());
// Write the merged payloads and verify it was written correctly.
mergedPayload.write(ctx, 16);
assertEquals(0, mergedPayload.size());
assertEquals("abcdefghabcdefgh", writtenData.get(0));
assertEquals(0, data.refCnt());
// The promises won't be set since there are no listeners.
assertFalse(promise1.isSuccess());
assertFalse(promise2.isSuccess());
} |
@Override
public boolean hasPlugin(String key) {
checkState(started.get(), NOT_STARTED_YET);
return pluginInfosByKeys.containsKey(key);
} | @Test
public void hasPlugin_throws_ISE_if_repo_is_not_started() {
assertThatThrownBy(() -> underTest.hasPlugin("foo"))
.isInstanceOf(IllegalStateException.class)
.hasMessage("not started yet");
} |
static boolean isLeaf(int nodeOrder, int depth) {
checkTrue(depth > 0, "Invalid depth: " + depth);
int leafLevel = depth - 1;
int numberOfNodes = getNumberOfNodes(depth);
int maxNodeOrder = numberOfNodes - 1;
checkTrue(nodeOrder >= 0 && nodeOrder <= maxNodeOrder, "Invalid nodeOrder: " + nodeOrder + " in a tree with depth "
+ depth);
int leftMostLeafOrder = MerkleTreeUtil.getLeftMostNodeOrderOnLevel(leafLevel);
return nodeOrder >= leftMostLeafOrder;
} | @Test
public void testIsLeaf() {
assertTrue(MerkleTreeUtil.isLeaf(0, 1));
assertFalse(MerkleTreeUtil.isLeaf(0, 2));
assertTrue(MerkleTreeUtil.isLeaf(1, 2));
assertTrue(MerkleTreeUtil.isLeaf(2, 2));
assertFalse(MerkleTreeUtil.isLeaf(1, 3));
assertFalse(MerkleTreeUtil.isLeaf(2, 3));
assertTrue(MerkleTreeUtil.isLeaf(3, 3));
assertTrue(MerkleTreeUtil.isLeaf(6, 3));
} |
@Operation(summary = "Get the correct AT cetificate")
@PostMapping(value = Constants.URL_NIK_START, consumes = "application/json", produces = "application/json")
public GetCertificateResponse getCertificateRestService(@Valid @RequestBody GetCertificateRequest request,
@RequestHeader(value = "X-FORWARDED-FOR") String clientIp) {
return nikService.getCertificateRestService(request, clientIp);
} | @Test
public void getCertificateRestServiceTest() {
GetCertificateResponse expectedResponse = new GetCertificateResponse();
when(nikServiceMock.getCertificateRestService(any(GetCertificateRequest.class), anyString())).thenReturn(expectedResponse);
GetCertificateResponse actualResponse = nikController.getCertificateRestService(new GetCertificateRequest(), "");
assertEquals(expectedResponse, actualResponse);
} |
@Override
public Set<Entry<K, V>> entries() {
return (Set<Entry<K, V>>) super.entries();
} | @Test
public void testEntrySet() {
RSetMultimap<SimpleKey, SimpleValue> map = redisson.getSetMultimap("test1");
map.put(new SimpleKey("0"), new SimpleValue("1"));
map.put(new SimpleKey("3"), new SimpleValue("4"));
assertThat(map.entries().size()).isEqualTo(2);
Map<SimpleKey, SimpleValue> testMap = new HashMap<SimpleKey, SimpleValue>();
testMap.put(new SimpleKey("0"), new SimpleValue("1"));
testMap.put(new SimpleKey("3"), new SimpleValue("4"));
assertThat(map.entries()).containsOnlyElementsOf(testMap.entrySet());
} |
@Override
public String getName() {
return name;
} | @Test
public void testConstructor_withName() {
config = new ScheduledExecutorConfig("myName");
assertEquals("myName", config.getName());
} |
public static <K extends WritableComparable<?>, V extends Writable>
Writable getEntry(MapFile.Reader[] readers,
Partitioner<K, V> partitioner, K key, V value) throws IOException {
int readerLength = readers.length;
int part;
if (readerLength <= 1) {
part = 0;
} else {
part = partitioner.getPartition(key, value, readers.length);
}
return readers[part].get(key, value);
} | @SuppressWarnings("static-access")
@Test
public void testPartitionerShouldNotBeCalledWhenOneReducerIsPresent()
throws Exception {
MapFileOutputFormat outputFormat = new MapFileOutputFormat();
Reader reader = Mockito.mock(Reader.class);
Reader[] readers = new Reader[]{reader};
outputFormat.getEntry(readers, new MyPartitioner(), new Text(), new Text());
assertTrue(!MyPartitioner.isGetPartitionCalled());
} |
@PublicEvolving
public static <IN, OUT> TypeInformation<OUT> getMapReturnTypes(
MapFunction<IN, OUT> mapInterface, TypeInformation<IN> inType) {
return getMapReturnTypes(mapInterface, inType, null, false);
} | @SuppressWarnings({"rawtypes", "unchecked"})
@Test
void testTupleArray() {
RichMapFunction<?, ?> function =
new RichMapFunction<Tuple2<String, String>[], Tuple2<String, String>[]>() {
private static final long serialVersionUID = 1L;
@Override
public Tuple2<String, String>[] map(Tuple2<String, String>[] value)
throws Exception {
return null;
}
};
TypeInformation<?> ti =
TypeExtractor.getMapReturnTypes(
function,
(TypeInformation)
TypeInformation.of(new TypeHint<Tuple2<String, String>[]>() {}));
assertThat(ti).isInstanceOf(ObjectArrayTypeInfo.class);
ObjectArrayTypeInfo<?, ?> oati = (ObjectArrayTypeInfo<?, ?>) ti;
assertThat(oati.getComponentInfo().isTupleType()).isTrue();
TupleTypeInfo<?> tti = (TupleTypeInfo<?>) oati.getComponentInfo();
assertThat(tti.getTypeAt(0)).isEqualTo(BasicTypeInfo.STRING_TYPE_INFO);
assertThat(tti.getTypeAt(1)).isEqualTo(BasicTypeInfo.STRING_TYPE_INFO);
} |
public static Builder custom() {
return new Builder();
} | @Test(expected = IllegalArgumentException.class)
public void testBuildWithIllegalMaxThreadPoolSize() {
ThreadPoolBulkheadConfig.custom()
.maxThreadPoolSize(-1)
.build();
} |
public static String safeBeaconCode(Point point) {
if(point.rawData() instanceof HasBeaconCodes hbc) {
String beacon = hbc.beaconActual();
return (beacon == null)
? UNKOWN_BEACON_VALUE
: beacon;
}
return UNKOWN_BEACON_VALUE;
} | @Test
public void safeBeaconGeneratesDefaultValue() {
Point p = Point.builder()
.time(EPOCH)
.latLong(0.0, 0.0)
.build();
assertThat(safeBeaconCode(p), is(UNKOWN_BEACON_VALUE));
} |
public StepInstanceActionResponse terminate(
WorkflowInstance instance,
String stepId,
User user,
Actions.StepInstanceAction action,
boolean blocking) {
validateStepId(instance, stepId, action);
StepInstance stepInstance =
stepInstanceDao.getStepInstance(
instance.getWorkflowId(),
instance.getWorkflowInstanceId(),
instance.getWorkflowRunId(),
stepId,
Constants.LATEST_INSTANCE_RUN);
if (!stepInstance.getRuntimeState().getStatus().shouldWakeup()) {
throw new MaestroInvalidStatusException(
"Cannot manually %s the step %s as it is in a terminal state [%s]",
action.name(), stepInstance.getIdentity(), stepInstance.getRuntimeState().getStatus());
}
// prepare payload and then add it to db
StepAction stepAction =
StepAction.createTerminate(
action, stepInstance, user, "manual step instance API call", false);
saveAction(stepInstance, stepAction);
if (blocking) {
long startTime = System.currentTimeMillis();
while (System.currentTimeMillis() - startTime < ACTION_TIMEOUT) {
StepRuntimeState state =
stepInstanceDao.getStepInstanceRuntimeState(
stepInstance.getWorkflowId(),
stepInstance.getWorkflowInstanceId(),
stepInstance.getWorkflowRunId(),
stepInstance.getStepId(),
Constants.LATEST_INSTANCE_RUN);
if (!state.getStatus().shouldWakeup()) {
return createActionResponseFrom(stepInstance, state, stepAction.toTimelineEvent());
}
TimeUtils.sleep(CHECK_INTERVAL);
}
throw new MaestroTimeoutException(
"%s action for the step %s is timed out. No retry is needed and maestro will eventually complete the action.",
action.name(), stepInstance.getIdentity());
} else {
return createActionResponseFrom(stepInstance, null, stepAction.toTimelineEvent());
}
} | @Test
public void testSkip() {
StepInstanceActionResponse response = actionDao.terminate(instance, "job1", user, SKIP, false);
Assert.assertEquals("sample-dag-test-3", response.getWorkflowId());
Assert.assertEquals(1, response.getWorkflowInstanceId());
Assert.assertEquals(1, response.getWorkflowRunId());
Assert.assertEquals("job1", response.getStepId());
Assert.assertEquals(1L, response.getStepAttemptId().longValue());
Assert.assertEquals(
"User [tester] take action [SKIP] on the step due to reason: [manual step instance API call]",
response.getTimelineEvent().getMessage());
Mockito.verify(publisher, Mockito.times(1)).publish(any(StepInstanceWakeUpEvent.class));
} |
public static JsonMapper validateJsonMapper(JsonMapper jsonMapper) {
try {
final String serializedJob = jsonMapper.serialize(getJobForTesting());
testTimeFields(serializedJob);
testUseFieldsNotMethods(serializedJob);
testUsePolymorphism(serializedJob);
testCanConvertBackToJob(jsonMapper, serializedJob);
return jsonMapper;
} catch (Exception e) {
throw new IllegalArgumentException("The JsonMapper you provided cannot be used as it deserializes jobs in an incorrect way.", e);
}
} | @Test
void testInvalidJacksonJsonMapperNoISO8601TimeFormat() {
assertThatThrownBy(() -> validateJsonMapper(new InvalidJacksonJsonMapper(new ObjectMapper().registerModule(new Jdk8Module()).registerModule(new JavaTimeModule()))))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("The JsonMapper you provided cannot be used as it deserializes jobs in an incorrect way.")
.hasRootCauseMessage("Timestamps are wrongly formatted for JobRunr. They should be in ISO8601 format.");
} |
public ClusterSerdes init(Environment env,
ClustersProperties clustersProperties,
int clusterIndex) {
ClustersProperties.Cluster clusterProperties = clustersProperties.getClusters().get(clusterIndex);
log.debug("Configuring serdes for cluster {}", clusterProperties.getName());
var globalPropertiesResolver = new PropertyResolverImpl(env);
var clusterPropertiesResolver = new PropertyResolverImpl(env, "kafka.clusters." + clusterIndex);
Map<String, SerdeInstance> registeredSerdes = new LinkedHashMap<>();
// initializing serdes from config
if (clusterProperties.getSerde() != null) {
for (int i = 0; i < clusterProperties.getSerde().size(); i++) {
SerdeConfig serdeConfig = clusterProperties.getSerde().get(i);
if (Strings.isNullOrEmpty(serdeConfig.getName())) {
throw new ValidationException("'name' property not set for serde: " + serdeConfig);
}
if (registeredSerdes.containsKey(serdeConfig.getName())) {
throw new ValidationException("Multiple serdes with same name: " + serdeConfig.getName());
}
var instance = createSerdeFromConfig(
serdeConfig,
new PropertyResolverImpl(env, "kafka.clusters." + clusterIndex + ".serde." + i + ".properties"),
clusterPropertiesResolver,
globalPropertiesResolver
);
registeredSerdes.put(serdeConfig.getName(), instance);
}
}
// initializing remaining built-in serdes with empty selection patterns
builtInSerdeClasses.forEach((name, clazz) -> {
if (!registeredSerdes.containsKey(name)) {
BuiltInSerde serde = createSerdeInstance(clazz);
if (autoConfigureSerde(serde, clusterPropertiesResolver, globalPropertiesResolver)) {
registeredSerdes.put(name, new SerdeInstance(name, serde, null, null, null));
}
}
});
registerTopicRelatedSerde(registeredSerdes);
return new ClusterSerdes(
registeredSerdes,
Optional.ofNullable(clusterProperties.getDefaultKeySerde())
.map(name -> Preconditions.checkNotNull(registeredSerdes.get(name), "Default key serde not found"))
.orElse(null),
Optional.ofNullable(clusterProperties.getDefaultValueSerde())
.map(name -> Preconditions.checkNotNull(registeredSerdes.get(name), "Default value serde not found"))
.or(() -> Optional.ofNullable(registeredSerdes.get(SchemaRegistrySerde.name())))
.or(() -> Optional.ofNullable(registeredSerdes.get(ProtobufFileSerde.name())))
.orElse(null),
createFallbackSerde()
);
} | @Test
void serdeWithCustomNameAndBuiltInClassnameAreExplicitlyConfigured() {
ClustersProperties.SerdeConfig serdeConfig = new ClustersProperties.SerdeConfig();
serdeConfig.setName("SomeSerde");
serdeConfig.setClassName(BuiltInSerdeWithAutoconfigure.class.getName());
serdeConfig.setTopicKeysPattern("keys");
serdeConfig.setTopicValuesPattern("vals");
var serdes = init(serdeConfig);
SerdeInstance explicitlyConfiguredSerde = serdes.serdes.get("SomeSerde");
verifyExplicitlyConfigured(explicitlyConfiguredSerde);
verifyPatternsMatch(serdeConfig, explicitlyConfiguredSerde);
} |
@SuppressWarnings("MethodLength")
public void onFragment(final DirectBuffer buffer, final int offset, final int length, final Header header)
{
final MessageHeaderDecoder headerDecoder = decoders.header;
headerDecoder.wrap(buffer, offset);
final int schemaId = headerDecoder.schemaId();
if (schemaId != MessageHeaderDecoder.SCHEMA_ID)
{
throw new ArchiveException("expected schemaId=" + MessageHeaderDecoder.SCHEMA_ID + ", actual=" + schemaId);
}
final int templateId = headerDecoder.templateId();
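// Dispatch on the SBE template id: each case decodes its request type and forwards it to the control session.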
switch (templateId)
{
case ConnectRequestDecoder.TEMPLATE_ID:
{
final ConnectRequestDecoder decoder = decoders.connectRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final ControlSession session = conductor.newControlSession(
image.correlationId(),
decoder.correlationId(),
decoder.responseStreamId(),
decoder.version(),
decoder.responseChannel(),
ArrayUtil.EMPTY_BYTE_ARRAY,
this);
controlSessionByIdMap.put(session.sessionId(), session);
break;
}
case CloseSessionRequestDecoder.TEMPLATE_ID:
{
final CloseSessionRequestDecoder decoder = decoders.closeSessionRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final ControlSession session = controlSessionByIdMap.get(controlSessionId);
if (null != session)
{
session.abort();
}
break;
}
case StartRecordingRequestDecoder.TEMPLATE_ID:
{
final StartRecordingRequestDecoder decoder = decoders.startRecordingRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
controlSession.onStartRecording(
correlationId,
decoder.streamId(),
decoder.sourceLocation(),
false,
decoder.channel());
}
break;
}
case StopRecordingRequestDecoder.TEMPLATE_ID:
{
final StopRecordingRequestDecoder decoder = decoders.stopRecordingRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
controlSession.onStopRecording(correlationId, decoder.streamId(), decoder.channel());
}
break;
}
case ReplayRequestDecoder.TEMPLATE_ID:
{
final ReplayRequestDecoder decoder = decoders.replayRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
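// Fields added in newer wire versions fall back to NULL_VALUE when sent by older clients.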
final int fileIoMaxLength = FILE_IO_MAX_LENGTH_VERSION <= headerDecoder.version() ?
decoder.fileIoMaxLength() : Aeron.NULL_VALUE;
final long recordingId = decoder.recordingId();
final long position = decoder.position();
final long replayLength = decoder.length();
final int replayStreamId = decoder.replayStreamId();
final long replayToken = REPLAY_TOKEN_VERSION <= headerDecoder.version() ?
decoder.replayToken() : Aeron.NULL_VALUE;
final String replayChannel = decoder.replayChannel();
final ChannelUri channelUri = ChannelUri.parse(replayChannel);
final ControlSession controlSession = setupSessionAndChannelForReplay(
channelUri, replayToken, recordingId, correlationId, controlSessionId, templateId);
if (null != controlSession)
{
controlSession.onStartReplay(
correlationId,
recordingId,
position,
replayLength,
fileIoMaxLength,
replayStreamId,
channelUri.toString());
}
break;
}
case StopReplayRequestDecoder.TEMPLATE_ID:
{
final StopReplayRequestDecoder decoder = decoders.stopReplayRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
controlSession.onStopReplay(correlationId, decoder.replaySessionId());
}
break;
}
case ListRecordingsRequestDecoder.TEMPLATE_ID:
{
final ListRecordingsRequestDecoder decoder = decoders.listRecordingsRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
controlSession.onListRecordings(correlationId, decoder.fromRecordingId(), decoder.recordCount());
}
break;
}
case ListRecordingsForUriRequestDecoder.TEMPLATE_ID:
{
final ListRecordingsForUriRequestDecoder decoder = decoders.listRecordingsForUriRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
final int channelLength = decoder.channelLength();
final byte[] bytes = 0 == channelLength ? ArrayUtil.EMPTY_BYTE_ARRAY : new byte[channelLength];
decoder.getChannel(bytes, 0, channelLength);
controlSession.onListRecordingsForUri(
correlationId,
decoder.fromRecordingId(),
decoder.recordCount(),
decoder.streamId(),
bytes);
}
break;
}
case ListRecordingRequestDecoder.TEMPLATE_ID:
{
final ListRecordingRequestDecoder decoder = decoders.listRecordingRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
controlSession.onListRecording(correlationId, decoder.recordingId());
}
break;
}
case ExtendRecordingRequestDecoder.TEMPLATE_ID:
{
final ExtendRecordingRequestDecoder decoder = decoders.extendRecordingRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
controlSession.onExtendRecording(
correlationId,
decoder.recordingId(),
decoder.streamId(),
decoder.sourceLocation(),
false,
decoder.channel());
}
break;
}
case RecordingPositionRequestDecoder.TEMPLATE_ID:
{
final RecordingPositionRequestDecoder decoder = decoders.recordingPositionRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
controlSession.onGetRecordingPosition(correlationId, decoder.recordingId());
}
break;
}
case TruncateRecordingRequestDecoder.TEMPLATE_ID:
{
final TruncateRecordingRequestDecoder decoder = decoders.truncateRecordingRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
controlSession.onTruncateRecording(correlationId, decoder.recordingId(), decoder.position());
}
break;
}
case StopRecordingSubscriptionRequestDecoder.TEMPLATE_ID:
{
final StopRecordingSubscriptionRequestDecoder decoder = decoders.stopRecordingSubscriptionRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
controlSession.onStopRecordingSubscription(correlationId, decoder.subscriptionId());
}
break;
}
case StopPositionRequestDecoder.TEMPLATE_ID:
{
final StopPositionRequestDecoder decoder = decoders.stopPositionRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
controlSession.onGetStopPosition(correlationId, decoder.recordingId());
}
break;
}
case FindLastMatchingRecordingRequestDecoder.TEMPLATE_ID:
{
final FindLastMatchingRecordingRequestDecoder decoder = decoders.findLastMatchingRecordingRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
final int channelLength = decoder.channelLength();
final byte[] bytes = 0 == channelLength ? ArrayUtil.EMPTY_BYTE_ARRAY : new byte[channelLength];
decoder.getChannel(bytes, 0, channelLength);
controlSession.onFindLastMatchingRecording(
correlationId,
decoder.minRecordingId(),
decoder.sessionId(),
decoder.streamId(),
bytes);
}
break;
}
case ListRecordingSubscriptionsRequestDecoder.TEMPLATE_ID:
{
final ListRecordingSubscriptionsRequestDecoder decoder = decoders.listRecordingSubscriptionsRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
controlSession.onListRecordingSubscriptions(
correlationId,
decoder.pseudoIndex(),
decoder.subscriptionCount(),
decoder.applyStreamId() == BooleanType.TRUE,
decoder.streamId(),
decoder.channel());
}
break;
}
case BoundedReplayRequestDecoder.TEMPLATE_ID:
{
final BoundedReplayRequestDecoder decoder = decoders.boundedReplayRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final long position = decoder.position();
final long replayLength = decoder.length();
final long recordingId = decoder.recordingId();
final int limitCounterId = decoder.limitCounterId();
final int replayStreamId = decoder.replayStreamId();
final int fileIoMaxLength = FILE_IO_MAX_LENGTH_VERSION <= headerDecoder.version() ?
decoder.fileIoMaxLength() : Aeron.NULL_VALUE;
final long replayToken = REPLAY_TOKEN_VERSION <= headerDecoder.version() ?
decoder.replayToken() : Aeron.NULL_VALUE;
final String replayChannel = decoder.replayChannel();
final ChannelUri channelUri = ChannelUri.parse(replayChannel);
final ControlSession controlSession = setupSessionAndChannelForReplay(
channelUri, replayToken, recordingId, correlationId, controlSessionId, templateId);
if (null != controlSession)
{
controlSession.onStartBoundedReplay(
correlationId,
recordingId,
position,
replayLength,
limitCounterId,
fileIoMaxLength,
replayStreamId,
channelUri.toString());
}
break;
}
case StopAllReplaysRequestDecoder.TEMPLATE_ID:
{
final StopAllReplaysRequestDecoder decoder = decoders.stopAllReplaysRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
controlSession.onStopAllReplays(correlationId, decoder.recordingId());
}
break;
}
case ReplicateRequestDecoder.TEMPLATE_ID:
{
final ReplicateRequestDecoder decoder = decoders.replicateRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
controlSession.onReplicate(
correlationId,
decoder.srcRecordingId(),
decoder.dstRecordingId(),
AeronArchive.NULL_POSITION,
Aeron.NULL_VALUE,
Aeron.NULL_VALUE,
decoder.srcControlStreamId(),
Aeron.NULL_VALUE,
Aeron.NULL_VALUE,
decoder.srcControlChannel(),
decoder.liveDestination(),
"",
NullCredentialsSupplier.NULL_CREDENTIAL,
"");
}
break;
}
case StopReplicationRequestDecoder.TEMPLATE_ID:
{
final StopReplicationRequestDecoder decoder = decoders.stopReplicationRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
controlSession.onStopReplication(correlationId, decoder.replicationId());
}
break;
}
case StartPositionRequestDecoder.TEMPLATE_ID:
{
final StartPositionRequestDecoder decoder = decoders.startPositionRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
controlSession.onGetStartPosition(correlationId, decoder.recordingId());
}
break;
}
case DetachSegmentsRequestDecoder.TEMPLATE_ID:
{
final DetachSegmentsRequestDecoder decoder = decoders.detachSegmentsRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
controlSession.onDetachSegments(correlationId, decoder.recordingId(), decoder.newStartPosition());
}
break;
}
case DeleteDetachedSegmentsRequestDecoder.TEMPLATE_ID:
{
final DeleteDetachedSegmentsRequestDecoder decoder = decoders.deleteDetachedSegmentsRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
controlSession.onDeleteDetachedSegments(correlationId, decoder.recordingId());
}
break;
}
case PurgeSegmentsRequestDecoder.TEMPLATE_ID:
{
final PurgeSegmentsRequestDecoder decoder = decoders.purgeSegmentsRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
controlSession.onPurgeSegments(correlationId, decoder.recordingId(), decoder.newStartPosition());
}
break;
}
case AttachSegmentsRequestDecoder.TEMPLATE_ID:
{
final AttachSegmentsRequestDecoder decoder = decoders.attachSegmentsRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
controlSession.onAttachSegments(correlationId, decoder.recordingId());
}
break;
}
case MigrateSegmentsRequestDecoder.TEMPLATE_ID:
{
final MigrateSegmentsRequestDecoder decoder = decoders.migrateSegmentsRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
controlSession.onMigrateSegments(correlationId, decoder.srcRecordingId(), decoder.dstRecordingId());
}
break;
}
case AuthConnectRequestDecoder.TEMPLATE_ID:
{
final AuthConnectRequestDecoder decoder = decoders.authConnectRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final String responseChannel = decoder.responseChannel();
final int credentialsLength = decoder.encodedCredentialsLength();
final byte[] credentials;
if (credentialsLength > 0)
{
credentials = new byte[credentialsLength];
decoder.getEncodedCredentials(credentials, 0, credentialsLength);
}
else
{
credentials = ArrayUtil.EMPTY_BYTE_ARRAY;
}
final ControlSession session = conductor.newControlSession(
image.correlationId(),
decoder.correlationId(),
decoder.responseStreamId(),
decoder.version(),
responseChannel,
credentials,
this);
controlSessionByIdMap.put(session.sessionId(), session);
break;
}
case ChallengeResponseDecoder.TEMPLATE_ID:
{
final ChallengeResponseDecoder decoder = decoders.challengeResponse;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final ControlSession session = controlSessionByIdMap.get(controlSessionId);
if (null != session)
{
final int credentialsLength = decoder.encodedCredentialsLength();
final byte[] credentials;
if (credentialsLength > 0)
{
credentials = new byte[credentialsLength];
decoder.getEncodedCredentials(credentials, 0, credentialsLength);
}
else
{
credentials = ArrayUtil.EMPTY_BYTE_ARRAY;
}
session.onChallengeResponse(decoder.correlationId(), credentials);
}
break;
}
case KeepAliveRequestDecoder.TEMPLATE_ID:
{
final KeepAliveRequestDecoder decoder = decoders.keepAliveRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
controlSession.onKeepAlive(correlationId);
}
break;
}
case TaggedReplicateRequestDecoder.TEMPLATE_ID:
{
final TaggedReplicateRequestDecoder decoder = decoders.taggedReplicateRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
controlSession.onReplicate(
correlationId,
decoder.srcRecordingId(),
decoder.dstRecordingId(),
AeronArchive.NULL_POSITION,
decoder.channelTagId(),
decoder.subscriptionTagId(),
decoder.srcControlStreamId(),
Aeron.NULL_VALUE,
Aeron.NULL_VALUE,
decoder.srcControlChannel(),
decoder.liveDestination(),
"",
NullCredentialsSupplier.NULL_CREDENTIAL,
"");
}
break;
}
case StartRecordingRequest2Decoder.TEMPLATE_ID:
{
final StartRecordingRequest2Decoder decoder = decoders.startRecordingRequest2;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
controlSession.onStartRecording(
correlationId,
decoder.streamId(),
decoder.sourceLocation(),
decoder.autoStop() == BooleanType.TRUE,
decoder.channel());
}
break;
}
case ExtendRecordingRequest2Decoder.TEMPLATE_ID:
{
final ExtendRecordingRequest2Decoder decoder = decoders.extendRecordingRequest2;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
controlSession.onExtendRecording(
correlationId,
decoder.recordingId(),
decoder.streamId(),
decoder.sourceLocation(),
decoder.autoStop() == BooleanType.TRUE,
decoder.channel());
}
break;
}
case StopRecordingByIdentityRequestDecoder.TEMPLATE_ID:
{
final StopRecordingByIdentityRequestDecoder decoder = decoders.stopRecordingByIdentityRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
controlSession.onStopRecordingByIdentity(correlationId, decoder.recordingId());
}
break;
}
case ReplicateRequest2Decoder.TEMPLATE_ID:
{
final ReplicateRequest2Decoder decoder = decoders.replicateRequest2;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
final int fileIoMaxLength = FILE_IO_MAX_LENGTH_VERSION <= headerDecoder.version() ?
decoder.fileIoMaxLength() : Aeron.NULL_VALUE;
final int sessionId = SESSION_ID_VERSION <= headerDecoder.version() ?
decoder.replicationSessionId() : Aeron.NULL_VALUE;
final String srcControlChannel = decoder.srcControlChannel();
final String liveDestination = decoder.liveDestination();
final String replicationChannel = decoder.replicationChannel();
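// Encoded credentials were introduced in a later wire version; older clients fall back to the null credential.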
final byte[] encodedCredentials;
if (ENCODED_CREDENTIALS_VERSION <= headerDecoder.version())
{
encodedCredentials = new byte[decoder.encodedCredentialsLength()];
decoder.getEncodedCredentials(encodedCredentials, 0, decoder.encodedCredentialsLength());
}
else
{
encodedCredentials = NullCredentialsSupplier.NULL_CREDENTIAL;
}
final String srcResponseChannel = decoder.srcResponseChannel();
if (null != controlSession)
{
controlSession.onReplicate(
correlationId,
decoder.srcRecordingId(),
decoder.dstRecordingId(),
decoder.stopPosition(),
decoder.channelTagId(),
decoder.subscriptionTagId(),
decoder.srcControlStreamId(),
fileIoMaxLength,
sessionId,
srcControlChannel,
liveDestination,
replicationChannel,
encodedCredentials,
srcResponseChannel
);
}
break;
}
case PurgeRecordingRequestDecoder.TEMPLATE_ID:
{
final PurgeRecordingRequestDecoder decoder = decoders.purgeRecordingRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
controlSession.onPurgeRecording(correlationId, decoder.recordingId());
}
break;
}
case MaxRecordedPositionRequestDecoder.TEMPLATE_ID:
{
final MaxRecordedPositionRequestDecoder decoder = decoders.maxRecordedPositionRequest;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
controlSession.onGetMaxRecordedPosition(correlationId, decoder.recordingId());
}
break;
}
case ArchiveIdRequestDecoder.TEMPLATE_ID:
{
final ArchiveIdRequestDecoder decoder = decoders.archiveIdRequestDecoder;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
controlSession.onArchiveId(correlationId);
}
break;
}
case ReplayTokenRequestDecoder.TEMPLATE_ID:
{
final ReplayTokenRequestDecoder decoder = decoders.replayTokenRequestDecoder;
decoder.wrap(
buffer,
offset + MessageHeaderDecoder.ENCODED_LENGTH,
headerDecoder.blockLength(),
headerDecoder.version());
final long controlSessionId = decoder.controlSessionId();
final long correlationId = decoder.correlationId();
final long recordingId = decoder.recordingId();
final ControlSession controlSession = getControlSession(correlationId, controlSessionId, templateId);
if (null != controlSession)
{
final long replayToken = conductor.generateReplayToken(controlSession, recordingId);
controlSession.sendResponse(
correlationId, replayToken, ControlResponseCode.OK, "", conductor.controlResponseProxy());
}
}
}
} | @Test
void shouldHandleReplayRequest()
{
final ControlSessionDemuxer controlSessionDemuxer = new ControlSessionDemuxer(
new ControlRequestDecoders(), mockImage, mockConductor, mockAuthorisationService);
setupControlSession(controlSessionDemuxer, CONTROL_SESSION_ID);
final ExpandableArrayBuffer buffer = new ExpandableArrayBuffer();
final MessageHeaderEncoder headerEncoder = new MessageHeaderEncoder();
final ReplayRequestEncoder replayRequestEncoder = new ReplayRequestEncoder();
replayRequestEncoder.wrapAndApplyHeader(buffer, 0, headerEncoder);
replayRequestEncoder
.controlSessionId(928374L)
.correlationId(9382475L)
.recordingId(9827345897L)
.position(982374L)
.fileIoMaxLength(4096)
.replayStreamId(9832475)
.replayChannel("aeron:ipc");
final int replayRequestLength = replayRequestEncoder.encodedLength();
controlSessionDemuxer.onFragment(buffer, 0, replayRequestLength, mockHeader);
final ReplayRequestDecoder expected = new ReplayRequestDecoder()
.wrapAndApplyHeader(buffer, 0, new MessageHeaderDecoder());
verify(mockSession).onStartReplay(
expected.correlationId(),
expected.recordingId(),
expected.position(),
expected.length(),
expected.fileIoMaxLength(),
expected.replayStreamId(),
expected.replayChannel());
} |
public void convertQueueHierarchy(FSQueue queue) {
List<FSQueue> children = queue.getChildQueues();
final String queueName = queue.getName();
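// Emit the capacity-scheduler equivalents of this queue's fair-scheduler settings, then recurse into the children.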
emitChildQueues(queueName, children);
emitMaxAMShare(queueName, queue);
emitMaxParallelApps(queueName, queue);
emitMaxAllocations(queueName, queue);
emitPreemptionDisabled(queueName, queue);
emitChildCapacity(queue);
emitMaximumCapacity(queueName, queue);
emitSizeBasedWeight(queueName);
emitOrderingPolicy(queueName, queue);
checkMaxChildCapacitySetting(queue);
emitDefaultUserLimitFactor(queueName, children);
for (FSQueue childQueue : children) {
convertQueueHierarchy(childQueue);
}
} | @Test
public void testQueueSizeBasedWeightDisabled() {
converter = builder.build();
converter.convertQueueHierarchy(rootQueue);
for (String queue : ALL_QUEUES) {
key = PREFIX + queue + ".ordering-policy.fair.enable-size-based-weight";
assertNull("Key " + key + " has different value",
csConfig.get(key));
}
} |
public Data getValueData() {
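// Lazily serialize the value on first access (when a serialization service is available) and cache the result.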
if (valueData == null && serializationService != null) {
valueData = serializationService.toData(value);
}
return valueData;
} | @Test
public void testGetValueData_withObjectValue() {
assertEquals(toData("value"), objectEvent.getValueData());
} |
@Override
public EventNotificationConfigEntity toContentPackEntity(EntityDescriptorIds entityDescriptorIds) {
return SlackEventNotificationConfigEntity.builder()
.color(ValueReference.of(color()))
.webhookUrl(ValueReference.of(webhookUrl()))
.channel(ValueReference.of(channel()))
.customMessage(ValueReference.of(customMessage()))
.userName(ValueReference.of(userName()))
.notifyChannel(ValueReference.of(notifyChannel()))
.linkNames(ValueReference.of(linkNames()))
.iconUrl(ValueReference.of(iconUrl()))
.iconEmoji(ValueReference.of(iconEmoji()))
.timeZone(ValueReference.of(timeZone().getID()))
.notifyHere(ValueReference.of(notifyHere()))
.build();
} | @Test(expected = NullPointerException.class)
public void toContentPackEntity() {
final SlackEventNotificationConfig slackEventNotificationConfig = SlackEventNotificationConfig.builder().build();
slackEventNotificationConfig.toContentPackEntity(EntityDescriptorIds.empty());
} |
public static void writePositionToBlockBuilder(Block block, int position, BlockBuilder blockBuilder)
{
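// Unwrap dictionary indirection so the underlying value is copied rather than the dictionary id.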
if (block instanceof DictionaryBlock) {
position = ((DictionaryBlock) block).getId(position);
block = ((DictionaryBlock) block).getDictionary();
}
if (blockBuilder instanceof MapBlockBuilder) {
writePositionToMapBuilder(block, position, (MapBlockBuilder) blockBuilder);
}
else if (blockBuilder instanceof ArrayBlockBuilder) {
writePositionToArrayBuilder(block, position, (ArrayBlockBuilder) blockBuilder);
}
else if (blockBuilder instanceof RowBlockBuilder) {
writePositionToRowBuilder(block, position, (RowBlockBuilder) blockBuilder);
}
else {
block.writePositionTo(position, blockBuilder);
}
} | @Test
public void testArrayBlockBuilder()
{
long[] values = new long[]{1, 2, 3, 4, 5};
ArrayBlockBuilder blockBuilder1 = new ArrayBlockBuilder(BIGINT, null, 1);
BlockBuilder elementBuilder = blockBuilder1.beginBlockEntry();
for (long value : values) {
BIGINT.writeLong(elementBuilder, value);
}
Block expectedBlock = blockBuilder1.closeEntry().build();
// write values to a new block using BlockBuilderUtil
BlockBuilder blockBuilder2 = new ArrayBlockBuilder(BIGINT, null, 1);
writePositionToBlockBuilder(expectedBlock, 0, blockBuilder2);
Block newBlock = blockBuilder2.build();
assertEquals(newBlock, expectedBlock);
} |
public static boolean isNameCoveredByPattern( String name, String pattern )
{
if ( name == null || name.isEmpty() || pattern == null || pattern.isEmpty() )
{
throw new IllegalArgumentException( "Arguments cannot be null or empty." );
}
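// DNS names and wildcard patterns are case-insensitive, so normalize both sides before comparing.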
final String needle = name.toLowerCase();
final String hayStack = pattern.toLowerCase();
if ( needle.equals( hayStack ) ) {
return true;
}
if ( hayStack.startsWith( "*." ) ) {
return needle.endsWith( hayStack.substring( 2 ) );
}
return false;
} | @Test
public void testNameCoverageSubSubdomainWithWildcard() throws Exception
{
// setup
final String name = "deeper.xmpp.example.org";
final String pattern = "*.example.org";
// do magic
final boolean result = DNSUtil.isNameCoveredByPattern( name, pattern );
// verify
assertTrue( result );
} |
public void applyConfig(ClientBwListDTO configDTO) {
requireNonNull(configDTO, "Client filtering config must not be null");
requireNonNull(configDTO.mode, "Config mode must not be null");
requireNonNull(configDTO.entries, "Config entries must not be null");
ClientSelector selector;
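// Build a selector matching the configured mode: allow all, allow only listed entries, or block listed entries.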
switch (configDTO.mode) {
case DISABLED:
selector = ClientSelectors.any();
break;
case WHITELIST:
selector = createSelector(configDTO.entries);
break;
case BLACKLIST:
selector = ClientSelectors.inverse(createSelector(configDTO.entries));
break;
default:
throw new IllegalArgumentException("Unknown client B/W list mode: " + configDTO.mode);
}
clientEngine.applySelector(selector);
} | @Test
public void testApplyConfig_nullEntryValue_throws() {
ClientBwListDTO config = createConfig(Mode.WHITELIST, new ClientBwListEntryDTO(Type.IP_ADDRESS, null));
assertThrows(NullPointerException.class, () -> handler.applyConfig(config));
} |
@Override
public ByteBuf setByte(int index, int value) {
checkIndex(index);
_setByte(index, value);
return this;
} | @Test
public void testSetByteAfterRelease() {
assertThrows(IllegalReferenceCountException.class, new Executable() {
@Override
public void execute() {
releasedBuffer().setByte(0, 1);
}
});
} |
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
return this.list(directory, listener, new HostPreferences(session.getHost()).getInteger("s3.listing.chunksize"));
} | @Test
public void testEmptyPlaceholder() throws Exception {
final Path container = new SpectraDirectoryFeature(session, new SpectraWriteFeature(session)).mkdir(
new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
try {
new SpectraObjectListService(session).list(new Path(container, "empty", EnumSet.of(Path.Type.directory, Path.Type.placeholder)),
new DisabledListProgressListener());
fail();
}
catch(NotfoundException e) {
// Expected
}
new SpectraDeleteFeature(session).delete(Collections.singletonList(container), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
@Override
protected String copy(final Path source, final S3Object destination, final TransferStatus status, final StreamListener listener) throws BackgroundException {
try {
final List<MultipartPart> completed = new ArrayList<>();
// ID for the initiated multipart upload.
final MultipartUpload multipart = session.getClient().multipartStartUpload(
destination.getBucketName(), destination);
if(log.isDebugEnabled()) {
log.debug(String.format("Multipart upload started for %s with ID %s",
multipart.getObjectKey(), multipart.getUploadId()));
}
final long size = status.getLength();
long remaining = size;
long offset = 0;
final List<Future<MultipartPart>> parts = new ArrayList<>();
for(int partNumber = 1; remaining > 0; partNumber++) {
// Last part can be less than 5 MB. Adjust part size.
final long length = Math.min(Math.max((size / S3DefaultMultipartService.MAXIMUM_UPLOAD_PARTS), partsize), remaining);
// Submit to queue
parts.add(this.submit(source, multipart, partNumber, offset, length));
remaining -= length;
offset += length;
}
for(Future<MultipartPart> f : parts) {
final MultipartPart part = Interruptibles.await(f);
completed.add(part);
listener.sent(part.getSize());
}
// Combining all the given parts into the final object. Processing of a Complete Multipart Upload request
// could take several minutes to complete. Because a request could fail after the initial 200 OK response
// has been sent, it is important that you check the response body to determine whether the request succeeded.
final MultipartCompleted complete = session.getClient().multipartCompleteUpload(multipart, completed);
if(log.isDebugEnabled()) {
log.debug(String.format("Completed multipart upload for %s with checksum %s",
complete.getObjectKey(), complete.getEtag()));
}
return complete.getVersionId();
}
catch(ServiceException e) {
throw new S3ExceptionMappingService().map("Cannot copy {0}", e, source);
}
finally {
pool.shutdown(false);
}
} | @Test
public void testCopyBucketNameInHostname() throws Exception {
final Path test = new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final byte[] content = RandomUtils.nextBytes(1023);
final TransferStatus status = new TransferStatus().withLength(content.length);
status.setChecksum(new SHA256ChecksumCompute().compute(new ByteArrayInputStream(content), status));
final OutputStream out = new S3WriteFeature(virtualhost, new S3AccessControlListFeature(virtualhost)).write(test, status, new DisabledConnectionCallback());
assertNotNull(out);
new StreamCopier(new TransferStatus(), new TransferStatus()).transfer(new ByteArrayInputStream(content), out);
out.close();
test.attributes().setSize(content.length);
final Path copy = new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final S3MultipartCopyFeature feature = new S3MultipartCopyFeature(virtualhost, new S3AccessControlListFeature(virtualhost));
feature.copy(test, copy, status, new DisabledConnectionCallback(), new DisabledStreamListener());
assertTrue(new S3FindFeature(virtualhost, new S3AccessControlListFeature(virtualhost)).find(test));
assertEquals(content.length, new S3AttributesFinderFeature(virtualhost, new S3AccessControlListFeature(virtualhost)).find(test).getSize());
new S3DefaultDeleteFeature(virtualhost).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
assertTrue(new S3FindFeature(virtualhost, new S3AccessControlListFeature(virtualhost)).find(copy));
assertEquals(content.length, new S3AttributesFinderFeature(virtualhost, new S3AccessControlListFeature(virtualhost)).find(copy).getSize());
new S3DefaultDeleteFeature(virtualhost).delete(Collections.singletonList(copy), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
@Override
public List<AppAuthData> convert(final String json) {
return GsonUtils.getInstance().fromList(json, AppAuthData.class);
} | @Test
public void testConvert() {
AppAuthData appAuthData = createFakerAppAuthDataObjects(1).get(0);
setAppAuthDataProperties(appAuthData);
List<AppAuthData> sources = Collections.singletonList(appAuthData);
Gson gson = new Gson();
String json = gson.toJson(sources);
List<AppAuthData> appAuthDataResults = authDataHandler.convert(json);
assertThat(appAuthDataResults.get(0), is(appAuthData));
} |
public Account updatePniKeys(final Account account,
final IdentityKey pniIdentityKey,
final Map<Byte, ECSignedPreKey> deviceSignedPreKeys,
@Nullable final Map<Byte, KEMSignedPreKey> devicePqLastResortPreKeys,
final List<IncomingMessage> deviceMessages,
final Map<Byte, Integer> pniRegistrationIds) throws MismatchedDevicesException, StaleDevicesException {
validateDeviceMessages(account, deviceMessages);
// Don't try to be smart about ignoring unnecessary retries. If we make literally no change we will skip the ddb
// write anyway. Linked devices can handle some wasted extra key rotations.
final Account updatedAccount = accountsManager.updatePniKeys(
account, pniIdentityKey, deviceSignedPreKeys, devicePqLastResortPreKeys, pniRegistrationIds);
sendDeviceMessages(updatedAccount, deviceMessages);
return updatedAccount;
} | @Test
void updatePniKeysMismatchedRegistrationId() {
final Account account = mock(Account.class);
when(account.getNumber()).thenReturn("+18005551234");
final List<Device> devices = new ArrayList<>();
for (byte i = 1; i <= 3; i++) {
final Device device = mock(Device.class);
when(device.getId()).thenReturn(i);
when(device.getRegistrationId()).thenReturn((int) i);
devices.add(device);
when(account.getDevice(i)).thenReturn(Optional.of(device));
}
when(account.getDevices()).thenReturn(devices);
final byte destinationDeviceId2 = 2;
final byte destinationDeviceId3 = 3;
final List<IncomingMessage> messages = List.of(
new IncomingMessage(1, destinationDeviceId2, 1, "foo"),
new IncomingMessage(1, destinationDeviceId3, 1, "foo"));
final ECKeyPair pniIdentityKeyPair = Curve.generateKeyPair();
final ECPublicKey pniIdentityKey = pniIdentityKeyPair.getPublicKey();
final Map<Byte, ECSignedPreKey> preKeys = Map.of(Device.PRIMARY_ID,
KeysHelper.signedECPreKey(1, pniIdentityKeyPair),
destinationDeviceId2, KeysHelper.signedECPreKey(2, pniIdentityKeyPair),
destinationDeviceId3, KeysHelper.signedECPreKey(3, pniIdentityKeyPair));
final Map<Byte, Integer> registrationIds = Map.of(Device.PRIMARY_ID, 17, destinationDeviceId2, 47,
destinationDeviceId3, 89);
assertThrows(StaleDevicesException.class,
() -> changeNumberManager.updatePniKeys(account, new IdentityKey(Curve.generateKeyPair().getPublicKey()), preKeys, null, messages, registrationIds));
} |
@PublicAPI(usage = ACCESS)
public static PlantUmlArchCondition adhereToPlantUmlDiagram(URL url, Configuration configuration) {
return create(url, configuration);
} | @Test
public void diagram_with_multiple_dependencies_that_considers_only_certain_packages() {
File file = TestDiagram.in(temporaryFolder)
.component("SomeOrigin").withStereoTypes("..origin")
.component("SomeIntermediary").withStereoTypes("..intermediary")
.component("SomeTarget").withStereoTypes("..target")
.dependencyFrom("SomeOrigin").to("SomeIntermediary")
.dependencyFrom("SomeIntermediary").to("SomeTarget")
.write();
JavaClasses classes = getClassesFrom("multipledependencies");
PlantUmlArchCondition condition = adhereToPlantUmlDiagram(file,
consideringOnlyDependenciesInAnyPackage("..origin", "..intermediary", "..target"));
assertConditionHasNumberOfFailures(classes, condition, 3);
condition = adhereToPlantUmlDiagram(file,
consideringOnlyDependenciesInAnyPackage("..origin", "..intermediary"));
assertConditionHasNumberOfFailures(classes, condition, 2);
condition = adhereToPlantUmlDiagram(file,
consideringOnlyDependenciesInAnyPackage("..origin"));
assertConditionHasNumberOfFailures(classes, condition, 1);
} |
public static <V> SetOnceReference<V> ofNullable(final V value) {
return new SetOnceReference<>(value);
} | @Test
public void testFromOfNullableWithValue() {
final Sentinel sentinel = new Sentinel();
checkSetReferenceIsImmutable(SetOnceReference.ofNullable(sentinel), sentinel);
} |
@Override
public List<ProviderGroup> subscribe(ConsumerConfig config) {
String appName = config.getAppName();
if (!registryConfig.isSubscribe()) {
// The registry is configured not to subscribe
if (LOGGER.isInfoEnabled(appName)) {
LOGGER.infoWithApp(appName, LogCodes.getLog(LogCodes.INFO_REGISTRY_IGNORE));
}
return null;
}
if (!config.isSubscribe()) {
return null;
}
try {
List<ProviderInfo> providers = lookupHealthService(config);
if (EventBus.isEnable(ConsumerSubEvent.class)) {
ConsumerSubEvent event = new ConsumerSubEvent(config);
EventBus.post(event);
}
return Collections.singletonList(new ProviderGroup().addAll(providers));
} catch (SofaRpcRuntimeException e) {
throw e;
} catch (Exception e) {
throw new SofaRpcRuntimeException(LogCodes.getLog(LogCodes.ERROR_SUB_PROVIDER, EXT_NAME), e);
}
} | @Test
public void testSubscribe() {
ProviderConfig<?> providerConfig = providerConfig("consul-test-1", 12200, 12201, 12202);
registry.register(providerConfig);
ConsumerConfig<?> consumerConfig = consumerConfig("consul-test-1");
assertUntil(() -> {
List<ProviderGroup> providerGroups = registry.subscribe(consumerConfig);
Assert.assertEquals(1, providerGroups.size());
Assert.assertEquals(3, providerGroups.get(0).size());
}, 10, TimeUnit.SECONDS);
ConsumerConfig<?> consumerConfigWithAnotherUniqueId = consumerConfig("consul-test-2");
assertUntil(() -> {
List<ProviderGroup> providerGroups = registry.subscribe(consumerConfigWithAnotherUniqueId);
Assert.assertEquals(1, providerGroups.size());
Assert.assertEquals(0, providerGroups.get(0).size());
}, 10, TimeUnit.SECONDS);
registry.unSubscribe(consumerConfig);
registry.unSubscribe(consumerConfigWithAnotherUniqueId);
} |
@Override
public void publish(ScannerReportWriter writer) {
AbstractProjectOrModule rootProject = moduleHierarchy.root();
ScannerReport.Metadata.Builder builder = ScannerReport.Metadata.newBuilder()
.setAnalysisDate(projectInfo.getAnalysisDate().getTime())
// Here we want the key without the branch
.setProjectKey(rootProject.key())
.setCrossProjectDuplicationActivated(cpdSettings.isCrossProjectDuplicationEnabled())
.setRootComponentRef(rootProject.scannerId());
projectInfo.getProjectVersion().ifPresent(builder::setProjectVersion);
projectInfo.getBuildString().ifPresent(builder::setBuildString);
if (branchConfiguration.branchName() != null) {
addBranchInformation(builder);
}
String newCodeReferenceBranch = referenceBranchSupplier.getFromProperties();
if (newCodeReferenceBranch != null) {
builder.setNewCodeReferenceBranch(newCodeReferenceBranch);
}
addScmInformation(builder);
addNotAnalyzedFileCountsByLanguage(builder);
for (QProfile qp : qProfiles.findAll()) {
builder.putQprofilesPerLanguage(qp.getLanguage(), ScannerReport.Metadata.QProfile.newBuilder()
.setKey(qp.getKey())
.setLanguage(qp.getLanguage())
.setName(qp.getName())
.setRulesUpdatedAt(qp.getRulesUpdatedAt().getTime()).build());
}
for (Entry<String, ScannerPlugin> pluginEntry : pluginRepository.getPluginsByKey().entrySet()) {
builder.putPluginsByKey(pluginEntry.getKey(), ScannerReport.Metadata.Plugin.newBuilder()
.setKey(pluginEntry.getKey())
.setUpdatedAt(pluginEntry.getValue().getUpdatedAt()).build());
}
addRelativePathFromScmRoot(builder);
writer.writeMetadata(builder.build());
} | @Test
public void dont_write_new_code_reference_if_not_specified_in_properties() {
when(referenceBranchSupplier.get()).thenReturn("ref");
when(referenceBranchSupplier.getFromProperties()).thenReturn(null);
underTest.publish(writer);
ScannerReport.Metadata metadata = reader.readMetadata();
assertThat(metadata.getNewCodeReferenceBranch()).isEmpty();
} |
private PlantUmlDiagram createDiagram(List<String> rawDiagramLines) {
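// Strip comment lines, then parse component declarations and dependency arrows into the diagram model.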
List<String> diagramLines = filterOutComments(rawDiagramLines);
Set<PlantUmlComponent> components = parseComponents(diagramLines);
PlantUmlComponents plantUmlComponents = new PlantUmlComponents(components);
List<ParsedDependency> dependencies = parseDependencies(plantUmlComponents, diagramLines);
return new PlantUmlDiagram.Builder(plantUmlComponents)
.withDependencies(dependencies)
.build();
} | @Test
public void parses_multiple_components_and_dependencies() {
File file = TestDiagram.in(temporaryFolder)
.component("Component1").withStereoTypes("..origin1..")
.component("Component2").withStereoTypes("..target1..")
.component("Component3").withStereoTypes("..origin2..")
.component("Component4").withStereoTypes("..target2..")
.dependencyFrom("Component1").to("Component2")
.dependencyFrom("Component3").to("Component4")
.write();
PlantUmlDiagram diagram = createDiagram(file);
PlantUmlComponent component1 = getComponentWithName("Component1", diagram);
PlantUmlComponent component2 = getComponentWithName("Component2", diagram);
PlantUmlComponent component3 = getComponentWithName("Component3", diagram);
PlantUmlComponent component4 = getComponentWithName("Component4", diagram);
assertThat(diagram.getAllComponents()).containsOnly(component1, component2, component3, component4);
assertThat(component1.getDependencies()).containsOnly(component2);
assertThat(component2.getDependencies().isEmpty()).isTrue();
assertThat(component3.getDependencies()).containsOnly(component4);
assertThat(component4.getDependencies().isEmpty()).isTrue();
} |
public synchronized String get() {
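// Lazily generate and persist the secret on first use, regenerating if the backing ConfidentialStore has changed.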
ConfidentialStore cs = ConfidentialStore.get();
if (secret == null || cs != lastCS) {
lastCS = cs;
try {
byte[] payload = load();
if (payload == null) {
payload = cs.randomBytes(length / 2);
store(payload);
}
secret = Util.toHexString(payload).substring(0, length);
} catch (IOException e) {
throw new Error("Failed to load the key: " + getId(), e);
}
}
return secret;
} | @Test
public void multipleGetsAreIdempotent() {
HexStringConfidentialKey key = new HexStringConfidentialKey("test", 8);
assertEquals(key.get(), key.get());
} |
public static void extractFilesUsingFilter(File archive, File destination, FilenameFilter filter) throws ExtractionException {
if (archive == null || destination == null) {
return;
}
try (FileInputStream fis = new FileInputStream(archive)) {
extractArchive(new ZipArchiveInputStream(new BufferedInputStream(fis)), destination, filter);
} catch (FileNotFoundException ex) {
final String msg = String.format("Error extracting file `%s` with filter: %s", archive.getAbsolutePath(), ex.getMessage());
LOGGER.debug(msg, ex);
throw new ExtractionException(msg);
} catch (IOException | ArchiveExtractionException ex) {
LOGGER.warn("Exception extracting archive '{}'.", archive.getAbsolutePath());
LOGGER.debug("", ex);
throw new ExtractionException("Unable to extract from archive", ex);
}
} | @Test(expected = org.owasp.dependencycheck.utils.ExtractionException.class)
public void testExtractFilesUsingFilter() throws Exception {
File destination = getSettings().getTempDirectory();
File archive = BaseTest.getResourceAsFile(this, "evil.zip");
ExtractionUtil.extractFiles(archive, destination);
FilenameFilter filter = new NameFileFilter("evil.txt");
ExtractionUtil.extractFilesUsingFilter(archive, destination, filter);
} |
@Override
public Long time(RedisClusterNode node) {
RedisClient entry = getEntry(node);
RFuture<Long> f = executorService.readAsync(entry, LongCodec.INSTANCE, RedisCommands.TIME_LONG);
return syncFuture(f);
} | @Test
public void testTime() {
RedisClusterNode master = getFirstMaster();
Long time = connection.time(master);
assertThat(time).isGreaterThan(1000);
} |
static long calculateRocksDBMutableLimit(long bufferSize) {
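// RocksDB's WriteBufferManager treats 7/8 of the buffer size as its mutable memtable limit; this mirrors that calculation.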
return bufferSize * 7 / 8;
} | @Test
public void testCalculateRocksDBMutableLimit() {
long bufferSize = 64 * 1024 * 1024;
long limit = bufferSize * 7 / 8;
assertThat(
RocksDBMemoryControllerUtils.calculateRocksDBMutableLimit(bufferSize), is(limit));
} |
@SuppressWarnings({
"nullness" // TODO(https://github.com/apache/beam/issues/20497)
})
@Override
protected SchemaTransform from(KafkaReadSchemaTransformConfiguration configuration) {
return new KafkaReadSchemaTransform(configuration);
} | @Test
public void testBuildTransformWithProtoFormatWrongMessageName() {
ServiceLoader<SchemaTransformProvider> serviceLoader =
ServiceLoader.load(SchemaTransformProvider.class);
List<SchemaTransformProvider> providers =
StreamSupport.stream(serviceLoader.spliterator(), false)
.filter(provider -> provider.getClass() == KafkaReadSchemaTransformProvider.class)
.collect(Collectors.toList());
KafkaReadSchemaTransformProvider kafkaProvider =
(KafkaReadSchemaTransformProvider) providers.get(0);
SchemaTransform transform =
kafkaProvider.from(
KafkaReadSchemaTransformConfiguration.builder()
.setTopic("anytopic")
.setBootstrapServers("anybootstrap")
.setFormat("PROTO")
.setMessageName("MyOtherMessage")
.setFileDescriptorPath(
Objects.requireNonNull(
getClass()
.getResource("/proto_byte/file_descriptor/proto_byte_utils.pb"))
.getPath())
.build());
assertThrows(
NullPointerException.class,
() -> transform.expand(PCollectionRowTuple.empty(Pipeline.create())));
} |
public static Schema schemaFromPojoClass(
TypeDescriptor<?> typeDescriptor, FieldValueTypeSupplier fieldValueTypeSupplier) {
return StaticSchemaInference.schemaFromClass(typeDescriptor, fieldValueTypeSupplier);
} | @Test
public void testNestedMap() {
Schema schema =
POJOUtils.schemaFromPojoClass(
new TypeDescriptor<NestedMapPOJO>() {}, JavaFieldTypeSupplier.INSTANCE);
SchemaTestUtils.assertSchemaEquivalent(NESTED_MAP_POJO_SCHEMA, schema);
} |
public Visibility get(DbSession dbSession) {
PropertyDto defaultProjectVisibility = Optional
.ofNullable(dbClient.propertiesDao().selectGlobalProperty(dbSession, PROJECTS_DEFAULT_VISIBILITY_PROPERTY_NAME))
.orElseThrow(() -> new IllegalStateException("Could not find default project visibility setting"));
return Visibility.parseVisibility(defaultProjectVisibility.getValue());
} | @Test
public void fail_if_project_visibility_property_not_exist() {
DbSession dbSession = db.getSession();
assertThatThrownBy(() -> underTest.get(dbSession))
.isInstanceOf(IllegalStateException.class)
.hasMessage("Could not find default project visibility setting");
} |
@GetMapping(value = "/{appId}/{clusterName}/{namespace:.+}")
public ApolloConfig queryConfig(@PathVariable String appId, @PathVariable String clusterName,
@PathVariable String namespace,
@RequestParam(value = "dataCenter", required = false) String dataCenter,
@RequestParam(value = "releaseKey", defaultValue = "-1") String clientSideReleaseKey,
@RequestParam(value = "ip", required = false) String clientIp,
@RequestParam(value = "label", required = false) String clientLabel,
@RequestParam(value = "messages", required = false) String messagesAsString,
HttpServletRequest request, HttpServletResponse response) throws IOException {
String originalNamespace = namespace;
//strip out .properties suffix
namespace = namespaceUtil.filterNamespaceName(namespace);
//fix the character case issue, such as FX.apollo <-> fx.apollo
namespace = namespaceUtil.normalizeNamespace(appId, namespace);
if (Strings.isNullOrEmpty(clientIp)) {
clientIp = WebUtils.tryToGetClientIp(request);
}
ApolloNotificationMessages clientMessages = transformMessages(messagesAsString);
List<Release> releases = Lists.newLinkedList();
String appClusterNameLoaded = clusterName;
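// Clients that send the no-appId placeholder have no private namespaces; only public configuration applies to them.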
if (!ConfigConsts.NO_APPID_PLACEHOLDER.equalsIgnoreCase(appId)) {
Release currentAppRelease = configService.loadConfig(appId, clientIp, clientLabel, appId, clusterName, namespace,
dataCenter, clientMessages);
if (currentAppRelease != null) {
releases.add(currentAppRelease);
//the cluster search process may override the cluster name
appClusterNameLoaded = currentAppRelease.getClusterName();
}
}
//if the namespace does not belong to this appId, check whether a public configuration exists
if (!namespaceBelongsToAppId(appId, namespace)) {
Release publicRelease = this.findPublicConfig(appId, clientIp, clientLabel, clusterName, namespace,
dataCenter, clientMessages);
if (Objects.nonNull(publicRelease)) {
releases.add(publicRelease);
}
}
if (releases.isEmpty()) {
response.sendError(HttpServletResponse.SC_NOT_FOUND,
String.format(
"Could not load configurations with appId: %s, clusterName: %s, namespace: %s",
appId, clusterName, originalNamespace));
Tracer.logEvent("Apollo.Config.NotFound",
assembleKey(appId, clusterName, originalNamespace, dataCenter));
return null;
}
auditReleases(appId, clusterName, dataCenter, clientIp, releases);
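// Merge the release keys of all contributing releases so the client detects a change in any of them.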
String mergedReleaseKey = releases.stream().map(Release::getReleaseKey)
.collect(Collectors.joining(ConfigConsts.CLUSTER_NAMESPACE_SEPARATOR));
if (mergedReleaseKey.equals(clientSideReleaseKey)) {
// Client side configuration is the same with server side, return 304
response.setStatus(HttpServletResponse.SC_NOT_MODIFIED);
Tracer.logEvent("Apollo.Config.NotModified",
assembleKey(appId, appClusterNameLoaded, originalNamespace, dataCenter));
return null;
}
ApolloConfig apolloConfig = new ApolloConfig(appId, appClusterNameLoaded, originalNamespace,
mergedReleaseKey);
apolloConfig.setConfigurations(mergeReleaseConfigurations(releases));
Tracer.logEvent("Apollo.Config.Found", assembleKey(appId, appClusterNameLoaded,
originalNamespace, dataCenter));
return apolloConfig;
} | @Test
public void testQueryConfigWithPublicNamespaceAndAppOverride() throws Exception {
String someAppSideReleaseKey = "1";
String somePublicAppSideReleaseKey = "2";
HttpServletResponse someResponse = mock(HttpServletResponse.class);
String somePublicAppId = "somePublicAppId";
AppNamespace somePublicAppNamespace =
assemblePublicAppNamespace(somePublicAppId, somePublicNamespaceName);
when(someRelease.getConfigurations()).thenReturn("{\"apollo.public.foo\": \"foo-override\"}");
when(somePublicRelease.getConfigurations())
.thenReturn("{\"apollo.public.foo\": \"foo\", \"apollo.public.bar\": \"bar\"}");
when(configService.loadConfig(someAppId, someClientIp, someClientLabel, someAppId, someClusterName, somePublicNamespaceName,
someDataCenter, someNotificationMessages)).thenReturn(someRelease);
when(someRelease.getReleaseKey()).thenReturn(someAppSideReleaseKey);
when(someRelease.getNamespaceName()).thenReturn(somePublicNamespaceName);
when(appNamespaceService.findPublicNamespaceByName(somePublicNamespaceName))
.thenReturn(somePublicAppNamespace);
when(configService.loadConfig(someAppId, someClientIp, someClientLabel, somePublicAppId, someClusterName, somePublicNamespaceName,
someDataCenter, someNotificationMessages)).thenReturn(somePublicRelease);
when(somePublicRelease.getReleaseKey()).thenReturn(somePublicAppSideReleaseKey);
when(somePublicRelease.getAppId()).thenReturn(somePublicAppId);
when(somePublicRelease.getClusterName()).thenReturn(someDataCenter);
when(somePublicRelease.getNamespaceName()).thenReturn(somePublicNamespaceName);
ApolloConfig result =
configController
.queryConfig(someAppId, someClusterName, somePublicNamespaceName, someDataCenter,
someAppSideReleaseKey, someClientIp, someClientLabel, someMessagesAsString, someRequest, someResponse);
assertEquals(Joiner.on(ConfigConsts.CLUSTER_NAMESPACE_SEPARATOR)
.join(someAppSideReleaseKey, somePublicAppSideReleaseKey),
result.getReleaseKey());
assertEquals(someAppId, result.getAppId());
assertEquals(someClusterName, result.getCluster());
assertEquals(somePublicNamespaceName, result.getNamespaceName());
assertEquals("foo-override", result.getConfigurations().get("apollo.public.foo"));
assertEquals("bar", result.getConfigurations().get("apollo.public.bar"));
verify(instanceConfigAuditUtil, times(1)).audit(someAppId, someClusterName, someDataCenter,
someClientIp, someAppId, someClusterName, somePublicNamespaceName, someAppSideReleaseKey);
verify(instanceConfigAuditUtil, times(1)).audit(someAppId, someClusterName, someDataCenter,
someClientIp, somePublicAppId, someDataCenter, somePublicNamespaceName, somePublicAppSideReleaseKey);
} |
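A minimal sketch of the release-key merge that drives the 304 decision in queryConfig above. The separator value "+" is an assumption standing in for ConfigConsts.CLUSTER_NAMESPACE_SEPARATOR, and the class and method names are hypothetical:

import java.util.List;
import java.util.stream.Collectors;

public class ReleaseKeyMergeSketch {
    private static final String SEPARATOR = "+"; // assumed separator value

    static String merge(List<String> releaseKeys) {
        // Join the per-release keys in load order; a change in any release changes the merged key.
        return releaseKeys.stream().collect(Collectors.joining(SEPARATOR));
    }

    public static void main(String[] args) {
        String serverSide = merge(List.of("1", "2")); // app release + public release
        String clientSide = "1+2";
        // Equal keys mean the client already holds the latest config, so the server returns 304.
        System.out.println(serverSide.equals(clientSide)); // true
    }
}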
@Override
public ImportResult importItem(
UUID jobId,
IdempotentImportExecutor idempotentExecutor,
TokenSecretAuthData authData,
VideosContainerResource data)
throws Exception {
if (data == null) {
// Nothing to do
return ImportResult.OK;
}
BackblazeDataTransferClient b2Client = b2ClientFactory.getOrCreateB2Client(jobId, authData);
final LongAdder totalImportedFilesSizes = new LongAdder();
if (data.getVideos() != null && data.getVideos().size() > 0) {
for (VideoModel video : data.getVideos()) {
idempotentExecutor.importAndSwallowIOExceptions(
video,
v -> {
ItemImportResult<String> fileImportResult = importSingleVideo(jobId, b2Client, v);
if (fileImportResult.hasBytes()) {
totalImportedFilesSizes.add(fileImportResult.getBytes());
}
return fileImportResult;
});
}
}
return ImportResult.OK.copyWithBytes(totalImportedFilesSizes.longValue());
} | @Test
public void testNullData() throws Exception {
BackblazeVideosImporter sut =
new BackblazeVideosImporter(monitor, dataStore, streamProvider, clientFactory);
ImportResult result = sut.importItem(UUID.randomUUID(), executor, authData, null);
assertEquals(ImportResult.OK, result);
} |
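The importer above accumulates per-file byte counts with a LongAdder, which stays correct even if import callbacks run concurrently. A tiny self-contained sketch of that accumulation pattern; the file sizes are made-up values, not anything from the importer:

import java.util.List;
import java.util.concurrent.atomic.LongAdder;

public class ByteTotalSketch {
    public static void main(String[] args) {
        LongAdder total = new LongAdder();
        // Hypothetical per-file sizes; the real importer reads them from ItemImportResult.getBytes().
        List<Long> fileSizes = List.of(1_024L, 2_048L);
        fileSizes.forEach(total::add);
        System.out.println(total.longValue()); // 3072
    }
}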
@Override
public boolean processArgument(final ShenyuRequest shenyuRequest, final Annotation annotation, final Object arg) {
RequestTemplate requestTemplate = shenyuRequest.getRequestTemplate();
CookieValue cookie = ANNOTATION.cast(annotation);
String name = cookie.value().trim();
checkState(emptyToNull(name) != null, "Cookie.name() was empty on parameter %s", requestTemplate.getMethod());
Collection<String> cookieExpression = requestTemplate.getHeaders().getOrDefault(HttpHeaders.COOKIE, Lists.newArrayList());
cookieExpression.add(String.format("%s=%s", name, arg));
Map<String, Collection<String>> headers = shenyuRequest.getHeaders();
headers.compute(HttpHeaders.COOKIE, (key, old) -> {
if (CollectionUtils.isEmpty(old)) {
return cookieExpression;
}
CollectionUtils.addAll(old, cookieExpression);
return old;
});
shenyuRequest.setHeaders(headers);
return true;
} | @Test
public void processArgumentTwoTest() {
headers.put(HttpHeaders.COOKIE, Lists.newArrayList("one=one"));
final CookieValue cookie = spy(CookieValue.class);
when(cookie.value()).thenReturn("two");
processor.processArgument(request, cookie, "twoValue");
assertTrue(request.getHeaders().containsKey(HttpHeaders.COOKIE), "cookie value resolve fail");
assertTrue(request.getHeaders().get(HttpHeaders.COOKIE).contains("two=twoValue"), "cookie value resolve error");
} |
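The header merge above leans on Map.compute to either adopt the new cookie list or append to an existing one. A pure-JDK sketch of the same pattern, with hypothetical header values:

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class CookieHeaderMergeSketch {
    public static void main(String[] args) {
        Map<String, Collection<String>> headers = new HashMap<>();
        headers.put("Cookie", new ArrayList<>(List.of("one=one")));
        Collection<String> incoming = List.of("two=twoValue");
        // compute() appends to an existing header list, or adopts the incoming list when absent.
        headers.compute("Cookie", (key, old) -> {
            if (old == null || old.isEmpty()) {
                return new ArrayList<>(incoming);
            }
            old.addAll(incoming);
            return old;
        });
        System.out.println(headers.get("Cookie")); // [one=one, two=twoValue]
    }
}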
@Override
public boolean contains(Object o) {
QueryableEntry entry = (QueryableEntry) o;
if (index != null) {
return checkFromIndex(entry);
} else {
//todo: what is the point of this condition? Is it some kind of optimization?
if (resultSets.size() > 3) {
index = new HashSet<>();
for (Map<Data, QueryableEntry> result : resultSets) {
for (QueryableEntry queryableEntry : result.values()) {
index.add(queryableEntry.getKeyData());
}
}
return checkFromIndex(entry);
} else {
for (Map<Data, QueryableEntry> resultSet : resultSets) {
if (resultSet.containsKey(entry.getKeyData())) {
return true;
}
}
return false;
}
}
} | @Test
public void testContains_notEmpty() {
QueryableEntry entry = entry(data());
addEntry(entry);
assertThat(result.contains(entry)).isTrue();
} |
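The todo in the snippet above asks what the resultSets.size() > 3 branch is for; it reads as a cost trade-off: with few result sets a linear containsKey scan is cheap, while with many it pays to build a one-time key index so every later lookup is O(1). A minimal sketch of that lazy-index pattern over plain maps (the threshold and names are illustrative, not Hazelcast's):

import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class LazyKeyIndexSketch {
    private final List<Map<String, String>> resultSets;
    private Set<String> index; // built lazily on the first contains() over "many" sets

    LazyKeyIndexSketch(List<Map<String, String>> resultSets) {
        this.resultSets = resultSets;
    }

    boolean contains(String key) {
        if (index == null && resultSets.size() > 3) { // threshold mirrors the snippet above
            index = new HashSet<>();
            resultSets.forEach(m -> index.addAll(m.keySet()));
        }
        if (index != null) {
            return index.contains(key);
        }
        // Few result sets: a direct scan is cheaper than building an index.
        return resultSets.stream().anyMatch(m -> m.containsKey(key));
    }

    public static void main(String[] args) {
        Map<String, String> one = new HashMap<>(Map.of("k1", "v1"));
        LazyKeyIndexSketch s = new LazyKeyIndexSketch(List.of(one));
        System.out.println(s.contains("k1")); // true, via direct scan
    }
}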
@VisibleForTesting
void loadUdfFromClass(final Class<?>... udfClasses) {
for (final Class<?> theClass : udfClasses) {
loadUdfFromClass(
theClass, KsqlScalarFunction.INTERNAL_PATH);
}
} | @Test
public void shouldThrowOnMissingSchemaProvider() throws Exception {
// Given:
final MutableFunctionRegistry functionRegistry = new InternalFunctionRegistry();
final Path udfJar = new File("src/test/resources/udf-failing-tests.jar").toPath();
try (final UdfClassLoader udfClassLoader = newClassLoader(udfJar, PARENT_CLASS_LOADER, resourceName -> false)) {
final Class<?> clazz = udfClassLoader.loadClass(
"org.damian.ksql.udf.MissingSchemaProviderUdf");
final UdfLoader udfLoader = new UdfLoader(
functionRegistry,
empty(),
create(EMPTY),
true
);
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> udfLoader.loadUdfFromClass(clazz)
);
// Then:
assertThat(e.getMessage(), containsString(
"Cannot find schema provider method with name provideSchema "
+ "and parameter List<SqlType> in class org.damian.ksql.udf."
+ "MissingSchemaProviderUdf."));
}
} |
@Override
public Sensor addLatencyRateTotalSensor(final String scopeName,
final String entityName,
final String operationName,
final Sensor.RecordingLevel recordingLevel,
final String... tags) {
final String threadId = Thread.currentThread().getName();
final String group = groupNameFromScope(scopeName);
final Map<String, String> tagMap = customizedTags(threadId, scopeName, entityName, tags);
final Sensor sensor =
customInvocationRateAndCountSensor(threadId, group, entityName, operationName, tagMap, recordingLevel);
addAvgAndMaxToSensor(
sensor,
group,
tagMap,
operationName + LATENCY_SUFFIX,
AVG_LATENCY_DESCRIPTION + operationName,
MAX_LATENCY_DESCRIPTION + operationName
);
return sensor;
} | @Test
public void testTotalMetricDoesntDecrease() {
final MockTime time = new MockTime(1);
final MetricConfig config = new MetricConfig().timeWindow(1, TimeUnit.MILLISECONDS);
final Metrics metrics = new Metrics(config, time);
final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, "", VERSION, time);
final String scope = "scope";
final String entity = "entity";
final String operation = "op";
final Sensor sensor = streamsMetrics.addLatencyRateTotalSensor(
scope,
entity,
operation,
RecordingLevel.INFO
);
final double latency = 100.0;
final MetricName totalMetricName = metrics.metricName(
"op-total",
"stream-scope-metrics",
"",
"thread-id",
Thread.currentThread().getName(),
"scope-id",
"entity"
);
final KafkaMetric totalMetric = metrics.metric(totalMetricName);
for (int i = 0; i < 10; i++) {
assertEquals(i, Math.round(totalMetric.measurable().measure(config, time.milliseconds())));
sensor.record(latency, time.milliseconds());
}
} |
@Override
@Transactional(rollbackFor = Exception.class) // transactional so that any exception rolls back the whole import
public UserImportRespVO importUserList(List<UserImportExcelVO> importUsers, boolean isUpdateSupport) {
// 1.1 Validate parameters
if (CollUtil.isEmpty(importUsers)) {
throw exception(USER_IMPORT_LIST_IS_EMPTY);
}
// 1.2 The initial password must not be empty
String initPassword = configApi.getConfigValueByKey(USER_INIT_PASSWORD_KEY).getCheckedData();
if (StrUtil.isEmpty(initPassword)) {
throw exception(USER_IMPORT_INIT_PASSWORD);
}
// 2. Iterate, creating or updating users one by one
UserImportRespVO respVO = UserImportRespVO.builder().createUsernames(new ArrayList<>())
.updateUsernames(new ArrayList<>()).failureUsernames(new LinkedHashMap<>()).build();
importUsers.forEach(importUser -> {
// 2.1.1 Validate that the fields meet requirements
try {
ValidationUtils.validate(BeanUtils.toBean(importUser, UserSaveReqVO.class).setPassword(initPassword));
} catch (ConstraintViolationException ex){
respVO.getFailureUsernames().put(importUser.getUsername(), ex.getMessage());
return;
}
// 2.1.2 Validate, checking for any disqualifying conditions
try {
validateUserForCreateOrUpdate(null, null, importUser.getMobile(), importUser.getEmail(),
importUser.getDeptId(), null);
} catch (ServiceException ex) {
respVO.getFailureUsernames().put(importUser.getUsername(), ex.getMessage());
return;
}
// 2.2.1 If the user does not exist, insert it
AdminUserDO existUser = userMapper.selectByUsername(importUser.getUsername());
if (existUser == null) {
userMapper.insert(BeanUtils.toBean(importUser, AdminUserDO.class)
.setPassword(encodePassword(initPassword)).setPostIds(new HashSet<>())); // set the default password and an empty post-id set
respVO.getCreateUsernames().add(importUser.getUsername());
return;
}
// 2.2.2 If the user exists, check whether updating is allowed
if (!isUpdateSupport) {
respVO.getFailureUsernames().put(importUser.getUsername(), USER_USERNAME_EXISTS.getMsg());
return;
}
AdminUserDO updateUser = BeanUtils.toBean(importUser, AdminUserDO.class);
updateUser.setId(existUser.getId());
userMapper.updateById(updateUser);
respVO.getUpdateUsernames().add(importUser.getUsername());
});
return respVO;
} | @Test
public void testImportUserList_01() {
// Prepare parameters
UserImportExcelVO importUser = randomPojo(UserImportExcelVO.class, o -> {
o.setEmail(randomEmail());
o.setMobile(randomMobile());
});
// mock the method to simulate a failure
doThrow(new ServiceException(DEPT_NOT_FOUND)).when(deptService).validateDeptList(any());
// Invoke
UserImportRespVO respVO = userService.importUserList(newArrayList(importUser), true);
// Assert
assertEquals(0, respVO.getCreateUsernames().size());
assertEquals(0, respVO.getUpdateUsernames().size());
assertEquals(1, respVO.getFailureUsernames().size());
assertEquals(DEPT_NOT_FOUND.getMsg(), respVO.getFailureUsernames().get(importUser.getUsername()));
} |
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
super.onDataReceived(device, data);
if (data.size() == 1) {
final int batteryLevel = data.getIntValue(Data.FORMAT_UINT8, 0);
if (batteryLevel >= 0 && batteryLevel <= 100) {
onBatteryLevelChanged(device, batteryLevel);
return;
}
}
onInvalidDataReceived(device, data);
} | @Test
public void onInvalidDataReceived_dataTooLong() {
final DataReceivedCallback callback = new BatteryLevelDataCallback() {
@Override
public void onBatteryLevelChanged(@NonNull final BluetoothDevice device, final int batteryLevel) {
assertEquals("Invalid date returned Battery Level", 1, 2);
}
@Override
public void onInvalidDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
assertEquals("Invalid data", data.size(), 2);
}
};
final Data data = new Data(new byte[] { 0x64, 0x00 });
callback.onDataReceived(null, data);
} |
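The callback above treats a battery value as valid only when it is exactly one byte and falls in 0-100; anything else goes to onInvalidDataReceived. A standalone sketch of that UINT8 parse-and-validate step; the helper and its -1 sentinel are assumptions for illustration, not the library's API:

public class BatteryLevelParseSketch {
    /** Returns the battery level 0-100, or -1 for malformed data (wrong length or out of range). */
    static int parse(byte[] value) {
        if (value == null || value.length != 1) {
            return -1; // the real callback reports this via onInvalidDataReceived
        }
        int level = value[0] & 0xFF; // UINT8: mask off sign extension
        return (level >= 0 && level <= 100) ? level : -1;
    }

    public static void main(String[] args) {
        System.out.println(parse(new byte[] { 0x64 }));       // 100
        System.out.println(parse(new byte[] { 0x64, 0x00 })); // -1, two bytes is invalid
    }
}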
@Override
public boolean serverHealthy() {
try {
String result = reqApi(UtilAndComs.nacosUrlBase + "/operator/metrics", new HashMap<>(8), HttpMethod.GET);
JsonNode json = JacksonUtils.toObj(result);
String serverStatus = json.get("status").asText();
return "UP".equals(serverStatus);
} catch (Exception e) {
return false;
}
} | @Test
void testServerHealthyForException() throws Exception {
NacosRestTemplate nacosRestTemplate = mock(NacosRestTemplate.class);
when(nacosRestTemplate.exchangeForm(any(), any(), any(), any(), any(), any())).thenThrow(
new RuntimeException("test"));
final Field nacosRestTemplateField = NamingHttpClientProxy.class.getDeclaredField("nacosRestTemplate");
nacosRestTemplateField.setAccessible(true);
nacosRestTemplateField.set(clientProxy, nacosRestTemplate);
assertFalse(clientProxy.serverHealthy());
} |
@Override
public List<URL> lookup(URL url) {
if (url == null) {
throw new IllegalArgumentException("lookup url == null");
}
try {
checkDestroyed();
List<String> providers = new ArrayList<>();
for (String path : toCategoriesPath(url)) {
List<String> children = zkClient.getChildren(path);
if (children != null) {
providers.addAll(children);
}
}
return toUrlsWithoutEmpty(url, providers);
} catch (Throwable e) {
throw new RpcException(
"Failed to lookup " + url + " from zookeeper " + getUrl() + ", cause: " + e.getMessage(), e);
}
} | @Test
void testLookupWithException() {
URL errorUrl = URL.valueOf("multicast://0.0.0.0/");
Assertions.assertThrows(RpcException.class, () -> zookeeperRegistry.lookup(errorUrl));
} |
@VisibleForTesting
public int getMaxLeavesToBeActivated(int numPendingApps) {
float childQueueAbsoluteCapacity = leafQueueTemplateAbsoluteCapacity;
if (childQueueAbsoluteCapacity > 0) {
int numLeafQueuesNeeded = (int) Math.floor(availableCapacity / childQueueAbsoluteCapacity);
return Math.min(numLeafQueuesNeeded, numPendingApps);
}
return 0;
} | @Test
public void testGetMaxLeavesToBeActivated() {
DeactivatedLeafQueuesByLabel d1 = spy(DeactivatedLeafQueuesByLabel.class);
d1.setAvailableCapacity(0.17f);
d1.setLeafQueueTemplateAbsoluteCapacity(0.03f);
assertEquals(1, d1.getMaxLeavesToBeActivated(1));
DeactivatedLeafQueuesByLabel d2 = spy(DeactivatedLeafQueuesByLabel.class);
d2.setAvailableCapacity(0.17f);
d2.setLeafQueueTemplateAbsoluteCapacity(0.03f);
assertEquals(5, d2.getMaxLeavesToBeActivated(7));
DeactivatedLeafQueuesByLabel d3 = spy(DeactivatedLeafQueuesByLabel.class);
d3.setAvailableCapacity(0f);
d3.setLeafQueueTemplateAbsoluteCapacity(0.03f);
assertEquals(0, d3.getMaxLeavesToBeActivated(10));
} |
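The capacity arithmetic above is worth making concrete: with 0.17 available capacity and a 0.03 per-leaf template, floor(0.17 / 0.03) = 5 leaf queues fit, and the result is capped by the pending-application count. A self-contained sketch of the same computation (class and method names are illustrative):

public class MaxLeavesSketch {
    static int maxLeaves(float availableCapacity, float templateCapacity, int pendingApps) {
        if (templateCapacity > 0) {
            // How many leaf queues of the template size fit into the available capacity.
            int leavesThatFit = (int) Math.floor(availableCapacity / templateCapacity);
            return Math.min(leavesThatFit, pendingApps);
        }
        return 0;
    }

    public static void main(String[] args) {
        System.out.println(maxLeaves(0.17f, 0.03f, 1)); // 1, capped by pending apps
        System.out.println(maxLeaves(0.17f, 0.03f, 7)); // 5, capped by capacity
        System.out.println(maxLeaves(0f, 0.03f, 10));   // 0, nothing fits
    }
}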
public boolean isRegisteredUser(@Nonnull final JID user, final boolean checkRemoteDomains) {
if (xmppServer.isLocal(user)) {
try {
getUser(user.getNode());
return true;
}
catch (final UserNotFoundException e) {
return false;
}
}
else if (!checkRemoteDomains) {
return false;
} else {
// Look up in the cache using the full JID
Boolean isRegistered = remoteUsersCache.get(user.toString());
if (isRegistered == null) {
// Check if the bare JID of the user is cached
isRegistered = remoteUsersCache.get(user.toBareJID());
if (isRegistered == null) {
// No information is cached so check user identity and cache it
// A disco#info is going to be sent to the bare JID of the user. This packet
// is going to be handled by the remote server.
final IQ iq = new IQ(IQ.Type.get);
iq.setFrom(xmppServer.getServerInfo().getXMPPDomain());
iq.setTo(user.toBareJID());
iq.setChildElement("query", "http://jabber.org/protocol/disco#info");
final Semaphore completionSemaphore = new Semaphore(0);
// Send the disco#info request to the remote server.
final IQRouter iqRouter = xmppServer.getIQRouter();
final long timeoutInMillis = REMOTE_DISCO_INFO_TIMEOUT.getValue().toMillis();
iqRouter.addIQResultListener(iq.getID(), new IQResultListener() {
@Override
public void receivedAnswer(final IQ packet) {
final JID from = packet.getFrom();
// Assume that the user is not a registered user
Boolean isRegistered = Boolean.FALSE;
// Analyze the disco result packet
if (IQ.Type.result == packet.getType()) {
final Element child = packet.getChildElement();
if (child != null) {
for (final Iterator it = child.elementIterator("identity"); it.hasNext();) {
final Element identity = (Element) it.next();
final String accountType = identity.attributeValue("type");
if ("registered".equals(accountType) || "admin".equals(accountType)) {
isRegistered = Boolean.TRUE;
break;
}
}
}
}
// Update cache of remote registered users
remoteUsersCache.put(from.toBareJID(), isRegistered);
completionSemaphore.release();
}
@Override
public void answerTimeout(final String packetId) {
Log.warn("The result from the disco#info request was never received. request: {}", iq);
completionSemaphore.release();
}
}, timeoutInMillis);
// Send the request
iqRouter.route(iq);
// Wait for the response
try {
completionSemaphore.tryAcquire(timeoutInMillis, TimeUnit.MILLISECONDS);
} catch (final InterruptedException e) {
Thread.currentThread().interrupt();
Log.warn("Interrupted whilst waiting for response from remote server", e);
}
isRegistered = remoteUsersCache.computeIfAbsent(user.toBareJID(), ignored -> Boolean.FALSE);
}
}
return isRegistered;
}
} | @Test
public void isRegisteredUserTrueWillReturnFalseForUnknownRemoteUsers() {
final AtomicReference<IQResultListener> iqListener = new AtomicReference<>();
doAnswer(invocationOnMock -> {
final IQResultListener listener = invocationOnMock.getArgument(1);
iqListener.set(listener);
return null;
}).when(iqRouter).addIQResultListener(any(), any(), anyLong());
doAnswer(invocationOnMock -> {
final IQ iq = invocationOnMock.getArgument(0);
final Element childElement = iq.getChildElement();
final IQ response = IQ.createResultIQ(iq);
response.setChildElement(childElement.createCopy());
response.setError(new PacketError(PacketError.Condition.item_not_found, PacketError.Condition.item_not_found.getDefaultType()));
iqListener.get().receivedAnswer(response);
return null;
}).when(iqRouter).route(any());
final boolean result = userManager.isRegisteredUser(new JID(USER_ID, REMOTE_XMPP_DOMAIN, null), true);
assertThat(result, is(false));
verify(iqRouter).route(any());
} |
void placeOrder(Order order) {
sendShippingRequest(order);
} | @Test
void testPlaceOrderNoExceptionShortQueueDuration() throws Exception {
long paymentTime = timeLimits.paymentTime();
long queueTaskTime = timeLimits.queueTaskTime();
long messageTime = timeLimits.messageTime();
long employeeTime = timeLimits.employeeTime();
long queueTime = timeLimits.queueTime();
for (double d = 0.1; d < 2; d = d + 0.1) {
paymentTime *= d;
queueTaskTime *= d;
messageTime *= d;
employeeTime *= d;
queueTime *= d;
Commander c = buildCommanderObjectUnknownException();
var order = new Order(new User("K", "J"), "pen", 1f);
for (Order.MessageSent ms : Order.MessageSent.values()) {
c.placeOrder(order);
assertFalse(StringUtils.isBlank(order.id));
}
}
} |
@Override
public ExecuteResult execute(final ServiceContext serviceContext, final ConfiguredKsqlPlan plan,
final boolean restoreInProgress) {
try {
final ExecuteResult result = EngineExecutor
.create(primaryContext, serviceContext, plan.getConfig())
.execute(plan.getPlan(), restoreInProgress);
return result;
} catch (final KsqlStatementException e) {
throw e;
} catch (final KsqlException e) {
// add the statement text to the KsqlException
throw new KsqlStatementException(
e.getMessage(),
e.getMessage(),
plan.getPlan().getStatementText(),
e.getCause()
);
}
} | @Test
public void shouldFailDropStreamWhenMultipleStreamsAreReadingTheTable() {
// Given:
setupKsqlEngineWithSharedRuntimeEnabled();
KsqlEngineTestUtil.execute(
serviceContext,
ksqlEngine,
"create stream bar as select * from test1;"
+ "create stream foo as select * from bar;"
+ "create stream foo2 as select * from bar;",
ksqlConfig,
Collections.emptyMap()
);
// When:
final KsqlStatementException e = assertThrows(
KsqlStatementException.class,
() -> KsqlEngineTestUtil.execute(
serviceContext,
ksqlEngine,
"drop stream bar;",
ksqlConfig,
Collections.emptyMap()
)
);
// Then:
assertThat(e, rawMessage(is(
"Cannot drop BAR.\n"
+ "The following streams and/or tables read from this source: [FOO, FOO2].\n"
+ "You need to drop them before dropping BAR.")));
assertThat(e, statementText(is("drop stream bar;")));
} |
public static String normalize(CharSequence str) {
return Normalizer.normalize(str, Normalizer.Form.NFC);
} | @Test
public void normalizeTest() {
// https://blog.csdn.net/oscar999/article/details/105326270
String str1 = "\u00C1";
String str2 = "\u0041\u0301";
assertNotEquals(str1, str2);
str1 = CharSequenceUtil.normalize(str1);
str2 = CharSequenceUtil.normalize(str2);
assertEquals(str1, str2);
} |
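The test above hinges on the two Unicode encodings of Á: U+00C1 as a single precomposed code point versus U+0041 U+0301 as A plus a combining acute accent. NFC normalization composes the pair, after which the strings compare equal. A runnable sketch:

import java.text.Normalizer;

public class NfcNormalizeSketch {
    public static void main(String[] args) {
        String precomposed = "\u00C1";      // Á as a single code point
        String decomposed = "\u0041\u0301"; // A followed by a combining acute accent
        System.out.println(precomposed.equals(decomposed)); // false: different code points
        // NFC composes the base letter and combining mark into the single code point.
        String a = Normalizer.normalize(precomposed, Normalizer.Form.NFC);
        String b = Normalizer.normalize(decomposed, Normalizer.Form.NFC);
        System.out.println(a.equals(b)); // true
    }
}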
public void evaluate(AuthenticationContext context) {
if (context == null) {
return;
}
this.authenticationStrategy.evaluate(context);
} | @Test
public void evaluate1() {
if (MixAll.isMac()) {
return;
}
User user = User.of("test", "test");
this.authenticationMetadataManager.createUser(user);
DefaultAuthenticationContext context = new DefaultAuthenticationContext();
context.setRpcCode("11");
context.setUsername("test");
context.setContent("test".getBytes(StandardCharsets.UTF_8));
context.setSignature("DJRRXBXlCVuKh6ULoN87847QX+Y=");
this.evaluator.evaluate(context);
} |
@Override
public String pluginNamed() {
return PluginEnum.HYSTRIX.getName();
} | @Test
public void testPluginNamed() {
assertEquals(hystrixPluginDataHandler.pluginNamed(), PluginEnum.HYSTRIX.getName());
} |