focal_method | test_case
---|---|
public static Byte getCodeByAlias(String protocol) {
return TYPE_CODE_MAP.get(protocol);
} | @Test
public void getCodeByAlias() throws Exception {
Assert.assertTrue(ProtocolFactory.getCodeByAlias("xx") == 121);
} |
@Override
public ChannelFuture writeHeaders(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int padding,
boolean endStream, ChannelPromise promise) {
return writeHeaders0(ctx, streamId, headers, false, 0, (short) 0, false, padding, endStream, promise);
} | @Test
public void writeHeadersUsingVoidPromise() throws Exception {
final Throwable cause = new RuntimeException("fake exception");
when(writer.writeHeaders(eq(ctx), eq(STREAM_ID), any(Http2Headers.class),
anyInt(), anyBoolean(), any(ChannelPromise.class)))
.then(new Answer<ChannelFuture>() {
@Override
public ChannelFuture answer(InvocationOnMock invocationOnMock) throws Throwable {
ChannelPromise promise = invocationOnMock.getArgument(5);
assertFalse(promise.isVoid());
return promise.setFailure(cause);
}
});
createStream(STREAM_ID, false);
// END_STREAM flag, so that a listener is added to the future.
encoder.writeHeaders(ctx, STREAM_ID, EmptyHttp2Headers.INSTANCE, 0, true, newVoidPromise(channel));
verify(writer).writeHeaders(eq(ctx), eq(STREAM_ID), any(Http2Headers.class),
anyInt(), anyBoolean(), any(ChannelPromise.class));
// When using a void promise, the error should be propagated via the channel pipeline.
verify(pipeline).fireExceptionCaught(cause);
} |
@Field
public void setDetectAngles(boolean detectAngles) {
defaultConfig.setDetectAngles(detectAngles);
} | @Test
public void testAnglesOnPageRotation() throws Exception {
PDFParserConfig pdfParserConfig = new PDFParserConfig();
pdfParserConfig.setDetectAngles(true);
ParseContext parseContext = new ParseContext();
parseContext.set(PDFParserConfig.class, pdfParserConfig);
String xml = getXML("testPDF_rotated.pdf", parseContext).xml;
assertContains("until a further review indicates that the infrastructure", xml);
} |
public static <InputT, OutputT> MapElements<InputT, OutputT> via(
final InferableFunction<InputT, OutputT> fn) {
return new MapElements<>(fn, fn.getInputTypeDescriptor(), fn.getOutputTypeDescriptor());
} | @Test
public void testNestedPolymorphicSimpleFunction() throws Exception {
pipeline.enableAbandonedNodeEnforcement(false);
pipeline
.apply(Create.of(1, 2, 3))
// This is the function that needs to propagate the input T to output T
.apply("Polymorphic Identity", MapElements.via(new NestedPolymorphicSimpleFunction<>()))
// This is a consumer to ensure that all coder inference logic is executed.
.apply(
"Test Consumer",
MapElements.via(
new SimpleFunction<KV<Integer, String>, Integer>() {
@Override
public Integer apply(KV<Integer, String> input) {
return 42;
}
}));
} |
@SuppressWarnings("unchecked")
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
final ConfiguredStatement<T> statement
) {
try {
if (statement.getStatement() instanceof CreateAsSelect) {
registerForCreateAs((ConfiguredStatement<? extends CreateAsSelect>) statement);
} else if (statement.getStatement() instanceof CreateSource) {
registerForCreateSource((ConfiguredStatement<? extends CreateSource>) statement);
}
} catch (final KsqlStatementException e) {
throw e;
} catch (final KsqlException e) {
throw new KsqlStatementException(
ErrorMessageUtil.buildErrorMessage(e),
statement.getMaskedStatementText(),
e.getCause());
}
// Remove schema id from SessionConfig
return stripSchemaIdConfig(statement);
} | @Test
public void shouldSupportPrimitiveValueSchemasInCreateStmts() throws Exception {
// Given:
givenStatement("CREATE STREAM source (f1 VARCHAR) "
+ "WITH ("
+ " kafka_topic='expectedName', "
+ " key_format='KAFKA', "
+ " value_format='AVRO', "
+ " partitions=1, "
+ " wrap_single_value='false'"
+ ");");
// When:
injector.inject(statement);
// Then:
verify(schemaRegistryClient).register("expectedName-value", AVRO_UNWRAPPED_VALUE_SCHEMA);
} |
@Override
public AddToClusterNodeLabelsResponse addToClusterNodeLabels(
AddToClusterNodeLabelsRequest request) throws YarnException, IOException {
// parameter verification.
if (request == null) {
routerMetrics.incrAddToClusterNodeLabelsFailedRetrieved();
RouterServerUtil.logAndThrowException("Missing AddToClusterNodeLabels request.", null);
}
String subClusterId = request.getSubClusterId();
if (StringUtils.isBlank(subClusterId)) {
routerMetrics.incrAddToClusterNodeLabelsFailedRetrieved();
RouterServerUtil.logAndThrowException("Missing AddToClusterNodeLabels SubClusterId.", null);
}
try {
long startTime = clock.getTime();
RMAdminProtocolMethod remoteMethod = new RMAdminProtocolMethod(
new Class[]{AddToClusterNodeLabelsRequest.class}, new Object[]{request});
Collection<AddToClusterNodeLabelsResponse> addToClusterNodeLabelsResps =
remoteMethod.invokeConcurrent(this, AddToClusterNodeLabelsResponse.class, subClusterId);
if (CollectionUtils.isNotEmpty(addToClusterNodeLabelsResps)) {
long stopTime = clock.getTime();
routerMetrics.succeededAddToClusterNodeLabelsRetrieved(stopTime - startTime);
return AddToClusterNodeLabelsResponse.newInstance();
}
} catch (YarnException e) {
routerMetrics.incrAddToClusterNodeLabelsFailedRetrieved();
RouterServerUtil.logAndThrowException(e,
"Unable to addToClusterNodeLabels due to exception. " + e.getMessage());
}
routerMetrics.incrAddToClusterNodeLabelsFailedRetrieved();
throw new YarnException("Unable to addToClusterNodeLabels.");
} | @Test
public void testAddToClusterNodeLabelsNormalRequest() throws Exception {
// case1, We add NodeLabel to subCluster SC-1
NodeLabel nodeLabelA = NodeLabel.newInstance("a");
NodeLabel nodeLabelB = NodeLabel.newInstance("b");
List<NodeLabel> labels = new ArrayList<>();
labels.add(nodeLabelA);
labels.add(nodeLabelB);
AddToClusterNodeLabelsRequest request =
AddToClusterNodeLabelsRequest.newInstance("SC-1", labels);
AddToClusterNodeLabelsResponse response = interceptor.addToClusterNodeLabels(request);
assertNotNull(response);
// case2, test the non-exist subCluster.
AddToClusterNodeLabelsRequest request1 =
AddToClusterNodeLabelsRequest.newInstance("SC-NON", labels);
LambdaTestUtils.intercept(Exception.class, "subClusterId = SC-NON is not an active subCluster.",
() -> interceptor.addToClusterNodeLabels(request1));
} |
@Override
public int rmdir(String path) {
return AlluxioFuseUtils.call(LOG, () -> rmInternal(path),
FuseConstants.FUSE_RMDIR, "path=%s", path);
} | @Test
@DoraTestTodoItem(action = DoraTestTodoItem.Action.FIX, owner = "LuQQiu")
@Ignore
public void rmdir() throws Exception {
AlluxioURI expectedPath = BASE_EXPECTED_URI.join("/foo/bar");
doNothing().when(mFileSystem).delete(expectedPath);
mFuseFs.rmdir("/foo/bar");
verify(mFileSystem).delete(expectedPath);
} |
protected static boolean isMatchingMetricTags(Set<Tag> meterTags, Set<Tag> expectedTags) {
if (!meterTags.containsAll(expectedTags)) {
return false;
}
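// All expected tags are present; delegate the per-tag comparison to isMatchingTag.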
return expectedTags.stream().allMatch(tag -> isMatchingTag(meterTags, tag));
} | @Test
void nonMatchingMetricTagsReturnsFalse() {
meterTags.add(Tag.of("key", "value"));
Set<Tag> expectedTags = new HashSet<>();
expectedTags.add(Tag.of("key", "differentValue"));
assertFalse(MetricsUtils.isMatchingMetricTags(meterTags, expectedTags));
} |
public static ListenableFuture<EntityId> findEntityAsync(
TbContext ctx,
EntityId originator,
RelationsQuery relationsQuery
) {
var relationService = ctx.getRelationService();
var query = buildQuery(originator, relationsQuery);
var relationListFuture = relationService.findByQuery(ctx.getTenantId(), query);
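// Resolve to the first matching relation; the query direction decides whether the related
// entity is taken from the 'to' or the 'from' end of that relation.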
if (relationsQuery.getDirection() == EntitySearchDirection.FROM) {
return Futures.transformAsync(relationListFuture,
relationList -> CollectionUtils.isNotEmpty(relationList) ?
Futures.immediateFuture(relationList.get(0).getTo())
: Futures.immediateFuture(null), ctx.getDbCallbackExecutor());
} else if (relationsQuery.getDirection() == EntitySearchDirection.TO) {
return Futures.transformAsync(relationListFuture,
relationList -> CollectionUtils.isNotEmpty(relationList) ?
Futures.immediateFuture(relationList.get(0).getFrom())
: Futures.immediateFuture(null), ctx.getDbCallbackExecutor());
}
return Futures.immediateFailedFuture(new IllegalStateException("Unknown direction"));
} | @Test
public void givenRelationQuery_whenFindEntityAsync_thenReturnNull() {
// GIVEN
List<EntityRelation> entityRelations = new ArrayList<>();
when(relationServiceMock.findByQuery(ArgumentMatchers.any(), ArgumentMatchers.any())).thenReturn(Futures.immediateFuture(entityRelations));
// WHEN
ListenableFuture<EntityId> entityIdFuture = EntitiesRelatedEntityIdAsyncLoader.findEntityAsync(ctxMock, TENANT_ID, relationsQuery);
// THEN
verifyEntityIdFuture(entityIdFuture, null);
} |
@SuppressWarnings("unchecked")
protected ValueWrapper getSingleFactValueResult(FactMapping factMapping,
FactMappingValue expectedResult,
DMNDecisionResult decisionResult,
List<DMNMessage> failureMessages,
ExpressionEvaluator expressionEvaluator) {
Object resultRaw = decisionResult.getResult();
final DMNDecisionResult.DecisionEvaluationStatus evaluationStatus = decisionResult.getEvaluationStatus();
if (!SUCCEEDED.equals(evaluationStatus)) {
String failureReason = determineFailureMessage(evaluationStatus, failureMessages);
return errorWithMessage("The decision \"" +
decisionResult.getDecisionName() +
"\" has not been successfully evaluated: " +
failureReason);
}
List<ExpressionElement> elementsWithoutClass = factMapping.getExpressionElementsWithoutClass();
// DMN engine doesn't generate the whole object when no entry of the decision table match
if (resultRaw != null) {
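// Walk the expression path step by step through the nested result maps.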
for (ExpressionElement expressionElement : elementsWithoutClass) {
if (!(resultRaw instanceof Map)) {
throw new ScenarioException("Wrong resultRaw structure because it is not a complex type as expected");
}
Map<String, Object> result = (Map<String, Object>) resultRaw;
resultRaw = result.get(expressionElement.getStep());
}
}
Class<?> resultClass = resultRaw != null ? resultRaw.getClass() : null;
Object expectedResultRaw = expectedResult.getRawValue();
return getResultWrapper(factMapping.getClassName(),
expectedResult,
expressionEvaluator,
expectedResultRaw,
resultRaw,
resultClass);
} | @Test
public void getSingleFactValueResult_failDecisionWithMessages() {
DMNMessage errorMessage = new DMNMessageImpl(ERROR, "DMN Internal Error", DMNMessageType.FEEL_EVALUATION_ERROR, null);
DMNMessage warnMessage = new DMNMessageImpl(WARN, "DMN Internal Warn", DMNMessageType.FEEL_EVALUATION_ERROR, null);
DMNDecisionResult failedDecision = createDecisionResultMock("Test", false, new ArrayList<>());
ValueWrapper<?> failedResult = runnerHelper.getSingleFactValueResult(null,
null,
failedDecision,
List.of(warnMessage, errorMessage),
expressionEvaluator);
assertThat(failedResult.isValid()).isFalse();
assertThat(failedResult.getErrorMessage().get()).isEqualTo("The decision \"" +
failedDecision.getDecisionName() +
"\" has not been successfully evaluated: DMN Internal Error");
} |
@Override
public void process(HttpResponse response, HttpContext context) throws
HttpException, IOException {
List<Header> warnings = Arrays.stream(response.getHeaders("Warning")).filter(header -> !this.isDeprecationMessage(header.getValue())).collect(Collectors.toList());
response.removeHeaders("Warning");
warnings.stream().forEach(header -> response.addHeader(header));
} | @Test
public void testInterceptorNoHeader() throws IOException, HttpException {
OpenSearchFilterDeprecationWarningsInterceptor interceptor = new OpenSearchFilterDeprecationWarningsInterceptor();
HttpResponse response = new BasicHttpResponse(new BasicStatusLine(new ProtocolVersion("HTTP", 0, 0), 0, null));
interceptor.process(response, null);
assertThat(response.getAllHeaders())
.as("Number of Headers should be 0 and the interceptor should not fail in itself.")
.hasSize(0);
} |
@Override
public void handleRequest(RestRequest request, RequestContext requestContext, final Callback<RestResponse> callback)
{
if (HttpMethod.POST != HttpMethod.valueOf(request.getMethod()))
{
_log.error("POST is expected, but " + request.getMethod() + " received");
callback.onError(RestException.forError(HttpStatus.S_405_METHOD_NOT_ALLOWED.getCode(), "Invalid method"));
return;
}
// Disable server-side latency instrumentation for multiplexed requests
requestContext.putLocalAttr(TimingContextUtil.TIMINGS_DISABLED_KEY_NAME, true);
IndividualRequestMap individualRequests;
try
{
individualRequests = extractIndividualRequests(request);
if (_multiplexerSingletonFilter != null) {
individualRequests = _multiplexerSingletonFilter.filterRequests(individualRequests);
}
}
catch (RestException e)
{
_log.error("Invalid multiplexed request", e);
callback.onError(e);
return;
}
catch (Exception e)
{
_log.error("Invalid multiplexed request", e);
callback.onError(RestException.forError(HttpStatus.S_400_BAD_REQUEST.getCode(), e));
return;
}
// prepare the map of individual responses to be collected
final IndividualResponseMap individualResponses = new IndividualResponseMap(individualRequests.size());
final Map<String, HttpCookie> responseCookies = new HashMap<>();
// all tasks are Void and side effect based, that will be useful when we add streaming
Task<?> requestProcessingTask = createParallelRequestsTask(request, requestContext, individualRequests, individualResponses, responseCookies);
Task<Void> responseAggregationTask = Task.action("send aggregated response", () ->
{
RestResponse aggregatedResponse = aggregateResponses(individualResponses, responseCookies);
callback.onSuccess(aggregatedResponse);
}
);
_engine.run(requestProcessingTask.andThen(responseAggregationTask), MUX_PLAN_CLASS);
} | @Test(dataProvider = "multiplexerConfigurations")
public void testMultiplexedSingletonFilterFailures(MultiplexerRunMode multiplexerRunMode) throws Exception
{
// This test validates that when a failure occurs in MultiplexerSingletonFilter for an individual request, only that
// individual request fails. The multiplexed request should still complete successfully with a 200 status code.
// Set up the mock request handler so that it returns a json body containing the request uri.
// We use this uri in our mock MultiplexerSingletonFilter.filterIndividualResponse function so that
// we can simulate different responses based on the request.
SynchronousRequestHandler mockHandler = new SynchronousRequestHandler() {
@Override
public RestResponse handleRequestSync(RestRequest request, RequestContext requestContext)
{
try
{
return fakeIndRestResponse(jsonBodyToByteString(fakeIndividualBody(request.getURI().toString())));
}
catch (Exception e)
{
throw new RuntimeException(e);
}
}
};
// Create a mock MultiplexerSingletonFilter to simulate different types of failures.
// Failures are simulated based on the request uri.
MultiplexerSingletonFilter muxFilterWithSimulatedFailures = new MultiplexerSingletonFilter() {
@Override
public IndividualRequest filterIndividualRequest(IndividualRequest request)
{
if (request.getRelativeUrl().contains("bad_request"))
{
throw new RestLiServiceException(HttpStatus.S_400_BAD_REQUEST, "not found");
}
else if (request.getRelativeUrl().contains("error_request"))
{
throw new IllegalArgumentException("Something really bad happened in filterIndividualRequest");
}
return request;
}
@Override
public IndividualResponse filterIndividualResponse(IndividualResponse response)
{
if (response.getStatus() == HttpStatus.S_200_OK.getCode())
{
if (response.getBody().data().getString("value").contains("notfound_response"))
{
throw new RestLiServiceException(HttpStatus.S_404_NOT_FOUND, "not found");
}
else if (response.getBody().data().getString("value").contains("error_response"))
{
// simulate an unexpected exception
throw new UnsupportedOperationException("Something really bad happened in filterIndividualResponse");
}
}
return response;
}
};
// Prepare request to mux handler
FutureCallback<RestResponse> callback = new FutureCallback<>();
RequestContext requestContext = new RequestContext();
Map<String, IndividualRequest> individualRequests = new HashMap<>();
individualRequests.put("0", fakeIndRequest("/good_request"));
individualRequests.put("1", fakeIndRequest("/bad_request"));
individualRequests.put("2", fakeIndRequest("/error_request"));
individualRequests.put("3", fakeIndRequest("/notfound_response"));
individualRequests.put("4", fakeIndRequest("/error_response"));
individualRequests.put("5", fakeIndRequest("/good_request", ImmutableMap.of("6", fakeIndRequest("/bad_request"))));
RestRequest request = fakeMuxRestRequest(individualRequests);
// Create mux handler instance
MultiplexedRequestHandlerImpl multiplexer = createMultiplexer(mockHandler, muxFilterWithSimulatedFailures, Collections.<String>emptySet(), 10, multiplexerRunMode);
try
{
multiplexer.handleRequest(request, requestContext, callback);
}
catch (Exception e)
{
fail("Multiplexer should not blow up because one of the individual requests failed", e);
}
RestResponse muxRestResponse = callback.get();
// Assert multiplexed request should return a 200 status code
assertEquals(muxRestResponse.getStatus(), 200, "Failure in individual request should not cause the entire multiplexed request to fail");
MultiplexedResponseContent muxResponseContent = new MultiplexedResponseContent(DataMapConverter.bytesToDataMap(muxRestResponse.getHeaders(), muxRestResponse.getEntity()));
IndividualResponseMap responses = muxResponseContent.getResponses();
// Validate the status code for each of the response
assertEquals(responses.get("0").getStatus().intValue(), 200, "Mux response body is: " + responses.toString());
assertEquals(responses.get("1").getStatus().intValue(), 400, "Mux response body is: " + responses.toString());
assertEquals(responses.get("2").getStatus().intValue(), 500, "Mux response body is: " + responses.toString());
assertEquals(responses.get("3").getStatus().intValue(), 404, "Mux response body is: " + responses.toString());
assertEquals(responses.get("4").getStatus().intValue(), 500, "Mux response body is: " + responses.toString());
assertEquals(responses.get("5").getStatus().intValue(), 200, "Mux response body is: " + responses.toString());
assertEquals(responses.get("6").getStatus().intValue(), 400, "Mux response body is: " + responses.toString());
} |
public TemplateException(String message, Throwable cause) {
super(message, cause);
} | @Test
public void testTemplateException() throws Exception {
try {
throw new TemplateException("not found template");
} catch (TemplateException e) {
assertEquals("not found template", e.getMessage());
}
} |
@Override
public <VO, VR> KStream<K, VR> outerJoin(final KStream<K, VO> otherStream,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
final JoinWindows windows) {
return outerJoin(otherStream, toValueJoinerWithKey(joiner), windows);
} | @SuppressWarnings("deprecation")
@Test
public void shouldNotAllowNullValueJoinerWithKeyOnOuterJoinWithStreamJoined() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.outerJoin(
testStream,
(ValueJoinerWithKey<? super String, ? super String, ? super String, ?>) null,
JoinWindows.of(ofMillis(10)),
StreamJoined.as("name")));
assertThat(exception.getMessage(), equalTo("joiner can't be null"));
} |
public static ImmutableList<String> glob(final String glob) {
Path path = getGlobPath(glob);
int globIndex = getGlobIndex(path);
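// No wildcard segment found: return the pattern itself as a literal path.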
if (globIndex < 0) {
return of(glob);
}
return doGlob(path, searchPath(path, globIndex));
} | @Test
public void should_glob_files() {
ImmutableList<String> glob = Globs.glob("*.json");
assertThat(glob.isEmpty(), is(true));
} |
@SuppressWarnings("unchecked")
public <V> V run(String callableName, RetryOperation<V> operation)
{
int attempt = 1;
while (true) {
try {
return operation.run();
}
catch (Exception e) {
if (!exceptionClass.isInstance(e)) {
throwIfUnchecked(e);
throw new RuntimeException(e);
}
E qe = (E) e;
exceptionCallback.accept(qe);
if (attempt >= maxAttempts || !retryPredicate.test(qe)) {
throw qe;
}
attempt++;
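// Exponential backoff: the delay grows by scaleFactor per attempt, capped at maxBackoffDelay, plus up to ~10% random jitter.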
int delayMillis = (int) min(minBackoffDelay.toMillis() * pow(scaleFactor, attempt - 1), maxBackoffDelay.toMillis());
int jitterMillis = ThreadLocalRandom.current().nextInt(max(1, (int) (delayMillis * 0.1)));
log.info(
"Failed on executing %s with attempt %d. Retry after %sms. Cause: %s",
callableName,
attempt - 1,
delayMillis,
qe.getMessage());
try {
MILLISECONDS.sleep(delayMillis + jitterMillis);
}
catch (InterruptedException ie) {
currentThread().interrupt();
throw new RuntimeException(ie);
}
}
}
} | @Test(expectedExceptions = QueryException.class)
public void testNonRetryableFailure()
{
retryDriver.run("test", new MockOperation(3, NON_RETRYABLE_EXCEPTION));
} |
void appendValuesClause(StringBuilder sb) {
sb.append("VALUES ");
appendValues(sb, jdbcTable.dbFieldNames().size());
} | @Test
void appendValuesClause() {
MySQLUpsertQueryBuilder builder = new MySQLUpsertQueryBuilder(jdbcTable, dialect);
StringBuilder sb = new StringBuilder();
builder.appendValuesClause(sb);
String valuesClause = sb.toString();
assertThat(valuesClause).isEqualTo("VALUES (?,?)");
} |
public static <K, C, V, T> V computeIfAbsent(Map<K, V> target, K key, BiFunction<C, T, V> mappingFunction, C param1,
T param2) {
Objects.requireNonNull(target, "target");
Objects.requireNonNull(key, "key");
Objects.requireNonNull(mappingFunction, "mappingFunction");
Objects.requireNonNull(param1, "param1");
Objects.requireNonNull(param2, "param2");
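// Note: unlike Map#computeIfAbsent, a null value returned by the mapping function is still stored in the map.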
V val = target.get(key);
if (val == null) {
V ret = mappingFunction.apply(param1, param2);
target.put(key, ret);
return ret;
}
return val;
} | @Test
public void computeIfAbsentValIsNullTest() {
Map<String, Object> map = new HashMap<>();
map.put("abc", "123");
BiFunction<String, String, Object> mappingFunction = (a, b) -> a + b;
Object ret = MapUtil.computeIfAbsent(map, "xyz", mappingFunction, "param1", "param2");
Assert.isTrue(Objects.equals("param1param2", String.valueOf(ret)));
} |
public static <T extends PipelineOptions> T as(Class<T> klass) {
return new Builder().as(klass);
} | @Test
public void testMultipleSettersAnnotatedWithJsonIgnore() throws Exception {
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage("Found setters marked with @JsonIgnore:");
expectedException.expectMessage(
"property [other] should not be marked with @JsonIgnore on ["
+ "org.apache.beam.sdk.options.PipelineOptionsFactoryTest$MultiSetterWithJsonIgnore]");
expectedException.expectMessage(
"property [value] should not be marked with @JsonIgnore on ["
+ "org.apache.beam.sdk.options.PipelineOptionsFactoryTest$SetterWithJsonIgnore]");
PipelineOptionsFactory.as(MultiSetterWithJsonIgnore.class);
} |
@Override
public AppSettings load() {
Properties p = loadPropertiesFile(homeDir);
Set<String> keysOverridableFromEnv = stream(ProcessProperties.Property.values()).map(ProcessProperties.Property::getKey)
.collect(Collectors.toSet());
keysOverridableFromEnv.addAll(p.stringPropertyNames());
// 1st pass to load static properties
Props staticProps = reloadProperties(keysOverridableFromEnv, p);
keysOverridableFromEnv.addAll(getDynamicPropertiesKeys(staticProps));
// 2nd pass to load dynamic properties like `ldap.*.url` or `ldap.*.baseDn` which keys depend on values of static
// properties loaded in 1st step
Props props = reloadProperties(keysOverridableFromEnv, p);
new ProcessProperties(serviceLoaderWrapper).completeDefaults(props);
stream(consumers).forEach(c -> c.accept(props));
return new AppSettingsImpl(props);
} | @Test
public void env_vars_take_precedence_over_properties_file() throws Exception {
when(system.getenv()).thenReturn(ImmutableMap.of("SONAR_CUSTOMPROP", "11"));
when(system.getenv("SONAR_CUSTOMPROP")).thenReturn("11");
File homeDir = temp.newFolder();
File propsFile = new File(homeDir, "conf/sonar.properties");
FileUtils.write(propsFile, "sonar.customProp=10", UTF_8);
AppSettingsLoaderImpl underTest = new AppSettingsLoaderImpl(system, new String[0], homeDir, serviceLoaderWrapper);
AppSettings settings = underTest.load();
assertThat(settings.getProps().rawProperties()).contains(entry("sonar.customProp", "11"));
} |
private static File targetFile(String dataId, String group, String tenant) {
// fix https://github.com/alibaba/nacos/issues/10067
dataId = PathEncoderManager.getInstance().encode(dataId);
group = PathEncoderManager.getInstance().encode(group);
tenant = PathEncoderManager.getInstance().encode(tenant);
File file = null;
if (StringUtils.isBlank(tenant)) {
file = new File(EnvUtil.getNacosHome(), BASE_DIR);
} else {
file = new File(EnvUtil.getNacosHome(), TENANT_BASE_DIR);
file = new File(file, tenant);
}
file = new File(file, group);
file = new File(file, dataId);
return file;
} | @Test
void testTargetFile() throws NoSuchMethodException, IllegalAccessException, InvocationTargetException {
Method method = ConfigRawDiskService.class.getDeclaredMethod("targetFile", String.class, String.class, String.class);
method.setAccessible(true);
File result = (File) method.invoke(null, "aaaa?dsaknkf", "aaaa*dsaknkf", "aaaa:dsaknkf");
// Decompose the path
Path path = Paths.get(result.getPath());
Path parent = path.getParent();
Path grandParent = parent.getParent();
// Get the last three path segments
String lastSegment = path.getFileName().toString();
String secondLastSegment = parent.getFileName().toString();
String thirdLastSegment = grandParent.getFileName().toString();
assertEquals(isWindows() ? "aaaa%A3%dsaknkf" : thirdLastSegment, thirdLastSegment);
assertEquals(isWindows() ? "aaaa%A4%dsaknkf" : secondLastSegment, secondLastSegment);
assertEquals(isWindows() ? "aaaa%A5%dsaknkf" : lastSegment, lastSegment);
} |
public boolean evaluate( RowMetaInterface rowMeta, Object[] r ) {
// Start of evaluate
boolean retval = false;
// If we have 0 items in the list, evaluate the current condition
// Otherwise, evaluate all sub-conditions
//
try {
if ( isAtomic() ) {
if ( function == FUNC_TRUE ) {
return !negate;
}
// Get fieldnrs left value
//
// Check out the fieldnrs if we don't have them...
if ( leftValuename != null && leftValuename.length() > 0 ) {
leftFieldnr = rowMeta.indexOfValue( leftValuename );
}
// Get fieldnrs right value
//
if ( rightValuename != null && rightValuename.length() > 0 ) {
rightFieldnr = rowMeta.indexOfValue( rightValuename );
}
// Get fieldnrs left field
ValueMetaInterface fieldMeta = null;
Object field = null;
if ( leftFieldnr >= 0 ) {
fieldMeta = rowMeta.getValueMeta( leftFieldnr );
field = r[ leftFieldnr ];
} else {
return false; // no fields to evaluate
}
// Get fieldnrs right exact
ValueMetaInterface fieldMeta2 = rightExact != null ? rightExact.getValueMeta() : null;
Object field2 = rightExact != null ? rightExact.getValueData() : null;
if ( field2 == null && rightFieldnr >= 0 ) {
fieldMeta2 = rowMeta.getValueMeta( rightFieldnr );
field2 = r[ rightFieldnr ];
}
// Evaluate
switch ( function ) {
case FUNC_EQUAL:
retval = ( fieldMeta.compare( field, fieldMeta2, field2 ) == 0 );
break;
case FUNC_NOT_EQUAL:
retval = ( fieldMeta.compare( field, fieldMeta2, field2 ) != 0 );
break;
case FUNC_SMALLER:
// Added this if/else to accommodate for CUST-270
if ( "Y".equalsIgnoreCase( System.getProperty( Const.KETTLE_FILTER_TREAT_NULLS_AS_NOT_ZERO, "N" ) )
&& fieldMeta.isNull( field ) ) {
retval = false;
} else {
retval = ( fieldMeta.compare( field, fieldMeta2, field2 ) < 0 );
}
break;
case FUNC_SMALLER_EQUAL:
// Added this if/else to accommodate for CUST-270
if ( "Y".equalsIgnoreCase( System.getProperty( Const.KETTLE_FILTER_TREAT_NULLS_AS_NOT_ZERO, "N" ) )
&& fieldMeta.isNull( field ) ) {
retval = false;
} else {
retval = ( fieldMeta.compare( field, fieldMeta2, field2 ) <= 0 );
}
break;
case FUNC_LARGER:
retval = ( fieldMeta.compare( field, fieldMeta2, field2 ) > 0 );
break;
case FUNC_LARGER_EQUAL:
retval = ( fieldMeta.compare( field, fieldMeta2, field2 ) >= 0 );
break;
case FUNC_REGEXP:
if ( fieldMeta.isNull( field ) || field2 == null ) {
retval = false;
} else {
retval =
Pattern
.matches( fieldMeta2.getCompatibleString( field2 ), fieldMeta.getCompatibleString( field ) );
}
break;
case FUNC_NULL:
retval = ( fieldMeta.isNull( field ) );
break;
case FUNC_NOT_NULL:
retval = ( !fieldMeta.isNull( field ) );
break;
case FUNC_IN_LIST:
// Performance: build the array once when the right-hand side is a constant; rebuild it per row when it is a field.
//
if ( inList == null || rightFieldnr >= 0 ) {
inList = Const.splitString( fieldMeta2.getString( field2 ), ';', true );
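// Strip escape backslashes, then sort the values so the binary search below works.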
for ( int i = 0; i < inList.length; i++ ) {
inList[i] = inList[i] == null ? null : inList[i].replace( "\\", "" );
}
Arrays.sort( inList );
}
String searchString = fieldMeta.getCompatibleString( field );
int inIndex = -1;
if ( searchString != null ) {
inIndex = Arrays.binarySearch( inList, searchString );
}
retval = inIndex >= 0;
break;
case FUNC_CONTAINS:
String fm2CompatibleContains = fieldMeta2.getCompatibleString( field2 );
retval = Optional.ofNullable( fieldMeta.getCompatibleString( field ) )
.filter( s -> s.contains( fm2CompatibleContains ) ).isPresent();
break;
case FUNC_STARTS_WITH:
String fm2CompatibleStarts = fieldMeta2.getCompatibleString( field2 );
retval = Optional.ofNullable( fieldMeta.getCompatibleString( field ) )
.filter( s -> s.startsWith( fm2CompatibleStarts ) ).isPresent();
break;
case FUNC_ENDS_WITH:
String string = fieldMeta.getCompatibleString( field );
if ( !Utils.isEmpty( string ) ) {
if ( rightString == null && field2 != null ) {
rightString = fieldMeta2.getCompatibleString( field2 );
}
if ( rightString != null ) {
retval = string.endsWith( fieldMeta2.getCompatibleString( field2 ) );
} else {
retval = false;
}
} else {
retval = false;
}
break;
case FUNC_LIKE:
// Converts to a regular expression
// TODO: optimize the patterns and String replacements
//
if ( fieldMeta.isNull( field ) || field2 == null ) {
retval = false;
} else {
String regex = fieldMeta2.getCompatibleString( field2 );
regex = regex.replace( "%", ".*" );
regex = regex.replace( "?", "." );
retval = Pattern.matches( regex, fieldMeta.getCompatibleString( field ) );
}
break;
default:
break;
}
// Only NOT makes sense, the rest doesn't, so ignore!!!!
// Optionally negate
//
if ( isNegated() ) {
retval = !retval;
}
} else {
// Composite : get first
Condition cb0 = list.get( 0 );
retval = cb0.evaluate( rowMeta, r );
// Loop over the conditions listed below.
//
for ( int i = 1; i < list.size(); i++ ) {
// Composite : #i
// Get right hand condition
Condition cb = list.get( i );
// Evaluate the right hand side of the condition cb.evaluate() within
// the switch statement
// because the condition may be short-circuited due to the left hand
// side (retval)
switch ( cb.getOperator() ) {
case Condition.OPERATOR_OR:
retval = retval || cb.evaluate( rowMeta, r );
break;
case Condition.OPERATOR_AND:
retval = retval && cb.evaluate( rowMeta, r );
break;
case Condition.OPERATOR_OR_NOT:
retval = retval || ( !cb.evaluate( rowMeta, r ) );
break;
case Condition.OPERATOR_AND_NOT:
retval = retval && ( !cb.evaluate( rowMeta, r ) );
break;
case Condition.OPERATOR_XOR:
retval = retval ^ cb.evaluate( rowMeta, r );
break;
default:
break;
}
}
// Composite: optionally negate
if ( isNegated() ) {
retval = !retval;
}
}
} catch ( Exception e ) {
throw new RuntimeException( "Unexpected error evaluation condition [" + toString() + "]", e );
}
return retval;
} | @Test
public void testZeroLargerOrEqualsThanNull() {
String left = "left";
String right = "right";
Long leftValue = 0L;
Long rightValue = null;
RowMetaInterface rowMeta = new RowMeta();
rowMeta.addValueMeta( new ValueMetaInteger( left ) );
rowMeta.addValueMeta( new ValueMetaInteger( right ) );
Condition condition = new Condition( left, Condition.FUNC_LARGER_EQUAL, right, null );
assertTrue( condition.evaluate( rowMeta, new Object[] { leftValue, rightValue } ) );
} |
public static <T> Values<T> of(Iterable<T> elems) {
return new Values<>(elems, Optional.absent(), Optional.absent(), false);
} | @Test
public void testSourceSplitVoid() throws Exception {
CreateSource<Void> source =
CreateSource.fromIterable(Lists.newArrayList(null, null, null, null, null), VoidCoder.of());
PipelineOptions options = PipelineOptionsFactory.create();
List<? extends BoundedSource<Void>> splitSources = source.split(3, options);
SourceTestUtils.assertSourcesEqualReferenceSource(source, splitSources, options);
} |
public AbstractPushCallBack(long timeout) {
this.timeout = timeout;
} | @Test
void testAbstractPushCallBack() {
AbstractPushCallBack callBack = new AbstractPushCallBack(2000) {
@Override
public void onSuccess() {
testValue = true;
}
@Override
public void onFail(Throwable e) {
testValue = false;
}
};
assertEquals(2000, callBack.getTimeout());
assertFalse(testValue);
callBack.onSuccess();
assertTrue(testValue);
callBack.onFail(new RuntimeException("test"));
assertFalse(testValue);
} |
@Override
protected void validateDataImpl(TenantId tenantId, ComponentDescriptor plugin) {
validateString("Component name", plugin.getName());
if (plugin.getType() == null) {
throw new DataValidationException("Component type should be specified!");
}
if (plugin.getScope() == null) {
throw new DataValidationException("Component scope should be specified!");
}
if (StringUtils.isEmpty(plugin.getClazz())) {
throw new DataValidationException("Component clazz should be specified!");
}
} | @Test
void testValidateNameInvocation() {
ComponentDescriptor plugin = new ComponentDescriptor();
plugin.setType(ComponentType.ENRICHMENT);
plugin.setScope(ComponentScope.SYSTEM);
plugin.setName("originator attributes");
plugin.setClazz("org.thingsboard.rule.engine.metadata.TbGetAttributesNode");
validator.validateDataImpl(TenantId.SYS_TENANT_ID, plugin);
verify(validator).validateString("Component name", plugin.getName());
} |
@Override
public int getLineHashesVersion(Component component) {
if (significantCodeRepository.getRangesPerLine(component).isPresent()) {
return LineHashVersion.WITH_SIGNIFICANT_CODE.getDbValue();
} else {
return LineHashVersion.WITHOUT_SIGNIFICANT_CODE.getDbValue();
}
} | @Test
public void should_return_without_significant_code_if_report_does_not_contain_it() {
when(significantCodeRepository.getRangesPerLine(file)).thenReturn(Optional.empty());
assertThat(underTest.getLineHashesVersion(file)).isEqualTo(LineHashVersion.WITHOUT_SIGNIFICANT_CODE.getDbValue());
verify(significantCodeRepository).getRangesPerLine(file);
verifyNoMoreInteractions(significantCodeRepository);
verifyNoInteractions(dbLineHashVersion);
} |
@Override
public List<InetSocketAddress> lookup(String key) throws Exception {
final String cluster = getServiceGroup(key);
if (cluster == null) {
String missingDataId = PREFIX_SERVICE_ROOT + CONFIG_SPLIT_CHAR + PREFIX_SERVICE_MAPPING + key;
throw new ConfigNotFoundException("%s configuration item is required", missingDataId);
}
return lookupByCluster(cluster);
} | @Test
public void testLookup() throws Exception {
RegistryService registryService = mock(ConsulRegistryServiceImpl.class);
registryService.lookup("test-key");
verify(registryService).lookup("test-key");
} |
@Override
public InternalNode getCurrentNode()
{
return currentNode;
} | @Test
public void testGetCurrentNode()
{
DiscoveryNodeManager manager = new DiscoveryNodeManager(selector, workerNodeInfo, new NoOpFailureDetector(), Optional.empty(), expectedVersion, testHttpClient, new TestingDriftClient<>(), internalCommunicationConfig);
try {
assertEquals(manager.getCurrentNode(), workerNode1);
}
finally {
manager.stop();
}
} |
@VisibleForTesting
void validateTableInfo(TableInfo tableInfo) {
if (tableInfo == null) {
throw exception(CODEGEN_IMPORT_TABLE_NULL);
}
if (StrUtil.isEmpty(tableInfo.getComment())) {
throw exception(CODEGEN_TABLE_INFO_TABLE_COMMENT_IS_NULL);
}
if (CollUtil.isEmpty(tableInfo.getFields())) {
throw exception(CODEGEN_IMPORT_COLUMNS_NULL);
}
tableInfo.getFields().forEach(field -> {
if (StrUtil.isEmpty(field.getComment())) {
throw exception(CODEGEN_TABLE_INFO_COLUMN_COMMENT_IS_NULL, field.getName());
}
});
} | @Test
public void testValidateTableInfo() {
// Case 1: table info is null
assertServiceException(() -> codegenService.validateTableInfo(null),
CODEGEN_IMPORT_TABLE_NULL);
// Case 2: table comment is missing
TableInfo tableInfo = mock(TableInfo.class);
assertServiceException(() -> codegenService.validateTableInfo(tableInfo),
CODEGEN_TABLE_INFO_TABLE_COMMENT_IS_NULL);
// Case 3: no columns
when(tableInfo.getComment()).thenReturn("芋艿");
assertServiceException(() -> codegenService.validateTableInfo(tableInfo),
CODEGEN_IMPORT_COLUMNS_NULL);
// Case 4: column comment is missing
TableField field = mock(TableField.class);
when(field.getName()).thenReturn("name");
when(tableInfo.getFields()).thenReturn(Collections.singletonList(field));
assertServiceException(() -> codegenService.validateTableInfo(tableInfo),
CODEGEN_TABLE_INFO_COLUMN_COMMENT_IS_NULL, field.getName());
} |
public static UIf create(
UExpression condition, UStatement thenStatement, UStatement elseStatement) {
return new AutoValue_UIf(condition, thenStatement, elseStatement);
} | @Test
public void equality() {
new EqualsTester()
.addEqualityGroup(
UIf.create(
UFreeIdent.create("cond"),
UBlock.create(
UExpressionStatement.create(
UAssign.create(UFreeIdent.create("x"), UFreeIdent.create("y")))),
UBlock.create(
UExpressionStatement.create(
UAssign.create(UFreeIdent.create("x"), UFreeIdent.create("z"))))))
.addEqualityGroup(
UIf.create(
UFreeIdent.create("cond"),
UBlock.create(
UExpressionStatement.create(
UAssign.create(UFreeIdent.create("x"), UFreeIdent.create("y")))),
null))
.addEqualityGroup(
UIf.create(
ULiteral.booleanLit(true),
UBlock.create(
UExpressionStatement.create(
UAssign.create(UFreeIdent.create("x"), UFreeIdent.create("y")))),
UBlock.create(
UExpressionStatement.create(
UAssign.create(UFreeIdent.create("x"), UFreeIdent.create("z"))))))
.testEquals();
} |
@WorkerThread
@Override
public Unit call()
throws IOException,
StreamNotFoundException,
ShellNotRunningException,
IllegalArgumentException {
OutputStream outputStream;
File destFile = null;
switch (fileAbstraction.scheme) {
case CONTENT:
Objects.requireNonNull(fileAbstraction.uri);
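// URIs issued by this app's own document provider may not be writable through the resolver;
// fall back to writing the underlying file directly when they are not.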
if (fileAbstraction.uri.getAuthority().equals(context.get().getPackageName())) {
DocumentFile documentFile =
DocumentFile.fromSingleUri(AppConfig.getInstance(), fileAbstraction.uri);
if (documentFile != null && documentFile.exists() && documentFile.canWrite()) {
outputStream = contentResolver.openOutputStream(fileAbstraction.uri, "wt");
} else {
destFile = FileUtils.fromContentUri(fileAbstraction.uri);
outputStream = openFile(destFile, context.get());
}
} else {
outputStream = contentResolver.openOutputStream(fileAbstraction.uri, "wt");
}
break;
case FILE:
final HybridFileParcelable hybridFileParcelable = fileAbstraction.hybridFileParcelable;
Objects.requireNonNull(hybridFileParcelable);
Context context = this.context.get();
if (context == null) {
return null;
}
outputStream = openFile(hybridFileParcelable.getFile(), context);
destFile = fileAbstraction.hybridFileParcelable.getFile();
break;
default:
throw new IllegalArgumentException(
"The scheme for '" + fileAbstraction.scheme + "' cannot be processed!");
}
Objects.requireNonNull(outputStream);
outputStream.write(dataToSave.getBytes());
outputStream.close();
if (cachedFile != null && cachedFile.exists() && destFile != null) {
// cat cache content to original file and delete cache file
ConcatenateFileCommand.INSTANCE.concatenateFile(cachedFile.getPath(), destFile.getPath());
cachedFile.delete();
}
return Unit.INSTANCE;
} | @Test
public void testWriteFileNonRoot()
throws IOException, StreamNotFoundException, ShellNotRunningException {
File file = new File(Environment.getExternalStorageDirectory(), "test.txt");
Uri uri = Uri.fromFile(file);
Context ctx = ApplicationProvider.getApplicationContext();
ContentResolver cr = ctx.getContentResolver();
WriteTextFileCallable task =
new WriteTextFileCallable(
ctx, cr, new EditableFileAbstraction(ctx, uri), contents, null, false);
task.call();
String verify = IoUtils.readFully(new FileInputStream(file));
assertEquals(contents, verify);
} |
public static Document getDocument(String xml) throws ParserConfigurationException, IOException, SAXException {
DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
factory.setFeature("http://apache.org/xml/features/nonvalidating/load-external-dtd", false);
factory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
factory.setIgnoringComments(true);
DocumentBuilder dBuilder = factory.newDocumentBuilder();
try (InputStream inputStream = new ByteArrayInputStream(xml.getBytes())) {
return dBuilder.parse(inputStream);
}
} | @Test
public void getDocument() throws Exception {
Document retrieved = DOMParserUtil.getDocument(XML);
assertThat(retrieved).isNotNull();
} |
@VisibleForTesting
static Comparator<ActualProperties> streamingExecutionPreference(PreferredProperties preferred)
{
// Calculating the matches can be a bit expensive, so cache the results between comparisons
LoadingCache<List<LocalProperty<VariableReferenceExpression>>, List<Optional<LocalProperty<VariableReferenceExpression>>>> matchCache = CacheBuilder.newBuilder()
.build(CacheLoader.from(actualProperties -> LocalProperties.match(actualProperties, preferred.getLocalProperties())));
return (actual1, actual2) -> {
List<Optional<LocalProperty<VariableReferenceExpression>>> matchLayout1 = matchCache.getUnchecked(actual1.getLocalProperties());
List<Optional<LocalProperty<VariableReferenceExpression>>> matchLayout2 = matchCache.getUnchecked(actual2.getLocalProperties());
return ComparisonChain.start()
.compareTrueFirst(hasLocalOptimization(preferred.getLocalProperties(), matchLayout1), hasLocalOptimization(preferred.getLocalProperties(), matchLayout2))
.compareTrueFirst(meetsPartitioningRequirements(preferred, actual1), meetsPartitioningRequirements(preferred, actual2))
.compare(matchLayout1, matchLayout2, matchedLayoutPreference())
.result();
};
} | @Test
public void testPickLayoutGrouped()
{
Comparator<ActualProperties> preference =
streamingExecutionPreference(PreferredProperties.local(ImmutableList.of(grouped("a"))));
List<ActualProperties> input = ImmutableList.<ActualProperties>builder()
.add(builder()
.global(streamPartitionedOn("a"))
.build())
.add(builder()
.global(singleStreamPartition())
.build())
.add(builder()
.global(arbitraryPartition())
.local(ImmutableList.of(grouped("a", "b")))
.build())
.add(builder()
.global(arbitraryPartition())
.build())
.add(builder()
.global(hashDistributedOn("a"))
.build())
.add(builder()
.global(singleStream())
.local(ImmutableList.of(constant("a"), sorted("b", ASC_NULLS_FIRST)))
.build())
.add(builder()
.global(singleStreamPartition())
.local(ImmutableList.of(sorted("a", ASC_NULLS_FIRST)))
.build())
.add(builder()
.global(hashDistributedOn("a"))
.build())
.build();
List<ActualProperties> expected = ImmutableList.<ActualProperties>builder()
.add(builder()
.global(singleStream())
.local(ImmutableList.of(constant("a"), sorted("b", ASC_NULLS_FIRST)))
.build())
.add(builder()
.global(singleStreamPartition())
.local(ImmutableList.of(sorted("a", ASC_NULLS_FIRST)))
.build())
.add(builder()
.global(streamPartitionedOn("a"))
.build())
.add(builder()
.global(singleStreamPartition())
.build())
.add(builder()
.global(arbitraryPartition())
.local(ImmutableList.of(grouped("a", "b")))
.build())
.add(builder()
.global(arbitraryPartition())
.build())
.add(builder()
.global(hashDistributedOn("a"))
.build())
.add(builder()
.global(hashDistributedOn("a"))
.build())
.build();
assertEquals(stableSort(input, preference), expected);
} |
@GET
@Path("/{connector}/tasks-config")
@Operation(deprecated = true, summary = "Get the configuration of all tasks for the specified connector")
public Map<ConnectorTaskId, Map<String, String>> getTasksConfig(
final @PathParam("connector") String connector) throws Throwable {
log.warn("The 'GET /connectors/{connector}/tasks-config' endpoint is deprecated and will be removed in the next major release. "
+ "Please use the 'GET /connectors/{connector}/tasks' endpoint instead.");
FutureCallback<Map<ConnectorTaskId, Map<String, String>>> cb = new FutureCallback<>();
herder.tasksConfig(connector, cb);
return requestHandler.completeRequest(cb);
} | @Test
public void testGetTasksConfigConnectorNotFound() {
final ArgumentCaptor<Callback<Map<ConnectorTaskId, Map<String, String>>>> cb = ArgumentCaptor.forClass(Callback.class);
expectAndCallbackException(cb, new NotFoundException("not found"))
.when(herder).tasksConfig(eq(CONNECTOR_NAME), cb.capture());
assertThrows(NotFoundException.class, () ->
connectorsResource.getTasksConfig(CONNECTOR_NAME));
} |
public static String javaCharArray(String str){
StringBuilder result = new StringBuilder();
for (char c : str.toCharArray()) {
result.append(c);
}
return result.toString();
} | @Test
public void whenUseCharArrayMethod_thenIterate() {
String input = "Hello, Baeldung!";
String expectedOutput = "Hello, Baeldung!";
String result = StringIterator.javaCharArray(input);
assertEquals(expectedOutput, result);
} |
@Override
public R transform(final K readOnlyKey, final GenericRow value) {
return delegate.transform(
readOnlyKey,
value,
context.orElseThrow(() -> new IllegalStateException("Not initialized"))
);
} | @Test
public void shouldReturnValueFromInnerTransformer() {
// When:
final String result = ksTransformer.transform(KEY, VALUE);
// Then:
assertThat(result, is(RESULT));
} |
@Override
public FileLock lock(long position, long size, boolean shared) throws IOException {
checkLockArguments(position, size, shared);
// lock is interruptible
boolean completed = false;
try {
begin();
completed = true;
return new FakeFileLock(this, position, size, shared);
} finally {
try {
end(completed);
} catch (ClosedByInterruptException e) {
throw new FileLockInterruptionException();
}
}
} | @Test
public void testAsynchronousClose() throws Exception {
RegularFile file = regularFile(10);
final FileChannel channel = channel(file, READ, WRITE);
file.writeLock().lock(); // ensure all operations on the channel will block
ExecutorService executor = Executors.newCachedThreadPool();
CountDownLatch latch = new CountDownLatch(BLOCKING_OP_COUNT);
List<Future<?>> futures = queueAllBlockingOperations(channel, executor, latch);
// wait for all the threads to have started running
latch.await();
// then ensure time for operations to start blocking
Uninterruptibles.sleepUninterruptibly(20, MILLISECONDS);
// close channel on this thread
channel.close();
// the blocking operations are running on different threads, so they all get
// AsynchronousCloseException
for (Future<?> future : futures) {
try {
future.get();
fail();
} catch (ExecutionException expected) {
assertWithMessage("blocking thread exception")
.that(expected.getCause())
.isInstanceOf(AsynchronousCloseException.class);
}
}
} |
@SuppressWarnings("unchecked")
public final <T> T getValue(final E key) {
return (T) cache.get(key).getValue();
} | @Test
void assertGetDefaultValue() {
TypedPropertiesFixture actual = new TypedPropertiesFixture(new Properties());
assertFalse((Boolean) actual.getValue(TypedPropertyKeyFixture.BOOLEAN_VALUE));
assertFalse((Boolean) actual.getValue(TypedPropertyKeyFixture.BOOLEAN_OBJECT_VALUE));
assertThat(actual.getValue(TypedPropertyKeyFixture.INT_VALUE), is(10));
assertThat(actual.getValue(TypedPropertyKeyFixture.INT_OBJECT_VALUE), is(10));
assertThat(actual.getValue(TypedPropertyKeyFixture.LONG_VALUE), is(1000L));
assertThat(actual.getValue(TypedPropertyKeyFixture.LONG_OBJECT_VALUE), is(1000L));
assertThat(actual.getValue(TypedPropertyKeyFixture.STRING_VALUE), is("value"));
} |
public static DataflowRunner fromOptions(PipelineOptions options) {
DataflowPipelineOptions dataflowOptions =
PipelineOptionsValidator.validate(DataflowPipelineOptions.class, options);
ArrayList<String> missing = new ArrayList<>();
if (dataflowOptions.getAppName() == null) {
missing.add("appName");
}
if (Strings.isNullOrEmpty(dataflowOptions.getRegion())
&& isServiceEndpoint(dataflowOptions.getDataflowEndpoint())) {
missing.add("region");
}
if (missing.size() > 0) {
throw new IllegalArgumentException(
"Missing required pipeline options: " + Joiner.on(',').join(missing));
}
validateWorkerSettings(
PipelineOptionsValidator.validate(DataflowPipelineWorkerPoolOptions.class, options));
PathValidator validator = dataflowOptions.getPathValidator();
String gcpTempLocation;
try {
gcpTempLocation = dataflowOptions.getGcpTempLocation();
} catch (Exception e) {
throw new IllegalArgumentException(
"DataflowRunner requires gcpTempLocation, "
+ "but failed to retrieve a value from PipelineOptions",
e);
}
validator.validateOutputFilePrefixSupported(gcpTempLocation);
String stagingLocation;
try {
stagingLocation = dataflowOptions.getStagingLocation();
} catch (Exception e) {
throw new IllegalArgumentException(
"DataflowRunner requires stagingLocation, "
+ "but failed to retrieve a value from PipelineOptions",
e);
}
validator.validateOutputFilePrefixSupported(stagingLocation);
if (!isNullOrEmpty(dataflowOptions.getSaveProfilesToGcs())) {
validator.validateOutputFilePrefixSupported(dataflowOptions.getSaveProfilesToGcs());
}
if (dataflowOptions.getFilesToStage() != null) {
// The user specifically requested these files, so fail now if they do not exist.
// (automatically detected classpath elements are permitted to not exist, so later
// staging will not fail on nonexistent files)
dataflowOptions.getFilesToStage().stream()
.forEach(
stagedFileSpec -> {
File localFile;
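// Entries may use the "destinationName=/local/path" form; only the local path is checked for existence.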
if (stagedFileSpec.contains("=")) {
String[] components = stagedFileSpec.split("=", 2);
localFile = new File(components[1]);
} else {
localFile = new File(stagedFileSpec);
}
if (!localFile.exists()) {
// should be FileNotFoundException, but for build-time backwards compatibility
// cannot add checked exception
throw new RuntimeException(
String.format("Non-existent files specified in filesToStage: %s", localFile));
}
});
} else {
dataflowOptions.setFilesToStage(
detectClassPathResourcesToStage(DataflowRunner.class.getClassLoader(), options));
if (dataflowOptions.getFilesToStage().isEmpty()) {
throw new IllegalArgumentException("No files to stage has been found.");
} else {
LOG.info(
"PipelineOptions.filesToStage was not specified. "
+ "Defaulting to files from the classpath: will stage {} files. "
+ "Enable logging at DEBUG level to see which files will be staged.",
dataflowOptions.getFilesToStage().size());
LOG.debug("Classpath elements: {}", dataflowOptions.getFilesToStage());
}
}
// Verify jobName according to service requirements, truncating and converting to lowercase if
// necessary.
String jobName = dataflowOptions.getJobName().toLowerCase();
checkArgument(
jobName.matches("[a-z]([-a-z0-9]*[a-z0-9])?"),
"JobName invalid; the name must consist of only the characters "
+ "[-a-z0-9], starting with a letter and ending with a letter "
+ "or number");
if (!jobName.equals(dataflowOptions.getJobName())) {
LOG.info(
"PipelineOptions.jobName did not match the service requirements. "
+ "Using {} instead of {}.",
jobName,
dataflowOptions.getJobName());
}
dataflowOptions.setJobName(jobName);
// Verify project
String project = dataflowOptions.getProject();
if (project.matches("[0-9]*")) {
throw new IllegalArgumentException(
"Project ID '"
+ project
+ "' invalid. Please make sure you specified the Project ID, not project number.");
} else if (!project.matches(PROJECT_ID_REGEXP)) {
throw new IllegalArgumentException(
"Project ID '"
+ project
+ "' invalid. Please make sure you specified the Project ID, not project"
+ " description.");
}
DataflowPipelineDebugOptions debugOptions =
dataflowOptions.as(DataflowPipelineDebugOptions.class);
// Verify the number of worker threads is a valid value
if (debugOptions.getNumberOfWorkerHarnessThreads() < 0) {
throw new IllegalArgumentException(
"Number of worker harness threads '"
+ debugOptions.getNumberOfWorkerHarnessThreads()
+ "' invalid. Please make sure the value is non-negative.");
}
// Verify that if recordJfrOnGcThrashing is set, the pipeline is at least on java 11
if (dataflowOptions.getRecordJfrOnGcThrashing()
&& Environments.getJavaVersion() == Environments.JavaVersion.java8) {
throw new IllegalArgumentException(
"recordJfrOnGcThrashing is only supported on java 9 and up.");
}
if (dataflowOptions.isStreaming() && dataflowOptions.getGcsUploadBufferSizeBytes() == null) {
dataflowOptions.setGcsUploadBufferSizeBytes(GCS_UPLOAD_BUFFER_SIZE_BYTES_DEFAULT);
}
// Adding the Java version to the SDK name for user's and support convenience.
String agentJavaVer = "(JRE 8 environment)";
if (Environments.getJavaVersion() != Environments.JavaVersion.java8) {
agentJavaVer =
String.format("(JRE %s environment)", Environments.getJavaVersion().specification());
}
DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo();
String userAgentName = dataflowRunnerInfo.getName();
Preconditions.checkArgument(
!userAgentName.equals(""), "Dataflow runner's `name` property cannot be empty.");
String userAgentVersion = dataflowRunnerInfo.getVersion();
Preconditions.checkArgument(
!userAgentVersion.equals(""), "Dataflow runner's `version` property cannot be empty.");
String userAgent =
String.format("%s/%s%s", userAgentName, userAgentVersion, agentJavaVer).replace(" ", "_");
dataflowOptions.setUserAgent(userAgent);
return new DataflowRunner(dataflowOptions);
} | @Test
public void testProjectId() throws IOException {
DataflowPipelineOptions options = buildPipelineOptions();
options.setProject("foo-12345");
DataflowRunner.fromOptions(options);
} |
public static boolean containsSystemSchema(final DatabaseType databaseType, final Collection<String> schemaNames, final ShardingSphereDatabase database) {
DialectDatabaseMetaData dialectDatabaseMetaData = new DatabaseTypeRegistry(databaseType).getDialectDatabaseMetaData();
if (database.isComplete() && !dialectDatabaseMetaData.getDefaultSchema().isPresent()) {
return false;
}
SystemDatabase systemDatabase = new SystemDatabase(databaseType);
for (String each : schemaNames) {
if (systemDatabase.getSystemSchemas().contains(each)) {
return true;
}
}
return !dialectDatabaseMetaData.getDefaultSchema().isPresent() && systemDatabase.getSystemSchemas().contains(database.getName());
} | @Test
void assertContainsSystemSchemaForOpenGaussSQL() {
DatabaseType databaseType = TypedSPILoader.getService(DatabaseType.class, "openGauss");
ShardingSphereDatabase informationSchemaDatabase = mockShardingSphereDatabase("information_schema", false);
assertTrue(SystemSchemaUtils.containsSystemSchema(databaseType, Arrays.asList("information_schema", "pg_catalog"), informationSchemaDatabase));
ShardingSphereDatabase shardingSchemaDatabase = mockShardingSphereDatabase("sharding_db", false);
assertFalse(SystemSchemaUtils.containsSystemSchema(databaseType, Collections.singletonList("sharding_db"), shardingSchemaDatabase));
ShardingSphereDatabase customizedInformationSchemaDatabase = mockShardingSphereDatabase("information_schema", true);
assertTrue(SystemSchemaUtils.containsSystemSchema(databaseType, Arrays.asList("information_schema", "pg_catalog"), customizedInformationSchemaDatabase));
ShardingSphereDatabase customizedGaussDBDatabase = mockShardingSphereDatabase("gaussdb", true);
assertFalse(SystemSchemaUtils.containsSystemSchema(databaseType, Collections.emptyList(), customizedGaussDBDatabase));
} |
@Override
public List<ServiceDTO> getServiceInstances(String serviceId) {
String configName = SERVICE_ID_TO_CONFIG_NAME.get(serviceId);
if (configName == null) {
return Collections.emptyList();
}
return assembleServiceDTO(serviceId, bizConfig.getValue(configName));
} | @Test
public void testGetServiceInstancesWithInvalidServiceId() {
String someInvalidServiceId = "someInvalidServiceId";
assertTrue(kubernetesDiscoveryService.getServiceInstances(someInvalidServiceId).isEmpty());
} |
void moveHeadIndexByOne() {
this.headIndex = (headIndex + 1) % windowSize;
} | @Test
public void testMoveHeadIndexByOne() {
FixedSizeSlidingWindowMetrics metrics = new FixedSizeSlidingWindowMetrics(3);
assertThat(metrics.headIndex).isZero();
metrics.moveHeadIndexByOne();
assertThat(metrics.headIndex).isEqualTo(1);
metrics.moveHeadIndexByOne();
assertThat(metrics.headIndex).isEqualTo(2);
metrics.moveHeadIndexByOne();
assertThat(metrics.headIndex).isZero();
metrics.moveHeadIndexByOne();
assertThat(metrics.headIndex).isEqualTo(1);
} |
public static String formatTopology(String topology) {
String prefix = "";
StringBuilder builder = new StringBuilder();
boolean params = false;
char previous = ' ';
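// Single pass over the string: brackets push an indent level, while '?' and '&'
// expand connector parameter lists into their own indented block.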
for (char c : topology.toCharArray()) {
switch (c) {
case '[':
case '{':
case '(':
builder.append(c);
prefix += TAB;
break;
case '?':
if(' ' == previous) {
builder.deleteCharAt(builder.length()-1);
builder.append(',').append(NEW).append(prefix);
}
params = true;
prefix += TAB;
builder.append('{').append(NEW).append(prefix);
break;
case '&':
builder.append(',').append(NEW).append(prefix);
break;
case ',':
if(params) {
prefix = prefix.substring(0, prefix.length() - TAB.length());
builder.append(NEW).append(prefix).append('}').append(c).append(NEW).append(prefix);
} else {
builder.append(c);
}
params = false;
break;
case ']':
case '}':
prefix = prefix.substring(0, prefix.length() - TAB.length());
builder.append(NEW).append(prefix);
builder.append(c);
break;
case ')':
prefix = prefix.substring(0, prefix.length() - TAB.length());
builder.append(c);
break;
case ' ':
if (',' != previous) {
builder.append(c);
}
break;
default:
if (',' == previous) {
builder.append(NEW).append(prefix);
}
builder.append(c);
}
if (c != ' ' || previous != ',') {
previous = c;
}
}
return builder.toString().trim();
} | @Test
public void testFormatTopology() {
String topology = "topology on Topology@2ec5aa9[owner=ClusterConnectionImpl@1518657274[nodeUUID=b7a794ee-b5af-11ec-ae2f-3ce1a1c35439, connector=TransportConfiguration(name=netty, factory=org-apache-activemq-artemis-core-remoting-impl-netty-NettyConnectorFactory) ?port=5445&useNio=true&"
+ "host=localhost&useNioGlobalWorkerPool=true, address=jms, server=ActiveMQServerImpl::name=default]]:"
+ "b7a794ee-b5af-11ec-ae2f-3ce1a1c35439 => TopologyMember[id=b7a794ee-b5af-11ec-ae2f-3ce1a1c35439, connector=Pair[a=TransportConfiguration(name=netty, factory=org-apache-activemq-artemis-core-remoting-impl-netty-NettyConnectorFactory) ?port=5445&useNio=true&host=localhost&useNioGlobalW"
+ "orkerPool=true, b=null], backupGroupName=group1, scaleDownGroupName=null]"
+ "nodes=1 members=1";
String expResult = "topology on Topology@2ec5aa9[owner=ClusterConnectionImpl@1518657274[nodeUUID=b7a794ee-b5af-11ec-ae2f-3ce1a1c35439," + System.lineSeparator()
+ "\t\tconnector=TransportConfiguration(name=netty," + System.lineSeparator()
+ "\t\t\tfactory=org-apache-activemq-artemis-core-remoting-impl-netty-NettyConnectorFactory)," + System.lineSeparator()
+ "\t\t{" + System.lineSeparator()
+ "\t\t\tport=5445," + System.lineSeparator()
+ "\t\t\tuseNio=true," + System.lineSeparator()
+ "\t\t\thost=localhost," + System.lineSeparator()
+ "\t\t\tuseNioGlobalWorkerPool=true" + System.lineSeparator()
+ "\t\t}," + System.lineSeparator()
+ "\t\t" + System.lineSeparator()
+ "\t\taddress=jms," + System.lineSeparator()
+ "\t\tserver=ActiveMQServerImpl::name=default" + System.lineSeparator()
+ "\t]" + System.lineSeparator()
+ "]:b7a794ee-b5af-11ec-ae2f-3ce1a1c35439 => TopologyMember[id=b7a794ee-b5af-11ec-ae2f-3ce1a1c35439," + System.lineSeparator()
+ "\tconnector=Pair[a=TransportConfiguration(name=netty," + System.lineSeparator()
+ "\t\t\tfactory=org-apache-activemq-artemis-core-remoting-impl-netty-NettyConnectorFactory)," + System.lineSeparator()
+ "\t\t{" + System.lineSeparator()
+ "\t\t\tport=5445," + System.lineSeparator()
+ "\t\t\tuseNio=true," + System.lineSeparator()
+ "\t\t\thost=localhost," + System.lineSeparator()
+ "\t\t\tuseNioGlobalWorkerPool=true" + System.lineSeparator()
+ "\t\t}," + System.lineSeparator()
+ "\t\t" + System.lineSeparator()
+ "\t\tb=null" + System.lineSeparator()
+ "\t]," + System.lineSeparator()
+ "\tbackupGroupName=group1," + System.lineSeparator()
+ "\tscaleDownGroupName=null" + System.lineSeparator()
+ "]nodes=1 members=1";
String result = ClusterConnectionControlHandler.formatTopology(topology);
assertEquals(expResult, result);
} |
public static Checksum newInstance(final String className)
{
Objects.requireNonNull(className, "className is required!");
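// Resolve the built-in implementations directly to avoid reflection.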
if (Crc32.class.getName().equals(className))
{
return crc32();
}
else if (Crc32c.class.getName().equals(className))
{
return crc32c();
}
else
{
try
{
final Class<?> klass = Class.forName(className);
final Object instance = klass.getDeclaredConstructor().newInstance();
return (Checksum)instance;
}
catch (final ReflectiveOperationException ex)
{
throw new IllegalArgumentException("failed to create Checksum instance for class: " + className, ex);
}
}
} | @Test
void newInstanceThrowsClassCastExceptionIfCreatedInstanceDoesNotImplementChecksumInterface()
{
assertThrows(ClassCastException.class, () -> Checksums.newInstance(Object.class.getName()));
} |
public static Long convertTimestampToMillis(String timeStr) {
long millis = 0L;
if (timeStr != null) {
try {
millis = Instant.parse(timeStr).toEpochMilli();
} catch (Exception e) {
// rethrowing with more contextual information
throw new IllegalArgumentException("Invalid time spec '" + timeStr + "' (Valid example: "
+ "'2022-08-09T12:31:38.222Z')", e);
}
}
return millis;
} | @Test
public void testConvertDateTimeToMillis() {
assertEquals((long) TimeUtils.convertTimestampToMillis("2022-08-09T12:31:38.222Z"), 1660048298222L);
Assert.assertThrows(IllegalArgumentException.class, () -> TimeUtils
.convertTimestampToMillis("2022-08-09X12:31:38.222Z"));
assertEquals((long) TimeUtils.convertTimestampToMillis(null), 0L);
} |
@Bean
public TimerRegistry timerRegistry(
TimerConfigurationProperties timerConfigurationProperties,
EventConsumerRegistry<TimerEvent> timerEventConsumerRegistry,
RegistryEventConsumer<Timer> timerRegistryEventConsumer,
@Qualifier("compositeTimerCustomizer") CompositeCustomizer<TimerConfigCustomizer> compositeTimerCustomizer,
@Autowired(required = false) MeterRegistry registry
) {
TimerRegistry timerRegistry = createTimerRegistry(timerConfigurationProperties, timerRegistryEventConsumer, compositeTimerCustomizer, registry);
registerEventConsumer(timerRegistry, timerEventConsumerRegistry, timerConfigurationProperties);
initTimerRegistry(timerRegistry, timerConfigurationProperties, compositeTimerCustomizer);
return timerRegistry;
} | @Test
public void shouldConfigureInstancesUsingCustomSharedConfig() {
InstanceProperties sharedProperties = new InstanceProperties()
.setMetricNames("resilience4j.timer.shared")
.setOnFailureTagResolver(FixedOnFailureTagResolver.class);
InstanceProperties instanceProperties1 = new InstanceProperties()
.setMetricNames("resilience4j.timer.operations1")
.setBaseConfig("shared");
InstanceProperties instanceProperties2 = new InstanceProperties()
.setOnFailureTagResolver(QualifiedClassNameOnFailureTagResolver.class)
.setBaseConfig("shared");
TimerConfigurationProperties configurationProperties = new TimerConfigurationProperties();
configurationProperties.getConfigs().put("shared", sharedProperties);
configurationProperties.getInstances().put("backend1", instanceProperties1);
configurationProperties.getInstances().put("backend2", instanceProperties2);
TimerConfiguration configuration = new TimerConfiguration();
TimerRegistry registry = configuration.timerRegistry(
configurationProperties, new DefaultEventConsumerRegistry<>(), new CompositeRegistryEventConsumer<>(emptyList()), new CompositeCustomizer<>(emptyList()), new SimpleMeterRegistry()
);
assertThat(registry.getAllTimers().count()).isEqualTo(2);
Timer timer1 = registry.timer("backend1");
assertThat(timer1).isNotNull();
assertThat(timer1.getTimerConfig().getMetricNames()).isEqualTo("resilience4j.timer.operations1");
assertThat(timer1.getTimerConfig().getOnFailureTagResolver()).isInstanceOf(FixedOnFailureTagResolver.class);
Timer timer2 = registry.timer("backend2");
assertThat(timer2).isNotNull();
assertThat(timer2.getTimerConfig().getMetricNames()).isEqualTo("resilience4j.timer.shared");
assertThat(timer2.getTimerConfig().getOnFailureTagResolver()).isInstanceOf(QualifiedClassNameOnFailureTagResolver.class);
} |
public static SelectExpression parseSelectExpression(final String expressionText) {
final SqlBaseParser.SelectItemContext parseCtx = GrammarParseUtil.getParseTree(
expressionText,
SqlBaseParser::selectItem
);
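// Only a single select item carrying an explicit alias is accepted.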
if (!(parseCtx instanceof SqlBaseParser.SelectSingleContext)) {
throw new IllegalArgumentException("Illegal select item type in: " + expressionText);
}
final SqlBaseParser.SelectSingleContext selectSingleContext =
(SqlBaseParser.SelectSingleContext) parseCtx;
if (selectSingleContext.identifier() == null) {
throw new IllegalArgumentException("Select item must have identifier in: " + expressionText);
}
return SelectExpression.of(
ColumnName.of(ParserUtil.getIdentifierText(selectSingleContext.identifier())),
new AstBuilder(TypeRegistry.EMPTY).buildExpression(selectSingleContext.expression())
);
} | @Test
public void shouldParseSelectExpression() {
// When:
final SelectExpression parsed =
ExpressionParser.parseSelectExpression("1 + 2 AS `three`");
// Then:
assertThat(
parsed,
equalTo(
SelectExpression.of(
ColumnName.of("three"),
new ArithmeticBinaryExpression(
parsed.getExpression().getLocation(), Operator.ADD, ONE, TWO
)
)
)
);
} |
@Override
public List<String> detect(ClassLoader classLoader) {
List<File> classpathContents =
classGraph
.disableNestedJarScanning()
.addClassLoader(classLoader)
.scan(1)
.getClasspathFiles();
return classpathContents.stream().map(File::getAbsolutePath).collect(Collectors.toList());
} | @Test
public void shouldDetectClassPathResourceFromJavaClassPathEnvVariable() throws IOException {
String path = tmpFolder.newFolder("folder").getCanonicalPath();
System.setProperty("java.class.path", path);
ClasspathScanningResourcesDetector detector =
new ClasspathScanningResourcesDetector(new ClassGraph());
List<String> resources = detector.detect(null);
assertThat(resources, hasItems(containsString(path)));
} |
@Override
public MaterializedTable nonWindowed() {
return new KsqlMaterializedTable(inner.nonWindowed());
} | @Test
public void shouldPipeTransforms_fullTableScan() {
// Given:
final MaterializedTable table = materialization.nonWindowed();
givenNoopProject();
when(filter.apply(any(), any(), any())).thenReturn(Optional.of(transformed));
// When:
final Iterator<Row> result =
table.get(partition);
result.next();
result.next();
// Then:
verify(project).apply(aKey, transformed, new PullProcessingContext(aRowtime));
verify(project).apply(aKey2, transformed, new PullProcessingContext(aRowtime));
} |
public static String longToIpv4(long longIP) {
return Ipv4Util.longToIpv4(longIP);
} | @Test
public void longToIpTest() {
final String ipv4 = NetUtil.longToIpv4(2130706433L);
assertEquals("127.0.0.1", ipv4);
} |
public static CDCResponse succeed(final String requestId) {
return succeed(requestId, ResponseCase.RESPONSE_NOT_SET, null);
} | @Test
void assertSucceedWhenResponseCaseServerGreetingResult() {
Message msg = ServerGreetingResult.newBuilder().build();
CDCResponse actualResponse = CDCResponseUtils.succeed("request_id_1", CDCResponse.ResponseCase.SERVER_GREETING_RESULT, msg);
assertThat(actualResponse.getStatus(), is(CDCResponse.Status.SUCCEED));
assertThat(actualResponse.getRequestId(), is("request_id_1"));
assertNotNull(actualResponse.getServerGreetingResult());
} |
IdBatchAndWaitTime newIdBaseLocal(int batchSize) {
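// Delegate to the fully-parameterized variant with the current time and this member's node id.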
return newIdBaseLocal(Clock.currentTimeMillis(), getNodeId(), batchSize);
} | @Test
public void when_epochStart_then_used() {
int epochStart = 456;
int timeSinceEpochStart = 1;
initialize(new FlakeIdGeneratorConfig().setEpochStart(epochStart));
long id = gen.newIdBaseLocal(epochStart + timeSinceEpochStart, 1234, 10).idBatch.base();
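// Note: '+' binds tighter than '<<', so the shift amount is the sum of the sequence and node-id bit widths.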
assertEquals((timeSinceEpochStart << DEFAULT_BITS_SEQUENCE + DEFAULT_BITS_NODE_ID) + 1234, id);
} |
@Override
public List<AdminUserDO> getUserListByDeptIds(Collection<Long> deptIds) {
if (CollUtil.isEmpty(deptIds)) {
return Collections.emptyList();
}
return userMapper.selectListByDeptIds(deptIds);
} | @Test
public void testGetUserListByDeptIds() {
// mock data
AdminUserDO dbUser = randomAdminUserDO(o -> o.setDeptId(1L));
userMapper.insert(dbUser);
// insert a row with a different deptId that must not match
userMapper.insert(cloneIgnoreId(dbUser, o -> o.setDeptId(2L)));
// prepare parameters
Collection<Long> deptIds = singleton(1L);
// invoke
List<AdminUserDO> list = userService.getUserListByDeptIds(deptIds);
// assert
assertEquals(1, list.size());
assertEquals(dbUser, list.get(0));
} |
void placeOrder(Order order) {
sendShippingRequest(order);
} | @Test
void testPlaceOrderWithoutDatabase() throws Exception {
long paymentTime = timeLimits.paymentTime();
long queueTaskTime = timeLimits.queueTaskTime();
long messageTime = timeLimits.messageTime();
long employeeTime = timeLimits.employeeTime();
long queueTime = timeLimits.queueTime();
for (double d = 0.1; d < 2; d = d + 0.1) {
paymentTime *= d;
queueTaskTime *= d;
messageTime *= d;
employeeTime *= d;
queueTime *= d;
Commander c = buildCommanderObjectWithoutDB();
var order = new Order(new User("K", null), "pen", 1f);
for (Order.MessageSent ms : Order.MessageSent.values()) {
c.placeOrder(order);
assertFalse(StringUtils.isBlank(order.id));
}
}
} |
public static PTransformMatcher flattenWithDuplicateInputs() {
return new PTransformMatcher() {
@Override
public boolean matches(AppliedPTransform<?, ?, ?> application) {
if (application.getTransform() instanceof Flatten.PCollections) {
Set<PValue> observed = new HashSet<>();
for (PValue pvalue : application.getInputs().values()) {
boolean firstInstance = observed.add(pvalue);
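// add() returns false for a PValue that was already seen, i.e. a duplicate input.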
if (!firstInstance) {
return true;
}
}
}
return false;
}
@Override
public String toString() {
return MoreObjects.toStringHelper("FlattenWithDuplicateInputsMatcher").toString();
}
};
} | @Test
public void flattenWithDuplicateInputsWithDuplicates() {
PCollection<Integer> duplicate =
PCollection.createPrimitiveOutputInternal(
p, WindowingStrategy.globalDefault(), IsBounded.BOUNDED, VarIntCoder.of());
AppliedPTransform application =
AppliedPTransform.of(
"Flatten",
ImmutableMap.<TupleTag<?>, PCollection<?>>builder()
.put(new TupleTag<Integer>(), duplicate)
.put(new TupleTag<Integer>(), duplicate)
.build(),
Collections.singletonMap(
new TupleTag<Integer>(),
PCollection.createPrimitiveOutputInternal(
p, WindowingStrategy.globalDefault(), IsBounded.BOUNDED, VarIntCoder.of())),
Flatten.pCollections(),
ResourceHints.create(),
p);
assertThat(PTransformMatchers.flattenWithDuplicateInputs().matches(application), is(true));
} |
@Override
public String toString() {
return String.format(
"IcebergStagedScan(table=%s, type=%s, taskSetID=%s, caseSensitive=%s)",
table(), expectedSchema().asStruct(), taskSetId, caseSensitive());
} | @Test
public void testTaskSetPlanning() throws NoSuchTableException, IOException {
sql("CREATE TABLE %s (id INT, data STRING) USING iceberg", tableName);
List<SimpleRecord> records =
ImmutableList.of(new SimpleRecord(1, "a"), new SimpleRecord(2, "b"));
Dataset<Row> df = spark.createDataFrame(records, SimpleRecord.class);
df.coalesce(1).writeTo(tableName).append();
df.coalesce(1).writeTo(tableName).append();
Table table = validationCatalog.loadTable(tableIdent);
Assert.assertEquals("Should produce 2 snapshots", 2, Iterables.size(table.snapshots()));
try (CloseableIterable<FileScanTask> fileScanTasks = table.newScan().planFiles()) {
ScanTaskSetManager taskSetManager = ScanTaskSetManager.get();
String setID = UUID.randomUUID().toString();
List<FileScanTask> tasks = ImmutableList.copyOf(fileScanTasks);
taskSetManager.stageTasks(table, setID, tasks);
// load the staged file set and make sure each file is in a separate split
Dataset<Row> scanDF =
spark
.read()
.format("iceberg")
.option(SparkReadOptions.SCAN_TASK_SET_ID, setID)
.option(SparkReadOptions.SPLIT_SIZE, tasks.get(0).file().fileSizeInBytes())
.load(tableName);
Assert.assertEquals("Num partitions should match", 2, scanDF.javaRDD().getNumPartitions());
// load the staged file set and make sure we combine both files into a single split
scanDF =
spark
.read()
.format("iceberg")
.option(SparkReadOptions.SCAN_TASK_SET_ID, setID)
.option(SparkReadOptions.SPLIT_SIZE, Long.MAX_VALUE)
.load(tableName);
Assert.assertEquals("Num partitions should match", 1, scanDF.javaRDD().getNumPartitions());
}
} |
public boolean shouldRecord() {
return this.recordingLevel.shouldRecord(config.recordLevel().id);
} | @Test
public void testShouldRecordForInfoLevelSensor() {
Sensor infoSensor = new Sensor(null, "infoSensor", null, INFO_CONFIG, Time.SYSTEM,
0, Sensor.RecordingLevel.INFO);
assertTrue(infoSensor.shouldRecord());
infoSensor = new Sensor(null, "infoSensor", null, DEBUG_CONFIG, Time.SYSTEM,
0, Sensor.RecordingLevel.INFO);
assertTrue(infoSensor.shouldRecord());
infoSensor = new Sensor(null, "infoSensor", null, TRACE_CONFIG, Time.SYSTEM,
0, Sensor.RecordingLevel.INFO);
assertTrue(infoSensor.shouldRecord());
} |
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
final ConfiguredStatement<T> statement
) {
return inject(statement, new TopicProperties.Builder());
} | @Test
public void shouldDoNothingForNonCAS() {
// Given:
final ConfiguredStatement<?> statement = givenStatement("LIST PROPERTIES;");
// When:
final ConfiguredStatement<?> result = injector.inject(statement);
// Then:
assertThat(result, is(sameInstance(statement)));
} |
@Override
public Optional<DateTime> calculateNextTime(DateTime previousExecutionTime, DateTime lastNextTime, JobSchedulerClock clock) {
final Cron cron = CronUtils.getParser().parse(cronExpression());
final ExecutionTime executionTime = ExecutionTime.forCron(cron);
ZonedDateTime zdt = getZonedDateTime(lastNextTime == null ? clock.nowUTC() : lastNextTime);
return executionTime
.nextExecution(zdt)
.map(this::toDateTime);
} | @Test
void testCalculateNextTime() {
final long midnight01Jan2020Millis = 1577836800000L;
final DateTime midnight01Jan2020 = new DateTime(midnight01Jan2020Millis, DateTimeZone.UTC);
final JobSchedulerTestClock clock = new JobSchedulerTestClock(DateTime.now(DateTimeZone.UTC));
// Every hour between 0800 and 1700.
CronJobSchedule cronJobSchedule = CronJobSchedule.builder().cronExpression("0 0 8-17 1/1 * ? *").build();
Optional<DateTime> next = cronJobSchedule.calculateNextTime(midnight01Jan2020.plusSeconds(30), midnight01Jan2020, clock);
assertThat(next).isPresent();
assertThat(next.get().getMillis()).isEqualTo(midnight01Jan2020Millis + (8 * 3600000));
// 01 Jan 2020 is a Wednesday. Skip Wednesday and next execution should be 24 hours later.
cronJobSchedule = CronJobSchedule.builder().cronExpression("0 0 * ? * MON,TUE,THU,FRI *").build();
next = cronJobSchedule.calculateNextTime(midnight01Jan2020.plusSeconds(30), midnight01Jan2020, clock);
assertThat(next).isPresent();
assertThat(next.get().getMillis()).isEqualTo(midnight01Jan2020Millis + (24 * 3600000));
} |
@Override
public String toString() {
if (queryGuid == null) {
return String.format("%s: %s", message, query);
}
return String.format("%s (%s): %s", message, queryGuid.getQueryGuid(), query);
} | @Test
public void toStringShouldReturnCorrectQueryLoggerMessageString() {
// without query guid
QueryLoggerMessage message = new QueryLoggerMessage(TEST_MESSAGE, TEST_QUERY);
assertEquals("test message: describe stream1;", message.toString());
// with query guid
message = new QueryLoggerMessage(TEST_MESSAGE, TEST_QUERY, GUID);
assertEquals(String.format("test message (%s): describe stream1;", GUID.getQueryGuid()), message.toString());
} |
public MetricGroup group(String groupName, String... tagKeyValues) {
MetricGroupId groupId = groupId(groupName, tagKeyValues);
MetricGroup group = groupsByName.get(groupId);
if (group == null) {
group = new MetricGroup(groupId);
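// putIfAbsent guards against a concurrent registration; keep whichever instance won the race.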
MetricGroup previous = groupsByName.putIfAbsent(groupId, group);
if (previous != null)
group = previous;
}
return group;
} | @Test
public void testGettingGroupWithTags() {
MetricGroup group1 = metrics.group("name", "k1", "v1", "k2", "v2");
assertEquals("v1", group1.tags().get("k1"));
assertEquals("v2", group1.tags().get("k2"));
assertEquals(2, group1.tags().size());
} |
@Override
@CheckReturnValue
public boolean offer(Entry<? extends Data, ? extends Data> entry) {
int partitionId = partitionService.getPartitionId(entry.getKey());
int length = entry.getKey().totalSize() + entry.getValue().totalSize() - 2 * HeapData.TYPE_OFFSET;
// if the entry is larger than usableChunkSize, send it in its own chunk. We avoid adding it to the
// ByteArrayOutputStream since it would expand it beyond its maximum capacity.
if (length > usableChunkCapacity) {
return putAsyncToMap(partitionId, () -> {
byte[] data = new byte[serializedByteArrayHeader.length + length + valueTerminator.length];
totalKeys++;
int offset = 0;
System.arraycopy(serializedByteArrayHeader, 0, data, offset, serializedByteArrayHeader.length);
offset += serializedByteArrayHeader.length - Bits.INT_SIZE_IN_BYTES;
Bits.writeInt(data, offset, length + valueTerminator.length, useBigEndian);
offset += Bits.INT_SIZE_IN_BYTES;
copyWithoutHeader(entry.getKey(), data, offset);
offset += entry.getKey().totalSize() - HeapData.TYPE_OFFSET;
copyWithoutHeader(entry.getValue(), data, offset);
offset += entry.getValue().totalSize() - HeapData.TYPE_OFFSET;
System.arraycopy(valueTerminator, 0, data, offset, valueTerminator.length);
return new HeapData(data);
});
}
// if the buffer after adding this entry and terminator would exceed the capacity limit, flush it first
CustomByteArrayOutputStream buffer = buffers[partitionId];
if (buffer.size() + length + valueTerminator.length > buffer.capacityLimit && !flushPartition(partitionId)) {
return false;
}
// append to buffer
writeWithoutHeader(entry.getKey(), buffer);
writeWithoutHeader(entry.getValue(), buffer);
totalKeys++;
return true;
} | @Test
public void when_cannotAutoFlush_then_offerReturnsFalse() {
// When
// artificially increase number of async ops so that the writer cannot proceed
writer.numConcurrentAsyncOps.set(JetServiceBackend.MAX_PARALLEL_ASYNC_OPS);
Entry<Data, Data> entry = entry(serialize("k"), serialize("v"));
int entriesInChunk =
(writer.usableChunkCapacity - writer.serializedByteArrayHeader.length) / serializedLength(entry);
assertTrue("entriesInChunk=" + entriesInChunk, entriesInChunk > 1 && entriesInChunk < 10);
for (int i = 0; i < entriesInChunk; i++) {
assertTrue(writer.offer(entry));
}
// Then
assertFalse("offer should not have succeeded, too many parallel operations", writer.offer(entry));
writer.numConcurrentAsyncOps.set(0);
assertTrue("offer should have succeeded", writer.offer(entry));
assertTargetMapEntry("k", 0, serializedLength(entry) * entriesInChunk);
} |
public String getModule() {
return module;
} | @Test
public void testGetModule() {
shenyuRequestLog.setModule("test");
Assertions.assertEquals(shenyuRequestLog.getModule(), "test");
} |
@BuildStep
HealthBuildItem addHealthCheck(Capabilities capabilities, JobRunrBuildTimeConfiguration jobRunrBuildTimeConfiguration) {
if (capabilities.isPresent(Capability.SMALLRYE_HEALTH)) {
return new HealthBuildItem(JobRunrHealthCheck.class.getName(), jobRunrBuildTimeConfiguration.healthEnabled());
}
return null;
} | @Test
void addHealthCheckAddsHealthBuildItemIfSmallRyeHealthCapabilityIsPresent() {
lenient().when(capabilities.isPresent(Capability.SMALLRYE_HEALTH)).thenReturn(true);
final HealthBuildItem healthBuildItem = jobRunrExtensionProcessor.addHealthCheck(capabilities, jobRunrBuildTimeConfiguration);
assertThat(healthBuildItem.getHealthCheckClass())
.isEqualTo(JobRunrHealthCheck.class.getName());
} |
@Override
public Acl getPermission(final Path file) throws BackgroundException {
try {
if(file.getType().contains(Path.Type.upload)) {
// Incomplete multipart upload has no ACL set
return Acl.EMPTY;
}
final Path bucket = containerService.getContainer(file);
final Acl acl;
if(containerService.isContainer(file)) {
// This method can be performed by anonymous services, but can only succeed if the
// bucket's existing ACL already allows write access by the anonymous user.
// In general, you can only access the ACL of a bucket if the ACL already in place
// for that bucket (in S3) allows you to do so.
acl = this.toAcl(session.getClient().getBucketAcl(bucket.isRoot() ? StringUtils.EMPTY : bucket.getName()));
}
else {
acl = this.toAcl(session.getClient().getVersionedObjectAcl(file.attributes().getVersionId(),
bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file)));
}
if(this.isBucketOwnerEnforced(bucket)) {
acl.setEditable(false);
}
return acl;
}
catch(ServiceException e) {
final BackgroundException failure = new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, file);
if(file.isDirectory()) {
if(failure instanceof NotfoundException) {
// No placeholder file may exist, but we just have a common prefix
return Acl.EMPTY;
}
}
if(failure instanceof InteroperabilityException) {
// The specified method is not allowed against this resource. The case for delete markers in versioned buckets.
return Acl.EMPTY;
}
throw failure;
}
} | @Test
public void testReadKey() throws Exception {
final Path container = new Path("test-acl-us-east-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
final Acl acl = new S3AccessControlListFeature(session).getPermission(new Path(container, "test", EnumSet.of(Path.Type.file)));
assertTrue(acl.containsKey(new Acl.GroupUser(Acl.GroupUser.EVERYONE)));
assertTrue(acl.get(new Acl.GroupUser(Acl.GroupUser.EVERYONE)).contains(new Acl.Role(Acl.Role.READ)));
assertTrue(acl.containsKey(new Acl.Owner("80b9982b7b08045ee86680cc47f43c84bf439494a89ece22b5330f8a49477cf6")));
assertTrue(acl.get(new Acl.Owner("80b9982b7b08045ee86680cc47f43c84bf439494a89ece22b5330f8a49477cf6")).contains(new Acl.Role(Acl.Role.FULL)));
} |
@Override
public void write(int b) throws IOException
{
checkClosed();
if (chunkSize - currentBufferPointer <= 0)
{
expandBuffer();
}
currentBuffer.put((byte) b);
currentBufferPointer++;
pointer++;
if (pointer > size)
{
size = pointer;
}
} | @Test
void testPaging() throws IOException
{
try (RandomAccess randomAccessReadWrite = new RandomAccessReadWriteBuffer(5))
{
assertEquals(0, randomAccessReadWrite.length());
randomAccessReadWrite.write(new byte[] { 1, 2, 3, 4, 5, 6, 7 });
assertEquals(7, randomAccessReadWrite.length());
randomAccessReadWrite.write(new byte[] { 8, 9, 10, 11, });
assertEquals(11, randomAccessReadWrite.length());
}
catch (Throwable throwable)
{
fail("Unexpected exception " + throwable.getMessage());
}
} |
@Nullable
private static JobID getJobId(CommandLine commandLine) throws FlinkParseException {
String jobId = commandLine.getOptionValue(JOB_ID_OPTION.getOpt());
if (jobId == null) {
return null;
}
try {
return JobID.fromHexString(jobId);
} catch (IllegalArgumentException e) {
throw createFlinkParseException(JOB_ID_OPTION, e);
}
} | @Test
void testShortOptions() throws FlinkParseException {
final String jobClassName = JOB_CLASS_NAME;
final JobID jobId = new JobID();
final String savepointRestorePath = "s3://foo/bar";
final String jars = String.join(",", JOB_JARS);
final String[] args = {
"-c",
confDirPath,
"-j",
jobClassName,
"-jid",
jobId.toString(),
"-s",
savepointRestorePath,
"-jars",
jars,
"-n"
};
final StandaloneApplicationClusterConfiguration clusterConfiguration =
commandLineParser.parse(args);
assertThat(clusterConfiguration.getConfigDir()).isEqualTo(confDirPath);
assertThat(clusterConfiguration.getJobClassName()).isEqualTo(jobClassName);
assertThat(clusterConfiguration.getJobId()).isEqualTo(jobId);
assertThat(clusterConfiguration.getJars()).isEqualTo(JOB_JARS);
final SavepointRestoreSettings savepointRestoreSettings =
clusterConfiguration.getSavepointRestoreSettings();
assertThat(savepointRestoreSettings.restoreSavepoint()).isTrue();
assertThat(savepointRestoreSettings.getRestorePath()).isEqualTo(savepointRestorePath);
assertThat(savepointRestoreSettings.allowNonRestoredState()).isTrue();
} |
@SuppressWarnings("deprecation")
public boolean setSocketOpt(int option, Object optval)
{
final ValueReference<Boolean> result = new ValueReference<>(false);
switch (option) {
case ZMQ.ZMQ_SNDHWM:
sendHwm = (Integer) optval;
if (sendHwm < 0) {
throw new IllegalArgumentException("sendHwm " + optval);
}
return true;
case ZMQ.ZMQ_RCVHWM:
recvHwm = (Integer) optval;
if (recvHwm < 0) {
throw new IllegalArgumentException("recvHwm " + optval);
}
return true;
case ZMQ.ZMQ_AFFINITY:
affinity = (Long) optval;
return true;
case ZMQ.ZMQ_IDENTITY:
byte[] val = parseBytes(option, optval);
if (val == null || val.length > 255) {
throw new IllegalArgumentException("identity must not be null or less than 255 " + optval);
}
identity = Arrays.copyOf(val, val.length);
identitySize = (short) identity.length;
return true;
case ZMQ.ZMQ_RATE:
rate = (Integer) optval;
return true;
case ZMQ.ZMQ_RECOVERY_IVL:
recoveryIvl = (Integer) optval;
return true;
case ZMQ.ZMQ_SNDBUF:
sndbuf = (Integer) optval;
return true;
case ZMQ.ZMQ_RCVBUF:
rcvbuf = (Integer) optval;
return true;
case ZMQ.ZMQ_TOS:
tos = (Integer) optval;
return true;
case ZMQ.ZMQ_LINGER:
linger = (Integer) optval;
return true;
case ZMQ.ZMQ_RECONNECT_IVL:
reconnectIvl = (Integer) optval;
if (reconnectIvl < -1) {
throw new IllegalArgumentException("reconnectIvl " + optval);
}
return true;
case ZMQ.ZMQ_RECONNECT_IVL_MAX:
reconnectIvlMax = (Integer) optval;
if (reconnectIvlMax < 0) {
throw new IllegalArgumentException("reconnectIvlMax " + optval);
}
return true;
case ZMQ.ZMQ_BACKLOG:
backlog = (Integer) optval;
return true;
case ZMQ.ZMQ_MAXMSGSIZE:
maxMsgSize = (Long) optval;
return true;
case ZMQ.ZMQ_MULTICAST_HOPS:
multicastHops = (Integer) optval;
return true;
case ZMQ.ZMQ_RCVTIMEO:
recvTimeout = (Integer) optval;
return true;
case ZMQ.ZMQ_SNDTIMEO:
sendTimeout = (Integer) optval;
return true;
/* Deprecated in favor of ZMQ_IPV6 */
case ZMQ.ZMQ_IPV4ONLY:
return setSocketOpt(ZMQ.ZMQ_IPV6, !parseBoolean(option, optval));
/* To replace the somewhat surprising IPV4ONLY */
case ZMQ.ZMQ_IPV6:
ipv6 = parseBoolean(option, optval);
return true;
case ZMQ.ZMQ_SOCKS_PROXY:
socksProxyAddress = parseString(option, optval);
return true;
case ZMQ.ZMQ_TCP_KEEPALIVE:
tcpKeepAlive = ((Number) optval).intValue();
if (tcpKeepAlive != -1 && tcpKeepAlive != 0 && tcpKeepAlive != 1) {
throw new IllegalArgumentException("tcpKeepAlive only accepts one of -1,0,1 " + optval);
}
return true;
case ZMQ.ZMQ_TCP_KEEPALIVE_CNT:
this.tcpKeepAliveCnt = ((Number) optval).intValue();
return true;
case ZMQ.ZMQ_TCP_KEEPALIVE_IDLE:
this.tcpKeepAliveIdle = ((Number) optval).intValue();
return true;
case ZMQ.ZMQ_TCP_KEEPALIVE_INTVL:
this.tcpKeepAliveIntvl = ((Number) optval).intValue();
return true;
case ZMQ.ZMQ_IMMEDIATE:
immediate = parseBoolean(option, optval);
return true;
case ZMQ.ZMQ_DELAY_ATTACH_ON_CONNECT:
immediate = !parseBoolean(option, optval);
return true;
case ZMQ.ZMQ_TCP_ACCEPT_FILTER:
String filterStr = parseString(option, optval);
if (filterStr == null) {
tcpAcceptFilters.clear();
}
else if (filterStr.isEmpty() || filterStr.length() > 255) {
throw new IllegalArgumentException("tcp_accept_filter " + optval);
}
else {
TcpAddressMask filter = new TcpAddressMask(filterStr, ipv6);
tcpAcceptFilters.add(filter);
}
return true;
case ZMQ.ZMQ_PLAIN_SERVER:
asServer = parseBoolean(option, optval);
mechanism = (asServer ? Mechanisms.PLAIN : Mechanisms.NULL);
return true;
case ZMQ.ZMQ_PLAIN_USERNAME:
if (optval == null) {
mechanism = Mechanisms.NULL;
asServer = false;
return true;
}
plainUsername = parseString(option, optval);
asServer = false;
mechanism = Mechanisms.PLAIN;
return true;
case ZMQ.ZMQ_PLAIN_PASSWORD:
if (optval == null) {
mechanism = Mechanisms.NULL;
asServer = false;
return true;
}
plainPassword = parseString(option, optval);
asServer = false;
mechanism = Mechanisms.PLAIN;
return true;
case ZMQ.ZMQ_ZAP_DOMAIN:
String domain = parseString(option, optval);
if (domain != null && domain.length() < 256) {
zapDomain = domain;
return true;
}
throw new IllegalArgumentException("zap domain length shall be < 256 : " + optval);
case ZMQ.ZMQ_CURVE_SERVER:
asServer = parseBoolean(option, optval);
mechanism = (asServer ? Mechanisms.CURVE : Mechanisms.NULL);
return true;
case ZMQ.ZMQ_CURVE_PUBLICKEY:
curvePublicKey = setCurveKey(option, optval, result);
return result.get();
case ZMQ.ZMQ_CURVE_SECRETKEY:
curveSecretKey = setCurveKey(option, optval, result);
return result.get();
case ZMQ.ZMQ_CURVE_SERVERKEY:
curveServerKey = setCurveKey(option, optval, result);
if (curveServerKey == null) {
asServer = false;
}
return result.get();
case ZMQ.ZMQ_CONFLATE:
conflate = parseBoolean(option, optval);
return true;
case ZMQ.ZMQ_GSSAPI_SERVER:
asServer = parseBoolean(option, optval);
mechanism = Mechanisms.GSSAPI;
return true;
case ZMQ.ZMQ_GSSAPI_PRINCIPAL:
gssPrincipal = parseString(option, optval);
mechanism = Mechanisms.GSSAPI;
return true;
case ZMQ.ZMQ_GSSAPI_SERVICE_PRINCIPAL:
gssServicePrincipal = parseString(option, optval);
mechanism = Mechanisms.GSSAPI;
return true;
case ZMQ.ZMQ_GSSAPI_PLAINTEXT:
gssPlaintext = parseBoolean(option, optval);
return true;
case ZMQ.ZMQ_HANDSHAKE_IVL:
handshakeIvl = (Integer) optval;
if (handshakeIvl < 0) {
throw new IllegalArgumentException("handshakeIvl only accept positive values " + optval);
}
return true;
case ZMQ.ZMQ_HEARTBEAT_IVL:
heartbeatInterval = (Integer) optval;
if (heartbeatInterval < 0) {
throw new IllegalArgumentException("heartbeatInterval only accept positive values " + optval);
}
return true;
case ZMQ.ZMQ_HEARTBEAT_TIMEOUT:
heartbeatTimeout = (Integer) optval;
if (heartbeatTimeout < 0) {
throw new IllegalArgumentException("heartbeatTimeout only accept positive values " + optval);
}
return true;
case ZMQ.ZMQ_HEARTBEAT_TTL:
Integer value = (Integer) optval;
// Convert this to deciseconds from milliseconds
value /= 100;
if (value >= 0 && value <= 6553) {
heartbeatTtl = value;
}
else {
throw new IllegalArgumentException("heartbeatTtl is out of range [0..655399]" + optval);
}
return true;
case ZMQ.ZMQ_HEARTBEAT_CONTEXT:
heartbeatContext = (byte[]) optval;
if (heartbeatContext == null) {
throw new IllegalArgumentException("heartbeatContext cannot be null");
}
return true;
case ZMQ.ZMQ_DECODER:
decoder = checkCustomCodec(optval, IDecoder.class);
rawSocket = true;
// failure throws ZError.InstantiationException
// if that line is reached, everything is fine
return true;
case ZMQ.ZMQ_ENCODER:
encoder = checkCustomCodec(optval, IEncoder.class);
rawSocket = true;
// failure throws ZError.InstantiationException
// if that line is reached, everything is fine
return true;
case ZMQ.ZMQ_MSG_ALLOCATOR:
if (optval instanceof String) {
try {
allocator = allocator(Class.forName((String) optval));
return true;
}
catch (ClassNotFoundException e) {
throw new IllegalArgumentException(e);
}
}
else if (optval instanceof Class) {
allocator = allocator((Class<?>) optval);
return true;
}
else if (optval instanceof MsgAllocator) {
allocator = (MsgAllocator) optval;
return true;
}
return false;
case ZMQ.ZMQ_MSG_ALLOCATION_HEAP_THRESHOLD:
Integer allocationHeapThreshold = (Integer) optval;
allocator = new MsgAllocatorThreshold(allocationHeapThreshold);
return true;
case ZMQ.ZMQ_SELECTOR_PROVIDERCHOOSER:
if (optval instanceof String) {
try {
selectorChooser = chooser(Class.forName((String) optval));
return true;
}
catch (ClassNotFoundException e) {
throw new IllegalArgumentException(e);
}
}
else if (optval instanceof Class) {
selectorChooser = chooser((Class<?>) optval);
return true;
}
else if (optval instanceof SelectorProviderChooser) {
selectorChooser = (SelectorProviderChooser) optval;
return true;
}
return false;
case ZMQ.ZMQ_HELLO_MSG:
if (optval == null) {
helloMsg = null;
}
else {
byte[] bytes = parseBytes(option, optval);
if (bytes.length == 0) {
helloMsg = null;
}
else {
helloMsg = new Msg(Arrays.copyOf(bytes, bytes.length));
}
}
return true;
case ZMQ.ZMQ_DISCONNECT_MSG:
if (optval == null) {
disconnectMsg = null;
}
else {
byte[] bytes = parseBytes(option, optval);
if (bytes.length == 0) {
disconnectMsg = null;
}
else {
disconnectMsg = new Msg(Arrays.copyOf(bytes, bytes.length));
}
}
return true;
case ZMQ.ZMQ_HICCUP_MSG:
if (optval == null) {
hiccupMsg = null;
}
else {
byte[] bytes = parseBytes(option, optval);
if (bytes.length == 0) {
hiccupMsg = null;
}
else {
hiccupMsg = new Msg(Arrays.copyOf(bytes, bytes.length));
}
}
return true;
case ZMQ.ZMQ_AS_TYPE:
this.asType = (Integer) optval;
return true;
case ZMQ.ZMQ_SELFADDR_PROPERTY_NAME:
this.selfAddressPropertyName = parseString(option, optval);
return true;
default:
throw new IllegalArgumentException("Unknown Option " + option);
}
} | @Test(expected = IllegalArgumentException.class)
public void testSelectorFailed()
{
Options opt = new Options();
Assert.assertFalse(opt.setSocketOpt(ZMQ.ZMQ_SELECTOR_PROVIDERCHOOSER, ""));
} |
public Map<String, Long> numberOfOverdueTriggers() {
final DateTime now = clock.nowUTC();
final AggregateIterable<OverdueTrigger> result = collection.aggregate(List.of(
Aggregates.match(
// We deliberately don't include the filter to include expired trigger locks we use in
// #nextRunnableTrigger because we consider that an edge case that's not important for
// the overdue calculation.
and(
eq(FIELD_LOCK_OWNER, null),
eq(FIELD_STATUS, JobTriggerStatus.RUNNABLE),
lte(FIELD_NEXT_TIME, now),
or(
not(exists(FIELD_END_TIME)),
eq(FIELD_END_TIME, null),
gte(FIELD_END_TIME, now)
)
)
),
Aggregates.group(
"$" + FIELD_JOB_DEFINITION_TYPE,
Accumulators.sum("count", 1)
)
), OverdueTrigger.class);
return stream(result).collect(Collectors.toMap(OverdueTrigger::type, OverdueTrigger::count));
} | @Test
@MongoDBFixtures("job-triggers-for-overdue-count.json")
public void numberOfOverdueTriggers() {
final JobSchedulerTestClock clock = new JobSchedulerTestClock(DateTime.parse("2019-01-01T04:00:00.000Z"));
final DBJobTriggerService service = serviceWithClock(clock);
final Map<String, Long> result = service.numberOfOverdueTriggers();
assertThat(result).isEqualTo(Map.of(
"event-processor-execution-v1", 2L,
"notification-execution-v1", 1L
));
} |
@Override
public <K, T_OTHER, OUT> ProcessConfigurableAndNonKeyedPartitionStream<OUT> connectAndProcess(
KeyedPartitionStream<K, T_OTHER> other,
TwoInputBroadcastStreamProcessFunction<T_OTHER, T, OUT> processFunction) {
// no state redistribution mode check is required here, since all redistribution modes are
// acceptable
TypeInformation<OUT> outTypeInfo =
StreamUtils.getOutputTypeForTwoInputBroadcastProcessFunction(
processFunction,
((KeyedPartitionStreamImpl<K, T_OTHER>) other).getType(),
getType());
KeyedTwoInputBroadcastProcessOperator<K, T_OTHER, T, OUT> processOperator =
new KeyedTwoInputBroadcastProcessOperator<>(processFunction);
Transformation<OUT> outTransformation =
StreamUtils.getTwoInputTransformation(
"Broadcast-Keyed-TwoInput-Process",
(KeyedPartitionStreamImpl<K, T_OTHER>) other,
// we should always take the broadcast input as second input.
this,
outTypeInfo,
processOperator);
environment.addOperator(outTransformation);
return StreamUtils.wrapWithConfigureHandle(
new NonKeyedPartitionStreamImpl<>(environment, outTransformation));
} | @Test
void testConnectNonKeyedStream() throws Exception {
ExecutionEnvironmentImpl env = StreamTestUtils.getEnv();
BroadcastStreamImpl<Integer> stream =
new BroadcastStreamImpl<>(env, new TestingTransformation<>("t1", Types.INT, 1));
NonKeyedPartitionStreamImpl<Long> nonKeyedStream =
new NonKeyedPartitionStreamImpl<>(
env, new TestingTransformation<>("t2", Types.LONG, 2));
stream.connectAndProcess(
nonKeyedStream, new StreamTestUtils.NoOpTwoInputBroadcastStreamProcessFunction());
List<Transformation<?>> transformations = env.getTransformations();
assertThat(transformations).hasSize(1);
assertProcessType(transformations.get(0), TwoInputTransformation.class, Types.LONG);
} |
@Override
public ConnectorPageSource createPageSource(
ConnectorTransactionHandle transaction,
ConnectorSession session,
ConnectorSplit split,
ConnectorTableLayoutHandle layout,
List<ColumnHandle> columns,
SplitContext splitContext,
RuntimeStats runtimeStats)
{
HiveTableLayoutHandle hiveLayout = (HiveTableLayoutHandle) layout;
List<HiveColumnHandle> selectedColumns = columns.stream()
.map(HiveColumnHandle.class::cast)
.collect(toList());
HiveSplit hiveSplit = (HiveSplit) split;
Path path = new Path(hiveSplit.getFileSplit().getPath());
Configuration configuration = hdfsEnvironment.getConfiguration(
new HdfsContext(
session,
hiveSplit.getDatabase(),
hiveSplit.getTable(),
hiveLayout.getTablePath(),
false),
path);
Optional<EncryptionInformation> encryptionInformation = hiveSplit.getEncryptionInformation();
CacheQuota cacheQuota = generateCacheQuota(hiveSplit);
HiveFileContext fileContext = new HiveFileContext(
splitContext.isCacheable(),
cacheQuota,
hiveSplit.getFileSplit().getExtraFileInfo().map(BinaryExtraHiveFileInfo::new),
OptionalLong.of(hiveSplit.getFileSplit().getFileSize()),
OptionalLong.of(hiveSplit.getFileSplit().getStart()),
OptionalLong.of(hiveSplit.getFileSplit().getLength()),
hiveSplit.getFileSplit().getFileModifiedTime(),
HiveSessionProperties.isVerboseRuntimeStatsEnabled(session),
runtimeStats);
if (columns.stream().anyMatch(columnHandle -> ((HiveColumnHandle) columnHandle).getColumnType().equals(AGGREGATED))) {
checkArgument(columns.stream().allMatch(columnHandle -> ((HiveColumnHandle) columnHandle).getColumnType().equals(AGGREGATED)), "Not all columns are of 'AGGREGATED' type");
if (hiveLayout.isFooterStatsUnreliable()) {
throw new PrestoException(HIVE_UNSUPPORTED_FORMAT, format("Partial aggregation pushdown is not supported when footer stats are unreliable. " +
"Table %s has file %s with unreliable footer stats. " +
"Set session property [catalog-name].pushdown_partial_aggregations_into_scan=false and execute query again.",
hiveLayout.getSchemaTableName(),
hiveSplit.getFileSplit().getPath()));
}
return createAggregatedPageSource(aggregatedPageSourceFactories, configuration, session, hiveSplit, hiveLayout, selectedColumns, fileContext, encryptionInformation);
}
if (hiveLayout.isPushdownFilterEnabled()) {
Optional<ConnectorPageSource> selectivePageSource = createSelectivePageSource(
selectivePageSourceFactories,
configuration,
session,
hiveSplit,
hiveLayout,
selectedColumns,
hiveStorageTimeZone,
typeManager,
optimizedRowExpressionCache,
splitContext,
fileContext,
encryptionInformation);
if (selectivePageSource.isPresent()) {
return selectivePageSource.get();
}
}
TupleDomain<HiveColumnHandle> effectivePredicate = hiveLayout.getDomainPredicate()
.transform(Subfield::getRootName)
.transform(hiveLayout.getPredicateColumns()::get);
if (shouldSkipBucket(hiveLayout, hiveSplit, splitContext, isLegacyTimestampBucketing(session))) {
return new HiveEmptySplitPageSource();
}
if (shouldSkipPartition(typeManager, hiveLayout, hiveStorageTimeZone, hiveSplit, splitContext)) {
return new HiveEmptySplitPageSource();
}
Optional<ConnectorPageSource> pageSource = createHivePageSource(
cursorProviders,
pageSourceFactories,
configuration,
session,
hiveSplit.getFileSplit(),
hiveSplit.getTableBucketNumber(),
hiveSplit.getStorage(),
splitContext.getDynamicFilterPredicate().map(filter -> filter.transform(handle -> (HiveColumnHandle) handle).intersect(effectivePredicate)).orElse(effectivePredicate),
selectedColumns,
hiveLayout.getPredicateColumns(),
hiveSplit.getPartitionKeys(),
hiveStorageTimeZone,
typeManager,
hiveLayout.getSchemaTableName(),
hiveLayout.getPartitionColumns().stream().map(HiveColumnHandle.class::cast).collect(toList()),
hiveLayout.getDataColumns(),
hiveLayout.getTableParameters(),
hiveSplit.getPartitionDataColumnCount(),
hiveSplit.getTableToPartitionMapping(),
hiveSplit.getBucketConversion(),
hiveSplit.isS3SelectPushdownEnabled(),
fileContext,
hiveLayout.getRemainingPredicate(),
hiveLayout.isPushdownFilterEnabled(),
rowExpressionService,
encryptionInformation,
hiveSplit.getRowIdPartitionComponent());
if (pageSource.isPresent()) {
return pageSource.get();
}
throw new IllegalStateException("Could not find a file reader for split " + hiveSplit);
} | @Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "Not all columns are of 'AGGREGATED' type")
public void testFailsWhenMixOfAggregatedAndRegularColumns()
{
HivePageSourceProvider pageSourceProvider = createPageSourceProvider();
pageSourceProvider.createPageSource(
new HiveTransactionHandle(),
SESSION,
getHiveSplit(ORC),
getHiveTableLayout(false, true, false),
ImmutableList.of(LONG_COLUMN, LONG_AGGREGATED_COLUMN),
new SplitContext(false),
new RuntimeStats());
} |
public static boolean startsWith(final CharSequence str, final CharSequence prefix) {
return startsWith(str, prefix, false);
} | @Test
void testStartsWith() {
assertTrue(StringUtils.startsWith(null, null));
assertFalse(StringUtils.startsWith(null, "abc"));
assertFalse(StringUtils.startsWith("abcdef", null));
assertTrue(StringUtils.startsWith("abcdef", "abc"));
assertFalse(StringUtils.startsWith("ABCDEF", "abc"));
assertFalse(StringUtils.startsWith("ABC", "ABCDEF"));
} |
@Override
public String getMethod() {
return davMethod;
} | @Test
public void testGetMethod() throws URISyntaxException {
for (String method : VALID_METHODS) {
HttpRequestBase request = new HttpWebdav(method, new URI(
"http://example.com"));
Assertions.assertEquals(method, request.getMethod());
}
} |
static int validatePubsubMessageSize(PubsubMessage message, int maxPublishBatchSize)
throws SizeLimitExceededException {
int payloadSize = message.getPayload().length;
if (payloadSize > PUBSUB_MESSAGE_DATA_MAX_BYTES) {
throw new SizeLimitExceededException(
"Pubsub message data field of length "
+ payloadSize
+ " exceeds maximum of "
+ PUBSUB_MESSAGE_DATA_MAX_BYTES
+ " bytes. See https://cloud.google.com/pubsub/quotas#resource_limits");
}
int totalSize = payloadSize;
@Nullable Map<String, String> attributes = message.getAttributeMap();
if (attributes != null) {
if (attributes.size() > PUBSUB_MESSAGE_MAX_ATTRIBUTES) {
throw new SizeLimitExceededException(
"Pubsub message contains "
+ attributes.size()
+ " attributes which exceeds the maximum of "
+ PUBSUB_MESSAGE_MAX_ATTRIBUTES
+ ". See https://cloud.google.com/pubsub/quotas#resource_limits");
}
// Consider attribute encoding overhead, so it doesn't go over the request limits
totalSize += attributes.size() * PUBSUB_MESSAGE_ATTRIBUTE_ENCODE_ADDITIONAL_BYTES;
for (Map.Entry<String, String> attribute : attributes.entrySet()) {
String key = attribute.getKey();
int keySize = key.getBytes(StandardCharsets.UTF_8).length;
if (keySize > PUBSUB_MESSAGE_ATTRIBUTE_MAX_KEY_BYTES) {
throw new SizeLimitExceededException(
"Pubsub message attribute key '"
+ key
+ "' exceeds the maximum of "
+ PUBSUB_MESSAGE_ATTRIBUTE_MAX_KEY_BYTES
+ " bytes. See https://cloud.google.com/pubsub/quotas#resource_limits");
}
totalSize += keySize;
String value = attribute.getValue();
int valueSize = value.getBytes(StandardCharsets.UTF_8).length;
if (valueSize > PUBSUB_MESSAGE_ATTRIBUTE_MAX_VALUE_BYTES) {
throw new SizeLimitExceededException(
"Pubsub message attribute value for key '"
+ key
+ "' starting with '"
+ value.substring(0, Math.min(256, value.length()))
+ "' exceeds the maximum of "
+ PUBSUB_MESSAGE_ATTRIBUTE_MAX_VALUE_BYTES
+ " bytes. See https://cloud.google.com/pubsub/quotas#resource_limits");
}
totalSize += valueSize;
}
}
if (totalSize > maxPublishBatchSize) {
throw new SizeLimitExceededException(
"Pubsub message of length "
+ totalSize
+ " exceeds maximum of "
+ maxPublishBatchSize
+ " bytes, when considering the payload and attributes. "
+ "See https://cloud.google.com/pubsub/quotas#resource_limits");
}
return totalSize;
} | @Test
public void testValidatePubsubMessageSizePayloadPlusAttributesTooLarge() {
byte[] data = new byte[(10 << 20)];
String attributeKey = "key";
String attributeValue = "value";
Map<String, String> attributes = ImmutableMap.of(attributeKey, attributeValue);
PubsubMessage message = new PubsubMessage(data, attributes);
assertThrows(
SizeLimitExceededException.class,
() ->
PreparePubsubWriteDoFn.validatePubsubMessageSize(
message, PUBSUB_MESSAGE_MAX_TOTAL_SIZE));
} |
@Override
public Result detect(ChannelBuffer in) {
int prefaceLen = clientPrefaceString.readableBytes();
int bytesRead = min(in.readableBytes(), prefaceLen);
// If the input so far doesn't match the preface, break the connection.
if (bytesRead == 0 || !ChannelBuffers.prefixEquals(in, clientPrefaceString, bytesRead)) {
return Result.UNRECOGNIZED;
}
if (bytesRead == prefaceLen) {
return Result.RECOGNIZED;
}
return Result.NEED_MORE_DATA;
} | @Test
void testDetect() {
ProtocolDetector detector = new Http2ProtocolDetector();
ChannelHandlerContext ctx = Mockito.mock(ChannelHandlerContext.class);
ByteBuf connectionPrefaceBuf = Http2CodecUtil.connectionPrefaceBuf();
ByteBuf byteBuf = ByteBufAllocator.DEFAULT.buffer();
ChannelBuffer in = new ByteBufferBackedChannelBuffer(byteBuf.nioBuffer());
ProtocolDetector.Result result = detector.detect(in);
Assertions.assertEquals(result, ProtocolDetector.Result.UNRECOGNIZED);
byteBuf.writeBytes(connectionPrefaceBuf);
result = detector.detect(new ByteBufferBackedChannelBuffer(byteBuf.nioBuffer()));
Assertions.assertEquals(result, ProtocolDetector.Result.RECOGNIZED);
byteBuf.clear();
byteBuf.writeBytes(connectionPrefaceBuf, 0, 1);
result = detector.detect(new ByteBufferBackedChannelBuffer(byteBuf.nioBuffer()));
Assertions.assertEquals(result, ProtocolDetector.Result.NEED_MORE_DATA);
} |
public static String encodeHexStr(byte[] data) {
return encodeHexStr(data, true);
} | @Test
public void issueI50MI6Test(){
final String s = HexUtil.encodeHexStr("烟".getBytes(StandardCharsets.UTF_16BE));
assertEquals("70df", s);
} |
public void replay(
ConsumerGroupMemberMetadataKey key,
ConsumerGroupMemberMetadataValue value
) {
String groupId = key.groupId();
String memberId = key.memberId();
ConsumerGroup consumerGroup = getOrMaybeCreatePersistedConsumerGroup(groupId, value != null);
Set<String> oldSubscribedTopicNames = new HashSet<>(consumerGroup.subscribedTopicNames().keySet());
if (value != null) {
ConsumerGroupMember oldMember = consumerGroup.getOrMaybeCreateMember(memberId, true);
consumerGroup.updateMember(new ConsumerGroupMember.Builder(oldMember)
.updateWith(value)
.build());
} else {
ConsumerGroupMember oldMember = consumerGroup.getOrMaybeCreateMember(memberId, false);
if (oldMember.memberEpoch() != LEAVE_GROUP_MEMBER_EPOCH) {
throw new IllegalStateException("Received a tombstone record to delete member " + memberId
+ " but did not receive ConsumerGroupCurrentMemberAssignmentValue tombstone.");
}
if (consumerGroup.targetAssignment().containsKey(memberId)) {
throw new IllegalStateException("Received a tombstone record to delete member " + memberId
+ " but did not receive ConsumerGroupTargetAssignmentMetadataValue tombstone.");
}
consumerGroup.removeMember(memberId);
}
updateGroupsByTopics(groupId, oldSubscribedTopicNames, consumerGroup.subscribedTopicNames().keySet());
} | @Test
public void testOnConsumerGroupStateTransitionOnLoading() {
GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
.build();
// Even if there are more group epoch records loaded than tombstone records, the last replayed record
// (tombstone in this test) is the latest state of the group. Hence, the overall metric count should be 0.
IntStream.range(0, 5).forEach(__ ->
context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord("group-id", 0))
);
context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochTombstoneRecord("group-id"));
context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord("group-id"));
IntStream.range(0, 3).forEach(__ -> {
assertThrows(IllegalStateException.class, () -> context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochTombstoneRecord("group-id")));
assertThrows(IllegalStateException.class, () -> context.replay(GroupCoordinatorRecordHelpers.newConsumerGroupEpochTombstoneRecord("group-id")));
});
verify(context.metrics, times(1)).onConsumerGroupStateTransition(null, ConsumerGroup.ConsumerGroupState.EMPTY);
verify(context.metrics, times(1)).onConsumerGroupStateTransition(ConsumerGroup.ConsumerGroupState.EMPTY, null);
} |
@Override
synchronized public void close() {
if (stream != null) {
IOUtils.cleanupWithLogger(LOG, stream);
stream = null;
}
} | @Test(timeout=120000)
public void testRandomLong() throws Exception {
OsSecureRandom random = getOsSecureRandom();
long rand1 = random.nextLong();
long rand2 = random.nextLong();
while (rand1 == rand2) {
rand2 = random.nextLong();
}
random.close();
} |
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
SessionCredentials other = (SessionCredentials) obj;
if (accessKey == null) {
if (other.accessKey != null)
return false;
} else if (!accessKey.equals(other.accessKey))
return false;
if (secretKey == null) {
if (other.secretKey != null)
return false;
} else if (!secretKey.equals(other.secretKey))
return false;
if (signature == null) {
return other.signature == null;
} else return signature.equals(other.signature);
} | @Test
public void equalsTest() {
SessionCredentials sessionCredentials = new SessionCredentials("RocketMQ","12345678");
sessionCredentials.setSecurityToken("abcd");
SessionCredentials other = new SessionCredentials("RocketMQ","12345678","abcd");
Assert.assertTrue(sessionCredentials.equals(other));
} |
public void setup(final Map<String, InternalTopicConfig> topicConfigs) {
log.info("Starting to setup internal topics {}.", topicConfigs.keySet());
final long now = time.milliseconds();
final long deadline = now + retryTimeoutMs;
final Map<String, Map<String, String>> streamsSideTopicConfigs = topicConfigs.values().stream()
.collect(Collectors.toMap(
InternalTopicConfig::name,
topicConfig -> topicConfig.properties(defaultTopicConfigs, windowChangeLogAdditionalRetention)
));
final Set<String> createdTopics = new HashSet<>();
final Set<String> topicStillToCreate = new HashSet<>(topicConfigs.keySet());
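// Retry creation until every topic exists; the helpers below enforce the retry deadline.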
while (!topicStillToCreate.isEmpty()) {
final Set<NewTopic> newTopics = topicStillToCreate.stream()
.map(topicName -> new NewTopic(
topicName,
topicConfigs.get(topicName).numberOfPartitions(),
Optional.of(replicationFactor)
).configs(streamsSideTopicConfigs.get(topicName))
).collect(Collectors.toSet());
log.info("Going to create internal topics: " + newTopics);
final CreateTopicsResult createTopicsResult = adminClient.createTopics(newTopics);
processCreateTopicResults(createTopicsResult, topicStillToCreate, createdTopics, deadline);
maybeSleep(Collections.singletonList(topicStillToCreate), deadline, "created");
}
log.info("Completed setup of internal topics {}.", topicConfigs.keySet());
} | @Test
public void shouldThrowTimeoutExceptionWhenFuturesNeverCompleteDuringSetup() {
final AdminClient admin = mock(AdminClient.class);
final MockTime time = new MockTime(
(Integer) config.get(StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG)) / 3
);
final StreamsConfig streamsConfig = new StreamsConfig(config);
final InternalTopicManager topicManager = new InternalTopicManager(time, admin, streamsConfig);
final KafkaFutureImpl<TopicMetadataAndConfig> createTopicFutureThatNeverCompletes = new KafkaFutureImpl<>();
final InternalTopicConfig internalTopicConfig = setupRepartitionTopicConfig(topic1, 1);
final NewTopic newTopic = newTopic(topic1, internalTopicConfig, streamsConfig);
when(admin.createTopics(mkSet(newTopic)))
.thenAnswer(answer -> new MockCreateTopicsResult(mkMap(mkEntry(topic1, createTopicFutureThatNeverCompletes))));
assertThrows(
TimeoutException.class,
() -> topicManager.setup(Collections.singletonMap(topic1, internalTopicConfig))
);
} |
public static <T> List<List<T>> splitBySize(List<T> list, int expectedSize)
throws NullPointerException, IllegalArgumentException {
Preconditions.checkNotNull(list, "list must not be null");
Preconditions.checkArgument(expectedSize > 0, "expectedSize must larger than 0");
if (1 == expectedSize) {
return Collections.singletonList(list);
}
int splitSize = Math.min(expectedSize, list.size());
List<List<T>> result = new ArrayList<List<T>>(splitSize);
for (int i = 0; i < splitSize; i++) {
result.add(new ArrayList<>());
}
int index = 0;
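// Distribute the elements round-robin across the sublists.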
for (T t : list) {
result.get(index).add(t);
index = (index + 1) % splitSize;
}
return result;
} | @Test
public void testSplitBySizeWithEmptyList() {
List<Integer> lists = Lists.newArrayList();
int expectSize = 10;
List<List<Integer>> splitLists = ListUtil.splitBySize(lists, expectSize);
Assert.assertEquals(splitLists.size(), lists.size());
} |
@SuppressWarnings("unchecked")
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
final ConfiguredStatement<T> statement
) {
try {
if (statement.getStatement() instanceof CreateAsSelect) {
registerForCreateAs((ConfiguredStatement<? extends CreateAsSelect>) statement);
} else if (statement.getStatement() instanceof CreateSource) {
registerForCreateSource((ConfiguredStatement<? extends CreateSource>) statement);
}
} catch (final KsqlStatementException e) {
throw e;
} catch (final KsqlException e) {
throw new KsqlStatementException(
ErrorMessageUtil.buildErrorMessage(e),
statement.getMaskedStatementText(),
e.getCause());
}
// Remove schema id from SessionConfig
return stripSchemaIdConfig(statement);
} | @Test
public void shouldNotPlanQueryOnOriginalExecutionContext() {
// Given:
givenStatement("CREATE STREAM sink WITH(value_format='AVRO') AS SELECT * FROM SOURCE;");
// When:
injector.inject(statement);
// Then:
verify(executionContext, Mockito.never()).plan(any(), any(ConfiguredStatement.class));
verify(executionSandbox, Mockito.times(1))
.plan(any(), any(ConfiguredStatement.class));
} |
@Override
protected void flush(MessageTuple item) {
prepare(item);
channel.flush();
} | @Test
public void testFlush() {
Object message = new Object();
NettyBatchWriteQueue.MessageTuple messageTuple = new NettyBatchWriteQueue.MessageTuple(message,
mockChannelPromise);
nettyBatchWriteQueue.flush(messageTuple);
Mockito.verify(mockChannel).write(eq(message), eq(mockChannelPromise));
Mockito.verify(mockChannel).flush();
} |
@Override
public NacosUser authenticate(String username, String rawPassword) throws AccessException {
if (StringUtils.isBlank(username) || StringUtils.isBlank(rawPassword)) {
throw new AccessException("user not found!");
}
NacosUserDetails nacosUserDetails = (NacosUserDetails) userDetailsService.loadUserByUsername(username);
if (nacosUserDetails == null || !PasswordEncoderUtil.matches(rawPassword, nacosUserDetails.getPassword())) {
throw new AccessException("user not found!");
}
return new NacosUser(nacosUserDetails.getUsername(), jwtTokenManager.createToken(username));
} | @Test
void testAuthenticate7() throws AccessException {
NacosUser nacosUser = new NacosUser();
when(jwtTokenManager.parseToken(anyString())).thenReturn(nacosUser);
MockHttpServletRequest mockHttpServletRequest = new MockHttpServletRequest();
mockHttpServletRequest.addHeader(AuthConstants.AUTHORIZATION_HEADER, AuthConstants.TOKEN_PREFIX + "-token");
NacosUser authenticate = abstractAuthenticationManager.authenticate(mockHttpServletRequest);
assertEquals(nacosUser, authenticate);
} |
public int getInt(int rowId, int colId) {
return ((IntColumnVector) columns[colId]).getInt(rowId);
} | @Test
void testDictionary() {
// dictionary-encoded column: every read resolves through the dictionary
HeapIntVector col = new HeapIntVector(VECTOR_SIZE);
Integer[] dict = new Integer[2];
dict[0] = 1998;
dict[1] = 9998;
col.setDictionary(new ColumnVectorTest.TestDictionary(dict));
HeapIntVector heapIntVector = col.reserveDictionaryIds(VECTOR_SIZE);
for (int i = 0; i < VECTOR_SIZE; i++) {
heapIntVector.vector[i] = i % 2 == 0 ? 0 : 1;
}
VectorizedColumnBatch batch = new VectorizedColumnBatch(new ColumnVector[] {col});
for (int i = 0; i < VECTOR_SIZE; i++) {
ColumnarRowData row = new ColumnarRowData(batch, i);
if (i % 2 == 0) {
assertThat(1998).isEqualTo(row.getInt(0));
} else {
assertThat(9998).isEqualTo(row.getInt(0));
}
}
} |
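A tiny plain-Java sketch of the dictionary decoding the test exercises (no Flink types; array names are illustrative): rows store small integer ids, and reads resolve them through the dictionary values.

public class DictionaryDecodeDemo {
    public static void main(String[] args) {
        int[] dictionary = {1998, 9998}; // dictionary values, as in the test
        int[] ids = {0, 1, 0, 1};        // per-row dictionary ids
        for (int id : ids) {
            System.out.print(dictionary[id] + " "); // prints: 1998 9998 1998 9998
        }
    }
}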
static void checkManifestPlatform(
BuildContext buildContext, ContainerConfigurationTemplate containerConfig)
throws PlatformNotFoundInBaseImageException {
Optional<Path> path = buildContext.getBaseImageConfiguration().getTarPath();
String baseImageName =
path.map(Path::toString)
.orElse(buildContext.getBaseImageConfiguration().getImage().toString());
Set<Platform> platforms = buildContext.getContainerConfiguration().getPlatforms();
Verify.verify(!platforms.isEmpty());
if (platforms.size() != 1) {
String msg =
String.format(
"cannot build for multiple platforms since the base image '%s' is not a manifest list.",
baseImageName);
throw new PlatformNotFoundInBaseImageException(msg);
} else {
Platform platform = platforms.iterator().next();
if (!platform.getArchitecture().equals(containerConfig.getArchitecture())
|| !platform.getOs().equals(containerConfig.getOs())) {
// Unfortunately, "platforms" has amd64/linux by default even if the user didn't explicitly
// configure it. Skip reporting to suppress a false alarm.
if (!(platform.getArchitecture().equals("amd64") && platform.getOs().equals("linux"))) {
String msg =
String.format(
"the configured platform (%s/%s) doesn't match the platform (%s/%s) of the base image (%s)",
platform.getArchitecture(),
platform.getOs(),
containerConfig.getArchitecture(),
containerConfig.getOs(),
baseImageName);
throw new PlatformNotFoundInBaseImageException(msg);
}
}
}
} | @Test
public void testCheckManifestPlatform_mismatch() {
Mockito.when(containerConfig.getPlatforms())
.thenReturn(ImmutableSet.of(new Platform("configured arch", "configured OS")));
ContainerConfigurationTemplate containerConfigJson = new ContainerConfigurationTemplate();
containerConfigJson.setArchitecture("actual arch");
containerConfigJson.setOs("actual OS");
Exception ex =
assertThrows(
PlatformNotFoundInBaseImageException.class,
() -> PlatformChecker.checkManifestPlatform(buildContext, containerConfigJson));
assertThat(ex)
.hasMessageThat()
.isEqualTo(
"the configured platform (configured arch/configured OS) doesn't match the "
+ "platform (actual arch/actual OS) of the base image (scratch)");
} |
public FEELFnResult<Boolean> invoke(@ParameterName( "point" ) Comparable point, @ParameterName( "range" ) Range range) {
if ( point == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be null"));
}
if ( range == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "range", "cannot be null"));
}
try {
boolean result = (range.getLowEndPoint().compareTo(point) < 0 && range.getHighEndPoint().compareTo(point) > 0) ||
(range.getLowEndPoint().compareTo(point) == 0 && range.getLowBoundary() == RangeBoundary.CLOSED) ||
(range.getHighEndPoint().compareTo(point) == 0 && range.getHighBoundary() == RangeBoundary.CLOSED);
return FEELFnResult.ofResult( result );
} catch( Exception e ) {
// points are not comparable
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be compared to range"));
}
} | @Test
void invokeParamsCantBeCompared() {
FunctionTestUtil.assertResultError( duringFunction.invoke(
new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ),
new RangeImpl( Range.RangeBoundary.CLOSED, 1, 2, Range.RangeBoundary.CLOSED ) ), InvalidParametersEvent.class );
} |
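A plain-Java restatement of the endpoint logic above (not the FEEL API; names are illustrative), showing how the boundary flags decide endpoint membership:

static boolean during(int point, int low, int high, boolean lowClosed, boolean highClosed) {
    return (low < point && point < high)      // strictly inside the range
            || (point == low && lowClosed)    // on a closed low endpoint
            || (point == high && highClosed); // on a closed high endpoint
}
// during(1, 1, 2, true, true)  -> true:  low endpoint is CLOSED
// during(1, 1, 2, false, true) -> false: low endpoint is OPEN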
public static double regularizedIncompleteGamma(double s, double x) {
if (s < 0.0) {
throw new IllegalArgumentException("Invalid s: " + s);
}
if (x < 0.0) {
throw new IllegalArgumentException("Invalid x: " + x);
}
double igf;
if (x < s + 1.0) {
// Series representation
igf = regularizedIncompleteGammaSeries(s, x);
} else {
// Continued fraction representation
igf = regularizedIncompleteGammaFraction(s, x);
}
return igf;
} | @Test
public void testIncompleteGamma() {
System.out.println("incompleteGamma");
assertEquals(0.7807, Gamma.regularizedIncompleteGamma(2.1, 3), 1E-4);
assertEquals(0.3504, Gamma.regularizedIncompleteGamma(3, 2.1), 1E-4);
} |
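A hedged usage sketch, assuming the Gamma class above is importable as-is; the first two expected values come from the test, and the third follows from the identity P(2, x) = 1 - (1 + x)e^(-x):

double p1 = Gamma.regularizedIncompleteGamma(2.1, 3.0);  // ~0.7807, x < s+1: series branch
double p2 = Gamma.regularizedIncompleteGamma(3.0, 2.1);  // ~0.3504, x < s+1: series branch
double p3 = Gamma.regularizedIncompleteGamma(2.0, 10.0); // ~0.9995, x >= s+1: continued-fraction branch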
public static Version of(int major, int minor) {
if (major == UNKNOWN_VERSION && minor == UNKNOWN_VERSION) {
return UNKNOWN;
} else {
return new Version(major, minor);
}
} | @Test(expected = AssertionError.class)
@RequireAssertEnabled
public void construct_withOverflowingMinor() {
Version.of(1, Byte.MAX_VALUE + 1);
} |
@Override
public boolean localMember() {
return localMember;
} | @Test
public void testConstructor_withLocalMember_isTrue() {
MemberImpl member = new MemberImpl(address, MemberVersion.of("3.8.0"), true);
assertBasicMemberImplFields(member);
assertTrue(member.localMember());
} |
@Override
public void startBundle() {
// This can contain user code. Wrap it in case it throws an exception.
try {
invoker.invokeStartBundle(new DoFnStartBundleArgumentProvider());
} catch (Throwable t) {
// Exception in user code.
throw wrapUserCodeException(t);
}
} | @Test
public void testStartBundleExceptionsWrappedAsUserCodeException() {
ThrowingDoFn fn = new ThrowingDoFn();
DoFnRunner<String, String> runner =
new SimpleDoFnRunner<>(
null,
fn,
NullSideInputReader.empty(),
null,
null,
Collections.emptyList(),
mockStepContext,
null,
Collections.emptyMap(),
WindowingStrategy.of(new GlobalWindows()),
DoFnSchemaInformation.create(),
Collections.emptyMap());
thrown.expect(UserCodeException.class);
thrown.expectCause(is(fn.exceptionToThrow));
runner.startBundle();
} |
@Override
public boolean betterThan(Num criterionValue1, Num criterionValue2) {
return criterionValue1.isGreaterThan(criterionValue2);
} | @Test
public void betterThan() {
AnalysisCriterion criterion = getCriterion();
assertTrue(criterion.betterThan(numOf(5000), numOf(4500)));
assertFalse(criterion.betterThan(numOf(4500), numOf(5000)));
} |
@Override
public void registerRemote(RemoteInstance remoteInstance) throws ServiceRegisterException {
if (needUsingInternalAddr()) {
remoteInstance = new RemoteInstance(
new Address(config.getInternalComHost(), config.getInternalComPort(), true));
}
this.selfAddress = remoteInstance.getAddress();
try {
AgentClient agentClient = client.agentClient();
Registration registration = ImmutableRegistration.builder()
.id(remoteInstance.getAddress().toString())
.name(serviceName)
.address(remoteInstance.getAddress().getHost())
.port(remoteInstance.getAddress().getPort())
.check(Registration.RegCheck.grpc(
remoteInstance.getAddress()
.getHost() + ":" + remoteInstance
.getAddress()
.getPort(),
5
)) // gRPC health check, run every 5 seconds
.build();
agentClient.register(registration);
healthChecker.health();
} catch (Throwable e) {
healthChecker.unHealth(e);
throw new ServiceRegisterException(e.getMessage());
}
} | @Test
public void registerRemote() {
registerRemote(remoteAddress);
} |
public int deleteExpiredFileByOffset(long offset, int unitSize) {
Object[] mfs = this.copyMappedFiles(0);
List<MappedFile> files = new ArrayList<>();
int deleteCount = 0;
if (null != mfs) {
int mfsLength = mfs.length - 1;
for (int i = 0; i < mfsLength; i++) {
boolean destroy;
MappedFile mappedFile = (MappedFile) mfs[i];
SelectMappedBufferResult result = mappedFile.selectMappedBuffer(this.mappedFileSize - unitSize);
if (result != null) {
long maxOffsetInLogicQueue = result.getByteBuffer().getLong();
result.release();
destroy = maxOffsetInLogicQueue < offset;
if (destroy) {
log.info("physic min offset " + offset + ", logics in current mappedFile max offset "
+ maxOffsetInLogicQueue + ", delete it");
}
} else if (!mappedFile.isAvailable()) { // Handle hanged file.
log.warn("Found a hanged consume queue file, attempting to delete it.");
destroy = true;
} else {
log.warn("this being not executed forever.");
break;
}
if (destroy && mappedFile.destroy(1000 * 60)) {
files.add(mappedFile);
deleteCount++;
} else {
break;
}
}
}
deleteExpiredFile(files);
return deleteCount;
} | @Test
public void testDeleteExpiredFileByOffset() {
MappedFileQueue mappedFileQueue =
new MappedFileQueue(storePath + File.separator + "e/", 5120, null);
for (int i = 0; i < 2048; i++) {
MappedFile mappedFile = mappedFileQueue.getLastMappedFile(0);
assertThat(mappedFile).isNotNull();
ByteBuffer byteBuffer = ByteBuffer.allocate(ConsumeQueue.CQ_STORE_UNIT_SIZE);
byteBuffer.putLong(i);
byte[] padding = new byte[12];
Arrays.fill(padding, (byte) '0');
byteBuffer.put(padding);
byteBuffer.flip();
assertThat(mappedFile.appendMessage(byteBuffer.array())).isTrue();
}
MappedFile first = mappedFileQueue.getFirstMappedFile();
first.hold();
assertThat(mappedFileQueue.deleteExpiredFileByOffset(20480, ConsumeQueue.CQ_STORE_UNIT_SIZE)).isEqualTo(0);
first.release();
assertThat(mappedFileQueue.deleteExpiredFileByOffset(20480, ConsumeQueue.CQ_STORE_UNIT_SIZE)).isGreaterThan(0);
first = mappedFileQueue.getFirstMappedFile();
assertThat(first.getFileFromOffset()).isGreaterThan(0);
mappedFileQueue.shutdown(1000);
mappedFileQueue.destroy();
} |
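For the constants in this test: each appended entry is 20 bytes (an 8-byte long plus 12 bytes of padding, matching ConsumeQueue.CQ_STORE_UNIT_SIZE), so a 5120-byte mapped file holds 256 entries and the 2048 writes fill 8 files. Every stored long (0..2047) is below the offset 20480, so once the held first file is released, every file except the last becomes deletable.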
@Nonnull
@Override
public List<DataConnectionResource> listResources() {
try {
try (Connection connection = getConnection()) {
DatabaseMetaData databaseMetaData = connection.getMetaData();
ResourceReader reader = new ResourceReader();
switch (resolveDialect(databaseMetaData)) {
case H2:
reader.withCatalog(connection.getCatalog())
.exclude(
(catalog, schema, table) ->
H2_SYSTEM_SCHEMA_LIST.contains(schema)
);
break;
case POSTGRESQL:
reader.withCatalog(connection.getCatalog());
break;
case MYSQL:
reader.exclude(
(catalog, schema, table) ->
catalog != null && MYSQL_SYSTEM_CATALOG_LIST.contains(catalog.toUpperCase(ROOT))
);
break;
case MICROSOFT_SQL_SERVER:
reader
.withCatalog(connection.getCatalog())
.exclude(
(catalog, schema, table) ->
MSSQL_SYSTEM_SCHEMA_LIST.contains(schema)
|| MSSQL_SYSTEM_TABLE_LIST.contains(table)
);
break;
default:
// Nothing to do
}
return reader.listResources(connection);
}
} catch (Exception exception) {
throw new HazelcastException("Could not read resources for DataConnection " + getName(), exception);
}
} | @Test
public void list_resources_should_return_table_in_schema() throws Exception {
jdbcDataConnection = new JdbcDataConnection(SHARED_DATA_CONNECTION_CONFIG);
executeJdbc(JDBC_URL_SHARED, "CREATE SCHEMA MY_SCHEMA");
executeJdbc(JDBC_URL_SHARED, "CREATE TABLE MY_SCHEMA.MY_TABLE (ID INT, NAME VARCHAR)");
List<DataConnectionResource> dataConnectionResources = jdbcDataConnection.listResources();
assertThat(dataConnectionResources).contains(
new DataConnectionResource("TABLE", DB_NAME_SHARED, "MY_SCHEMA", "MY_TABLE")
);
} |
@Override
public RemotingCommand processRequest(final ChannelHandlerContext ctx, RemotingCommand request)
throws RemotingCommandException {
final long beginTimeMills = this.brokerController.getMessageStore().now();
request.addExtFieldIfNotExist(BORN_TIME, String.valueOf(System.currentTimeMillis()));
if (Objects.equals(request.getExtFields().get(BORN_TIME), "0")) {
request.addExtField(BORN_TIME, String.valueOf(System.currentTimeMillis()));
}
Channel channel = ctx.channel();
RemotingCommand response = RemotingCommand.createResponseCommand(PopMessageResponseHeader.class);
final PopMessageResponseHeader responseHeader = (PopMessageResponseHeader) response.readCustomHeader();
final PopMessageRequestHeader requestHeader =
(PopMessageRequestHeader) request.decodeCommandCustomHeader(PopMessageRequestHeader.class, true);
StringBuilder startOffsetInfo = new StringBuilder(64);
StringBuilder msgOffsetInfo = new StringBuilder(64);
StringBuilder orderCountInfo = null;
if (requestHeader.isOrder()) {
orderCountInfo = new StringBuilder(64);
}
brokerController.getConsumerManager().compensateBasicConsumerInfo(requestHeader.getConsumerGroup(),
ConsumeType.CONSUME_POP, MessageModel.CLUSTERING);
response.setOpaque(request.getOpaque());
if (brokerController.getBrokerConfig().isEnablePopLog()) {
POP_LOGGER.info("receive PopMessage request command, {}", request);
}
if (requestHeader.isTimeoutTooMuch()) {
response.setCode(ResponseCode.POLLING_TIMEOUT);
response.setRemark(String.format("the broker[%s] pop message is timeout too much",
this.brokerController.getBrokerConfig().getBrokerIP1()));
return response;
}
if (!PermName.isReadable(this.brokerController.getBrokerConfig().getBrokerPermission())) {
response.setCode(ResponseCode.NO_PERMISSION);
response.setRemark(String.format("the broker[%s] pop message is forbidden",
this.brokerController.getBrokerConfig().getBrokerIP1()));
return response;
}
if (requestHeader.getMaxMsgNums() > 32) {
response.setCode(ResponseCode.SYSTEM_ERROR);
response.setRemark(String.format("the broker[%s] pop message's num is greater than 32",
this.brokerController.getBrokerConfig().getBrokerIP1()));
return response;
}
if (!brokerController.getMessageStore().getMessageStoreConfig().isTimerWheelEnable()) {
response.setCode(ResponseCode.SYSTEM_ERROR);
response.setRemark(String.format("the broker[%s] pop message is forbidden because timerWheelEnable is false",
this.brokerController.getBrokerConfig().getBrokerIP1()));
return response;
}
TopicConfig topicConfig =
this.brokerController.getTopicConfigManager().selectTopicConfig(requestHeader.getTopic());
if (null == topicConfig) {
POP_LOGGER.error("The topic {} not exist, consumer: {} ", requestHeader.getTopic(),
RemotingHelper.parseChannelRemoteAddr(channel));
response.setCode(ResponseCode.TOPIC_NOT_EXIST);
response.setRemark(String.format("topic[%s] not exist, apply first please! %s", requestHeader.getTopic(),
FAQUrl.suggestTodo(FAQUrl.APPLY_TOPIC_URL)));
return response;
}
if (!PermName.isReadable(topicConfig.getPerm())) {
response.setCode(ResponseCode.NO_PERMISSION);
response.setRemark("the topic[" + requestHeader.getTopic() + "] peeking message is forbidden");
return response;
}
if (requestHeader.getQueueId() >= topicConfig.getReadQueueNums()) {
String errorInfo = String.format("queueId[%d] is illegal, topic:[%s] topicConfig.readQueueNums:[%d] " +
"consumer:[%s]",
requestHeader.getQueueId(), requestHeader.getTopic(), topicConfig.getReadQueueNums(),
channel.remoteAddress());
POP_LOGGER.warn(errorInfo);
response.setCode(ResponseCode.SYSTEM_ERROR);
response.setRemark(errorInfo);
return response;
}
SubscriptionGroupConfig subscriptionGroupConfig =
this.brokerController.getSubscriptionGroupManager().findSubscriptionGroupConfig(requestHeader.getConsumerGroup());
if (null == subscriptionGroupConfig) {
response.setCode(ResponseCode.SUBSCRIPTION_GROUP_NOT_EXIST);
response.setRemark(String.format("subscription group [%s] does not exist, %s",
requestHeader.getConsumerGroup(), FAQUrl.suggestTodo(FAQUrl.SUBSCRIPTION_GROUP_NOT_EXIST)));
return response;
}
if (!subscriptionGroupConfig.isConsumeEnable()) {
response.setCode(ResponseCode.NO_PERMISSION);
response.setRemark("subscription group no permission, " + requestHeader.getConsumerGroup());
return response;
}
BrokerConfig brokerConfig = brokerController.getBrokerConfig();
SubscriptionData subscriptionData = null;
ExpressionMessageFilter messageFilter = null;
if (requestHeader.getExp() != null && !requestHeader.getExp().isEmpty()) {
try {
subscriptionData = FilterAPI.build(requestHeader.getTopic(), requestHeader.getExp(), requestHeader.getExpType());
brokerController.getConsumerManager().compensateSubscribeData(requestHeader.getConsumerGroup(),
requestHeader.getTopic(), subscriptionData);
String retryTopic = KeyBuilder.buildPopRetryTopic(requestHeader.getTopic(), requestHeader.getConsumerGroup(), brokerConfig.isEnableRetryTopicV2());
SubscriptionData retrySubscriptionData = FilterAPI.build(retryTopic, SubscriptionData.SUB_ALL, requestHeader.getExpType());
brokerController.getConsumerManager().compensateSubscribeData(requestHeader.getConsumerGroup(),
retryTopic, retrySubscriptionData);
ConsumerFilterData consumerFilterData = null;
if (!ExpressionType.isTagType(subscriptionData.getExpressionType())) {
consumerFilterData = ConsumerFilterManager.build(
requestHeader.getTopic(), requestHeader.getConsumerGroup(), requestHeader.getExp(),
requestHeader.getExpType(), System.currentTimeMillis()
);
if (consumerFilterData == null) {
POP_LOGGER.warn("Parse the consumer's subscription[{}] failed, group: {}",
requestHeader.getExp(), requestHeader.getConsumerGroup());
response.setCode(ResponseCode.SUBSCRIPTION_PARSE_FAILED);
response.setRemark("parse the consumer's subscription failed");
return response;
}
}
messageFilter = new ExpressionMessageFilter(subscriptionData, consumerFilterData,
brokerController.getConsumerFilterManager());
} catch (Exception e) {
POP_LOGGER.warn("Parse the consumer's subscription[{}] error, group: {}", requestHeader.getExp(),
requestHeader.getConsumerGroup());
response.setCode(ResponseCode.SUBSCRIPTION_PARSE_FAILED);
response.setRemark("parse the consumer's subscription failed");
return response;
}
} else {
try {
subscriptionData = FilterAPI.build(requestHeader.getTopic(), "*", ExpressionType.TAG);
brokerController.getConsumerManager().compensateSubscribeData(requestHeader.getConsumerGroup(),
requestHeader.getTopic(), subscriptionData);
String retryTopic = KeyBuilder.buildPopRetryTopic(requestHeader.getTopic(), requestHeader.getConsumerGroup(), brokerConfig.isEnableRetryTopicV2());
SubscriptionData retrySubscriptionData = FilterAPI.build(retryTopic, "*", ExpressionType.TAG);
brokerController.getConsumerManager().compensateSubscribeData(requestHeader.getConsumerGroup(),
retryTopic, retrySubscriptionData);
} catch (Exception e) {
POP_LOGGER.warn("Build default subscription error, group: {}", requestHeader.getConsumerGroup());
}
}
int randomQ = random.nextInt(100);
int reviveQid;
if (requestHeader.isOrder()) {
reviveQid = KeyBuilder.POP_ORDER_REVIVE_QUEUE;
} else {
reviveQid = (int) Math.abs(ckMessageNumber.getAndIncrement() % this.brokerController.getBrokerConfig().getReviveQueueNum());
}
GetMessageResult getMessageResult = new GetMessageResult(requestHeader.getMaxMsgNums());
ExpressionMessageFilter finalMessageFilter = messageFilter;
StringBuilder finalOrderCountInfo = orderCountInfo;
// Due to the design of the fields startOffsetInfo, msgOffsetInfo, and orderCountInfo,
// a single POP request can invoke the popMsgFromQueue method only once
// for either a normal topic or a retry topic's queue. Retry topics v1 and v2 are
// considered the same type because they share the same retry flag in previous fields.
// Therefore, needRetryV1 is designed as a subset of needRetry, and within a single request,
// only one type of retry topic is able to call popMsgFromQueue.
boolean needRetry = randomQ % 5 == 0;
boolean needRetryV1 = false;
if (brokerConfig.isEnableRetryTopicV2() && brokerConfig.isRetrieveMessageFromPopRetryTopicV1()) {
needRetryV1 = randomQ % 2 == 0;
}
long popTime = System.currentTimeMillis();
CompletableFuture<Long> getMessageFuture = CompletableFuture.completedFuture(0L);
if (needRetry && !requestHeader.isOrder()) {
if (needRetryV1) {
String retryTopic = KeyBuilder.buildPopRetryTopicV1(requestHeader.getTopic(), requestHeader.getConsumerGroup());
getMessageFuture = popMsgFromTopic(retryTopic, true, getMessageResult, requestHeader, reviveQid, channel,
popTime, finalMessageFilter, startOffsetInfo, msgOffsetInfo, orderCountInfo, randomQ, getMessageFuture);
} else {
String retryTopic = KeyBuilder.buildPopRetryTopic(requestHeader.getTopic(), requestHeader.getConsumerGroup(), brokerConfig.isEnableRetryTopicV2());
getMessageFuture = popMsgFromTopic(retryTopic, true, getMessageResult, requestHeader, reviveQid, channel,
popTime, finalMessageFilter, startOffsetInfo, msgOffsetInfo, orderCountInfo, randomQ, getMessageFuture);
}
}
if (requestHeader.getQueueId() < 0) {
// read all queue
getMessageFuture = popMsgFromTopic(topicConfig, false, getMessageResult, requestHeader, reviveQid, channel,
popTime, finalMessageFilter, startOffsetInfo, msgOffsetInfo, orderCountInfo, randomQ, getMessageFuture);
} else {
int queueId = requestHeader.getQueueId();
getMessageFuture = getMessageFuture.thenCompose(restNum ->
popMsgFromQueue(topicConfig.getTopicName(), requestHeader.getAttemptId(), false,
getMessageResult, requestHeader, queueId, restNum, reviveQid, channel, popTime, finalMessageFilter,
startOffsetInfo, msgOffsetInfo, finalOrderCountInfo));
}
// if the result is not full, fetch from the retry topic as well
if (!needRetry && getMessageResult.getMessageMapedList().size() < requestHeader.getMaxMsgNums() && !requestHeader.isOrder()) {
if (needRetryV1) {
String retryTopicV1 = KeyBuilder.buildPopRetryTopicV1(requestHeader.getTopic(), requestHeader.getConsumerGroup());
getMessageFuture = popMsgFromTopic(retryTopicV1, true, getMessageResult, requestHeader, reviveQid, channel,
popTime, finalMessageFilter, startOffsetInfo, msgOffsetInfo, orderCountInfo, randomQ, getMessageFuture);
} else {
String retryTopic = KeyBuilder.buildPopRetryTopic(requestHeader.getTopic(), requestHeader.getConsumerGroup(), brokerConfig.isEnableRetryTopicV2());
getMessageFuture = popMsgFromTopic(retryTopic, true, getMessageResult, requestHeader, reviveQid, channel,
popTime, finalMessageFilter, startOffsetInfo, msgOffsetInfo, orderCountInfo, randomQ, getMessageFuture);
}
}
final RemotingCommand finalResponse = response;
SubscriptionData finalSubscriptionData = subscriptionData;
getMessageFuture.thenApply(restNum -> {
if (!getMessageResult.getMessageBufferList().isEmpty()) {
finalResponse.setCode(ResponseCode.SUCCESS);
getMessageResult.setStatus(GetMessageStatus.FOUND);
if (restNum > 0) {
// a pop across all queues cannot notify a pop on a specific queue, and vice versa
popLongPollingService.notifyMessageArriving(
requestHeader.getTopic(), requestHeader.getQueueId(), requestHeader.getConsumerGroup(),
null, 0L, null, null);
}
} else {
PollingResult pollingResult = popLongPollingService.polling(
ctx, request, new PollingHeader(requestHeader), finalSubscriptionData, finalMessageFilter);
if (PollingResult.POLLING_SUC == pollingResult) {
if (restNum > 0) {
popLongPollingService.notifyMessageArriving(
requestHeader.getTopic(), requestHeader.getQueueId(), requestHeader.getConsumerGroup(),
null, 0L, null, null);
}
return null;
} else if (PollingResult.POLLING_FULL == pollingResult) {
finalResponse.setCode(ResponseCode.POLLING_FULL);
} else {
finalResponse.setCode(ResponseCode.POLLING_TIMEOUT);
}
getMessageResult.setStatus(GetMessageStatus.NO_MESSAGE_IN_QUEUE);
}
responseHeader.setInvisibleTime(requestHeader.getInvisibleTime());
responseHeader.setPopTime(popTime);
responseHeader.setReviveQid(reviveQid);
responseHeader.setRestNum(restNum);
responseHeader.setStartOffsetInfo(startOffsetInfo.toString());
responseHeader.setMsgOffsetInfo(msgOffsetInfo.toString());
if (requestHeader.isOrder() && finalOrderCountInfo != null) {
responseHeader.setOrderCountInfo(finalOrderCountInfo.toString());
}
finalResponse.setRemark(getMessageResult.getStatus().name());
switch (finalResponse.getCode()) {
case ResponseCode.SUCCESS:
if (this.brokerController.getBrokerConfig().isTransferMsgByHeap()) {
final byte[] r = this.readGetMessageResult(getMessageResult, requestHeader.getConsumerGroup(),
requestHeader.getTopic(), requestHeader.getQueueId());
this.brokerController.getBrokerStatsManager().incGroupGetLatency(requestHeader.getConsumerGroup(),
requestHeader.getTopic(), requestHeader.getQueueId(),
(int) (this.brokerController.getMessageStore().now() - beginTimeMills));
finalResponse.setBody(r);
} else {
final GetMessageResult tmpGetMessageResult = getMessageResult;
try {
FileRegion fileRegion =
new ManyMessageTransfer(finalResponse.encodeHeader(getMessageResult.getBufferTotalSize()),
getMessageResult);
channel.writeAndFlush(fileRegion)
.addListener((ChannelFutureListener) future -> {
tmpGetMessageResult.release();
Attributes attributes = RemotingMetricsManager.newAttributesBuilder()
.put(LABEL_REQUEST_CODE, RemotingHelper.getRequestCodeDesc(request.getCode()))
.put(LABEL_RESPONSE_CODE, RemotingHelper.getResponseCodeDesc(finalResponse.getCode()))
.put(LABEL_RESULT, RemotingMetricsManager.getWriteAndFlushResult(future))
.build();
RemotingMetricsManager.rpcLatency.record(request.getProcessTimer().elapsed(TimeUnit.MILLISECONDS), attributes);
if (!future.isSuccess()) {
POP_LOGGER.error("Fail to transfer messages from page cache to {}",
channel.remoteAddress(), future.cause());
}
});
} catch (Throwable e) {
POP_LOGGER.error("Error occurred when transferring messages from page cache", e);
getMessageResult.release();
}
return null;
}
break;
default:
return finalResponse;
}
return finalResponse;
}).thenAccept(result -> NettyRemotingAbstract.writeResponse(channel, request, result));
return null;
} | @Test
public void testProcessRequest_MsgWasRemoving() throws RemotingCommandException {
GetMessageResult getMessageResult = createGetMessageResult(1);
getMessageResult.setStatus(GetMessageStatus.MESSAGE_WAS_REMOVING);
when(messageStore.getMessageStoreConfig()).thenReturn(new MessageStoreConfig());
when(messageStore.getMessageAsync(anyString(), anyString(), anyInt(), anyLong(), anyInt(), any())).thenReturn(CompletableFuture.completedFuture(getMessageResult));
final RemotingCommand request = createPopMsgCommand();
popMessageProcessor.processRequest(handlerContext, request);
RemotingCommand response = embeddedChannel.readOutbound();
assertThat(response).isNotNull();
assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
} |
public static <T> Collection<T> checkedSubTypeCast(
Collection<? super T> collection, Class<T> subTypeClass) {
for (Object o : collection) {
// probe each object, will throw ClassCastException on mismatch.
subTypeClass.cast(o);
}
return subTypeCast(collection);
} | @Test
void testCheckedSubTypeCast() {
List<A> list = new ArrayList<>();
B b = new B();
C c = new C();
list.add(b);
list.add(c);
list.add(null);
Collection<B> castSuccess = CollectionUtil.checkedSubTypeCast(list, B.class);
Iterator<B> iterator = castSuccess.iterator();
assertThat(iterator.next()).isEqualTo(b);
assertThat(iterator.next()).isEqualTo(c);
assertThat(iterator.next()).isNull();
assertThat(iterator).isExhausted();
assertThatThrownBy(() -> CollectionUtil.checkedSubTypeCast(list, C.class))
.isInstanceOf(ClassCastException.class);
} |
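One detail worth calling out in the test above: the null element passes the probe because Class.cast(null) returns null rather than throwing. A minimal sketch of the probe loop (plain Java; names are illustrative):

import java.util.Arrays;
import java.util.List;

public class CheckedCastDemo {
    public static void main(String[] args) {
        List<Object> values = Arrays.asList("ok", null);
        for (Object o : values) {
            // throws ClassCastException only on a non-null element of the wrong type
            String probed = String.class.cast(o);
            System.out.println(probed); // prints: ok, then null
        }
    }
}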
public static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord) {
int offsetDelta = (int) (lastOffset - baseOffset);
writeHeader(buffer, baseOffset, offsetDelta, DefaultRecordBatch.RECORD_BATCH_OVERHEAD, magic,
CompressionType.NONE, timestampType, RecordBatch.NO_TIMESTAMP, timestamp, producerId,
producerEpoch, baseSequence, isTransactional, isControlRecord, false, partitionLeaderEpoch, 0);
} | @Test
public void testWriteEmptyHeader() {
long producerId = 23423L;
short producerEpoch = 145;
int baseSequence = 983;
long baseOffset = 15L;
long lastOffset = 37;
int partitionLeaderEpoch = 15;
long timestamp = System.currentTimeMillis();
for (TimestampType timestampType : Arrays.asList(TimestampType.CREATE_TIME, TimestampType.LOG_APPEND_TIME)) {
for (boolean isTransactional : Arrays.asList(true, false)) {
for (boolean isControlBatch : Arrays.asList(true, false)) {
ByteBuffer buffer = ByteBuffer.allocate(2048);
DefaultRecordBatch.writeEmptyHeader(buffer, RecordBatch.CURRENT_MAGIC_VALUE, producerId,
producerEpoch, baseSequence, baseOffset, lastOffset, partitionLeaderEpoch, timestampType,
timestamp, isTransactional, isControlBatch);
buffer.flip();
DefaultRecordBatch batch = new DefaultRecordBatch(buffer);
assertEquals(producerId, batch.producerId());
assertEquals(producerEpoch, batch.producerEpoch());
assertEquals(baseSequence, batch.baseSequence());
assertEquals(baseSequence + ((int) (lastOffset - baseOffset)), batch.lastSequence());
assertEquals(baseOffset, batch.baseOffset());
assertEquals(lastOffset, batch.lastOffset());
assertEquals(partitionLeaderEpoch, batch.partitionLeaderEpoch());
assertEquals(isTransactional, batch.isTransactional());
assertEquals(timestampType, batch.timestampType());
assertEquals(timestamp, batch.maxTimestamp());
assertEquals(RecordBatch.NO_TIMESTAMP, batch.baseTimestamp());
assertEquals(isControlBatch, batch.isControlBatch());
}
}
}
} |
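For the constants in this test, offsetDelta = 37 - 15 = 22, so the asserted lastSequence is 983 + 22 = 1005.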
@Override
public String execute(CommandContext commandContext, String[] args) {
if (ArrayUtils.isEmpty(args)) {
return "Please input method name, eg: \r\ninvoke xxxMethod(1234, \"abcd\", {\"prop\" : \"value\"})\r\n"
+ "invoke XxxService.xxxMethod(1234, \"abcd\", {\"prop\" : \"value\"})\r\n"
+ "invoke com.xxx.XxxService.xxxMethod(1234, \"abcd\", {\"prop\" : \"value\"})";
}
Channel channel = commandContext.getRemote();
String service = channel.attr(ChangeTelnet.SERVICE_KEY) != null
? channel.attr(ChangeTelnet.SERVICE_KEY).get()
: null;
String message = args[0];
int i = message.indexOf("(");
if (i < 0 || !message.endsWith(")")) {
return "Invalid parameters, format: service.method(args)";
}
String method = message.substring(0, i).trim();
String param = message.substring(i + 1, message.length() - 1).trim();
i = method.lastIndexOf(".");
if (i >= 0) {
service = method.substring(0, i).trim();
method = method.substring(i + 1).trim();
}
if (StringUtils.isEmpty(service)) {
return "If you want to invoke like [invoke sayHello(\"xxxx\")], please execute cd command first,"
+ " or you can execute it like [invoke IHelloService.sayHello(\"xxxx\")]";
}
List<Object> list;
try {
list = JsonUtils.toJavaList("[" + param + "]", Object.class);
} catch (Throwable t) {
return "Invalid json argument, cause: " + t.getMessage();
}
StringBuilder buf = new StringBuilder();
Method invokeMethod = null;
ProviderModel selectedProvider = null;
if (isInvokedSelectCommand(channel)) {
selectedProvider = channel.attr(INVOKE_METHOD_PROVIDER_KEY).get();
invokeMethod = channel.attr(SelectTelnet.SELECT_METHOD_KEY).get();
} else {
for (ProviderModel provider : frameworkModel.getServiceRepository().allProviderModels()) {
if (!isServiceMatch(service, provider)) {
continue;
}
selectedProvider = provider;
List<Method> methodList = findSameSignatureMethod(provider.getAllMethods(), method, list);
if (CollectionUtils.isEmpty(methodList)) {
break;
}
if (methodList.size() == 1) {
invokeMethod = methodList.get(0);
} else {
List<Method> matchMethods = findMatchMethods(methodList, list);
if (CollectionUtils.isEmpty(matchMethods)) {
break;
}
if (matchMethods.size() == 1) {
invokeMethod = matchMethods.get(0);
} else { // multiple overloaded methods match
channel.attr(INVOKE_METHOD_PROVIDER_KEY).set(provider);
channel.attr(INVOKE_METHOD_LIST_KEY).set(matchMethods);
channel.attr(INVOKE_MESSAGE_KEY).set(message);
printSelectMessage(buf, matchMethods);
return buf.toString();
}
}
break;
}
}
if (!StringUtils.isEmpty(service)) {
buf.append("Use default service ").append(service).append('.');
}
if (selectedProvider == null) {
buf.append("\r\nNo such service ").append(service);
return buf.toString();
}
if (invokeMethod == null) {
buf.append("\r\nNo such method ")
.append(method)
.append(" in service ")
.append(service);
return buf.toString();
}
try {
Object[] array =
realize(list.toArray(), invokeMethod.getParameterTypes(), invokeMethod.getGenericParameterTypes());
long start = System.currentTimeMillis();
AppResponse result = new AppResponse();
try {
Object o = invokeMethod.invoke(selectedProvider.getServiceInstance(), array);
boolean setValueDone = false;
if (RpcContext.getServerAttachment().isAsyncStarted()) {
AsyncContext asyncContext = RpcContext.getServerAttachment().getAsyncContext();
if (asyncContext instanceof AsyncContextImpl) {
CompletableFuture<Object> internalFuture =
((AsyncContextImpl) asyncContext).getInternalFuture();
result.setValue(internalFuture.get());
setValueDone = true;
}
}
if (!setValueDone) {
result.setValue(o);
}
} catch (Throwable t) {
result.setException(t);
if (t instanceof InterruptedException) {
Thread.currentThread().interrupt();
}
} finally {
RpcContext.removeContext();
}
long end = System.currentTimeMillis();
buf.append("\r\nresult: ");
buf.append(JsonUtils.toJson(result.recreate()));
buf.append("\r\nelapsed: ");
buf.append(end - start);
buf.append(" ms.");
} catch (Throwable t) {
return "Failed to invoke method " + invokeMethod.getName() + ", cause: " + StringUtils.toString(t);
}
return buf.toString();
} | @Test
void testInvokeByPassingEnumValue() throws RemotingException {
defaultAttributeMap.attr(ChangeTelnet.SERVICE_KEY).set(DemoService.class.getName());
defaultAttributeMap.attr(SelectTelnet.SELECT_KEY).set(null);
given(mockChannel.attr(ChangeTelnet.SERVICE_KEY))
.willReturn(defaultAttributeMap.attr(ChangeTelnet.SERVICE_KEY));
given(mockChannel.attr(SelectTelnet.SELECT_KEY)).willReturn(defaultAttributeMap.attr(SelectTelnet.SELECT_KEY));
registerProvider(DemoService.class.getName(), new DemoServiceImpl(), DemoService.class);
String result = invoke.execute(mockCommandContext, new String[] {"getType(\"High\")"});
assertTrue(result.contains("result: \"High\""));
defaultAttributeMap.attr(ChangeTelnet.SERVICE_KEY).remove();
defaultAttributeMap.attr(SelectTelnet.SELECT_KEY).remove();
} |
static void checkNearCacheNativeMemoryConfig(InMemoryFormat inMemoryFormat, NativeMemoryConfig nativeMemoryConfig,
boolean isEnterprise) {
if (!isEnterprise) {
return;
}
if (inMemoryFormat != NATIVE) {
return;
}
if (nativeMemoryConfig != null && nativeMemoryConfig.isEnabled()) {
return;
}
throw new InvalidConfigurationException("Enable native memory config to use NATIVE in-memory-format for Near Cache");
} | @Test
public void checkNearCacheNativeMemoryConfig_shouldNotNeedNativeMemoryConfig_BINARY_onOS() {
checkNearCacheNativeMemoryConfig(BINARY, null, false);
} |