focal_method | test_case
---|---
public WebAppProxyServlet() {
super();
conf = new YarnConfiguration();
this.trackingUriPlugins =
conf.getInstances(YarnConfiguration.YARN_TRACKING_URL_GENERATOR,
TrackingUriPlugin.class);
this.failurePageUrlBase =
StringHelper.pjoin(WebAppUtils.getResolvedRMWebAppURLWithScheme(conf),
"cluster", "failure");
} | @Test
@Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
void testWebAppProxyServlet() throws Exception {
configuration.set(YarnConfiguration.PROXY_ADDRESS, "localhost:9090");
// overriding num of web server threads, see HttpServer.HTTP_MAXTHREADS
configuration.setInt("hadoop.http.max.threads", 10);
WebAppProxyServerForTest proxy = new WebAppProxyServerForTest();
proxy.init(configuration);
proxy.start();
int proxyPort = proxy.proxy.proxyServer.getConnectorAddress(0).getPort();
AppReportFetcherForTest appReportFetcher = proxy.proxy.appReportFetcher;
// wrong url
try {
// wrong url without app ID
URL emptyUrl = new URL("http://localhost:" + proxyPort + "/proxy");
HttpURLConnection emptyProxyConn = (HttpURLConnection) emptyUrl
.openConnection();
emptyProxyConn.connect();
assertEquals(HttpURLConnection.HTTP_NOT_FOUND, emptyProxyConn.getResponseCode());
// wrong url. Set wrong app ID
URL wrongUrl = new URL("http://localhost:" + proxyPort + "/proxy/app");
HttpURLConnection proxyConn = (HttpURLConnection) wrongUrl
.openConnection();
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_INTERNAL_ERROR,
proxyConn.getResponseCode());
// set a real application ID in the URL
URL url = new URL("http://localhost:" + proxyPort + "/proxy/application_00_0");
proxyConn = (HttpURLConnection) url.openConnection();
// set cookie
proxyConn.setRequestProperty("Cookie", "checked_application_0_0000=true");
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_OK, proxyConn.getResponseCode());
assertTrue(isResponseCookiePresent(
proxyConn, "checked_application_0_0000", "true"));
// test that redirection is squashed correctly
URL redirectUrl = new URL("http://localhost:" + proxyPort
+ "/proxy/redirect/application_00_0");
proxyConn = (HttpURLConnection) redirectUrl.openConnection();
proxyConn.setInstanceFollowRedirects(false);
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_MOVED_TEMP, proxyConn.getResponseCode(),
"The proxy returned an unexpected status code rather than"
+ "redirecting the connection (302)");
String expected =
WebAppUtils.getResolvedRMWebAppURLWithScheme(configuration)
+ "/cluster/failure/application_00_0";
String redirect = proxyConn.getHeaderField(ProxyUtils.LOCATION);
assertEquals(expected, redirect, "The proxy did not redirect the connection to the failure "
+ "page of the RM");
// cannot find application, case 1: report is null
appReportFetcher.answer = 1;
proxyConn = (HttpURLConnection) url.openConnection();
proxyConn.setRequestProperty("Cookie", "checked_application_0_0000=true");
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_NOT_FOUND,
proxyConn.getResponseCode());
assertFalse(isResponseCookiePresent(
proxyConn, "checked_application_0_0000", "true"));
// cannot find application, case 2: ApplicationNotFoundException
appReportFetcher.answer = 4;
proxyConn = (HttpURLConnection) url.openConnection();
proxyConn.setRequestProperty("Cookie", "checked_application_0_0000=true");
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_NOT_FOUND,
proxyConn.getResponseCode());
assertFalse(isResponseCookiePresent(
proxyConn, "checked_application_0_0000", "true"));
// wrong user
appReportFetcher.answer = 2;
proxyConn = (HttpURLConnection) url.openConnection();
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_OK, proxyConn.getResponseCode());
String s = readInputStream(proxyConn.getInputStream());
assertTrue(s
.contains("to continue to an Application Master web interface owned by"));
assertTrue(s.contains("WARNING: The following page may not be safe!"));
// case where the application has a not-running status
appReportFetcher.answer = 3;
proxyConn = (HttpURLConnection) url.openConnection();
proxyConn.setRequestProperty("Cookie", "checked_application_0_0000=true");
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_OK, proxyConn.getResponseCode());
// test user-provided path and query parameter can be appended to the
// original tracking url
appReportFetcher.answer = 5;
URL clientUrl = new URL("http://localhost:" + proxyPort
+ "/proxy/application_00_0/test/tez?x=y&h=p");
proxyConn = (HttpURLConnection) clientUrl.openConnection();
proxyConn.connect();
LOG.info("" + proxyConn.getURL());
LOG.info("ProxyConn.getHeaderField(): " + proxyConn.getHeaderField(ProxyUtils.LOCATION));
assertEquals("http://localhost:" + originalPort
+ "/foo/bar/test/tez?a=b&x=y&h=p#main", proxyConn.getURL().toString());
} finally {
proxy.close();
}
} |
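// Forces streaming mode when the pipeline has unbounded output, stages files
// for remote cluster execution, then builds either the streaming translator
// (optionally in BATCH runtime mode) or the batch translator, applies the
// default transform overrides, and runs the translation.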
public void translate(Pipeline pipeline) {
this.flinkBatchEnv = null;
this.flinkStreamEnv = null;
final boolean hasUnboundedOutput =
PipelineTranslationModeOptimizer.hasUnboundedOutput(pipeline);
if (hasUnboundedOutput) {
LOG.info("Found unbounded PCollection. Switching to streaming execution.");
options.setStreaming(true);
}
// Staged files need to be set before initializing the execution environments
prepareFilesToStageForRemoteClusterExecution(options);
FlinkPipelineTranslator translator;
if (options.isStreaming() || options.getUseDataStreamForBatch()) {
this.flinkStreamEnv = FlinkExecutionEnvironments.createStreamExecutionEnvironment(options);
if (hasUnboundedOutput && !flinkStreamEnv.getCheckpointConfig().isCheckpointingEnabled()) {
LOG.warn(
"UnboundedSources present which rely on checkpointing, but checkpointing is disabled.");
}
translator =
new FlinkStreamingPipelineTranslator(flinkStreamEnv, options, options.isStreaming());
if (!options.isStreaming()) {
flinkStreamEnv.setRuntimeMode(RuntimeExecutionMode.BATCH);
}
} else {
this.flinkBatchEnv = FlinkExecutionEnvironments.createBatchExecutionEnvironment(options);
translator = new FlinkBatchPipelineTranslator(flinkBatchEnv, options);
}
// Transform replacements need to receive the finalized PipelineOptions
// including execution mode (batch/streaming) and parallelism.
pipeline.replaceAll(FlinkTransformOverrides.getDefaultOverrides(options));
translator.translate(pipeline);
} | @Test
public void testTranslationModeOverrideWithUnboundedSources() {
FlinkPipelineOptions options = getDefaultPipelineOptions();
options.setRunner(FlinkRunner.class);
options.setStreaming(false);
FlinkPipelineExecutionEnvironment flinkEnv = new FlinkPipelineExecutionEnvironment(options);
Pipeline pipeline = Pipeline.create(options);
pipeline.apply(GenerateSequence.from(0));
flinkEnv.translate(pipeline);
assertThat(options.isStreaming(), Matchers.is(true));
} |
@Override
public void onHeartbeatSuccess(ShareGroupHeartbeatResponseData response) {
if (response.errorCode() != Errors.NONE.code()) {
String errorMessage = String.format(
"Unexpected error in Heartbeat response. Expected no error, but received: %s",
Errors.forCode(response.errorCode())
);
throw new IllegalArgumentException(errorMessage);
}
MemberState state = state();
if (state == MemberState.LEAVING) {
log.debug("Ignoring heartbeat response received from broker. Member {} with epoch {} is " +
"already leaving the group.", memberId, memberEpoch);
return;
}
if (state == MemberState.UNSUBSCRIBED && maybeCompleteLeaveInProgress()) {
log.debug("Member {} with epoch {} received a successful response to the heartbeat " +
"to leave the group and completed the leave operation. ", memberId, memberEpoch);
return;
}
if (isNotInGroup()) {
log.debug("Ignoring heartbeat response received from broker. Member {} is in {} state" +
" so it's not a member of the group. ", memberId, state);
return;
}
// Update the group member id label in the client telemetry reporter if the member id has
// changed. Initially the member id is empty, and it is updated when the member joins the
// group. This is done here to avoid updating the label on every heartbeat response. Also
// check if the member id is null, as the schema defines it as nullable.
if (response.memberId() != null && !response.memberId().equals(memberId)) {
clientTelemetryReporter.ifPresent(reporter -> reporter.updateMetricsLabels(
Collections.singletonMap(ClientTelemetryProvider.GROUP_MEMBER_ID, response.memberId())));
}
this.memberId = response.memberId();
updateMemberEpoch(response.memberEpoch());
ShareGroupHeartbeatResponseData.Assignment assignment = response.assignment();
if (assignment != null) {
if (!state.canHandleNewAssignment()) {
// New assignment received but member is in a state where it cannot take new
// assignments (ex. preparing to leave the group)
log.debug("Ignoring new assignment {} received from server because member is in {} state.",
assignment, state);
return;
}
Map<Uuid, SortedSet<Integer>> newAssignment = new HashMap<>();
assignment.topicPartitions().forEach(topicPartition -> newAssignment.put(topicPartition.topicId(), new TreeSet<>(topicPartition.partitions())));
processAssignmentReceived(newAssignment);
}
} | @Test
public void testSameAssignmentReconciledAgainWithMissingTopic() {
ShareMembershipManager membershipManager = createMemberInStableState();
Uuid topic1 = Uuid.randomUuid();
Uuid topic2 = Uuid.randomUuid();
final ShareGroupHeartbeatResponseData.Assignment assignment1 = new ShareGroupHeartbeatResponseData.Assignment()
.setTopicPartitions(Arrays.asList(
new ShareGroupHeartbeatResponseData.TopicPartitions().setTopicId(topic1).setPartitions(Collections.singletonList(0)),
new ShareGroupHeartbeatResponseData.TopicPartitions().setTopicId(topic2).setPartitions(Collections.singletonList(0))
));
final ShareGroupHeartbeatResponseData.Assignment assignment2 = new ShareGroupHeartbeatResponseData.Assignment()
.setTopicPartitions(Arrays.asList(
new ShareGroupHeartbeatResponseData.TopicPartitions().setTopicId(topic1).setPartitions(Arrays.asList(0, 1)),
new ShareGroupHeartbeatResponseData.TopicPartitions().setTopicId(topic2).setPartitions(Collections.singletonList(0))
));
when(metadata.topicNames()).thenReturn(Collections.singletonMap(topic1, "topic1"));
// Receive assignment - full reconciliation triggered
// stay in RECONCILING state, since an unresolved topic is assigned
membershipManager.onHeartbeatSuccess(createShareGroupHeartbeatResponse(assignment1).data());
assertEquals(MemberState.RECONCILING, membershipManager.state());
membershipManager.poll(time.milliseconds());
verifyReconciliationTriggeredAndCompleted(membershipManager,
Collections.singletonList(new TopicIdPartition(topic1, new TopicPartition("topic1", 0)))
);
membershipManager.onHeartbeatRequestGenerated();
assertEquals(MemberState.RECONCILING, membershipManager.state());
clearInvocations(membershipManager);
// Receive extended assignment - assignment received but no reconciliation triggered
membershipManager.onHeartbeatSuccess(createShareGroupHeartbeatResponse(assignment2).data());
assertEquals(MemberState.RECONCILING, membershipManager.state());
verifyReconciliationNotTriggered(membershipManager);
// Receive original assignment again - full reconciliation not triggered but assignment is acked again
membershipManager.onHeartbeatSuccess(createShareGroupHeartbeatResponse(assignment1).data());
assertEquals(MemberState.RECONCILING, membershipManager.state());
membershipManager.poll(time.milliseconds());
assertEquals(MemberState.ACKNOWLEDGING, membershipManager.state());
verifyReconciliationNotTriggered(membershipManager);
assertEquals(Collections.singletonMap(topic1, mkSortedSet(0)), membershipManager.currentAssignment().partitions);
assertEquals(mkSet(topic2), membershipManager.topicsAwaitingReconciliation());
} |
public static URI toURI(String name) {
try {
return new URI(name);
} catch (URISyntaxException e) {
throw new RuntimeException(e);
}
} | @Test
void testToUri() {
Assertions.assertThrows(RuntimeException.class, () -> ClassUtils.toURI("#xx_abc#hello"));
} |
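// Submits a validate-auth-config request to the plugin, using the message
// converter for the resolved extension version to build the request body and
// to parse the validation result from the response.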
public ValidationResult validateAuthConfig(final String pluginId, final Map<String, String> configuration) {
return pluginRequestHelper.submitRequest(pluginId, REQUEST_VALIDATE_AUTH_CONFIG, new DefaultPluginInteractionCallback<>() {
@Override
public String requestBody(String resolvedExtensionVersion) {
return getMessageConverter(resolvedExtensionVersion).validatePluginConfigurationRequestBody(configuration);
}
@Override
public ValidationResult onSuccess(String responseBody, Map<String, String> responseHeaders, String resolvedExtensionVersion) {
return getMessageConverter(resolvedExtensionVersion).getPluginConfigurationValidationResultFromResponseBody(responseBody);
}
});
} | @Test
void shouldTalkToPlugin_To_ValidateAuthConfig() {
String responseBody = "[{\"message\":\"Url must not be blank.\",\"key\":\"Url\"},{\"message\":\"SearchBase must not be blank.\",\"key\":\"SearchBase\"}]";
when(pluginManager.submitTo(eq(PLUGIN_ID), eq(AUTHORIZATION_EXTENSION), requestArgumentCaptor.capture())).thenReturn(new DefaultGoPluginApiResponse(SUCCESS_RESPONSE_CODE, responseBody));
ValidationResult validationResult = authorizationExtension.validateAuthConfig(PLUGIN_ID, Collections.emptyMap());
assertRequest(requestArgumentCaptor.getValue(), AUTHORIZATION_EXTENSION, "2.0", REQUEST_VALIDATE_AUTH_CONFIG, "{}");
assertThat(validationResult.isSuccessful()).isEqualTo(false);
assertThat(validationResult.getErrors()).hasSize(2)
.contains(
new ValidationError("Url", "Url must not be blank."),
new ValidationError("SearchBase", "SearchBase must not be blank.")
);
} |
@Override
public void updateNode(ResourceId path, DataNode node) {
super.updateNode(toAbsoluteId(path), node);
} | @Test
public void testUpdateNode() {
view.updateNode(relIntf, node);
assertTrue(ResourceIds.isPrefix(rid, realPath));
} |
@PublicEvolving
public static <IN, OUT> TypeInformation<OUT> getMapReturnTypes(
MapFunction<IN, OUT> mapInterface, TypeInformation<IN> inType) {
return getMapReturnTypes(mapInterface, inType, null, false);
} | @SuppressWarnings({"rawtypes", "unchecked"})
@Test
void testFunctionDependingOnInputWithCustomTupleInput() {
IdentityMapper<SameTypeVariable<String>> function =
new IdentityMapper<SameTypeVariable<String>>();
TypeInformation<?> ti =
TypeExtractor.getMapReturnTypes(
function,
(TypeInformation)
TypeInformation.of(new TypeHint<Tuple2<String, String>>() {}));
assertThat(ti.isTupleType()).isTrue();
assertThat(ti.getArity()).isEqualTo(2);
TupleTypeInfo<?> tti = (TupleTypeInfo<?>) ti;
assertThat(tti.getTypeAt(0)).isEqualTo(BasicTypeInfo.STRING_TYPE_INFO);
assertThat(tti.getTypeAt(1)).isEqualTo(BasicTypeInfo.STRING_TYPE_INFO);
} |
@SuppressWarnings({"unchecked", "rawtypes"})
@Override
public @Nullable <InputT> TransformEvaluator<InputT> forApplication(
AppliedPTransform<?, ?, ?> application, CommittedBundle<?> inputBundle) throws IOException {
return createEvaluator((AppliedPTransform) application);
} | @Test
public void boundedSourceEvaluatorClosesReader() throws Exception {
TestSource<Long> source = new TestSource<>(BigEndianLongCoder.of(), 1L, 2L, 3L);
PCollection<Long> pcollection = p.apply(Read.from(source));
AppliedPTransform<?, ?, ?> sourceTransform = DirectGraphs.getProducer(pcollection);
UncommittedBundle<Long> output = bundleFactory.createBundle(pcollection);
when(context.createBundle(pcollection)).thenReturn(output);
TransformEvaluator<BoundedSourceShard<Long>> evaluator =
factory.forApplication(
sourceTransform, bundleFactory.createRootBundle().commit(Instant.now()));
evaluator.processElement(WindowedValue.valueInGlobalWindow(BoundedSourceShard.of(source)));
evaluator.finishBundle();
CommittedBundle<Long> committed = output.commit(Instant.now());
assertThat(committed.getElements(), containsInAnyOrder(gw(2L), gw(3L), gw(1L)));
assertThat(TestSource.readerClosed, is(true));
} |
public PDDocument createPDFFromText( Reader text ) throws IOException
{
PDDocument doc = new PDDocument();
createPDFFromText(doc, text);
return doc;
} | @Test
void testCreateEmptyPdf() throws IOException
{
TextToPDF pdfCreator = new TextToPDF();
PDDocument pdfDoc;
try (StringReader reader = new StringReader(""))
{
pdfDoc = pdfCreator.createPDFFromText(reader);
}
// In order for the PDF document to be openable by Adobe Reader, it needs
// to have some pages in it. So we'll check that.
int pageCount = pdfDoc.getNumberOfPages();
assertTrue(pageCount > 0, "All Pages was unexpectedly zero.");
assertEquals(1, pageCount, "Wrong number of pages.");
pdfDoc.close();
} |
public Optional<UpdateCenter> getUpdateCenter() {
return getUpdateCenter(false);
} | @Test
public void forceRefresh() throws Exception {
when(reader.readString(new URI(URL_DEFAULT_VALUE), StandardCharsets.UTF_8)).thenReturn("sonar.versions=2.2,2.3");
underTest.getUpdateCenter();
underTest.getUpdateCenter(true);
verify(reader, times(2)).readString(new URI(URL_DEFAULT_VALUE), StandardCharsets.UTF_8);
} |
@Override
public long getDelay(TimeUnit unit) {
return original.getDelay(unit);
} | @Test
public void getDelay() {
ScheduledFuture<Integer> future = new DelegatingScheduledFutureStripper<>(
scheduler.schedule(new SimpleCallableTestTask(), 0, TimeUnit.SECONDS));
//getDelay returns the remaining delay; zero or negative values indicate that the delay has already elapsed
//If JVM pauses for GC we may get a negative value
assertTrue(future.getDelay(TimeUnit.SECONDS) <= 0);
future = new DelegatingScheduledFutureStripper<>(
scheduler.schedule(new SimpleCallableTestTask(), 10, TimeUnit.SECONDS));
assertTrue(future.getDelay(TimeUnit.SECONDS) <= 10);
} |
@Override
public CanEmitBatchOfRecordsChecker getCanEmitBatchOfRecords() {
return () -> false;
} | @TestTemplate
void testCanEmitBatchOfRecords() throws Exception {
AvailabilityProvider.AvailabilityHelper availabilityHelper =
new AvailabilityProvider.AvailabilityHelper();
try (StreamTaskMailboxTestHarness<String> testHarness =
new StreamTaskMailboxTestHarnessBuilder<>(
MultipleInputStreamTask::new, BasicTypeInfo.STRING_TYPE_INFO)
.addInput(BasicTypeInfo.STRING_TYPE_INFO)
.addInput(BasicTypeInfo.INT_TYPE_INFO)
.addInput(BasicTypeInfo.DOUBLE_TYPE_INFO)
.addAdditionalOutput(
new ResultPartitionWriterWithAvailabilityHelper(availabilityHelper))
.setupOperatorChain(new MapToStringMultipleInputOperatorFactory(3))
.finishForSingletonOperatorChain(IntSerializer.INSTANCE)
.build()) {
StreamTask.CanEmitBatchOfRecordsChecker canEmitBatchOfRecordsChecker =
testHarness.streamTask.getCanEmitBatchOfRecords();
testHarness.processAll();
availabilityHelper.resetAvailable();
assertThat(canEmitBatchOfRecordsChecker.check()).isFalse();
// The canEmitBatchOfRecordsChecker should return false after the record
// writer becomes unavailable.
availabilityHelper.resetUnavailable();
assertThat(canEmitBatchOfRecordsChecker.check()).isFalse();
// Restore record writer to available
availabilityHelper.resetAvailable();
assertThat(canEmitBatchOfRecordsChecker.check()).isFalse();
// The canEmitBatchOfRecordsChecker should return false after a mail is added to the mailbox.
testHarness.streamTask.mainMailboxExecutor.execute(() -> {}, "mail");
assertThat(canEmitBatchOfRecordsChecker.check()).isFalse();
testHarness.processAll();
assertThat(canEmitBatchOfRecordsChecker.check()).isFalse();
}
} |
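// Renders a percentage as an HTML bar of block images: an opening cap, then
// full blocks, at most one partial block, empty-block padding up to the fixed
// width, and a closing cap; each image's title carries the formatted percent.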
static String toBar(double percentValue) { // NOPMD
final double myPercent = Math.max(Math.min(percentValue, 100d), 0d);
final StringBuilder sb = new StringBuilder();
final String body = "<img src=''?resource=bar/rb_{0}.gif'' alt=''+'' title=''"
+ I18N.createPercentFormat().format(myPercent) + "%'' />";
final int fullBlockCount = (int) Math.floor(myPercent / (UNIT_SIZE * PARTIAL_BLOCKS));
final int partialBlockIndex = (int) Math
.floor((myPercent - fullBlockCount * UNIT_SIZE * PARTIAL_BLOCKS) / UNIT_SIZE);
sb.append(MessageFormat.format(body,
fullBlockCount > 0 || partialBlockIndex > 0 ? "a" : "a0"));
final String fullBody = MessageFormat.format(body, PARTIAL_BLOCKS);
for (int i = 0; i < fullBlockCount; i++) {
sb.append(fullBody);
}
if (partialBlockIndex > 0) {
final String partialBody = MessageFormat.format(body, partialBlockIndex);
sb.append(partialBody);
}
final int emptyBlocks = FULL_BLOCKS - fullBlockCount - (partialBlockIndex > 0 ? 1 : 0);
final String emptyBody = MessageFormat.format(body, 0);
for (int i = 0; i < emptyBlocks; i++) {
sb.append(emptyBody);
}
sb.append(MessageFormat.format(body, fullBlockCount == FULL_BLOCKS ? "b" : "b0"));
return sb.toString();
} | @Test
public void testToBar() {
assertNotNull("toBar", HtmlJavaInformationsReport.toBar(0));
assertNotNull("toBar", HtmlJavaInformationsReport.toBar(1));
assertNotNull("toBar", HtmlJavaInformationsReport.toBar(10));
assertNotNull("toBar", HtmlJavaInformationsReport.toBar(15));
assertNotNull("toBarWithAlert", HtmlJavaInformationsReport.toBarWithAlert(10, "detail"));
assertNotNull("toBarWithAlert", HtmlJavaInformationsReport.toBarWithAlert(100, "detail"));
assertNotNull("toBarWithAlert", HtmlJavaInformationsReport.toBarWithAlert(100, null));
} |
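// Reads the JSON request body, extracts the id and text fields plus an
// optional timeoutMillis (falling back to the default), and delegates to the
// synchronous profile overload.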
@PUT
@Consumes("application/json")
@Produces("application/json")
@Path("profile")
public Map<String, Object> profile(InputStream is) throws Exception {
JsonNode node = null;
try (BufferedReader reader = new BufferedReader(
new InputStreamReader(is, StandardCharsets.UTF_8))) {
node = new ObjectMapper().readTree(reader);
}
String id = node.get(ID).asText();
String text = node.get(TEXT).asText();
long timeoutMillis = node.has("timeoutMillis") ? node.get("timeoutMillis").asLong() :
DEFAULT_TIMEOUT_MILLIS;
return profile(id, text, timeoutMillis);
} | @Test
public void testBasicProfile() throws Exception {
Map<String, String> request = new HashMap<>();
request.put(TikaEvalResource.ID, "1");
request.put(TikaEvalResource.TEXT, "the quick brown fox jumped qwertyuiop");
Response response = profile(request);
Map<String, Object> results = deserialize(response);
assertEquals(6, (int)results.get(TikaEvalMetadataFilter.NUM_TOKENS.getName()));
assertEquals(0.166, (double)results.get(TikaEvalMetadataFilter.OUT_OF_VOCABULARY.getName()),
0.01);
assertEquals("eng", (String)results.get(TikaEvalMetadataFilter.LANGUAGE.getName()));
} |
@Override
public boolean betterThan(Num criterionValue1, Num criterionValue2) {
return lessIsBetter ? criterionValue1.isLessThan(criterionValue2)
: criterionValue1.isGreaterThan(criterionValue2);
} | @Test
public void betterThanWithLessIsNotBetter() {
AnalysisCriterion criterion = getCriterion(new ProfitLossCriterion());
assertTrue(criterion.betterThan(numOf(5000), numOf(4500)));
assertFalse(criterion.betterThan(numOf(4500), numOf(5000)));
} |
@Override
public String getAuthenticationMethodName() {
return PostgreSQLAuthenticationMethod.PASSWORD.getMethodName();
} | @Test
void assertAuthenticationMethodName() {
assertThat(new PostgreSQLPasswordAuthenticator().getAuthenticationMethodName(), is("password"));
} |
public DateTimeStamp minus(double offsetInDecimalSeconds) {
return add(-offsetInDecimalSeconds);
} | @Test
void testNanWithMinusNonZero() {
assertThrows(IllegalArgumentException.class,
() -> { new DateTimeStamp("2018-04-04T10:10:00.586-0100").minus(Double.NaN); },
"IllegalAccess Not Thrown");
} |
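// A single result set streams through as-is unless aggregations need
// rewriting; otherwise the column label/index map is attached to the select
// context, the results are merged, and decorators (e.g. pagination) are
// applied on top.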
@Override
public MergedResult merge(final List<QueryResult> queryResults, final SQLStatementContext sqlStatementContext,
final ShardingSphereDatabase database, final ConnectionContext connectionContext) throws SQLException {
if (1 == queryResults.size() && !isNeedAggregateRewrite(sqlStatementContext)) {
return new IteratorStreamMergedResult(queryResults);
}
Map<String, Integer> columnLabelIndexMap = getColumnLabelIndexMap(queryResults.get(0));
SelectStatementContext selectStatementContext = (SelectStatementContext) sqlStatementContext;
selectStatementContext.setIndexes(columnLabelIndexMap);
MergedResult mergedResult = build(queryResults, selectStatementContext, columnLabelIndexMap, database);
return decorate(queryResults, selectStatementContext, mergedResult);
} | @Test
void assertBuildIteratorStreamMergedResultWithOracleLimit() throws SQLException {
final ShardingDQLResultMerger resultMerger = new ShardingDQLResultMerger(TypedSPILoader.getService(DatabaseType.class, "Oracle"));
final ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS);
OracleSelectStatement selectStatement = (OracleSelectStatement) buildSelectStatement(new OracleSelectStatement());
selectStatement.setProjections(new ProjectionsSegment(0, 0));
WhereSegment whereSegment = mock(WhereSegment.class);
BinaryOperationExpression binaryOperationExpression = mock(BinaryOperationExpression.class);
when(binaryOperationExpression.getLeft()).thenReturn(new ColumnSegment(0, 0, new IdentifierValue("row_id")));
when(binaryOperationExpression.getRight()).thenReturn(new LiteralExpressionSegment(0, 0, 1L));
when(binaryOperationExpression.getOperator()).thenReturn(">=");
when(whereSegment.getExpr()).thenReturn(binaryOperationExpression);
SubqueryTableSegment subqueryTableSegment = mock(SubqueryTableSegment.class);
SubquerySegment subquerySegment = mock(SubquerySegment.class);
SelectStatement subSelectStatement = mock(MySQLSelectStatement.class);
ProjectionsSegment subProjectionsSegment = mock(ProjectionsSegment.class);
TopProjectionSegment topProjectionSegment = mock(TopProjectionSegment.class);
when(topProjectionSegment.getAlias()).thenReturn("row_id");
when(subProjectionsSegment.getProjections()).thenReturn(Collections.singletonList(topProjectionSegment));
when(subSelectStatement.getProjections()).thenReturn(subProjectionsSegment);
when(subquerySegment.getSelect()).thenReturn(subSelectStatement);
when(subqueryTableSegment.getSubquery()).thenReturn(subquerySegment);
selectStatement.setFrom(subqueryTableSegment);
selectStatement.setWhere(whereSegment);
SelectStatementContext selectStatementContext = new SelectStatementContext(createShardingSphereMetaData(database), null, selectStatement, DefaultDatabase.LOGIC_NAME, Collections.emptyList());
MergedResult actual = resultMerger.merge(createQueryResults(), selectStatementContext, createDatabase(), mock(ConnectionContext.class));
assertThat(actual, instanceOf(RowNumberDecoratorMergedResult.class));
assertThat(((RowNumberDecoratorMergedResult) actual).getMergedResult(), instanceOf(IteratorStreamMergedResult.class));
} |
public static File generate(String content, int width, int height, File targetFile) {
String extName = FileUtil.extName(targetFile);
switch (extName) {
case QR_TYPE_SVG:
String svg = generateAsSvg(content, new QrConfig(width, height));
FileUtil.writeString(svg, targetFile, StandardCharsets.UTF_8);
break;
case QR_TYPE_TXT:
String txt = generateAsAsciiArt(content, new QrConfig(width, height));
FileUtil.writeString(txt, targetFile, StandardCharsets.UTF_8);
break;
default:
final BufferedImage image = generate(content, width, height);
ImgUtil.write(image, targetFile);
break;
}
return targetFile;
} | @Test
public void generateTest() {
final BufferedImage image = QrCodeUtil.generate("https://hutool.cn/", 300, 300);
Assert.notNull(image);
} |
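// Moves the buffer to FINISHED unless it is already in a terminal state, then
// completes any pending read with an empty result outside the lock.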
@Override
public void destroy()
{
PendingRead pendingRead;
synchronized (this) {
if (state.setIf(FINISHED, oldState -> !oldState.isTerminal())) {
close();
}
pendingRead = this.pendingRead;
this.pendingRead = null;
}
if (pendingRead != null) {
pendingRead.completeResultFutureWithEmpty();
}
} | @Test
public void testAddAfterDestroy()
{
SpoolingOutputBuffer buffer = createSpoolingOutputBuffer();
for (int i = 0; i < 2; i++) {
addPage(buffer, createPage(i));
}
compareTotalBuffered(buffer, 2);
buffer.destroy();
// nothing in buffer
compareTotalBuffered(buffer, 0);
// should not be added
addPage(buffer, createPage(2));
compareTotalBuffered(buffer, 0);
assertBufferResultEquals(TYPES, getBufferResult(buffer, BUFFER_ID, 0, sizeOfPages(3), NO_WAIT), emptyResults(TASK_INSTANCE_ID, 0, true));
} |
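// Parses the expression with Spring's SpEL parser, installs an
// application-context-backed bean resolver on its evaluation context, and
// wraps the result in a SpringELExpression.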
@Override
public Expression createExpression(String expression) {
org.springframework.expression.Expression defaultExpression = parser.parseExpression(expression);
EvaluationContext evaluationContext = ((SpelExpression)defaultExpression).getEvaluationContext();
((StandardEvaluationContext)evaluationContext).setBeanResolver(new AppContextBeanResolver());
return new SpringELExpression(defaultExpression);
} | @Test
public void testCreateExpression() {
SpringELExpressionFactory factory = new SpringELExpressionFactory(null);
Assertions.assertNotNull(factory.createExpression("'Hello World'.concat('!')"));
} |
@Override
public Stream<MappingField> resolveAndValidateFields(
boolean isKey,
List<MappingField> userFields,
Map<String, String> options,
InternalSerializationService serializationService
) {
Map<QueryPath, MappingField> fieldsByPath = extractFields(userFields, isKey);
PortableId portableId = getPortableId(fieldsByPath, options, isKey);
ClassDefinition classDefinition = serializationService.getPortableContext()
.lookupClassDefinition(portableId);
// Fallback option for the case when the portable objects have not been
// de/serialized yet and user fields were not provided explicitly. In this
// case we try to manually create a Portable instance and register its
// ClassDefinition.
if (userFields.isEmpty() && classDefinition == null) {
SerializationServiceV1 ss = (SerializationServiceV1) serializationService;
// Try to create a Portable instance with the default constructor,
// register its ClassDefinition, and throw object away.
var tempPortableObj = ss.getPortableSerializer()
.createNewPortableInstance(portableId.getFactoryId(), portableId.getClassId());
if (tempPortableObj != null) {
try {
ss.getPortableContext().lookupOrRegisterClassDefinition(tempPortableObj);
} catch (Exception e) {
// If the default constructor doesn't make Portable fields non-null, we're done:
// we can't register the class, so we interrupt the execution with the exception.
throw QueryException.error("Cannot create mapping for Portable type. "
+ "Please, provide the explicit definition for all columns.");
}
classDefinition = serializationService.getPortableContext().lookupClassDefinition(portableId);
}
}
return userFields.isEmpty()
? resolveFields(isKey, classDefinition)
: resolveAndValidateFields(isKey, fieldsByPath, classDefinition);
} | @Test
@Parameters({
"true, __key",
"false, this"
})
public void when_typeMismatchBetweenDeclaredAndClassDefinitionField_then_throws(boolean key, String prefix) {
InternalSerializationService ss = new DefaultSerializationServiceBuilder().build();
ClassDefinition classDefinition =
new ClassDefinitionBuilder(1, 2, 3)
.addIntField("field")
.build();
ss.getPortableContext().registerClassDefinition(classDefinition);
Map<String, String> options = ImmutableMap.of(
(key ? OPTION_KEY_FACTORY_ID : OPTION_VALUE_FACTORY_ID), String.valueOf(classDefinition.getFactoryId()),
(key ? OPTION_KEY_CLASS_ID : OPTION_VALUE_CLASS_ID), String.valueOf(classDefinition.getClassId()),
(key ? OPTION_KEY_CLASS_VERSION : OPTION_VALUE_CLASS_VERSION), String.valueOf(classDefinition.getVersion())
);
assertThatThrownBy(() -> INSTANCE.resolveAndValidateFields(
key,
singletonList(field("field", QueryDataType.VARCHAR, prefix + ".field")),
options,
ss
)).isInstanceOf(QueryException.class)
.hasMessageContaining("Mismatch between declared and resolved type: field");
} |
public int getPort() {
return port;
} | @Test
void testDefaultPortIsSet() throws Exception {
try (CouchDbEndpoint endpoint
= new CouchDbEndpoint("couchdb:http://localhost/db", "http://localhost/db", new CouchDbComponent())) {
assertEquals(CouchDbEndpoint.DEFAULT_PORT, endpoint.getPort());
}
} |
public static String jsonFromMap(Map<String, Object> jsonData) {
try {
JsonDocument json = new JsonDocument();
json.startGroup();
for (String key : jsonData.keySet()) {
Object data = jsonData.get(key);
if (data instanceof Map) {
/* it's a nested map, so we'll recursively add the JSON of this map to the current JSON */
json.addValue(key, jsonFromMap((Map<String, Object>) data));
} else if (data instanceof Object[]) {
/* it's an object array, so we'll iterate the elements and put them all in here */
json.addValue(key, "[" + stringArrayFromObjectArray((Object[]) data) + "]");
} else if (data instanceof Collection) {
/* it's a collection, so we'll iterate the elements and put them all in here */
json.addValue(key, "[" + stringArrayFromObjectArray(((Collection) data).toArray()) + "]");
} else if (data instanceof int[]) {
/* it's an int array, so we'll get the string representation */
String intArray = Arrays.toString((int[]) data);
/* remove whitespace */
intArray = intArray.replaceAll(" ", "");
json.addValue(key, intArray);
} else if (data instanceof JsonCapableObject) {
json.addValue(key, jsonFromMap(((JsonCapableObject) data).jsonMap()));
} else {
/* all other objects we assume we are to just put the string value in */
json.addValue(key, String.valueOf(data));
}
}
json.endGroup();
logger.debug("created json from map => {}", json);
return json.toString();
} catch (Exception e) {
logger.error("Could not create JSON from Map. ", e);
return "{}";
}
} | @Test
void testSimpleOne() {
Map<String, Object> jsonData = new LinkedHashMap<String, Object>();
jsonData.put("myKey", "myValue");
String json = JsonUtility.jsonFromMap(jsonData);
String expected = "{\"myKey\":\"myValue\"}";
assertEquals(expected, json);
} |
public AbilityStatus isCurrentNodeAbilityRunning(AbilityKey abilityKey) {
Map<String, Boolean> abilities = currentNodeAbilities.get(abilityKey.getMode());
if (abilities != null) {
Boolean support = abilities.get(abilityKey.getName());
if (support != null) {
return support ? AbilityStatus.SUPPORTED : AbilityStatus.NOT_SUPPORTED;
}
}
return AbilityStatus.UNKNOWN;
} | @Test
void testIsCurrentNodeAbilityRunning() {
assertEquals(AbilityStatus.SUPPORTED, abilityControlManager.isCurrentNodeAbilityRunning(AbilityKey.SERVER_TEST_1));
assertEquals(AbilityStatus.NOT_SUPPORTED, abilityControlManager.isCurrentNodeAbilityRunning(AbilityKey.SERVER_TEST_2));
assertEquals(AbilityStatus.UNKNOWN, abilityControlManager.isCurrentNodeAbilityRunning(AbilityKey.SDK_CLIENT_TEST_1));
} |
public static SpiImplPushExecutorHolder getInstance() {
return INSTANCE;
} | @Test
void testGetInstance() {
SpiImplPushExecutorHolder instance = SpiImplPushExecutorHolder.getInstance();
assertNotNull(instance);
} |
@Override
public void registerDeleted(Weapon weapon) {
LOGGER.info("Registering {} for delete in context.", weapon.getName());
register(weapon, UnitActions.DELETE.getActionValue());
} | @Test
void shouldSaveDeletedStudentWithoutWritingToDb() {
armsDealer.registerDeleted(weapon1);
armsDealer.registerDeleted(weapon2);
assertEquals(2, context.get(UnitActions.DELETE.getActionValue()).size());
verifyNoMoreInteractions(weaponDatabase);
} |
public FloatArrayAsIterable usingExactEquality() {
return new FloatArrayAsIterable(EXACT_EQUALITY_CORRESPONDENCE, iterableSubject());
} | @Test
public void usingExactEquality_containsNoneOf_primitiveFloatArray_success() {
assertThat(array(1.0f, 2.0f, 3.0f))
.usingExactEquality()
.containsNoneOf(array(99.99f, 999.999f));
} |
public FloatArrayAsIterable usingExactEquality() {
return new FloatArrayAsIterable(EXACT_EQUALITY_CORRESPONDENCE, iterableSubject());
} | @Test
public void usingExactEquality_contains_otherTypes_bigIntegerNotSupported() {
BigInteger expected = BigInteger.valueOf(2);
float[] actual = array(1.0f, 2.0f, 3.0f);
expectFailureWhenTestingThat(actual).usingExactEquality().contains(expected);
assertFailureKeys(
"value of",
"expected to contain",
"testing whether",
"but was",
"additionally, one or more exceptions were thrown while comparing elements",
"first exception");
assertFailureValue("expected to contain", "2");
assertThatFailure()
.factValue("first exception")
.startsWith(
"compare("
+ actual[0]
+ ", "
+ expected
+ ") threw java.lang.IllegalArgumentException");
assertThatFailure()
.factValue("first exception")
.contains(
"Expected value in assertion using exact float equality was of unsupported type "
+ BigInteger.class
+ " (it may not have an exact float representation)");
} |
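// Converts a 1-based page number into a repeated-letter label (A..Z, AA..ZZ,
// AAA..): the letter is picked by (i-1) % 26 and repeated once more for every
// completed block of 26 pages.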
public static String asAlphaNumeric(int i) {
StringBuilder stringBuilder = new StringBuilder();
int index = i % MAX;
int ratio = i / MAX;
if (index == 0) {
ratio--;
index = MAX;
}
for (int j = 0; j <= ratio; j++) {
stringBuilder.append(ALPHABET[index - 1]);
}
return stringBuilder.toString();
} | @Test
public void testAlphaUpper() {
assertEquals("A", AutoPageNumberUtils.asAlphaNumeric(1));
assertEquals("Z", AutoPageNumberUtils.asAlphaNumeric(26));
assertEquals("AA", AutoPageNumberUtils.asAlphaNumeric(27));
assertEquals("ZZ", AutoPageNumberUtils.asAlphaNumeric(52));
assertEquals("AAA", AutoPageNumberUtils.asAlphaNumeric(53));
assertEquals("ZZZ", AutoPageNumberUtils.asAlphaNumeric(78));
} |
public Analysis analyze(Statement statement)
{
return analyze(statement, false);
} | @Test
public void testGrouping()
{
analyze("SELECT a, b, sum(c), grouping(a, b) FROM t1 GROUP BY GROUPING SETS ((a), (a, b))");
analyze("SELECT grouping(t1.a) FROM t1 GROUP BY a");
analyze("SELECT grouping(b) FROM t1 GROUP BY t1.b");
analyze("SELECT grouping(a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a) FROM t1 GROUP BY a");
} |
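// Rebuilds (and caches) the query string from the multi-value parameter map,
// optionally URL-encoding keys and values with the given charset; a leading
// '&' is written before every pair and trimmed once at the end.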
protected String generateQueryString(MultiValuedTreeMap<String, String> parameters, boolean encode, String encodeCharset)
throws ServletException {
if (parameters == null || parameters.isEmpty()) {
return null;
}
if (queryString != null) {
return queryString;
}
StringBuilder queryStringBuilder = new StringBuilder();
try {
for (String key : parameters.keySet()) {
for (String val : parameters.get(key)) {
queryStringBuilder.append("&");
if (encode) {
queryStringBuilder.append(URLEncoder.encode(key, encodeCharset));
} else {
queryStringBuilder.append(key);
}
queryStringBuilder.append("=");
if (val != null) {
if (encode) {
queryStringBuilder.append(URLEncoder.encode(val, encodeCharset));
} else {
queryStringBuilder.append(val);
}
}
}
}
} catch (UnsupportedEncodingException e) {
throw new ServletException("Invalid charset passed for query string encoding", e);
}
queryString = queryStringBuilder.toString();
queryString = queryString.substring(1); // remove the first & - faster to do it here than adding logic in the Lambda
return queryString;
} | @Test
void queryString_generateQueryString_validQuery() {
AwsProxyHttpServletRequest request = new AwsProxyHttpServletRequest(queryString, mockContext, null, config);
String parsedString = null;
try {
parsedString = request.generateQueryString(request.getAwsProxyRequest().getMultiValueQueryStringParameters(), true, config.getUriEncoding());
} catch (ServletException e) {
e.printStackTrace();
fail("Could not generate query string");
}
assertTrue(parsedString.contains("one=two"));
assertTrue(parsedString.contains("three=four"));
assertTrue(parsedString.contains("&") && parsedString.indexOf("&") > 0 && parsedString.indexOf("&") < parsedString.length());
} |
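// Drains completed fetches from the buffer until maxPollRecords records are
// collected: initializes each completed fetch on first use, pulls record
// batches from the in-line fetch, and suppresses a KafkaException when
// records were already collected so the partial fetch can be returned.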
public ShareFetch<K, V> collect(final ShareFetchBuffer fetchBuffer) {
ShareFetch<K, V> fetch = ShareFetch.empty();
int recordsRemaining = fetchConfig.maxPollRecords;
try {
while (recordsRemaining > 0) {
final ShareCompletedFetch nextInLineFetch = fetchBuffer.nextInLineFetch();
if (nextInLineFetch == null || nextInLineFetch.isConsumed()) {
final ShareCompletedFetch completedFetch = fetchBuffer.peek();
if (completedFetch == null) {
break;
}
if (!completedFetch.isInitialized()) {
try {
fetchBuffer.setNextInLineFetch(initialize(completedFetch));
} catch (Exception e) {
if (fetch.isEmpty()) {
fetchBuffer.poll();
}
throw e;
}
} else {
fetchBuffer.setNextInLineFetch(completedFetch);
}
fetchBuffer.poll();
} else {
final TopicIdPartition tp = nextInLineFetch.partition;
ShareInFlightBatch<K, V> batch = nextInLineFetch.fetchRecords(
deserializers,
recordsRemaining,
fetchConfig.checkCrcs);
if (batch.isEmpty()) {
nextInLineFetch.drain();
}
recordsRemaining -= batch.numRecords();
fetch.add(tp, batch);
if (batch.getException() != null) {
throw batch.getException();
} else if (batch.hasCachedException()) {
break;
}
}
}
} catch (KafkaException e) {
if (fetch.isEmpty()) {
throw e;
}
}
return fetch;
} | @Test
public void testFetchWithTopicAuthorizationFailed() {
buildDependencies();
subscribeAndAssign(topicAPartition0);
ShareCompletedFetch completedFetch = completedFetchBuilder
.error(Errors.TOPIC_AUTHORIZATION_FAILED)
.build();
fetchBuffer.add(completedFetch);
assertThrows(TopicAuthorizationException.class, () -> fetchCollector.collect(fetchBuffer));
} |
@Override
public void set(File file, String view, String attribute, Object value, boolean create) {
if (supports(attribute)) {
checkNotCreate(view, attribute, create);
file.setAttribute("dos", attribute, checkType(view, attribute, value, Boolean.class));
}
} | @Test
public void testSet() {
for (String attribute : DOS_ATTRIBUTES) {
assertSetAndGetSucceeds(attribute, true);
assertSetFailsOnCreate(attribute, true);
}
} |
@Override
public FileClient getMasterFileClient() {
return clientCache.getUnchecked(CACHE_MASTER_ID);
} | @Test
public void testGetMasterFileClient() {
// mock data
FileConfigDO fileConfig = randomFileConfigDO().setMaster(true);
fileConfigMapper.insert(fileConfig);
// prepare parameters
Long id = fileConfig.getId();
// mock obtaining the Client
FileClient fileClient = new LocalFileClient(id, new LocalFileClientConfig());
when(fileClientFactory.getFileClient(eq(fileConfig.getId()))).thenReturn(fileClient);
// invoke and assert
assertSame(fileClient, fileConfigService.getMasterFileClient());
// assert the cache
verify(fileClientFactory).createOrUpdateFileClient(eq(fileConfig.getId()), eq(fileConfig.getStorage()),
eq(fileConfig.getConfig()));
} |
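// Maps logical columns to physical indices in the table source's produced
// data type, substituting stream or batch time-indicator markers for the
// proctime and rowtime attributes instead of physical positions.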
public static int[] computePhysicalIndicesOrTimeAttributeMarkers(
TableSource<?> tableSource,
List<TableColumn> logicalColumns,
boolean streamMarkers,
Function<String, String> nameRemapping) {
Optional<String> proctimeAttribute = getProctimeAttribute(tableSource);
List<String> rowtimeAttributes = getRowtimeAttributes(tableSource);
List<TableColumn> columnsWithoutTimeAttributes =
logicalColumns.stream()
.filter(
col ->
!rowtimeAttributes.contains(col.getName())
&& proctimeAttribute
.map(attr -> !attr.equals(col.getName()))
.orElse(true))
.collect(Collectors.toList());
Map<TableColumn, Integer> columnsToPhysicalIndices =
TypeMappingUtils.computePhysicalIndices(
columnsWithoutTimeAttributes.stream(),
tableSource.getProducedDataType(),
nameRemapping);
return logicalColumns.stream()
.mapToInt(
logicalColumn -> {
if (proctimeAttribute
.map(attr -> attr.equals(logicalColumn.getName()))
.orElse(false)) {
verifyTimeAttributeType(logicalColumn, "Proctime");
if (streamMarkers) {
return TimeIndicatorTypeInfo.PROCTIME_STREAM_MARKER;
} else {
return TimeIndicatorTypeInfo.PROCTIME_BATCH_MARKER;
}
} else if (rowtimeAttributes.contains(logicalColumn.getName())) {
verifyTimeAttributeType(logicalColumn, "Rowtime");
if (streamMarkers) {
return TimeIndicatorTypeInfo.ROWTIME_STREAM_MARKER;
} else {
return TimeIndicatorTypeInfo.ROWTIME_BATCH_MARKER;
}
} else {
return columnsToPhysicalIndices.get(logicalColumn);
}
})
.toArray();
} | @Test
void testMappingWithStreamTimeAttributes() {
TestTableSource tableSource =
new TestTableSource(
DataTypes.BIGINT(), Collections.singletonList("rowtime"), "proctime");
int[] indices =
TypeMappingUtils.computePhysicalIndicesOrTimeAttributeMarkers(
tableSource,
TableSchema.builder()
.field("a", Types.LONG)
.field("rowtime", Types.SQL_TIMESTAMP)
.field("proctime", Types.SQL_TIMESTAMP)
.build()
.getTableColumns(),
true,
Function.identity());
assertThat(indices).isEqualTo(new int[] {0, -1, -2});
} |
@Override
public GraphModel getGraphModel() {
Workspace currentWorkspace = Lookup.getDefault().lookup(ProjectController.class).getCurrentWorkspace();
if (currentWorkspace == null) {
return null;
}
return getGraphModel(currentWorkspace);
} | @Test
public void testWithConfiguration() {
GraphControllerImpl graphController = new GraphControllerImpl();
Configuration configuration = graphController.getDefaultConfigurationBuilder().nodeIdType(Long.class).build();
ProjectController pc = Lookup.getDefault().lookup(ProjectController.class);
Project project = pc.newProject();
Workspace workspace = pc.newWorkspace(project, configuration);
GraphModel gm = graphController.getGraphModel(workspace);
Assert.assertEquals(gm.getConfiguration(), configuration);
} |
@Override
@MethodNotAvailable
public void loadAll(boolean replaceExistingValues) {
throw new MethodNotAvailableException();
} | @Test(expected = MethodNotAvailableException.class)
public void testLoadAll() {
adapter.loadAll(true);
} |
public static Filter parse(String filter){
return FilterCompiler.compile(filter);
} | @Test
public void criteria_can_be_parsed() {
Filter criteria = Filter.parse("[?(@.foo == 'baar')]");
assertThat(criteria.toString()).isEqualTo("[?(@['foo'] == 'baar')]");
criteria = Filter.parse("[?(@.foo)]");
assertThat(criteria.toString()).isEqualTo("[?(@['foo'])]");
} |
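// Looks up the PredicateJudge registered for the condition's operator and
// applies it; blank real data short-circuits to false for every judge except
// BlankPredicateJudge, which is meant to evaluate blank input.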
public static Boolean judge(final ConditionData conditionData, final String realData) {
if (Objects.isNull(conditionData) || StringUtils.isBlank(conditionData.getOperator())) {
return false;
}
PredicateJudge predicateJudge = newInstance(conditionData.getOperator());
if (!(predicateJudge instanceof BlankPredicateJudge) && StringUtils.isBlank(realData)) {
return false;
}
return predicateJudge.judge(conditionData, realData);
} | @Test
public void testStartsJudge() {
conditionData.setOperator(OperatorEnum.STARTS_WITH.getAlias());
assertTrue(PredicateJudgeFactory.judge(conditionData, "/http/**/test"));
assertFalse(PredicateJudgeFactory.judge(conditionData, "/test/http/**"));
assertFalse(PredicateJudgeFactory.judge(conditionData, "/http1/**"));
} |
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
if (getEmbeddedHttp2Exception(cause) != null) {
// Some exception in the causality chain is an Http2Exception - handle it.
onError(ctx, false, cause);
} else {
super.exceptionCaught(ctx, cause);
}
} | @Test
public void serverShouldNeverSend431HeaderSizeErrorWhenEncoding() throws Exception {
int padding = 0;
handler = newHandler();
Http2Exception e = new Http2Exception.HeaderListSizeException(STREAM_ID, PROTOCOL_ERROR,
"Header size exceeded max allowed size 8196", false);
when(stream.id()).thenReturn(STREAM_ID);
when(connection.isServer()).thenReturn(true);
when(stream.isHeadersSent()).thenReturn(false);
when(remote.lastStreamCreated()).thenReturn(STREAM_ID);
when(frameWriter.writeRstStream(eq(ctx), eq(STREAM_ID),
eq(PROTOCOL_ERROR.code()), eq(promise))).thenReturn(future);
handler.exceptionCaught(ctx, e);
verify(encoder, never()).writeHeaders(eq(ctx), eq(STREAM_ID),
any(Http2Headers.class), eq(padding), eq(true), eq(promise));
verify(frameWriter).writeRstStream(ctx, STREAM_ID, PROTOCOL_ERROR.code(), promise);
} |
public FEELFnResult<List<Object>> invoke( @ParameterName( "list" ) List list, @ParameterName( "item" ) Object[] items ) {
return invoke((Object) list, items);
} | @Test
void invokeEmptyParams() {
FunctionTestUtil.assertResultList(appendFunction.invoke(Collections.emptyList(), new Object[]{}),
Collections.emptyList());
} |
@Override
public List<BlockWorkerInfo> getPreferredWorkers(WorkerClusterView workerClusterView,
String fileId, int count) throws ResourceExhaustedException {
if (workerClusterView.size() < count) {
throw new ResourceExhaustedException(String.format(
"Not enough workers in the cluster %d workers in the cluster but %d required",
workerClusterView.size(), count));
}
Set<WorkerIdentity> workerIdentities = workerClusterView.workerIds();
mHashProvider.refresh(workerIdentities);
List<WorkerIdentity> workers = mHashProvider.getMultiple(fileId, count);
if (workers.size() != count) {
throw new ResourceExhaustedException(String.format(
"Found %d workers from the hash ring but %d required", workers.size(), count));
}
ImmutableList.Builder<BlockWorkerInfo> builder = ImmutableList.builder();
for (WorkerIdentity worker : workers) {
Optional<WorkerInfo> optionalWorkerInfo = workerClusterView.getWorkerById(worker);
final WorkerInfo workerInfo;
if (optionalWorkerInfo.isPresent()) {
workerInfo = optionalWorkerInfo.get();
} else {
// the worker returned by the policy does not exist in the cluster view
// supplied by the client.
// this can happen when the membership changes and some callers fail to update
// to the latest worker cluster view.
// in this case, just skip this worker
LOG.debug("Inconsistency between caller's view of cluster and that of "
+ "the consistent hash policy's: worker {} selected by policy does not exist in "
+ "caller's view {}. Skipping this worker.",
worker, workerClusterView);
continue;
}
BlockWorkerInfo blockWorkerInfo = new BlockWorkerInfo(
worker, workerInfo.getAddress(), workerInfo.getCapacityBytes(),
workerInfo.getUsedBytes(), workerInfo.getState() == WorkerState.LIVE
);
builder.add(blockWorkerInfo);
}
List<BlockWorkerInfo> infos = builder.build();
return infos;
} | @Test
public void getOneWorker() throws Exception {
WorkerLocationPolicy policy = WorkerLocationPolicy.Factory.create(mConf);
assertTrue(policy instanceof ConsistentHashPolicy);
// Prepare a worker list
WorkerClusterView workers = new WorkerClusterView(Arrays.asList(
new WorkerInfo()
.setIdentity(WorkerIdentityTestUtils.ofLegacyId(1))
.setAddress(new WorkerNetAddress()
.setHost("master1").setRpcPort(29998).setDataPort(29999).setWebPort(30000))
.setCapacityBytes(1024)
.setUsedBytes(0),
new WorkerInfo()
.setIdentity(WorkerIdentityTestUtils.ofLegacyId(2))
.setAddress(new WorkerNetAddress()
.setHost("master2").setRpcPort(29998).setDataPort(29999).setWebPort(30000))
.setCapacityBytes(1024)
.setUsedBytes(0)));
List<BlockWorkerInfo> assignedWorkers = policy.getPreferredWorkers(workers, "hdfs://a/b/c", 1);
assertEquals(1, assignedWorkers.size());
assertTrue(contains(workers, assignedWorkers.get(0)));
assertThrows(ResourceExhaustedException.class, () -> {
// Getting 1 out of no workers will result in an error
policy.getPreferredWorkers(new WorkerClusterView(ImmutableList.of()), "hdfs://a/b/c", 1);
});
} |
public static Builder builder() {
return new Builder();
} | @Test
public void testBuilderDoesNotCreateInvalidObjects() {
assertThatThrownBy(() -> ListTablesResponse.builder().add(null))
.isInstanceOf(NullPointerException.class)
.hasMessage("Invalid table identifier: null");
assertThatThrownBy(() -> ListTablesResponse.builder().addAll(null))
.isInstanceOf(NullPointerException.class)
.hasMessage("Invalid table identifier list: null");
List<TableIdentifier> listWithNullElement =
Lists.newArrayList(TableIdentifier.of(Namespace.of("foo"), "bar"), null);
assertThatThrownBy(() -> ListTablesResponse.builder().addAll(listWithNullElement))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Invalid table identifier: null");
} |
Collection<AuxiliaryService> getServices() {
return Collections.unmodifiableCollection(serviceMap.values());
} | @Test
public void testAuxServicesManifestPermissions() throws IOException {
Assume.assumeTrue(useManifest);
Configuration conf = getABConf();
FileSystem fs = FileSystem.get(conf);
fs.setPermission(new Path(manifest.getAbsolutePath()), FsPermission
.createImmutable((short) 0777));
AuxServices aux = new AuxServices(MOCK_AUX_PATH_HANDLER,
MOCK_CONTEXT, MOCK_DEL_SERVICE);
aux.init(conf);
assertEquals(0, aux.getServices().size());
fs.setPermission(new Path(manifest.getAbsolutePath()), FsPermission
.createImmutable((short) 0775));
aux = new AuxServices(MOCK_AUX_PATH_HANDLER,
MOCK_CONTEXT, MOCK_DEL_SERVICE);
aux.init(conf);
assertEquals(0, aux.getServices().size());
fs.setPermission(new Path(manifest.getAbsolutePath()), FsPermission
.createImmutable((short) 0755));
fs.setPermission(new Path(rootDir.getAbsolutePath()), FsPermission
.createImmutable((short) 0775));
aux = new AuxServices(MOCK_AUX_PATH_HANDLER,
MOCK_CONTEXT, MOCK_DEL_SERVICE);
aux.init(conf);
assertEquals(0, aux.getServices().size());
fs.setPermission(new Path(rootDir.getAbsolutePath()), FsPermission
.createImmutable((short) 0755));
aux = new AuxServices(MOCK_AUX_PATH_HANDLER,
MOCK_CONTEXT, MOCK_DEL_SERVICE);
aux.init(conf);
assertEquals(2, aux.getServices().size());
conf.set(YarnConfiguration.YARN_ADMIN_ACL, "");
aux = new AuxServices(MOCK_AUX_PATH_HANDLER,
MOCK_CONTEXT, MOCK_DEL_SERVICE);
aux.init(conf);
assertEquals(0, aux.getServices().size());
conf.set(YarnConfiguration.YARN_ADMIN_ACL, UserGroupInformation
.getCurrentUser().getShortUserName());
aux = new AuxServices(MOCK_AUX_PATH_HANDLER,
MOCK_CONTEXT, MOCK_DEL_SERVICE);
aux.init(conf);
assertEquals(2, aux.getServices().size());
} |
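// Evaluates the feature flag named in the @ControllerFeatureToggle annotation,
// throws FlagrApiNotFoundException when the flag is off, and injects the flag
// value into the first parameter annotated with @VariantKey before proceeding.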
@Around("@annotation(com.linecorp.flagship4j.javaflagr.annotations.ControllerFeatureToggle)")
public Object processControllerFeatureToggleAnnotation(ProceedingJoinPoint joinPoint) throws Throwable {
log.info("start processing controllerFeatureToggle annotation");
MethodSignature signature = (MethodSignature) joinPoint.getSignature();
Method method = signature.getMethod();
Annotation[][] parameterAnnotations = method.getParameterAnnotations();
Object[] args = joinPoint.getArgs();
ControllerFeatureToggle featureToggle = method.getAnnotation(ControllerFeatureToggle.class);
Boolean isFlagOn = flagrService.isFeatureFlagOn(featureToggle.value());
if (Boolean.FALSE.equals(isFlagOn)) {
throw new FlagrApiNotFoundException();
}
outerlabel:
for (int argIndex = 0; argIndex < args.length; argIndex++) {
for (Annotation annotation : parameterAnnotations[argIndex]) {
if (annotation instanceof VariantKey) {
args[argIndex] = isFlagOn;
break outerlabel;
}
}
}
return joinPoint.proceed(args);
} | @Test
public void processFlagrMethodWithControllerFeatureToggleTestWhenNotVariantKeyAnnotation() throws Throwable {
String methodName = "methodWithControllerFeatureToggleWithoutVariantKey";
FlagrAnnotationTest flagrAnnotationTest = new FlagrAnnotationTest();
Method method = Arrays.stream(flagrAnnotationTest.getClass().getMethods()).filter(m -> m.getName().equals(methodName)).findFirst().get();
args = new Object[]{};
when(joinPoint.getSignature()).thenReturn(signature);
when(signature.getMethod()).thenReturn(method);
when(joinPoint.getArgs()).thenReturn(args);
when(flagrService.isFeatureFlagOn(any(String.class)))
.thenReturn(givenPostEvaluationResponse().getVariantKey().equals(EffectiveVariant.ON.toValue()));
when(joinPoint.proceed(any(Object[].class))).thenReturn(args);
Object returnArgs = featureToggleAspect.processControllerFeatureToggleAnnotation(joinPoint);
assertEquals(args, returnArgs);
verify(joinPoint, times(1)).getSignature();
verify(signature, times(1)).getMethod();
verify(joinPoint, times(1)).getArgs();
verify(signature, times(1)).getMethod();
verify(flagrService, times(1)).isFeatureFlagOn(any(String.class));
verify(joinPoint, times(1)).proceed(any(Object[].class));
} |
public static void tar(@NotNull File source, @NotNull File dest) throws IOException {
if (!source.exists()) {
throw new IllegalArgumentException("No source file or folder exists: " + source.getAbsolutePath());
}
if (dest.exists()) {
throw new IllegalArgumentException("Destination refers to existing file or folder: " + dest.getAbsolutePath());
}
try (TarArchiveOutputStream tarOut = new TarArchiveOutputStream(new GZIPOutputStream(
new BufferedOutputStream(Files.newOutputStream(dest.toPath())), 0x1000))) {
doTar("", source, tarOut);
} catch (IOException e) {
IOUtil.deleteFile(dest); // operation filed, let's remove the destination archive
throw e;
}
} | @Test
public void testFileArchived() throws Exception {
File src = new File(randName + ".txt");
FileWriter fw = new FileWriter(src);
fw.write("12345");
fw.close();
CompressBackupUtil.tar(src, dest);
Assert.assertTrue("No destination archive created", dest.exists());
TarArchiveInputStream tai = new TarArchiveInputStream(new GZIPInputStream(new BufferedInputStream(new FileInputStream(dest))));
ArchiveEntry entry = tai.getNextEntry();
Assert.assertNotNull("No entry found in destination archive", entry);
Assert.assertEquals("Entry has wrong size", 5, entry.getSize());
} |
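// Creates a ParallelIterator over the source iterables backed by the worker
// pool and registers it with the parent so it is closed along with it.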
@Override
public CloseableIterator<T> iterator() {
ParallelIterator<T> iter =
new ParallelIterator<>(iterables, workerPool, approximateMaxQueueSize);
addCloseable(iter);
return iter;
} | @Test
public void queueSizeOne() {
List<Iterable<Integer>> iterables =
ImmutableList.of(
() -> IntStream.range(0, 100).iterator(),
() -> IntStream.range(0, 100).iterator(),
() -> IntStream.range(0, 100).iterator());
Multiset<Integer> expectedValues =
IntStream.range(0, 100)
.boxed()
.flatMap(i -> Stream.of(i, i, i))
.collect(ImmutableMultiset.toImmutableMultiset());
ExecutorService executor = Executors.newCachedThreadPool();
ParallelIterable<Integer> parallelIterable = new ParallelIterable<>(iterables, executor, 1);
ParallelIterator<Integer> iterator = (ParallelIterator<Integer>) parallelIterable.iterator();
Multiset<Integer> actualValues = HashMultiset.create();
while (iterator.hasNext()) {
assertThat(iterator.queueSize())
.as("iterator internal queue size")
.isLessThanOrEqualTo(1 + iterables.size());
actualValues.add(iterator.next());
}
assertThat(actualValues)
.as("multiset of values returned by the iterator")
.isEqualTo(expectedValues);
iterator.close();
executor.shutdownNow();
} |
@Override
public boolean add(FilteredBlock block) throws VerificationException, PrunedException {
boolean success = super.add(block);
if (success) {
trackFilteredTransactions(block.getTransactionCount());
}
return success;
} | @Test
public void duplicates() throws Exception {
Context.propagate(new Context(100, Coin.ZERO, false, true));
// Adding a block twice should not have any effect, in particular it should not send the block to the wallet.
Block b1 = TESTNET.getGenesisBlock().createNextBlock(coinbaseTo);
Block b2 = b1.createNextBlock(coinbaseTo);
Block b3 = b2.createNextBlock(coinbaseTo);
assertTrue(testNetChain.add(b1));
assertEquals(b1, testNetChain.getChainHead().getHeader());
assertTrue(testNetChain.add(b2));
assertEquals(b2, testNetChain.getChainHead().getHeader());
assertTrue(testNetChain.add(b3));
assertEquals(b3, testNetChain.getChainHead().getHeader());
assertTrue(testNetChain.add(b2)); // add old block
assertEquals(b3, testNetChain.getChainHead().getHeader()); // block didn't change, duplicate was spotted
} |
public static ObjectMapper createObjectMapper() {
final ObjectMapper objectMapper = new ObjectMapper();
registerModules(objectMapper);
return objectMapper;
} | @Test
void testObjectMapperOptionalSupportedEnabled() throws Exception {
final ObjectMapper mapper = JacksonMapperFactory.createObjectMapper();
assertThat(mapper.writeValueAsString(new TypeWithOptional(Optional.of("value"))))
.isEqualTo("{\"data\":\"value\"}");
assertThat(mapper.writeValueAsString(new TypeWithOptional(Optional.empty())))
.isEqualTo("{\"data\":null}");
assertThat(mapper.readValue("{\"data\":\"value\"}", TypeWithOptional.class).data)
.contains("value");
assertThat(mapper.readValue("{\"data\":null}", TypeWithOptional.class).data).isEmpty();
assertThat(mapper.readValue("{}", TypeWithOptional.class).data).isEmpty();
} |
@Override
public AnnotationVisitor visitAnnotation(String descriptor, boolean visible) {
// only handle the @Extension annotation (descriptor "Lorg/pf4j/Extension;")
if (!Type.getType(descriptor).getClassName().equals(Extension.class.getName())) {
return super.visitAnnotation(descriptor, visible);
}
return new AnnotationVisitor(ASM_VERSION) {
@Override
public AnnotationVisitor visitArray(final String name) {
if ("ordinal".equals(name) || "plugins".equals(name) || "points".equals(name)) {
return new AnnotationVisitor(ASM_VERSION, super.visitArray(name)) {
@Override
public void visit(String key, Object value) {
log.debug("Load annotation attribute {} = {} ({})", name, value, value.getClass().getName());
if ("ordinal".equals(name)) {
extensionInfo.ordinal = Integer.parseInt(value.toString());
} else if ("plugins".equals(name)) {
if (value instanceof String) {
log.debug("Found plugin {}", value);
extensionInfo.plugins.add((String) value);
} else if (value instanceof String[]) {
log.debug("Found plugins {}", Arrays.toString((String[]) value));
extensionInfo.plugins.addAll(Arrays.asList((String[]) value));
} else {
log.debug("Found plugin {}", value.toString());
extensionInfo.plugins.add(value.toString());
}
} else {
String pointClassName = ((Type) value).getClassName();
log.debug("Found point " + pointClassName);
extensionInfo.points.add(pointClassName);
}
super.visit(key, value);
}
};
}
return super.visitArray(name);
}
};
} | @Test
void visitAnnotationShouldReturnSuperVisitorForNonExtensionAnnotation() {
ExtensionInfo extensionInfo = new ExtensionInfo("org.pf4j.asm.ExtensionInfo");
ClassVisitor extensionVisitor = new ExtensionVisitor(extensionInfo);
AnnotationVisitor returnedVisitor = extensionVisitor.visitAnnotation("Lorg/pf4j/NonExtension;", true);
assertNull(returnedVisitor);
} |
@Override
public NacosRestTemplate createNacosRestTemplate() {
HttpClientConfig httpClientConfig = buildHttpClientConfig();
final JdkHttpClientRequest clientRequest = new JdkHttpClientRequest(httpClientConfig);
// enable ssl
initTls((sslContext, hostnameVerifier) -> {
clientRequest.setSSLContext(loadSSLContext());
clientRequest.replaceSSLHostnameVerifier(hostnameVerifier);
}, filePath -> clientRequest.setSSLContext(loadSSLContext()));
return new NacosRestTemplate(assignLogger(), clientRequest);
} | @Test
void testCreateNacosRestTemplateWithSsl() throws Exception {
TlsSystemConfig.tlsEnable = true;
HttpClientFactory httpClientFactory = new DefaultHttpClientFactory(logger);
NacosRestTemplate nacosRestTemplate = httpClientFactory.createNacosRestTemplate();
assertNotNull(nacosRestTemplate);
} |
static <T extends Comparable<? super T>> int compareListWithFillValue(
List<T> left, List<T> right, T fillValue) {
int longest = Math.max(left.size(), right.size());
for (int i = 0; i < longest; i++) {
T leftElement = fillValue;
T rightElement = fillValue;
if (i < left.size()) {
leftElement = left.get(i);
}
if (i < right.size()) {
rightElement = right.get(i);
}
int compareResult = leftElement.compareTo(rightElement);
if (compareResult != 0) {
return compareResult;
}
}
return 0;
} | @Test
public void compareWithFillValue_nonEmptyListVariedSizeWithZeroFillValue_returnsNegative() {
assertThat(
ComparisonUtility.compareListWithFillValue(
Lists.newArrayList(1, 2), Lists.newArrayList(1, 2, 3), 0))
.isLessThan(0);
} |
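A worked example may help here (values are illustrative, mirroring the test above): the fill value pads the shorter list, so the comparison only diverges at the padded index.
// [1, 2] vs [1, 2, 3] with fillValue = 0: indices 0 and 1 tie,
// then the left side is padded with 0 and 0.compareTo(3) < 0.
int cmp = ComparisonUtility.compareListWithFillValue(
    Lists.newArrayList(1, 2), Lists.newArrayList(1, 2, 3), 0); // negative
// With fillValue = 3 the padded slot ties with the right side's 3, so the result is 0.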
@Nonnull
public static <K, V> Sink<Entry<K, V>> map(@Nonnull String mapName) {
return map(mapName, Entry::getKey, Entry::getValue);
} | @Test
public void map_byName() {
// Given
List<Integer> input = sequence(itemCount);
putToBatchSrcMap(input);
// When
Sink<Entry<String, Integer>> sink = Sinks.map(sinkName);
// Then
p.readFrom(Sources.<String, Integer>map(srcName)).writeTo(sink);
execute();
List<Entry<String, Integer>> expected = input.stream()
.map(i -> entry(String.valueOf(i), i))
.collect(toList());
Set<Entry<String, Integer>> actual = hz().<String, Integer>getMap(sinkName).entrySet();
assertEquals(expected.size(), actual.size());
expected.forEach(entry -> assertTrue(actual.contains(entry)));
} |
@ApiOperation(value = "Make tenant profile default (setDefaultTenantProfile)",
notes = "Makes specified tenant profile to be default. Referencing non-existing tenant profile Id will cause an error. " + SYSTEM_AUTHORITY_PARAGRAPH)
@PreAuthorize("hasAnyAuthority('SYS_ADMIN')")
@RequestMapping(value = "/tenantProfile/{tenantProfileId}/default", method = RequestMethod.POST)
@ResponseBody
public TenantProfile setDefaultTenantProfile(
@Parameter(description = TENANT_PROFILE_ID_PARAM_DESCRIPTION)
@PathVariable("tenantProfileId") String strTenantProfileId) throws ThingsboardException {
checkParameter("tenantProfileId", strTenantProfileId);
TenantProfileId tenantProfileId = new TenantProfileId(toUUID(strTenantProfileId));
TenantProfile tenantProfile = checkTenantProfileId(tenantProfileId, Operation.WRITE);
tenantProfileService.setDefaultTenantProfile(getTenantId(), tenantProfileId);
return tenantProfile;
} | @Test
public void testSetDefaultTenantProfile() throws Exception {
loginSysAdmin();
TenantProfile tenantProfile = this.createTenantProfile("Tenant Profile 1");
TenantProfile savedTenantProfile = doPost("/api/tenantProfile", tenantProfile, TenantProfile.class);
TenantProfile defaultTenantProfile = doPost("/api/tenantProfile/" + savedTenantProfile.getId().getId().toString() + "/default", TenantProfile.class);
Assert.assertNotNull(defaultTenantProfile);
EntityInfo foundDefaultTenantProfile = doGet("/api/tenantProfileInfo/default", EntityInfo.class);
Assert.assertNotNull(foundDefaultTenantProfile);
Assert.assertEquals(savedTenantProfile.getName(), foundDefaultTenantProfile.getName());
Assert.assertEquals(savedTenantProfile.getId(), foundDefaultTenantProfile.getId());
} |
@Override
public Path move(final Path file, final Path renamed, final TransferStatus status, final Delete.Callback delete, final ConnectionCallback callback) throws BackgroundException {
try {
final StoregateApiClient client = session.getClient();
final MoveFileRequest move = new MoveFileRequest()
.name(renamed.getName())
.parentID(fileid.getFileId(renamed.getParent()))
.mode(1); // Overwrite
final HttpEntityEnclosingRequestBase request;
request = new HttpPost(String.format("%s/v4.2/files/%s/move", client.getBasePath(), fileid.getFileId(file)));
if(status.getLockId() != null) {
request.addHeader("X-Lock-Id", status.getLockId().toString());
}
request.setEntity(new StringEntity(new JSON().getContext(move.getClass()).writeValueAsString(move),
ContentType.create("application/json", StandardCharsets.UTF_8.name())));
request.addHeader(HTTP.CONTENT_TYPE, MEDIA_TYPE);
final HttpResponse response = client.getClient().execute(request);
try {
switch(response.getStatusLine().getStatusCode()) {
case HttpStatus.SC_NO_CONTENT:
final PathAttributes attr = new PathAttributes(file.attributes());
fileid.cache(file, null);
fileid.cache(renamed, attr.getFileId());
return renamed.withAttributes(attr);
default:
throw new StoregateExceptionMappingService(fileid).map("Cannot rename {0}",
new ApiException(response.getStatusLine().getStatusCode(), response.getStatusLine().getReasonPhrase()), file);
}
}
finally {
EntityUtils.consume(response.getEntity());
}
}
catch(IOException e) {
throw new DefaultIOExceptionMappingService().map("Cannot rename {0}", e, file);
}
} | @Test
public void testMoveDirectory() throws Exception {
final StoregateIdProvider nodeid = new StoregateIdProvider(session);
final Path room = new StoregateDirectoryFeature(session, nodeid).mkdir(
new Path(String.format("/My files/%s", new AlphanumericRandomStringService().random()),
EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final String foldername = new AlphanumericRandomStringService().random();
final Path test = new StoregateDirectoryFeature(session, nodeid).mkdir(new Path(room, foldername, EnumSet.of(Path.Type.directory)), new TransferStatus());
final Path testsub = new StoregateDirectoryFeature(session, nodeid).mkdir(new Path(test, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
final Path target = new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory));
new StoregateMoveFeature(session, nodeid).move(test, target, new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback());
assertEquals(0, session.getMetrics().get(Copy.class));
assertFalse(new DefaultFindFeature(session).find(new Path(room, foldername, EnumSet.of(Path.Type.directory))));
assertTrue(new DefaultFindFeature(session).find(target));
new StoregateDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
@Override
public MetadataReportConfig build() {
MetadataReportConfig metadataReport = new MetadataReportConfig();
super.build(metadataReport);
metadataReport.setAddress(address);
metadataReport.setUsername(username);
metadataReport.setPassword(password);
metadataReport.setTimeout(timeout);
metadataReport.setGroup(group);
metadataReport.setParameters(parameters);
metadataReport.setRetryTimes(retryTimes);
metadataReport.setRetryPeriod(retryPeriod);
metadataReport.setCycleReport(cycleReport);
metadataReport.setSyncReport(syncReport);
metadataReport.setCheck(check);
return metadataReport;
} | @Test
void build() {
MetadataReportBuilder builder = new MetadataReportBuilder();
builder.address("address")
.username("username")
.password("password")
.timeout(1000)
.group("group")
.retryTimes(1)
.retryPeriod(2)
.cycleReport(true)
.syncReport(false)
.appendParameter("default.num", "one")
.id("id");
MetadataReportConfig config = builder.build();
MetadataReportConfig config2 = builder.build();
Assertions.assertTrue(config.getCycleReport());
Assertions.assertFalse(config.getSyncReport());
Assertions.assertEquals(1000, config.getTimeout());
Assertions.assertEquals(1, config.getRetryTimes());
Assertions.assertEquals(2, config.getRetryPeriod());
Assertions.assertEquals("address", config.getAddress());
Assertions.assertEquals("username", config.getUsername());
Assertions.assertEquals("password", config.getPassword());
Assertions.assertEquals("group", config.getGroup());
Assertions.assertTrue(config.getParameters().containsKey("default.num"));
Assertions.assertEquals("one", config.getParameters().get("default.num"));
Assertions.assertEquals("id", config.getId());
Assertions.assertNotSame(config, config2);
} |
public WorkflowStartResponse toWorkflowStartResponse() {
return WorkflowStartResponse.builder()
.workflowId(this.workflowId)
.workflowVersionId(this.workflowVersionId)
.workflowInstanceId(this.workflowInstanceId)
.workflowRunId(this.workflowRunId)
.workflowUuid(this.workflowUuid)
.status(this.status.runStatus)
.timelineEvent(this.timelineEvent)
.build();
} | @Test
public void testToWorkflowStartResponse() {
RunResponse res = RunResponse.from(stepInstance, TimelineLogEvent.info("bar"));
WorkflowStartResponse response = res.toWorkflowStartResponse();
Assert.assertEquals(InstanceRunStatus.CREATED, response.getStatus());
res = RunResponse.from(instance, "foo");
response = res.toWorkflowStartResponse();
Assert.assertEquals(InstanceRunStatus.INTERNAL_ERROR, response.getStatus());
res = RunResponse.from(instance, 0);
response = res.toWorkflowStartResponse();
Assert.assertEquals(InstanceRunStatus.DUPLICATED, response.getStatus());
res = RunResponse.from(instance, -1);
response = res.toWorkflowStartResponse();
Assert.assertEquals(InstanceRunStatus.STOPPED, response.getStatus());
res = RunResponse.from(instance, 1);
response = res.toWorkflowStartResponse();
Assert.assertEquals(InstanceRunStatus.CREATED, response.getStatus());
} |
@Override
public ByteBuf writeBytes(byte[] src, int srcIndex, int length) {
ensureWritable(length);
setBytes(writerIndex, src, srcIndex, length);
writerIndex += length;
return this;
} | @Test
public void testWriteBytesAfterRelease3() {
final ByteBuf buffer = buffer(8);
try {
assertThrows(IllegalReferenceCountException.class, new Executable() {
@Override
public void execute() {
releasedBuffer().writeBytes(buffer, 0, 1);
}
});
} finally {
buffer.release();
}
} |
@Override
public void onApplicationEvent(ContextRefreshedEvent contextRefreshedEvent) {
try {
File pluginsFolder = new File(systemEnvironment.get(SystemEnvironment.AGENT_PLUGINS_PATH));
if (pluginsFolder.exists()) {
FileUtils.forceDelete(pluginsFolder);
}
zipUtil.unzip(DownloadableFile.AGENT_PLUGINS.getLocalFile(), pluginsFolder);
defaultPluginJarLocationMonitor.initialize();
pluginManager.startInfrastructure(false);
} catch (IOException e) {
LOG.warn("could not extract plugin zip", e);
} catch (RuntimeException e) {
LOG.warn("error while initializing agent plugins", e);
}
} | @Test
void shouldHandleIOExceptionQuietly() throws Exception {
doThrow(new IOException()).when(zipUtil).unzip(DownloadableFile.AGENT_PLUGINS.getLocalFile(), new File(SystemEnvironment.PLUGINS_PATH));
try {
agentPluginsInitializer.onApplicationEvent(null);
} catch (Exception e) {
fail("should have handled IOException");
}
} |
protected VersionedElasticAgentExtension getVersionedElasticAgentExtension(String pluginId) {
final String resolvedExtensionVersion = pluginManager.resolveExtensionVersion(pluginId, ELASTIC_AGENT_EXTENSION, goSupportedVersions());
return elasticAgentExtensionMap.get(resolvedExtensionVersion);
} | @Test
public void shouldHaveVersionedElasticAgentExtensionForAllSupportedVersions() {
for (String supportedVersion : SUPPORTED_VERSIONS) {
final String message = String.format("Must define versioned extension class for %s extension with version %s", ELASTIC_AGENT_EXTENSION, supportedVersion);
when(pluginManager.resolveExtensionVersion(PLUGIN_ID, ELASTIC_AGENT_EXTENSION, SUPPORTED_VERSIONS)).thenReturn(supportedVersion);
final VersionedElasticAgentExtension extension = this.extension.getVersionedElasticAgentExtension(PLUGIN_ID);
assertNotNull(extension, message);
assertThat(ReflectionUtil.getField(extension, "VERSION"), is(supportedVersion));
}
} |
public void updateStatusCount(final String state, final int change) {
updateStatusCount(KafkaStreams.State.valueOf(state), change);
} | @Test
public void shouldRoundTripWhenNotEmpty() {
// Given:
queryStatusCount.updateStatusCount(KafkaStreams.State.RUNNING, 2);
queryStatusCount.updateStatusCount(KafkaStreams.State.ERROR, 10);
queryStatusCount.updateStatusCount(KsqlQueryStatus.UNRESPONSIVE, 1);
// When:
final String json = assertDeserializedToSame(queryStatusCount);
// Then:
assertThat(json, is("{"
+ "\"RUNNING\":2,"
+ "\"ERROR\":10,"
+ "\"UNRESPONSIVE\":1"
+ "}"));
} |
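Since the String overload goes through KafkaStreams.State.valueOf, the name must match an enum constant exactly; a small sketch of both outcomes:
queryStatusCount.updateStatusCount("RUNNING", 2); // same as updateStatusCount(KafkaStreams.State.RUNNING, 2)
// queryStatusCount.updateStatusCount("running", 2); // would throw IllegalArgumentException from State.valueOf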
@Override
public int compareTo(Bandwidth other) {
if (other instanceof LongBandwidth) {
return ComparisonChain.start()
.compare(this.bps, ((LongBandwidth) other).bps)
.result();
}
return ComparisonChain.start()
.compare(this.bps, other.bps())
.result();
} | @Test
public void testLessThan() {
assertThat(small, is(lessThan(big)));
assertThat(small.compareTo(big), is(-1));
assertThat(big, is(greaterThan(small)));
assertThat(big.compareTo(small), is(1));
assertThat(notLongSmall, is(lessThan(notLongBig)));
assertThat(notLongSmall.compareTo(notLongBig), is(-1));
assertThat(notLongBig, is(greaterThan(notLongSmall)));
assertThat(notLongBig.compareTo(notLongSmall), is(1));
} |
@Override
public <T> void register(Class<T> remoteInterface, T object) {
register(remoteInterface, object, 1);
} | @Test
public void testAckWithoutResultInvocations() throws InterruptedException {
RedissonClient server = createInstance();
RedissonClient client = createInstance();
try {
server.getRemoteService().register(RemoteInterface.class, new RemoteImpl());
// fire and forget with an ack timeout of 1 sec
RemoteInvocationOptions options = RemoteInvocationOptions.defaults().expectAckWithin(1, TimeUnit.SECONDS).noResult();
RemoteInterface service = client.getRemoteService().get(RemoteInterface.class, options);
service.voidMethod("noResult", 100L);
try {
service.resultMethod(100L);
Assertions.fail();
} catch (Exception e) {
assertThat(e).isInstanceOf(IllegalArgumentException.class);
}
try {
service.errorMethod();
} catch (IOException e) {
Assertions.fail("noResult option should not throw server side exception");
}
try {
service.errorMethodWithCause();
} catch (Exception e) {
Assertions.fail("noResult option should not throw server side exception");
}
long time = System.currentTimeMillis();
service.timeoutMethod();
time = System.currentTimeMillis() - time;
assertThat(time).describedAs("noResult option should not wait for the server to return a response").isLessThan(2000);
try {
service.timeoutMethod();
Assertions.fail("noResult option should still wait for the server to ack the request and throw if the ack timeout is exceeded");
} catch (Exception e) {
assertThat(e).isInstanceOf(RemoteServiceAckTimeoutException.class);
}
} finally {
client.shutdown();
server.shutdown();
}
} |
@VisibleForTesting
static ParallelInstruction forParallelInstruction(
ParallelInstruction input, boolean replaceWithByteArrayCoder) throws Exception {
try {
ParallelInstruction instruction = clone(input, ParallelInstruction.class);
if (instruction.getRead() != null) {
Source cloudSource = instruction.getRead().getSource();
cloudSource.setCodec(forCodec(cloudSource.getCodec(), replaceWithByteArrayCoder));
} else if (instruction.getWrite() != null) {
com.google.api.services.dataflow.model.Sink cloudSink = instruction.getWrite().getSink();
cloudSink.setCodec(forCodec(cloudSink.getCodec(), replaceWithByteArrayCoder));
} else if (instruction.getParDo() != null) {
instruction.setParDo(
forParDoInstruction(instruction.getParDo(), replaceWithByteArrayCoder));
} else if (instruction.getPartialGroupByKey() != null) {
PartialGroupByKeyInstruction pgbk = instruction.getPartialGroupByKey();
pgbk.setInputElementCodec(forCodec(pgbk.getInputElementCodec(), replaceWithByteArrayCoder));
} else if (instruction.getFlatten() != null) {
// FlattenInstructions have no codecs to wrap.
} else {
throw new RuntimeException("Unknown parallel instruction: " + input);
}
return instruction;
} catch (IOException e) {
throw new RuntimeException(
String.format(
"Failed to replace unknown coder with " + "LengthPrefixCoder for : {%s}", input),
e);
}
} | @Test
public void testLengthPrefixReadInstructionCoder() throws Exception {
ReadInstruction readInstruction = new ReadInstruction();
readInstruction.setSource(
new Source()
.setCodec(CloudObjects.asCloudObject(windowedValueCoder, /*sdkComponents=*/ null)));
instruction.setRead(readInstruction);
ParallelInstruction prefixedInstruction = forParallelInstruction(instruction, false);
assertEqualsAsJson(
CloudObjects.asCloudObject(prefixedWindowedValueCoder, /*sdkComponents=*/ null),
prefixedInstruction.getRead().getSource().getCodec());
// Should not mutate the instruction.
assertEqualsAsJson(
readInstruction.getSource().getCodec(),
CloudObjects.asCloudObject(windowedValueCoder, /*sdkComponents=*/ null));
} |
@Operation(summary = "Get the status of a mijn digid session [VALID, INVALID]")
@GetMapping("/session_status/{mijn_digid_session_id}")
public ResponseEntity<MijnDigidSessionStatus> sessionStatus(@PathVariable(name = "mijn_digid_session_id") String mijnDigiDSessionId) {
if(mijnDigiDSessionId == null) {
return ResponseEntity.badRequest().build();
}
return ResponseEntity.ok(mijnDigiDSessionService.sessionStatus(mijnDigiDSessionId));
} | @Test
void validateSessionStatus() {
String sessionId = "id";
when(mijnDigiDSessionService.sessionStatus(sessionId)).thenReturn(MijnDigidSessionStatus.VALID);
ResponseEntity<MijnDigidSessionStatus> response = mijnDigiDSessionController.sessionStatus(sessionId);
verify(mijnDigiDSessionService, times(1)).sessionStatus(sessionId);
assertEquals(response.getStatusCode(), HttpStatus.OK);
assertEquals(response.getBody(), MijnDigidSessionStatus.VALID);
} |
public boolean isAllowed(HttpServletRequest httpRequest, HttpServletResponse httpResponse)
throws IOException {
if (!isRequestAllowed(httpRequest)) {
LOG.debug("Forbidden access to monitoring from " + httpRequest.getRemoteAddr());
httpResponse.sendError(HttpServletResponse.SC_FORBIDDEN, "Forbidden access");
return false;
}
if (!isUserAuthorized(httpRequest)) {
// Not allowed, so report that the caller is unauthorized
httpResponse.setHeader("WWW-Authenticate", "BASIC realm=\"JavaMelody\"");
if (isLocked()) {
httpResponse.sendError(HttpServletResponse.SC_UNAUTHORIZED,
"Unauthorized (locked)");
} else {
httpResponse.sendError(HttpServletResponse.SC_UNAUTHORIZED, "Unauthorized");
}
return false;
}
return true;
} | @Test
public void testIsAllowed() throws IOException {
assertTrue("no auth", isAllowed());
setProperty(Parameter.ALLOWED_ADDR_PATTERN, REMOTE_ADDR);
assertTrue("addr pattern", isAllowed());
setProperty(Parameter.ALLOWED_ADDR_PATTERN, "none");
assertFalse("addr pattern", isAllowed());
setProperty(Parameter.ALLOWED_ADDR_PATTERN, null);
setProperty(Parameter.AUTHORIZED_USERS, USER_PWD);
assertFalse("authorized users", isAllowed(null));
assertFalse("authorized users", isAllowed("not BASIC"));
assertTrue("authorized users", isAllowed("BASIC " + Base64Coder.encodeString(USER_PWD)));
setProperty(Parameter.AUTHORIZED_USERS, "none");
assertFalse("authorized users", isAllowed("BASIC " + Base64Coder.encodeString(USER_PWD)));
// check lock
final HttpAuth httpAuth = new HttpAuth();
setProperty(Parameter.AUTHORIZED_USERS, USER_PWD);
// 20 > HttpAuth.AUTH_FAILURES_MAX
for (int i = 0; i < 20; i++) {
assertFalse("lock",
isAllowed(httpAuth, "BASIC " + Base64Coder.encodeString("notuser:notpwd")));
}
assertFalse("lock", isAllowed(httpAuth, "BASIC " + Base64Coder.encodeString(USER_PWD)));
} |
@Override
public Messages process(Messages messages) {
try (Timer.Context ignored = executionTime.time()) {
final State latestState = stateUpdater.getLatestState();
if (latestState.enableRuleMetrics()) {
return process(messages, new RuleMetricsListener(metricRegistry), latestState);
}
return process(messages, new NoopInterpreterListener(), latestState);
}
} | @Test
public void testCreateMessage() {
final RuleService ruleService = mock(MongoDbRuleService.class);
when(ruleService.loadAll()).thenReturn(Collections.singleton(
RuleDao.create("abc",
"title",
"description",
"rule \"creates message\"\n" +
"when to_string($message.message) == \"original message\"\n" +
"then\n" +
" create_message(\"derived message\");\n" +
"end",
Tools.nowUTC(),
null, null, null)
));
final PipelineService pipelineService = mock(MongoDbPipelineService.class);
when(pipelineService.loadAll()).thenReturn(Collections.singleton(
PipelineDao.create("p1", "title", "description",
"pipeline \"pipeline\"\n" +
"stage 0 match all\n" +
" rule \"creates message\";\n" +
"end\n",
Tools.nowUTC(),
null)
));
final Map<String, Function<?>> functions = ImmutableMap.of(
CreateMessage.NAME, new CreateMessage(messageFactory),
StringConversion.NAME, new StringConversion());
final PipelineInterpreter interpreter = createPipelineInterpreter(ruleService, pipelineService, functions);
Message msg = messageInDefaultStream("original message", "test");
final Messages processed = interpreter.process(msg);
final Message[] messages = Iterables.toArray(processed, Message.class);
assertEquals(2, messages.length);
} |
public NSBundle bundle() {
if(cached != null) {
return cached;
}
if(log.isInfoEnabled()) {
log.info("Loading application bundle resources");
}
final NSBundle main = NSBundle.mainBundle();
if(null == main) {
cached = null;
}
else {
final Local executable = new FinderLocal(main.executablePath());
cached = this.bundle(main, executable);
}
return cached;
} | @Test
public void testAccessDenied() {
final NSBundle bundle = new BundleApplicationResourcesFinder().bundle(NSBundle.bundleWithPath("."), new Local("/usr/bin/java") {
@Override
public Local getSymlinkTarget() throws NotfoundException {
throw new NotfoundException("f");
}
});
assertNotNull(bundle);
assertEquals(NSBundle.bundleWithPath("."), bundle);
} |
public long decrementAndGet() {
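// getAndAddVal returns the value prior to the add, so subtract 1 to report the updated value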
return getAndAddVal(-1L) - 1L;
} | @Test
public void testDecrementAndGet() {
PaddedAtomicLong counter = new PaddedAtomicLong();
long value = counter.decrementAndGet();
assertEquals(-1, value);
assertEquals(-1, counter.get());
} |
public StatementExecutorResponse execute(
final ConfiguredStatement<? extends Statement> statement,
final KsqlExecutionContext executionContext,
final KsqlSecurityContext securityContext
) {
final String commandRunnerWarningString = commandRunnerWarning.get();
if (!commandRunnerWarningString.equals("")) {
throw new KsqlServerException("Failed to handle Ksql Statement."
+ System.lineSeparator()
+ commandRunnerWarningString);
}
final InjectorWithSideEffects injector = InjectorWithSideEffects.wrap(
injectorFactory.apply(executionContext, securityContext.getServiceContext()));
final ConfiguredStatementWithSideEffects<?> injectedWithSideEffects =
injector.injectWithSideEffects(statement);
try {
return executeInjected(
injectedWithSideEffects.getStatement(),
statement,
executionContext,
securityContext);
} catch (Exception e) {
injector.revertSideEffects(injectedWithSideEffects);
throw e;
}
} | @Test
public void shouldThrowExceptionWhenInsertIntoUnknownStream() {
// Given
final PreparedStatement<Statement> preparedStatement =
PreparedStatement.of("", new InsertInto(SourceName.of("s1"), mock(Query.class)));
final ConfiguredStatement<Statement> configured =
ConfiguredStatement.of(preparedStatement, SessionConfig.of(KSQL_CONFIG, ImmutableMap.of())
);
doReturn(null).when(metaStore).getSource(SourceName.of("s1"));
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> distributor.execute(configured, executionContext, mock(KsqlSecurityContext.class))
);
// Then:
assertThat(e.getMessage(), containsString(
"Cannot insert into an unknown stream/table: `s1`"));
} |
@Override
public void deleteTopics(final Collection<String> topicsToDelete) {
if (topicsToDelete.isEmpty()) {
return;
}
final DeleteTopicsResult deleteTopicsResult = adminClient.get().deleteTopics(topicsToDelete);
final Map<String, KafkaFuture<Void>> results = deleteTopicsResult.topicNameValues();
final List<String> failList = Lists.newArrayList();
final List<Pair<String, Throwable>> exceptionList = Lists.newArrayList();
for (final Map.Entry<String, KafkaFuture<Void>> entry : results.entrySet()) {
try {
entry.getValue().get(30, TimeUnit.SECONDS);
} catch (final Exception e) {
final Throwable rootCause = ExceptionUtils.getRootCause(e);
if (rootCause instanceof TopicDeletionDisabledException) {
throw new TopicDeletionDisabledException("Topic deletion is disabled. "
+ "To delete the topic, you must set '" + DELETE_TOPIC_ENABLE + "' to true in "
+ "the Kafka broker configuration.");
} else if (rootCause instanceof TopicAuthorizationException) {
throw new KsqlTopicAuthorizationException(
AclOperation.DELETE, Collections.singleton(entry.getKey()));
} else if (!(rootCause instanceof UnknownTopicOrPartitionException)) {
LOG.error(String.format("Could not delete topic '%s'", entry.getKey()), e);
failList.add(entry.getKey());
exceptionList.add(new Pair<>(entry.getKey(), rootCause));
}
}
}
if (!failList.isEmpty()) {
throw new KafkaDeleteTopicsException("Failed to clean up topics: "
+ String.join(",", failList), exceptionList);
}
} | @Test
@SuppressWarnings("unchecked")
public void shouldFailToDeleteOnKafkaDeleteTopicsException() {
// Given:
when(adminClient.deleteTopics(any(Collection.class)))
.thenAnswer(deleteTopicsResult(new Exception("error")));
// When:
assertThrows(
KafkaDeleteTopicsException.class,
() -> kafkaTopicClient.deleteTopics(ImmutableList.of("aTopic"))
);
} |
public void replace(String name, String token, String value) {
name = name.trim();
Variable v = vars.get(name);
if (v == null) {
throw new RuntimeException("no variable found with name: " + name);
}
String text = v.getAsString();
String replaced = replacePlaceholderText(text, token, value);
setVariable(name, replaced);
} | @Test
void testReplace() {
assign("foo", "'hello <world>'");
engine.replace("foo", "world", "'blah'");
matchEquals("foo", "'hello blah'");
assign("str", "'ha <foo> ha'");
Json json = Json.of("[{token: 'foo', value: \"'bar'\" }]");
engine.replaceTable("str", json.asList());
matchEquals("str", "'ha bar ha'");
} |
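From the assertions above, the token appears to be matched in its angle-bracket placeholder form rather than as a bare word:
engine.replace("foo", "world", "'blah'"); // "hello <world>" -> "hello blah"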
public CMap parse(RandomAccessRead randomAccessRead) throws IOException
{
CMap result = new CMap();
Object previousToken = null;
Object token = parseNextToken(randomAccessRead);
while (token != null)
{
if (token instanceof Operator)
{
Operator op = (Operator) token;
if (op.op.equals("endcmap"))
{
// end of CMap reached, stop reading as there isn't any interesting info anymore
break;
}
if (op.op.equals("usecmap") && previousToken instanceof LiteralName)
{
parseUsecmap((LiteralName) previousToken, result);
}
else if (previousToken instanceof Number)
{
if (op.op.equals("begincodespacerange"))
{
parseBegincodespacerange((Number) previousToken, randomAccessRead, result);
}
else if (op.op.equals("beginbfchar"))
{
parseBeginbfchar((Number) previousToken, randomAccessRead, result);
}
else if (op.op.equals("beginbfrange"))
{
parseBeginbfrange((Number) previousToken, randomAccessRead, result);
}
else if (op.op.equals("begincidchar"))
{
parseBegincidchar((Number) previousToken, randomAccessRead, result);
}
else if (op.op.equals("begincidrange") && previousToken instanceof Integer)
{
parseBegincidrange((Integer) previousToken, randomAccessRead, result);
}
}
}
else if (token instanceof LiteralName)
{
parseLiteralName((LiteralName) token, randomAccessRead, result);
}
previousToken = token;
token = parseNextToken(randomAccessRead);
}
return result;
} | @Test
void testParserWithMalformedbfrange2() throws IOException
{
CMap cMap = new CMapParser()
.parse(new RandomAccessReadBufferedFile(
new File("src/test/resources/cmap", "CMapMalformedbfrange2")));
assertNotNull(cMap, "Failed to parse malformed CMap file");
assertEquals("0", cMap.toUnicode(new byte[] { 0, 1 }),
"bytes 00 01 from bfrange <0001> <0009> <0030>");
assertEquals("A", cMap.toUnicode(new byte[] { 2, 0x32 }),
"bytes 02 32 from bfrange <0232> <0432> <0041>");
// check border values for non strict mode
assertNotNull(cMap.toUnicode(new byte[] { 2, (byte) 0xF0 }));
assertNotNull(cMap.toUnicode(new byte[] { 2, (byte) 0xF1 }));
// use strict mode
cMap = new CMapParser(true)
.parse(new RandomAccessReadBufferedFile(
new File("src/test/resources/cmap", "CMapMalformedbfrange2")));
// check border values for strict mode
assertNotNull(cMap.toUnicode(new byte[] { 2, (byte) 0xF0 }));
assertNull(cMap.toUnicode(new byte[] { 2, (byte) 0xF1 }));
} |
@GET
@Path("{path:.*}")
@Produces({MediaType.APPLICATION_OCTET_STREAM + "; " + JettyUtils.UTF_8,
MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8})
public Response get(@PathParam("path") String path,
@Context UriInfo uriInfo,
@QueryParam(OperationParam.NAME) OperationParam op,
@Context Parameters params,
@Context HttpServletRequest request)
throws IOException, FileSystemAccessException {
// Restrict access to only GETFILESTATUS and LISTSTATUS in write-only mode
if((op.value() != HttpFSFileSystem.Operation.GETFILESTATUS) &&
(op.value() != HttpFSFileSystem.Operation.LISTSTATUS) &&
accessMode == AccessMode.WRITEONLY) {
return Response.status(Response.Status.FORBIDDEN).build();
}
UserGroupInformation user = HttpUserGroupInformation.get();
Response response;
path = makeAbsolute(path);
MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
MDC.put("hostname", request.getRemoteAddr());
switch (op.value()) {
case OPEN: {
Boolean noRedirect = params.get(
NoRedirectParam.NAME, NoRedirectParam.class);
if (noRedirect) {
URI redirectURL = createOpenRedirectionURL(uriInfo);
final String js = JsonUtil.toJsonString("Location", redirectURL);
response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
} else {
// Invoking the command directly using an unmanaged FileSystem that is
// released by the FileSystemReleaseFilter
final FSOperations.FSOpen command = new FSOperations.FSOpen(path);
final FileSystem fs = createFileSystem(user);
InputStream is = null;
UserGroupInformation ugi = UserGroupInformation
.createProxyUser(user.getShortUserName(),
UserGroupInformation.getLoginUser());
try {
is = ugi.doAs(new PrivilegedExceptionAction<InputStream>() {
@Override
public InputStream run() throws Exception {
return command.execute(fs);
}
});
} catch (InterruptedException ie) {
LOG.warn("Open interrupted.", ie);
Thread.currentThread().interrupt();
}
Long offset = params.get(OffsetParam.NAME, OffsetParam.class);
Long len = params.get(LenParam.NAME, LenParam.class);
AUDIT_LOG.info("[{}] offset [{}] len [{}]",
new Object[] { path, offset, len });
InputStreamEntity entity = new InputStreamEntity(is, offset, len);
response = Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM)
.build();
}
break;
}
case GETFILESTATUS: {
FSOperations.FSFileStatus command = new FSOperations.FSFileStatus(path);
Map json = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case LISTSTATUS: {
String filter = params.get(FilterParam.NAME, FilterParam.class);
FSOperations.FSListStatus command =
new FSOperations.FSListStatus(path, filter);
Map json = fsExecute(user, command);
AUDIT_LOG.info("[{}] filter [{}]", path, (filter != null) ? filter : "-");
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETHOMEDIRECTORY: {
enforceRootPath(op.value(), path);
FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
JSONObject json = fsExecute(user, command);
AUDIT_LOG.info("Home Directory for [{}]", user);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case INSTRUMENTATION: {
enforceRootPath(op.value(), path);
Groups groups = HttpFSServerWebApp.get().get(Groups.class);
Set<String> userGroups = groups.getGroupsSet(user.getShortUserName());
if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) {
throw new AccessControlException(
"User not in HttpFSServer admin group");
}
Instrumentation instrumentation =
HttpFSServerWebApp.get().get(Instrumentation.class);
Map snapshot = instrumentation.getSnapshot();
response = Response.ok(snapshot).build();
break;
}
case GETCONTENTSUMMARY: {
FSOperations.FSContentSummary command =
new FSOperations.FSContentSummary(path);
Map json = fsExecute(user, command);
AUDIT_LOG.info("Content summary for [{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETQUOTAUSAGE: {
FSOperations.FSQuotaUsage command =
new FSOperations.FSQuotaUsage(path);
Map json = fsExecute(user, command);
AUDIT_LOG.info("Quota Usage for [{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETFILECHECKSUM: {
FSOperations.FSFileChecksum command =
new FSOperations.FSFileChecksum(path);
Boolean noRedirect = params.get(
NoRedirectParam.NAME, NoRedirectParam.class);
AUDIT_LOG.info("[{}]", path);
if (noRedirect) {
URI redirectURL = createOpenRedirectionURL(uriInfo);
final String js = JsonUtil.toJsonString("Location", redirectURL);
response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
} else {
Map json = fsExecute(user, command);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
}
break;
}
case GETFILEBLOCKLOCATIONS: {
long offset = 0;
long len = Long.MAX_VALUE;
Long offsetParam = params.get(OffsetParam.NAME, OffsetParam.class);
Long lenParam = params.get(LenParam.NAME, LenParam.class);
AUDIT_LOG.info("[{}] offset [{}] len [{}]", path, offsetParam, lenParam);
if (offsetParam != null && offsetParam > 0) {
offset = offsetParam;
}
if (lenParam != null && lenParam > 0) {
len = lenParam;
}
FSOperations.FSFileBlockLocations command =
new FSOperations.FSFileBlockLocations(path, offset, len);
@SuppressWarnings("rawtypes")
Map locations = fsExecute(user, command);
final String json = JsonUtil.toJsonString("BlockLocations", locations);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETACLSTATUS: {
FSOperations.FSAclStatus command = new FSOperations.FSAclStatus(path);
Map json = fsExecute(user, command);
AUDIT_LOG.info("ACL status for [{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETXATTRS: {
List<String> xattrNames =
params.getValues(XAttrNameParam.NAME, XAttrNameParam.class);
XAttrCodec encoding =
params.get(XAttrEncodingParam.NAME, XAttrEncodingParam.class);
FSOperations.FSGetXAttrs command =
new FSOperations.FSGetXAttrs(path, xattrNames, encoding);
@SuppressWarnings("rawtypes") Map json = fsExecute(user, command);
AUDIT_LOG.info("XAttrs for [{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case LISTXATTRS: {
FSOperations.FSListXAttrs command = new FSOperations.FSListXAttrs(path);
@SuppressWarnings("rawtypes") Map json = fsExecute(user, command);
AUDIT_LOG.info("XAttr names for [{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case LISTSTATUS_BATCH: {
String startAfter = params.get(
HttpFSParametersProvider.StartAfterParam.NAME,
HttpFSParametersProvider.StartAfterParam.class);
byte[] token = HttpFSUtils.EMPTY_BYTES;
if (startAfter != null) {
token = startAfter.getBytes(StandardCharsets.UTF_8);
}
FSOperations.FSListStatusBatch command = new FSOperations
.FSListStatusBatch(path, token);
@SuppressWarnings("rawtypes") Map json = fsExecute(user, command);
AUDIT_LOG.info("[{}] token [{}]", path, token);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETTRASHROOT: {
FSOperations.FSTrashRoot command = new FSOperations.FSTrashRoot(path);
JSONObject json = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETALLSTORAGEPOLICY: {
FSOperations.FSGetAllStoragePolicies command =
new FSOperations.FSGetAllStoragePolicies();
JSONObject json = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETSTORAGEPOLICY: {
FSOperations.FSGetStoragePolicy command =
new FSOperations.FSGetStoragePolicy(path);
JSONObject json = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETSNAPSHOTDIFF: {
String oldSnapshotName = params.get(OldSnapshotNameParam.NAME,
OldSnapshotNameParam.class);
String snapshotName = params.get(SnapshotNameParam.NAME,
SnapshotNameParam.class);
FSOperations.FSGetSnapshotDiff command =
new FSOperations.FSGetSnapshotDiff(path, oldSnapshotName,
snapshotName);
String js = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETSNAPSHOTDIFFLISTING: {
String oldSnapshotName = params.get(OldSnapshotNameParam.NAME,
OldSnapshotNameParam.class);
String snapshotName = params.get(SnapshotNameParam.NAME,
SnapshotNameParam.class);
String snapshotDiffStartPath = params
.get(HttpFSParametersProvider.SnapshotDiffStartPathParam.NAME,
HttpFSParametersProvider.SnapshotDiffStartPathParam.class);
Integer snapshotDiffIndex = params.get(HttpFSParametersProvider.SnapshotDiffIndexParam.NAME,
HttpFSParametersProvider.SnapshotDiffIndexParam.class);
FSOperations.FSGetSnapshotDiffListing command =
new FSOperations.FSGetSnapshotDiffListing(path, oldSnapshotName,
snapshotName, snapshotDiffStartPath, snapshotDiffIndex);
String js = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETSNAPSHOTTABLEDIRECTORYLIST: {
FSOperations.FSGetSnapshottableDirListing command =
new FSOperations.FSGetSnapshottableDirListing();
String js = fsExecute(user, command);
AUDIT_LOG.info("[{}]", "/");
response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETSNAPSHOTLIST: {
FSOperations.FSGetSnapshotListing command =
new FSOperations.FSGetSnapshotListing(path);
String js = fsExecute(user, command);
AUDIT_LOG.info("[{}]", "/");
response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETSERVERDEFAULTS: {
FSOperations.FSGetServerDefaults command =
new FSOperations.FSGetServerDefaults();
String js = fsExecute(user, command);
AUDIT_LOG.info("[{}]", "/");
response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
break;
}
case CHECKACCESS: {
String mode = params.get(FsActionParam.NAME, FsActionParam.class);
FsActionParam fsparam = new FsActionParam(mode);
FSOperations.FSAccess command = new FSOperations.FSAccess(path,
FsAction.getFsAction(fsparam.value()));
fsExecute(user, command);
AUDIT_LOG.info("[{}]", "/");
response = Response.ok().build();
break;
}
case GETECPOLICY: {
FSOperations.FSGetErasureCodingPolicy command =
new FSOperations.FSGetErasureCodingPolicy(path);
String js = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETECPOLICIES: {
FSOperations.FSGetErasureCodingPolicies command =
new FSOperations.FSGetErasureCodingPolicies();
String js = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETECCODECS: {
FSOperations.FSGetErasureCodingCodecs command =
new FSOperations.FSGetErasureCodingCodecs();
Map json = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GET_BLOCK_LOCATIONS: {
long offset = 0;
long len = Long.MAX_VALUE;
Long offsetParam = params.get(OffsetParam.NAME, OffsetParam.class);
Long lenParam = params.get(LenParam.NAME, LenParam.class);
AUDIT_LOG.info("[{}] offset [{}] len [{}]", path, offsetParam, lenParam);
if (offsetParam != null && offsetParam > 0) {
offset = offsetParam;
}
if (lenParam != null && lenParam > 0) {
len = lenParam;
}
FSOperations.FSFileBlockLocationsLegacy command =
new FSOperations.FSFileBlockLocationsLegacy(path, offset, len);
@SuppressWarnings("rawtypes")
Map locations = fsExecute(user, command);
final String json = JsonUtil.toJsonString("LocatedBlocks", locations);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETFILELINKSTATUS: {
FSOperations.FSFileLinkStatus command =
new FSOperations.FSFileLinkStatus(path);
@SuppressWarnings("rawtypes") Map js = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETSTATUS: {
FSOperations.FSStatus command = new FSOperations.FSStatus(path);
@SuppressWarnings("rawtypes") Map js = fsExecute(user, command);
response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETTRASHROOTS: {
Boolean allUsers = params.get(AllUsersParam.NAME, AllUsersParam.class);
FSOperations.FSGetTrashRoots command = new FSOperations.FSGetTrashRoots(allUsers);
Map json = fsExecute(user, command);
AUDIT_LOG.info("allUsers [{}]", allUsers);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
default: {
throw new IOException(
MessageFormat.format("Invalid HTTP GET operation [{0}]", op.value()));
}
}
return response;
} | @Test
@TestDir
@TestJetty
@TestHdfs
public void testAccess() throws Exception {
createHttpFSServer(false, false);
final String dir = "/xattrTest";
Path path1 = new Path(dir);
DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
.get(path1.toUri(), TestHdfsHelper.getHdfsConf());
dfs.mkdirs(new Path(dir));
HttpURLConnection conn =
sendRequestToHttpFSServer(dir, "CHECKACCESS", "fsaction=r--");
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
HttpURLConnection conn1 =
sendRequestToHttpFSServer(dir, "CHECKACCESS", "fsaction=-w-");
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn1.getResponseCode());
} |
public static Number parseNumber(String numberStr) throws NumberFormatException {
if (StrUtil.startWithIgnoreCase(numberStr, "0x")) {
// a leading "0x" (e.g. 0x04) marks a hexadecimal number
return Long.parseLong(numberStr.substring(2), 16);
} else if (StrUtil.startWith(numberStr, '+')) {
// issue#I79VS7
numberStr = StrUtil.subSuf(numberStr, 1);
}
try {
final NumberFormat format = NumberFormat.getInstance();
if (format instanceof DecimalFormat) {
// issue#1818@Github
// when the numeric string exceeds the precision of a double it would be truncated, so parse into a BigDecimal instead
((DecimalFormat) format).setParseBigDecimal(true);
}
return format.parse(numberStr);
} catch (ParseException e) {
final NumberFormatException nfe = new NumberFormatException(e.getMessage());
nfe.initCause(e);
throw nfe;
}
} | @Test
public void parseNumberTest2(){
// issue#I5M55F
final String numberStr = "429900013E20220812163344551";
final Number number = NumberUtil.parseNumber(numberStr);
assertNotNull(number);
assertInstanceOf(BigDecimal.class, number);
} |
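A quick sketch of the three parsing paths (illustrative values, not from the test suite):
Number hex = NumberUtil.parseNumber("0x1A");  // 26 as a Long, parsed base-16
Number pos = NumberUtil.parseNumber("+42");   // leading '+' stripped before NumberFormat parsing
Number big = NumberUtil.parseNumber("429900013E20220812163344551"); // BigDecimal, avoiding double truncation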
void addTo(EnvironmentVariableContext context) {
context.setProperty(name, getValue(), isSecure());
} | @Test
void shouldAddPlainTextEnvironmentVariableToContext() {
String key = "key";
String plainText = "plainText";
EnvironmentVariableConfig environmentVariableConfig = new EnvironmentVariableConfig(goCipher, key, plainText, false);
EnvironmentVariableContext context = new EnvironmentVariableContext();
environmentVariableConfig.addTo(context);
assertThat(context.getProperty(key)).isEqualTo(plainText);
assertThat(context.getPropertyForDisplay(key)).isEqualTo(plainText);
} |
@VisibleForTesting
Map<String, List<Operation>> computeOperations(SegmentDirectory.Reader segmentReader)
throws Exception {
Map<String, List<Operation>> columnOperationsMap = new HashMap<>();
// Does not work for segment versions < V3.
if (_segmentDirectory.getSegmentMetadata().getVersion().compareTo(SegmentVersion.v3) < 0) {
return columnOperationsMap;
}
Set<String> existingAllColumns = _segmentDirectory.getSegmentMetadata().getAllColumns();
Set<String> existingDictColumns = _segmentDirectory.getColumnsWithIndex(StandardIndexes.dictionary());
Set<String> existingForwardIndexColumns = _segmentDirectory.getColumnsWithIndex(StandardIndexes.forward());
for (String column : existingAllColumns) {
if (_schema != null && !_schema.hasColumn(column)) {
// _schema will be null only in tests
LOGGER.info("Column {} is not in schema, skipping updating forward index", column);
continue;
}
boolean existingHasDict = existingDictColumns.contains(column);
boolean existingHasFwd = existingForwardIndexColumns.contains(column);
FieldIndexConfigs newConf = _fieldIndexConfigs.get(column);
boolean newIsFwd = newConf.getConfig(StandardIndexes.forward()).isEnabled();
boolean newIsDict = newConf.getConfig(StandardIndexes.dictionary()).isEnabled();
boolean newIsRange = newConf.getConfig(StandardIndexes.range()).isEnabled();
if (existingHasFwd && !newIsFwd) {
// Existing column has a forward index. New column config disables the forward index
ColumnMetadata columnMetadata = _segmentDirectory.getSegmentMetadata().getColumnMetadataFor(column);
if (columnMetadata.isSorted()) {
// Check if the column is sorted. If sorted, disabling forward index should be a no-op. Do not return an
// operation for this column related to disabling forward index.
LOGGER.warn("Trying to disable the forward index for a sorted column {}, ignoring", column);
continue;
}
if (existingHasDict) {
if (!newIsDict) {
// Dictionary was also disabled. Just disable the dictionary and remove it along with the forward index
// If range index exists, don't try to regenerate it on toggling the dictionary, throw an error instead
Preconditions.checkState(!newIsRange, String.format(
"Must disable range (enabled) index to disable the dictionary and forward index for column: %s or "
+ "refresh / back-fill the forward index", column));
columnOperationsMap.put(column,
Arrays.asList(Operation.DISABLE_FORWARD_INDEX, Operation.DISABLE_DICTIONARY));
} else {
// Dictionary is still enabled, keep it but remove the forward index
columnOperationsMap.put(column, Collections.singletonList(Operation.DISABLE_FORWARD_INDEX));
}
} else {
if (!newIsDict) {
// Dictionary remains disabled and we should not reconstruct temporary forward index as dictionary based
columnOperationsMap.put(column, Collections.singletonList(Operation.DISABLE_FORWARD_INDEX));
} else {
// Dictionary is enabled, creation of dictionary and conversion to dictionary based forward index is needed
columnOperationsMap.put(column,
Arrays.asList(Operation.DISABLE_FORWARD_INDEX, Operation.ENABLE_DICTIONARY));
}
}
} else if (!existingHasFwd && newIsFwd) {
// Existing column does not have a forward index. New column config enables the forward index
ColumnMetadata columnMetadata = _segmentDirectory.getSegmentMetadata().getColumnMetadataFor(column);
if (columnMetadata != null && columnMetadata.isSorted()) {
// Check if the column is sorted. If sorted, disabling forward index should be a no-op and forward index
// should already exist. Do not return an operation for this column related to enabling forward index.
LOGGER.warn("Trying to enable the forward index for a sorted column {}, ignoring", column);
continue;
}
// Get list of columns with inverted index
Set<String> existingInvertedIndexColumns =
segmentReader.toSegmentDirectory().getColumnsWithIndex(StandardIndexes.inverted());
if (!existingHasDict || !existingInvertedIndexColumns.contains(column)) {
// If either dictionary or inverted index is missing on the column there is no way to re-generate the forward
// index. Treat this as a no-op and log a warning.
LOGGER.warn("Trying to enable the forward index for a column {} missing either the dictionary ({}) and / or "
+ "the inverted index ({}) is not possible. Either a refresh or back-fill is required to get the "
+ "forward index, ignoring", column, existingHasDict ? "enabled" : "disabled",
existingInvertedIndexColumns.contains(column) ? "enabled" : "disabled");
continue;
}
columnOperationsMap.put(column, Collections.singletonList(Operation.ENABLE_FORWARD_INDEX));
} else if (!existingHasFwd) {
// Forward index is disabled for the existing column and should remain disabled based on the latest config
// Need some checks to see whether the dictionary is being enabled or disabled here and take appropriate actions
// If the dictionary is not enabled on the existing column it must be on the new noDictionary column list.
// Cannot enable the dictionary for a column with forward index disabled.
Preconditions.checkState(existingHasDict || !newIsDict,
String.format("Cannot regenerate the dictionary for column %s with forward index disabled. Please "
+ "refresh or back-fill the data to add back the forward index", column));
if (existingHasDict && !newIsDict) {
// Dictionary is currently enabled on this column but is supposed to be disabled. Remove the dictionary
// and update the segment metadata If the range index exists then throw an error since we are not
// regenerating the range index on toggling the dictionary
Preconditions.checkState(!newIsRange, String.format(
"Must disable range (enabled) index to disable the dictionary for a forwardIndexDisabled column: %s or "
+ "refresh / back-fill the forward index", column));
columnOperationsMap.put(column, Collections.singletonList(Operation.DISABLE_DICTIONARY));
}
} else if (!existingHasDict && newIsDict) {
// Existing column is RAW. New column is dictionary enabled.
if (_schema == null || _tableConfig == null) {
// This can only happen in tests.
LOGGER.warn("Cannot enable dictionary for column={} as schema or tableConfig is null.", column);
continue;
}
ColumnMetadata existingColumnMetadata = _segmentDirectory.getSegmentMetadata().getColumnMetadataFor(column);
if (DictionaryIndexType.ignoreDictionaryOverride(_tableConfig.getIndexingConfig().isOptimizeDictionary(),
_tableConfig.getIndexingConfig().isOptimizeDictionaryForMetrics(),
_tableConfig.getIndexingConfig().getNoDictionarySizeRatioThreshold(), existingColumnMetadata.getFieldSpec(),
_fieldIndexConfigs.get(column), existingColumnMetadata.getCardinality(),
existingColumnMetadata.getTotalNumberOfEntries())) {
columnOperationsMap.put(column, Collections.singletonList(Operation.ENABLE_DICTIONARY));
}
} else if (existingHasDict && !newIsDict) {
// Existing column has dictionary. New config for the column is RAW.
if (shouldDisableDictionary(column, _segmentDirectory.getSegmentMetadata().getColumnMetadataFor(column))) {
columnOperationsMap.put(column, Collections.singletonList(Operation.DISABLE_DICTIONARY));
}
} else if (!existingHasDict) {
// Both existing and new column is RAW forward index encoded. Check if compression needs to be changed.
// TODO: Also check if raw index version needs to be changed
if (shouldChangeRawCompressionType(column, segmentReader)) {
columnOperationsMap.put(column, Collections.singletonList(Operation.CHANGE_INDEX_COMPRESSION_TYPE));
}
} else {
// Both existing and new column is dictionary encoded. Check if compression needs to be changed.
if (shouldChangeDictIdCompressionType(column, segmentReader)) {
columnOperationsMap.put(column, Collections.singletonList(Operation.CHANGE_INDEX_COMPRESSION_TYPE));
}
}
}
return columnOperationsMap;
} | @Test
public void testComputeOperationEnableForwardIndex()
throws Exception {
// Setup
SegmentMetadataImpl existingSegmentMetadata = new SegmentMetadataImpl(_segmentDirectory);
SegmentDirectory segmentLocalFSDirectory =
new SegmentLocalFSDirectory(_segmentDirectory, existingSegmentMetadata, ReadMode.mmap);
SegmentDirectory.Writer writer = segmentLocalFSDirectory.createWriter();
// TEST1: Try to change compression type for a forward index disabled column and enable forward index for it
List<FieldConfig> fieldConfigs = new ArrayList<>(_tableConfig.getFieldConfigList());
int randIdx;
Random rand = new Random();
String name;
do {
// Only try to change compression type for forward index disabled columns
randIdx = rand.nextInt(fieldConfigs.size());
name = fieldConfigs.get(randIdx).getName();
} while (!SV_FORWARD_INDEX_DISABLED_COLUMNS.contains(name) && !MV_FORWARD_INDEX_DISABLED_COLUMNS.contains(name));
FieldConfig config = fieldConfigs.remove(randIdx);
CompressionCodec newCompressionType = null;
for (CompressionCodec type : RAW_COMPRESSION_TYPES) {
if (config.getCompressionCodec() != type) {
newCompressionType = type;
break;
}
}
FieldConfig newConfig =
new FieldConfig(config.getName(), FieldConfig.EncodingType.RAW, Collections.emptyList(), newCompressionType,
null);
fieldConfigs.add(newConfig);
List<String> noDictionaryColumns = new ArrayList<>(_noDictionaryColumns);
noDictionaryColumns.add(config.getName());
TableConfig tableConfig =
new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setNoDictionaryColumns(noDictionaryColumns)
.setInvertedIndexColumns(_invertedIndexColumns).setFieldConfigList(fieldConfigs).build();
tableConfig.setFieldConfigList(fieldConfigs);
IndexLoadingConfig indexLoadingConfig = new IndexLoadingConfig(null, tableConfig);
indexLoadingConfig.addNoDictionaryColumns(config.getName());
indexLoadingConfig.removeInvertedIndexColumns(config.getName());
ForwardIndexHandler fwdIndexHandler = new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, null);
Map<String, List<ForwardIndexHandler.Operation>> operationMap = fwdIndexHandler.computeOperations(writer);
assertEquals(operationMap.size(), 1);
assertEquals(operationMap.get(config.getName()),
Collections.singletonList(ForwardIndexHandler.Operation.ENABLE_FORWARD_INDEX));
// TEST2: Enable forward index in dictionary format for a column with forward index disabled
indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
indexLoadingConfig.removeForwardIndexDisabledColumns(DIM_SV_FORWARD_INDEX_DISABLED_BYTES);
fwdIndexHandler = new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, _schema);
operationMap = fwdIndexHandler.computeOperations(writer);
assertEquals(operationMap.size(), 1);
assertEquals(operationMap.get(DIM_SV_FORWARD_INDEX_DISABLED_BYTES),
Collections.singletonList(ForwardIndexHandler.Operation.ENABLE_FORWARD_INDEX));
// TEST3: Enable forward index in raw format for a column with forward index disabled. Remove column from inverted
// index as well (inverted index needs dictionary)
indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
indexLoadingConfig.removeForwardIndexDisabledColumns(DIM_MV_FORWARD_INDEX_DISABLED_INTEGER);
indexLoadingConfig.addNoDictionaryColumns(DIM_MV_FORWARD_INDEX_DISABLED_INTEGER);
indexLoadingConfig.removeInvertedIndexColumns(DIM_MV_FORWARD_INDEX_DISABLED_INTEGER);
fwdIndexHandler = new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, _schema);
operationMap = fwdIndexHandler.computeOperations(writer);
assertEquals(operationMap.size(), 1);
assertEquals(operationMap.get(DIM_MV_FORWARD_INDEX_DISABLED_INTEGER),
Collections.singletonList(ForwardIndexHandler.Operation.ENABLE_FORWARD_INDEX));
// TEST4: Enable forward index in dictionary format for two columns with forward index disabled. Disable inverted
// index for one of them
indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
indexLoadingConfig.removeForwardIndexDisabledColumns(DIM_SV_FORWARD_INDEX_DISABLED_LONG);
indexLoadingConfig.removeForwardIndexDisabledColumns(DIM_MV_FORWARD_INDEX_DISABLED_STRING);
indexLoadingConfig.removeInvertedIndexColumns(DIM_SV_FORWARD_INDEX_DISABLED_LONG);
fwdIndexHandler = new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, _schema);
operationMap = fwdIndexHandler.computeOperations(writer);
assertEquals(operationMap.size(), 2);
assertEquals(operationMap.get(DIM_SV_FORWARD_INDEX_DISABLED_LONG),
Collections.singletonList(ForwardIndexHandler.Operation.ENABLE_FORWARD_INDEX));
assertEquals(operationMap.get(DIM_MV_FORWARD_INDEX_DISABLED_STRING),
Collections.singletonList(ForwardIndexHandler.Operation.ENABLE_FORWARD_INDEX));
// TEST5: Enable forward index in raw format for two columns with forward index disabled. Remove column from
// inverted index as well (inverted index needs dictionary)
indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
indexLoadingConfig.removeForwardIndexDisabledColumns(DIM_SV_FORWARD_INDEX_DISABLED_STRING);
indexLoadingConfig.addNoDictionaryColumns(DIM_SV_FORWARD_INDEX_DISABLED_STRING);
indexLoadingConfig.removeInvertedIndexColumns(DIM_SV_FORWARD_INDEX_DISABLED_STRING);
indexLoadingConfig.removeForwardIndexDisabledColumns(DIM_MV_FORWARD_INDEX_DISABLED_LONG);
indexLoadingConfig.addNoDictionaryColumns(DIM_MV_FORWARD_INDEX_DISABLED_LONG);
indexLoadingConfig.removeInvertedIndexColumns(DIM_MV_FORWARD_INDEX_DISABLED_LONG);
fwdIndexHandler = new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, _schema);
operationMap = fwdIndexHandler.computeOperations(writer);
assertEquals(operationMap.size(), 2);
assertEquals(operationMap.get(DIM_SV_FORWARD_INDEX_DISABLED_STRING),
Collections.singletonList(ForwardIndexHandler.Operation.ENABLE_FORWARD_INDEX));
assertEquals(operationMap.get(DIM_MV_FORWARD_INDEX_DISABLED_LONG),
Collections.singletonList(ForwardIndexHandler.Operation.ENABLE_FORWARD_INDEX));
// TEST6: Enable forward index in dictionary format and one in raw format for columns with forward index disabled
indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
indexLoadingConfig.removeForwardIndexDisabledColumns(DIM_MV_FORWARD_INDEX_DISABLED_LONG);
indexLoadingConfig.addNoDictionaryColumns(DIM_MV_FORWARD_INDEX_DISABLED_LONG);
indexLoadingConfig.removeInvertedIndexColumns(DIM_MV_FORWARD_INDEX_DISABLED_LONG);
indexLoadingConfig.removeForwardIndexDisabledColumns(DIM_SV_FORWARD_INDEX_DISABLED_BYTES);
fwdIndexHandler = new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, _schema);
operationMap = fwdIndexHandler.computeOperations(writer);
assertEquals(operationMap.size(), 2);
assertEquals(operationMap.get(DIM_MV_FORWARD_INDEX_DISABLED_LONG),
Collections.singletonList(ForwardIndexHandler.Operation.ENABLE_FORWARD_INDEX));
assertEquals(operationMap.get(DIM_SV_FORWARD_INDEX_DISABLED_BYTES),
Collections.singletonList(ForwardIndexHandler.Operation.ENABLE_FORWARD_INDEX));
// TEST7: Enable forward index for a raw column with forward index disabled and keep it as raw
indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
indexLoadingConfig.removeForwardIndexDisabledColumns(DIM_RAW_MV_FORWARD_INDEX_DISABLED_INTEGER);
fwdIndexHandler = new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, _schema);
operationMap = fwdIndexHandler.computeOperations(writer);
assertEquals(operationMap, Collections.EMPTY_MAP);
// TEST8: Enable forward index for a dictionary based column with forward index and inverted index disabled
indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
indexLoadingConfig.removeForwardIndexDisabledColumns(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITHOUT_INV_IDX);
fwdIndexHandler = new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, _schema);
operationMap = fwdIndexHandler.computeOperations(writer);
assertEquals(operationMap, Collections.EMPTY_MAP);
// Tear down
segmentLocalFSDirectory.close();
} |
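The branches in computeOperations above reduce to a small decision table keyed on whether the existing segment and the new config use a dictionary. A minimal sketch of that dispatch, with the metadata-driven checks (ignoreDictionaryOverride, shouldDisableDictionary, shouldChange*CompressionType) stubbed as booleans since they need a real SegmentDirectory:

enum Operation { ENABLE_DICTIONARY, DISABLE_DICTIONARY, CHANGE_INDEX_COMPRESSION_TYPE }

// Sketch only: returns the operation computeOperations would record for one
// column, or null when no rewrite is needed. Predicate results are passed in.
static Operation decide(boolean existingHasDict, boolean newIsDict,
                        boolean enableCheckPasses, boolean disableCheckPasses,
                        boolean compressionChanged) {
    if (!existingHasDict && newIsDict) {
        // existing forward index is RAW, new config is dictionary encoded
        return enableCheckPasses ? Operation.ENABLE_DICTIONARY : null;
    }
    if (existingHasDict && !newIsDict) {
        // existing forward index is dictionary encoded, new config is RAW
        return disableCheckPasses ? Operation.DISABLE_DICTIONARY : null;
    }
    // both RAW or both dictionary encoded: only the compression type may change
    return compressionChanged ? Operation.CHANGE_INDEX_COMPRESSION_TYPE : null;
}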
@SafeVarargs
public static <E> Set<E> intersection(final Supplier<Set<E>> constructor, final Set<E> first, final Set<E>... set) {
final Set<E> result = constructor.get();
result.addAll(first);
for (final Set<E> s : set) {
result.retainAll(s);
}
return result;
} | @Test
public void testIntersection() {
final Set<String> oneSet = mkSet("a", "b", "c");
final Set<String> anotherSet = mkSet("c", "d", "e");
final Set<String> intersection = intersection(TreeSet::new, oneSet, anotherSet);
assertEquals(mkSet("c"), intersection);
assertEquals(TreeSet.class, intersection.getClass());
} |
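A quick usage sketch, assuming a static import of the intersection helper above. Because retainAll is applied once per additional set, the result is the intersection of all arguments, in the iteration order of whatever set the supplier constructs:

import java.util.Arrays;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.Set;

public class IntersectionDemo {
    public static void main(String[] args) {
        Set<Integer> a = new HashSet<>(Arrays.asList(1, 2, 3, 4));
        Set<Integer> b = new HashSet<>(Arrays.asList(2, 3, 4, 5));
        Set<Integer> c = new HashSet<>(Arrays.asList(3, 4, 6));
        // result starts as a copy of the first set; retainAll narrows it per extra set
        Set<Integer> common = intersection(LinkedHashSet::new, a, b, c);
        System.out.println(common); // [3, 4]
    }
}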
public static String unsplit(Object[] splittee, Object splitChar) {
StringBuilder retVal = new StringBuilder();
int count = -1;
while (++count < splittee.length) {
if (splittee[count] != null) {
retVal.append(splittee[count]);
}
if (count + 1 < splittee.length && splittee[count + 1] != null) {
retVal.append(splitChar);
}
}
return retVal.toString();
} | @Test
public void testUnsplit() {
assertEquals("", JOrphanUtils.unsplit(new Object[]{null, null}, 0));
assertEquals("11", JOrphanUtils.unsplit(new Object[]{null, 1}, 1));
assertEquals("-26738698", JOrphanUtils.unsplit(new Object[]{-26_738_698}, 1));
} |
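Two more illustrative calls (a snippet, assuming JOrphanUtils above is on the classpath). The separator is only emitted when the next element is non-null, so a null in the middle collapses without doubling separators:

System.out.println(JOrphanUtils.unsplit(new Object[]{"a", "b", "c"}, "-"));  // a-b-c
System.out.println(JOrphanUtils.unsplit(new Object[]{"a", null, "c"}, "-")); // a-c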
@VisibleForTesting
public static Properties buildSyncConfig(Configuration conf) {
TypedProperties props = StreamerUtil.flinkConf2TypedProperties(conf);
props.setPropertyIfNonNull(META_SYNC_BASE_PATH.key(), conf.getString(FlinkOptions.PATH));
props.setPropertyIfNonNull(META_SYNC_BASE_FILE_FORMAT.key(), conf.getString(FlinkOptions.HIVE_SYNC_FILE_FORMAT));
props.setPropertyIfNonNull(HIVE_USE_PRE_APACHE_INPUT_FORMAT.key(), "false");
props.setPropertyIfNonNull(META_SYNC_DATABASE_NAME.key(), conf.getString(FlinkOptions.HIVE_SYNC_DB));
props.setPropertyIfNonNull(META_SYNC_TABLE_NAME.key(), conf.getString(FlinkOptions.HIVE_SYNC_TABLE));
props.setPropertyIfNonNull(HIVE_SYNC_MODE.key(), conf.getString(FlinkOptions.HIVE_SYNC_MODE));
props.setPropertyIfNonNull(HIVE_USER.key(), conf.getString(FlinkOptions.HIVE_SYNC_USERNAME));
props.setPropertyIfNonNull(HIVE_PASS.key(), conf.getString(FlinkOptions.HIVE_SYNC_PASSWORD));
props.setPropertyIfNonNull(HIVE_URL.key(), conf.getString(FlinkOptions.HIVE_SYNC_JDBC_URL));
props.setPropertyIfNonNull(METASTORE_URIS.key(), conf.getString(FlinkOptions.HIVE_SYNC_METASTORE_URIS));
props.setPropertyIfNonNull(HIVE_TABLE_PROPERTIES.key(), conf.getString(FlinkOptions.HIVE_SYNC_TABLE_PROPERTIES));
props.setPropertyIfNonNull(HIVE_TABLE_SERDE_PROPERTIES.key(), conf.getString(FlinkOptions.HIVE_SYNC_TABLE_SERDE_PROPERTIES));
props.setPropertyIfNonNull(META_SYNC_PARTITION_FIELDS.key(), String.join(",", FilePathUtils.extractHivePartitionFields(conf)));
props.setPropertyIfNonNull(META_SYNC_PARTITION_EXTRACTOR_CLASS.key(), conf.getString(FlinkOptions.HIVE_SYNC_PARTITION_EXTRACTOR_CLASS_NAME));
props.setPropertyIfNonNull(HIVE_USE_JDBC.key(), String.valueOf(conf.getBoolean(FlinkOptions.HIVE_SYNC_USE_JDBC)));
props.setPropertyIfNonNull(META_SYNC_USE_FILE_LISTING_FROM_METADATA.key(), String.valueOf(conf.getBoolean(FlinkOptions.METADATA_ENABLED)));
props.setPropertyIfNonNull(HIVE_IGNORE_EXCEPTIONS.key(), String.valueOf(conf.getBoolean(FlinkOptions.HIVE_SYNC_IGNORE_EXCEPTIONS)));
props.setPropertyIfNonNull(HIVE_SUPPORT_TIMESTAMP_TYPE.key(), String.valueOf(conf.getBoolean(FlinkOptions.HIVE_SYNC_SUPPORT_TIMESTAMP)));
props.setPropertyIfNonNull(HIVE_AUTO_CREATE_DATABASE.key(), String.valueOf(conf.getBoolean(FlinkOptions.HIVE_SYNC_AUTO_CREATE_DB)));
props.setPropertyIfNonNull(META_SYNC_DECODE_PARTITION.key(), String.valueOf(conf.getBoolean(FlinkOptions.URL_ENCODE_PARTITIONING)));
props.setPropertyIfNonNull(HIVE_SKIP_RO_SUFFIX_FOR_READ_OPTIMIZED_TABLE.key(), String.valueOf(conf.getBoolean(FlinkOptions.HIVE_SYNC_SKIP_RO_SUFFIX)));
props.setPropertyIfNonNull(HIVE_SYNC_TABLE_STRATEGY.key(), String.valueOf(conf.getString(FlinkOptions.HIVE_SYNC_TABLE_STRATEGY)));
return props;
} | @Test
void testOptionWithoutShortcutKey() {
Configuration configuration3 = new Configuration();
configuration3.setBoolean(HiveSyncConfig.HIVE_CREATE_MANAGED_TABLE.key(), true);
Properties props3 = HiveSyncContext.buildSyncConfig(configuration3);
assertTrue(Boolean.parseBoolean(props3.getProperty(HiveSyncConfig.HIVE_CREATE_MANAGED_TABLE.key(), "false")));
} |
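buildSyncConfig above is essentially a null-safe copy from Flink Configuration keys into sync properties. A minimal standalone sketch of the setPropertyIfNonNull pattern it relies on (the key names here are illustrative, not the real Hudi constants):

import java.util.Properties;

public class SyncPropsSketch {
    // Mirrors TypedProperties#setPropertyIfNonNull: unset options are skipped
    // entirely instead of being written as literal "null" strings.
    static void setPropertyIfNonNull(Properties props, String key, Object value) {
        if (value != null) {
            props.setProperty(key, String.valueOf(value));
        }
    }

    public static void main(String[] args) {
        Properties props = new Properties();
        setPropertyIfNonNull(props, "sync.database", "default");
        setPropertyIfNonNull(props, "sync.table", null); // skipped
        System.out.println(props); // {sync.database=default}
    }
}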
@Override
public Float getFloat(K name) {
return null;
} | @Test
public void testGetFloatDefault() {
assertEquals(1, HEADERS.getFloat("name1", 1), 0);
} |
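For context on why the test expects 1: in this header hierarchy the two-argument overload typically falls back to a caller-supplied default when the single-argument lookup returns null. A hedged sketch of that convention (the overload itself is not shown in the source):

// Presumed default-fallback overload; getFloat(name) is always null for empty headers.
public float getFloat(K name, float defaultValue) {
    Float v = getFloat(name);
    return v != null ? v : defaultValue;
}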
public static Row of(
final LogicalSchema schema,
final GenericKey key,
final GenericRow value,
final long rowTime
) {
return new Row(schema, key, value, rowTime, TableRowValidation::validate);
} | @SuppressWarnings("UnstableApiUsage")
@Test
public void shouldImplementEquals() {
final LogicalSchema differentSchema = LogicalSchema.builder()
.keyColumn(ColumnName.of("k0"), SqlTypes.STRING)
.keyColumn(ColumnName.of("k1"), SqlTypes.INTEGER)
.valueColumn(ColumnName.of("diff0"), SqlTypes.STRING)
.valueColumn(ColumnName.of("diff1"), SqlTypes.DOUBLE)
.build();
new EqualsTester()
.addEqualityGroup(
Row.of(SCHEMA, A_KEY, A_VALUE, A_ROWTIME),
Row.of(SCHEMA, A_KEY, A_VALUE, A_ROWTIME)
)
.addEqualityGroup(
Row.of(differentSchema, A_KEY, A_VALUE, A_ROWTIME)
)
.addEqualityGroup(
Row.of(SCHEMA, GenericKey.genericKey("diff", 11), A_VALUE, A_ROWTIME)
)
.addEqualityGroup(
Row.of(SCHEMA, A_KEY, GenericRow.genericRow(null, null), A_ROWTIME)
)
.addEqualityGroup(
Row.of(SCHEMA, A_KEY, A_VALUE, -1L)
)
.testEquals();
} |
public long next() {
long current = this.next;
if (current < max) {
this.next = Math.min(this.next * 2, this.max);
}
// Check for mandatory stop
if (!mandatoryStopMade) {
long now = clock.millis();
long timeElapsedSinceFirstBackoff = 0;
if (initial == current) {
firstBackoffTimeInMillis = now;
} else {
timeElapsedSinceFirstBackoff = now - firstBackoffTimeInMillis;
}
if (timeElapsedSinceFirstBackoff + current > mandatoryStop) {
current = Math.max(initial, mandatoryStop - timeElapsedSinceFirstBackoff);
mandatoryStopMade = true;
}
}
// Randomly decrease the timeout up to 10% to avoid simultaneous retries
// If current < 10 then current/10 < 1 and we get an exception from Random saying "Bound must be positive"
if (current > 10) {
current -= random.nextInt((int) current / 10);
}
return Math.max(initial, current);
} | @Test
public void mandatoryStopTestNegativeTest() {
Backoff backoff = new Backoff(100, TimeUnit.MILLISECONDS, 60, TimeUnit.SECONDS, 1900, TimeUnit.MILLISECONDS);
assertEquals(backoff.next(), 100);
backoff.next(); // 200
backoff.next(); // 400
backoff.next(); // 800
assertFalse(withinTenPercentAndDecrementTimer(backoff, 400));
} |
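Stripping out the mandatory-stop clock logic, the core of next() is doubling capped at max, with up to 10% random jitter subtracted and a floor at the initial value. A self-contained sketch of just that part:

import java.util.Random;

public class BackoffSketch {
    public static void main(String[] args) {
        long initial = 100, max = 60_000, next = initial;
        Random random = new Random();
        for (int i = 0; i < 5; i++) {
            long current = next;
            next = Math.min(next * 2, max); // exponential growth, capped at max
            if (current > 10) {
                current -= random.nextInt((int) (current / 10)); // up to 10% jitter
            }
            System.out.println(Math.max(initial, current));
        }
        // First value is always 100 (jitter cannot drop below initial),
        // then values within 10% below 200, 400, 800, 1600.
    }
}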
public List<InputSplit> getSplits(JobContext job) throws IOException {
StopWatch sw = new StopWatch().start();
long minSize = Math.max(getFormatMinSplitSize(), getMinSplitSize(job));
long maxSize = getMaxSplitSize(job);
// generate splits
List<InputSplit> splits = new ArrayList<InputSplit>();
List<FileStatus> files = listStatus(job);
boolean ignoreDirs = !getInputDirRecursive(job)
&& job.getConfiguration().getBoolean(INPUT_DIR_NONRECURSIVE_IGNORE_SUBDIRS, false);
for (FileStatus file: files) {
if (ignoreDirs && file.isDirectory()) {
continue;
}
Path path = file.getPath();
long length = file.getLen();
if (length != 0) {
BlockLocation[] blkLocations;
if (file instanceof LocatedFileStatus) {
blkLocations = ((LocatedFileStatus) file).getBlockLocations();
} else {
FileSystem fs = path.getFileSystem(job.getConfiguration());
blkLocations = fs.getFileBlockLocations(file, 0, length);
}
if (isSplitable(job, path)) {
long blockSize = file.getBlockSize();
long splitSize = computeSplitSize(blockSize, minSize, maxSize);
long bytesRemaining = length;
while (((double) bytesRemaining)/splitSize > SPLIT_SLOP) {
int blkIndex = getBlockIndex(blkLocations, length-bytesRemaining);
splits.add(makeSplit(path, length-bytesRemaining, splitSize,
blkLocations[blkIndex].getHosts(),
blkLocations[blkIndex].getCachedHosts()));
bytesRemaining -= splitSize;
}
if (bytesRemaining != 0) {
int blkIndex = getBlockIndex(blkLocations, length-bytesRemaining);
splits.add(makeSplit(path, length-bytesRemaining, bytesRemaining,
blkLocations[blkIndex].getHosts(),
blkLocations[blkIndex].getCachedHosts()));
}
} else { // not splitable
if (LOG.isDebugEnabled()) {
// Log only if the file is big enough to be split
if (length > Math.min(file.getBlockSize(), minSize)) {
LOG.debug("File is not splittable so no parallelization "
+ "is possible: " + file.getPath());
}
}
splits.add(makeSplit(path, 0, length, blkLocations[0].getHosts(),
blkLocations[0].getCachedHosts()));
}
} else {
//Create empty hosts array for zero length files
splits.add(makeSplit(path, 0, length, new String[0]));
}
}
// Save the number of input files for metrics/loadgen
job.getConfiguration().setLong(NUM_INPUT_FILES, files.size());
sw.stop();
if (LOG.isDebugEnabled()) {
LOG.debug("Total # of splits generated by getSplits: " + splits.size()
+ ", TimeTaken: " + sw.now(TimeUnit.MILLISECONDS));
}
return splits;
} | @Test
public void testListLocatedStatus() throws Exception {
Configuration conf = getConfiguration();
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS, numThreads);
conf.setBoolean("fs.test.impl.disable.cache", false);
conf.set(FileInputFormat.INPUT_DIR, "test:///a1/a2");
MockFileSystem mockFs =
(MockFileSystem) new Path("test:///").getFileSystem(conf);
Assert.assertEquals("listLocatedStatus already called",
0, mockFs.numListLocatedStatusCalls);
Job job = Job.getInstance(conf);
FileInputFormat<?, ?> fileInputFormat = new TextInputFormat();
List<InputSplit> splits = fileInputFormat.getSplits(job);
Assert.assertEquals("Input splits are not correct", 2, splits.size());
Assert.assertEquals("listLocatedStatuss calls",
1, mockFs.numListLocatedStatusCalls);
FileSystem.closeAll();
} |
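The split arithmetic above is easy to check by hand: computeSplitSize is Math.max(minSize, Math.min(maxSize, blockSize)), and the SPLIT_SLOP of 1.1 lets a tail of up to 10% over splitSize ride along in the final split. A worked example:

public class SplitSizeDemo {
    public static void main(String[] args) {
        long blockSize = 128L << 20, minSize = 1, maxSize = Long.MAX_VALUE;
        long splitSize = Math.max(minSize, Math.min(maxSize, blockSize)); // 128 MB
        long length = 300L << 20; // a 300 MB file
        double SPLIT_SLOP = 1.1;  // same constant as in FileInputFormat
        long bytesRemaining = length;
        while (((double) bytesRemaining) / splitSize > SPLIT_SLOP) {
            System.out.printf("split @%d, size %d%n", length - bytesRemaining, splitSize);
            bytesRemaining -= splitSize;
        }
        if (bytesRemaining != 0) {
            System.out.printf("split @%d, size %d%n", length - bytesRemaining, bytesRemaining);
        }
        // Prints three splits: 128 MB, 128 MB, then a 44 MB tail,
        // because 44/128 = 0.34 is under the 1.1 slop threshold.
    }
}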
@Override
public boolean equals(Object object)
{
if (this == object)
{
return true;
}
if (object == null || getClass() != object.getClass())
{
return false;
}
UpdateResponse that = (UpdateResponse) object;
return _status == that._status;
} | @Test(dataProvider = "testEqualsDataProvider")
public void testEquals(boolean shouldEquals,
    @Nonnull UpdateResponse updateResponse,
    @Nullable Object compareObject)
{
assertEquals(updateResponse.equals(compareObject), shouldEquals);
} |
public DoubleArrayAsIterable usingExactEquality() {
return new DoubleArrayAsIterable(EXACT_EQUALITY_CORRESPONDENCE, iterableSubject());
} | @Test
public void usingExactEquality_containsExactly_primitiveDoubleArray_inOrder_success() {
assertThat(array(1.1, 2.2, 3.3))
.usingExactEquality()
.containsExactly(array(1.1, 2.2, 3.3))
.inOrder();
} |
public static void w(String tag, String message, Object... args) {
sLogger.w(tag, message, args);
} | @Test
public void warningWithThrowable() {
String tag = "TestTag";
String message = "Test message";
Throwable t = new Throwable();
LogManager.w(t, tag, message);
verify(logger).w(t, tag, message);
} |
@CanIgnoreReturnValue
public final Ordered containsExactlyElementsIn(@Nullable Iterable<?> expected) {
return containsExactlyElementsIn(expected, false);
} | @Test
@SuppressWarnings("ContainsExactlyElementsInWithVarArgsToExactly")
public void iterableContainsExactlyElementsInWithOneIterableDoesNotGiveWarning() {
expectFailureWhenTestingThat(asList(1, 2, 3, 4)).containsExactlyElementsIn(asList(1, 2, 3));
assertFailureValue("unexpected (1)", "4");
} |
public static <T> CompressedSerializedValue<T> fromObject(T object) throws IOException {
Preconditions.checkNotNull(object, "Value must not be null");
return new CompressedSerializedValue<>(object);
} | @Test
void testNullValue() {
assertThatThrownBy(() -> CompressedSerializedValue.fromObject(null))
.isInstanceOf(NullPointerException.class);
} |
public static DateTime parseUTC(String utcString) {
if (utcString == null) {
return null;
}
final int length = utcString.length();
if (StrUtil.contains(utcString, 'Z')) {
if (length == DatePattern.UTC_PATTERN.length() - 4) {
// Format like: 2018-09-13T05:34:31Z; the -4 subtracts the length of the 4 single quotes in the pattern
return parse(utcString, DatePattern.UTC_FORMAT);
}
final int patternLength = DatePattern.UTC_MS_PATTERN.length();
// Format like: 2018-09-13T05:34:31.999Z; the -4 subtracts the length of the 4 single quotes in the pattern
// The -4 ~ -6 range matches milliseconds with 1 to 3 digits
if (length <= patternLength && length >= patternLength - 6) {
// issue#I7H34N, support up to 6 millisecond digits
return parse(utcString, DatePattern.UTC_MS_FORMAT);
}
} else if (StrUtil.contains(utcString, '+')) {
// Remove the space before the plus sign, e.g. in 2019-06-01T19:45:43 +08:00
utcString = utcString.replace(" +", "+");
final String zoneOffset = StrUtil.subAfter(utcString, '+', true);
if (StrUtil.isBlank(zoneOffset)) {
throw new DateException("Invalid format: [{}]", utcString);
}
if (false == StrUtil.contains(zoneOffset, ':')) {
// Convert +0800 to +08:00
final String pre = StrUtil.subBefore(utcString, '+', true);
utcString = pre + "+" + zoneOffset.substring(0, 2) + ":" + "00";
}
if (StrUtil.contains(utcString, CharUtil.DOT)) {
// With milliseconds, format like: 2018-09-13T05:34:31.999+08:00
utcString = normalizeMillSeconds(utcString, ".", "+");
return parse(utcString, DatePattern.UTC_MS_WITH_XXX_OFFSET_FORMAT);
} else {
// Format like: 2018-09-13T05:34:31+08:00
return parse(utcString, DatePattern.UTC_WITH_XXX_OFFSET_FORMAT);
}
} else if(ReUtil.contains("-\\d{2}:?00", utcString)){
// Issue#2612, handles formats like 2022-09-14T23:59:00-08:00 or 2022-09-14T23:59:00-0800
// Remove the space before the minus sign, e.g. in 2019-06-01T19:45:43 -08:00
utcString = utcString.replace(" -", "-");
if(':' != utcString.charAt(utcString.length() - 3)){
utcString = utcString.substring(0, utcString.length() - 2) + ":00";
}
if (StrUtil.contains(utcString, CharUtil.DOT)) {
// With milliseconds, format like: 2018-09-13T05:34:31.999-08:00
utcString = normalizeMillSeconds(utcString, ".", "-");
return new DateTime(utcString, DatePattern.UTC_MS_WITH_XXX_OFFSET_FORMAT);
} else {
// Format like: 2018-09-13T05:34:31-08:00
return new DateTime(utcString, DatePattern.UTC_WITH_XXX_OFFSET_FORMAT);
}
} else {
if (length == DatePattern.UTC_SIMPLE_PATTERN.length() - 2) {
// Format like: 2018-09-13T05:34:31
return parse(utcString, DatePattern.UTC_SIMPLE_FORMAT);
} else if (length == DatePattern.UTC_SIMPLE_PATTERN.length() - 5) {
// Format like: 2018-09-13T05:34
return parse(utcString + ":00", DatePattern.UTC_SIMPLE_FORMAT);
} else if (StrUtil.contains(utcString, CharUtil.DOT)) {
// Possibly: 2021-03-17T06:31:33.99
utcString = normalizeMillSeconds(utcString, ".", null);
return parse(utcString, DatePattern.UTC_SIMPLE_MS_FORMAT);
}
}
// No remaining date format matches
throw new DateException("No format fit for date String [{}] !", utcString);
} | @Test
public void parseUTCTest2() {
// issue1503@Github
// Verify that different millisecond lengths all parse correctly
String utcTime = "2021-03-30T12:56:51.3Z";
DateTime parse = DateUtil.parseUTC(utcTime);
assertEquals("2021-03-30 12:56:51", parse.toString());
utcTime = "2021-03-30T12:56:51.34Z";
parse = DateUtil.parseUTC(utcTime);
assertEquals("2021-03-30 12:56:51", parse.toString());
utcTime = "2021-03-30T12:56:51.345Z";
parse = DateUtil.parseUTC(utcTime);
assertEquals("2021-03-30 12:56:51", parse.toString());
} |
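A few more input shapes the branches above accept, for quick reference (a snippet, assuming Hutool's DateUtil and DateTime imports; every call goes through the same parseUTC entry point):

DateTime z = DateUtil.parseUTC("2018-09-13T05:34:31Z");        // 'Z' branch, no millis
DateTime ms = DateUtil.parseUTC("2018-09-13T05:34:31.999Z");   // 'Z' branch, 1-6 ms digits
DateTime plus = DateUtil.parseUTC("2018-09-13T05:34:31+0800"); // offset normalized to +08:00
DateTime simple = DateUtil.parseUTC("2018-09-13T05:34");       // ":00" seconds appended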
protected GelfMessage toGELFMessage(final Message message) {
final DateTime timestamp;
final Object fieldTimeStamp = message.getField(Message.FIELD_TIMESTAMP);
if (fieldTimeStamp instanceof DateTime) {
timestamp = (DateTime) fieldTimeStamp;
} else {
timestamp = Tools.nowUTC();
}
final GelfMessageLevel messageLevel = extractLevel(message.getField(Message.FIELD_LEVEL));
final String fullMessage = (String) message.getField(Message.FIELD_FULL_MESSAGE);
final String forwarder = GelfOutput.class.getCanonicalName();
final GelfMessageBuilder builder = new GelfMessageBuilder(message.getMessage(), message.getSource())
.timestamp(timestamp.getMillis() / 1000.0d)
.additionalField("_forwarder", forwarder)
.additionalFields(message.getFields());
if (messageLevel != null) {
builder.level(messageLevel);
}
if (fullMessage != null) {
builder.fullMessage(fullMessage);
}
return builder.build();
} | @Test
public void testToGELFMessageWithNullLevel() throws Exception {
final GelfTransport transport = mock(GelfTransport.class);
final GelfOutput gelfOutput = new GelfOutput(transport);
final DateTime now = DateTime.now(DateTimeZone.UTC);
final Message message = messageFactory.createMessage("Test", "Source", now);
message.addField("level", null);
final GelfMessage gelfMessage = gelfOutput.toGELFMessage(message);
assertEquals(GelfMessageLevel.ALERT, gelfMessage.getLevel());
} |
public static void checkGreaterOrEqual(
long value1,
String value1Name,
long value2,
String value2Name) {
checkArgument(
value1 >= value2,
"'%s' (%s) must be greater than or equal to '%s' (%s).",
value1Name,
value1,
value2Name,
value2);
} | @Test
public void testCheckGreaterOrEqual() throws Exception {
// Should not throw.
Validate.checkGreaterOrEqual(10, "arg1", 5, "arg2");
// Verify it throws.
intercept(IllegalArgumentException.class,
"'arg1' (5) must be greater than or equal to 'arg2' (10)",
() -> Validate.checkGreaterOrEqual(5, "arg1", 10, "arg2"));
} |
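Two quick usage calls (a snippet, assuming the Validate class above is importable); the failure message interpolates both names and both values, and equality passes the check:

Validate.checkGreaterOrEqual(7, "capacity", 7, "minimum"); // passes: >= allows equality
Validate.checkGreaterOrEqual(3, "capacity", 7, "minimum");
// throws IllegalArgumentException: 'capacity' (3) must be greater than or equal to 'minimum' (7).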
@GetMapping("")
public AdminResult<CommonPager<SelectorVO>> querySelectors(final String pluginId, final String name,
@RequestParam @NotNull final Integer currentPage,
@RequestParam @NotNull final Integer pageSize,
@Valid @Existed(message = "namespaceId is not existed",
provider = NamespaceMapper.class) final String namespaceId
) {
final SelectorQueryCondition condition = new SelectorQueryCondition();
condition.setUserId(SessionUtil.visitor().getUserId());
condition.setPlugin(ListUtil.of(pluginId));
condition.setKeyword(name);
condition.setNamespaceId(namespaceId);
return searchAdaptor(new PageCondition<>(currentPage, pageSize, condition));
} | @Test
public void querySelectors() throws Exception {
given(this.selectorService.searchByPageToPager(any())).willReturn(commonPager);
String urlTemplate = "/selector?pluginId={pluginId}&name={name}¤tPage={currentPage}&pageSize={pageSize}";
this.mockMvc.perform(MockMvcRequestBuilders.get(urlTemplate, "2", "selector-1", 1, 12))
.andExpect(status().isOk())
.andExpect(jsonPath("$.message", is(ShenyuResultMessage.QUERY_SUCCESS)))
.andExpect(jsonPath("$.data.dataList[0].id", is(selectorVO.getId())))
.andReturn();
} |
public <T extends Tuple> DataSource<T> tupleType(Class<T> targetType) {
Preconditions.checkNotNull(targetType, "The target type class must not be null.");
if (!Tuple.class.isAssignableFrom(targetType)) {
throw new IllegalArgumentException(
"The target type must be a subclass of " + Tuple.class.getName());
}
@SuppressWarnings("unchecked")
TupleTypeInfo<T> typeInfo = (TupleTypeInfo<T>) TypeExtractor.createTypeInfo(targetType);
CsvInputFormat<T> inputFormat =
new TupleCsvInputFormat<T>(
path, this.lineDelimiter, this.fieldDelimiter, typeInfo, this.includedMask);
Class<?>[] classes = new Class<?>[typeInfo.getArity()];
for (int i = 0; i < typeInfo.getArity(); i++) {
classes[i] = typeInfo.getTypeAt(i).getTypeClass();
}
configureInputFormat(inputFormat);
return new DataSource<T>(
executionContext, inputFormat, typeInfo, Utils.getCallLocationName());
} | @Test
void testSubClass() {
CsvReader reader = getCsvReader();
DataSource<SubItem> sitems = reader.tupleType(SubItem.class);
TypeInformation<?> info = sitems.getType();
assertThat(info.isTupleType()).isTrue();
assertThat(info.getTypeClass()).isEqualTo(SubItem.class);
@SuppressWarnings("unchecked")
TupleTypeInfo<SubItem> tinfo = (TupleTypeInfo<SubItem>) info;
assertThat(tinfo.getTypeAt(0)).isEqualTo(BasicTypeInfo.INT_TYPE_INFO);
assertThat(tinfo.getTypeAt(1)).isEqualTo(BasicTypeInfo.STRING_TYPE_INFO);
assertThat(tinfo.getTypeAt(2)).isEqualTo(BasicTypeInfo.DOUBLE_TYPE_INFO);
assertThat(tinfo.getTypeAt(3)).isEqualTo(BasicTypeInfo.STRING_TYPE_INFO);
CsvInputFormat<?> inputFormat = (CsvInputFormat<?>) sitems.getInputFormat();
assertThat(inputFormat.getFieldTypes())
.containsExactly(Integer.class, String.class, Double.class, String.class);
} |
public NodeModel pathEnd(NodeModel commonAncestor) {
return relativeNode(commonAncestor, endPath, endPath.length);
} | @Test
public void zeroLevelEnd(){
final NodeModel parent = root();
final NodeRelativePath nodeRelativePath = new NodeRelativePath(parent, parent);
final NodeModel startingPoint = new NodeModel("startingPoint", map);
assertThat(nodeRelativePath.pathEnd(startingPoint), equalTo(startingPoint));
} |
String createPermalink(SinglePage page) {
var permalink = encodePath(page.getSpec().getSlug(), UTF_8);
permalink = StringUtils.prependIfMissing(permalink, "/");
return externalUrlSupplier.get().resolve(permalink).normalize().toString();
} | @Test
void createPermalink() {
SinglePage page = pageV1();
page.getSpec().setSlug("page-slug");
when(externalUrlSupplier.get()).thenReturn(URI.create(""));
String permalink = singlePageReconciler.createPermalink(page);
assertThat(permalink).isEqualTo("/page-slug");
when(externalUrlSupplier.get()).thenReturn(URI.create("http://example.com"));
permalink = singlePageReconciler.createPermalink(page);
assertThat(permalink).isEqualTo("http://example.com/page-slug");
page.getSpec().setSlug("中文 slug");
permalink = singlePageReconciler.createPermalink(page);
assertThat(permalink).isEqualTo("http://example.com/%E4%B8%AD%E6%96%87%20slug");
} |
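The non-ASCII case in the test works because encodePath percent-encodes the slug's UTF-8 bytes (space becomes %20, not '+'). A standalone sketch under that assumption, with a hypothetical stand-in for the encodePath(slug, UTF_8) call and java.net.URI for the resolve step:

import java.net.URI;
import java.nio.charset.StandardCharsets;

public class PermalinkSketch {
    // Hypothetical stand-in: percent-encodes everything but unreserved ASCII.
    static String encodePath(String path) {
        StringBuilder sb = new StringBuilder();
        for (byte b : path.getBytes(StandardCharsets.UTF_8)) {
            char c = (char) (b & 0xff);
            boolean unreserved = (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
                    || (c >= '0' && c <= '9') || "-._~".indexOf(c) >= 0;
            sb.append(unreserved ? String.valueOf(c) : String.format("%%%02X", b & 0xff));
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        String permalink = "/" + encodePath("中文 slug");
        System.out.println(URI.create("http://example.com").resolve(permalink).normalize());
        // http://example.com/%E4%B8%AD%E6%96%87%20slug
    }
}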
@Override
public List<MultipartUpload> find(final Path file) throws BackgroundException {
if(log.isDebugEnabled()) {
log.debug(String.format("Finding multipart uploads for %s", file));
}
final List<MultipartUpload> uploads = new ArrayList<>();
// This operation lists in-progress multipart uploads. An in-progress multipart upload is a
// multipart upload that has been initiated, using the Initiate Multipart Upload request, but has
// not yet been completed or aborted.
String nextUploadIdMarker = null;
String nextKeyMarker = null;
boolean isTruncated;
do {
final Path bucket = containerService.getContainer(file);
final MultipartUploadChunk chunk;
try {
chunk = session.getClient().multipartListUploadsChunked(
bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file),
String.valueOf(Path.DELIMITER), nextKeyMarker, nextUploadIdMarker, null, false);
}
catch(S3ServiceException e) {
final BackgroundException failure = new S3ExceptionMappingService().map("Upload {0} failed", e, file);
if(failure instanceof NotfoundException) {
return Collections.emptyList();
}
if(failure instanceof InteroperabilityException) {
return Collections.emptyList();
}
throw failure;
}
uploads.addAll(Arrays.asList(chunk.getUploads()));
if(log.isInfoEnabled()) {
log.info(String.format("Found %d previous multipart uploads for %s", uploads.size(), file));
}
// Sort with newest upload first in list
uploads.sort(new Comparator<MultipartUpload>() {
@Override
public int compare(final MultipartUpload o1, final MultipartUpload o2) {
return -o1.getInitiatedDate().compareTo(o2.getInitiatedDate());
}
});
nextKeyMarker = chunk.getPriorLastKey();
nextUploadIdMarker = chunk.getPriorLastIdMarker();
isTruncated = !chunk.isListingComplete();
}
while(isTruncated && nextUploadIdMarker != null);
if(log.isInfoEnabled()) {
for(MultipartUpload upload : uploads) {
log.info(String.format("Found multipart upload %s for %s", upload, file));
}
}
return uploads;
} | @Test
public void testFindNotFound() throws Exception {
final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
final Path test = new Path(container, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
final List<MultipartUpload> list = new S3DefaultMultipartService(session).find(test);
assertTrue(list.isEmpty());
} |
@Override
public void generateError(Response response) throws IOException {
final JsonNode errorNode;
try {
errorNode = OAuth2AccessTokenJsonExtractor.OBJECT_MAPPER.readTree(response.getBody()).get("errors").get(0);
} catch (JsonProcessingException ex) {
throw new OAuth2AccessTokenErrorResponse(null, null, null, response);
}
OAuth2Error errorCode;
try {
errorCode = OAuth2Error
.parseFrom(extractRequiredParameter(errorNode, "errorType", response.getBody()).asText());
} catch (IllegalArgumentException iaE) {
// not an OAuth-standard error code
errorCode = null;
}
throw new OAuth2AccessTokenErrorResponse(errorCode, errorNode.get("message").asText(), null, response);
} | @Test
public void testErrorExtraction() throws IOException {
final FitBitJsonTokenExtractor extractor = new FitBitJsonTokenExtractor();
final OAuth2AccessTokenErrorResponse thrown = assertThrows(OAuth2AccessTokenErrorResponse.class,
new ThrowingRunnable() {
@Override
public void run() throws Throwable {
extractor.generateError(new Response(403, null, null, ERROR_JSON));
}
});
assertSame(OAuth2Error.INVALID_GRANT, thrown.getError());
assertEquals(ERROR_DESCRIPTION, thrown.getErrorDescription());
} |
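The happy-path extraction the method performs can be exercised directly with Jackson; the payload below is a hypothetical FitBit-style error body shaped like the fields the extractor reads:

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

public class FitBitErrorSketch {
    public static void main(String[] args) throws Exception {
        String body = "{\"errors\":[{\"errorType\":\"invalid_grant\","
                + "\"message\":\"Authorization code invalid\"}],\"success\":false}";
        // Same navigation as generateError: root -> "errors" array -> first element
        JsonNode errorNode = new ObjectMapper().readTree(body).get("errors").get(0);
        System.out.println(errorNode.get("errorType").asText()); // invalid_grant
        System.out.println(errorNode.get("message").asText());   // Authorization code invalid
    }
}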
public final TypeRef<? extends T> getSubtype(Class<?> subclass) {
if (type instanceof WildcardType) {
Type[] lowerBounds = ((WildcardType) type).getLowerBounds();
if (lowerBounds.length > 0) {
TypeRef<? extends T> bound = of(lowerBounds[0]);
// Java supports only one lower bound anyway.
return bound.getSubtype(subclass);
}
throw new IllegalArgumentException(subclass + " isn't a subclass of " + this);
}
Type componentType = getComponentType(type);
if (componentType != null) {
Class<?> subclassComponentType = subclass.getComponentType();
if (subclassComponentType == null) {
throw new IllegalArgumentException(
subclass + " does not appear to be a subtype of " + this);
}
// array is covariant. component type is subtype, so is the array type.
// requireNonNull is safe because we call getArraySubtype only when isArray().
TypeRef<?> componentSubtype = of(componentType).getSubtype(subclassComponentType);
// If we are passed with int[].class, don't turn it to GenericArrayType
return of(newArrayType(componentSubtype.type));
}
Class<? super T> rawType = getRawType();
if (!rawType.isAssignableFrom(subclass)) {
throw new IllegalArgumentException(subclass + " isn't a subclass of " + this);
}
// If both runtimeType and subclass are not parameterized, return subclass
// If runtimeType is not parameterized but subclass is, process subclass as a parameterized type
// If runtimeType is a raw type (i.e. is a parameterized type specified as a Class<?>), we
// return subclass as a raw type
if (type instanceof Class
&& ((subclass.getTypeParameters().length == 0)
|| (rawType.getTypeParameters().length != 0))) {
// no resolution needed
@SuppressWarnings({"unchecked"}) // subclass isn't <? extends T>
TypeRef<? extends T> result = (TypeRef<? extends T>) of(subclass);
return result;
}
// class Base<A, B> {}
// class Sub<X, Y> extends Base<X, Y> {}
// Base<String, Integer>.subtype(Sub.class):
// Sub<X, Y>.getSupertype(Base.class) => Base<X, Y>
// => X=String, Y=Integer
// => Sub<X, Y>=Sub<String, Integer>
TypeRef<?> genericSubtype = of(toGenericType(subclass));
@SuppressWarnings({"rawtypes", "unchecked"}) // subclass isn't <? extends T>
Type supertypeWithArgsFromSubtype = genericSubtype.getSupertype((Class) rawType).type;
if (genericSubtype.type instanceof WildcardType) {
@SuppressWarnings({"unchecked"}) // subclass isn't <? extends T>
TypeRef<? extends T> result = (TypeRef<? extends T>) genericSubtype;
return result;
}
Map<TypeVariableKey, Type> mappings = new HashMap<>();
populateTypeMappings(mappings, supertypeWithArgsFromSubtype, type);
return (TypeRef<? extends T>) resolveType0(genericSubtype.type, mappings);
} | @Test
public void testGetSubtype() {
// For issue: https://github.com/apache/fury/issues/1604
TypeRef<? extends Map<String, Object>> typeRef =
TypeUtils.mapOf(MapObject.class, String.class, Object.class);
assertEquals(typeRef, TypeRef.of(MapObject.class));
assertEquals(
TypeUtils.mapOf(Map.class, String.class, Object.class),
new TypeRef<Map<String, Object>>() {});
} |