focal_method | test_case
---|---|
public static SerdeFeatures buildValueFeatures(
final LogicalSchema schema,
final Format valueFormat,
final SerdeFeatures explicitFeatures,
final KsqlConfig ksqlConfig
) {
final boolean singleColumn = schema.value().size() == 1;
final ImmutableSet.Builder<SerdeFeature> builder = ImmutableSet.builder();
getValueWrapping(singleColumn, valueFormat, explicitFeatures, ksqlConfig)
.ifPresent(builder::add);
return SerdeFeatures.from(builder.build());
} | @Test
public void shouldDefaultToNoSingleValueWrappingIfNoExplicitAndNoConfigDefault() {
// When:
final SerdeFeatures result = SerdeFeaturesFactory.buildValueFeatures(
SINGLE_FIELD_SCHEMA,
JSON,
SerdeFeatures.of(),
ksqlConfig
);
// Then:
assertThat(result.findAny(SerdeFeatures.WRAPPING_FEATURES), is(Optional.empty()));
} |
List<Transfer> getTransfersToStop(String toStopId, String toRouteId) {
final List<Transfer> allInboundTransfers = transfersToStop.getOrDefault(toStopId, Collections.emptyList());
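// Keep GTFS transfer_type 0 (recommended) and 2 (minimum time required) rules that apply to any route or to the requested route, grouped by origin stop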
final Map<String, List<Transfer>> byFromStop = allInboundTransfers.stream()
.filter(t -> t.transfer_type == 0 || t.transfer_type == 2)
.filter(t -> t.to_route_id == null || toRouteId.equals(t.to_route_id))
.collect(Collectors.groupingBy(t -> t.from_stop_id));
final List<Transfer> result = new ArrayList<>();
byFromStop.forEach((fromStop, transfers) -> {
if (hasNoRouteSpecificArrivalTransferRules(fromStop)) {
Transfer myRule = new Transfer();
myRule.from_stop_id = fromStop;
myRule.to_stop_id = toStopId;
if (transfers.size() == 1) {
myRule.min_transfer_time = transfers.get(0).min_transfer_time;
}
result.add(myRule);
} else {
routesByStop.getOrDefault(fromStop, Collections.emptySet()).forEach(fromRoute -> {
final Transfer mostSpecificRule = findMostSpecificRule(transfers, fromRoute, toRouteId);
final Transfer myRule = new Transfer();
myRule.to_route_id = toRouteId;
myRule.from_route_id = fromRoute;
myRule.to_stop_id = mostSpecificRule.to_stop_id;
myRule.from_stop_id = mostSpecificRule.from_stop_id;
myRule.transfer_type = mostSpecificRule.transfer_type;
myRule.min_transfer_time = mostSpecificRule.min_transfer_time;
myRule.from_trip_id = mostSpecificRule.from_trip_id;
myRule.to_trip_id = mostSpecificRule.to_trip_id;
result.add(myRule);
});
}
});
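// If no rule originates at the destination stop itself, add a default within-station transfer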
if (result.stream().noneMatch(t -> t.from_stop_id.equals(toStopId))) {
final Transfer withinStationTransfer = new Transfer();
withinStationTransfer.from_stop_id = toStopId;
withinStationTransfer.to_stop_id = toStopId;
result.add(withinStationTransfer);
}
return result;
} | @Test
public void testInternalTransfersByToRouteIfRouteSpecific() {
List<Transfer> transfersToStop = sampleFeed.getTransfersToStop("BEATTY_AIRPORT", "AB");
assertEquals(5, transfersToStop.size());
assertEquals("AB", transfersToStop.get(0).from_route_id);
assertEquals("FUNNY_BLOCK_AB", transfersToStop.get(1).from_route_id);
assertEquals("STBA", transfersToStop.get(2).from_route_id);
assertEquals("AAMV", transfersToStop.get(3).from_route_id);
assertEquals("ABBFC", transfersToStop.get(4).from_route_id);
} |
public static ConfigurableResource parseResourceConfigValue(String value)
throws AllocationConfigurationException {
return parseResourceConfigValue(value, Long.MAX_VALUE);
} | @Test
public void testOldStyleResourcesSeparatedBySpacesInvalid() throws Exception {
String value = "2 vcores 5120 mb 555 mb";
expectUnparsableResource(value);
parseResourceConfigValue(value);
} |
@Override
public JType apply(String nodeName, JsonNode node, JsonNode parent, JClassContainer jClassContainer, Schema schema) {
String propertyTypeName = getTypeName(node);
JType type;
if (propertyTypeName.equals("object") || node.has("properties") && node.path("properties").size() > 0) {
type = ruleFactory.getObjectRule().apply(nodeName, node, parent, jClassContainer.getPackage(), schema);
} else if (node.has("existingJavaType")) {
String typeName = node.path("existingJavaType").asText();
if (isPrimitive(typeName, jClassContainer.owner())) {
type = primitiveType(typeName, jClassContainer.owner());
} else {
type = resolveType(jClassContainer, typeName);
}
} else if (propertyTypeName.equals("string")) {
type = jClassContainer.owner().ref(String.class);
} else if (propertyTypeName.equals("number")) {
type = getNumberType(jClassContainer.owner(), ruleFactory.getGenerationConfig());
} else if (propertyTypeName.equals("integer")) {
type = getIntegerType(jClassContainer.owner(), node, ruleFactory.getGenerationConfig());
} else if (propertyTypeName.equals("boolean")) {
type = unboxIfNecessary(jClassContainer.owner().ref(Boolean.class), ruleFactory.getGenerationConfig());
} else if (propertyTypeName.equals("array")) {
type = ruleFactory.getArrayRule().apply(nodeName, node, parent, jClassContainer.getPackage(), schema);
} else {
type = jClassContainer.owner().ref(Object.class);
}
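// A format or media declaration can further refine the resolved type, unless an explicit Java type was requested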
if (!node.has("javaType") && !node.has("existingJavaType") && node.has("format")) {
type = ruleFactory.getFormatRule().apply(nodeName, node.get("format"), node, type, schema);
} else if (!node.has("javaType") && !node.has("existingJavaType") && propertyTypeName.equals("string") && node.has("media")) {
type = ruleFactory.getMediaRule().apply(nodeName, node.get("media"), node, type, schema);
}
return type;
} | @Test
public void applyGeneratesNumberUsingJavaTypeBigDecimal() {
JPackage jpackage = new JCodeModel()._package(getClass().getPackage().getName());
ObjectNode objectNode = new ObjectMapper().createObjectNode();
objectNode.put("type", "number");
objectNode.put("existingJavaType", "java.math.BigDecimal");
JType result = rule.apply("fooBar", objectNode, null, jpackage, null);
assertThat(result.fullName(), is("java.math.BigDecimal"));
} |
@Override
public ImportResult importItem(
UUID jobId,
IdempotentImportExecutor idempotentImportExecutor,
TokensAndUrlAuthData authData,
VideosContainerResource resource)
throws Exception {
KoofrClient koofrClient = koofrClientFactory.create(authData);
monitor.debug(
() ->
String.format(
"%s: Importing %s albums and %s videos",
jobId, resource.getAlbums().size(), resource.getVideos().size()));
// TODO: VideosContainerResource does not support transmogrification
for (VideoAlbum album : resource.getAlbums()) {
// Create a Koofr folder and then save the id with the mapping data
idempotentImportExecutor.executeAndSwallowIOExceptions(
album.getId(), album.getName(), () -> createAlbumFolder(album, koofrClient));
}
final LongAdder totalImportedFilesSizes = new LongAdder();
for (VideoModel videoModel : resource.getVideos()) {
idempotentImportExecutor.importAndSwallowIOExceptions(
videoModel,
video -> {
ItemImportResult<String> fileImportResult =
importSingleVideo(videoModel, jobId, idempotentImportExecutor, koofrClient);
if (fileImportResult != null && fileImportResult.hasBytes()) {
totalImportedFilesSizes.add(fileImportResult.getBytes());
}
return fileImportResult;
});
}
return ImportResult.OK.copyWithBytes(totalImportedFilesSizes.longValue());
} | @Test
public void testImportItemFromURLWithAlbum() throws Exception {
server.enqueue(new MockResponse().setResponseCode(200).setBody("123"));
server.enqueue(new MockResponse().setResponseCode(200).setBody("4567"));
server.enqueue(new MockResponse().setResponseCode(200).setBody("89"));
when(client.ensureRootFolder()).thenReturn("/root");
when(executor.getCachedValue(eq("id1"))).thenReturn("/root/Album 1");
when(executor.getCachedValue(eq("id2"))).thenReturn("/root/Album");
when(client.fileExists("/root/Album 1/video1.mp4")).thenReturn(false);
when(client.fileExists("/root/Album 1/video2.mp4")).thenReturn(true);
when(client.fileExists("/root/Album/video3.mp4")).thenReturn(false);
String description1000 = new String(new char[1000]).replace("\0", "a");
String description1001 = new String(new char[1001]).replace("\0", "a");
UUID jobId = UUID.randomUUID();
Collection<VideoAlbum> albums =
ImmutableList.of(
new VideoAlbum("id1", "Album 1", "This is a fake album"),
new VideoAlbum("id2", "", description1001));
Date uploadedTime = Date.from(Instant.parse("2020-09-04T12:40:57.741Z"));
Collection<VideoModel> videos =
ImmutableList.of(
new VideoModel(
"video1.mp4",
server.url("/1.mp4").toString(),
"A video 1",
"video/mp4",
"video1",
"id1",
false,
uploadedTime),
new VideoModel(
"video2.mp4",
server.url("/2.mp4").toString(),
"A video 2",
"video/mp4",
"video2",
"id1",
false,
null),
new VideoModel(
"video3.mp4",
server.url("/3.mp4").toString(),
description1001,
"video/mp4",
"video3",
"id2",
false,
null));
VideosContainerResource resource = spy(new VideosContainerResource(albums, videos));
importer.importItem(jobId, executor, authData, resource);
InOrder clientInOrder = Mockito.inOrder(client);
clientInOrder.verify(client).ensureRootFolder();
clientInOrder.verify(client).ensureFolder("/root", "Album 1");
clientInOrder.verify(client).addDescription("/root/Album 1", "This is a fake album");
clientInOrder.verify(client).ensureFolder("/root", "Album");
clientInOrder.verify(client).addDescription("/root/Album", description1000);
clientInOrder.verify(client).fileExists(eq("/root/Album 1/video1.mp4"));
clientInOrder
.verify(client)
.uploadFile(
eq("/root/Album 1"),
eq("video1.mp4"),
any(),
eq("video/mp4"),
eq(uploadedTime),
eq("A video 1"));
clientInOrder.verify(client).fileExists(eq("/root/Album 1/video2.mp4"));
clientInOrder.verify(client).fileExists(eq("/root/Album/video3.mp4"));
clientInOrder
.verify(client)
.uploadFile(
eq("/root/Album"),
eq("video3.mp4"),
any(),
eq("video/mp4"),
isNull(),
eq(description1000));
clientInOrder.verifyNoMoreInteractions();
} |
public String format(Date then)
{
if (then == null)
then = now();
Duration d = approximateDuration(then);
return format(d);
} | @Test
public void testCenturiesFromNow() throws Exception
{
PrettyTime t = new PrettyTime(now);
Assert.assertEquals("3 centuries from now", t.format(now.plus(3, ChronoUnit.CENTURIES)));
} |
public EqualityPartition generateEqualitiesPartitionedBy(Predicate<VariableReferenceExpression> variableScope)
{
ImmutableSet.Builder<RowExpression> scopeEqualities = ImmutableSet.builder();
ImmutableSet.Builder<RowExpression> scopeComplementEqualities = ImmutableSet.builder();
ImmutableSet.Builder<RowExpression> scopeStraddlingEqualities = ImmutableSet.builder();
for (Collection<RowExpression> equalitySet : equalitySets.asMap().values()) {
Set<RowExpression> scopeExpressions = new LinkedHashSet<>();
Set<RowExpression> scopeComplementExpressions = new LinkedHashSet<>();
Set<RowExpression> scopeStraddlingExpressions = new LinkedHashSet<>();
// Try to push each non-derived expression into one side of the scope
for (RowExpression expression : filter(equalitySet, not(derivedExpressions::contains))) {
RowExpression scopeRewritten = rewriteExpression(expression, variableScope, false);
if (scopeRewritten != null) {
scopeExpressions.add(scopeRewritten);
}
RowExpression scopeComplementRewritten = rewriteExpression(expression, not(variableScope), false);
if (scopeComplementRewritten != null) {
scopeComplementExpressions.add(scopeComplementRewritten);
}
if (scopeRewritten == null && scopeComplementRewritten == null) {
scopeStraddlingExpressions.add(expression);
}
}
// Compile the equality expressions on each side of the scope
RowExpression matchingCanonical = getCanonical(scopeExpressions);
if (scopeExpressions.size() >= 2) {
for (RowExpression expression : filter(scopeExpressions, not(equalTo(matchingCanonical)))) {
scopeEqualities.add(buildEqualsExpression(functionAndTypeManager, matchingCanonical, expression));
}
}
RowExpression complementCanonical = getCanonical(scopeComplementExpressions);
if (scopeComplementExpressions.size() >= 2) {
for (RowExpression expression : filter(scopeComplementExpressions, not(equalTo(complementCanonical)))) {
scopeComplementEqualities.add(buildEqualsExpression(functionAndTypeManager, complementCanonical, expression));
}
}
// Compile the scope straddling equality expressions
List<RowExpression> connectingExpressions = new ArrayList<>();
connectingExpressions.add(matchingCanonical);
connectingExpressions.add(complementCanonical);
connectingExpressions.addAll(scopeStraddlingExpressions);
connectingExpressions = ImmutableList.copyOf(filter(connectingExpressions, Predicates.notNull()));
RowExpression connectingCanonical = getCanonical(connectingExpressions);
if (connectingCanonical != null) {
for (RowExpression expression : filter(connectingExpressions, not(equalTo(connectingCanonical)))) {
scopeStraddlingEqualities.add(buildEqualsExpression(functionAndTypeManager, connectingCanonical, expression));
}
}
}
return new EqualityPartition(scopeEqualities.build(), scopeComplementEqualities.build(), scopeStraddlingEqualities.build());
} | @Test
public void testEqualityPartitionGeneration()
{
EqualityInference.Builder builder = new EqualityInference.Builder(METADATA);
builder.addEquality(variable("a1"), variable("b1"));
builder.addEquality(add("a1", "a1"), multiply(variable("a1"), number(2)));
builder.addEquality(variable("b1"), variable("c1"));
builder.addEquality(add("a1", "a1"), variable("c1"));
builder.addEquality(add("a1", "b1"), variable("c1"));
EqualityInference inference = builder.build();
EqualityInference.EqualityPartition emptyScopePartition = inference.generateEqualitiesPartitionedBy(Predicates.alwaysFalse());
// Cannot generate any scope equalities with no matching symbols
assertTrue(emptyScopePartition.getScopeEqualities().isEmpty());
// All equalities should be represented in the inverse scope
assertFalse(emptyScopePartition.getScopeComplementEqualities().isEmpty());
// There should be no equalities straddling the scope
assertTrue(emptyScopePartition.getScopeStraddlingEqualities().isEmpty());
EqualityInference.EqualityPartition equalityPartition = inference.generateEqualitiesPartitionedBy(matchesVariables("c1"));
// There should be equalities in the scope that only use c1 and are all inferable equalities
assertFalse(equalityPartition.getScopeEqualities().isEmpty());
assertTrue(Iterables.all(equalityPartition.getScopeEqualities(), matchesVariableScope(matchesVariables("c1"))));
assertTrue(Iterables.all(equalityPartition.getScopeEqualities(), isInferenceCandidate(METADATA)));
// There should be equalities in the inverse scope that never use c1 and are all inferable equalities
assertFalse(equalityPartition.getScopeComplementEqualities().isEmpty());
assertTrue(Iterables.all(equalityPartition.getScopeComplementEqualities(), matchesVariableScope(not(matchesVariables("c1")))));
assertTrue(Iterables.all(equalityPartition.getScopeComplementEqualities(), isInferenceCandidate(METADATA)));
// There should be equalities in the straddling scope that use both c1 and non-c1 symbols
assertFalse(equalityPartition.getScopeStraddlingEqualities().isEmpty());
assertTrue(Iterables.any(equalityPartition.getScopeStraddlingEqualities(), matchesStraddlingScope(matchesVariables("c1"))));
assertTrue(Iterables.all(equalityPartition.getScopeStraddlingEqualities(), isInferenceCandidate(METADATA)));
// There should be a "full cover" of all of the equalities used
// THUS, we should be able to plug the generated equalities back in and get an equivalent set of equalities back the next time around
EqualityInference newInference = new EqualityInference.Builder(METADATA)
.addAllEqualities(equalityPartition.getScopeEqualities())
.addAllEqualities(equalityPartition.getScopeComplementEqualities())
.addAllEqualities(equalityPartition.getScopeStraddlingEqualities())
.build();
EqualityInference.EqualityPartition newEqualityPartition = newInference.generateEqualitiesPartitionedBy(matchesVariables("c1"));
assertEquals(setCopy(equalityPartition.getScopeEqualities()), setCopy(newEqualityPartition.getScopeEqualities()));
assertEquals(setCopy(equalityPartition.getScopeComplementEqualities()), setCopy(newEqualityPartition.getScopeComplementEqualities()));
assertEquals(setCopy(equalityPartition.getScopeStraddlingEqualities()), setCopy(newEqualityPartition.getScopeStraddlingEqualities()));
} |
@Override
public String toString() {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append('[');
int numColumns = _columnNames.length;
for (int i = 0; i < numColumns; i++) {
stringBuilder.append(_columnNames[i]).append('(').append(_columnDataTypes[i]).append(')').append(',');
}
stringBuilder.setCharAt(stringBuilder.length() - 1, ']');
return stringBuilder.toString();
} | @Test
public void testColumnDataType() {
for (DataSchema.ColumnDataType columnDataType : new DataSchema.ColumnDataType[]{INT, LONG}) {
Assert.assertTrue(columnDataType.isNumber());
Assert.assertTrue(columnDataType.isWholeNumber());
Assert.assertFalse(columnDataType.isArray());
Assert.assertFalse(columnDataType.isNumberArray());
Assert.assertFalse(columnDataType.isWholeNumberArray());
Assert.assertTrue(columnDataType.isCompatible(DOUBLE));
Assert.assertFalse(columnDataType.isCompatible(STRING));
Assert.assertFalse(columnDataType.isCompatible(DOUBLE_ARRAY));
Assert.assertFalse(columnDataType.isCompatible(STRING_ARRAY));
Assert.assertFalse(columnDataType.isCompatible(BYTES_ARRAY));
}
for (DataSchema.ColumnDataType columnDataType : new DataSchema.ColumnDataType[]{FLOAT, DOUBLE}) {
Assert.assertTrue(columnDataType.isNumber());
Assert.assertFalse(columnDataType.isWholeNumber());
Assert.assertFalse(columnDataType.isArray());
Assert.assertFalse(columnDataType.isNumberArray());
Assert.assertFalse(columnDataType.isWholeNumberArray());
Assert.assertTrue(columnDataType.isCompatible(LONG));
Assert.assertFalse(columnDataType.isCompatible(STRING));
Assert.assertFalse(columnDataType.isCompatible(LONG_ARRAY));
Assert.assertFalse(columnDataType.isCompatible(STRING_ARRAY));
Assert.assertFalse(columnDataType.isCompatible(BYTES_ARRAY));
}
Assert.assertFalse(STRING.isNumber());
Assert.assertFalse(STRING.isWholeNumber());
Assert.assertFalse(STRING.isArray());
Assert.assertFalse(STRING.isNumberArray());
Assert.assertFalse(STRING.isWholeNumberArray());
Assert.assertFalse(STRING.isCompatible(DOUBLE));
Assert.assertTrue(STRING.isCompatible(STRING));
Assert.assertFalse(STRING.isCompatible(DOUBLE_ARRAY));
Assert.assertFalse(STRING.isCompatible(STRING_ARRAY));
Assert.assertFalse(STRING.isCompatible(BYTES_ARRAY));
Assert.assertFalse(OBJECT.isNumber());
Assert.assertFalse(OBJECT.isWholeNumber());
Assert.assertFalse(OBJECT.isArray());
Assert.assertFalse(OBJECT.isNumberArray());
Assert.assertFalse(OBJECT.isWholeNumberArray());
Assert.assertFalse(OBJECT.isCompatible(DOUBLE));
Assert.assertFalse(OBJECT.isCompatible(STRING));
Assert.assertFalse(OBJECT.isCompatible(DOUBLE_ARRAY));
Assert.assertFalse(OBJECT.isCompatible(STRING_ARRAY));
Assert.assertFalse(OBJECT.isCompatible(BYTES_ARRAY));
Assert.assertTrue(OBJECT.isCompatible(OBJECT));
for (DataSchema.ColumnDataType columnDataType : new DataSchema.ColumnDataType[]{INT_ARRAY, LONG_ARRAY}) {
Assert.assertFalse(columnDataType.isNumber());
Assert.assertFalse(columnDataType.isWholeNumber());
Assert.assertTrue(columnDataType.isArray());
Assert.assertTrue(columnDataType.isNumberArray());
Assert.assertTrue(columnDataType.isWholeNumberArray());
Assert.assertFalse(columnDataType.isCompatible(DOUBLE));
Assert.assertFalse(columnDataType.isCompatible(STRING));
Assert.assertTrue(columnDataType.isCompatible(DOUBLE_ARRAY));
Assert.assertFalse(columnDataType.isCompatible(STRING_ARRAY));
Assert.assertFalse(columnDataType.isCompatible(BYTES_ARRAY));
}
for (DataSchema.ColumnDataType columnDataType : new DataSchema.ColumnDataType[]{FLOAT_ARRAY, DOUBLE_ARRAY}) {
Assert.assertFalse(columnDataType.isNumber());
Assert.assertFalse(columnDataType.isWholeNumber());
Assert.assertTrue(columnDataType.isArray());
Assert.assertTrue(columnDataType.isNumberArray());
Assert.assertFalse(columnDataType.isWholeNumberArray());
Assert.assertFalse(columnDataType.isCompatible(LONG));
Assert.assertFalse(columnDataType.isCompatible(STRING));
Assert.assertTrue(columnDataType.isCompatible(LONG_ARRAY));
Assert.assertFalse(columnDataType.isCompatible(STRING_ARRAY));
Assert.assertFalse(columnDataType.isCompatible(BYTES_ARRAY));
}
for (DataSchema.ColumnDataType columnDataType : new DataSchema.ColumnDataType[]{
STRING_ARRAY, BOOLEAN_ARRAY, TIMESTAMP_ARRAY, BYTES_ARRAY
}) {
Assert.assertFalse(columnDataType.isNumber());
Assert.assertFalse(columnDataType.isWholeNumber());
Assert.assertTrue(columnDataType.isArray());
Assert.assertFalse(columnDataType.isNumberArray());
Assert.assertFalse(columnDataType.isWholeNumberArray());
Assert.assertFalse(columnDataType.isCompatible(DOUBLE));
Assert.assertFalse(columnDataType.isCompatible(STRING));
Assert.assertFalse(columnDataType.isCompatible(DOUBLE_ARRAY));
Assert.assertFalse(columnDataType.isCompatible(INT_ARRAY));
Assert.assertTrue(columnDataType.isCompatible(columnDataType));
}
Assert.assertEquals(fromDataType(FieldSpec.DataType.INT, true), INT);
Assert.assertEquals(fromDataType(FieldSpec.DataType.INT, false), INT_ARRAY);
Assert.assertEquals(fromDataType(FieldSpec.DataType.LONG, true), LONG);
Assert.assertEquals(fromDataType(FieldSpec.DataType.LONG, false), LONG_ARRAY);
Assert.assertEquals(fromDataType(FieldSpec.DataType.FLOAT, true), FLOAT);
Assert.assertEquals(fromDataType(FieldSpec.DataType.FLOAT, false), FLOAT_ARRAY);
Assert.assertEquals(fromDataType(FieldSpec.DataType.DOUBLE, true), DOUBLE);
Assert.assertEquals(fromDataType(FieldSpec.DataType.DOUBLE, false), DOUBLE_ARRAY);
Assert.assertEquals(fromDataType(FieldSpec.DataType.STRING, true), STRING);
Assert.assertEquals(fromDataType(FieldSpec.DataType.STRING, false), STRING_ARRAY);
Assert.assertEquals(fromDataType(FieldSpec.DataType.BOOLEAN, false), BOOLEAN_ARRAY);
Assert.assertEquals(fromDataType(FieldSpec.DataType.TIMESTAMP, false), TIMESTAMP_ARRAY);
Assert.assertEquals(fromDataType(FieldSpec.DataType.BYTES, false), BYTES_ARRAY);
BigDecimal bigDecimalValue = new BigDecimal("1.2345678901234567890123456789");
Assert.assertEquals(BIG_DECIMAL.format(bigDecimalValue), bigDecimalValue.toPlainString());
Timestamp timestampValue = new Timestamp(1234567890123L);
Assert.assertEquals(TIMESTAMP.format(timestampValue), timestampValue.toString());
byte[] bytesValue = {12, 34, 56};
Assert.assertEquals(BYTES.format(bytesValue), BytesUtils.toHexString(bytesValue));
} |
@Override
public String key() {
return "git";
} | @Test
public void sanityCheck() {
assertThat(newGitScmProvider().key()).isEqualTo("git");
} |
public static Configuration windows() {
return WindowsHolder.WINDOWS;
} | @Test
public void testFileSystemForDefaultWindowsConfiguration() throws IOException {
FileSystem fs = Jimfs.newFileSystem(Configuration.windows());
assertThat(fs.getRootDirectories())
.containsExactlyElementsIn(ImmutableList.of(fs.getPath("C:\\")))
.inOrder();
assertThatPath(fs.getPath("").toRealPath()).isEqualTo(fs.getPath("C:\\work"));
assertThat(Iterables.getOnlyElement(fs.getFileStores()).getTotalSpace())
.isEqualTo(4L * 1024 * 1024 * 1024);
assertThat(fs.supportedFileAttributeViews()).containsExactly("basic");
Files.createFile(fs.getPath("C:\\foo"));
try {
Files.createFile(fs.getPath("C:\\FOO"));
fail();
} catch (FileAlreadyExistsException expected) {
}
} |
public static void deleteApplicationFiles(final String applicationFilesDir) {
if (!StringUtils.isNullOrWhitespaceOnly(applicationFilesDir)) {
final org.apache.flink.core.fs.Path path =
new org.apache.flink.core.fs.Path(applicationFilesDir);
try {
final org.apache.flink.core.fs.FileSystem fileSystem = path.getFileSystem();
if (!fileSystem.delete(path, true)) {
LOG.error(
"Deleting yarn application files under {} was unsuccessful.",
applicationFilesDir);
}
} catch (final IOException e) {
LOG.error(
"Could not properly delete yarn application files directory {}.",
applicationFilesDir,
e);
}
} else {
LOG.debug(
"No yarn application files directory set. Therefore, cannot clean up the data.");
}
} | @Test
void testDeleteApplicationFiles(@TempDir Path tempDir) throws Exception {
final Path applicationFilesDir = Files.createTempDirectory(tempDir, ".flink");
Files.createTempFile(applicationFilesDir, "flink", ".jar");
try (Stream<Path> files = Files.list(tempDir)) {
assertThat(files).hasSize(1);
}
try (Stream<Path> files = Files.list(applicationFilesDir)) {
assertThat(files).hasSize(1);
}
Utils.deleteApplicationFiles(applicationFilesDir.toString());
try (Stream<Path> files = Files.list(tempDir.toFile().toPath())) {
assertThat(files).isEmpty();
}
} |
Optional<TextRange> mapRegion(@Nullable Region region, InputFile file) {
if (region == null) {
return Optional.empty();
}
int startLine = Objects.requireNonNull(region.getStartLine(), "No start line defined for the region.");
int endLine = Optional.ofNullable(region.getEndLine()).orElse(startLine);
int startColumn = Optional.ofNullable(region.getStartColumn()).map(RegionMapper::adjustSarifColumnIndexToSqIndex).orElse(0);
int endColumn = Optional.ofNullable(region.getEndColumn()).map(RegionMapper::adjustSarifColumnIndexToSqIndex)
.orElseGet(() -> file.selectLine(endLine).end().lineOffset());
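// An empty range cannot be highlighted, so fall back to selecting the whole start line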
if (rangeIsEmpty(startLine, endLine, startColumn, endColumn)) {
return Optional.of(file.selectLine(startLine));
} else {
return Optional.of(file.newRange(startLine, startColumn, endLine, endColumn));
}
} | @Test
public void mapRegion_whenStartLineIsNull_shouldThrow() {
when(region.getStartLine()).thenReturn(null);
assertThatNullPointerException()
.isThrownBy(() -> regionMapper.mapRegion(region, INPUT_FILE))
.withMessage("No start line defined for the region.");
} |
@JsonIgnore
public long getFirstRestartIterationId() {
if (restartInfo != null && !restartInfo.isEmpty()) {
return restartInfo.stream().min(Long::compare).get();
}
return 0;
} | @Test
public void testGetFirstRestartIterationId() throws Exception {
ForeachStepOverview overview =
loadObject(
"fixtures/instances/sample-foreach-step-overview.json", ForeachStepOverview.class);
assertEquals(0, overview.getFirstRestartIterationId());
overview.addOne(23L, WorkflowInstance.Status.FAILED, null);
overview.addOne(123L, WorkflowInstance.Status.FAILED, null);
overview.refreshDetail();
overview.updateForRestart(
23L, WorkflowInstance.Status.CREATED, WorkflowInstance.Status.FAILED, null);
overview.updateForRestart(
123L, WorkflowInstance.Status.CREATED, WorkflowInstance.Status.FAILED, null);
assertEquals(23L, overview.getFirstRestartIterationId());
} |
public ElasticAgentInformation getElasticAgentInformationFromResponseBody(String responseBody) {
final ElasticAgentInformationDTO elasticAgentInformationDTO = FORCED_EXPOSE_GSON.fromJson(responseBody, ElasticAgentInformationDTO.class);
return elasticAgentInformationConverterV5.fromDTO(elasticAgentInformationDTO);
} | @Test
public void shouldGetTheElasticAgentInformationFromResponseBodyOfMigrateCall() throws CryptoException {
String responseBody = "{" +
" \"plugin_settings\":{" +
" \"key2\":\"password\", " +
" \"key\":\"value\"" +
" }," +
" \"cluster_profiles\":[" +
" {" +
" \"id\":\"cluster_profile_id\"," +
" \"plugin_id\":\"plugin_id\"," +
" \"properties\":{" +
" \"some_key\":\"some_value\", " +
" \"some_key2\":\"some_value2\"" +
" }" +
" }" +
" ]," +
" \"elastic_agent_profiles\":[" +
" {" +
" \"id\":\"profile_id\"," +
" \"plugin_id\":\"plugin_id\"," +
" \"cluster_profile_id\":\"cluster_profile_id\"," +
" \"properties\":{" +
" \"some_key\":\"some_value\"," +
" \"some_key2\":\"some_value2\"" +
" }" +
" }" +
" ]" +
"}\n";
ElasticAgentMetadataStore store = ElasticAgentMetadataStore.instance();
PluggableInstanceSettings elasticAgentProfileSettings = new PluggableInstanceSettings(List.of(new PluginConfiguration("some_key", new Metadata(true, true))));
PluggableInstanceSettings clusterProfileSettings = new PluggableInstanceSettings(List.of(new PluginConfiguration("some_key2", new Metadata(true, true))));
store.setPluginInfo(new ElasticAgentPluginInfo(pluginDescriptor("plugin_id"), elasticAgentProfileSettings, clusterProfileSettings, null, null, null));
ElasticAgentInformation elasticAgentInformation = new ElasticAgentExtensionConverterV5().getElasticAgentInformationFromResponseBody(responseBody);
ConfigurationProperty property1 = new ConfigurationProperty(new ConfigurationKey("key"), new ConfigurationValue("value"));
ConfigurationProperty property2 = new ConfigurationProperty(new ConfigurationKey("key2"), new EncryptedConfigurationValue(new GoCipher().encrypt("password")));
Configuration configuration = new Configuration();
configuration.add(property1);
configuration.add(property2);
Map<String, String> pluginSettings = configuration.getConfigurationAsMap(true);
List<ClusterProfile> clusterProfiles = new ArrayList<>();
clusterProfiles.add(new ClusterProfile("cluster_profile_id", "plugin_id", new ConfigurationProperty(new ConfigurationKey("some_key"), new ConfigurationValue("some_value")), new ConfigurationProperty(new ConfigurationKey("some_key2"), new EncryptedConfigurationValue(new GoCipher().encrypt("some_value2")))));
List<ElasticProfile> elasticAgentProfiles = new ArrayList<>();
// do not worry about encryption; it is handled during the config save (migrate-config) call
elasticAgentProfiles.add(new ElasticProfile("profile_id", "cluster_profile_id", new ConfigurationProperty(new ConfigurationKey("some_key"), new ConfigurationValue("some_value")), new ConfigurationProperty(new ConfigurationKey("some_key2"), new ConfigurationValue("some_value2"))));
ElasticAgentInformation expectedElasticAgentInformation = new ElasticAgentInformation(pluginSettings, clusterProfiles, elasticAgentProfiles);
assertThat(elasticAgentInformation, is(expectedElasticAgentInformation));
} |
public HsDataView registerNewConsumer(
int subpartitionId,
HsConsumerId consumerId,
HsSubpartitionConsumerInternalOperations operation)
throws IOException {
synchronized (lock) {
checkState(!isReleased, "HsFileDataManager is already released.");
lazyInitialize();
HsSubpartitionFileReader subpartitionReader =
fileReaderFactory.createFileReader(
subpartitionId,
consumerId,
dataFileChannel,
operation,
dataIndex,
hybridShuffleConfiguration.getMaxBuffersReadAhead(),
this::releaseSubpartitionReader,
headerBuf);
allReaders.add(subpartitionReader);
mayTriggerReading();
return subpartitionReader;
}
} | @Test
void testRunReadBuffersThrowException() throws Exception {
TestingHsSubpartitionFileReader reader = new TestingHsSubpartitionFileReader();
CompletableFuture<Throwable> cause = new CompletableFuture<>();
reader.setFailConsumer((cause::complete));
reader.setReadBuffersConsumer(
(requestedBuffers, readBuffers) -> {
throw new IOException("expected exception.");
});
factory.allReaders.add(reader);
fileDataManager.registerNewConsumer(0, DEFAULT, subpartitionViewOperation);
ioExecutor.trigger();
assertThat(cause).isCompleted();
assertThat(cause.get())
.isInstanceOf(IOException.class)
.hasMessageContaining("expected exception.");
} |
private RemotingCommand getMaxOffset(ChannelHandlerContext ctx,
RemotingCommand request) throws RemotingCommandException {
final RemotingCommand response = RemotingCommand.createResponseCommand(GetMaxOffsetResponseHeader.class);
final GetMaxOffsetResponseHeader responseHeader = (GetMaxOffsetResponseHeader) response.readCustomHeader();
final GetMaxOffsetRequestHeader requestHeader =
(GetMaxOffsetRequestHeader) request.decodeCommandCustomHeader(GetMaxOffsetRequestHeader.class);
TopicQueueMappingContext mappingContext = this.brokerController.getTopicQueueMappingManager().buildTopicQueueMappingContext(requestHeader);
RemotingCommand rewriteResult = rewriteRequestForStaticTopic(requestHeader, mappingContext);
if (rewriteResult != null) {
return rewriteResult;
}
long offset = this.brokerController.getMessageStore().getMaxOffsetInQueue(requestHeader.getTopic(), requestHeader.getQueueId());
responseHeader.setOffset(offset);
response.setCode(ResponseCode.SUCCESS);
response.setRemark(null);
return response;
} | @Test
public void testGetMaxOffset() throws Exception {
messageStore = mock(MessageStore.class);
when(messageStore.getMaxOffsetInQueue(anyString(), anyInt())).thenReturn(Long.MIN_VALUE);
when(brokerController.getMessageStore()).thenReturn(messageStore);
GetMaxOffsetRequestHeader getMaxOffsetRequestHeader = new GetMaxOffsetRequestHeader();
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_MAX_OFFSET, getMaxOffsetRequestHeader);
request.addExtField("topic", "topic");
request.addExtField("queueId", "0");
RemotingCommand response = adminBrokerProcessor.processRequest(handlerContext, request);
assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
} |
@Override
public String getName() {
return FUNCTION_NAME;
} | @Test
public void testTruncateDecimalTransformFunction() {
ExpressionContext expression =
RequestContextUtils.getExpression(String.format("truncate(%s,%s)", INT_SV_COLUMN, LONG_SV_COLUMN));
TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
Assert.assertTrue(transformFunction instanceof TruncateDecimalTransformFunction);
Assert.assertEquals(transformFunction.getName(), TruncateDecimalTransformFunction.FUNCTION_NAME);
double[] expectedValues = new double[NUM_ROWS];
expression = RequestContextUtils.getExpression(String.format("truncate(%s,2)", DOUBLE_SV_COLUMN));
transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
Assert.assertTrue(transformFunction instanceof TruncateDecimalTransformFunction);
for (int i = 0; i < NUM_ROWS; i++) {
expectedValues[i] = truncate(_doubleSVValues[i], 2);
}
testTransformFunction(transformFunction, expectedValues);
expression = RequestContextUtils.getExpression(String.format("truncate(%s, -2)", DOUBLE_SV_COLUMN));
transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
Assert.assertTrue(transformFunction instanceof TruncateDecimalTransformFunction);
for (int i = 0; i < NUM_ROWS; i++) {
expectedValues[i] = truncate(_doubleSVValues[i], -2);
}
testTransformFunction(transformFunction, expectedValues);
expression = RequestContextUtils.getExpression(String.format("truncate(%s)", DOUBLE_SV_COLUMN));
transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
Assert.assertTrue(transformFunction instanceof TruncateDecimalTransformFunction);
for (int i = 0; i < NUM_ROWS; i++) {
expectedValues[i] = truncate(_doubleSVValues[i], 0);
}
testTransformFunction(transformFunction, expectedValues);
} |
@Description("Returns the number of points in a Geometry")
@ScalarFunction("ST_NumPoints")
@SqlType(BIGINT)
public static long stNumPoints(@SqlType(GEOMETRY_TYPE_NAME) Slice input)
{
return getPointCount(EsriGeometrySerde.deserialize(input));
} | @Test
public void testSTNumPoints()
{
assertNumPoints("POINT EMPTY", 0);
assertNumPoints("MULTIPOINT EMPTY", 0);
assertNumPoints("LINESTRING EMPTY", 0);
assertNumPoints("MULTILINESTRING EMPTY", 0);
assertNumPoints("POLYGON EMPTY", 0);
assertNumPoints("MULTIPOLYGON EMPTY", 0);
assertNumPoints("GEOMETRYCOLLECTION EMPTY", 0);
assertNumPoints("POINT (1 2)", 1);
assertNumPoints("MULTIPOINT (1 2, 2 4, 3 6, 4 8)", 4);
assertNumPoints("LINESTRING (8 4, 5 7)", 2);
assertNumPoints("MULTILINESTRING ((1 1, 5 1), (2 4, 4 4))", 4);
assertNumPoints("POLYGON ((0 0, 8 0, 0 8, 0 0), (1 1, 1 5, 5 1, 1 1))", 6);
assertNumPoints("MULTIPOLYGON (((1 1, 1 3, 3 3, 3 1, 1 1)), ((2 4, 2 6, 6 6, 6 4, 2 4)))", 8);
assertNumPoints("GEOMETRYCOLLECTION (POINT (1 2), LINESTRING (8 4, 5 7), POLYGON EMPTY)", 3);
} |
@Override
public RemotingCommand processRequest(final ChannelHandlerContext ctx,
RemotingCommand request) throws RemotingCommandException {
return this.processRequest(ctx.channel(), request, true);
} | @Test
public void testSingleAck_appendAck() throws RemotingCommandException {
{
// buffer addAk OK
PopBufferMergeService popBufferMergeService = mock(PopBufferMergeService.class);
when(popBufferMergeService.addAk(anyInt(), any())).thenReturn(true);
when(popMessageProcessor.getPopBufferMergeService()).thenReturn(popBufferMergeService);
AckMessageRequestHeader requestHeader = new AckMessageRequestHeader();
long ackOffset = MIN_OFFSET_IN_QUEUE + 10;
requestHeader.setTopic(topic);
requestHeader.setQueueId(0);
requestHeader.setOffset(ackOffset);
requestHeader.setConsumerGroup(MixAll.DEFAULT_CONSUMER_GROUP);
requestHeader.setExtraInfo("64 1666860736757 60000 4 0 broker-a 0 " + ackOffset);
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.ACK_MESSAGE, requestHeader);
request.makeCustomHeaderToNet();
RemotingCommand response = ackMessageProcessor.processRequest(handlerContext, request);
assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
}
{
// buffer addAk fail
PopBufferMergeService popBufferMergeService = mock(PopBufferMergeService.class);
when(popBufferMergeService.addAk(anyInt(), any())).thenReturn(false);
when(popMessageProcessor.getPopBufferMergeService()).thenReturn(popBufferMergeService);
// store putMessage OK
PutMessageResult putMessageResult = new PutMessageResult(PutMessageStatus.PUT_OK, null);
when(messageStore.putMessage(any())).thenReturn(putMessageResult);
AckMessageRequestHeader requestHeader = new AckMessageRequestHeader();
long ackOffset = MIN_OFFSET_IN_QUEUE + 10;
requestHeader.setTopic(topic);
requestHeader.setQueueId(0);
requestHeader.setOffset(ackOffset);
requestHeader.setConsumerGroup(MixAll.DEFAULT_CONSUMER_GROUP);
requestHeader.setExtraInfo("64 1666860736757 60000 4 0 broker-a 0 " + ackOffset);
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.ACK_MESSAGE, requestHeader);
request.makeCustomHeaderToNet();
RemotingCommand response = ackMessageProcessor.processRequest(handlerContext, request);
assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
}
} |
public BigDecimal calculateProductGramsForRequiredFiller(Filler filler, BigDecimal fillerGrams) {
if (filler == null || fillerGrams == null || fillerGrams.doubleValue() <= 0) {
return BigDecimal.valueOf(0);
}
if (filler.equals(Filler.PROTEIN)) {
return calculateProductGramsForRequiredProteins(fillerGrams).setScale(0, RoundingMode.HALF_DOWN);
} else if (filler.equals(Filler.CARBOHYDRATE)) {
return calculateProductGramsForRequiredCarbohydrates(fillerGrams).setScale(0, RoundingMode.HALF_DOWN);
} else if (filler.equals(Filler.FAT)) {
return calculateProductGramsForRequiredFats(fillerGrams).setScale(0, RoundingMode.HALF_DOWN);
}
return BigDecimal.valueOf(0);
} | @Test
void calculateProductGramsForRequiredFiller_negativeValue() {
BigDecimal result = product.calculateProductGramsForRequiredFiller(Filler.CARBOHYDRATE, BigDecimal.valueOf(-999));
assertEquals(BigDecimal.valueOf(0), result);
} |
public double currentConsumptionRate() {
return aggregateStat(ConsumerCollector.CONSUMER_MESSAGES_PER_SEC, false);
} | @Test
public void shouldAggregateStatsAcrossAllConsumers() {
final MetricCollectors metricCollectors = new MetricCollectors();
final ConsumerCollector collector1 = new ConsumerCollector();
collector1.configure(
ImmutableMap.of(
ConsumerConfig.CLIENT_ID_CONFIG, "client1",
KsqlConfig.KSQL_INTERNAL_METRIC_COLLECTORS_CONFIG, metricCollectors
)
);
final ConsumerCollector collector2 = new ConsumerCollector();
collector2.configure(
ImmutableMap.of(
ConsumerConfig.CLIENT_ID_CONFIG, "client2",
KsqlConfig.KSQL_INTERNAL_METRIC_COLLECTORS_CONFIG, metricCollectors
)
);
final Map<TopicPartition, List<ConsumerRecord<Object, Object>>> records = new HashMap<>();
final List<ConsumerRecord<Object, Object>> recordList = new ArrayList<>();
for (int i = 0; i < 500; i++) {
recordList.add(
new ConsumerRecord<>(
TEST_TOPIC,
1,
1,
1L,
TimestampType
.CREATE_TIME,
10,
10,
"key",
"1234567890",
new RecordHeaders(),
Optional.empty()
)
);
}
records.put(new TopicPartition(TEST_TOPIC, 1), recordList);
final ConsumerRecords<Object, Object> consumerRecords = new ConsumerRecords<>(records);
collector1.onConsume(consumerRecords);
collector2.onConsume(consumerRecords);
// Same as the above test, the kafka `Rate` measurable stat reports the rate as a tenth
// of what it should be because all the samples haven't been filled out yet.
assertEquals(10, Math.floor(metricCollectors.currentConsumptionRate()), 0);
} |
@Override
public PageResult<OAuth2ClientDO> getOAuth2ClientPage(OAuth2ClientPageReqVO pageReqVO) {
return oauth2ClientMapper.selectPage(pageReqVO);
} | @Test
public void testGetOAuth2ClientPage() {
// mock data
OAuth2ClientDO dbOAuth2Client = randomPojo(OAuth2ClientDO.class, o -> { // matched by the query below
o.setName("潜龙");
o.setStatus(CommonStatusEnum.ENABLE.getStatus());
});
oauth2ClientMapper.insert(dbOAuth2Client);
// insert a record whose name does not match
oauth2ClientMapper.insert(cloneIgnoreId(dbOAuth2Client, o -> o.setName("凤凰")));
// insert a record whose status does not match
oauth2ClientMapper.insert(cloneIgnoreId(dbOAuth2Client, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus())));
// prepare parameters
OAuth2ClientPageReqVO reqVO = new OAuth2ClientPageReqVO();
reqVO.setName("龙");
reqVO.setStatus(CommonStatusEnum.ENABLE.getStatus());
// invoke
PageResult<OAuth2ClientDO> pageResult = oauth2ClientService.getOAuth2ClientPage(reqVO);
// assert
assertEquals(1, pageResult.getTotal());
assertEquals(1, pageResult.getList().size());
assertPojoEquals(dbOAuth2Client, pageResult.getList().get(0));
} |
public static RowCoder of(Schema schema) {
return new RowCoder(schema);
} | @Test
public void testEncodingPositionAddNewFields() throws Exception {
Schema schema1 =
Schema.builder()
.addNullableField("f_int32", FieldType.INT32)
.addNullableField("f_string", FieldType.STRING)
.build();
Schema schema2 =
Schema.builder()
.addNullableField("f_int32", FieldType.INT32)
.addNullableField("f_string", FieldType.STRING)
.addNullableField("f_boolean", FieldType.BOOLEAN)
.build();
Row row =
Row.withSchema(schema1)
.withFieldValue("f_int32", 42)
.withFieldValue("f_string", "hello world!")
.build();
Row expected =
Row.withSchema(schema2)
.withFieldValue("f_int32", 42)
.withFieldValue("f_string", "hello world!")
.build();
ByteArrayOutputStream os = new ByteArrayOutputStream();
RowCoder.of(schema1).encode(row, os);
Row decoded = RowCoder.of(schema2).decode(new ByteArrayInputStream(os.toByteArray()));
assertEquals(expected, decoded);
} |
@Override
public List<SQLPrimaryKey> getPrimaryKeys(String catName, String db_name, String tbl_name) throws MetaException {
PrimaryKeysRequest request = new PrimaryKeysRequest(db_name, tbl_name);
request.setCatName(catName);
return getPrimaryKeys(request);
} | @Test
public void testGetPrimaryKeys() throws Exception {
Database db1 =
new DatabaseBuilder().setName(DB1).setDescription("description")
.setLocation("locationurl").build(conf);
objectStore.createDatabase(db1);
StorageDescriptor sd1 = new StorageDescriptor(
ImmutableList.of(new FieldSchema("pk_col", "double", null)), "location",
null, null, false, 0,
new SerDeInfo("SerDeName", "serializationLib", null), null, null, null);
HashMap<String, String> params = new HashMap<>();
params.put("EXTERNAL", "false");
Table tbl1 =
new Table(TABLE1, DB1, "owner", 1, 2, 3, sd1, null, params, null, null,
"MANAGED_TABLE");
objectStore.createTable(tbl1);
SQLPrimaryKey pk =
new SQLPrimaryKey(DB1, TABLE1, "pk_col", 1, "pk_const_1", false, false,
false);
pk.setCatName(DEFAULT_CATALOG_NAME);
objectStore.addPrimaryKeys(ImmutableList.of(pk));
// Primary key retrieval should succeed, even if db_name isn't specified.
assertEquals("pk_col",
objectStore.getPrimaryKeys(DEFAULT_CATALOG_NAME, null, TABLE1).get(0)
.getColumn_name());
objectStore.dropTable(DEFAULT_CATALOG_NAME, DB1, TABLE1);
objectStore.dropDatabase(db1.getCatalogName(), DB1);
} |
@Override
public synchronized Multimap<String, String> findBundlesForUnloading(final LoadData loadData,
final ServiceConfiguration conf) {
selectedBundlesCache.clear();
final double threshold = conf.getLoadBalancerBrokerThresholdShedderPercentage() / 100.0;
final Map<String, Long> recentlyUnloadedBundles = loadData.getRecentlyUnloadedBundles();
final double minThroughputThreshold = conf.getLoadBalancerBundleUnloadMinThroughputThreshold() * MB;
final double avgUsage = getBrokerAvgUsage(loadData, conf.getLoadBalancerHistoryResourcePercentage(), conf);
if (avgUsage == 0) {
log.warn("average max resource usage is 0");
return selectedBundlesCache;
}
loadData.getBrokerData().forEach((broker, brokerData) -> {
final LocalBrokerData localData = brokerData.getLocalData();
final double currentUsage = brokerAvgResourceUsage.getOrDefault(broker, 0.0);
if (currentUsage < avgUsage + threshold) {
if (log.isDebugEnabled()) {
log.debug("[{}] broker is not overloaded, ignoring at this point ({})", broker,
localData.printResourceUsage());
}
return;
}
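// Offload the share of traffic by which this broker exceeds the average usage plus threshold (with a small extra margin)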
double percentOfTrafficToOffload =
currentUsage - avgUsage - threshold + ADDITIONAL_THRESHOLD_PERCENT_MARGIN;
double brokerCurrentThroughput = localData.getMsgThroughputIn() + localData.getMsgThroughputOut();
double minimumThroughputToOffload = brokerCurrentThroughput * percentOfTrafficToOffload;
if (minimumThroughputToOffload < minThroughputThreshold) {
if (log.isDebugEnabled()) {
log.debug("[{}] broker is planning to shed throughput {} MByte/s less than "
+ "minimumThroughputThreshold {} MByte/s, skipping bundle unload ({})",
broker, minimumThroughputToOffload / MB, minThroughputThreshold / MB,
localData.printResourceUsage());
}
return;
}
log.info(
"Attempting to shed load on {}, which has max resource usage above avgUsage and threshold {}%"
+ " > {}% + {}% -- Offloading at least {} MByte/s of traffic,"
+ " left throughput {} MByte/s ({})",
broker, 100 * currentUsage, 100 * avgUsage, 100 * threshold, minimumThroughputToOffload / MB,
(brokerCurrentThroughput - minimumThroughputToOffload) / MB, localData.printResourceUsage());
if (localData.getBundles().size() > 1) {
filterAndSelectBundle(loadData, recentlyUnloadedBundles, broker, localData, minimumThroughputToOffload);
} else if (localData.getBundles().size() == 1) {
log.warn(
"HIGH USAGE WARNING : Sole namespace bundle {} is overloading broker {}. "
+ "No Load Shedding will be done on this broker",
localData.getBundles().iterator().next(), broker);
} else {
log.warn("Broker {} is overloaded despite having no bundles", broker);
}
});
if (selectedBundlesCache.isEmpty() && conf.isLowerBoundarySheddingEnabled()) {
tryLowerBoundaryShedding(loadData, conf);
}
return selectedBundlesCache;
} | @Test
public void testBrokerReachThreshold() {
LoadData loadData = new LoadData();
LocalBrokerData broker1 = new LocalBrokerData();
broker1.setCpu(new ResourceUsage(140, 100));
broker1.setMemory(new ResourceUsage(10, 100));
broker1.setDirectMemory(new ResourceUsage(10, 100));
broker1.setBandwidthIn(new ResourceUsage(500, 1000));
broker1.setBandwidthOut(new ResourceUsage(500, 1000));
broker1.setBundles(Sets.newHashSet("bundle-1", "bundle-2"));
broker1.setMsgThroughputIn(Double.MAX_VALUE);
LocalBrokerData broker2 = new LocalBrokerData();
broker2.setCpu(new ResourceUsage(10, 100));
broker2.setMemory(new ResourceUsage(10, 100));
broker2.setDirectMemory(new ResourceUsage(10, 100));
broker2.setBandwidthIn(new ResourceUsage(500, 1000));
broker2.setBandwidthOut(new ResourceUsage(500, 1000));
broker2.setBundles(Sets.newHashSet("bundle-3", "bundle-4"));
BundleData bundleData = new BundleData();
TimeAverageMessageData timeAverageMessageData = new TimeAverageMessageData();
timeAverageMessageData.setMsgThroughputIn(1000);
timeAverageMessageData.setMsgThroughputOut(1000);
bundleData.setShortTermData(timeAverageMessageData);
loadData.getBundleData().put("bundle-2", bundleData);
loadData.getBrokerData().put("broker-2", new BrokerData(broker1));
loadData.getBrokerData().put("broker-3", new BrokerData(broker2));
assertFalse(thresholdShedder.findBundlesForUnloading(loadData, conf).isEmpty());
} |
public boolean isMatch(Map<String, Pattern> patterns) {
if (!patterns.isEmpty()) {
return matchPatterns(patterns);
}
// An empty pattern map is still considered a match.
return true;
} | @Test
public void testIsMatchMismatchFail() throws UnknownHostException {
Uuid uuid = Uuid.randomUuid();
ClientMetricsInstanceMetadata instanceMetadata = new ClientMetricsInstanceMetadata(uuid,
ClientMetricsTestUtils.requestContext());
Map<String, Pattern> patternMap = new HashMap<>();
patternMap.put(ClientMetricsConfigs.CLIENT_INSTANCE_ID, Pattern.compile(uuid.toString()));
patternMap.put(ClientMetricsConfigs.CLIENT_SOFTWARE_NAME, Pattern.compile("apache-kafka-.*"));
patternMap.put(ClientMetricsConfigs.CLIENT_SOFTWARE_VERSION, Pattern.compile("3.5.2"));
patternMap.put(ClientMetricsConfigs.CLIENT_SOURCE_ADDRESS, Pattern.compile(InetAddress.getLocalHost().getHostAddress()));
// Client id is different.
patternMap.put(ClientMetricsConfigs.CLIENT_ID, Pattern.compile("producer-2"));
assertFalse(instanceMetadata.isMatch(patternMap));
// Client instance id is different.
patternMap.put(ClientMetricsConfigs.CLIENT_ID, Pattern.compile("producer-1"));
patternMap.put(ClientMetricsConfigs.CLIENT_INSTANCE_ID, Pattern.compile(uuid + "random"));
assertFalse(instanceMetadata.isMatch(patternMap));
// Software name is different.
patternMap.put(ClientMetricsConfigs.CLIENT_INSTANCE_ID, Pattern.compile(uuid.toString()));
patternMap.put(ClientMetricsConfigs.CLIENT_SOFTWARE_NAME, Pattern.compile("apache-kafka-java-1"));
assertFalse(instanceMetadata.isMatch(patternMap));
// Software version is different.
patternMap.put(ClientMetricsConfigs.CLIENT_SOFTWARE_NAME, Pattern.compile("apache-kafka-java"));
patternMap.put(ClientMetricsConfigs.CLIENT_SOFTWARE_VERSION, Pattern.compile("3.5.x"));
assertFalse(instanceMetadata.isMatch(patternMap));
// Source address is different.
patternMap.put(ClientMetricsConfigs.CLIENT_SOFTWARE_VERSION, Pattern.compile("3.5.2"));
patternMap.put(ClientMetricsConfigs.CLIENT_SOURCE_ADDRESS, Pattern.compile("1.2.3.4"));
assertFalse(instanceMetadata.isMatch(patternMap));
// Source port is different.
patternMap.put(ClientMetricsConfigs.CLIENT_SOURCE_ADDRESS, Pattern.compile(InetAddress.getLocalHost().getHostAddress()));
patternMap.put(ClientMetricsConfigs.CLIENT_SOURCE_PORT, Pattern.compile("8080"));
assertFalse(instanceMetadata.isMatch(patternMap));
} |
@NonNull
public URI callback(@NonNull CallbackRequest request) {
var session = mustFindSession(request.sessionId());
var idToken =
session
.trustedSectoralIdpStep()
.exchangeSectoralIdpCode(request.code(), session.codeVerifier());
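// Remove (consume) the session before issuing the code so the same callback cannot be used twice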
session = removeSession(request.sessionId());
if (session == null) {
throw new ValidationException(new Message("error.invalidSession"));
}
var issued = tokenIssuer.issueCode(session, idToken);
return UriBuilder.fromUri(session.redirectUri())
.queryParam("code", issued.code())
.queryParam("state", session.state())
.build();
} | @Test
void callback_unknownSession() {
var config = new RelyingPartyConfig(null, null);
var sessionRepo = mock(SessionRepo.class);
var sut = new AuthService(BASE_URI, config, null, sessionRepo, null, null);
var sessionId = UUID.randomUUID().toString();
when(sessionRepo.load(sessionId)).thenReturn(null);
var req = new CallbackRequest(sessionId, null);
// when & then
assertThrows(ValidationException.class, () -> sut.callback(req));
} |
@Override
public BuiltInScalarFunctionImplementation specialize(BoundVariables boundVariables, int arity, FunctionAndTypeManager functionAndTypeManager)
{
ImmutableList.Builder<ScalarFunctionImplementationChoice> implementationChoices = ImmutableList.builder();
for (PolymorphicScalarFunctionChoice choice : choices) {
implementationChoices.add(getScalarFunctionImplementationChoice(boundVariables, functionAndTypeManager, choice));
}
return new BuiltInScalarFunctionImplementation(implementationChoices.build());
} | @Test
public void testSelectsMultipleChoiceWithBlockPosition()
throws Throwable
{
Signature signature = SignatureBuilder.builder()
.kind(SCALAR)
.operatorType(IS_DISTINCT_FROM)
.argumentTypes(DECIMAL_SIGNATURE, DECIMAL_SIGNATURE)
.returnType(parseTypeSignature(BOOLEAN))
.build();
SqlScalarFunction function = SqlScalarFunction.builder(TestMethods.class, IS_DISTINCT_FROM)
.signature(signature)
.deterministic(true)
.choice(choice -> choice
.argumentProperties(
valueTypeArgumentProperty(USE_NULL_FLAG),
valueTypeArgumentProperty(USE_NULL_FLAG))
.implementation(methodsGroup -> methodsGroup
.methods("shortShort", "longLong")))
.choice(choice -> choice
.argumentProperties(
valueTypeArgumentProperty(BLOCK_AND_POSITION),
valueTypeArgumentProperty(BLOCK_AND_POSITION))
.implementation(methodsGroup -> methodsGroup
.methodWithExplicitJavaTypes("blockPositionLongLong",
asList(Optional.of(Slice.class), Optional.of(Slice.class)))
.methodWithExplicitJavaTypes("blockPositionShortShort",
asList(Optional.of(long.class), Optional.of(long.class)))))
.build();
BuiltInScalarFunctionImplementation functionImplementation = function.specialize(SHORT_DECIMAL_BOUND_VARIABLES, 2, FUNCTION_AND_TYPE_MANAGER);
assertEquals(functionImplementation.getAllChoices().size(), 2);
assertEquals(functionImplementation.getAllChoices().get(0).getArgumentProperties(), Collections.nCopies(2, valueTypeArgumentProperty(USE_NULL_FLAG)));
assertEquals(functionImplementation.getAllChoices().get(1).getArgumentProperties(), Collections.nCopies(2, valueTypeArgumentProperty(BLOCK_AND_POSITION)));
Block block1 = new LongArrayBlock(0, Optional.empty(), new long[0]);
Block block2 = new LongArrayBlock(0, Optional.empty(), new long[0]);
assertFalse((boolean) functionImplementation.getAllChoices().get(1).getMethodHandle().invoke(block1, 0, block2, 0));
functionImplementation = function.specialize(LONG_DECIMAL_BOUND_VARIABLES, 2, FUNCTION_AND_TYPE_MANAGER);
assertTrue((boolean) functionImplementation.getAllChoices().get(1).getMethodHandle().invoke(block1, 0, block2, 0));
} |
public static List<Class<?>> findEntityClassesFromDirectory(String[] pckgs) {
@SuppressWarnings("unchecked")
final AnnotationAcceptingListener asl = new AnnotationAcceptingListener(Entity.class);
try (final PackageNamesScanner scanner = new PackageNamesScanner(pckgs, true)) {
while (scanner.hasNext()) {
final String next = scanner.next();
if (asl.accept(next)) {
try (final InputStream in = scanner.open()) {
asl.process(next, in);
} catch (IOException e) {
throw new RuntimeException("AnnotationAcceptingListener failed to process scanned resource: " + next);
}
}
}
}
return new ArrayList<>(asl.getAnnotatedClasses());
} | @Test
void testFindEntityClassesFromMultipleDirectories() {
//given
String packageWithEntities = "io.dropwizard.hibernate.fake.entities.pckg";
String packageWithEntities2 = "io.dropwizard.hibernate.fake2.entities.pckg";
//when
List<Class<?>> findEntityClassesFromDirectory =
ScanningHibernateBundle.findEntityClassesFromDirectory(new String[]{packageWithEntities, packageWithEntities2});
//then
assertFalse(findEntityClassesFromDirectory.isEmpty());
assertEquals(8, findEntityClassesFromDirectory.size());
} |
public static String toJagexName(String str)
{
return CharMatcher.ascii().retainFrom(str.replaceAll("[\u00A0_-]", " ")).trim();
} | @Test
public void toJagexName()
{
assertEquals("lab rat", Text.toJagexName("lab rat"));
assertEquals("lab rat", Text.toJagexName("-lab_rat"));
assertEquals("lab rat", Text.toJagexName(" lab-rat__"));
assertEquals("lab rat", Text.toJagexName("lab\u00A0rat\u00A0\u00A0"));
assertEquals("Test Man", Text.toJagexName("蹔Test\u00A0蹔Man"));
assertEquals("Test Boy", Text.toJagexName(" Te⓲st\u00A0B⓲oy⓲ "));
assertEquals("mR nAmE", Text.toJagexName("mR nAmE"));
assertEquals("mR nAmE", Text.toJagexName("mR__nAmE"));
assertEquals("mR nAmE", Text.toJagexName("mR--nAmE"));
assertEquals("mR nAmE", Text.toJagexName("-_ mR\u00A0-nAmE _-"));
assertEquals("mR nAmE", Text.toJagexName("--__--mR_-nAmE__ --"));
assertEquals("Mind the gap", Text.toJagexName("Mind_-_-the-- __gap"));
} |
public void setBaseResource(Resource baseResource) {
handler.setBaseResource(baseResource);
} | @Test
void setsBaseResourceList(@TempDir Path tempDir) throws Exception {
Resource wooResource = Resource.newResource(Files.createDirectory(tempDir.resolve("dir-1")));
Resource fooResource = Resource.newResource(Files.createDirectory(tempDir.resolve("dir-2")));
final Resource[] testResources = new Resource[]{wooResource, fooResource};
environment.setBaseResource(testResources);
assertThat(handler.getBaseResource()).isExactlyInstanceOf(ResourceCollection.class);
assertThat(((ResourceCollection) handler.getBaseResource()).getResources()).contains(wooResource, fooResource);
} |
@Override
public Path move(final Path source, final Path target, final TransferStatus status, final Delete.Callback callback,
final ConnectionCallback connectionCallback) throws BackgroundException {
if(containerService.isContainer(source)) {
if(new SimplePathPredicate(source.getParent()).test(target.getParent())) {
// Rename only
return proxy.move(source, target, status, callback, connectionCallback);
}
}
if(new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(source) ^ new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(containerService.getContainer(target))) {
// Moving into or from an encrypted room
final Copy copy = new SDSDelegatingCopyFeature(session, nodeid, new SDSCopyFeature(session, nodeid));
if(log.isDebugEnabled()) {
log.debug(String.format("Move %s to %s using copy feature %s", source, target, copy));
}
final Path c = copy.copy(source, target, status, connectionCallback, new DisabledStreamListener());
// Delete source file after copy is complete
final Delete delete = new SDSDeleteFeature(session, nodeid);
if(delete.isSupported(source)) {
log.warn(String.format("Delete source %s copied to %s", source, target));
delete.delete(Collections.singletonMap(source, status), connectionCallback, callback);
}
return c;
}
else {
return proxy.move(source, target, status, callback, connectionCallback);
}
} | @Test
public void testMoveDataRoom() throws Exception {
final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
final String directoryname = new AlphanumericRandomStringService().random();
final Path test = new SDSDirectoryFeature(session, nodeid).mkdir(new Path(directoryname, EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final Path target = new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume));
new SDSMoveFeature(session, nodeid).move(test, target, new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback());
assertEquals(0, session.getMetrics().get(Copy.class));
assertFalse(new SDSFindFeature(session, nodeid).find(new Path(directoryname, EnumSet.of(Path.Type.directory, Path.Type.volume))));
assertTrue(new SDSFindFeature(session, nodeid).find(target));
new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(target), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
public static Map<String, Object> flatten(Map<String, Object> originalMap, String parentKey, String separator) {
final Map<String, Object> result = new HashMap<>();
for (Map.Entry<String, Object> entry : originalMap.entrySet()) {
final String key = parentKey.isEmpty() ? entry.getKey() : parentKey + separator + entry.getKey();
final Object value = entry.getValue();
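            // Recurse into nested maps, prefixing their keys with the compound parent key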
if (value instanceof Map) {
@SuppressWarnings("unchecked")
final Map<String, Object> valueMap = (Map<String, Object>) value;
result.putAll(flatten(valueMap, key, separator));
} else {
result.put(key, value);
}
}
return result;
} | @Test
public void flattenAddsParentKeys() throws Exception {
final Map<String, Object> map = ImmutableMap.of(
"map", ImmutableMap.of(
"foo", "bar",
"baz", "qux"));
final Map<String, Object> expected = ImmutableMap.of(
"test_map_foo", "bar",
"test_map_baz", "qux");
assertThat(MapUtils.flatten(map, "test", "_")).isEqualTo(expected);
} |
public long getUpdateTime() {
return updateTime;
} | @Test
public void testGetUpdateTime() {
long lastUpdateTime = replicatedRecord.getUpdateTime();
sleepAtLeastMillis(100);
replicatedRecord.setValue("newValue", 0);
assertTrue("replicatedRecord.getUpdateTime() should return a greater update time",
replicatedRecord.getUpdateTime() > lastUpdateTime);
} |
static String isHostParam(final String given) {
final String hostUri = StringHelper.notEmpty(given, "host");
final Matcher matcher = HOST_PATTERN.matcher(given);
if (!matcher.matches()) {
throw new IllegalArgumentException(
"host must be an absolute URI (e.g. http://api.example.com), given: `" + hostUri + "`");
}
return hostUri;
} | @Test
public void nonUriHostParametersAreNotAllowed() {
assertThrows(IllegalArgumentException.class,
() -> RestOpenApiHelper.isHostParam("carrot"));
} |
public static <T> MasterTriggerRestoreHook<T> wrapHook(
MasterTriggerRestoreHook<T> hook, ClassLoader userClassLoader) {
return new WrappedMasterHook<>(hook, userClassLoader);
} | @Test
void wrapHook() throws Exception {
final String id = "id";
Thread thread = Thread.currentThread();
final ClassLoader originalClassLoader = thread.getContextClassLoader();
final ClassLoader userClassLoader = new URLClassLoader(new URL[0]);
final CompletableFuture<Void> onceRunnableFuture = new CompletableFuture<>();
final Runnable onceRunnable =
() -> {
assertThat(Thread.currentThread().getContextClassLoader())
.isEqualTo(userClassLoader);
assertThat(onceRunnableFuture)
.withFailMessage("The runnable shouldn't be called multiple times.")
.isNotDone();
onceRunnableFuture.complete(null);
};
final CompletableFuture<Void> getIdentifierFuture = new CompletableFuture<>();
final CompletableFuture<Void> closeFuture = new CompletableFuture<>();
final CompletableFuture<Void> restoreCheckpointFuture = new CompletableFuture<>();
final CompletableFuture<Void> createCheckpointDataSerializerFuture =
new CompletableFuture<>();
MasterTriggerRestoreHook<String> hook =
new MasterTriggerRestoreHook<String>() {
@Override
public String getIdentifier() {
assertThat(Thread.currentThread().getContextClassLoader())
.isEqualTo(userClassLoader);
assertThat(getIdentifierFuture)
.withFailMessage("The method shouldn't be called multiple times.")
.isNotDone();
getIdentifierFuture.complete(null);
return id;
}
@Override
public void reset() {
assertThat(Thread.currentThread().getContextClassLoader())
.isEqualTo(userClassLoader);
}
@Override
public void close() {
assertThat(Thread.currentThread().getContextClassLoader())
.isEqualTo(userClassLoader);
assertThat(closeFuture)
.withFailMessage("The method shouldn't be called multiple times.")
.isNotDone();
closeFuture.complete(null);
}
@Nullable
@Override
public CompletableFuture<String> triggerCheckpoint(
long checkpointId, long timestamp, Executor executor) {
assertThat(Thread.currentThread().getContextClassLoader())
.isEqualTo(userClassLoader);
executor.execute(onceRunnable);
return null;
}
@Override
public void restoreCheckpoint(
long checkpointId, @Nullable String checkpointData) {
assertThat(Thread.currentThread().getContextClassLoader())
.isEqualTo(userClassLoader);
assertThat(checkpointId).isZero();
assertThat(checkpointData).isEmpty();
assertThat(restoreCheckpointFuture)
.withFailMessage("The method shouldn't be called multiple times.")
.isNotDone();
restoreCheckpointFuture.complete(null);
}
@Nullable
@Override
public SimpleVersionedSerializer<String> createCheckpointDataSerializer() {
assertThat(Thread.currentThread().getContextClassLoader())
.isEqualTo(userClassLoader);
assertThat(createCheckpointDataSerializerFuture)
.withFailMessage("The method shouldn't be called multiple times.")
.isNotDone();
createCheckpointDataSerializerFuture.complete(null);
return null;
}
};
MasterTriggerRestoreHook<String> wrapped = MasterHooks.wrapHook(hook, userClassLoader);
// verify getIdentifier
wrapped.getIdentifier();
assertThat(getIdentifierFuture).isCompleted();
assertThat(thread.getContextClassLoader()).isEqualTo(originalClassLoader);
// verify triggerCheckpoint and its wrapped executor
TestExecutor testExecutor = new TestExecutor();
wrapped.triggerCheckpoint(0L, 0, testExecutor);
assertThat(thread.getContextClassLoader()).isEqualTo(originalClassLoader);
assertThat(testExecutor.command).isNotNull();
testExecutor.command.run();
assertThat(onceRunnableFuture).isCompleted();
assertThat(thread.getContextClassLoader()).isEqualTo(originalClassLoader);
// verify restoreCheckpoint
wrapped.restoreCheckpoint(0L, "");
assertThat(restoreCheckpointFuture).isCompleted();
assertThat(thread.getContextClassLoader()).isEqualTo(originalClassLoader);
// verify createCheckpointDataSerializer
wrapped.createCheckpointDataSerializer();
assertThat(createCheckpointDataSerializerFuture).isCompleted();
assertThat(thread.getContextClassLoader()).isEqualTo(originalClassLoader);
// verify close
wrapped.close();
assertThat(closeFuture).isCompleted();
assertThat(thread.getContextClassLoader()).isEqualTo(originalClassLoader);
} |
@Override
public MapSettings setProperty(String key, String value) {
return (MapSettings) super.setProperty(key, value);
} | @Test
public void getStringLines_linux() {
Settings settings = new MapSettings();
settings.setProperty("foo", "one\ntwo");
assertThat(settings.getStringLines("foo")).isEqualTo(new String[]{"one", "two"});
settings.setProperty("foo", "one\ntwo\n");
assertThat(settings.getStringLines("foo")).isEqualTo(new String[]{"one", "two"});
} |
public static String formatSql(final AstNode root) {
final StringBuilder builder = new StringBuilder();
new Formatter(builder).process(root, 0);
return StringUtils.stripEnd(builder.toString(), "\n");
} | @Test
public void shouldFormatPauseQuery() {
// Given:
final PauseQuery query = PauseQuery.query(Optional.empty(), new QueryId("FOO"));
// When:
final String formatted = SqlFormatter.formatSql(query);
// Then:
assertThat(formatted, is("PAUSE FOO"));
} |
@Override
public void start() throws Exception {
validateConfiguration(configs, registry.getNames());
} | @Test
void startValidationsShouldSucceedWhenNoHealthChecksConfigured() throws Exception {
// given
List<HealthCheckConfiguration> configs = emptyList();
HealthCheckRegistry registry = new HealthCheckRegistry();
// when
HealthCheckConfigValidator validator = new HealthCheckConfigValidator(configs, registry);
validator.start();
// then
verifyNoInteractions(mockLogAppender);
} |
public EnumSet<RepositoryFilePermission> getPermissionSet() {
return ace.getPermissions();
} | @Test
public void testGetPermissionSet() {
UIRepositoryObjectAcl uiAcl = new UIRepositoryObjectAcl( createObjectAce() );
EnumSet<RepositoryFilePermission> permissions = uiAcl.getPermissionSet();
assertNotNull( permissions );
assertEquals( 1, permissions.size() );
assertTrue( permissions.contains( RepositoryFilePermission.ALL ) );
} |
public static long getDirectoryFilesSize(java.nio.file.Path path, FileVisitOption... options)
throws IOException {
if (path == null) {
return 0L;
}
try (Stream<java.nio.file.Path> pathStream = Files.walk(path, options)) {
return pathStream
.map(java.nio.file.Path::toFile)
.filter(File::isFile)
.mapToLong(File::length)
.sum();
}
} | @Test
void testGetDirectorySize() throws Exception {
final File parent = TempDirUtils.newFolder(temporaryFolder);
// Empty directory should have size 0
assertThat(FileUtils.getDirectoryFilesSize(parent.toPath())).isZero();
// Expected size: (20*5^0 + 20*5^1 + 20*5^2 + 20*5^3) * 1 byte = 3120 bytes
generateRandomDirs(parent, 20, 5, 3);
assertThat(FileUtils.getDirectoryFilesSize(parent.toPath())).isEqualTo(3120);
} |
public Rule<ProjectNode> projectNodeRule()
{
return new PullUpExpressionInLambdaProjectNodeRule();
} | @Test
public void testInvalidNestedLambdaInProjection()
{
tester().assertThat(new PullUpExpressionInLambdaRules(getFunctionManager()).projectNodeRule())
.setSystemProperty(PULL_EXPRESSION_FROM_LAMBDA_ENABLED, "true")
.on(p ->
{
p.variable("expr", new ArrayType(new ArrayType(BIGINT)));
p.variable("arr1", new ArrayType(BIGINT));
p.variable("arr2", new ArrayType(BIGINT));
return p.project(
Assignments.builder().put(p.variable("expr", new ArrayType(new ArrayType(BIGINT))), p.rowExpression("transform(arr1, x->transform(arr2, y->slice(arr2, 1, x)))")).build(),
p.values(p.variable("arr1", new ArrayType(BIGINT)), p.variable("arr2", new ArrayType(BIGINT))));
}).doesNotFire();
} |
@Override
public Object convert(String value) {
if (isNullOrEmpty(value)) {
return value;
}
if (value.contains("=")) {
final Map<String, String> fields = new HashMap<>();
Matcher m = PATTERN.matcher(value);
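            // Collect key=value pairs; if a key occurs multiple times, the last occurrence wins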
while (m.find()) {
if (m.groupCount() != 2) {
continue;
}
fields.put(removeQuotes(m.group(1)), removeQuotes(m.group(2)));
}
return fields;
} else {
return Collections.emptyMap();
}
} | @Test
public void testFilterSupportsMultipleIdenticalKeys() {
TokenizerConverter f = new TokenizerConverter(new HashMap<String, Object>());
@SuppressWarnings("unchecked")
Map<String, String> result = (Map<String, String>) f.convert("Ohai I am a message k1=v1 k1=v2 Awesome!");
assertEquals(1, result.size());
assertEquals("v2", result.get("k1"));
} |
public void addEdge(final V u, final V v, final int capacity, final int cost, final int flow) {
Objects.requireNonNull(u);
Objects.requireNonNull(v);
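        // The residual capacity of the new edge is the remaining capacity (capacity - flow)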
addEdge(u, new Edge(v, capacity, cost, capacity - flow, flow));
} | @Test
public void testNullNode() {
final Graph<Integer> graph1 = new Graph<>();
assertThrows(NullPointerException.class, () -> graph1.addEdge(null, 1, 1, 1, 1));
assertThrows(NullPointerException.class, () -> graph1.addEdge(1, null, 1, 1, 1));
} |
@Override
@MethodNotAvailable
public void loadAll(boolean replaceExistingValues) {
throw new MethodNotAvailableException();
} | @Test(expected = MethodNotAvailableException.class)
public void testLoadAllWithListener() {
adapter.loadAll(Collections.emptySet(), true, null);
} |
@Override
public Iterable<MappingEntry> getMappingEntriesByAppId(Type type, ApplicationId appId) {
Set<MappingEntry> mappingEntries = Sets.newHashSet();
for (Device d : deviceService.getDevices()) {
for (MappingEntry mappingEntry : store.getMappingEntries(type, d.id())) {
if (mappingEntry.appId() == appId.id()) {
mappingEntries.add(mappingEntry);
}
}
}
return mappingEntries;
} | @Test
    public void getMappingEntriesByAppId() {
addMapping(MAP_DATABASE, 1);
addMapping(MAP_DATABASE, 2);
assertTrue("should have two mappings",
Lists.newLinkedList(
service.getMappingEntriesByAppId(MAP_DATABASE, appId)).size() == 2);
} |
public BackgroundException map(final IOException failure, final Path directory) {
return super.map("Connection failed", failure, directory);
} | @Test
public void testMapPathName() {
final DefaultIOExceptionMappingService s = new DefaultIOExceptionMappingService();
assertEquals("Download n failed.", s.map("Download {0} failed", new SocketException("s"),
new Path("/n", EnumSet.of(Path.Type.directory, Path.Type.volume))).getMessage());
assertEquals("Download failed n.", s.map("Download failed {0}", new SocketException("s"),
new Path("/n", EnumSet.of(Path.Type.directory, Path.Type.volume))).getMessage());
assertEquals("Download failed (/n).", s.map("Download failed", new SocketException("s"),
new Path("/n", EnumSet.of(Path.Type.directory, Path.Type.volume))).getMessage());
} |
protected List<Long> chooseTablets() {
GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState();
MetaObject chosenOne;
List<Long> chosenTablets = Lists.newArrayList();
// sort dbs
List<Long> dbIds = globalStateMgr.getLocalMetastore().getDbIds();
if (dbIds.isEmpty()) {
return chosenTablets;
}
Queue<MetaObject> dbQueue = new PriorityQueue<>(dbIds.size(), COMPARATOR);
for (Long dbId : dbIds) {
if (dbId == 0L) {
// skip 'information_schema' database
continue;
}
Database db = globalStateMgr.getDb(dbId);
if (db == null) {
continue;
}
dbQueue.add(db);
}
// must lock jobsLock first to obey the lock order rule
this.jobsLock.readLock().lock();
try {
while ((chosenOne = dbQueue.poll()) != null) {
Database db = (Database) chosenOne;
Locker locker = new Locker();
locker.lockDatabase(db, LockType.READ);
long startTime = System.currentTimeMillis();
try {
// sort tables
List<Table> tables = db.getTables();
Queue<MetaObject> tableQueue = new PriorityQueue<>(Math.max(tables.size(), 1), COMPARATOR);
for (Table table : tables) {
// Only check the OLAP table who is in NORMAL state.
// Because some tablets of the not NORMAL table may just a temporary presence in memory,
// if we check those tablets and log FinishConsistencyCheck to bdb,
// it will throw NullPointerException when replaying the log.
if (!table.isOlapTableOrMaterializedView() || ((OlapTable) table).getState() != OlapTableState.NORMAL) {
continue;
}
tableQueue.add(table);
}
while ((chosenOne = tableQueue.poll()) != null) {
OlapTable table = (OlapTable) chosenOne;
// sort partitions
Queue<MetaObject> partitionQueue =
new PriorityQueue<>(Math.max(table.getAllPhysicalPartitions().size(), 1), COMPARATOR);
for (PhysicalPartition partition : table.getPhysicalPartitions()) {
// check partition's replication num. if 1 replication. skip
if (table.getPartitionInfo().getReplicationNum(partition.getParentId()) == (short) 1) {
LOG.debug("partition[{}]'s replication num is 1. ignore", partition.getParentId());
continue;
}
// check if this partition has no data
if (partition.getVisibleVersion() == Partition.PARTITION_INIT_VERSION) {
LOG.debug("partition[{}]'s version is {}. ignore", partition.getId(),
Partition.PARTITION_INIT_VERSION);
continue;
}
if (partition instanceof Partition) {
partitionQueue.add((Partition) partition);
} else if (partition instanceof PhysicalPartitionImpl) {
partitionQueue.add((PhysicalPartitionImpl) partition);
}
}
while ((chosenOne = partitionQueue.poll()) != null) {
PhysicalPartition partition = (PhysicalPartition) chosenOne;
// sort materializedIndices
List<MaterializedIndex> visibleIndexes =
partition.getMaterializedIndices(IndexExtState.VISIBLE);
Queue<MetaObject> indexQueue =
new PriorityQueue<>(Math.max(visibleIndexes.size(), 1), COMPARATOR);
indexQueue.addAll(visibleIndexes);
while ((chosenOne = indexQueue.poll()) != null) {
MaterializedIndex index = (MaterializedIndex) chosenOne;
// sort tablets
Queue<MetaObject> tabletQueue =
new PriorityQueue<>(Math.max(index.getTablets().size(), 1), COMPARATOR);
tabletQueue.addAll(index.getTablets());
while ((chosenOne = tabletQueue.poll()) != null) {
LocalTablet tablet = (LocalTablet) chosenOne;
long chosenTabletId = tablet.getId();
if (this.jobs.containsKey(chosenTabletId)) {
continue;
}
// check if version has already been checked
if (partition.getVisibleVersion() == tablet.getCheckedVersion()) {
if (tablet.isConsistent()) {
LOG.debug("tablet[{}]'s version[{}-{}] has been checked. ignore",
chosenTabletId, tablet.getCheckedVersion(),
partition.getVisibleVersion());
}
} else {
LOG.info("chose tablet[{}-{}-{}-{}-{}] to check consistency", db.getId(),
table.getId(), partition.getId(), index.getId(), chosenTabletId);
chosenTablets.add(chosenTabletId);
}
} // end while tabletQueue
} // end while indexQueue
if (chosenTablets.size() >= MAX_JOB_NUM) {
return chosenTablets;
}
} // end while partitionQueue
} // end while tableQueue
} finally {
// Since only at most `MAX_JOB_NUM` tablet are chosen, we don't need to release the db read lock
// from time to time, just log the time cost here.
LOG.info("choose tablets from db[{}-{}](with read lock held) took {}ms",
db.getFullName(), db.getId(), System.currentTimeMillis() - startTime);
locker.unLockDatabase(db, LockType.READ);
}
} // end while dbQueue
} finally {
jobsLock.readLock().unlock();
}
return chosenTablets;
} | @Test
public void testChooseTablets(@Mocked GlobalStateMgr globalStateMgr) {
long dbId = 1L;
long tableId = 2L;
long partitionId = 3L;
long indexId = 4L;
long tabletId = 5L;
long replicaId = 6L;
long backendId = 7L;
TStorageMedium medium = TStorageMedium.HDD;
MaterializedIndex materializedIndex = new MaterializedIndex(indexId, MaterializedIndex.IndexState.NORMAL);
Replica replica = new Replica(replicaId, backendId, 2L, 1111,
10, 1000, Replica.ReplicaState.NORMAL, -1, 2);
TabletMeta tabletMeta = new TabletMeta(dbId, tableId, partitionId, indexId, 1111, medium);
LocalTablet tablet = new LocalTablet(tabletId, Lists.newArrayList(replica));
materializedIndex.addTablet(tablet, tabletMeta, false);
PartitionInfo partitionInfo = new PartitionInfo();
DataProperty dataProperty = new DataProperty(medium);
partitionInfo.addPartition(partitionId, dataProperty, (short) 3, false);
DistributionInfo distributionInfo = new HashDistributionInfo(1, Lists.newArrayList());
Partition partition = new Partition(partitionId, "partition", materializedIndex, distributionInfo);
partition.setVisibleVersion(2L, System.currentTimeMillis());
OlapTable table = new OlapTable(tableId, "table", Lists.newArrayList(), KeysType.AGG_KEYS, partitionInfo,
distributionInfo);
table.addPartition(partition);
Database database = new Database(dbId, "database");
database.registerTableUnlocked(table);
new Expectations() {
{
GlobalStateMgr.getCurrentState();
result = globalStateMgr;
minTimes = 0;
globalStateMgr.getLocalMetastore().getDbIds();
result = Lists.newArrayList(dbId);
minTimes = 0;
globalStateMgr.getDb(dbId);
result = database;
minTimes = 0;
}
};
Assert.assertEquals(1, new ConsistencyChecker().chooseTablets().size());
// set table state to RESTORE, we will make sure checker will not choose its tablets.
table.setState(OlapTable.OlapTableState.RESTORE);
Assert.assertEquals(0, new ConsistencyChecker().chooseTablets().size());
} |
public List<ChangeStreamRecord> toChangeStreamRecords(
PartitionMetadata partition,
ChangeStreamResultSet resultSet,
ChangeStreamResultSetMetadata resultSetMetadata) {
if (this.isPostgres()) {
      // In PostgreSQL, change stream records are returned as JSONB.
return Collections.singletonList(
toChangeStreamRecordJson(partition, resultSet.getPgJsonb(0), resultSetMetadata));
}
// In GoogleSQL, change stream records are returned as an array of structs.
return resultSet.getCurrentRowAsStruct().getStructList(0).stream()
.flatMap(struct -> toChangeStreamRecord(partition, struct, resultSetMetadata))
.collect(Collectors.toList());
} | @Test
public void testMappingDeleteJsonRowNewValuesToDataChangeRecord() {
final DataChangeRecord dataChangeRecord =
new DataChangeRecord(
"partitionToken",
Timestamp.ofTimeSecondsAndNanos(10L, 20),
"transactionId",
false,
"1",
"tableName",
Arrays.asList(
new ColumnType("column1", new TypeCode("{\"code\":\"INT64\"}"), true, 1L),
new ColumnType("column2", new TypeCode("{\"code\":\"BYTES\"}"), false, 2L)),
Collections.singletonList(new Mod("{\"column1\":\"value1\"}", null, null)),
ModType.DELETE,
ValueCaptureType.NEW_VALUES,
10L,
2L,
"transactionTag",
true,
null);
final String jsonString = recordToJson(dataChangeRecord, false, false);
assertNotNull(jsonString);
ChangeStreamResultSet resultSet = mock(ChangeStreamResultSet.class);
when(resultSet.getPgJsonb(0)).thenReturn(jsonString);
assertEquals(
Collections.singletonList(dataChangeRecord),
mapperPostgres.toChangeStreamRecords(partition, resultSet, resultSetMetadata));
} |
@SuppressWarnings({"unchecked", "rawtypes"})
public Collection<DataNode> getDataNodes(final String tableName) {
Collection<DataNode> result = getDataNodesByTableName(tableName);
if (result.isEmpty()) {
return result;
}
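        // Let each rule's DataNodeBuilder refine the data nodes built from the table name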
for (Entry<ShardingSphereRule, DataNodeBuilder> entry : dataNodeBuilders.entrySet()) {
result = entry.getValue().build(result, entry.getKey());
}
return result;
} | @Test
void assertGetDataNodesForSingleTableWithDataNodeContainedRuleAndDataSourceContainedRule() {
DataNodes dataNodes = new DataNodes(mockShardingSphereRules());
Collection<DataNode> actual = dataNodes.getDataNodes("t_single");
assertThat(actual.size(), is(3));
Iterator<DataNode> iterator = actual.iterator();
DataNode firstDataNode = iterator.next();
assertThat(firstDataNode.getDataSourceName(), is("primary_ds"));
assertThat(firstDataNode.getTableName(), is("t_single"));
DataNode secondDataNode = iterator.next();
assertThat(secondDataNode.getDataSourceName(), is("replica_ds_0"));
assertThat(secondDataNode.getTableName(), is("t_single"));
DataNode thirdDataNode = iterator.next();
assertThat(thirdDataNode.getDataSourceName(), is("replica_ds_1"));
assertThat(thirdDataNode.getTableName(), is("t_single"));
} |
@Override
public CRFModel train(SequenceDataset<Label> sequenceExamples, Map<String, Provenance> runProvenance) {
if (sequenceExamples.getOutputInfo().getUnknownCount() > 0) {
throw new IllegalArgumentException("The supplied Dataset contained unknown Outputs, and this Trainer is supervised.");
}
// Creates a new RNG, adds one to the invocation count, generates a local optimiser.
SplittableRandom localRNG;
TrainerProvenance trainerProvenance;
StochasticGradientOptimiser localOptimiser;
synchronized(this) {
localRNG = rng.split();
localOptimiser = optimiser.copy();
trainerProvenance = getProvenance();
trainInvocationCounter++;
}
ImmutableOutputInfo<Label> labelIDMap = sequenceExamples.getOutputIDInfo();
ImmutableFeatureMap featureIDMap = sequenceExamples.getFeatureIDMap();
SGDVector[][] sgdFeatures = new SGDVector[sequenceExamples.size()][];
int[][] sgdLabels = new int[sequenceExamples.size()][];
double[] weights = new double[sequenceExamples.size()];
int n = 0;
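        // Convert each sequence example into SGD feature vectors and label id arrays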
for (SequenceExample<Label> example : sequenceExamples) {
weights[n] = example.getWeight();
Pair<int[],SGDVector[]> pair = CRFModel.convertToVector(example,featureIDMap,labelIDMap);
sgdFeatures[n] = pair.getB();
sgdLabels[n] = pair.getA();
n++;
}
logger.info(String.format("Training SGD CRF with %d examples", n));
CRFParameters crfParameters = new CRFParameters(featureIDMap.size(),labelIDMap.size());
localOptimiser.initialise(crfParameters);
double loss = 0.0;
int iteration = 0;
for (int i = 0; i < epochs; i++) {
if (shuffle) {
Util.shuffleInPlace(sgdFeatures, sgdLabels, weights, localRNG);
}
if (minibatchSize == 1) {
/*
* Special case a minibatch of size 1. Directly updates the parameters after each
* example rather than aggregating.
*/
for (int j = 0; j < sgdFeatures.length; j++) {
Pair<Double,Tensor[]> output = crfParameters.valueAndGradient(sgdFeatures[j],sgdLabels[j]);
loss += output.getA()*weights[j];
//Update the gradient with the current learning rates
Tensor[] updates = localOptimiser.step(output.getB(),weights[j]);
//Apply the update to the current parameters.
crfParameters.update(updates);
iteration++;
if ((iteration % loggingInterval == 0) && (loggingInterval != -1)) {
logger.info("At iteration " + iteration + ", average loss = " + loss/loggingInterval);
loss = 0.0;
}
}
} else {
Tensor[][] gradients = new Tensor[minibatchSize][];
for (int j = 0; j < sgdFeatures.length; j += minibatchSize) {
double tempWeight = 0.0;
int curSize = 0;
//Aggregate the gradient updates for each example in the minibatch
for (int k = j; k < j+minibatchSize && k < sgdFeatures.length; k++) {
                        Pair<Double,Tensor[]> output = crfParameters.valueAndGradient(sgdFeatures[k],sgdLabels[k]);
loss += output.getA()*weights[k];
tempWeight += weights[k];
gradients[k-j] = output.getB();
curSize++;
}
//Merge the values into a single gradient update
Tensor[] updates = crfParameters.merge(gradients,curSize);
for (Tensor update : updates) {
update.scaleInPlace(minibatchSize);
}
tempWeight /= minibatchSize;
//Update the gradient with the current learning rates
updates = localOptimiser.step(updates,tempWeight);
//Apply the gradient.
crfParameters.update(updates);
iteration++;
if ((loggingInterval != -1) && (iteration % loggingInterval == 0)) {
logger.info("At iteration " + iteration + ", average loss = " + loss/loggingInterval);
loss = 0.0;
}
}
}
}
localOptimiser.finalise();
//public CRFModel(String name, String description, ImmutableInfoMap featureIDMap, ImmutableInfoMap outputIDInfo, CRFParameters parameters) {
ModelProvenance provenance = new ModelProvenance(CRFModel.class.getName(),OffsetDateTime.now(),sequenceExamples.getProvenance(),trainerProvenance,runProvenance);
CRFModel model = new CRFModel("crf-sgd-model",provenance,featureIDMap,labelIDMap,crfParameters);
localOptimiser.reset();
return model;
} | @Test
public void testValidExample() {
SequenceDataset<Label> p = SequenceDataGenerator.generateGorillaDataset(5);
SequenceModel<Label> m = t.train(p);
m.predict(p.getExample(0));
Helpers.testSequenceModelSerialization(m,Label.class);
Helpers.testSequenceModelProtoSerialization(m,Label.class,p);
} |
@VisibleForTesting
List<String> getFuseInfo() {
return mFuseInfo;
} | @Test
public void UnderFileSystemLocal() {
try (FuseUpdateChecker checker = getUpdateCheckerWithUfs("/home/ec2-user/testFolder")) {
Assert.assertTrue(containsTargetInfo(checker.getFuseInfo(),
FuseUpdateChecker.LOCAL_FS));
}
} |
String instanceName(String instanceName) {
return sanitize(instanceName, INSTANCE_RESERVED);
} | @Test
public void replacesIllegalCharactersInInstanceName() throws Exception {
assertThat(sanitize.instanceName("foo\u0000bar/baz-quux")).isEqualTo("foo_bar_baz-quux");
} |
protected static DataSource getDataSourceFromJndi( String dsName, Context ctx ) throws NamingException {
if ( Utils.isEmpty( dsName ) ) {
throw new NamingException( BaseMessages.getString( PKG, "DatabaseUtil.DSNotFound", String.valueOf( dsName ) ) );
}
Object foundDs = FoundDS.get( dsName );
if ( foundDs != null ) {
return (DataSource) foundDs;
}
Object lkup = null;
DataSource rtn = null;
NamingException firstNe = null;
// First, try what they ask for...
try {
lkup = ctx.lookup( dsName );
if ( lkup instanceof DataSource ) {
rtn = (DataSource) lkup;
FoundDS.put( dsName, rtn );
return rtn;
}
} catch ( NamingException ignored ) {
firstNe = ignored;
}
try {
// Needed this for Jboss
lkup = ctx.lookup( "java:" + dsName );
if ( lkup instanceof DataSource ) {
rtn = (DataSource) lkup;
FoundDS.put( dsName, rtn );
return rtn;
}
} catch ( NamingException ignored ) {
// ignore
}
try {
// Tomcat
lkup = ctx.lookup( "java:comp/env/jdbc/" + dsName );
if ( lkup instanceof DataSource ) {
rtn = (DataSource) lkup;
FoundDS.put( dsName, rtn );
return rtn;
}
} catch ( NamingException ignored ) {
// ignore
}
try {
// Others?
lkup = ctx.lookup( "jdbc/" + dsName );
if ( lkup instanceof DataSource ) {
rtn = (DataSource) lkup;
FoundDS.put( dsName, rtn );
return rtn;
}
} catch ( NamingException ignored ) {
// ignore
}
if ( firstNe != null ) {
throw firstNe;
}
throw new NamingException( BaseMessages.getString( PKG, "DatabaseUtil.DSNotFound", dsName ) );
} | @Test
public void testCaching() throws NamingException {
DataSource dataSource = mock( DataSource.class );
when( context.lookup( testName ) ).thenReturn( dataSource ).thenThrow( new NullPointerException() );
assertEquals( dataSource, DatabaseUtil.getDataSourceFromJndi( testName, context ) );
assertEquals( dataSource, DatabaseUtil.getDataSourceFromJndi( testName, context ) );
} |
public boolean isSensitive(ConfigRecord record) {
ConfigResource.Type type = ConfigResource.Type.forId(record.resourceType());
return isSensitive(type, record.name());
} | @Test
public void testIsSensitive() {
assertFalse(SCHEMA.isSensitive(BROKER, "foo.bar"));
assertTrue(SCHEMA.isSensitive(BROKER, "quuux"));
assertTrue(SCHEMA.isSensitive(BROKER, "quuux2"));
assertTrue(SCHEMA.isSensitive(BROKER, "unknown.config.key"));
assertFalse(SCHEMA.isSensitive(TOPIC, "abc"));
} |
@Override
public int hashCode() {
return Objects.hash(targetImage, imageDigest, imageId, tags, imagePushed);
} | @Test
public void testEquality_differentImagePushed() {
JibContainer container1 = new JibContainer(targetImage1, digest1, digest1, tags1, true);
JibContainer container2 = new JibContainer(targetImage1, digest1, digest1, tags1, false);
Assert.assertNotEquals(container1, container2);
Assert.assertNotEquals(container1.hashCode(), container2.hashCode());
} |
@Override
public CompletableFuture<?> getAvailableFuture() {
return targetPartition.getAvailableFuture();
} | @TestTemplate
void testIsAvailableOrNot() throws Exception {
// setup
final NetworkBufferPool globalPool = new NetworkBufferPool(10, 128);
final BufferPool localPool = globalPool.createBufferPool(1, 1, 1, Integer.MAX_VALUE, 0);
final ResultPartitionWriter resultPartition =
new ResultPartitionBuilder().setBufferPoolFactory(() -> localPool).build();
resultPartition.setup();
final RecordWriter<?> recordWriter = createRecordWriter(resultPartition);
try {
// record writer is available because of initial available global pool
assertThat(recordWriter.getAvailableFuture()).isDone();
// request one buffer from the local pool to make it unavailable afterwards
try (BufferBuilder bufferBuilder = localPool.requestBufferBuilder(0)) {
assertThat(bufferBuilder).isNotNull();
assertThat(recordWriter.getAvailableFuture()).isNotDone();
// recycle the buffer to make the local pool available again
final Buffer buffer = BufferBuilderTestUtils.buildSingleBuffer(bufferBuilder);
buffer.recycleBuffer();
}
assertThat(recordWriter.getAvailableFuture()).isDone();
assertThat(recordWriter.getAvailableFuture()).isEqualTo(recordWriter.AVAILABLE);
} finally {
localPool.lazyDestroy();
globalPool.destroy();
}
} |
@VisibleForTesting
@SuppressWarnings("nullness") // ok to have nullable elements on stream
static String renderName(String prefix, MetricResult<?> metricResult) {
MetricKey key = metricResult.getKey();
MetricName name = key.metricName();
String step = key.stepName();
return Streams.concat(
Stream.of(prefix), // prefix is not cleaned, should it be?
Stream.of(stripSuffix(normalizePart(step))),
Stream.of(name.getNamespace(), name.getName()).map(SparkBeamMetric::normalizePart))
.filter(not(Strings::isNullOrEmpty))
.collect(Collectors.joining("."));
} | @Test
public void testRenderNameWithPrefix() {
MetricResult<Object> metricResult =
MetricResult.create(
MetricKey.create(
"myStep.one.two(three)", MetricName.named("myNameSpace//", "myName()")),
123,
456);
String renderedName = SparkBeamMetric.renderName("prefix", metricResult);
assertThat(
"Metric name was not rendered correctly",
renderedName,
equalTo("prefix.myStep_one_two_three.myNameSpace__.myName__"));
} |
public static int findNextPositivePowerOfTwo(final int value)
{
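        // Rounds up by using the number of leading zeros of (value - 1)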
return 1 << (Integer.SIZE - Integer.numberOfLeadingZeros(value - 1));
} | @Test
void shouldReturnNextPositivePowerOfTwo()
{
assertThat(findNextPositivePowerOfTwo(MIN_VALUE), is(MIN_VALUE));
assertThat(findNextPositivePowerOfTwo(MIN_VALUE + 1), is(1));
assertThat(findNextPositivePowerOfTwo(-1), is(1));
assertThat(findNextPositivePowerOfTwo(0), is(1));
assertThat(findNextPositivePowerOfTwo(1), is(1));
assertThat(findNextPositivePowerOfTwo(2), is(2));
assertThat(findNextPositivePowerOfTwo(3), is(4));
assertThat(findNextPositivePowerOfTwo(4), is(4));
assertThat(findNextPositivePowerOfTwo(31), is(32));
assertThat(findNextPositivePowerOfTwo(32), is(32));
assertThat(findNextPositivePowerOfTwo(1 << 30), is(1 << 30));
assertThat(findNextPositivePowerOfTwo((1 << 30) + 1), is(MIN_VALUE));
} |
public HttpApiV2ProxyRequestContext getRequestContext() {
return requestContext;
} | @Test
void deserialize_fromJsonString_authorizerEmptyMap() {
try {
HttpApiV2ProxyRequest req = LambdaContainerHandler.getObjectMapper().readValue(NO_AUTH_PROXY,
HttpApiV2ProxyRequest.class);
assertNotNull(req.getRequestContext().getAuthorizer());
assertFalse(req.getRequestContext().getAuthorizer().isJwt());
assertFalse(req.getRequestContext().getAuthorizer().isLambda());
assertFalse(req.getRequestContext().getAuthorizer().isIam());
} catch (JsonProcessingException e) {
e.printStackTrace();
fail("Exception while parsing request" + e.getMessage());
}
} |
@Override
protected Object getTargetObject(boolean key) {
Object targetObject;
if (key) {
// keyData is never null
if (keyData.isPortable() || keyData.isJson() || keyData.isCompact()) {
targetObject = keyData;
} else {
targetObject = getKey();
}
} else {
if (valueObject == null) {
targetObject = getTargetObjectFromData();
} else {
if (valueObject instanceof PortableGenericRecord
|| valueObject instanceof CompactGenericRecord) {
// These two classes should be able to be handled by respective Getters
// see PortableGetter and CompactGetter
// We get into this branch when in memory format is Object and
// - the cluster does not have PortableFactory configuration for Portable
                    // - the cluster does not have the related classes for Compact
targetObject = getValue();
} else if (valueObject instanceof Portable
|| serializationService.isCompactSerializable(valueObject)) {
targetObject = getValueData();
} else {
// Note that targetObject can be PortableGenericRecord
// and it will be handled with PortableGetter for query.
// We get PortableGenericRecord here when in-memory format is OBJECT and
// the cluster does not have PortableFactory configuration for the object's factory ID
targetObject = getValue();
}
}
}
return targetObject;
} | @Test
public void testGetTargetObject_givenValueIsPortable_whenKeyFlagIsFalse_thenReturnValueData() {
Data key = serializationService.toData("indexedKey");
Portable value = new PortableEmployee(30, "peter");
QueryableEntry entry = createEntry(key, value, newExtractor());
Object targetObject = entry.getTargetObject(false);
assertEquals(serializationService.toData(value), targetObject);
} |
public List<String> getInfo() {
List<String> info = Lists.newArrayList();
info.add(String.valueOf(id));
info.add(name);
info.add(TimeUtils.longToTimeString(createTime));
info.add(String.valueOf(isReadOnly));
info.add(location);
info.add(storage.getBrokerName());
info.add(errMsg == null ? FeConstants.NULL_STRING : errMsg);
return info;
} | @Test
public void testGetInfo() {
repo = new Repository(10000, "repo", false, location, storage);
List<String> infos = repo.getInfo();
Assert.assertTrue(infos.size() == ShowRepositoriesStmt.TITLE_NAMES.size());
} |
public static ParameterizedType listOf(Type elementType) {
return parameterizedType(List.class, elementType);
} | @Test
public void createListType() {
ParameterizedType type = Types.listOf(Person.class);
assertThat(type.getRawType()).isEqualTo(List.class);
assertThat(type.getActualTypeArguments()).isEqualTo(new Type[] {Person.class});
} |
public static void setProtectedFieldValue(String protectedField, Object object, Object newValue) {
try {
            // acegi would silently fail to write to final fields
            // FieldUtils.writeField(Object, field, true) only sets accessible on *non* public fields
            // and then fails with IllegalAccessException (even if you make the field accessible in the interim!)
            // for backwards compatibility we need to use a few steps
Field field = org.apache.commons.lang.reflect.FieldUtils.getField(object.getClass(), protectedField, true);
field.setAccessible(true);
field.set(object, newValue);
} catch (Exception x) {
throw new RuntimeException(x);
}
} | @Test
@Issue("JENKINS-64390")
public void setProtectedFieldValue_Should_fail_silently_to_set_public_final_fields_in_OuterClass() {
OuterClassWithPublicFinalField sut = new OuterClassWithPublicFinalField();
FieldUtils.setProtectedFieldValue("myField", sut, "test");
assertEquals("original", sut.getMyField());
} |
@Override
public Server build(Environment environment) {
printBanner(environment.getName());
final ThreadPool threadPool = createThreadPool(environment.metrics());
final Server server = buildServer(environment.lifecycle(), threadPool);
final Handler applicationHandler = createAppServlet(server,
environment.jersey(),
environment.getObjectMapper(),
environment.getValidator(),
environment.getApplicationContext(),
environment.getJerseyServletContainer(),
environment.metrics());
final Handler adminHandler = createAdminServlet(server,
environment.getAdminContext(),
environment.metrics(),
environment.healthChecks(),
environment.admin());
final RoutingHandler routingHandler = buildRoutingHandler(environment.metrics(),
server,
applicationHandler,
adminHandler);
final Handler gzipHandler = buildGzipHandler(routingHandler);
server.setHandler(addStatsHandler(addRequestLog(server, gzipHandler, environment.getName())));
return server;
} | @Test
void testDeserializeWithoutJsonAutoDetect() throws ConfigurationException, IOException {
final ObjectMapper objectMapper = Jackson.newObjectMapper()
.setVisibility(PropertyAccessor.ALL, JsonAutoDetect.Visibility.NONE);
assertThat(new YamlConfigurationFactory<>(
DefaultServerFactory.class,
BaseValidator.newValidator(),
objectMapper,
"dw"
).build(new ResourceConfigurationSourceProvider(), "yaml/server.yml")
.getMaxThreads())
.isEqualTo(101);
} |
public static Date parseDate(String dateStr, String format) throws ParseException {
if (StringUtils.isBlank(dateStr)) {
return null;
}
SimpleDateFormat sdf = new SimpleDateFormat(format);
return sdf.parse(dateStr);
} | @Test
public void testParseDate() throws ParseException {
String dateStr = "2021-01-01";
Date date = DateUtil.parseDate(dateStr, "yyyy-MM-dd");
Assertions.assertNotNull(date);
SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");
Assertions.assertEquals(dateStr, sdf.format(date));
} |
@VisibleForTesting
CompletableFuture<Acknowledge> getBootstrapCompletionFuture() {
return bootstrapCompletionFuture;
} | @Test
void testClusterShutdownWhenApplicationFails() throws Exception {
// we're "listening" on this to be completed to verify that the cluster
// is being shut down from the ApplicationDispatcherBootstrap
final CompletableFuture<ApplicationStatus> externalShutdownFuture =
new CompletableFuture<>();
final TestingDispatcherGateway.Builder dispatcherBuilder =
failedJobGatewayBuilder()
.setClusterShutdownFunction(
status -> {
externalShutdownFuture.complete(status);
return CompletableFuture.completedFuture(Acknowledge.get());
});
ApplicationDispatcherBootstrap bootstrap =
createApplicationDispatcherBootstrap(
3, dispatcherBuilder.build(), scheduledExecutor);
final CompletableFuture<Acknowledge> completionFuture =
bootstrap.getBootstrapCompletionFuture();
// wait until the bootstrap "thinks" it's done
completionFuture.get(TIMEOUT_SECONDS, TimeUnit.SECONDS);
// verify that the dispatcher is actually being shut down
assertThat(externalShutdownFuture.get(TIMEOUT_SECONDS, TimeUnit.SECONDS))
.isEqualTo(ApplicationStatus.FAILED);
} |
public synchronized ResultSet fetchResults(FetchOrientation orientation, int maxFetchSize) {
long token;
switch (orientation) {
case FETCH_NEXT:
token = currentToken;
break;
case FETCH_PRIOR:
token = currentToken - 1;
break;
default:
throw new UnsupportedOperationException(
String.format("Unknown fetch orientation: %s.", orientation));
}
if (orientation == FetchOrientation.FETCH_NEXT && bufferedResults.isEmpty()) {
// make sure data is available in the buffer
resultStore.waitUntilHasData();
}
return fetchResults(token, maxFetchSize);
} | @Test
void testFetchResultsMultipleTimesWithLimitedFetchSizeInOrientation() {
int bufferSize = data.size();
ResultFetcher fetcher =
buildResultFetcher(Collections.singletonList(data.iterator()), bufferSize);
int fetchSize = data.size() / 2;
runFetchMultipleTimes(
bufferSize,
fetchSize,
token -> fetcher.fetchResults(FetchOrientation.FETCH_NEXT, fetchSize));
} |
public static InetSocketAddress parseAddress(String address, int defaultPort) {
return parseAddress(address, defaultPort, false);
} | @Test
void shouldParseAddressForIPv6WithoutPort_Strict() {
InetSocketAddress socketAddress = AddressUtils.parseAddress("[1abc:2abc:3abc::5ABC:6abc]:", 80);
assertThat(socketAddress.isUnresolved()).isFalse();
assertThat(socketAddress.getAddress().getHostAddress()).isEqualTo("1abc:2abc:3abc:0:0:0:5abc:6abc");
assertThat(socketAddress.getPort()).isEqualTo(80);
assertThat(socketAddress.getHostString()).isEqualTo("1abc:2abc:3abc:0:0:0:5abc:6abc");
} |
@Deprecated
@Override
public void init(final ProcessorContext context, final StateStore root) {
internal.init(context, root);
} | @Test
public void shouldDelegateInit() {
// init is already called in setUp()
verify(inner).init((StateStoreContext) context, store);
} |
public PushTelemetryResponse processPushTelemetryRequest(PushTelemetryRequest request, RequestContext requestContext) {
Uuid clientInstanceId = request.data().clientInstanceId();
if (clientInstanceId == null || Uuid.RESERVED.contains(clientInstanceId)) {
String msg = String.format("Invalid request from the client [%s], invalid client instance id",
clientInstanceId);
return request.getErrorResponse(0, new InvalidRequestException(msg));
}
long now = time.milliseconds();
ClientMetricsInstance clientInstance = clientInstance(clientInstanceId, requestContext);
try {
// Validate the push request parameters for the client instance.
validatePushRequest(request, clientInstance, now);
} catch (ApiException exception) {
log.debug("Error validating push telemetry request from client [{}]", clientInstanceId, exception);
clientInstance.lastKnownError(Errors.forException(exception));
return request.getErrorResponse(0, exception);
} finally {
// Update the client instance with the latest push request parameters.
clientInstance.terminating(request.data().terminating());
}
// Push the metrics to the external client receiver plugin.
byte[] metrics = request.data().metrics();
if (metrics != null && metrics.length > 0) {
try {
long exportTimeStartMs = time.hiResClockMs();
receiverPlugin.exportMetrics(requestContext, request);
clientMetricsStats.recordPluginExport(clientInstanceId, time.hiResClockMs() - exportTimeStartMs);
} catch (Exception exception) {
clientMetricsStats.recordPluginErrorCount(clientInstanceId);
clientInstance.lastKnownError(Errors.INVALID_RECORD);
log.error("Error exporting client metrics to the plugin for client instance id: {}", clientInstanceId, exception);
return request.errorResponse(0, Errors.INVALID_RECORD);
}
}
clientInstance.lastKnownError(Errors.NONE);
return new PushTelemetryResponse(new PushTelemetryResponseData());
} | @Test
public void testPushTelemetryClientInstanceIdInvalid() throws UnknownHostException {
// Null client instance id
PushTelemetryRequest request = new PushTelemetryRequest.Builder(
new PushTelemetryRequestData().setClientInstanceId(null), true).build();
PushTelemetryResponse response = clientMetricsManager.processPushTelemetryRequest(
request, ClientMetricsTestUtils.requestContext());
assertEquals(Errors.INVALID_REQUEST, response.error());
// Zero client instance id
request = new PushTelemetryRequest.Builder(
new PushTelemetryRequestData().setClientInstanceId(Uuid.ZERO_UUID), true).build();
response = clientMetricsManager.processPushTelemetryRequest(
request, ClientMetricsTestUtils.requestContext());
assertEquals(Errors.INVALID_REQUEST, response.error());
} |
public void write(final ConsumerRecord<byte[], byte[]> record) throws IOException {
if (!writable) {
throw new IOException("Write permission denied.");
}
final File dirty = dirty(file);
final File tmp = tmp(file);
// first write to the dirty copy
appendRecordToFile(record, dirty, filesystem);
// atomically rename the dirty copy to the "live" copy while copying the live copy to
// the "dirty" copy via a temporary hard link
Files.createLink(tmp.toPath(), file.toPath());
Files.move(
dirty.toPath(),
file.toPath(),
StandardCopyOption.REPLACE_EXISTING,
StandardCopyOption.ATOMIC_MOVE
);
Files.move(tmp.toPath(), dirty.toPath());
    // keep the dirty copy in sync with the live copy, which now contains the newly written record
appendRecordToFile(record, dirty, filesystem);
} | @Test
public void shouldWriteMultipleRecords() throws IOException {
// Given
    final ConsumerRecord<byte[], byte[]> record1 = newStreamRecord("stream1");
final ConsumerRecord<byte[], byte[]> record2 = newStreamRecord("stream2");
// When
replayFile.write(record1);
replayFile.write(record2);
// Then
final List<String> commands = Files.readAllLines(internalReplayFile.toPath());
assertThat(commands.size(), is(2));
assertThat(commands.get(0), is(
"\"stream/stream1/create\"" + KEY_VALUE_SEPARATOR
+ "{\"statement\":\"CREATE STREAM stream1 (id INT) WITH (kafka_topic='stream1')\""
+ ",\"streamsProperties\":{},\"originalProperties\":{},\"plan\":null}"
));
assertThat(commands.get(1), is(
"\"stream/stream2/create\"" + KEY_VALUE_SEPARATOR
+ "{\"statement\":\"CREATE STREAM stream2 (id INT) WITH (kafka_topic='stream2')\""
+ ",\"streamsProperties\":{},\"originalProperties\":{},\"plan\":null}"
));
} |
public Optional<Integer> getTimestampFieldIndex() {
return timestampFieldIndex;
} | @Test
public void shouldGenerateCorrectTimestamp() throws IOException {
final Generator generator = new Generator(new File("./src/main/resources/pageviews_schema.avro"), new Random());
final RowGenerator rowGenerator = new RowGenerator(generator, "viewtime", Optional.of("viewtime"));
assertThat("incorrect timestamp column index.", rowGenerator.getTimestampFieldIndex().get(), is(0));
} |
public String process(String str)
{
StringBuilder sb = new StringBuilder();
for (String line : str.split("\r?\n"))
{
if (line.startsWith("#include "))
{
String resource = line.substring(9);
if (resource.startsWith("\"") && resource.endsWith("\""))
{
resource = resource.substring(1, resource.length() - 1);
}
String resourceStr = load(resource);
sb.append(resourceStr);
}
else
{
sb.append(line).append('\n');
}
}
return sb.toString();
} | @Test
public void testProcess()
{
Function<String, String> func = (String resource) ->
{
switch (resource)
{
case "file2":
return FILE2;
default:
throw new RuntimeException("unknown resource");
}
};
String out = new Template()
.add(func)
.process(FILE1);
assertEquals(RESULT, out);
} |
public static StructType partitionType(Table table) {
Collection<PartitionSpec> specs = table.specs().values();
return buildPartitionProjectionType("table partition", specs, allFieldIds(specs));
} | @Test
public void testPartitionTypeWithSpecEvolutionInV1Tables() {
TestTables.TestTable table =
TestTables.create(tableDir, "test", SCHEMA, BY_DATA_SPEC, V1_FORMAT_VERSION);
table.updateSpec().addField(Expressions.bucket("category", 8)).commit();
assertThat(table.specs()).hasSize(2);
StructType expectedType =
StructType.of(
NestedField.optional(1000, "data", Types.StringType.get()),
NestedField.optional(1001, "category_bucket_8", Types.IntegerType.get()));
StructType actualType = Partitioning.partitionType(table);
assertThat(actualType).isEqualTo(expectedType);
table.updateSpec().removeField("data").removeField("category_bucket_8").commit();
assertThat(table.specs()).hasSize(3);
assertThat(table.spec().isUnpartitioned()).isTrue();
} |
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
try {
            if(log.isWarnEnabled()) {
                log.warn(String.format("Disable checksum verification for %s", file));
            }
            // Do not set checksum when metadata key X-Static-Large-Object is present. Disable checksum verification in download filter.
            status.setChecksum(Checksum.NONE);
final Response response;
if(status.isAppend()) {
final HttpRange range = HttpRange.withStatus(status);
if(TransferStatus.UNKNOWN_LENGTH == range.getEnd()) {
response = session.getClient().getObject(regionService.lookup(file),
containerService.getContainer(file).getName(), containerService.getKey(file),
range.getStart());
}
else {
response = session.getClient().getObject(regionService.lookup(file),
containerService.getContainer(file).getName(), containerService.getKey(file),
range.getStart(), range.getLength());
}
}
else {
response = session.getClient().getObject(regionService.lookup(file),
containerService.getContainer(file).getName(), containerService.getKey(file));
}
return new HttpMethodReleaseInputStream(response.getResponse(), status);
}
catch(GenericException e) {
throw new SwiftExceptionMappingService().map("Download {0} failed", e, file);
}
catch(IOException e) {
throw new DefaultIOExceptionMappingService().map(e, file);
}
} | @Test
public void testReadRangeUnknownLength() throws Exception {
final Path container = new Path("test.cyberduck.ch", EnumSet.of(Path.Type.directory, Path.Type.volume));
container.attributes().setRegion("IAD");
final Path test = new Path(container, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
new SwiftTouchFeature(session, new SwiftRegionService(session)).touch(test, new TransferStatus());
final byte[] content = RandomUtils.nextBytes(1023);
final SwiftRegionService regionService = new SwiftRegionService(session);
final HttpResponseOutputStream<StorageObject> out = new SwiftWriteFeature(session, regionService).write(test, new TransferStatus().withLength(content.length), new DisabledConnectionCallback());
assertNotNull(out);
new StreamCopier(new TransferStatus(), new TransferStatus()).transfer(new ByteArrayInputStream(content), out);
out.close();
assertNotNull(out.getStatus());
final TransferStatus status = new TransferStatus();
// Set to unknown length
status.setLength(-1L);
status.setAppend(true);
status.setOffset(100L);
final InputStream in = new SwiftReadFeature(session, regionService).read(test, status, new DisabledConnectionCallback());
assertNotNull(in);
final ByteArrayOutputStream buffer = new ByteArrayOutputStream(content.length - 100);
new StreamCopier(status, status).transfer(in, buffer);
final byte[] reference = new byte[content.length - 100];
System.arraycopy(content, 100, reference, 0, content.length - 100);
assertArrayEquals(reference, buffer.toByteArray());
in.close();
new SwiftDeleteFeature(session).delete(Collections.<Path>singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
@SuppressWarnings("checkstyle:npathcomplexity")
public PartitionServiceState getPartitionServiceState() {
PartitionServiceState state = getPartitionTableState();
if (state != SAFE) {
return state;
}
if (!checkAndTriggerReplicaSync()) {
return REPLICA_NOT_SYNC;
}
return SAFE;
} | @Test
public void shouldNotBeSafe_whenUnknownReplicaOwnerPresent_whileNotActive() throws UnknownHostException {
TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory();
HazelcastInstance hz = factory.newHazelcastInstance();
HazelcastInstance hz2 = factory.newHazelcastInstance();
InternalPartitionServiceImpl partitionService = getNode(hz).partitionService;
partitionService.firstArrangement();
changeClusterStateEventually(hz2, ClusterState.FROZEN);
hz2.shutdown();
assertClusterSizeEventually(1, hz);
PartitionStateManager partitionStateManager = partitionService.getPartitionStateManager();
InternalPartitionImpl partition = partitionStateManager.getPartitionImpl(0);
PartitionReplica[] members = partition.replicas();
PartitionReplica[] illegalMembers = Arrays.copyOf(members, members.length);
Address address = members[0].address();
illegalMembers[0] = new PartitionReplica(new Address(address.getInetAddress(), address.getPort() + 1000), members[0].uuid());
partition.setReplicas(illegalMembers);
PartitionReplicaStateChecker replicaStateChecker = partitionService.getPartitionReplicaStateChecker();
assertEquals(PartitionServiceState.REPLICA_NOT_OWNED, replicaStateChecker.getPartitionServiceState());
partition.setReplicas(members);
assertEquals(PartitionServiceState.SAFE, replicaStateChecker.getPartitionServiceState());
} |
@Override
public boolean add(E element) {
return add(element, element.hashCode());
} | @Test(expected = NullPointerException.class)
public void testAddNull() {
final OAHashSet<Integer> set = new OAHashSet<>(8);
set.add(null);
} |
public boolean isCheckpointPending() {
return !pendingCheckpoints.isEmpty();
} | @Test
void testNoFastPathWithChannelFinishedDuringCheckpoints() throws Exception {
BufferOrEvent[] sequence = {
createBarrier(1, 0), createEndOfPartition(0), createBarrier(1, 1)
};
ValidatingCheckpointHandler validator = new ValidatingCheckpointHandler();
inputGate = createCheckpointedInputGate(2, sequence, validator);
for (BufferOrEvent boe : sequence) {
assertThat(inputGate.pollNext()).hasValue(boe);
}
// The last barrier should finish the pending checkpoint instead of trigger a "new" one.
assertThat(validator.getTriggeredCheckpointCounter()).isOne();
assertThat(inputGate.getCheckpointBarrierHandler().isCheckpointPending()).isFalse();
} |
public void close()
{
if (!isClosed)
{
isClosed = true;
unmapAndCloseChannel();
}
} | @Test
void shouldThrowExceptionAfterFailureOnPageStraddle() throws Exception
{
final long newRecordingId = newRecording();
final File segmentFile = new File(archiveDir, segmentFileName(newRecordingId, 0));
try (FileChannel log = FileChannel.open(segmentFile.toPath(), READ, WRITE, CREATE))
{
final ByteBuffer bb = allocate(HEADER_LENGTH);
final DataHeaderFlyweight flyweight = new DataHeaderFlyweight(bb);
flyweight.frameLength(PAGE_SIZE - 128);
log.write(bb);
bb.clear();
flyweight.frameLength(256);
log.write(bb, PAGE_SIZE - 128);
bb.clear();
bb.put(0, (byte)0).limit(1).position(0);
log.write(bb, PAGE_SIZE + 127);
}
final ArchiveException exception = assertThrows(
ArchiveException.class,
() ->
{
final Catalog catalog = new Catalog(archiveDir, null, 0, CAPACITY, clock, null, segmentFileBuffer);
catalog.close();
});
assertThat(exception.getMessage(), containsString(segmentFile.getAbsolutePath()));
} |
@Override
public void setSystemState(ClusterStateBundle stateBundle, NodeInfo node, Waiter<SetClusterStateRequest> externalWaiter) {
RPCSetClusterStateWaiter waiter = new RPCSetClusterStateWaiter(externalWaiter);
ClusterState baselineState = stateBundle.getBaselineClusterState();
Target connection = getConnection(node);
if ( ! connection.isValid()) {
log.log(Level.FINE, () -> String.format("Connection to '%s' could not be created.", node.getRpcAddress()));
return;
}
Request req = new Request(SET_DISTRIBUTION_STATES_RPC_METHOD_NAME);
SlimeClusterStateBundleCodec codec = new SlimeClusterStateBundleCodec();
EncodedClusterStateBundle encodedBundle = codec.encode(stateBundle);
Values v = req.parameters();
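        // RPC parameters: compression type, uncompressed size, compressed payload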
v.add(new Int8Value(encodedBundle.getCompression().type().getCode()));
v.add(new Int32Value(encodedBundle.getCompression().uncompressedSize()));
v.add(new DataValue(encodedBundle.getCompression().data()));
log.log(Level.FINE, () -> String.format("Sending '%s' RPC to %s for state version %d",
req.methodName(), node.getRpcAddress(), stateBundle.getVersion()));
RPCSetClusterStateRequest stateRequest = new RPCSetClusterStateRequest(node, req, baselineState.getVersion());
waiter.setRequest(stateRequest);
connection.invokeAsync(req, Duration.ofSeconds(60), waiter);
node.setClusterStateVersionBundleSent(stateBundle);
} | @Test
void setSystemState_v3_sends_distribution_states_rpc() {
var f = new Fixture<SetClusterStateRequest>();
var cf = ClusterFixture.forFlatCluster(3).bringEntireClusterUp().assignDummyRpcAddresses();
var sentBundle = ClusterStateBundleUtil.makeBundle("distributor:3 storage:3");
f.communicator.setSystemState(sentBundle, cf.cluster().getNodeInfo(Node.ofStorage(1)), f.mockWaiter);
Request req = f.receivedRequest.get();
assertNotNull(req);
assertEquals(req.methodName(), RPCCommunicator.SET_DISTRIBUTION_STATES_RPC_METHOD_NAME);
assertTrue(req.parameters().satisfies("bix")); // <compression type>, <uncompressed size>, <payload>
ClusterStateBundle receivedBundle = RPCUtil.decodeStateBundleFromSetDistributionStatesRequest(req);
assertEquals(receivedBundle, sentBundle);
} |
@Override
public String execute(CommandContext commandContext, String[] args) {
if (ArrayUtils.isEmpty(args)) {
return "Please input method name, eg: \r\ninvoke xxxMethod(1234, \"abcd\", {\"prop\" : \"value\"})\r\n"
+ "invoke XxxService.xxxMethod(1234, \"abcd\", {\"prop\" : \"value\"})\r\n"
+ "invoke com.xxx.XxxService.xxxMethod(1234, \"abcd\", {\"prop\" : \"value\"})";
}
Channel channel = commandContext.getRemote();
String service = channel.attr(ChangeTelnet.SERVICE_KEY) != null
? channel.attr(ChangeTelnet.SERVICE_KEY).get()
: null;
String message = args[0];
int i = message.indexOf("(");
if (i < 0 || !message.endsWith(")")) {
return "Invalid parameters, format: service.method(args)";
}
String method = message.substring(0, i).trim();
String param = message.substring(i + 1, message.length() - 1).trim();
i = method.lastIndexOf(".");
if (i >= 0) {
service = method.substring(0, i).trim();
method = method.substring(i + 1).trim();
}
if (StringUtils.isEmpty(service)) {
return "If you want to invoke like [invoke sayHello(\"xxxx\")], please execute cd command first,"
+ " or you can execute it like [invoke IHelloService.sayHello(\"xxxx\")]";
}
List<Object> list;
try {
list = JsonUtils.toJavaList("[" + param + "]", Object.class);
} catch (Throwable t) {
return "Invalid json argument, cause: " + t.getMessage();
}
StringBuilder buf = new StringBuilder();
Method invokeMethod = null;
ProviderModel selectedProvider = null;
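// If a previous 'select' command already chose among overloaded methods, reuse that selection.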
if (isInvokedSelectCommand(channel)) {
selectedProvider = channel.attr(INVOKE_METHOD_PROVIDER_KEY).get();
invokeMethod = channel.attr(SelectTelnet.SELECT_METHOD_KEY).get();
} else {
for (ProviderModel provider : frameworkModel.getServiceRepository().allProviderModels()) {
if (!isServiceMatch(service, provider)) {
continue;
}
selectedProvider = provider;
List<Method> methodList = findSameSignatureMethod(provider.getAllMethods(), method, list);
if (CollectionUtils.isEmpty(methodList)) {
break;
}
if (methodList.size() == 1) {
invokeMethod = methodList.get(0);
} else {
List<Method> matchMethods = findMatchMethods(methodList, list);
if (CollectionUtils.isEmpty(matchMethods)) {
break;
}
if (matchMethods.size() == 1) {
invokeMethod = matchMethods.get(0);
} else { // multiple overloaded methods match; ask the caller to select one
channel.attr(INVOKE_METHOD_PROVIDER_KEY).set(provider);
channel.attr(INVOKE_METHOD_LIST_KEY).set(matchMethods);
channel.attr(INVOKE_MESSAGE_KEY).set(message);
printSelectMessage(buf, matchMethods);
return buf.toString();
}
}
break;
}
}
if (!StringUtils.isEmpty(service)) {
buf.append("Use default service ").append(service).append('.');
}
if (selectedProvider == null) {
buf.append("\r\nNo such service ").append(service);
return buf.toString();
}
if (invokeMethod == null) {
buf.append("\r\nNo such method ")
.append(method)
.append(" in service ")
.append(service);
return buf.toString();
}
try {
Object[] array =
realize(list.toArray(), invokeMethod.getParameterTypes(), invokeMethod.getGenericParameterTypes());
long start = System.currentTimeMillis();
AppResponse result = new AppResponse();
try {
Object o = invokeMethod.invoke(selectedProvider.getServiceInstance(), array);
boolean setValueDone = false;
if (RpcContext.getServerAttachment().isAsyncStarted()) {
AsyncContext asyncContext = RpcContext.getServerAttachment().getAsyncContext();
if (asyncContext instanceof AsyncContextImpl) {
CompletableFuture<Object> internalFuture =
((AsyncContextImpl) asyncContext).getInternalFuture();
result.setValue(internalFuture.get());
setValueDone = true;
}
}
if (!setValueDone) {
result.setValue(o);
}
} catch (Throwable t) {
result.setException(t);
if (t instanceof InterruptedException) {
Thread.currentThread().interrupt();
}
} finally {
RpcContext.removeContext();
}
long end = System.currentTimeMillis();
buf.append("\r\nresult: ");
buf.append(JsonUtils.toJson(result.recreate()));
buf.append("\r\nelapsed: ");
buf.append(end - start);
buf.append(" ms.");
} catch (Throwable t) {
return "Failed to invoke method " + invokeMethod.getName() + ", cause: " + StringUtils.toString(t);
}
return buf.toString();
} | @Test
void testInvokeByPassingNullValue() {
defaultAttributeMap.attr(ChangeTelnet.SERVICE_KEY).set(DemoService.class.getName());
defaultAttributeMap.attr(SelectTelnet.SELECT_KEY).set(null);
given(mockChannel.attr(ChangeTelnet.SERVICE_KEY))
.willReturn(defaultAttributeMap.attr(ChangeTelnet.SERVICE_KEY));
given(mockChannel.attr(SelectTelnet.SELECT_KEY)).willReturn(defaultAttributeMap.attr(SelectTelnet.SELECT_KEY));
registerProvider(DemoService.class.getName(), new DemoServiceImpl(), DemoService.class);
try {
invoke.execute(mockCommandContext, new String[] {"sayHello(null)"});
} catch (Exception ex) {
assertTrue(ex instanceof NullPointerException);
}
defaultAttributeMap.attr(ChangeTelnet.SERVICE_KEY).remove();
defaultAttributeMap.attr(SelectTelnet.SELECT_KEY).remove();
} |
@Override
public void collect(MetricsEmitter metricsEmitter) {
for (Map.Entry<MetricKey, KafkaMetric> entry : ledger.getMetrics()) {
MetricKey metricKey = entry.getKey();
KafkaMetric metric = entry.getValue();
try {
collectMetric(metricsEmitter, metricKey, metric);
} catch (Exception e) {
// catch and log to continue processing remaining metrics
log.error("Error processing Kafka metric {}", metricKey, e);
}
}
} | @Test
public void testCollectMetricsWithExcludeLabels() {
collector = new KafkaMetricsCollector(
metricNamingStrategy,
time,
Collections.singleton("tag2")
);
tags = new HashMap<>();
tags.put("tag1", "value1");
tags.put("tag2", "value2");
// Gauge metric.
MetricName name1 = metrics.metricName("nonMeasurable", "group1", tags);
metrics.addMetric(name1, (Gauge<Double>) (config, now) -> 99d);
// Sum metric.
MetricName name2 = metrics.metricName("counter", "group1", tags);
Sensor sensor = metrics.sensor("counter");
sensor.add(name2, new WindowedCount());
sensor.record();
testEmitter.reconfigurePredicate(k -> !k.key().name().endsWith(".count"));
// Collect sum metrics
collector.collect(testEmitter);
List<SinglePointMetric> result = testEmitter.emittedMetrics();
Metric metric = result.stream()
.flatMap(metrics -> Stream.of(metrics.builder().build()))
.filter(m -> m.getName().equals("test.domain.group1.nonmeasurable")).findFirst().get();
assertEquals(1, metric.getGauge().getDataPointsCount());
NumberDataPoint point = metric.getGauge().getDataPoints(0);
assertEquals(1, point.getAttributesCount());
assertEquals("tag1", point.getAttributes(0).getKey());
assertEquals("value1", point.getAttributes(0).getValue().getStringValue());
metric = result.stream()
.flatMap(metrics -> Stream.of(metrics.builder().build()))
.filter(m -> m.getName().equals("test.domain.group1.counter")).findFirst().get();
assertEquals(AggregationTemporality.AGGREGATION_TEMPORALITY_CUMULATIVE, metric.getSum().getAggregationTemporality());
assertEquals(1, metric.getSum().getDataPointsCount());
point = metric.getSum().getDataPoints(0);
assertEquals(1, point.getAttributesCount());
assertEquals("tag1", point.getAttributes(0).getKey());
assertEquals("value1", point.getAttributes(0).getValue().getStringValue());
testEmitter.reset();
testEmitter.onlyDeltaMetrics(true);
collector.collect(testEmitter);
result = testEmitter.emittedMetrics();
// Delta metrics.
metric = result.stream()
.flatMap(metrics -> Stream.of(metrics.builder().build()))
.filter(m -> m.getName().equals("test.domain.group1.counter")).findFirst().get();
assertEquals(AggregationTemporality.AGGREGATION_TEMPORALITY_DELTA, metric.getSum().getAggregationTemporality());
assertEquals(1, metric.getSum().getDataPointsCount());
point = metric.getSum().getDataPoints(0);
assertEquals(1, point.getAttributesCount());
assertEquals("tag1", point.getAttributes(0).getKey());
assertEquals("value1", point.getAttributes(0).getValue().getStringValue());
} |
@Override
public int compareTo(Delayed o) {
throw new UnsupportedOperationException();
} | @Test(expected = UnsupportedOperationException.class)
@SuppressWarnings("ConstantConditions")
public void compareTo() {
ScheduledFuture<Integer> future = new DelegatingScheduledFutureStripper<>(
scheduler.schedule(new SimpleCallableTestTask(), 0, TimeUnit.SECONDS));
future.compareTo(null);
} |
public Timestamp parseToTimestamp(final String text) {
return new Timestamp(parse(text));
} | @Test
public void shouldParseToTimestamp() {
assertThat(parser.parseToTimestamp("2017-11-13T23:59:58").getTime(), is(1510617598000L));
assertThat(parser.parseToTimestamp("2017-11-13T23:59:58.999-0100").getTime(), is(1510621198999L));
} |
public void append(AbortedTxn abortedTxn) throws IOException {
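// Entries must be appended in order of strictly increasing last offset.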
lastOffset.ifPresent(offset -> {
if (offset >= abortedTxn.lastOffset())
throw new IllegalArgumentException("The last offset of appended transactions must increase sequentially, but "
+ abortedTxn.lastOffset() + " is not greater than current last offset " + offset + " of index "
+ file.getAbsolutePath());
});
lastOffset = OptionalLong.of(abortedTxn.lastOffset());
Utils.writeFully(channel(), abortedTxn.buffer.duplicate());
} | @Test
public void testLastOffsetMustIncrease() throws IOException {
index.append(new AbortedTxn(1L, 5, 15, 13));
assertThrows(IllegalArgumentException.class, () -> index.append(new AbortedTxn(0L, 0,
15, 11)));
} |
public SqlType getExpressionSqlType(final Expression expression) {
return getExpressionSqlType(expression, Collections.emptyMap());
} | @Test
public void shouldFailForComplexTypeComparison() {
// Given:
final Expression expression = new ComparisonExpression(Type.GREATER_THAN, MAPCOL, ADDRESS);
// When:
final KsqlStatementException e = assertThrows(
KsqlStatementException.class,
() -> expressionTypeManager.getExpressionSqlType(expression)
);
// Then:
assertThat(e.getMessage(), containsString(
"Cannot compare MAP<BIGINT, DOUBLE>"
+ " to STRUCT<`NUMBER` BIGINT, `STREET` STRING, `CITY` STRING,"
+ " `STATE` STRING, `ZIPCODE` BIGINT> with GREATER_THAN."
));
assertThat(e.getUnloggedMessage(), containsString(
"Cannot compare COL5 (MAP<BIGINT, DOUBLE>) to COL6 (STRUCT<`NUMBER` BIGINT, "
+ "`STREET` STRING, `CITY` STRING, `STATE` STRING, `ZIPCODE` BIGINT>) "
+ "with GREATER_THAN"
));
} |
public JmxCollector register() {
return register(PrometheusRegistry.defaultRegistry);
} | @Test
public void testServletRequestPattern() throws Exception {
JmxCollector jc =
new JmxCollector(
"\n---\nrules:\n- pattern: 'Catalina<j2eeType=Servlet, WebModule=//([-a-zA-Z0-9+&@#/%?=~_|!:.,;]*[-a-zA-Z0-9+&@#/%=~_|]),\n name=([-a-zA-Z0-9+/$%~_-|!.]*), J2EEApplication=none, \nJ2EEServer=none><>RequestCount:'\n name: tomcat_request_servlet_count\n labels:\n module: `$1`\n servlet: `$2`\n help: Tomcat servlet request count\n type: COUNTER\n attrNameSnakeCase: false"
.replace('`', '"'))
.register(prometheusRegistry);
assertEquals(
1.0,
getSampleValue(
"tomcat_request_servlet_count",
new String[] {"module", "servlet"},
new String[] {"localhost/host-manager", "HTMLHostManager"}),
.001);
} |
@Override
public boolean matches(T objectUnderTest) {
boolean matches = super.matches(objectUnderTest);
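// Overwrite (rather than append to) the description with details of this evaluation, so repeated matches leave it unchanged.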
describedAs(buildVerboseDescription(objectUnderTest, matches));
return matches;
} | @Test
public void multiple_matches_should_not_change_description() {
VERBOSE_CONDITION.matches("foooo");
assertThat(VERBOSE_CONDITION).hasToString("shorter than 4 but length was 5");
VERBOSE_CONDITION.matches("foooo");
VERBOSE_CONDITION.matches("foooo");
assertThat(VERBOSE_CONDITION).hasToString("shorter than 4 but length was 5");
} |
public static List<DiskRange> mergeAdjacentDiskRanges(Collection<DiskRange> diskRanges, DataSize maxMergeDistance, DataSize maxReadSize)
{
// sort ranges by start offset
List<DiskRange> ranges = new ArrayList<>(diskRanges);
Collections.sort(ranges, Comparator.comparingLong(DiskRange::getOffset));
// merge ranges that overlap or lie within the max merge distance, as long as the merged range stays within the max read size
long maxReadSizeBytes = maxReadSize.toBytes();
long maxMergeDistanceBytes = maxMergeDistance.toBytes();
ImmutableList.Builder<DiskRange> result = ImmutableList.builder();
DiskRange last = ranges.get(0);
for (int i = 1; i < ranges.size(); i++) {
DiskRange current = ranges.get(i);
DiskRange merged = last.span(current);
if (merged.getLength() <= maxReadSizeBytes && last.getEnd() + maxMergeDistanceBytes >= current.getOffset()) {
last = merged;
}
else {
result.add(last);
last = current;
}
}
result.add(last);
return result.build();
} | @Test
public void testMergeAdjacent()
{
List<DiskRange> diskRanges = mergeAdjacentDiskRanges(
ImmutableList.of(new DiskRange(100, 100), new DiskRange(200, 100), new DiskRange(300, 100)),
new DataSize(0, BYTE),
new DataSize(1, GIGABYTE));
assertEquals(diskRanges, ImmutableList.of(new DiskRange(100, 300)));
} |
@Override
public void dropPartition(ObjectPath tablePath, CatalogPartitionSpec catalogPartitionSpec, boolean ignoreIfNotExists)
throws PartitionNotExistException, CatalogException {
if (!tableExists(tablePath)) {
if (ignoreIfNotExists) {
return;
} else {
throw new PartitionNotExistException(getName(), tablePath, catalogPartitionSpec);
}
}
String tablePathStr = inferTablePath(catalogPathStr, tablePath);
Map<String, String> options = TableOptionProperties.loadFromProperties(tablePathStr, hadoopConf);
boolean hiveStylePartitioning = Boolean.parseBoolean(options.getOrDefault(FlinkOptions.HIVE_STYLE_PARTITIONING.key(), "false"));
String partitionPathStr = HoodieCatalogUtil.inferPartitionPath(hiveStylePartitioning, catalogPartitionSpec);
if (!StreamerUtil.partitionExists(tablePathStr, partitionPathStr, hadoopConf)) {
if (ignoreIfNotExists) {
return;
} else {
throw new PartitionNotExistException(getName(), tablePath, catalogPartitionSpec);
}
}
try (HoodieFlinkWriteClient<?> writeClient = HoodieCatalogUtil.createWriteClient(options, tablePathStr, tablePath, hadoopConf)) {
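// Delete the partition's data via the write client (a replace commit), then remove the partition directory from the filesystem.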
writeClient.deletePartitions(Collections.singletonList(partitionPathStr),
writeClient.createNewInstantTime())
.forEach(writeStatus -> {
if (writeStatus.hasErrors()) {
throw new HoodieMetadataException(String.format("Failed to commit metadata table records at file id %s.", writeStatus.getFileId()));
}
});
fs.delete(new Path(tablePathStr, partitionPathStr), true);
} catch (Exception e) {
throw new CatalogException(String.format("Dropping partition %s of table %s exception.", partitionPathStr, tablePath), e);
}
} | @Test
public void testDropPartition() throws Exception {
ObjectPath tablePath = new ObjectPath(TEST_DEFAULT_DATABASE, "tb1");
// create table
catalog.createTable(tablePath, EXPECTED_CATALOG_TABLE, true);
CatalogPartitionSpec partitionSpec = new CatalogPartitionSpec(new HashMap<String, String>() {
{
put("partition", "par1");
}
});
// drop a non-existent partition

assertThrows(PartitionNotExistException.class,
() -> catalog.dropPartition(tablePath, partitionSpec, false));
String tablePathStr = catalog.inferTablePath(catalogPathStr, tablePath);
Configuration flinkConf = TestConfigurations.getDefaultConf(tablePathStr);
HoodieTableMetaClient metaClient = HoodieTestUtils
.createMetaClient(
new HadoopStorageConfiguration(HadoopConfigurations.getHadoopConf(flinkConf)), tablePathStr);
TestData.writeData(TestData.DATA_SET_INSERT, flinkConf);
assertTrue(catalog.partitionExists(tablePath, partitionSpec));
// drop partition 'par1'
catalog.dropPartition(tablePath, partitionSpec, false);
HoodieInstant latestInstant = metaClient.getActiveTimeline().filterCompletedInstants().lastInstant().orElse(null);
assertNotNull(latestInstant, "Delete partition commit should be completed");
HoodieCommitMetadata commitMetadata = WriteProfiles.getCommitMetadata("tb1", new Path(tablePathStr), latestInstant, metaClient.getActiveTimeline());
assertThat(commitMetadata, instanceOf(HoodieReplaceCommitMetadata.class));
HoodieReplaceCommitMetadata replaceCommitMetadata = (HoodieReplaceCommitMetadata) commitMetadata;
assertThat(replaceCommitMetadata.getPartitionToReplaceFileIds().size(), is(1));
assertFalse(catalog.partitionExists(tablePath, partitionSpec));
} |
public static <K, V> Read<K, V> read() {
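// Defaults: read indefinitely (Long.MAX_VALUE records), processing-time timestamp policy, no offset commit on finalize, no dynamic topic discovery.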
return new AutoValue_KafkaIO_Read.Builder<K, V>()
.setTopics(new ArrayList<>())
.setTopicPartitions(new ArrayList<>())
.setConsumerFactoryFn(KafkaIOUtils.KAFKA_CONSUMER_FACTORY_FN)
.setConsumerConfig(KafkaIOUtils.DEFAULT_CONSUMER_PROPERTIES)
.setMaxNumRecords(Long.MAX_VALUE)
.setCommitOffsetsInFinalizeEnabled(false)
.setDynamicRead(false)
.setTimestampPolicyFactory(TimestampPolicyFactory.withProcessingTime())
.setConsumerPollingTimeout(2L)
.setRedistributed(false)
.setAllowDuplicates(false)
.setRedistributeNumKeys(0)
.build();
} | @Test
public void testUnboundedSourceRawSizeMetric() {
final String readStep = "readFromKafka";
final int numElements = 1000;
final int numPartitionsPerTopic = 10;
final int recordSize = 12; // The size of key and value is defined in ConsumerFactoryFn.
List<String> topics = ImmutableList.of("test");
KafkaIO.Read<byte[], Long> reader =
KafkaIO.<byte[], Long>read()
.withBootstrapServers("none")
.withTopicPartitions(
ImmutableList.of(new TopicPartition("test", 5), new TopicPartition("test", 8)))
.withConsumerFactoryFn(
new ConsumerFactoryFn(
topics, numPartitionsPerTopic, numElements, OffsetResetStrategy.EARLIEST))
.withKeyDeserializer(ByteArrayDeserializer.class)
.withValueDeserializer(LongDeserializer.class)
.withMaxNumRecords(numElements / numPartitionsPerTopic * 2); // 2 is the # of partitions
p.apply(readStep, reader.withoutMetadata()).apply(Values.create());
PipelineResult result = p.run();
MetricQueryResults metrics =
result
.metrics()
.queryMetrics(
MetricsFilter.builder()
.addNameFilter(
MetricNameFilter.inNamespace(KafkaUnboundedReader.METRIC_NAMESPACE))
.build());
assertThat(
metrics.getDistributions(),
hasItem(
attemptedMetricsResult(
KafkaUnboundedReader.METRIC_NAMESPACE,
KafkaUnboundedReader.RAW_SIZE_METRIC_PREFIX + "test-5",
readStep,
DistributionResult.create(
recordSize * numElements / numPartitionsPerTopic,
numElements / numPartitionsPerTopic,
recordSize,
recordSize))));
assertThat(
metrics.getDistributions(),
hasItem(
attemptedMetricsResult(
KafkaUnboundedReader.METRIC_NAMESPACE,
KafkaUnboundedReader.RAW_SIZE_METRIC_PREFIX + "test-8",
readStep,
DistributionResult.create(
recordSize * numElements / numPartitionsPerTopic,
numElements / numPartitionsPerTopic,
recordSize,
recordSize))));
} |
public static String readAllBytes(InputStream input, Charset charset) throws IOException {
if (charset == null) {
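// No charset supplied: sniff the first bytes of the stream to auto-detect the UTF variant.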
input = ensureMarkSupport(input);
input.mark(4);
byte[] buffer = new byte[4];
int bytesRead = fillBuffer(input, buffer);
input.reset();
charset = detectUtfCharset0(buffer, bytesRead);
if (charset == null) {
throw new IOException("Unsupported UCS-4 variant (neither UTF-32BE nor UTF-32LE)");
}
}
Reader reader = new InputStreamReader(input, charset);
return readAllChars(reader);
} | @Test
void validateTextConversionFromStreams() throws IOException {
assertEquals("A",
UtfTextUtils.readAllBytes(new ByteArrayInputStream(hexBytes("EFBBBF41")), StandardCharsets.UTF_8));
assertEquals("A", UtfTextUtils.readAllBytes(new ByteArrayInputStream(hexBytes("EFBBBF41")), null));
assertEquals("A", UtfTextUtils.readAllBytes(new ByteArrayInputStream(hexBytes("41")), StandardCharsets.UTF_8));
assertEquals("A", UtfTextUtils.readAllBytes(new ByteArrayInputStream(hexBytes("41")), null));
// Invalid UCS-4 encoding should throw an IOException instead of an
// IllegalArgumentException.
assertThrows(IOException.class,
() -> UtfTextUtils.readAllBytes(new ByteArrayInputStream(hexBytes("0000FFFE")), null));
} |
@PUT
@Path("/{connector}/config")
@Operation(summary = "Create or reconfigure the specified connector")
public Response putConnectorConfig(final @PathParam("connector") String connector,
final @Context HttpHeaders headers,
final @Parameter(hidden = true) @QueryParam("forward") Boolean forward,
final Map<String, String> connectorConfig) throws Throwable {
FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>();
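// Verify that any 'name' in the config body matches the connector name from the URL path; a mismatch is rejected.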
checkAndPutConnectorConfigName(connector, connectorConfig);
herder.putConnectorConfig(connector, connectorConfig, true, cb);
Herder.Created<ConnectorInfo> createdInfo = requestHandler.completeOrForwardRequest(cb, "/connectors/" + connector + "/config",
"PUT", headers, connectorConfig, new TypeReference<ConnectorInfo>() { }, new CreatedConnectorInfoTranslator(), forward);
Response.ResponseBuilder response;
if (createdInfo.created()) {
URI location = UriBuilder.fromUri("/connectors").path(connector).build();
response = Response.created(location);
} else {
response = Response.ok();
}
return response.entity(createdInfo.result()).build();
} | @Test
public void testPutConnectorConfigNameMismatch() {
Map<String, String> connConfig = new HashMap<>(CONNECTOR_CONFIG);
connConfig.put(ConnectorConfig.NAME_CONFIG, "mismatched-name");
assertThrows(BadRequestException.class, () -> connectorsResource.putConnectorConfig(CONNECTOR_NAME,
NULL_HEADERS, FORWARD, connConfig));
} |
@Override
public boolean isMatchedWithFilter(SAPropertyFilter filter) {
return "Android".equals(filter.getEventJson(SAPropertyFilter.LIB).optString("$lib"));
} | @Test
public void isMatchedWithFilter() {
InternalCustomPropertyPlugin customPropertyPlugin = new InternalCustomPropertyPlugin();
SAPropertyFilter propertyFilter = new SAPropertyFilter();
JSONObject libProperty = new JSONObject();
try {
libProperty.put("$lib", "Android");
} catch (JSONException e) {
e.printStackTrace();
}
propertyFilter.setEventJson(SAPropertyFilter.LIB, libProperty);
Assert.assertTrue(customPropertyPlugin.isMatchedWithFilter(propertyFilter));
try {
libProperty.put("$lib", "iOS");
} catch (JSONException e) {
e.printStackTrace();
}
propertyFilter.setEventJson(SAPropertyFilter.LIB, libProperty);
Assert.assertFalse(customPropertyPlugin.isMatchedWithFilter(propertyFilter));
} |
void format(FSNamesystem fsn, String clusterId, boolean force)
throws IOException {
long fileCount = fsn.getFilesTotal();
// Expect 1 file, which is the root inode
Preconditions.checkState(fileCount == 1,
"FSImage.format should be called with an uninitialized namesystem, has " +
fileCount + " files");
NamespaceInfo ns = NNStorage.newNamespaceInfo();
LOG.info("Allocated new BlockPoolId: " + ns.getBlockPoolID());
ns.clusterID = clusterId;
storage.format(ns);
editLog.formatNonFileJournals(ns, force);
saveFSImageInAllDirs(fsn, 0);
} | @Test
public void testZeroBlockSize() throws Exception {
final Configuration conf = new HdfsConfiguration();
String tarFile = System.getProperty("test.cache.data", "build/test/cache")
+ "/" + HADOOP_2_7_ZER0_BLOCK_SIZE_TGZ;
String testDir = PathUtils.getTestDirName(getClass());
File dfsDir = new File(testDir, "image-with-zero-block-size");
if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
}
FileUtil.unTar(new File(tarFile), new File(testDir));
File nameDir = new File(dfsDir, "name");
GenericTestUtils.assertExists(nameDir);
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
nameDir.getAbsolutePath());
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.format(false)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.waitSafeMode(false).startupOption(StartupOption.UPGRADE)
.build();
try {
FileSystem fs = cluster.getFileSystem();
Path testPath = new Path("/tmp/zeroBlockFile");
assertTrue("File /tmp/zeroBlockFile doesn't exist ", fs.exists(testPath));
assertTrue("Name node didn't come up", cluster.isNameNodeUp(0));
} finally {
cluster.shutdown();
// Clean up
FileUtil.fullyDelete(dfsDir);
}
} |
@Override
public void execute(final ConnectionSession connectionSession) {
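// Use the database from the FROM clause when present, otherwise fall back to the session's current database.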
String databaseName = sqlStatement.getFromDatabase().map(schema -> schema.getDatabase().getIdentifier().getValue()).orElseGet(connectionSession::getUsedDatabaseName);
queryResultMetaData = createQueryResultMetaData(databaseName);
mergedResult = new TransparentMergedResult(getQueryResult(databaseName));
} | @Test
void assertShowTablesExecutorWithUpperCase() throws SQLException {
MySQLShowTablesStatement showTablesStatement = new MySQLShowTablesStatement();
ShowFilterSegment showFilterSegment = mock(ShowFilterSegment.class);
when(showFilterSegment.getLike()).thenReturn(Optional.of(new ShowLikeSegment(0, 10, "T_TEST")));
showTablesStatement.setFilter(showFilterSegment);
ShowTablesExecutor executor = new ShowTablesExecutor(showTablesStatement, TypedSPILoader.getService(DatabaseType.class, "MySQL"));
Map<String, ShardingSphereDatabase> databases = getDatabases();
ContextManager contextManager = mockContextManager(databases);
when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager);
executor.execute(mockConnectionSession());
assertThat(executor.getQueryResultMetaData().getColumnCount(), is(1));
executor.getMergedResult().next();
assertThat(executor.getMergedResult().getValue(1, Object.class), is("T_TEST"));
assertFalse(executor.getMergedResult().next());
} |
@Override
public Object[] toArray() {
Object[] array = new Object[size];
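// A RangeSet models the contiguous range [0, size), so the i-th element is simply i.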
for (int i = 0; i < size; i++) {
array[i] = i;
}
return array;
} | @Test
public void toArray1() throws Exception {
RangeSet rs = new RangeSet(4);
Object[] array = rs.toArray(new Integer[4]);
assertEquals(4, array.length);
assertEquals(0, array[0]);
assertEquals(1, array[1]);
assertEquals(2, array[2]);
assertEquals(3, array[3]);
} |
@Override
public int hashCode() {
return Objects.hash(instanceType, filterDatabaseName(this));
} | @Test
void assertHashCodeEqualsForJdbcMode() {
PipelineContextKey contextKey1 = new PipelineContextKey("logic_db", InstanceType.JDBC);
PipelineContextKey contextKey2 = new PipelineContextKey("sharding_db", InstanceType.JDBC);
assertThat(contextKey1.hashCode(), not(contextKey2.hashCode()));
assertThat(contextKey1, not(contextKey2));
} |
static void handleDisable(Namespace namespace, Admin adminClient) throws TerseException {
FeatureUpdate.UpgradeType upgradeType = downgradeType(namespace);
Map<String, FeatureUpdate> updates = new HashMap<>();
List<String> features = namespace.getList("feature");
if (features != null) {
features.forEach(feature -> {
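// A max version level of 0 disables the feature; duplicate feature names are rejected.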
if (updates.put(feature, new FeatureUpdate((short) 0, upgradeType)) != null) {
throw new RuntimeException("Feature " + feature + " was specified more than once.");
}
});
}
update("disable", adminClient, updates, namespace.getBoolean("dry_run"));
} | @Test
public void testHandleDisable() {
Map<String, Object> namespace = new HashMap<>();
namespace.put("feature", Arrays.asList("foo.bar", "metadata.version", "quux"));
namespace.put("dry_run", false);
String disableOutput = ToolsTestUtils.captureStandardOut(() -> {
Throwable t = assertThrows(TerseException.class, () -> FeatureCommand.handleDisable(new Namespace(namespace), buildAdminClient()));
assertTrue(t.getMessage().contains("1 out of 3 operation(s) failed."));
});
assertEquals(format("foo.bar was disabled.%n" +
"Could not disable metadata.version. Can't downgrade below 4%n" +
"quux was disabled."), disableOutput);
} |