focal_method (stringlengths 13–60.9k) | test_case (stringlengths 25–109k) |
---|---|
public ConfigData get(String path) {
if (allowedPaths == null) {
throw new IllegalStateException("The provider has not been configured yet.");
}
Map<String, String> data = new HashMap<>();
if (path == null || path.isEmpty()) {
return new ConfigData(data);
}
Path filePath = allowedPaths.parseUntrustedPath(path);
if (filePath == null) {
log.warn("The path {} is not allowed to be accessed", path);
return new ConfigData(data);
}
try (Reader reader = reader(filePath)) {
Properties properties = new Properties();
properties.load(reader);
Enumeration<Object> keys = properties.keys();
while (keys.hasMoreElements()) {
String key = keys.nextElement().toString();
String value = properties.getProperty(key);
if (value != null) {
data.put(key, value);
}
}
return new ConfigData(data);
} catch (IOException e) {
log.error("Could not read properties from file {}", path, e);
throw new ConfigException("Could not read properties from file " + path);
}
} | @Test
public void testGetAllKeysAtPath() {
ConfigData configData = configProvider.get("dummy");
Map<String, String> result = new HashMap<>();
result.put("testKey", "testResult");
result.put("testKey2", "testResult2");
assertEquals(result, configData.data());
assertNull(configData.ttl());
} |
@Deprecated
@Override
public Boolean hasAppendsOnly(org.apache.hadoop.hive.ql.metadata.Table hmsTable, SnapshotContext since) {
TableDesc tableDesc = Utilities.getTableDesc(hmsTable);
Table table = IcebergTableUtil.getTable(conf, tableDesc.getProperties());
return hasAppendsOnly(table.snapshots(), since);
} | @Test
public void testHasAppendsOnlyReturnsNullWhenGivenSnapshotNotInTheList() {
SnapshotContext since = new SnapshotContext(1);
List<Snapshot> snapshotList = Arrays.asList(anySnapshot, appendSnapshot, deleteSnapshot);
HiveIcebergStorageHandler storageHandler = new HiveIcebergStorageHandler();
Boolean result = storageHandler.hasAppendsOnly(snapshotList, since);
assertThat(result, is(nullValue()));
} |
CodeEmitter<T> emit(final Parameter parameter) {
emitter.emit("param");
emit("name", parameter.getName());
final String parameterType = parameter.getIn();
if (ObjectHelper.isNotEmpty(parameterType)) {
emit("type", RestParamType.valueOf(parameterType));
}
if (!"body".equals(parameterType)) {
final Schema schema = parameter.getSchema();
if (schema != null) {
final String dataType = schema.getType();
if (ObjectHelper.isNotEmpty(dataType)) {
emit("dataType", dataType);
}
emit("allowableValues", asStringList(schema.getEnum()));
final StyleEnum style = parameter.getStyle();
if (ObjectHelper.isNotEmpty(style)) {
if (style.equals(StyleEnum.FORM)) {
// Guard against null explode value
// See: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.3.md#fixed-fields-10
if (Boolean.FALSE.equals(parameter.getExplode())) {
emit("collectionFormat", CollectionFormat.csv);
} else {
emit("collectionFormat", CollectionFormat.multi);
}
}
}
if (ObjectHelper.isNotEmpty(schema.getDefault())) {
final String value = StringHelper.removeLeadingAndEndingQuotes(schema.getDefault().toString());
emit("defaultValue", value);
}
if ("array".equals(dataType) && schema.getItems() != null) {
emit("arrayType", schema.getItems().getType());
}
}
}
if (parameter.getRequired() != null) {
emit("required", parameter.getRequired());
} else {
emit("required", Boolean.FALSE);
}
emit("description", parameter.getDescription());
emitter.emit("endParam");
return emitter;
} | @Test
public void shouldEmitCodeForOas3RefParameters() {
final Builder method = MethodSpec.methodBuilder("configure");
final MethodBodySourceCodeEmitter emitter = new MethodBodySourceCodeEmitter(method);
final OperationVisitor<?> visitor = new OperationVisitor<>(emitter, null, null, null, null);
final Paths paths = new Paths();
final PathItem path = new PathItem();
paths.addPathItem("/path/{param}", path);
final Parameter parameter = new Parameter();
parameter.setName("param");
parameter.setIn("query");
parameter.set$ref("#/components/parameters/param");
path.addParametersItem(parameter);
visitor.emit(parameter);
assertThat(method.build().toString()).isEqualTo("void configure() {\n"
+ " param()\n"
+ " .name(\"param\")\n"
+ " .type(org.apache.camel.model.rest.RestParamType.query)\n"
+ " .required(false)\n"
+ " .endParam()\n"
+ " }\n");
} |
public void transitionTo(ClassicGroupState groupState) {
assertValidTransition(groupState);
previousState = state;
state = groupState;
currentStateTimestamp = Optional.of(time.milliseconds());
metrics.onClassicGroupStateTransition(previousState, state);
} | @Test
public void testStableToDeadTransition() {
group.transitionTo(DEAD);
assertState(group, DEAD);
} |
@Override
public void loadData(Priority priority, DataCallback<? super T> callback) {
this.callback = callback;
serializer.startRequest(priority, url, this);
} | @Test
public void testLoadData_withInProgressRequest_isNotifiedWhenRequestCompletes() throws Exception {
ChromiumUrlFetcher<ByteBuffer> firstFetcher =
new ChromiumUrlFetcher<>(serializer, parser, glideUrl);
ChromiumUrlFetcher<ByteBuffer> secondFetcher =
new ChromiumUrlFetcher<>(serializer, parser, glideUrl);
firstFetcher.loadData(Priority.LOW, firstCallback);
secondFetcher.loadData(Priority.HIGH, secondCallback);
succeed(getInfo(10, 200), urlRequestListenerCaptor.getValue(), ByteBuffer.allocateDirect(10));
verify(firstCallback, timeout(1000)).onDataReady(isA(ByteBuffer.class));
verify(secondCallback, timeout(1000)).onDataReady(isA(ByteBuffer.class));
} |
@Override
public Set<String> scope() {
// Immutability of the set is enforced in the constructor/validation utils class, so
// we don't need to repeat it here.
return scopes;
} | @Test
public void noErrorIfModifyScope() {
// Start with a basic set created by the caller.
SortedSet<String> callerSet = new TreeSet<>(Arrays.asList("a", "b", "c"));
OAuthBearerToken token = new BasicOAuthBearerToken("not.valid.token",
callerSet,
0L,
"jdoe",
0L);
// Make sure it all looks good
assertNotNull(token.scope());
assertEquals(3, token.scope().size());
// Add a value to the caller's set and note that it changes the token's scope set.
// Make sure to make it read-only when it's passed in.
callerSet.add("d");
assertTrue(token.scope().contains("d"));
// Similarly, removing a value from the caller's set will affect the token's scope set.
// Make sure to make it read-only when it's passed in.
callerSet.remove("c");
assertFalse(token.scope().contains("c"));
// Ensure that attempting to change the token's scope set directly will not throw any error.
token.scope().clear();
} |
public static Predicate parse(String expression)
{
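// Shunting-yard style parse: operand tokens are fully-qualified Predicate class names
// (instantiated reflectively), while '!', '&', '|' and parentheses are reduced against
// the two stacks below according to OPERATOR_PRECEDENCE.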
final Stack<Predicate> predicateStack = new Stack<>();
final Stack<Character> operatorStack = new Stack<>();
final String trimmedExpression = TRIMMER_PATTERN.matcher(expression).replaceAll("");
final StringTokenizer tokenizer = new StringTokenizer(trimmedExpression, OPERATORS, true);
boolean isTokenMode = true;
while (true)
{
final Character operator;
final String token;
if (isTokenMode)
{
if (tokenizer.hasMoreTokens())
{
token = tokenizer.nextToken();
}
else
{
break;
}
if (OPERATORS.contains(token))
{
operator = token.charAt(0);
}
else
{
operator = null;
}
}
else
{
operator = operatorStack.pop();
token = null;
}
isTokenMode = true;
if (operator == null)
{
try
{
predicateStack.push(Class.forName(token).asSubclass(Predicate.class).getDeclaredConstructor().newInstance());
}
catch (ClassCastException e)
{
throw new RuntimeException(token + " must implement " + Predicate.class.getName(), e);
}
catch (Exception e)
{
throw new RuntimeException(e);
}
}
else
{
if (operatorStack.empty() || operator == '(')
{
operatorStack.push(operator);
}
else if (operator == ')')
{
while (operatorStack.peek() != '(')
{
evaluate(predicateStack, operatorStack);
}
operatorStack.pop();
}
else
{
if (OPERATOR_PRECEDENCE.get(operator) < OPERATOR_PRECEDENCE.get(operatorStack.peek()))
{
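// Reduce once now; clearing isTokenMode makes the next iteration re-pop this
// operator so the precedence check repeats against the new stack top.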
evaluate(predicateStack, operatorStack);
isTokenMode = false;
}
operatorStack.push(operator);
}
}
}
while (!operatorStack.empty())
{
evaluate(predicateStack, operatorStack);
}
if (predicateStack.size() > 1)
{
throw new RuntimeException("Invalid logical expression");
}
return predicateStack.pop();
} | @Test
public void testNotOrParenAnd()
{
final Predicate parsed = PredicateExpressionParser.parse("!(com.linkedin.data.it.AlwaysTruePredicate | com.linkedin.data.it.AlwaysTruePredicate) & !com.linkedin.data.it.AlwaysFalsePredicate");
Assert.assertEquals(parsed.getClass(), AndPredicate.class);
final List<Predicate> andChildren = ((AndPredicate) parsed).getChildPredicates();
Assert.assertEquals(andChildren.get(0).getClass(), NotPredicate.class);
Assert.assertEquals(andChildren.get(1).getClass(), NotPredicate.class);
final Predicate notChild1 = ((NotPredicate) andChildren.get(0)).getChildPredicate();
Assert.assertEquals(notChild1.getClass(), OrPredicate.class);
final Predicate notChild2 = ((NotPredicate) andChildren.get(1)).getChildPredicate();
Assert.assertEquals(notChild2.getClass(), AlwaysFalsePredicate.class);
final List<Predicate> orChildren = ((OrPredicate) notChild1).getChildPredicates();
Assert.assertEquals(orChildren.get(0).getClass(), AlwaysTruePredicate.class);
Assert.assertEquals(orChildren.get(1).getClass(), AlwaysTruePredicate.class);
} |
public static List<String> parseColumnsFromPath(String filePath, List<String> columnsFromPath)
throws SparkDppException {
if (columnsFromPath == null || columnsFromPath.isEmpty()) {
return Collections.emptyList();
}
String[] strings = filePath.split("/");
if (strings.length < 2) {
System.err
.println("Fail to parse columnsFromPath, expected: " + columnsFromPath + ", filePath: " + filePath);
throw new SparkDppException(
"Reason: Fail to parse columnsFromPath, expected: " + columnsFromPath + ", filePath: " + filePath);
}
String[] columns = new String[columnsFromPath.size()];
int size = 0;
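// Walk the path segments from the file's parent directory upward, collecting
// Hive-style "key=value" partition values; the final segment (the file name) is skipped.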
for (int i = strings.length - 2; i >= 0; i--) {
String str = strings[i];
if (str != null && str.isEmpty()) {
continue;
}
if (str == null || !str.contains("=")) {
System.err.println(
"Fail to parse columnsFromPath, expected: " + columnsFromPath + ", filePath: " + filePath);
throw new SparkDppException(
"Reason: Fail to parse columnsFromPath, expected: " + columnsFromPath + ", filePath: " +
filePath);
}
String[] pair = str.split("=", 2);
if (pair.length != 2) {
System.err.println(
"Fail to parse columnsFromPath, expected: " + columnsFromPath + ", filePath: " + filePath);
throw new SparkDppException(
"Reason: Fail to parse columnsFromPath, expected: " + columnsFromPath + ", filePath: " +
filePath);
}
int index = columnsFromPath.indexOf(pair[0]);
if (index == -1) {
continue;
}
columns[index] = pair[1];
size++;
if (size >= columnsFromPath.size()) {
break;
}
}
if (size != columnsFromPath.size()) {
System.err
.println("Fail to parse columnsFromPath, expected: " + columnsFromPath + ", filePath: " + filePath);
throw new SparkDppException(
"Reason: Fail to parse columnsFromPath, expected: " + columnsFromPath + ", filePath: " + filePath);
}
return Lists.newArrayList(columns);
} | @Test
public void testParseColumnsFromPath() {
DppUtils dppUtils = new DppUtils();
String path = "/path/to/file/city=beijing/date=2020-04-10/data";
List<String> columnFromPaths = new ArrayList<>();
columnFromPaths.add("city");
columnFromPaths.add("date");
try {
List<String> columnFromPathValues = dppUtils.parseColumnsFromPath(path, columnFromPaths);
Assert.assertEquals(2, columnFromPathValues.size());
Assert.assertEquals("beijing", columnFromPathValues.get(0));
Assert.assertEquals("2020-04-10", columnFromPathValues.get(1));
} catch (Exception e) {
Assert.fail("parseColumnsFromPath should not have thrown: " + e.getMessage());
}
} |
public byte[] getTail() {
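// tailBuffer is circular: rebuild the tail in order by copying the oldest segment
// (currentIndex to end) followed by the wrapped segment (start to currentIndex).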
int size = (int) Math.min(tailSize, bytesRead);
byte[] result = new byte[size];
System.arraycopy(tailBuffer, currentIndex, result, 0, size - currentIndex);
System.arraycopy(tailBuffer, 0, result, size - currentIndex, currentIndex);
return result;
} | @Test
public void testTailBeforeRead() throws IOException {
TailStream stream = new TailStream(generateStream(0, 100), 50);
assertEquals(0, stream.getTail().length, "Wrong buffer length");
stream.close();
} |
@Override
public void close() throws LiquibaseException {
LiquibaseException exception = null;
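// Always attempt to stop the data source; a stop failure is rethrown as a
// LiquibaseException only if super.close() itself completed normally.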
try {
super.close();
} finally {
try {
dataSource.stop();
} catch (Exception e) {
exception = new LiquibaseException(e);
}
}
if (exception != null) {
throw exception;
}
} | @Test
void testWhenClosingAllConnectionsInPoolIsReleased() throws Exception {
ConnectionPool pool = dataSource.getPool();
assertThat(pool.getActive()).isEqualTo(1);
liquibase.close();
assertThat(pool.getActive()).isZero();
assertThat(pool.getIdle()).isZero();
assertThat(pool.isClosed()).isTrue();
} |
@Override
public boolean equals(final Object obj) {
if (!(obj instanceof OrderByItem)) {
return false;
}
OrderByItem orderByItem = (OrderByItem) obj;
return segment.getOrderDirection() == orderByItem.segment.getOrderDirection() && index == orderByItem.index;
} | @SuppressWarnings({"SimplifiableAssertion", "ConstantValue"})
@Test
void assertEqualsWhenObjIsNull() {
assertFalse(new OrderByItem(mock(OrderByItemSegment.class)).equals(null));
} |
@Override
public Serializable read(final MySQLBinlogColumnDef columnDef, final MySQLPacketPayload payload) {
long datetime = readDatetimeV2FromPayload(payload);
return 0L == datetime ? MySQLTimeValueUtils.DATETIME_OF_ZERO : readDatetime(columnDef, datetime, payload);
} | @Test
void assertReadWithoutFraction() {
when(payload.readInt1()).thenReturn(0xfe, 0xf3, 0xff, 0x7e, 0xfb);
LocalDateTime expected = LocalDateTime.of(9999, 12, 31, 23, 59, 59);
assertThat(new MySQLDatetime2BinlogProtocolValue().read(columnDef, payload), is(Timestamp.valueOf(expected)));
} |
@Override
public int run(String[] args) throws Exception {
YarnConfiguration yarnConf = getConf() == null ?
new YarnConfiguration() : new YarnConfiguration(getConf());
boolean isFederationEnabled = yarnConf.getBoolean(YarnConfiguration.FEDERATION_ENABLED,
YarnConfiguration.DEFAULT_FEDERATION_ENABLED);
if (args.length < 1 || !isFederationEnabled) {
printUsage();
return -1;
}
String cmd = args[0];
if ("-help".equals(cmd)) {
printHelp();
return 0;
}
return 0;
} | @Test
public void testHelp() throws Exception {
PrintStream oldOutPrintStream = System.out;
PrintStream oldErrPrintStream = System.err;
ByteArrayOutputStream dataOut = new ByteArrayOutputStream();
ByteArrayOutputStream dataErr = new ByteArrayOutputStream();
System.setOut(new PrintStream(dataOut));
System.setErr(new PrintStream(dataErr));
String[] args = {"-help"};
assertEquals(0, gpgCLI.run(args));
} |
void startServiceFingerprinting(
ImmutableList<PluginMatchingResult<ServiceFingerprinter>> selectedServiceFingerprinters) {
checkState(currentExecutionStage.equals(ExecutionStage.PORT_SCANNING));
checkState(portScanningTimer.isRunning());
this.portScanningTimer.stop();
this.serviceFingerprintingTimer.start();
this.currentExecutionStage = ExecutionStage.SERVICE_FINGERPRINTING;
this.selectedServiceFingerprinters = checkNotNull(selectedServiceFingerprinters);
} | @Test
public void startServiceFingerprinting_whenStageNotPortScanning_throwsException() {
ExecutionTracer executionTracer =
new ExecutionTracer(
portScanningTimer, serviceFingerprintingTimer, vulnerabilityDetectingTimer);
assertThrows(
IllegalStateException.class,
() -> executionTracer.startServiceFingerprinting(ImmutableList.of()));
} |
@Deprecated
public static String getJwt(JwtClaims claims) throws JoseException {
String jwt;
RSAPrivateKey privateKey = (RSAPrivateKey) getPrivateKey(
jwtConfig.getKey().getFilename(),jwtConfig.getKey().getPassword(), jwtConfig.getKey().getKeyName());
// A JWT is a JWS and/or a JWE with JSON claims as the payload.
// Here the token is a plain JWS (a signed JWT), so we create a JsonWebSignature object.
JsonWebSignature jws = new JsonWebSignature();
// The payload of the JWS is JSON content of the JWT Claims
jws.setPayload(claims.toJson());
// The JWT is signed using the sender's private key
jws.setKey(privateKey);
// Get provider from security config file, it should be two digit
// And the provider id will set as prefix for keyid in the token header, for example: 05100
// if there is no provider id, no prefix is added
String provider_id = "";
if (jwtConfig.getProviderId() != null) {
provider_id = jwtConfig.getProviderId();
if (provider_id.length() == 1) {
provider_id = "0" + provider_id;
} else if (provider_id.length() > 2) {
logger.error("provider_id defined in the security.yml file is invalid; the length should be 2");
provider_id = provider_id.substring(0, 2);
}
}
jws.setKeyIdHeaderValue(provider_id + jwtConfig.getKey().getKid());
// Set the signature algorithm on the JWT/JWS that will integrity protect the claims
jws.setAlgorithmHeaderValue(AlgorithmIdentifiers.RSA_USING_SHA256);
// Sign the JWS and produce the compact serialization, which will be the inner JWT/JWS
// representation, which is a string consisting of three dot ('.') separated
// base64url-encoded parts in the form Header.Payload.Signature
jwt = jws.getCompactSerialization();
return jwt;
} | @Test
public void longLivedCodegenJwt() throws Exception {
JwtClaims claims = ClaimsUtil.getTestClaims("steve", "EMPLOYEE", "f7d42348-c647-4efb-a52d-4c5787421e72", Arrays.asList("codegen.r", "codegen.w", "server.info.r"), "user");
claims.setExpirationTimeMinutesInTheFuture(5256000);
String jwt = JwtIssuer.getJwt(claims, long_kid, KeyUtil.deserializePrivateKey(long_key, KeyUtil.RSA));
System.out.println("***LongLived Codegen JWT***: " + jwt);
} |
@Override
public int chmod(String path, long mode) {
return AlluxioFuseUtils.call(LOG, () -> chmodInternal(path, mode),
FuseConstants.FUSE_CHMOD, "path=%s,mode=%o", path, mode);
} | @Test
@DoraTestTodoItem(action = DoraTestTodoItem.Action.FIX, owner = "LuQQiu",
comment = "waiting on security metadata to be implemented in Dora")
@Ignore
public void chmod() throws Exception {
long mode = 123;
mFuseFs.chmod("/foo/bar", mode);
AlluxioURI expectedPath = BASE_EXPECTED_URI.join("/foo/bar");
SetAttributePOptions options =
SetAttributePOptions.newBuilder().setMode(new Mode((short) mode).toProto()).build();
verify(mFileSystem).setAttribute(expectedPath, options);
} |
@Override
public Interpreter getInterpreter(String replName,
ExecutionContext executionContext)
throws InterpreterNotFoundException {
if (StringUtils.isBlank(replName)) {
// Get the default interpreter of the defaultInterpreterSetting
InterpreterSetting defaultSetting =
interpreterSettingManager.getByName(executionContext.getDefaultInterpreterGroup());
return defaultSetting.getDefaultInterpreter(executionContext);
}
String[] replNameSplits = replName.split("\\.");
if (replNameSplits.length == 2) {
String group = replNameSplits[0];
String name = replNameSplits[1];
InterpreterSetting setting = interpreterSettingManager.getByName(group);
if (null != setting) {
Interpreter interpreter = setting.getInterpreter(executionContext, name);
if (null != interpreter) {
return interpreter;
}
throw new InterpreterNotFoundException("No such interpreter: " + replName);
}
throw new InterpreterNotFoundException("No interpreter setting named: " + group);
} else if (replNameSplits.length == 1) {
// first assume group is omitted
InterpreterSetting setting =
interpreterSettingManager.getByName(executionContext.getDefaultInterpreterGroup());
if (setting != null) {
Interpreter interpreter = setting.getInterpreter(executionContext, replName);
if (null != interpreter) {
return interpreter;
}
}
// then assume interpreter name is omitted
setting = interpreterSettingManager.getByName(replName);
if (null != setting) {
return setting.getDefaultInterpreter(executionContext);
}
}
throw new InterpreterNotFoundException("No such interpreter: " + replName);
} | @Test
void testUnknownRepl1() {
try {
interpreterFactory.getInterpreter("test.unknown_repl", new ExecutionContext("user1", "note1", "test"));
fail("should fail due to no such interpreter");
} catch (InterpreterNotFoundException e) {
assertEquals("No such interpreter: test.unknown_repl", e.getMessage());
}
} |
@Override
public TimestampedSegment getOrCreateSegmentIfLive(final long segmentId,
final ProcessorContext context,
final long streamTime) {
final TimestampedSegment segment = super.getOrCreateSegmentIfLive(segmentId, context, streamTime);
cleanupExpiredSegments(streamTime);
return segment;
} | @Test
public void shouldGetSegmentsWithinTimeRange() {
updateStreamTimeAndCreateSegment(0);
updateStreamTimeAndCreateSegment(1);
updateStreamTimeAndCreateSegment(2);
updateStreamTimeAndCreateSegment(3);
final long streamTime = updateStreamTimeAndCreateSegment(4);
segments.getOrCreateSegmentIfLive(0, context, streamTime);
segments.getOrCreateSegmentIfLive(1, context, streamTime);
segments.getOrCreateSegmentIfLive(2, context, streamTime);
segments.getOrCreateSegmentIfLive(3, context, streamTime);
segments.getOrCreateSegmentIfLive(4, context, streamTime);
final List<TimestampedSegment> segments = this.segments.segments(0, 2 * SEGMENT_INTERVAL, true);
assertEquals(3, segments.size());
assertEquals(0, segments.get(0).id);
assertEquals(1, segments.get(1).id);
assertEquals(2, segments.get(2).id);
} |
public User(String name) {
this.name = name;
this.roles = null;
} | @Test
void testUser() {
Set<String> roles = new HashSet<>();
roles.add("userRole1");
roles.add("userRole2");
User user = new User("userName", roles);
assertThat(user.getName()).isEqualTo("userName");
assertThat(user.getRoles()).hasSize(2).contains("userRole1", "userRole2");
assertThat(user.getId()).isNotNegative().isLessThan(100);
User secondUser = new User("secondUser");
assertThat(secondUser.getRoles()).isNull();
} |
@Subscribe
public void inputUpdated(InputUpdated inputUpdatedEvent) {
final String inputId = inputUpdatedEvent.id();
LOG.debug("Input updated: {}", inputId);
final Input input;
try {
input = inputService.find(inputId);
} catch (NotFoundException e) {
LOG.warn("Received InputUpdated event but could not find input {}", inputId, e);
return;
}
final boolean startInput;
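// Remember whether the input was running so it can be restarted after removal,
// but only if this node should host it (global input or matching node id).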
final IOState<MessageInput> inputState = inputRegistry.getInputState(inputId);
if (inputState != null) {
startInput = inputState.getState() == IOState.Type.RUNNING;
inputRegistry.remove(inputState);
} else {
startInput = false;
}
if (startInput && (input.isGlobal() || this.nodeId.getNodeId().equals(input.getNodeId()))) {
startInput(input);
}
} | @Test
public void inputUpdatedStopsInputIfItIsRunning() throws Exception {
final String inputId = "input-id";
final Input input = mock(Input.class);
@SuppressWarnings("unchecked")
final IOState<MessageInput> inputState = mock(IOState.class);
when(inputService.find(inputId)).thenReturn(input);
when(inputRegistry.getInputState(inputId)).thenReturn(inputState);
listener.inputUpdated(InputUpdated.create(inputId));
verify(inputRegistry, times(1)).remove(inputState);
} |
public void addValueProviders(final String segmentName,
final RocksDB db,
final Cache cache,
final Statistics statistics) {
if (storeToValueProviders.isEmpty()) {
logger.debug("Adding metrics recorder of task {} to metrics recording trigger", taskId);
streamsMetrics.rocksDBMetricsRecordingTrigger().addMetricsRecorder(this);
} else if (storeToValueProviders.containsKey(segmentName)) {
throw new IllegalStateException("Value providers for store " + segmentName + " of task " + taskId +
" has been already added. This is a bug in Kafka Streams. " +
"Please open a bug report under https://issues.apache.org/jira/projects/KAFKA/issues");
}
verifyDbAndCacheAndStatistics(segmentName, db, cache, statistics);
logger.debug("Adding value providers for store {} of task {}", segmentName, taskId);
storeToValueProviders.put(segmentName, new DbAndCacheAndStatistics(db, cache, statistics));
} | @Test
public void shouldThrowIfStatisticsToAddIsNullButExistingStatisticsAreNotNull() {
recorder.addValueProviders(SEGMENT_STORE_NAME_1, dbToAdd1, cacheToAdd1, statisticsToAdd1);
final Throwable exception = assertThrows(
IllegalStateException.class,
() -> recorder.addValueProviders(SEGMENT_STORE_NAME_2, dbToAdd2, cacheToAdd2, null)
);
assertThat(
exception.getMessage(),
is("Statistics for segment " + SEGMENT_STORE_NAME_2 + " of task " + TASK_ID1 +
" is null although the statistics of another segment in this metrics recorder is not null. " +
"This is a bug in Kafka Streams. " +
"Please open a bug report under https://issues.apache.org/jira/projects/KAFKA/issues")
);
} |
public KsqlGenericRecord build(
final List<ColumnName> columnNames,
final List<Expression> expressions,
final LogicalSchema schema,
final DataSourceType dataSourceType
) {
final List<ColumnName> columns = columnNames.isEmpty()
? implicitColumns(schema)
: columnNames;
if (columns.size() != expressions.size()) {
throw new KsqlException(
"Expected a value for each column."
+ " Expected Columns: " + columnNames
+ ". Got " + expressions);
}
final LogicalSchema schemaWithPseudoColumns = withPseudoColumns(schema);
for (ColumnName col : columns) {
if (!schemaWithPseudoColumns.findColumn(col).isPresent()) {
throw new KsqlException("Column name " + col + " does not exist.");
}
if (SystemColumns.isDisallowedForInsertValues(col)) {
throw new KsqlException("Inserting into column " + col + " is not allowed.");
}
}
final Map<ColumnName, Object> values = resolveValues(
columns,
expressions,
schemaWithPseudoColumns,
functionRegistry,
config
);
if (dataSourceType == DataSourceType.KTABLE) {
final String noValue = schemaWithPseudoColumns.key().stream()
.map(Column::name)
.filter(colName -> !values.containsKey(colName))
.map(ColumnName::text)
.collect(Collectors.joining(", "));
if (!noValue.isEmpty()) {
throw new KsqlException("Value for primary key column(s) "
+ noValue + " is required for tables");
}
}
final long ts = (long) values.getOrDefault(SystemColumns.ROWTIME_NAME, clock.getAsLong());
final GenericKey key = buildKey(schema, values);
final GenericRow value = buildValue(schema, values);
return KsqlGenericRecord.of(key, value, ts);
} | @Test
public void shouldThrowOnInsertRowoffset() {
// Given:
final LogicalSchema schema = LogicalSchema.builder()
.keyColumn(KEY, SqlTypes.STRING)
.valueColumn(COL0, SqlTypes.STRING)
.build();
final Expression exp = new StringLiteral("a");
// When:
final KsqlException e = assertThrows(KsqlException.class, () -> recordFactory.build(
ImmutableList.of(SystemColumns.ROWTIME_NAME, KEY, SystemColumns.ROWOFFSET_NAME),
ImmutableList.of(new LongLiteral(1L), exp, exp), schema, DataSourceType.KSTREAM
));
// Then:
assertThat(e.getMessage(), containsString("Inserting into column `ROWOFFSET` is not allowed."));
} |
public Set<Integer> nodesThatShouldBeDown(ClusterState state) {
return calculate(state).nodesThatShouldBeDown();
} | @Test
void down_to_down_edge_keeps_group_down() {
GroupAvailabilityCalculator calc = calcForHierarchicCluster(
DistributionBuilder.withGroups(2).eachWithNodeCount(4), 0.76);
assertThat(calc.nodesThatShouldBeDown(clusterState(
"distributor:8 storage:8 .1.s:d")), equalTo(indices(0, 2, 3)));
assertThat(calc.nodesThatShouldBeDown(clusterState(
"distributor:8 storage:8 .1.s:d .2.s:d")), equalTo(indices(0, 3)));
} |
@Override
public void chunk(final Path directory, final AttributedList<Path> list) throws ListCanceledException {
if(directory.isRoot()) {
if(list.size() >= container) {
// Allow another chunk until limit is reached again
container += preferences.getInteger("browser.list.limit.container");
throw new ListCanceledException(list);
}
}
if(list.size() >= this.directory) {
// Allow another chunk until limit is reached again
this.directory += preferences.getInteger("browser.list.limit.directory");
throw new ListCanceledException(list);
}
} | @Test(expected = ListCanceledException.class)
public void testChunkLimitContainer() throws Exception {
new LimitedListProgressListener(new DisabledProgressListener()).chunk(
new Path("/", EnumSet.of(Path.Type.volume, Path.Type.directory)), new AttributedList<Path>() {
@Override
public int size() {
return 100;
}
}
);
} |
@Override
public Uuid clientInstanceId(Duration timeout) {
if (!clientTelemetryReporter.isPresent()) {
throw new IllegalStateException("Telemetry is not enabled. Set config `" + ProducerConfig.ENABLE_METRICS_PUSH_CONFIG + "` to `true`.");
}
return ClientTelemetryUtils.fetchClientInstanceId(clientTelemetryReporter.get(), timeout);
} | @Test
public void testClientInstanceId() {
Properties props = new Properties();
props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
ClientTelemetryReporter clientTelemetryReporter = mock(ClientTelemetryReporter.class);
clientTelemetryReporter.configure(any());
try (MockedStatic<CommonClientConfigs> mockedCommonClientConfigs = mockStatic(CommonClientConfigs.class, new CallsRealMethods())) {
mockedCommonClientConfigs.when(() -> CommonClientConfigs.telemetryReporter(anyString(), any())).thenReturn(Optional.of(clientTelemetryReporter));
ClientTelemetrySender clientTelemetrySender = mock(ClientTelemetrySender.class);
Uuid expectedUuid = Uuid.randomUuid();
when(clientTelemetryReporter.telemetrySender()).thenReturn(clientTelemetrySender);
when(clientTelemetrySender.clientInstanceId(any())).thenReturn(Optional.of(expectedUuid));
try (KafkaProducer<String, String> producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer())) {
Uuid uuid = producer.clientInstanceId(Duration.ofMillis(0));
assertEquals(expectedUuid, uuid);
}
}
} |
static void dissectFrame(
final DriverEventCode eventCode,
final MutableDirectBuffer buffer,
final int offset,
final StringBuilder builder)
{
int encodedLength = dissectLogHeader(CONTEXT, eventCode, buffer, offset, builder);
builder.append(": address=");
encodedLength += dissectSocketAddress(buffer, offset + encodedLength, builder);
builder.append(" ");
final int frameOffset = offset + encodedLength;
final int frameType = frameType(buffer, frameOffset);
switch (frameType)
{
case HeaderFlyweight.HDR_TYPE_PAD:
case HeaderFlyweight.HDR_TYPE_DATA:
DATA_HEADER.wrap(buffer, frameOffset, buffer.capacity() - frameOffset);
dissectDataFrame(builder);
break;
case HeaderFlyweight.HDR_TYPE_SM:
SM_HEADER.wrap(buffer, frameOffset, buffer.capacity() - frameOffset);
dissectStatusFrame(builder);
break;
case HeaderFlyweight.HDR_TYPE_NAK:
NAK_HEADER.wrap(buffer, frameOffset, buffer.capacity() - frameOffset);
dissectNakFrame(builder);
break;
case HeaderFlyweight.HDR_TYPE_SETUP:
SETUP_HEADER.wrap(buffer, frameOffset, buffer.capacity() - frameOffset);
dissectSetupFrame(builder);
break;
case HeaderFlyweight.HDR_TYPE_RTTM:
RTT_MEASUREMENT.wrap(buffer, frameOffset, buffer.capacity() - frameOffset);
dissectRttFrame(builder);
break;
case HeaderFlyweight.HDR_TYPE_RES:
dissectResFrame(buffer, frameOffset, builder);
break;
case HeaderFlyweight.HDR_TYPE_RSP_SETUP:
RSP_SETUP.wrap(buffer, frameOffset, buffer.capacity() - frameOffset);
dissectRspSetupFrame(builder);
break;
default:
builder.append("type=UNKNOWN(").append(frameType).append(")");
break;
}
} | @Test
void dissectFrameTypeUnknown()
{
internalEncodeLogHeader(buffer, 0, 3, 3, () -> 3_000_000_000L);
final int socketAddressOffset = encodeSocketAddress(
buffer, LOG_HEADER_LENGTH, new InetSocketAddress("localhost", 8888));
final DataHeaderFlyweight flyweight = new DataHeaderFlyweight();
flyweight.wrap(buffer, LOG_HEADER_LENGTH + socketAddressOffset, 300);
flyweight.headerType(Integer.MAX_VALUE);
dissectFrame(CMD_OUT_ON_OPERATION_SUCCESS, buffer, 0, builder);
assertEquals("[3.000000000] " + CONTEXT + ": " + CMD_OUT_ON_OPERATION_SUCCESS.name() +
" [3/3]: address=127.0.0.1:8888 type=UNKNOWN(65535)",
builder.toString());
} |
@ExceptionHandler({HttpMessageNotReadableException.class})
@ResponseStatus(HttpStatus.BAD_REQUEST)
protected ResponseEntity<RestError> handleHttpMessageNotReadableException(HttpMessageNotReadableException httpMessageNotReadableException) {
String exceptionMessage = getExceptionMessage(httpMessageNotReadableException);
return new ResponseEntity<>(new RestError(exceptionMessage), HttpStatus.BAD_REQUEST);
} | @Test
public void handleHttpMessageNotReadableException_whenCauseIsNotInvalidFormatException_shouldUseMessage() {
HttpMessageNotReadableException exception = new HttpMessageNotReadableException("Message not readable", new Exception());
ResponseEntity<RestError> responseEntity = underTest.handleHttpMessageNotReadableException(exception);
assertThat(responseEntity.getStatusCode()).isEqualTo(HttpStatus.BAD_REQUEST);
assertThat(responseEntity.getBody().message()).isEqualTo("Message not readable; nested exception is java.lang.Exception");
} |
@Nullable
public String getStorageClass() {
return _storageClass;
} | @Test
public void testIntelligentTieringStorageClass() {
PinotConfiguration pinotConfig = new PinotConfiguration();
pinotConfig.setProperty("storageClass", StorageClass.INTELLIGENT_TIERING.toString());
S3Config cfg = new S3Config(pinotConfig);
Assert.assertEquals(cfg.getStorageClass(), "INTELLIGENT_TIERING");
} |
protected boolean isInvalidOrigin(FullHttpRequest req) {
final String origin = req.headers().get(HttpHeaderNames.ORIGIN);
if (origin == null || !origin.toLowerCase().endsWith(originDomain)) {
logger.error("Invalid Origin header {} in WebSocket upgrade request", origin);
return true;
}
return false;
} | @Test
void testIsInvalidOrigin() {
ZuulPushAuthHandlerTest authHandler = new ZuulPushAuthHandlerTest();
final DefaultFullHttpRequest request =
new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/ws", Unpooled.buffer());
// Invalid input
assertTrue(authHandler.isInvalidOrigin(request));
request.headers().add(HttpHeaderNames.ORIGIN, "zuul-push.foo.com");
assertTrue(authHandler.isInvalidOrigin(request));
// Valid input
request.headers().remove(HttpHeaderNames.ORIGIN);
request.headers().add(HttpHeaderNames.ORIGIN, "zuul-push.netflix.com");
assertFalse(authHandler.isInvalidOrigin(request));
} |
public void removePackageRepository(String id) {
PackageRepository packageRepositoryToBeDeleted = this.find(id);
if (packageRepositoryToBeDeleted == null) {
throw new RuntimeException(String.format("Could not find repository with id '%s'", id));
}
this.remove(packageRepositoryToBeDeleted);
} | @Test
void shouldFindPackageRepositoryById() throws Exception {
PackageRepositories packageRepositories = new PackageRepositories();
packageRepositories.add(PackageRepositoryMother.create("repo1"));
PackageRepository repo2 = PackageRepositoryMother.create("repo2");
packageRepositories.add(repo2);
packageRepositories.removePackageRepository("repo1");
assertThat(packageRepositories).containsExactly(repo2);
} |
@Private
public void handleEvent(JobHistoryEvent event) {
synchronized (lock) {
// If this is JobSubmitted Event, setup the writer
if (event.getHistoryEvent().getEventType() == EventType.AM_STARTED) {
try {
AMStartedEvent amStartedEvent =
(AMStartedEvent) event.getHistoryEvent();
setupEventWriter(event.getJobID(), amStartedEvent);
} catch (IOException ioe) {
LOG.error("Error JobHistoryEventHandler in handleEvent: " + event,
ioe);
throw new YarnRuntimeException(ioe);
}
}
// For all events
// (1) Write it out
// (2) Process it for JobSummary
// (3) Process it for ATS (if enabled)
MetaInfo mi = fileMap.get(event.getJobID());
try {
HistoryEvent historyEvent = event.getHistoryEvent();
if (! (historyEvent instanceof NormalizedResourceEvent)) {
mi.writeEvent(historyEvent);
}
processEventForJobSummary(event.getHistoryEvent(), mi.getJobSummary(),
event.getJobID());
if (LOG.isDebugEnabled()) {
LOG.debug("In HistoryEventHandler "
+ event.getHistoryEvent().getEventType());
}
} catch (IOException e) {
LOG.error("Error writing History Event: " + event.getHistoryEvent(),
e);
throw new YarnRuntimeException(e);
}
if (event.getHistoryEvent().getEventType() == EventType.JOB_SUBMITTED) {
JobSubmittedEvent jobSubmittedEvent =
(JobSubmittedEvent) event.getHistoryEvent();
mi.getJobIndexInfo().setSubmitTime(jobSubmittedEvent.getSubmitTime());
mi.getJobIndexInfo().setQueueName(jobSubmittedEvent.getJobQueueName());
}
//initialize the launchTime in the JobIndexInfo of MetaInfo
if(event.getHistoryEvent().getEventType() == EventType.JOB_INITED ){
JobInitedEvent jie = (JobInitedEvent) event.getHistoryEvent();
mi.getJobIndexInfo().setJobStartTime(jie.getLaunchTime());
}
if (event.getHistoryEvent().getEventType() == EventType.JOB_QUEUE_CHANGED) {
JobQueueChangeEvent jQueueEvent =
(JobQueueChangeEvent) event.getHistoryEvent();
mi.getJobIndexInfo().setQueueName(jQueueEvent.getJobQueueName());
}
// If this is JobFinishedEvent, close the writer and setup the job-index
if (event.getHistoryEvent().getEventType() == EventType.JOB_FINISHED) {
try {
JobFinishedEvent jFinishedEvent =
(JobFinishedEvent) event.getHistoryEvent();
mi.getJobIndexInfo().setFinishTime(jFinishedEvent.getFinishTime());
mi.getJobIndexInfo().setNumMaps(jFinishedEvent.getSucceededMaps());
mi.getJobIndexInfo().setNumReduces(
jFinishedEvent.getSucceededReduces());
mi.getJobIndexInfo().setJobStatus(JobState.SUCCEEDED.toString());
closeEventWriter(event.getJobID());
processDoneFiles(event.getJobID());
} catch (IOException e) {
throw new YarnRuntimeException(e);
}
}
// In case of JOB_ERROR, only process all the Done files (e.g. job
// summary, job history file, etc.) if it is the last AM retry.
if (event.getHistoryEvent().getEventType() == EventType.JOB_ERROR) {
try {
JobUnsuccessfulCompletionEvent jucEvent =
(JobUnsuccessfulCompletionEvent) event.getHistoryEvent();
mi.getJobIndexInfo().setFinishTime(jucEvent.getFinishTime());
mi.getJobIndexInfo().setNumMaps(jucEvent.getSucceededMaps());
mi.getJobIndexInfo().setNumReduces(jucEvent.getSucceededReduces());
mi.getJobIndexInfo().setJobStatus(jucEvent.getStatus());
closeEventWriter(event.getJobID());
if(context.isLastAMRetry())
processDoneFiles(event.getJobID());
} catch (IOException e) {
throw new YarnRuntimeException(e);
}
}
if (event.getHistoryEvent().getEventType() == EventType.JOB_FAILED
|| event.getHistoryEvent().getEventType() == EventType.JOB_KILLED) {
try {
JobUnsuccessfulCompletionEvent jucEvent =
(JobUnsuccessfulCompletionEvent) event
.getHistoryEvent();
mi.getJobIndexInfo().setFinishTime(jucEvent.getFinishTime());
mi.getJobIndexInfo().setNumMaps(jucEvent.getSucceededMaps());
mi.getJobIndexInfo().setNumReduces(jucEvent.getSucceededReduces());
mi.getJobIndexInfo().setJobStatus(jucEvent.getStatus());
closeEventWriter(event.getJobID());
processDoneFiles(event.getJobID());
} catch (IOException e) {
throw new YarnRuntimeException(e);
}
}
}
} | @Test (timeout=50000)
public void testDefaultFsIsUsedForHistory() throws Exception {
// Create default configuration pointing to the minicluster
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
dfsCluster.getURI().toString());
FileOutputStream os = new FileOutputStream(coreSitePath);
conf.writeXml(os);
os.close();
// simulate execution under a non-default namenode
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
"file:///");
TestParams t = new TestParams();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, t.dfsWorkDir);
JHEvenHandlerForTest realJheh =
new JHEvenHandlerForTest(t.mockAppContext, 0, false);
JHEvenHandlerForTest jheh = spy(realJheh);
jheh.init(conf);
try {
jheh.start();
handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent(
t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, -1)));
handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(
TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, 0, 0, new Counters(),
new Counters(), new Counters())));
// If we got here then event handler worked but we don't know with which
// file system. Now we check that history stuff was written to minicluster
FileSystem dfsFileSystem = dfsCluster.getFileSystem();
assertTrue("Minicluster contains some history files",
dfsFileSystem.globStatus(new Path(t.dfsWorkDir + "/*")).length != 0);
FileSystem localFileSystem = LocalFileSystem.get(conf);
assertFalse("No history directory on non-default file system",
localFileSystem.exists(new Path(t.dfsWorkDir)));
} finally {
jheh.stop();
purgeHdfsHistoryIntermediateDoneDirectory(conf);
}
} |
@Override
public boolean retainAll(Collection<?> c) {
// will throw UnsupportedOperationException
return underlying().retainAll(c);
} | @Test
public void testDelegationOfUnsupportedFunctionRetainAll() {
new PCollectionsTreeSetWrapperDelegationChecker<>()
.defineMockConfigurationForUnsupportedFunction(mock -> mock.retainAll(eq(Collections.emptyList())))
.defineWrapperUnsupportedFunctionInvocation(wrapper -> wrapper.retainAll(Collections.emptyList()))
.doUnsupportedFunctionDelegationCheck();
} |
public static SchemaAndValue parseString(String value) {
if (value == null) {
return NULL_SCHEMA_AND_VALUE;
}
if (value.isEmpty()) {
return new SchemaAndValue(Schema.STRING_SCHEMA, value);
}
ValueParser parser = new ValueParser(new Parser(value));
return parser.parse(false);
} | @Test
public void shouldParseFractionalPartsAsIntegerWhenNoFractionalPart() {
assertEquals(new SchemaAndValue(Schema.INT8_SCHEMA, (byte) 1), Values.parseString("1.0"));
assertEquals(new SchemaAndValue(Schema.FLOAT32_SCHEMA, 1.1f), Values.parseString("1.1"));
assertEquals(new SchemaAndValue(Schema.INT16_SCHEMA, (short) 300), Values.parseString("300.0"));
assertEquals(new SchemaAndValue(Schema.FLOAT32_SCHEMA, 300.01f), Values.parseString("300.01"));
assertEquals(new SchemaAndValue(Schema.INT32_SCHEMA, 66000), Values.parseString("66000.0"));
assertEquals(new SchemaAndValue(Schema.FLOAT32_SCHEMA, 66000.0008f), Values.parseString("66000.0008"));
} |
public AstNode rewrite(final AstNode node, final C context) {
return rewriter.process(node, context);
} | @Test
public void shouldNotRewriteExplainWithId() {
// Given:
final Explain explain = new Explain(location, Optional.of("id"), Optional.empty());
// When:
final AstNode rewritten = rewriter.rewrite(explain, context);
// Then:
assertThat(rewritten, is(sameInstance(explain)));
} |
protected static SimpleDateFormat getLog4j2Appender() {
Optional<Appender> log4j2xmlAppender =
configuration.getAppenders().values().stream()
.filter( a -> a.getName().equalsIgnoreCase( log4J2Appender ) ).findFirst();
if ( log4j2xmlAppender.isPresent() ) {
ArrayList<String> matchesArray = new ArrayList<>();
String dateFormatFromLog4j2xml = log4j2xmlAppender.get().getLayout().getContentFormat().get( "format" );
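// Collect the contents of every "{...}" group in the layout's format string,
// e.g. the date pattern inside "%d{yyyy/MM/dd HH:mm:ss}".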
Pattern pattern = Pattern.compile( "(\\{(.*?)})" );
Matcher matcher = pattern.matcher( dateFormatFromLog4j2xml );
while ( matcher.find() ) {
matchesArray.add( matcher.group( 2 ) );
}
if ( !matchesArray.isEmpty() ) {
return processMatches( matchesArray );
}
}
return new SimpleDateFormat( "yyyy/MM/dd HH:mm:ss" );
} | @Test
public void testGetLog4j2UsingAppender8() {
// Testing MMM-dd,yyyy pattern
KettleLogLayout.log4J2Appender = "pdi-execution-appender-test-8";
Assert.assertEquals( "MMM-dd,yyyy",
KettleLogLayout.getLog4j2Appender().toPattern() );
} |
@Retryable(DataAccessResourceFailureException.class)
@CacheEvict(value = CACHE_AVERAGE_REVIEW_RATING, allEntries = true)
public void updateSearchIndex() {
if (!isEnabled()) {
return;
}
var stopWatch = new StopWatch();
stopWatch.start();
updateSearchIndex(false);
stopWatch.stop();
logger.info("Updated search index in " + stopWatch.getTotalTimeMillis() + " ms");
} | @Test
public void testHardUpdateNotExists() {
var index = mockIndex(false);
mockExtensions();
search.updateSearchIndex(true);
assertThat(index.created).isTrue();
assertThat(index.deleted).isFalse();
assertThat(index.entries).hasSize(3);
} |
private Map<String, StorageUnit> getStorageUnits(final Map<String, StorageNode> storageUnitNodeMap,
final Map<StorageNode, DataSource> storageNodeDataSources, final Map<String, DataSourcePoolProperties> dataSourcePoolPropsMap) {
Map<String, StorageUnit> result = new LinkedHashMap<>(dataSourcePoolPropsMap.size(), 1F);
for (Entry<String, DataSourcePoolProperties> entry : dataSourcePoolPropsMap.entrySet()) {
String storageUnitName = entry.getKey();
StorageNode storageNode = storageUnitNodeMap.get(storageUnitName);
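// Prefer the data source registered for the mapped storage node; otherwise fall back
// to one registered under a node named after the storage unit itself.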
DataSource dataSource = storageNodeDataSources.containsKey(storageNode) ? storageNodeDataSources.get(storageNode) : storageNodeDataSources.get(new StorageNode(storageUnitName));
StorageUnit storageUnit = new StorageUnit(storageNode, entry.getValue(), dataSource);
result.put(storageUnitName, storageUnit);
}
return result;
} | @Test
void assertGetDataSources() {
DataSourceProvidedDatabaseConfiguration databaseConfig = createDataSourceProvidedDatabaseConfiguration();
DataSource dataSource = databaseConfig.getStorageUnits().get("foo_ds").getDataSource();
assertTrue(dataSource instanceof MockedDataSource);
} |
public static void debug(final Logger logger, final String format, final Supplier<Object> supplier) {
if (logger.isDebugEnabled()) {
logger.debug(format, supplier.get());
}
} | @Test
public void testAtLeastOnceDebug() {
when(logger.isDebugEnabled()).thenReturn(true);
LogUtils.debug(logger, supplier);
verify(supplier, atLeastOnce()).get();
} |
public static UIf create(
UExpression condition, UStatement thenStatement, UStatement elseStatement) {
return new AutoValue_UIf(condition, thenStatement, elseStatement);
} | @Test
public void inlineWithoutElse() {
UIf ifTree =
UIf.create(
UFreeIdent.create("cond"),
UBlock.create(
UExpressionStatement.create(
UAssign.create(UFreeIdent.create("x"), UFreeIdent.create("y")))),
null);
bind(new UFreeIdent.Key("cond"), parseExpression("true"));
bind(new UFreeIdent.Key("x"), parseExpression("x"));
bind(new UFreeIdent.Key("y"), parseExpression("\"foo\""));
assertInlines(
Joiner.on(System.lineSeparator())
.join(
"if (true) {", //
" x = \"foo\";",
"}"),
ifTree);
} |
@Override
public List<Long> bitField(byte[] key, BitFieldSubCommands subCommands) {
List<Object> params = new ArrayList<>();
params.add(key);
boolean writeOp = false;
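// Translate each sub-command into BITFIELD arguments: the type token is "i" (signed)
// or "u" (unsigned) plus the bit width, and the offset gets a "#" prefix for
// type-based offsets (multiplied by the type width).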
for (BitFieldSubCommands.BitFieldSubCommand subCommand : subCommands) {
String size = "u";
if (subCommand.getType().isSigned()) {
size = "i";
}
size += subCommand.getType().getBits();
String offset = "#";
if (subCommand.getOffset().isZeroBased()) {
offset = "";
}
offset += subCommand.getOffset().getValue();
if (subCommand instanceof BitFieldSubCommands.BitFieldGet) {
params.add("GET");
params.add(size);
params.add(offset);
} else if (subCommand instanceof BitFieldSubCommands.BitFieldSet) {
writeOp = true;
params.add("SET");
params.add(size);
params.add(offset);
params.add(((BitFieldSubCommands.BitFieldSet) subCommand).getValue());
} else if (subCommand instanceof BitFieldSubCommands.BitFieldIncrBy) {
writeOp = true;
params.add("INCRBY");
params.add(size);
params.add(offset);
params.add(((BitFieldSubCommands.BitFieldIncrBy) subCommand).getValue());
BitFieldSubCommands.BitFieldIncrBy.Overflow overflow = ((BitFieldSubCommands.BitFieldIncrBy) subCommand).getOverflow();
if (overflow != null) {
params.add("OVERFLOW");
params.add(overflow);
}
}
}
if (writeOp) {
return write(key, StringCodec.INSTANCE, BITFIELD, params.toArray());
}
return read(key, StringCodec.INSTANCE, BITFIELD, params.toArray());
} | @Test
public void testBitField() {
BitFieldSubCommands c = BitFieldSubCommands.create();
c = c.set(BitFieldSubCommands.BitFieldType.INT_8).valueAt(1).to(120);
List<Long> list = connection.bitField("testUnsigned".getBytes(), c);
assertThat(list).containsExactly(0L);
BitFieldSubCommands c2 = BitFieldSubCommands.create();
c2 = c2.incr(BitFieldSubCommands.BitFieldType.INT_8).valueAt(1).by(1);
List<Long> list2 = connection.bitField("testUnsigned".getBytes(), c2);
assertThat(list2).containsExactly(121L);
BitFieldSubCommands c3 = BitFieldSubCommands.create();
c3 = c3.get(BitFieldSubCommands.BitFieldType.INT_8).valueAt(1);
List<Long> list3 = connection.bitField("testUnsigned".getBytes(), c3);
assertThat(list3).containsExactly(121L);
} |
@GET
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
@Override
public ClusterInfo get() {
return getClusterInfo();
} | @Test
public void testClusterSchedulerFifoDefault() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("cluster")
.path("scheduler").get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
response.getType().toString());
JSONObject json = response.getEntity(JSONObject.class);
verifyClusterSchedulerFifo(json);
} |
@Override
public void start() {
boolean hasExternalPlugins = pluginRepository.getPlugins().stream().anyMatch(plugin -> plugin.getType().equals(PluginType.EXTERNAL));
try (DbSession session = dbClient.openSession(false)) {
PropertyDto property = Optional.ofNullable(dbClient.propertiesDao().selectGlobalProperty(session, PLUGINS_RISK_CONSENT))
.orElse(defaultPluginRiskConsentProperty());
if (hasExternalPlugins && NOT_ACCEPTED == PluginRiskConsent.valueOf(property.getValue())) {
addWarningInSonarDotLog();
property.setValue(REQUIRED.name());
dbClient.propertiesDao().saveProperty(session, property);
session.commit();
} else if (!hasExternalPlugins && REQUIRED == PluginRiskConsent.valueOf(property.getValue())) {
dbClient.propertiesDao().deleteGlobalProperty(PLUGINS_RISK_CONSENT, session);
session.commit();
}
}
} | @Test
public void require_consent_when_exist_external_plugins_and_consent_property_not_exist() {
setupExternalPlugin();
underTest.start();
assertThat(logTester.logs(Level.WARN)).contains("Plugin(s) detected. Plugins are not provided by SonarSource"
+ " and are therefore installed at your own risk. A SonarQube administrator needs to acknowledge this risk once logged in.");
assertThat(dbClient.propertiesDao().selectGlobalProperty(PLUGINS_RISK_CONSENT))
.extracting(PropertyDto::getValue)
.isEqualTo(REQUIRED.name());
} |
@Override
public Column convert(BasicTypeDefine typeDefine) {
PhysicalColumn.PhysicalColumnBuilder builder =
PhysicalColumn.builder()
.name(typeDefine.getName())
.sourceType(typeDefine.getColumnType())
.nullable(typeDefine.isNullable())
.defaultValue(typeDefine.getDefaultValue())
.comment(typeDefine.getComment());
String xuguDataType = typeDefine.getDataType().toUpperCase();
switch (xuguDataType) {
case XUGU_BOOLEAN:
case XUGU_BOOL:
builder.dataType(BasicType.BOOLEAN_TYPE);
break;
case XUGU_TINYINT:
builder.dataType(BasicType.BYTE_TYPE);
break;
case XUGU_SMALLINT:
builder.dataType(BasicType.SHORT_TYPE);
break;
case XUGU_INT:
case XUGU_INTEGER:
builder.dataType(BasicType.INT_TYPE);
break;
case XUGU_BIGINT:
builder.dataType(BasicType.LONG_TYPE);
break;
case XUGU_FLOAT:
builder.dataType(BasicType.FLOAT_TYPE);
break;
case XUGU_DOUBLE:
builder.dataType(BasicType.DOUBLE_TYPE);
break;
case XUGU_NUMBER:
case XUGU_DECIMAL:
case XUGU_NUMERIC:
DecimalType decimalType;
if (typeDefine.getPrecision() != null && typeDefine.getPrecision() > 0) {
decimalType =
new DecimalType(
typeDefine.getPrecision().intValue(), typeDefine.getScale());
} else {
decimalType = new DecimalType(DEFAULT_PRECISION, DEFAULT_SCALE);
}
builder.dataType(decimalType);
builder.columnLength(Long.valueOf(decimalType.getPrecision()));
builder.scale(decimalType.getScale());
break;
case XUGU_CHAR:
case XUGU_NCHAR:
builder.dataType(BasicType.STRING_TYPE);
if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) {
builder.columnLength(TypeDefineUtils.charTo4ByteLength(1L));
} else {
builder.columnLength(typeDefine.getLength());
}
break;
case XUGU_VARCHAR:
case XUGU_VARCHAR2:
builder.dataType(BasicType.STRING_TYPE);
if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) {
builder.columnLength(TypeDefineUtils.charTo4ByteLength(MAX_VARCHAR_LENGTH));
} else {
builder.columnLength(typeDefine.getLength());
}
break;
case XUGU_CLOB:
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(BYTES_2GB - 1);
break;
case XUGU_JSON:
case XUGU_GUID:
builder.dataType(BasicType.STRING_TYPE);
break;
case XUGU_BINARY:
builder.dataType(PrimitiveByteArrayType.INSTANCE);
builder.columnLength(MAX_BINARY_LENGTH);
break;
case XUGU_BLOB:
builder.dataType(PrimitiveByteArrayType.INSTANCE);
builder.columnLength(BYTES_2GB - 1);
break;
case XUGU_DATE:
builder.dataType(LocalTimeType.LOCAL_DATE_TYPE);
break;
case XUGU_TIME:
case XUGU_TIME_WITH_TIME_ZONE:
builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
break;
case XUGU_DATETIME:
case XUGU_DATETIME_WITH_TIME_ZONE:
builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
break;
case XUGU_TIMESTAMP:
case XUGU_TIMESTAMP_WITH_TIME_ZONE:
builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
if (typeDefine.getScale() == null) {
builder.scale(TIMESTAMP_DEFAULT_SCALE);
} else {
builder.scale(typeDefine.getScale());
}
break;
default:
throw CommonError.convertToSeaTunnelTypeError(
DatabaseIdentifier.XUGU, xuguDataType, typeDefine.getName());
}
return builder.build();
} | @Test
public void testConvertInt() {
BasicTypeDefine<Object> typeDefine =
BasicTypeDefine.builder().name("test").columnType("int").dataType("int").build();
Column column = XuguTypeConverter.INSTANCE.convert(typeDefine);
Assertions.assertEquals(typeDefine.getName(), column.getName());
Assertions.assertEquals(BasicType.INT_TYPE, column.getDataType());
Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType());
} |
@Override
public BasicTypeDefine reconvert(Column column) {
BasicTypeDefine.BasicTypeDefineBuilder builder =
BasicTypeDefine.builder()
.name(column.getName())
.nullable(column.isNullable())
.comment(column.getComment())
.defaultValue(column.getDefaultValue());
switch (column.getDataType().getSqlType()) {
case BOOLEAN:
builder.columnType(DB2_BOOLEAN);
builder.dataType(DB2_BOOLEAN);
break;
case TINYINT:
case SMALLINT:
builder.columnType(DB2_SMALLINT);
builder.dataType(DB2_SMALLINT);
break;
case INT:
builder.columnType(DB2_INT);
builder.dataType(DB2_INT);
break;
case BIGINT:
builder.columnType(DB2_BIGINT);
builder.dataType(DB2_BIGINT);
break;
case FLOAT:
builder.columnType(DB2_REAL);
builder.dataType(DB2_REAL);
break;
case DOUBLE:
builder.columnType(DB2_DOUBLE);
builder.dataType(DB2_DOUBLE);
break;
case DECIMAL:
DecimalType decimalType = (DecimalType) column.getDataType();
long precision = decimalType.getPrecision();
int scale = decimalType.getScale();
if (precision <= 0) {
precision = DEFAULT_PRECISION;
scale = DEFAULT_SCALE;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which is precision less than 0, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
precision,
scale);
} else if (precision > MAX_PRECISION) {
scale = (int) Math.max(0, scale - (precision - MAX_PRECISION));
precision = MAX_PRECISION;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which exceeds the maximum precision of {}, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
MAX_PRECISION,
precision,
scale);
}
if (scale < 0) {
scale = 0;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which is scale less than 0, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
precision,
scale);
} else if (scale > MAX_SCALE) {
scale = MAX_SCALE;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
MAX_SCALE,
precision,
scale);
}
builder.columnType(String.format("%s(%s,%s)", DB2_DECIMAL, precision, scale));
builder.dataType(DB2_DECIMAL);
builder.precision(precision);
builder.scale(scale);
break;
case BYTES:
if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
builder.columnType(
String.format("%s(%s)", DB2_VARBINARY, MAX_VARBINARY_LENGTH));
builder.dataType(DB2_VARBINARY);
builder.length(column.getColumnLength());
} else if (column.getColumnLength() <= MAX_BINARY_LENGTH) {
builder.columnType(
String.format("%s(%s)", DB2_BINARY, column.getColumnLength()));
builder.dataType(DB2_BINARY);
builder.length(column.getColumnLength());
} else if (column.getColumnLength() <= MAX_VARBINARY_LENGTH) {
builder.columnType(
String.format("%s(%s)", DB2_VARBINARY, column.getColumnLength()));
builder.dataType(DB2_VARBINARY);
builder.length(column.getColumnLength());
} else {
long length = column.getColumnLength();
if (length > MAX_BLOB_LENGTH) {
length = MAX_BLOB_LENGTH;
log.warn(
"The length of blob type {} is out of range, "
+ "it will be converted to {}({})",
column.getName(),
DB2_BLOB,
length);
}
builder.columnType(String.format("%s(%s)", DB2_BLOB, length));
builder.dataType(DB2_BLOB);
builder.length(length);
}
break;
case STRING:
if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
builder.columnType(String.format("%s(%s)", DB2_VARCHAR, MAX_VARCHAR_LENGTH));
builder.dataType(DB2_VARCHAR);
builder.length(column.getColumnLength());
} else if (column.getColumnLength() <= MAX_CHAR_LENGTH) {
builder.columnType(String.format("%s(%s)", DB2_CHAR, column.getColumnLength()));
builder.dataType(DB2_CHAR);
builder.length(column.getColumnLength());
} else if (column.getColumnLength() <= MAX_VARCHAR_LENGTH) {
builder.columnType(
String.format("%s(%s)", DB2_VARCHAR, column.getColumnLength()));
builder.dataType(DB2_VARCHAR);
builder.length(column.getColumnLength());
} else {
long length = column.getColumnLength();
if (length > MAX_CLOB_LENGTH) {
length = MAX_CLOB_LENGTH;
log.warn(
"The length of clob type {} is out of range, "
+ "it will be converted to {}({})",
column.getName(),
DB2_CLOB,
length);
}
builder.columnType(String.format("%s(%s)", DB2_CLOB, length));
builder.dataType(DB2_CLOB);
builder.length(length);
}
break;
case DATE:
builder.columnType(DB2_DATE);
builder.dataType(DB2_DATE);
break;
case TIME:
builder.columnType(DB2_TIME);
builder.dataType(DB2_TIME);
break;
case TIMESTAMP:
if (column.getScale() != null && column.getScale() > 0) {
int timestampScale = column.getScale();
if (column.getScale() > MAX_TIMESTAMP_SCALE) {
timestampScale = MAX_TIMESTAMP_SCALE;
log.warn(
"The timestamp column {} type timestamp({}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to timestamp({})",
column.getName(),
column.getScale(),
MAX_TIMESTAMP_SCALE,
timestampScale);
}
builder.columnType(String.format("%s(%s)", DB2_TIMESTAMP, timestampScale));
builder.scale(timestampScale);
} else {
builder.columnType(DB2_TIMESTAMP);
}
builder.dataType(DB2_TIMESTAMP);
break;
default:
throw CommonError.convertToConnectorTypeError(
DatabaseIdentifier.DB_2,
column.getDataType().getSqlType().name(),
column.getName());
}
return builder.build();
} | @Test
public void testReconvertUnsupported() {
Column column =
PhysicalColumn.of(
"test",
new MapType<>(BasicType.STRING_TYPE, BasicType.STRING_TYPE),
(Long) null,
true,
null,
null);
try {
DB2TypeConverter.INSTANCE.reconvert(column);
Assertions.fail();
} catch (SeaTunnelRuntimeException e) {
// ignore
} catch (Throwable e) {
Assertions.fail();
}
} |
public static ZMsg recvMsg(Socket socket)
{
return recvMsg(socket, 0);
} | @Test
public void testRecvMsg()
{
ZMQ.Context ctx = ZMQ.context(0);
ZMQ.Socket socket = ctx.socket(SocketType.PULL);
ZMsg.recvMsg(socket, ZMQ.NOBLOCK, (msg) -> assertThat(msg, nullValue()));
socket.close();
ctx.close();
} |
@Override
public Optional<EncryptionInformation> getWriteEncryptionInformation(ConnectorSession session, TableEncryptionProperties tableEncryptionProperties, String dbName, String tableName)
{
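// Write-time encryption information is only produced for DWRF table encryption properties.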
if (!(tableEncryptionProperties instanceof DwrfTableEncryptionProperties)) {
return Optional.empty();
}
return Optional.of(getWriteEncryptionInformationInternal(session, (DwrfTableEncryptionProperties) tableEncryptionProperties, dbName, tableName));
} | @Test
public void testGetWriteEncryptionInformation()
{
Optional<EncryptionInformation> encryptionInformation = encryptionInformationSource.getWriteEncryptionInformation(SESSION, forTable("table_level", "algo", "provider"), "dbName", "tableName");
assertTrue(encryptionInformation.isPresent());
assertEquals(
encryptionInformation.get(),
EncryptionInformation.fromEncryptionMetadata(DwrfEncryptionMetadata.forTable("table_level".getBytes(), ImmutableMap.of(TEST_EXTRA_METADATA, "algo"), "algo", "provider")));
} |
@Override
public UnderFileSystem create(String path, UnderFileSystemConfiguration conf) {
Preconditions.checkNotNull(path, "Unable to create UnderFileSystem instance:"
+ " URI path should not be null");
if (checkOBSCredentials(conf)) {
try {
return OBSUnderFileSystem.createInstance(new AlluxioURI(path), conf);
} catch (Exception e) {
throw Throwables.propagate(e);
}
}
String err = "OBS credentials or endpoint not available, cannot create OBS Under File System.";
throw Throwables.propagate(new IOException(err));
} | @Test
public void createInstanceWithNullPath() {
Exception e = Assert.assertThrows(NullPointerException.class, () -> mFactory.create(
null, mConf));
Assert.assertTrue(e.getMessage().contains("Unable to create UnderFileSystem instance: URI "
+ "path should not be null"));
} |
public void doesNotContain(@Nullable CharSequence string) {
checkNotNull(string);
if (actual == null) {
failWithActual("expected a string that does not contain", string);
} else if (actual.contains(string)) {
failWithActual("expected not to contain", string);
}
} | @Test
public void stringDoesNotContainCharSequence() {
CharSequence charSeq = new StringBuilder("d");
assertThat("abc").doesNotContain(charSeq);
} |
public static String stringBlankAndThenExecute(String source, Callable<String> callable) {
if (StringUtils.isBlank(source)) {
try {
return callable.call();
} catch (Exception e) {
LogUtils.NAMING_LOGGER.error("string empty and then execute cause an exception.", e);
}
}
return source == null ? null : source.trim();
} | @Test
void testStringBlankAndThenExecuteSuccess() {
String word = "success";
String actual = TemplateUtils.stringBlankAndThenExecute(word, () -> "call");
assertEquals(word, actual);
} |
public int getType() {
return type;
} | @Test
public void getTypeFromCode() {
assertEquals( DragAndDropContainer.TYPE_BASE_STEP_TYPE, DragAndDropContainer.getType( "BaseStep" ) );
} |
public static URL getResourceUrl(String resource) throws IOException {
if (resource.startsWith(CLASSPATH_PREFIX)) {
String path = resource.substring(CLASSPATH_PREFIX.length());
ClassLoader classLoader = ResourceUtils.class.getClassLoader();
URL url = (classLoader != null ? classLoader.getResource(path) : ClassLoader.getSystemResource(path));
if (url == null) {
throw new FileNotFoundException("Resource [" + resource + "] does not exist");
}
return url;
}
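// Not a classpath resource: try parsing it as a URL first, then fall back to a file path.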
try {
return new URL(resource);
} catch (MalformedURLException ex) {
return new File(resource).toURI().toURL();
}
} | @Test
void testGetResourceUrlForClasspath() throws IOException {
URL url = ResourceUtils.getResourceUrl("classpath:test-tls-cert.pem");
assertNotNull(url);
} |
public static ServiceDescriptor echoService() {
return echoServiceDescriptor;
} | @Test
void echoService() {
Assertions.assertNotNull(ServiceDescriptorInternalCache.echoService());
Assertions.assertEquals(
EchoService.class, ServiceDescriptorInternalCache.echoService().getServiceInterfaceClass());
} |
public static File load(String name) {
try {
if (name == null) {
throw new IllegalArgumentException("name can't be null");
}
String decodedPath = URLDecoder.decode(name, StandardCharsets.UTF_8.name());
return getFileFromFileSystem(decodedPath);
} catch (UnsupportedEncodingException e) {
LOGGER.error("decode name error: {}", e.getMessage(), e);
}
return null;
} | @Test
public void testLoadNotExistFile() {
File file = FileLoader.load("io/NotExistFile.txt");
Assertions.assertTrue(file == null || !file.exists());
} |
public Tile getParent() {
if (this.zoomLevel == 0) {
return null;
}
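// Integer division by two maps the four child tiles onto their common parent tile.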
return new Tile(this.tileX / 2, this.tileY / 2, (byte) (this.zoomLevel - 1), this.tileSize);
} | @Test
public void getParentTest() {
Tile rootTile = new Tile(0, 0, (byte) 0, TILE_SIZE);
Assert.assertNull(rootTile.getParent());
Assert.assertEquals(rootTile, new Tile(0, 0, (byte) 1, TILE_SIZE).getParent());
Assert.assertEquals(rootTile, new Tile(1, 0, (byte) 1, TILE_SIZE).getParent());
Assert.assertEquals(rootTile, new Tile(0, 1, (byte) 1, TILE_SIZE).getParent());
Assert.assertEquals(rootTile, new Tile(1, 1, (byte) 1, TILE_SIZE).getParent());
} |
public static void validateRdwAid(byte[] aId) {
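// Accept only AIDs from the RDW allow-list; anything else is rejected.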
for (final byte[] compare : RDW_AID) {
if (Arrays.equals(compare, aId)) {
return;
}
}
logger.error("Driving licence has unknown aId: {}", Hex.toHexString(aId).toUpperCase());
throw new ClientException("Unknown aId");
} | @Test
public void validateRdwAidSuccessful() {
CardValidations.validateRdwAid(Hex.decode("SSSSSSSSSSSSSSSSSSSSSS"));
} |
@Override
public K8sHost removeHost(IpAddress hostIp) {
checkArgument(hostIp != null, ERR_NULL_HOST_IP);
K8sHost host = hostStore.removeHost(hostIp);
log.info(String.format(MSG_HOST, hostIp.toString(), MSG_REMOVED));
return host;
} | @Test(expected = IllegalArgumentException.class)
public void testRemoveNullHost() {
target.removeHost(null);
} |
public Schema addToSchema(Schema schema) {
validate(schema);
schema.addProp(LOGICAL_TYPE_PROP, name);
schema.setLogicalType(this);
return schema;
} | @Test
void logicalTypeInSchemaEquals() {
Schema schema1 = Schema.createFixed("aDecimal", null, null, 4);
Schema schema2 = Schema.createFixed("aDecimal", null, null, 4);
Schema schema3 = Schema.createFixed("aDecimal", null, null, 4);
assertNotSame(schema1, schema2);
assertNotSame(schema1, schema3);
assertEqualsTrue("No logical types", schema1, schema2);
assertEqualsTrue("No logical types", schema1, schema3);
LogicalTypes.decimal(9).addToSchema(schema1);
assertEqualsFalse("Two has no logical type", schema1, schema2);
LogicalTypes.decimal(9).addToSchema(schema2);
assertEqualsTrue("Same logical types", schema1, schema2);
LogicalTypes.decimal(9, 2).addToSchema(schema3);
assertEqualsFalse("Different logical type", schema1, schema3);
} |
@Override
public TransferStatus prepare(final Path file, final Local local, final TransferStatus parent, final ProgressListener progress) throws BackgroundException {
final TransferStatus status = super.prepare(file, local, parent, progress);
if(status.isExists()) {
final String filename = file.getName();
int no = 0;
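// Probe name-1, name-2, ... until a remote filename is found that does not already exist.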
do {
String proposal = String.format("%s-%d", FilenameUtils.getBaseName(filename), ++no);
if(StringUtils.isNotBlank(Path.getExtension(filename))) {
proposal += String.format(".%s", Path.getExtension(filename));
}
final Path renamed = new Path(file.getParent(), proposal, file.getType());
if(options.temporary) {
// Adjust final destination when uploading with temporary filename
status.getDisplayname().withRemote(renamed).exists(false);
}
else {
status.withRename(renamed);
}
}
while(find.find(status.getRename().remote));
if(log.isInfoEnabled()) {
log.info(String.format("Changed upload target from %s to %s", file, status.getRename().remote));
}
if(log.isDebugEnabled()) {
log.debug(String.format("Clear exist flag for file %s", file));
}
status.setExists(false);
}
else {
if(parent.getRename().remote != null) {
final Path renamed = new Path(parent.getRename().remote, file.getName(), file.getType());
if(options.temporary) {
// Adjust final destination when uploading with temporary filename
status.getDisplayname().withRemote(renamed).exists(false);
}
else {
status.withRename(renamed);
}
}
if(log.isInfoEnabled()) {
log.info(String.format("Changed upload target from %s to %s", file, status.getRename().remote));
}
}
return status;
} | @Test
public void testFileUploadWithTemporaryFilename() throws Exception {
final Path file = new Path("f", EnumSet.of(Path.Type.file));
final AtomicBoolean found = new AtomicBoolean();
final AttributesFinder attributes = new AttributesFinder() {
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) {
return new PathAttributes();
}
};
final Find find = new Find() {
@Override
public boolean find(final Path f, final ListProgressListener listener) {
if(f.equals(file)) {
found.set(true);
return true;
}
return false;
}
};
final NullSession session = new NullSession(new Host(new TestProtocol()));
final RenameFilter f = new RenameFilter(new DisabledUploadSymlinkResolver(), session, new UploadFilterOptions(session.getHost()).withTemporary(true));
f.withFinder(find).withAttributes(attributes);
final TransferStatus status = f.prepare(file, new NullLocal("t/f"), new TransferStatus().exists(true), new DisabledProgressListener());
assertTrue(found.get());
assertFalse(status.isExists());
f.apply(file, new NullLocal("t/f"), status, new DisabledProgressListener());
assertNotNull(status.getDisplayname().remote);
assertNotEquals(file, status.getRename().remote);
assertFalse(status.isExists());
assertNotEquals(file, status.getDisplayname().remote);
assertFalse(status.getDisplayname().exists);
assertNull(status.getRename().local);
assertNotNull(status.getRename().remote);
// assertEquals(new Path("/f-2g3vYDqR-", EnumSet.of(Path.Type.file)), fileStatus.getRename().remote);
assertEquals(new Path("/f-1", EnumSet.of(Path.Type.file)), status.getDisplayname().remote);
assertNotEquals(status.getDisplayname().remote, status.getRename().remote);
} |
@Override
public boolean enable(String pluginId) {
return mainLock.applyWithReadLock(() -> {
ThreadPoolPlugin plugin = registeredPlugins.get(pluginId);
if (Objects.isNull(plugin) || !disabledPlugins.remove(pluginId)) {
return false;
}
forQuickIndexes(quickIndex -> quickIndex.addIfPossible(plugin));
return true;
});
} | @Test
public void testEnable() {
ThreadPoolPlugin plugin = new TestExecuteAwarePlugin();
Assert.assertFalse(manager.enable(plugin.getId()));
manager.register(plugin);
Assert.assertFalse(manager.enable(plugin.getId()));
manager.disable(plugin.getId());
Assert.assertTrue(manager.enable(plugin.getId()));
} |
public synchronized void executeDdlStatement(String statement) throws IllegalStateException {
checkIsUsable();
maybeCreateInstance();
maybeCreateDatabase();
LOG.info("Executing DDL statement '{}' on database {}.", statement, databaseId);
try {
databaseAdminClient
.updateDatabaseDdl(
instanceId, databaseId, ImmutableList.of(statement), /* operationId= */ null)
.get();
LOG.info("Successfully executed DDL statement '{}' on database {}.", statement, databaseId);
} catch (ExecutionException | InterruptedException | SpannerException e) {
throw new SpannerResourceManagerException("Failed to execute statement.", e);
}
} | @Test
public void testExecuteDdlStatementShouldThrowExceptionWhenSpannerCreateDatabaseFails()
throws ExecutionException, InterruptedException {
// arrange
prepareCreateInstanceMock();
when(spanner.getDatabaseAdminClient().createDatabase(any(), any()).get())
.thenThrow(InterruptedException.class);
prepareUpdateDatabaseMock();
String statement =
"CREATE TABLE Singers (\n"
+ " SingerId INT64 NOT NULL,\n"
+ " FirstName STRING(1024),\n"
+ " LastName STRING(1024),\n"
+ ") PRIMARY KEY (SingerId)";
// act & assert
assertThrows(
SpannerResourceManagerException.class, () -> testManager.executeDdlStatement(statement));
} |
public static boolean isAbsoluteUri(final String uriString) {
if (StringUtils.isBlank(uriString)) {
return false;
}
try {
URI uri = new URI(uriString);
return uri.isAbsolute();
} catch (URISyntaxException e) {
log.debug("Failed to parse uri: " + uriString, e);
// ignore this exception
return false;
}
} | @Test
void isAbsoluteUri() {
String[] absoluteUris = new String[] {
"ftp://ftp.is.co.za/rfc/rfc1808.txt",
"http://www.ietf.org/rfc/rfc2396.txt",
"ldap://[2001:db8::7]/c=GB?objectClass?one",
"mailto:John.Doe@example.com",
"news:comp.infosystems.www.servers.unix",
"tel:+1-816-555-1212",
"telnet://192.0.2.16:80/",
"urn:oasis:names:specification:docbook:dtd:xml:4.1.2",
"data:text/vnd-example+xyz;foo=bar;base64,R0lGODdh",
"irc://irc.example.com:6667/#some-channel",
"ircs://irc.example.com:6667/#some-channel",
"irc6://irc.example.com:6667/#some-channel"
};
for (String uri : absoluteUris) {
assertThat(PathUtils.isAbsoluteUri(uri)).isTrue();
}
String[] paths = new String[] {
"//example.com/path/resource.txt",
"/path/resource.txt",
"path/resource.txt",
"../resource.txt",
"./resource.txt",
"resource.txt",
"#fragment",
"",
null
};
for (String path : paths) {
assertThat(PathUtils.isAbsoluteUri(path)).isFalse();
}
} |
public static ConfigurableResource parseResourceConfigValue(String value)
throws AllocationConfigurationException {
return parseResourceConfigValue(value, Long.MAX_VALUE);
} | @Test
public void testParseNewStyleResourceVcoresNegativeWithSpaces()
throws Exception {
expectNegativeValueOfResource("vcores");
parseResourceConfigValue("memory-mb=5120, vcores=-2");
} |
@Override
public int hashCode() {
return Objects.hash(
threadName,
threadState,
activeTasks,
standbyTasks,
mainConsumerClientId,
restoreConsumerClientId,
producerClientIds,
adminClientId);
} | @Test
public void shouldNotBeEqualIfDifferInActiveTasks() {
final ThreadMetadata differActiveTasks = new ThreadMetadataImpl(
THREAD_NAME,
THREAD_STATE,
MAIN_CONSUMER_CLIENT_ID,
RESTORE_CONSUMER_CLIENT_ID,
PRODUCER_CLIENT_IDS,
ADMIN_CLIENT_ID,
mkSet(TM_0),
STANDBY_TASKS
);
assertThat(threadMetadata, not(equalTo(differActiveTasks)));
assertThat(threadMetadata.hashCode(), not(equalTo(differActiveTasks.hashCode())));
} |
@Override
public String toString() {
return "XmlRowAdapter{" + "record=" + record + '}';
} | @Test
public void allPrimitiveDataTypes()
throws XPathExpressionException, JAXBException, IOException, SAXException,
ParserConfigurationException {
for (Row row : DATA.allPrimitiveDataTypesRows) {
NodeList entries = xmlDocumentEntries(row);
assertEquals(ALL_PRIMITIVE_DATA_TYPES_SCHEMA.getFieldNames().size(), entries.getLength());
Map<String, Node> actualMap = keyValues("allPrimitiveDataTypes", entries);
assertEquals(
new HashSet<>(ALL_PRIMITIVE_DATA_TYPES_SCHEMA.getFieldNames()), actualMap.keySet());
for (Entry<String, Node> actualKV : actualMap.entrySet()) {
String key = actualKV.getKey();
Node node = actualKV.getValue();
String actual = node.getTextContent();
Optional<Object> safeExpected = Optional.ofNullable(row.getValue(key));
assertTrue(safeExpected.isPresent());
String expected = safeExpected.get().toString();
assertEquals(expected, actual);
}
}
} |
ProducerListeners listeners() {
return new ProducerListeners(eventListeners.toArray(new HollowProducerEventListener[0]));
} | @Test
public void fireNewDeltaChainDontStopWhenOneFails() {
long version = 31337;
HollowProducer.ReadState readState = Mockito.mock(HollowProducer.ReadState.class);
Mockito.when(readState.getVersion()).thenReturn(version);
Mockito.doThrow(RuntimeException.class).when(listener).onNewDeltaChain(version);
listenerSupport.listeners().fireNewDeltaChain(version);
Mockito.verify(listener).onNewDeltaChain(version);
} |
@Override
public boolean consumeBytes(Http2Stream stream, int numBytes) throws Http2Exception {
assert ctx != null && ctx.executor().inEventLoop();
checkPositiveOrZero(numBytes, "numBytes");
if (numBytes == 0) {
return false;
}
// Streams automatically consume all remaining bytes when they are closed, so just ignore
// if already closed.
if (stream != null && !isClosed(stream)) {
if (stream.id() == CONNECTION_STREAM_ID) {
throw new UnsupportedOperationException("Returning bytes for the connection window is not supported");
}
return consumeAllBytes(state(stream), numBytes);
}
return false;
} | @Test
public void consumeBytesForNegativeNumBytesShouldFail() throws Http2Exception {
assertThrows(IllegalArgumentException.class, new Executable() {
@Override
public void execute() throws Throwable {
controller.consumeBytes(connection.stream(STREAM_ID), -1);
}
});
} |
public static void parseReverseLookupDomain(PtrDnsAnswer.Builder dnsAnswerBuilder, String hostname) {
dnsAnswerBuilder.fullDomain(hostname);
final InternetDomainName internetDomainName = InternetDomainName.from(hostname);
if (internetDomainName.hasPublicSuffix()) {
// Use Guava to extract domain name.
final InternetDomainName topDomainName = internetDomainName.topDomainUnderRegistrySuffix();
dnsAnswerBuilder.domain(topDomainName.toString());
} else {
/* Manually extract domain name.
* Eg. for hostname test.some-domain.com, only some-domain.com will be extracted. */
String[] split = hostname.split("\\.");
if (split.length > 1) {
dnsAnswerBuilder.domain(split[split.length - 2] + "." + split[split.length - 1]);
} else if (split.length == 1) {
dnsAnswerBuilder.domain(hostname); // Domain is a single word with no dots.
} else {
dnsAnswerBuilder.domain(""); // Domain is blank.
}
}
} | @Test
public void testParseReverseLookupDomain() {
// Test all of the types of domains that parsing is performed for.
PtrDnsAnswer result = buildReverseLookupDomainTest("subdomain.test.co.uk");
assertEquals("subdomain.test.co.uk", result.fullDomain());
assertEquals("test.co.uk", result.domain());
result = buildReverseLookupDomainTest("subdomain.test.com");
assertEquals("subdomain.test.com", result.fullDomain());
assertEquals("test.com", result.domain());
// Test some completely bogus domain to verify that the manual domain parsing is exercised.
result = buildReverseLookupDomainTest("blah.blahblah.lala.blaaa");
assertEquals("blah.blahblah.lala.blaaa", result.fullDomain());
assertEquals("lala.blaaa", result.domain());
// Test a single word domain
result = buildReverseLookupDomainTest("HahaOneWordDomainTryingToBreakTheSoftware");
assertEquals("HahaOneWordDomainTryingToBreakTheSoftware", result.fullDomain());
assertEquals("HahaOneWordDomainTryingToBreakTheSoftware", result.domain());
} |
int parseAndConvert(String[] args) throws Exception {
Options opts = createOptions();
int retVal = 0;
try {
if (args.length == 0) {
LOG.info("Missing command line arguments");
printHelp(opts);
return 0;
}
CommandLine cliParser = new GnuParser().parse(opts, args);
if (cliParser.hasOption(CliOption.HELP.shortSwitch)) {
printHelp(opts);
return 0;
}
FSConfigToCSConfigConverter converter =
prepareAndGetConverter(cliParser);
converter.convert(converterParams);
String outputDir = converterParams.getOutputDirectory();
boolean skipVerification =
cliParser.hasOption(CliOption.SKIP_VERIFICATION.shortSwitch);
if (outputDir != null && !skipVerification) {
validator.validateConvertedConfig(
converterParams.getOutputDirectory());
}
} catch (ParseException e) {
String msg = "Options parsing failed: " + e.getMessage();
logAndStdErr(e, msg);
printHelp(opts);
retVal = -1;
} catch (PreconditionException e) {
String msg = "Cannot start FS config conversion due to the following"
+ " precondition error: " + e.getMessage();
handleException(e, msg);
retVal = -1;
} catch (UnsupportedPropertyException e) {
String msg = "Unsupported property/setting encountered during FS config "
+ "conversion: " + e.getMessage();
handleException(e, msg);
retVal = -1;
} catch (ConversionException | IllegalArgumentException e) {
String msg = "Fatal error during FS config conversion: " + e.getMessage();
handleException(e, msg);
retVal = -1;
} catch (VerificationException e) {
Throwable cause = e.getCause();
String msg = "Verification failed: " + e.getCause().getMessage();
conversionOptions.handleVerificationFailure(cause, msg);
retVal = -1;
}
conversionOptions.handleParsingFinished();
return retVal;
} | @Test
public void testEmptyRulesConfigurationSpecified() throws Exception {
FSConfigConverterTestCommons.configureEmptyFairSchedulerXml();
FSConfigConverterTestCommons.configureEmptyYarnSiteXml();
FSConfigConverterTestCommons.configureEmptyConversionRulesFile();
FSConfigToCSConfigArgumentHandler argumentHandler =
createArgumentHandler();
String[] args = getArgumentsAsArrayWithDefaults("-f",
FSConfigConverterTestCommons.FS_ALLOC_FILE,
"-r", FSConfigConverterTestCommons.CONVERSION_RULES_FILE);
argumentHandler.parseAndConvert(args);
} |
@Override
public String toString() {
StringBuilder stringBuilder = new StringBuilder();
stringBuilder.append("latLong=");
stringBuilder.append(this.latLong);
stringBuilder.append(", zoomLevel=");
stringBuilder.append(this.zoomLevel);
stringBuilder.append(", rotation=");
stringBuilder.append(this.rotation);
return stringBuilder.toString();
} | @Test
public void toStringTest() {
MapPosition mapPosition = new MapPosition(new LatLong(1.0, 2.0), (byte) 3);
Assert.assertEquals(MAP_POSITION_TO_STRING, mapPosition.toString());
} |
@Override
public void handle(LogHandlerEvent event) {
switch (event.getType()) {
case APPLICATION_STARTED:
LogHandlerAppStartedEvent appStartEvent =
(LogHandlerAppStartedEvent) event;
initApp(appStartEvent.getApplicationId(), appStartEvent.getUser(),
appStartEvent.getCredentials(),
appStartEvent.getApplicationAcls(),
appStartEvent.getLogAggregationContext(),
appStartEvent.getRecoveredAppLogInitedTime());
break;
case CONTAINER_FINISHED:
LogHandlerContainerFinishedEvent containerFinishEvent =
(LogHandlerContainerFinishedEvent) event;
stopContainer(containerFinishEvent.getContainerId(),
containerFinishEvent.getContainerType(),
containerFinishEvent.getExitCode());
break;
case APPLICATION_FINISHED:
LogHandlerAppFinishedEvent appFinishedEvent =
(LogHandlerAppFinishedEvent) event;
stopApp(appFinishedEvent.getApplicationId());
break;
case LOG_AGG_TOKEN_UPDATE:
checkAndEnableAppAggregators();
break;
default:
; // Ignore
}
} | @Test
public void testAppLogDirCreation() throws Exception {
final String inputSuffix = "logs-tfile";
this.conf.set(YarnConfiguration.NM_LOG_DIRS,
localLogDir.getAbsolutePath());
this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
this.remoteRootLogDir.getAbsolutePath());
this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR_SUFFIX, "logs");
InlineDispatcher dispatcher = new InlineDispatcher();
dispatcher.init(this.conf);
dispatcher.start();
FileSystem fs = FileSystem.get(this.conf);
final FileSystem spyFs = spy(FileSystem.get(this.conf));
final LogAggregationTFileController spyFileFormat
= new LogAggregationTFileController() {
@Override
public FileSystem getFileSystem(Configuration conf)
throws IOException {
return spyFs;
}
};
spyFileFormat.initialize(conf, "TFile");
LogAggregationService aggSvc = new LogAggregationService(dispatcher,
this.context, this.delSrvc, super.dirsHandler) {
@Override
public LogAggregationFileController getLogAggregationFileController(
Configuration conf) {
return spyFileFormat;
}
};
aggSvc.init(this.conf);
aggSvc.start();
// start an application and verify user, suffix, and app dirs created
ApplicationId appId = BuilderUtils.newApplicationId(1, 1);
Path userDir = fs.makeQualified(new Path(
remoteRootLogDir.getAbsolutePath(), this.user));
Path bucketDir = fs.makeQualified(LogAggregationUtils.getRemoteBucketDir(
new Path(remoteRootLogDir.getAbsolutePath()),
this.user, inputSuffix, appId));
Path suffixDir = bucketDir.getParent();
Path appDir = fs.makeQualified(LogAggregationUtils.getRemoteAppLogDir(
new Path(remoteRootLogDir.getAbsolutePath()), appId,
this.user, inputSuffix));
LogAggregationContext contextWithAllContainers =
Records.newRecord(LogAggregationContext.class);
contextWithAllContainers.setLogAggregationPolicyClassName(
AllContainerLogAggregationPolicy.class.getName());
aggSvc.handle(new LogHandlerAppStartedEvent(appId, this.user, null,
this.acls, contextWithAllContainers));
verify(spyFs).mkdirs(eq(userDir), isA(FsPermission.class));
verify(spyFs).mkdirs(eq(suffixDir), isA(FsPermission.class));
verify(spyFs).mkdirs(eq(bucketDir), isA(FsPermission.class));
verify(spyFs).mkdirs(eq(appDir), isA(FsPermission.class));
// start another application and verify only app dir created
ApplicationId appId2 = BuilderUtils.newApplicationId(1, 2);
Path appDir2 = fs.makeQualified(LogAggregationUtils.getRemoteAppLogDir(
new Path(remoteRootLogDir.getAbsolutePath()),
appId2, this.user, inputSuffix));
aggSvc.handle(new LogHandlerAppStartedEvent(appId2, this.user, null,
this.acls, contextWithAllContainers));
verify(spyFs).mkdirs(eq(appDir2), isA(FsPermission.class));
// start another application with the app dir already created and verify
// we do not try to create it again
ApplicationId appId3 = BuilderUtils.newApplicationId(2, 2);
Path appDir3 = fs.makeQualified(LogAggregationUtils.getRemoteAppLogDir(
new Path(remoteRootLogDir.getAbsolutePath()),
appId3, this.user, inputSuffix));
new File(appDir3.toUri().getPath()).mkdir();
aggSvc.handle(new LogHandlerAppStartedEvent(appId3, this.user, null,
this.acls, contextWithAllContainers));
verify(spyFs, never()).mkdirs(eq(appDir3), isA(FsPermission.class));
// Verify we do not create bucket dir again
ApplicationId appId4 = BuilderUtils.newApplicationId(2, 10003);
Path appDir4 = fs.makeQualified(LogAggregationUtils.getRemoteAppLogDir(
new Path(remoteRootLogDir.getAbsolutePath()),
appId4, this.user, inputSuffix));
Path bucketDir4 = appDir4.getParent();
new File(bucketDir4.toUri().getPath()).mkdir();
aggSvc.handle(new LogHandlerAppStartedEvent(appId4, this.user, null,
this.acls, contextWithAllContainers));
verify(spyFs, never()).mkdirs(eq(bucketDir4), isA(FsPermission.class));
verify(spyFs).mkdirs(eq(appDir4), isA(FsPermission.class));
aggSvc.stop();
aggSvc.close();
dispatcher.stop();
} |
List<DataflowPackage> stageClasspathElements(
Collection<StagedFile> classpathElements, String stagingPath, CreateOptions createOptions) {
return stageClasspathElements(classpathElements, stagingPath, DEFAULT_SLEEPER, createOptions);
} | @Test
public void testPackageUploadIsSkippedWithNonExistentResource() throws Exception {
String nonExistentFile =
FileSystems.matchNewResource(tmpFolder.getRoot().getPath(), true)
.resolve("non-existent-file", StandardResolveOptions.RESOLVE_FILE)
.toString();
assertEquals(
Collections.EMPTY_LIST,
defaultPackageUtil.stageClasspathElements(
ImmutableList.of(makeStagedFile(nonExistentFile)), STAGING_PATH, createOptions));
} |
public Set<Long> calculateUsers(DelegateExecution execution, int level) {
Assert.isTrue(level > 0, "level must be greater than 0");
// Get the process initiator
ProcessInstance processInstance = processInstanceService.getProcessInstance(execution.getProcessInstanceId());
Long startUserId = NumberUtils.parseLong(processInstance.getStartUserId());
// Find the department at the requested level
DeptRespDTO dept = null;
for (int i = 0; i < level; i++) {
// Resolve the department for the current level
if (dept == null) {
dept = getStartUserDept(startUserId);
if (dept == null) { // The initiator's department cannot be found, so this rule cannot be applied
return emptySet();
}
} else {
DeptRespDTO parentDept = deptApi.getDept(dept.getParentId());
if (parentDept == null) { // No parent department found, so stop climbing; for example, senior staff may belong to departments with fewer hierarchy levels
break;
}
dept = parentDept;
}
}
return dept.getLeaderUserId() != null ? asSet(dept.getLeaderUserId()) : emptySet();
} | @Test
public void testCalculateUsers_noDept() {
// Prepare parameters
DelegateExecution execution = mockDelegateExecution(1L);
// Mock the startUser lookup
AdminUserRespDTO startUser = randomPojo(AdminUserRespDTO.class, o -> o.setDeptId(10L));
when(adminUserApi.getUser(eq(1L))).thenReturn(startUser);
// Mock getStartUserDept to return no department
when(deptApi.getDept(eq(10L))).thenReturn(null);
// Invoke
Set<Long> result = expression.calculateUsers(execution, 1);
// Assert
assertEquals(0, result.size());
} |
public static RelDataType create(HazelcastIntegerType type, boolean nullable) {
if (type.isNullable() == nullable) {
return type;
}
return create0(type.getSqlTypeName(), nullable, type.getBitWidth());
} | @Test
public void testNullableIntegerTypeOfBitWidth() {
for (int i = 0; i < Long.SIZE + 10; ++i) {
RelDataType type = HazelcastIntegerType.create(i, false);
RelDataType nullableType = HazelcastIntegerType.create(i, true);
if (i < Byte.SIZE) {
assertType(TINYINT, i, false, type);
assertType(TINYINT, i, true, nullableType);
} else if (i < Short.SIZE) {
assertType(SMALLINT, i, false, type);
assertType(SMALLINT, i, true, nullableType);
} else if (i < Integer.SIZE) {
assertType(INTEGER, i, false, type);
assertType(INTEGER, i, true, nullableType);
} else if (i < Long.SIZE) {
assertType(BIGINT, i, false, type);
assertType(BIGINT, i, true, nullableType);
} else {
assertType(BIGINT, Long.SIZE, false, type);
assertType(BIGINT, Long.SIZE, true, nullableType);
}
}
} |
private ContentType getContentType(Exchange exchange) throws ParseException {
String contentTypeStr = ExchangeHelper.getContentType(exchange);
if (contentTypeStr == null) {
contentTypeStr = DEFAULT_CONTENT_TYPE;
}
ContentType contentType = new ContentType(contentTypeStr);
String contentEncoding = ExchangeHelper.getContentEncoding(exchange);
// add a charset parameter for text subtypes
if (contentEncoding != null && contentType.match("text/*")) {
contentType.setParameter("charset", MimeUtility.mimeCharset(contentEncoding));
}
return contentType;
} | @Test
public void roundtripWithBinaryAttachments() throws IOException {
String attContentType = "application/binary";
byte[] attText = { 0, 1, 2, 3, 4, 5, 6, 7 };
String attFileName = "Attachment File Name";
in.setBody("Body text");
DataSource ds = new ByteArrayDataSource(attText, attContentType);
in.addAttachment(attFileName, new DataHandler(ds));
Exchange result = template.send("direct:roundtrip", exchange);
AttachmentMessage out = result.getMessage(AttachmentMessage.class);
assertEquals("Body text", out.getBody(String.class));
assertTrue(out.hasAttachments());
assertEquals(1, out.getAttachmentNames().size());
assertTrue(out.getAttachmentNames().contains(attFileName));
DataHandler dh = out.getAttachment(attFileName);
assertNotNull(dh);
assertEquals(attContentType, dh.getContentType());
InputStream is = dh.getInputStream();
ByteArrayOutputStream os = new ByteArrayOutputStream();
IOHelper.copyAndCloseInput(is, os);
assertArrayEquals(attText, os.toByteArray());
} |
public static boolean validatePlugin(PluginLookup.PluginType type, Class<?> pluginClass) {
switch (type) {
case INPUT:
return containsAllMethods(inputMethods, pluginClass.getMethods());
case FILTER:
return containsAllMethods(filterMethods, pluginClass.getMethods());
case CODEC:
return containsAllMethods(codecMethods, pluginClass.getMethods());
case OUTPUT:
return containsAllMethods(outputMethods, pluginClass.getMethods());
default:
throw new IllegalStateException("Unknown plugin type for validation: " + type);
}
} | @Test
public void testValidFilterPlugin() {
Assert.assertTrue(PluginValidator.validatePlugin(PluginLookup.PluginType.FILTER, Uuid.class));
} |
@Override
public void finished(boolean allStepsExecuted) {
if (postProjectAnalysisTasks.length == 0) {
return;
}
ProjectAnalysisImpl projectAnalysis = createProjectAnalysis(allStepsExecuted ? SUCCESS : FAILED);
for (PostProjectAnalysisTask postProjectAnalysisTask : postProjectAnalysisTasks) {
executeTask(projectAnalysis, postProjectAnalysisTask);
}
} | @Test
public void ceTask_uuid_is_UUID_of_CeTask() {
underTest.finished(true);
verify(postProjectAnalysisTask).finished(taskContextCaptor.capture());
assertThat(taskContextCaptor.getValue().getProjectAnalysis().getCeTask().getId())
.isEqualTo(ceTask.getUuid());
} |
@Override
public LogicalSchema getSchema() {
return schema;
} | @Test
public void shouldHaveFullyQualifiedJoinSchemaWithNonSyntheticKey() {
// Given:
when(joinKey.resolveKeyName(any(), any())).thenReturn(ColumnName.of("right_rightKey"));
// When:
final JoinNode joinNode = new JoinNode(nodeId, OUTER, joinKey, true, left,
right, empty(), "KAFKA");
// Then:
assertThat(joinNode.getSchema(), is(LogicalSchema.builder()
.keyColumn(ColumnName.of("right_rightKey"), SqlTypes.BIGINT)
.valueColumn(ColumnName.of(LEFT_ALIAS.text() + "_C0"), SqlTypes.BIGINT)
.valueColumn(ColumnName.of(LEFT_ALIAS.text() + "_L1"), SqlTypes.STRING)
.valueColumn(ColumnName.of(LEFT_ALIAS.text() + "_ROWTIME"), SqlTypes.BIGINT)
.valueColumn(ColumnName.of(LEFT_ALIAS.text() + "_ROWPARTITION"), SqlTypes.INTEGER)
.valueColumn(ColumnName.of(LEFT_ALIAS.text() + "_ROWOFFSET"), SqlTypes.BIGINT)
.valueColumn(ColumnName.of(LEFT_ALIAS.text() + "_leftKey"), SqlTypes.BIGINT)
.valueColumn(ColumnName.of(RIGHT_ALIAS.text() + "_C0"), SqlTypes.STRING)
.valueColumn(ColumnName.of(RIGHT_ALIAS.text() + "_R1"), SqlTypes.BIGINT)
.valueColumn(ColumnName.of(RIGHT_ALIAS.text() + "_ROWTIME"), SqlTypes.BIGINT)
.valueColumn(ColumnName.of(RIGHT_ALIAS.text() + "_ROWPARTITION"), SqlTypes.INTEGER)
.valueColumn(ColumnName.of(RIGHT_ALIAS.text() + "_ROWOFFSET"), SqlTypes.BIGINT)
.valueColumn(ColumnName.of(RIGHT_ALIAS.text() + "_rightKey"), SqlTypes.BIGINT)
.build()
));
} |
@Override
public PageData<AuditLog> findAuditLogsByTenantIdAndUserId(UUID tenantId, UserId userId, List<ActionType> actionTypes, TimePageLink pageLink) {
return DaoUtil.toPageData(
auditLogRepository
.findAuditLogsByTenantIdAndUserId(
tenantId,
userId.getId(),
pageLink.getTextSearch(),
pageLink.getStartTime(),
pageLink.getEndTime(),
actionTypes,
DaoUtil.toPageable(pageLink)));
} | @Test
public void testFindAuditLogsByTenantIdAndUserId() {
List<AuditLog> foundedAuditLogs = auditLogDao.findAuditLogsByTenantIdAndUserId(tenantId,
userId1,
List.of(ActionType.ADDED),
new TimePageLink(20)).getData();
checkFoundedAuditLogsList(foundedAuditLogs, 10);
} |
@PostConstruct
public void init() {
ClientMeta.INSTANCE.setServiceName(serviceName);
final DynamicConfigInitializer service = PluginServiceManager.getPluginService(DynamicConfigInitializer.class);
service.doStart();
} | @Test
public void testInit() {
final DynamicProperties dynamicProperties = new DynamicProperties();
String serviceName = "testService";
ReflectUtils.setFieldValue(dynamicProperties, "serviceName", serviceName);
try(final MockedStatic<ServiceManager> serviceManagerMockedStatic = Mockito.mockStatic(ServiceManager.class)) {
final AtomicBoolean executed = new AtomicBoolean();
final DynamicConfigInitializer dynamicConfigInitializer = new DynamicConfigInitializer() {
@Override
public void doStart() {
executed.set(true);
}
};
serviceManagerMockedStatic.when(() -> ServiceManager.getService(DynamicConfigInitializer.class))
.thenReturn(dynamicConfigInitializer);
dynamicProperties.init();
Assert.assertTrue(executed.get());
Assert.assertEquals(ClientMeta.INSTANCE.getServiceName(), serviceName);
}
} |
@Override
protected JobExceptionsInfoWithHistory handleRequest(
HandlerRequest<EmptyRequestBody> request, ExecutionGraphInfo executionGraph) {
final List<Integer> exceptionToReportMaxSizes =
request.getQueryParameter(UpperLimitExceptionParameter.class);
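// Fall back to the server-side default when the client did not cap the number of exceptions.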
final int exceptionToReportMaxSize =
exceptionToReportMaxSizes.size() > 0
? exceptionToReportMaxSizes.get(0)
: MAX_NUMBER_EXCEPTION_TO_REPORT;
List<FailureLabelFilterParameter.FailureLabel> failureLabelFilter =
request.getQueryParameter(FailureLabelFilterParameter.class);
failureLabelFilter =
failureLabelFilter.size() > 0 ? failureLabelFilter : EMPTY_FAILURE_LABEL_FILTER;
return createJobExceptionsInfo(
executionGraph, exceptionToReportMaxSize, failureLabelFilter);
} | @Test
void testNoExceptions() throws HandlerRequestException {
final ExecutionGraphInfo executionGraphInfo =
new ExecutionGraphInfo(new ArchivedExecutionGraphBuilder().build());
final HandlerRequest<EmptyRequestBody> request =
createRequest(executionGraphInfo.getJobId(), 10);
final JobExceptionsInfoWithHistory response =
testInstance.handleRequest(request, executionGraphInfo);
assertThat(response.getRootException()).isNull();
assertThat(response.getRootTimestamp()).isNull();
assertThat(response.isTruncated()).isFalse();
assertThat(response.getAllExceptions()).isEmpty();
assertThat(response.getExceptionHistory().getEntries()).isEmpty();
} |
@Override
public void processElement1(StreamRecord<IN1> element) throws Exception {
collector.setTimestamp(element);
rContext.setElement(element);
userFunction.processElement(element.getValue(), rContext, collector);
rContext.setElement(null);
} | @Test
void testNoKeyedStateOnNonBroadcastSide() throws Exception {
boolean exceptionThrown = false;
final ValueStateDescriptor<String> valueState =
new ValueStateDescriptor<>("any", BasicTypeInfo.STRING_TYPE_INFO);
try (TwoInputStreamOperatorTestHarness<String, Integer, String> testHarness =
getInitializedTestHarness(
new BroadcastProcessFunction<String, Integer, String>() {
private static final long serialVersionUID = -1725365436500098384L;
@Override
public void processBroadcastElement(
Integer value, Context ctx, Collector<String> out)
throws Exception {
// do nothing
}
@Override
public void processElement(
String value, ReadOnlyContext ctx, Collector<String> out)
throws Exception {
assertThatThrownBy(
() ->
getRuntimeContext()
.getState(valueState)
.value())
.isInstanceOf(NullPointerException.class)
.hasMessage(
String.format(
"Keyed state '%s' with type %s can only be used on a 'keyed stream', i.e., after a 'keyBy()' operation.",
valueState.getName(),
valueState.getType()));
}
})) {
testHarness.processWatermark1(new Watermark(10L));
testHarness.processWatermark2(new Watermark(10L));
testHarness.processElement1(new StreamRecord<>("5", 12L));
}
} |
@Udf(description = "Converts a TIMESTAMP value into the"
+ " string representation of the timestamp in the given format. Single quotes in the"
+ " timestamp format can be escaped with '', for example: 'yyyy-MM-dd''T''HH:mm:ssX'"
+ " The system default time zone is used when no time zone is explicitly provided."
+ " The format pattern should be in the format expected"
+ " by java.time.format.DateTimeFormatter")
public String formatTimestamp(
@UdfParameter(
description = "TIMESTAMP value.") final Timestamp timestamp,
@UdfParameter(
description = "The format pattern should be in the format expected by"
+ " java.time.format.DateTimeFormatter.") final String formatPattern) {
return formatTimestamp(timestamp, formatPattern, ZoneId.of("GMT").getId());
} | @Test
public void shouldReturnNullOnNullDateFormat() {
// When:
final String returnValue = udf.formatTimestamp(new Timestamp(1534353043000L), null);
// Then:
assertThat(returnValue, is(nullValue()));
} |
public Publisher<V> iterator() {
return new SetRxIterator<V>() {
@Override
protected RFuture<ScanResult<Object>> scanIterator(RedisClient client, String nextIterPos) {
return ((ScanIterator) instance).scanIteratorAsync(((RedissonObject) instance).getRawName(), client, nextIterPos, null, 10);
}
}.create();
} | @Test
public void testAddBean() throws InterruptedException, ExecutionException {
SimpleBean sb = new SimpleBean();
sb.setLng(1L);
RSetCacheRx<SimpleBean> set = redisson.getSetCache("simple");
sync(set.add(sb));
Assertions.assertEquals(sb.getLng(), toIterator(set.iterator()).next().getLng());
} |
static Schema getSchema(Class<? extends Message> clazz) {
return getSchema(ProtobufUtil.getDescriptorForClass(clazz));
} | @Test
public void testRequiredNestedSchema() {
assertEquals(
TestProtoSchemas.REQUIRED_NESTED_SCHEMA,
ProtoSchemaTranslator.getSchema(Proto2SchemaMessages.RequiredNested.class));
} |
@Override
public String formatNotifyTemplateContent(String content, Map<String, Object> params) {
return StrUtil.format(content, params);
} | @Test
public void testFormatNotifyTemplateContent() {
// Prepare parameters
Map<String, Object> params = new HashMap<>();
params.put("name", "小红");
params.put("what", "饭");
// Invoke and assert
assertEquals("小红,你好,饭吃了吗?",
notifyTemplateService.formatNotifyTemplateContent("{name},你好,{what}吃了吗?", params));
} |
public static ConnectionGroup getOrCreateGroup(String namespace) {
AssertUtil.assertNotBlank(namespace, "namespace should not be empty");
ConnectionGroup group = CONN_MAP.get(namespace);
if (group == null) {
synchronized (CREATE_LOCK) {
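// Double-checked locking: re-read inside the lock so only one group is created per namespace.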
if ((group = CONN_MAP.get(namespace)) == null) {
group = new ConnectionGroup(namespace);
CONN_MAP.put(namespace, group);
}
}
}
return group;
} | @Test
public void testGetOrCreateGroupMultipleThread() throws Exception {
final String namespace = "test-namespace";
int threadCount = 32;
final List<ConnectionGroup> groups = new CopyOnWriteArrayList<>();
final CountDownLatch latch = new CountDownLatch(threadCount);
for (int i = 0; i < threadCount; i++) {
new Thread(new Runnable() {
@Override
public void run() {
groups.add(ConnectionManager.getOrCreateGroup(namespace));
latch.countDown();
}
}).start();
}
latch.await();
for (int i = 1; i < groups.size(); i++) {
assertSame(groups.get(i - 1).getNamespace(), groups.get(i).getNamespace());
}
} |
public static <T> void invokeAll(List<Callable<T>> callables, long timeoutMs)
throws TimeoutException, ExecutionException {
ExecutorService service = Executors.newCachedThreadPool();
try {
invokeAll(service, callables, timeoutMs);
} finally {
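// Interrupt any tasks that are still running once the call returns or times out.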
service.shutdownNow();
}
} | @Test
public void invokeAllPropagatesException() throws Exception {
int numTasks = 5;
final AtomicInteger id = new AtomicInteger();
List<Callable<Void>> tasks = new ArrayList<>();
final Exception testException = new Exception("test message");
for (int i = 0; i < numTasks; i++) {
tasks.add(new Callable<Void>() {
@Override
public Void call() throws Exception {
int myId = id.incrementAndGet();
// The 3rd task throws an exception
if (myId == 3) {
throw testException;
}
return null;
}
});
}
try {
CommonUtils.invokeAll(tasks, 2 * Constants.SECOND_MS);
fail("Expected an exception to be thrown");
} catch (ExecutionException e) {
assertSame(testException, e.getCause());
}
} |
public void isInOrder() {
isInOrder(Ordering.natural());
} | @Test
public void iterableIsInOrderWithComparatorFailure() {
expectFailureWhenTestingThat(asList("1", "10", "2", "20")).isInOrder(COMPARE_AS_DECIMAL);
assertFailureKeys("expected to be in order", "but contained", "followed by", "full contents");
assertFailureValue("but contained", "10");
assertFailureValue("followed by", "2");
assertFailureValue("full contents", "[1, 10, 2, 20]");
} |
public CompletableFuture<Boolean> allowNamespaceOperationAsync(NamespaceName namespaceName,
NamespaceOperation operation,
String role,
AuthenticationDataSource authData) {
if (!this.conf.isAuthorizationEnabled()) {
return CompletableFuture.completedFuture(true);
}
return provider.allowNamespaceOperationAsync(namespaceName, role, operation, authData);
} | @Test(dataProvider = "roles")
public void testNamespaceOperationAsync(String role, String originalRole, boolean shouldPass) throws Exception {
boolean isAuthorized = authorizationService.allowNamespaceOperationAsync(NamespaceName.get("public/default"),
NamespaceOperation.PACKAGES, originalRole, role, null).get();
checkResult(shouldPass, isAuthorized);
} |
public Map<String,String> getValByRegex(String regex) {
Pattern p = Pattern.compile(regex);
Map<String,String> result = new HashMap<String,String>();
List<String> resultKeys = new ArrayList<>();
Matcher m;
for(Map.Entry<Object,Object> item: getProps().entrySet()) {
if (item.getKey() instanceof String &&
item.getValue() instanceof String) {
m = p.matcher((String)item.getKey());
if(m.find()) { // match
resultKeys.add((String) item.getKey());
}
}
}
resultKeys.forEach(item ->
result.put(item, substituteVars(getProps().getProperty(item))));
return result;
} | @Test
public void testGetValByRegex() {
Configuration conf = new Configuration();
String key1 = "t.abc.key1";
String key2 = "t.abc.key2";
String key3 = "tt.abc.key3";
String key4 = "t.abc.ey3";
conf.set(key1, "value1");
conf.set(key2, "value2");
conf.set(key3, "value3");
conf.set(key4, "value3");
Map<String,String> res = conf.getValByRegex("^t\\..*\\.key\\d");
assertTrue("Conf didn't get key " + key1, res.containsKey(key1));
assertTrue("Conf didn't get key " + key2, res.containsKey(key2));
assertTrue("Picked out wrong key " + key3, !res.containsKey(key3));
assertTrue("Picked out wrong key " + key4, !res.containsKey(key4));
} |
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
super.userEventTriggered(ctx, evt);
if (evt instanceof IdleStateEvent) {
IdleStateEvent event = (IdleStateEvent) evt;
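// Send a PINGREQ whenever the channel has been idle in either direction to keep the connection alive.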
switch (event.state()) {
case READER_IDLE:
log.debug("[{}] No reads were performed for specified period for channel {}", event.state(), ctx.channel().id());
this.sendPingReq(ctx.channel());
break;
case WRITER_IDLE:
log.debug("[{}] No writes were performed for specified period for channel {}", event.state(), ctx.channel().id());
this.sendPingReq(ctx.channel());
break;
}
}
} | @Test
void givenChannelReaderIdleState_whenNoPingResponse_thenDisconnectClient() throws Exception {
ChannelHandlerContext ctx = mock(ChannelHandlerContext.class);
Channel channel = mock(Channel.class);
when(ctx.channel()).thenReturn(channel);
when(channel.eventLoop()).thenReturn(new DefaultEventLoop());
ChannelFuture channelFuture = mock(ChannelFuture.class);
when(channel.writeAndFlush(any())).thenReturn(channelFuture);
mqttPingHandler.userEventTriggered(ctx, IdleStateEvent.FIRST_READER_IDLE_STATE_EVENT);
verify(
channelFuture,
after(TimeUnit.SECONDS.toMillis(KEEP_ALIVE_SECONDS) + PROCESS_SEND_DISCONNECT_MSG_TIME_MS)
).addListener(eq(ChannelFutureListener.CLOSE));
} |
@Override
public void commit() {
//no-op
} | @Test
public void shouldNotFailOnNoOpCommit() {
globalContext.commit();
} |
@Override
public void transitionToActive(final StreamTask streamTask, final RecordCollector recordCollector, final ThreadCache newCache) {
if (stateManager.taskType() != TaskType.ACTIVE) {
throw new IllegalStateException("Tried to transition processor context to active but the state manager's " +
"type was " + stateManager.taskType());
}
this.streamTask = streamTask;
this.collector = recordCollector;
this.cache = newCache;
addAllFlushListenersToNewCache();
} | @Test
public void globalKeyValueStoreShouldBeReadOnly() {
foreachSetUp();
when(stateManager.taskType()).thenReturn(TaskType.ACTIVE);
when(stateManager.getGlobalStore(anyString())).thenReturn(null);
final KeyValueStore<String, Long> keyValueStoreMock = mock(KeyValueStore.class);
when(stateManager.getGlobalStore("GlobalKeyValueStore")).thenAnswer(answer -> keyValueStoreMock(keyValueStoreMock));
context = buildProcessorContextImpl(streamsConfig, stateManager);
final StreamTask task = mock(StreamTask.class);
context.transitionToActive(task, null, null);
mockProcessorNodeWithLocalKeyValueStore();
doTest("GlobalKeyValueStore", (Consumer<KeyValueStore<String, Long>>) store -> {
verifyStoreCannotBeInitializedOrClosed(store);
checkThrowsUnsupportedOperation(store::flush, "flush()");
checkThrowsUnsupportedOperation(() -> store.put("1", 1L), "put()");
checkThrowsUnsupportedOperation(() -> store.putIfAbsent("1", 1L), "putIfAbsent()");
checkThrowsUnsupportedOperation(() -> store.putAll(Collections.emptyList()), "putAll()");
checkThrowsUnsupportedOperation(() -> store.delete("1"), "delete()");
assertEquals((Long) VALUE, store.get(KEY));
assertEquals(rangeIter, store.range("one", "two"));
assertEquals(allIter, store.all());
assertEquals(VALUE, store.approximateNumEntries());
});
} |
@Override
public RenewDelegationTokenResponse renewDelegationToken(
RenewDelegationTokenRequest request) throws YarnException, IOException {
try {
if (!RouterServerUtil.isAllowedDelegationTokenOp()) {
routerMetrics.incrRenewDelegationTokenFailedRetrieved();
String msg = "Delegation Token can be renewed only with kerberos authentication";
RouterAuditLogger.logFailure(user.getShortUserName(), RENEW_DELEGATIONTOKEN, UNKNOWN,
TARGET_CLIENT_RM_SERVICE, msg);
throw new IOException(msg);
}
long startTime = clock.getTime();
org.apache.hadoop.yarn.api.records.Token protoToken = request.getDelegationToken();
Token<RMDelegationTokenIdentifier> token = new Token<>(
protoToken.getIdentifier().array(), protoToken.getPassword().array(),
new Text(protoToken.getKind()), new Text(protoToken.getService()));
String renewer = RouterServerUtil.getRenewerForToken(token);
long nextExpTime = this.getTokenSecretManager().renewToken(token, renewer);
RenewDelegationTokenResponse renewResponse =
Records.newRecord(RenewDelegationTokenResponse.class);
renewResponse.setNextExpirationTime(nextExpTime);
long stopTime = clock.getTime();
routerMetrics.succeededRenewDelegationTokenRetrieved((stopTime - startTime));
RouterAuditLogger.logSuccess(user.getShortUserName(), RENEW_DELEGATIONTOKEN,
TARGET_CLIENT_RM_SERVICE);
return renewResponse;
} catch (IOException e) {
routerMetrics.incrRenewDelegationTokenFailedRetrieved();
RouterAuditLogger.logFailure(user.getShortUserName(), RENEW_DELEGATIONTOKEN, UNKNOWN,
TARGET_CLIENT_RM_SERVICE, "renewDelegationToken error, errMsg = " + e.getMessage());
throw new YarnException(e);
}
} | @Test
public void testRenewDelegationToken() throws IOException, YarnException {
// This unit test checks that getDelegationToken and renewDelegationToken behave as expected:
// 1. Call GetDelegationToken to apply for delegationToken.
// 2. Call renewDelegationToken to refresh delegationToken.
// By looking at the code of AbstractDelegationTokenSecretManager#renewToken,
// we know that renewTime is calculated as Math.min(id.getMaxDate(), now + tokenRenewInterval)
// so renewTime will be less than or equal to maxDate.
// 3. We will compare whether the expirationTime returned to the
// client is consistent with the renewDate in the stateStore.
// Step1. Call GetDelegationToken to apply for delegationToken.
GetDelegationTokenRequest request = mock(GetDelegationTokenRequest.class);
when(request.getRenewer()).thenReturn("renewer2");
GetDelegationTokenResponse response = interceptor.getDelegationToken(request);
Assert.assertNotNull(response);
Token delegationToken = response.getRMDelegationToken();
org.apache.hadoop.security.token.Token<RMDelegationTokenIdentifier> token =
ConverterUtils.convertFromYarn(delegationToken, (Text) null);
RMDelegationTokenIdentifier rMDelegationTokenIdentifier = token.decodeIdentifier();
String renewer = rMDelegationTokenIdentifier.getRenewer().toString();
long maxDate = rMDelegationTokenIdentifier.getMaxDate();
Assert.assertEquals("renewer2", renewer);
// Step2. Call renewDelegationToken to refresh delegationToken.
RenewDelegationTokenRequest renewRequest = Records.newRecord(RenewDelegationTokenRequest.class);
renewRequest.setDelegationToken(delegationToken);
RenewDelegationTokenResponse renewResponse = interceptor.renewDelegationToken(renewRequest);
Assert.assertNotNull(renewResponse);
long expDate = renewResponse.getNextExpirationTime();
Assert.assertTrue(expDate <= maxDate);
// Step3. Compare whether the expirationTime returned to
// the client is consistent with the renewDate in the stateStore
RouterRMDTSecretManagerState managerState = stateStore.getRouterRMSecretManagerState();
Map<RMDelegationTokenIdentifier, RouterStoreToken> delegationTokenState =
managerState.getTokenState();
Assert.assertNotNull(delegationTokenState);
Assert.assertTrue(delegationTokenState.containsKey(rMDelegationTokenIdentifier));
RouterStoreToken resultRouterStoreToken = delegationTokenState.get(rMDelegationTokenIdentifier);
Assert.assertNotNull(resultRouterStoreToken);
long renewDate = resultRouterStoreToken.getRenewDate();
Assert.assertEquals(expDate, renewDate);
} |
@Override
public List<FileEntriesLayer> createLayers() {
FileEntriesLayer jarLayer =
FileEntriesLayer.builder()
.setName(JarLayers.JAR)
.addEntry(jarPath, JarLayers.APP_ROOT.resolve(jarPath.getFileName()))
.build();
return Collections.singletonList(jarLayer);
} | @Test
public void testCreateLayers() throws URISyntaxException {
Path springBootJar = Paths.get(Resources.getResource(SPRING_BOOT_JAR).toURI());
SpringBootPackagedProcessor springBootProcessor =
new SpringBootPackagedProcessor(springBootJar, JAR_JAVA_VERSION);
List<FileEntriesLayer> layers = springBootProcessor.createLayers();
assertThat(layers.size()).isEqualTo(1);
FileEntriesLayer jarLayer = layers.get(0);
assertThat(jarLayer.getName()).isEqualTo("jar");
assertThat(jarLayer.getEntries().size()).isEqualTo(1);
assertThat(jarLayer.getEntries().get(0).getExtractionPath())
.isEqualTo(AbsoluteUnixPath.get("/app/springboot_sample.jar"));
} |
public static Ip4Address valueOf(int value) {
byte[] bytes =
ByteBuffer.allocate(INET_BYTE_LENGTH).putInt(value).array();
return new Ip4Address(bytes);
} | @Test
public void testValueOfInetAddressIPv4() {
Ip4Address ipAddress;
InetAddress inetAddress;
inetAddress = InetAddresses.forString("1.2.3.4");
ipAddress = Ip4Address.valueOf(inetAddress);
assertThat(ipAddress.toString(), is("1.2.3.4"));
inetAddress = InetAddresses.forString("0.0.0.0");
ipAddress = Ip4Address.valueOf(inetAddress);
assertThat(ipAddress.toString(), is("0.0.0.0"));
inetAddress = InetAddresses.forString("255.255.255.255");
ipAddress = Ip4Address.valueOf(inetAddress);
assertThat(ipAddress.toString(), is("255.255.255.255"));
} |
public static BundleCounter bundleProcessingThreadCounter(String shortId, MetricName name) {
return new BundleProcessingThreadCounter(shortId, name);
} | @Test
public void testAccurateBundleCounterReportsValueFirstTimeWithoutMutations() throws Exception {
Map<String, ByteString> report = new HashMap<>();
BundleCounter bundleCounter = Metrics.bundleProcessingThreadCounter(TEST_ID, TEST_NAME);
bundleCounter.updateIntermediateMonitoringData(report);
assertEquals(
report, Collections.singletonMap(TEST_ID, MonitoringInfoEncodings.encodeInt64Counter(0)));
report.clear();
// Test that a reported value isn't reported again on final update
bundleCounter.updateFinalMonitoringData(report);
assertEquals(report, Collections.emptyMap());
// Test that the value is not reported after reset if no mutations after being
// reported the first time.
bundleCounter.reset();
bundleCounter.updateFinalMonitoringData(report);
assertEquals(report, Collections.emptyMap());
} |
@ProcessElement
public void processElement(OutputReceiver<InitialPipelineState> receiver) throws IOException {
LOG.info(daoFactory.getStreamTableDebugString());
LOG.info(daoFactory.getMetadataTableDebugString());
LOG.info("ChangeStreamName: " + daoFactory.getChangeStreamName());
boolean resume = false;
DetectNewPartitionsState detectNewPartitionsState =
daoFactory.getMetadataTableDao().readDetectNewPartitionsState();
switch (existingPipelineOptions) {
case RESUME_OR_NEW:
// perform resumption.
if (detectNewPartitionsState != null) {
resume = true;
startTime = detectNewPartitionsState.getWatermark();
LOG.info("Resuming from previous pipeline with low watermark of {}", startTime);
} else {
LOG.info(
"Attempted to resume, but previous watermark does not exist, starting at {}",
startTime);
}
break;
case RESUME_OR_FAIL:
// perform resumption.
if (detectNewPartitionsState != null) {
resume = true;
startTime = detectNewPartitionsState.getWatermark();
LOG.info("Resuming from previous pipeline with low watermark of {}", startTime);
} else {
LOG.error("Previous pipeline with the same change stream name doesn't exist, stopping");
return;
}
break;
case FAIL_IF_EXISTS:
if (detectNewPartitionsState != null) {
LOG.error(
"A previous pipeline exists with the same change stream name and existingPipelineOption is set to FAIL_IF_EXISTS.");
return;
}
break;
case SKIP_CLEANUP:
if (detectNewPartitionsState != null) {
LOG.error(
"A previous pipeline exists with the same change stream name and existingPipelineOption is set to SKIP_CLEANUP. This option should only be used in tests.");
return;
}
break;
default:
LOG.error("Unexpected existingPipelineOptions option.");
// terminate pipeline
return;
}
daoFactory.getMetadataTableDao().writeDetectNewPartitionVersion();
receiver.output(new InitialPipelineState(startTime, resume));
} | @Test
public void testInitializeSkipCleanupWithDNP() throws IOException {
Instant resumeTime = Instant.now().minus(Duration.standardSeconds(10000));
metadataTableDao.updateDetectNewPartitionWatermark(resumeTime);
ByteString metadataRowKey =
metadataTableAdminDao
.getChangeStreamNamePrefix()
.concat(ByteString.copyFromUtf8("existing_row"));
dataClient.mutateRow(
RowMutation.create(tableId, metadataRowKey)
.setCell(
MetadataTableAdminDao.CF_WATERMARK, MetadataTableAdminDao.QUALIFIER_DEFAULT, 123));
Instant startTime = Instant.now();
InitializeDoFn initializeDoFn =
new InitializeDoFn(daoFactory, startTime, ExistingPipelineOptions.SKIP_CLEANUP);
initializeDoFn.processElement(outputReceiver);
// We don't want the pipeline to resume to avoid duplicates
verify(outputReceiver, never()).output(any());
// Existing metadata shouldn't be cleaned up
assertNotNull(dataClient.readRow(tableId, metadataRowKey));
} |
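// --- Editor's sketch (not from the source row) ---
// A hedged sketch mirroring the fixture above, assuming the shared setup does
// not pre-populate DetectNewPartitions state: with RESUME_OR_FAIL and no prior
// state, the else-branch in the focal method logs an error and returns, so
// nothing should be emitted.
@Test
public void testInitializeResumeOrFailWithoutDNPSketch() throws IOException {
  InitializeDoFn initializeDoFn =
      new InitializeDoFn(daoFactory, Instant.now(), ExistingPipelineOptions.RESUME_OR_FAIL);
  initializeDoFn.processElement(outputReceiver);
  // No previous pipeline exists, so the pipeline must stop without output.
  verify(outputReceiver, never()).output(any());
}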
@Nullable
public DataBuffer readChunk(int index) throws IOException {
if (index >= mDataBuffers.length) {
return null;
}
// Lazy read-ahead with double-checked locking: if the requested chunk has not
// been buffered yet, take the write lock and re-check the count so concurrent
// readers never read the same chunk from the underlying source twice.
if (index >= mBufferCount.get()) {
  try (LockResource ignored = new LockResource(mBufferLocks.writeLock())) {
    while (index >= mBufferCount.get()) {
DataBuffer buffer = readChunk();
mDataBuffers[mBufferCount.get()] = buffer;
mBufferCount.incrementAndGet();
}
}
}
return mDataBuffers[index];
} | @Test
public void testSequentialRead() throws Exception {
int chunkNum = BLOCK_SIZE / CHUNK_SIZE;
for (int i = 0; i < chunkNum; i++) {
DataBuffer buffer = mDataReader.readChunk(i);
Assert.assertEquals(i + 1, mDataReader.getReadChunkNum());
Assert.assertTrue(mDataReader.validateBuffer(i, buffer));
}
} |
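// --- Editor's sketch (not from the source row) ---
// Hedged boundary/idempotency checks for readChunk(int), assuming the buffer
// array holds exactly BLOCK_SIZE / CHUNK_SIZE chunks and getReadChunkNum()
// counts underlying reads: an out-of-range index returns null without reading,
// and a re-read of a buffered index is served from the cache.
@Test
public void testOutOfRangeAndRepeatedReadSketch() throws Exception {
  int chunkNum = BLOCK_SIZE / CHUNK_SIZE;
  Assert.assertNull(mDataReader.readChunk(chunkNum));
  Assert.assertEquals(0, mDataReader.getReadChunkNum());
  DataBuffer buffer = mDataReader.readChunk(0);
  Assert.assertEquals(1, mDataReader.getReadChunkNum());
  Assert.assertSame(buffer, mDataReader.readChunk(0));
  Assert.assertEquals(1, mDataReader.getReadChunkNum());
}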
public URI getHttpPublishUri() {
if (httpPublishUri == null) {
final URI defaultHttpUri = getDefaultHttpUri();
LOG.debug("No \"http_publish_uri\" set. Using default <{}>.", defaultHttpUri);
return defaultHttpUri;
} else {
final InetAddress inetAddress = toInetAddress(httpPublishUri.getHost());
if (Tools.isWildcardInetAddress(inetAddress)) {
final URI defaultHttpUri = getDefaultHttpUri(httpPublishUri.getPath());
LOG.warn("\"{}\" is not a valid setting for \"http_publish_uri\". Using default <{}>.", httpPublishUri, defaultHttpUri);
return defaultHttpUri;
} else {
return Tools.normalizeURI(httpPublishUri, httpPublishUri.getScheme(), GRAYLOG_DEFAULT_PORT, httpPublishUri.getPath());
}
}
} | @Test
public void testHttpPublishUriWithMissingTrailingSlash() throws RepositoryException, ValidationException {
jadConfig.setRepository(new InMemoryRepository(ImmutableMap.of("http_publish_uri", "http://www.example.com:12900/foo"))).addConfigurationBean(configuration).process();
assertThat(configuration.getHttpPublishUri()).isEqualTo(URI.create("http://www.example.com:12900/foo/"));
} |
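// --- Editor's sketch (not from the source row) ---
// A hedged counterpart to the test above for the wildcard branch of the focal
// method, assuming getDefaultHttpUri() is accessible from the test: a wildcard
// host such as 0.0.0.0 should be replaced by the default HTTP URI.
@Test
public void testHttpPublishUriWithWildcardHostFallsBackToDefaultSketch() throws RepositoryException, ValidationException {
  jadConfig.setRepository(new InMemoryRepository(ImmutableMap.of("http_publish_uri", "http://0.0.0.0:12900/")))
      .addConfigurationBean(configuration).process();
  assertThat(configuration.getHttpPublishUri()).isEqualTo(configuration.getDefaultHttpUri());
}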