focal_method | test_case |
---|---|
@Override
public long getSplitBacklogBytes() {
// Safety check in case a progress check is made before the start method is called.
if (shardReadersPool == null) {
return UnboundedReader.BACKLOG_UNKNOWN;
}
Instant latestRecordTimestamp = shardReadersPool.getLatestRecordTimestamp();
if (latestRecordTimestamp.equals(BoundedWindow.TIMESTAMP_MIN_VALUE)) {
LOG.debug("Split backlog bytes for stream {} unknown", spec.getStreamName());
return UnboundedSource.UnboundedReader.BACKLOG_UNKNOWN;
}
if (latestRecordTimestamp.plus(spec.getUpToDateThreshold()).isAfterNow()) {
LOG.debug(
"Split backlog bytes for stream {} with latest record timestamp {}: 0 (latest record timestamp is up-to-date with threshold of {})",
spec.getStreamName(),
latestRecordTimestamp,
spec.getUpToDateThreshold());
return 0L;
}
if (backlogBytesLastCheckTime.plus(backlogBytesCheckThreshold).isAfterNow()) {
LOG.debug(
"Split backlog bytes for {} stream with latest record timestamp {}: {} (cached value)",
spec.getStreamName(),
latestRecordTimestamp,
lastBacklogBytes);
return lastBacklogBytes;
}
try {
lastBacklogBytes = kinesis.getBacklogBytes(spec.getStreamName(), latestRecordTimestamp);
backlogBytesLastCheckTime = Instant.now();
} catch (TransientKinesisException e) {
LOG.warn(
"Transient exception occurred during backlog estimation for stream {}.",
spec.getStreamName(),
e);
}
LOG.info(
"Split backlog bytes for {} stream with {} latest record timestamp: {}",
spec.getStreamName(),
latestRecordTimestamp,
lastBacklogBytes);
return lastBacklogBytes;
}
|
@Test
public void getSplitBacklogBytesShouldReturnUnknownIfNotStarted() {
assertThat(reader.getSplitBacklogBytes()).isEqualTo(UnboundedReader.BACKLOG_UNKNOWN);
}
|
public static String variablesToGetParamValue(final String variables) {
return StringUtils.trimToNull(variables);
}
|
@Test
void testVariablesToGetParamValue() throws Exception {
assertEquals(OBJECT_MAPPER.readTree(EXPECTED_VARIABLES_GET_PARAM_VALUE),
OBJECT_MAPPER.readTree(GraphQLRequestParamUtils.variablesToGetParamValue(params.getVariables())));
}
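// A hedged companion sketch, not from the original suite: StringUtils.trimToNull
// returns null for blank input, so blank variables should yield no GET param value
// (assertNull from JUnit 5 is an assumed import).
@Test
void testVariablesToGetParamValueWithBlankInput() {
assertNull(GraphQLRequestParamUtils.variablesToGetParamValue("   "));
}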
|
@Override
public boolean remove(Object o) {
return underlying.remove(o);
}
|
@Test
public void testRemove() {
BoundedList<String> list = BoundedList.newArrayBacked(3);
list.add("a");
list.add("a");
list.add("c");
assertEquals(0, list.indexOf("a"));
assertEquals(1, list.lastIndexOf("a"));
list.remove("a");
assertEquals(Arrays.asList("a", "c"), list);
list.remove(0);
assertEquals(Arrays.asList("c"), list);
}
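// A hedged companion sketch (same BoundedList API assumed; assertTrue and
// assertFalse are assumed imports): remove(Object) delegates to the underlying
// list, so it should report whether the element was actually present.
@Test
public void testRemoveReturnValue() {
BoundedList<String> list = BoundedList.newArrayBacked(3);
list.add("a");
assertTrue(list.remove("a"));
assertFalse(list.remove("a"));
}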
|
public static <T> T deserialize(CustomObject customObject) {
return (T) SER_DES[customObject.getType()].deserialize(customObject.getBuffer());
}
|
@Test
public void testHyperLogLogDeserializeThrowsForSizeMismatch()
throws Exception {
// We serialize a HLL w/ log2m of 12 and then trim 1024 bytes from the end of it and try to deserialize it. An
// exception should occur because 2732 bytes are expected after the headers, but instead it will only find 1708.
byte[] bytes = (new HyperLogLog(12)).getBytes();
byte[] trimmed = Arrays.copyOfRange(bytes, 0, bytes.length - 1024);
assertThrows(RuntimeException.class,
() -> ObjectSerDeUtils.deserialize(trimmed, ObjectSerDeUtils.ObjectType.HyperLogLog));
}
|
public static <FnT extends DoFn<?, ?>> DoFnSignature getSignature(Class<FnT> fn) {
return signatureCache.computeIfAbsent(fn, DoFnSignatures::parseSignature);
}
|
@Test
public void testWrongOutputReceiverType() throws Exception {
thrown.expect(IllegalArgumentException.class);
thrown.expectMessage("OutputReceiver should be parameterized by java.lang.String");
DoFnSignatures.getSignature(
new DoFn<String, String>() {
@ProcessElement
public void process(OutputReceiver<Integer> receiver) {}
}.getClass());
}
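// A hedged positive-case sketch (assuming the Beam API shown above): an
// OutputReceiver parameterized with the declared output type should parse
// without raising an IllegalArgumentException.
@Test
public void testMatchingOutputReceiverType() throws Exception {
DoFnSignatures.getSignature(
new DoFn<String, String>() {
@ProcessElement
public void process(OutputReceiver<String> receiver) {}
}.getClass());
}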
|
public static Comparator<StructLike> forType(Types.StructType struct) {
return new StructLikeComparator(struct);
}
|
@Test
public void testTimestamp() {
assertComparesCorrectly(Comparators.forType(Types.TimestampType.withoutZone()), 111, 222);
assertComparesCorrectly(Comparators.forType(Types.TimestampType.withZone()), 111, 222);
}
|
@GET
@Path("{path:.*}")
@Produces({MediaType.APPLICATION_OCTET_STREAM + "; " + JettyUtils.UTF_8,
MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8})
public Response get(@PathParam("path") String path,
@Context UriInfo uriInfo,
@QueryParam(OperationParam.NAME) OperationParam op,
@Context Parameters params,
@Context HttpServletRequest request)
throws IOException, FileSystemAccessException {
// Restrict access to only GETFILESTATUS and LISTSTATUS in write-only mode
if ((op.value() != HttpFSFileSystem.Operation.GETFILESTATUS) &&
(op.value() != HttpFSFileSystem.Operation.LISTSTATUS) &&
accessMode == AccessMode.WRITEONLY) {
return Response.status(Response.Status.FORBIDDEN).build();
}
UserGroupInformation user = HttpUserGroupInformation.get();
Response response;
path = makeAbsolute(path);
MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
MDC.put("hostname", request.getRemoteAddr());
switch (op.value()) {
case OPEN: {
Boolean noRedirect = params.get(
NoRedirectParam.NAME, NoRedirectParam.class);
if (noRedirect) {
URI redirectURL = createOpenRedirectionURL(uriInfo);
final String js = JsonUtil.toJsonString("Location", redirectURL);
response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
} else {
// Invoking the command directly using an unmanaged FileSystem that is
// released by the FileSystemReleaseFilter
final FSOperations.FSOpen command = new FSOperations.FSOpen(path);
final FileSystem fs = createFileSystem(user);
InputStream is = null;
UserGroupInformation ugi = UserGroupInformation
.createProxyUser(user.getShortUserName(),
UserGroupInformation.getLoginUser());
try {
is = ugi.doAs(new PrivilegedExceptionAction<InputStream>() {
@Override
public InputStream run() throws Exception {
return command.execute(fs);
}
});
} catch (InterruptedException ie) {
LOG.warn("Open interrupted.", ie);
Thread.currentThread().interrupt();
}
Long offset = params.get(OffsetParam.NAME, OffsetParam.class);
Long len = params.get(LenParam.NAME, LenParam.class);
AUDIT_LOG.info("[{}] offset [{}] len [{}]",
new Object[] { path, offset, len });
InputStreamEntity entity = new InputStreamEntity(is, offset, len);
response = Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM)
.build();
}
break;
}
case GETFILESTATUS: {
FSOperations.FSFileStatus command = new FSOperations.FSFileStatus(path);
Map json = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case LISTSTATUS: {
String filter = params.get(FilterParam.NAME, FilterParam.class);
FSOperations.FSListStatus command =
new FSOperations.FSListStatus(path, filter);
Map json = fsExecute(user, command);
AUDIT_LOG.info("[{}] filter [{}]", path, (filter != null) ? filter : "-");
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETHOMEDIRECTORY: {
enforceRootPath(op.value(), path);
FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
JSONObject json = fsExecute(user, command);
AUDIT_LOG.info("Home Directory for [{}]", user);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case INSTRUMENTATION: {
enforceRootPath(op.value(), path);
Groups groups = HttpFSServerWebApp.get().get(Groups.class);
Set<String> userGroups = groups.getGroupsSet(user.getShortUserName());
if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) {
throw new AccessControlException(
"User not in HttpFSServer admin group");
}
Instrumentation instrumentation =
HttpFSServerWebApp.get().get(Instrumentation.class);
Map snapshot = instrumentation.getSnapshot();
response = Response.ok(snapshot).build();
break;
}
case GETCONTENTSUMMARY: {
FSOperations.FSContentSummary command =
new FSOperations.FSContentSummary(path);
Map json = fsExecute(user, command);
AUDIT_LOG.info("Content summary for [{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETQUOTAUSAGE: {
FSOperations.FSQuotaUsage command =
new FSOperations.FSQuotaUsage(path);
Map json = fsExecute(user, command);
AUDIT_LOG.info("Quota Usage for [{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETFILECHECKSUM: {
FSOperations.FSFileChecksum command =
new FSOperations.FSFileChecksum(path);
Boolean noRedirect = params.get(
NoRedirectParam.NAME, NoRedirectParam.class);
AUDIT_LOG.info("[{}]", path);
if (noRedirect) {
URI redirectURL = createOpenRedirectionURL(uriInfo);
final String js = JsonUtil.toJsonString("Location", redirectURL);
response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
} else {
Map json = fsExecute(user, command);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
}
break;
}
case GETFILEBLOCKLOCATIONS: {
long offset = 0;
long len = Long.MAX_VALUE;
Long offsetParam = params.get(OffsetParam.NAME, OffsetParam.class);
Long lenParam = params.get(LenParam.NAME, LenParam.class);
AUDIT_LOG.info("[{}] offset [{}] len [{}]", path, offsetParam, lenParam);
if (offsetParam != null && offsetParam > 0) {
offset = offsetParam;
}
if (lenParam != null && lenParam > 0) {
len = lenParam;
}
FSOperations.FSFileBlockLocations command =
new FSOperations.FSFileBlockLocations(path, offset, len);
@SuppressWarnings("rawtypes")
Map locations = fsExecute(user, command);
final String json = JsonUtil.toJsonString("BlockLocations", locations);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETACLSTATUS: {
FSOperations.FSAclStatus command = new FSOperations.FSAclStatus(path);
Map json = fsExecute(user, command);
AUDIT_LOG.info("ACL status for [{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETXATTRS: {
List<String> xattrNames =
params.getValues(XAttrNameParam.NAME, XAttrNameParam.class);
XAttrCodec encoding =
params.get(XAttrEncodingParam.NAME, XAttrEncodingParam.class);
FSOperations.FSGetXAttrs command =
new FSOperations.FSGetXAttrs(path, xattrNames, encoding);
@SuppressWarnings("rawtypes") Map json = fsExecute(user, command);
AUDIT_LOG.info("XAttrs for [{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case LISTXATTRS: {
FSOperations.FSListXAttrs command = new FSOperations.FSListXAttrs(path);
@SuppressWarnings("rawtypes") Map json = fsExecute(user, command);
AUDIT_LOG.info("XAttr names for [{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case LISTSTATUS_BATCH: {
String startAfter = params.get(
HttpFSParametersProvider.StartAfterParam.NAME,
HttpFSParametersProvider.StartAfterParam.class);
byte[] token = HttpFSUtils.EMPTY_BYTES;
if (startAfter != null) {
token = startAfter.getBytes(StandardCharsets.UTF_8);
}
FSOperations.FSListStatusBatch command = new FSOperations
.FSListStatusBatch(path, token);
@SuppressWarnings("rawtypes") Map json = fsExecute(user, command);
AUDIT_LOG.info("[{}] token [{}]", path, token);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETTRASHROOT: {
FSOperations.FSTrashRoot command = new FSOperations.FSTrashRoot(path);
JSONObject json = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETALLSTORAGEPOLICY: {
FSOperations.FSGetAllStoragePolicies command =
new FSOperations.FSGetAllStoragePolicies();
JSONObject json = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETSTORAGEPOLICY: {
FSOperations.FSGetStoragePolicy command =
new FSOperations.FSGetStoragePolicy(path);
JSONObject json = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETSNAPSHOTDIFF: {
String oldSnapshotName = params.get(OldSnapshotNameParam.NAME,
OldSnapshotNameParam.class);
String snapshotName = params.get(SnapshotNameParam.NAME,
SnapshotNameParam.class);
FSOperations.FSGetSnapshotDiff command =
new FSOperations.FSGetSnapshotDiff(path, oldSnapshotName,
snapshotName);
String js = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETSNAPSHOTDIFFLISTING: {
String oldSnapshotName = params.get(OldSnapshotNameParam.NAME,
OldSnapshotNameParam.class);
String snapshotName = params.get(SnapshotNameParam.NAME,
SnapshotNameParam.class);
String snapshotDiffStartPath = params
.get(HttpFSParametersProvider.SnapshotDiffStartPathParam.NAME,
HttpFSParametersProvider.SnapshotDiffStartPathParam.class);
Integer snapshotDiffIndex = params.get(HttpFSParametersProvider.SnapshotDiffIndexParam.NAME,
HttpFSParametersProvider.SnapshotDiffIndexParam.class);
FSOperations.FSGetSnapshotDiffListing command =
new FSOperations.FSGetSnapshotDiffListing(path, oldSnapshotName,
snapshotName, snapshotDiffStartPath, snapshotDiffIndex);
String js = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETSNAPSHOTTABLEDIRECTORYLIST: {
FSOperations.FSGetSnapshottableDirListing command =
new FSOperations.FSGetSnapshottableDirListing();
String js = fsExecute(user, command);
AUDIT_LOG.info("[{}]", "/");
response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETSNAPSHOTLIST: {
FSOperations.FSGetSnapshotListing command =
new FSOperations.FSGetSnapshotListing(path);
String js = fsExecute(user, command);
AUDIT_LOG.info("[{}]", "/");
response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETSERVERDEFAULTS: {
FSOperations.FSGetServerDefaults command =
new FSOperations.FSGetServerDefaults();
String js = fsExecute(user, command);
AUDIT_LOG.info("[{}]", "/");
response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
break;
}
case CHECKACCESS: {
String mode = params.get(FsActionParam.NAME, FsActionParam.class);
FsActionParam fsparam = new FsActionParam(mode);
FSOperations.FSAccess command = new FSOperations.FSAccess(path,
FsAction.getFsAction(fsparam.value()));
fsExecute(user, command);
AUDIT_LOG.info("[{}]", "/");
response = Response.ok().build();
break;
}
case GETECPOLICY: {
FSOperations.FSGetErasureCodingPolicy command =
new FSOperations.FSGetErasureCodingPolicy(path);
String js = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETECPOLICIES: {
FSOperations.FSGetErasureCodingPolicies command =
new FSOperations.FSGetErasureCodingPolicies();
String js = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETECCODECS: {
FSOperations.FSGetErasureCodingCodecs command =
new FSOperations.FSGetErasureCodingCodecs();
Map json = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GET_BLOCK_LOCATIONS: {
long offset = 0;
long len = Long.MAX_VALUE;
Long offsetParam = params.get(OffsetParam.NAME, OffsetParam.class);
Long lenParam = params.get(LenParam.NAME, LenParam.class);
AUDIT_LOG.info("[{}] offset [{}] len [{}]", path, offsetParam, lenParam);
if (offsetParam != null && offsetParam > 0) {
offset = offsetParam;
}
if (lenParam != null && lenParam > 0) {
len = lenParam;
}
FSOperations.FSFileBlockLocationsLegacy command =
new FSOperations.FSFileBlockLocationsLegacy(path, offset, len);
@SuppressWarnings("rawtypes")
Map locations = fsExecute(user, command);
final String json = JsonUtil.toJsonString("LocatedBlocks", locations);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETFILELINKSTATUS: {
FSOperations.FSFileLinkStatus command =
new FSOperations.FSFileLinkStatus(path);
@SuppressWarnings("rawtypes") Map js = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path);
response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETSTATUS: {
FSOperations.FSStatus command = new FSOperations.FSStatus(path);
@SuppressWarnings("rawtypes") Map js = fsExecute(user, command);
response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
break;
}
case GETTRASHROOTS: {
Boolean allUsers = params.get(AllUsersParam.NAME, AllUsersParam.class);
FSOperations.FSGetTrashRoots command = new FSOperations.FSGetTrashRoots(allUsers);
Map json = fsExecute(user, command);
AUDIT_LOG.info("allUsers [{}]", allUsers);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
default: {
throw new IOException(
MessageFormat.format("Invalid HTTP GET operation [{0}]", op.value()));
}
}
return response;
}
|
@Test
@TestDir
@TestJetty
@TestHdfs
public void testErasureCodingPolicy() throws Exception {
createHttpFSServer(false, false);
final String dir = "/ecPolicy";
Path path1 = new Path(dir);
final ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies
.getByID(SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
final String ecPolicyName = ecPolicy.getName();
DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
.get(path1.toUri(), TestHdfsHelper.getHdfsConf());
dfs.mkdirs(new Path(dir));
dfs.enableErasureCodingPolicy(ecPolicyName);
HttpURLConnection conn =
putCmdWithReturn(dir, "SETECPOLICY", "ecpolicy=" + ecPolicyName);
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
HttpURLConnection conn1 = sendRequestToHttpFSServer(dir, "GETECPOLICY", "");
// Should return HTTP_OK
Assert.assertEquals(conn1.getResponseCode(), HttpURLConnection.HTTP_OK);
// Verify the response
BufferedReader reader =
new BufferedReader(new InputStreamReader(conn1.getInputStream()));
// The response should be a one-line JSON string.
String dirLst = reader.readLine();
ErasureCodingPolicy dfsDirLst = dfs.getErasureCodingPolicy(path1);
Assert.assertNotNull(dfsDirLst);
Assert.assertEquals(dirLst, JsonUtil.toJsonString(dfsDirLst));
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
URL url = new URL(TestJettyHelper.getJettyURL(),
MessageFormat.format("/webhdfs/v1{0}?user.name={1}&op={2}&{3}", dir,
user, "UNSETECPOLICY", ""));
HttpURLConnection conn2 = (HttpURLConnection) url.openConnection();
conn2.setRequestMethod("POST");
conn2.connect();
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn2.getResponseCode());
// response should be null
dfsDirLst = dfs.getErasureCodingPolicy(path1);
Assert.assertNull(dfsDirLst);
// test put operation with path as "/"
final String dir1 = "/";
HttpURLConnection conn3 =
putCmdWithReturn(dir1, "SETECPOLICY", "ecpolicy=" + ecPolicyName);
// Should return HTTP_OK
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn3.getResponseCode());
// test post operation with path as "/"
final String dir2 = "/";
URL url1 = new URL(TestJettyHelper.getJettyURL(),
MessageFormat.format("/webhdfs/v1{0}?user.name={1}&op={2}&{3}", dir2,
user, "UNSETECPOLICY", ""));
HttpURLConnection conn4 = (HttpURLConnection) url1.openConnection();
conn4.setRequestMethod("POST");
conn4.connect();
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn4.getResponseCode());
}
|
public boolean isEmpty() {
return username() == null && password() == null && privateKey() == null && passPhrase() == null;
}
|
@Test
public void isEmpty_givenAllNotNullProperties_returnFalse() {
MapSettings settings = new MapSettings(new PropertyDefinitions(System2.INSTANCE));
settings.setProperty("sonar.svn.username", "bob");
settings.setProperty("sonar.svn.privateKeyPath", "bob");
settings.setProperty("sonar.svn.passphrase.secured", "bob");
SvnConfiguration svnConfiguration = new SvnConfiguration(settings.asConfig());
assertThat(svnConfiguration.isEmpty()).isFalse();
}
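// A hedged complementary sketch (same fixture types assumed): with no
// properties set, every credential accessor is null and isEmpty() should hold.
@Test
public void isEmpty_givenNoProperties_returnTrue() {
MapSettings settings = new MapSettings(new PropertyDefinitions(System2.INSTANCE));
SvnConfiguration svnConfiguration = new SvnConfiguration(settings.asConfig());
assertThat(svnConfiguration.isEmpty()).isTrue();
}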
|
@VisibleForTesting
Set<? extends Watcher> getEntries() {
return Sets.newHashSet(entries);
}
|
@Test
public void testTriggered() throws Exception {
CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1));
try {
client.start();
WatcherRemovalFacade removerClient = (WatcherRemovalFacade) client.newWatcherRemoveCuratorFramework();
final CountDownLatch latch = new CountDownLatch(1);
Watcher watcher = new Watcher() {
@Override
public void process(WatchedEvent event) {
if (event.getType() == Event.EventType.NodeCreated) {
latch.countDown();
}
}
};
removerClient.checkExists().usingWatcher(watcher).forPath("/yo");
assertEquals(removerClient.getRemovalManager().getEntries().size(), 1);
removerClient.create().forPath("/yo");
assertTrue(new Timing().awaitLatch(latch));
assertEquals(removerClient.getRemovalManager().getEntries().size(), 0);
} finally {
TestCleanState.closeAndTestClean(client);
}
}
|
public CodegenTableDO buildTable(TableInfo tableInfo) {
CodegenTableDO table = CodegenConvert.INSTANCE.convert(tableInfo);
initTableDefault(table);
return table;
}
|
@Test
public void testBuildTable() {
// prepare parameters
TableInfo tableInfo = mock(TableInfo.class);
// mock methods
when(tableInfo.getName()).thenReturn("system_user");
when(tableInfo.getComment()).thenReturn("用户");
// call
CodegenTableDO table = codegenBuilder.buildTable(tableInfo);
// assert
assertEquals("system_user", table.getTableName());
assertEquals("用户", table.getTableComment());
assertEquals("system", table.getModuleName());
assertEquals("user", table.getBusinessName());
assertEquals("User", table.getClassName());
assertEquals("用户", table.getClassComment());
}
|
public void clear() {
filter_.clear();
}
|
@Ignore
@Test
public void timeit() {
int size = 300 * FilterTest.ELEMENTS;
bf = new BloomFilter(size, FilterTest.spec.bucketsPerElement);
for (int i = 0; i < 10; i++) {
FilterTest.testFalsePositives(bf,
new KeyGenerator.RandomStringGenerator(new Random().nextInt(), size),
new KeyGenerator.RandomStringGenerator(new Random().nextInt(), size));
bf.clear();
}
}
|
public void clear() {
map.clear();
}
|
@Test
public void testClear() {
map.put("foo", () -> "foovalue");
map.put("bar", () -> "foovalue2");
map.put("baz", () -> "foovalue3");
map.clear();
assertEquals(0, map.size());
}
|
@Override
public boolean canHandleReturnType(Class returnType) {
return (Flux.class.isAssignableFrom(returnType)) || (Mono.class
.isAssignableFrom(returnType));
}
|
@Test
public void testCheckTypes() {
assertThat(reactorRetryAspectExt.canHandleReturnType(Mono.class)).isTrue();
assertThat(reactorRetryAspectExt.canHandleReturnType(Flux.class)).isTrue();
}
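// A hedged negative-case sketch (same aspect fixture assumed): a type that is
// neither Flux nor Mono should be rejected.
@Test
public void testCheckTypesNonReactive() {
assertThat(reactorRetryAspectExt.canHandleReturnType(String.class)).isFalse();
}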
|
public IsJson(Matcher<? super ReadContext> jsonMatcher) {
this.jsonMatcher = jsonMatcher;
}
|
@Test
public void shouldDescribeMismatchOfInvalidJson() {
Matcher<Object> matcher = isJson(withPathEvaluatedTo(true));
Description description = new StringDescription();
matcher.describeMismatch("invalid-json", description);
assertThat(description.toString(), containsString("\"invalid-json\""));
}
|
Double calculateAverage(List<Double> durationEntries) {
double sum = 0.0;
for (Double duration : durationEntries) {
sum = sum + duration;
}
if (sum == 0) {
return 0.0;
}
return sum / durationEntries.size();
}
|
@Test
void calculateAverageOfEmptylist() {
OutputStream out = new ByteArrayOutputStream();
UsageFormatter usageFormatter = new UsageFormatter(out);
Double result = usageFormatter.calculateAverage(Collections.emptyList());
assertThat(result, is(equalTo(0.0)));
}
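// A hedged companion sketch (same formatter fixture assumed; Arrays is an
// assumed import): a non-empty list should yield the arithmetic mean,
// here (1 + 2 + 3) / 3 = 2.
@Test
void calculateAverageOfThreeEntries() {
OutputStream out = new ByteArrayOutputStream();
UsageFormatter usageFormatter = new UsageFormatter(out);
Double result = usageFormatter.calculateAverage(Arrays.asList(1.0, 2.0, 3.0));
assertThat(result, is(equalTo(2.0)));
}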
|
public void logAddress(final DriverEventCode code, final InetSocketAddress address)
{
final int length = socketAddressLength(address);
final int captureLength = captureLength(length);
final int encodedLength = encodedLength(captureLength);
final ManyToOneRingBuffer ringBuffer = this.ringBuffer;
final int index = ringBuffer.tryClaim(toEventCodeId(code), encodedLength);
if (index > 0)
{
try
{
encode((UnsafeBuffer)ringBuffer.buffer(), index, captureLength, length, address);
}
finally
{
ringBuffer.commit(index);
}
}
}
|
@Test
void logAddress()
{
final int recordOffset = 64;
logBuffer.putLong(CAPACITY + TAIL_POSITION_OFFSET, recordOffset);
final DriverEventCode eventCode = NAME_RESOLUTION_NEIGHBOR_REMOVED;
final int captureLength = 12;
logger.logAddress(eventCode, new InetSocketAddress("localhost", 5656));
verifyLogHeader(logBuffer, recordOffset, toEventCodeId(eventCode), captureLength, captureLength);
assertEquals(5656, logBuffer.getInt(encodedMsgOffset(recordOffset + LOG_HEADER_LENGTH), LITTLE_ENDIAN));
assertEquals(4, logBuffer.getInt(
encodedMsgOffset(recordOffset + LOG_HEADER_LENGTH + SIZE_OF_INT), LITTLE_ENDIAN));
}
|
public static List<Transformation<?>> optimize(List<Transformation<?>> transformations) {
final Map<Transformation<?>, Set<Transformation<?>>> outputMap =
buildOutputMap(transformations);
final LinkedHashSet<Transformation<?>> chainedTransformations = new LinkedHashSet<>();
final Set<Transformation<?>> alreadyTransformed = Sets.newIdentityHashSet();
final Queue<Transformation<?>> toTransformQueue = Queues.newArrayDeque(transformations);
while (!toTransformQueue.isEmpty()) {
final Transformation<?> transformation = toTransformQueue.poll();
if (!alreadyTransformed.contains(transformation)) {
alreadyTransformed.add(transformation);
final ChainInfo chainInfo = chainWithInputIfPossible(transformation, outputMap);
chainedTransformations.add(chainInfo.newTransformation);
chainedTransformations.removeAll(chainInfo.oldTransformations);
alreadyTransformed.addAll(chainInfo.oldTransformations);
// Add the chained transformation and its inputs to the to-optimize list
toTransformQueue.add(chainInfo.newTransformation);
toTransformQueue.addAll(chainInfo.newTransformation.getInputs());
}
}
return new ArrayList<>(chainedTransformations);
}
|
@Test
void testChainingNonKeyedOperators() {
ExternalPythonProcessOperator<?, ?> processOperator1 =
createProcessOperator(
"f1", new RowTypeInfo(Types.INT(), Types.INT()), Types.STRING());
ExternalPythonProcessOperator<?, ?> processOperator2 =
createProcessOperator("f2", Types.STRING(), Types.INT());
Transformation<?> sourceTransformation = mock(SourceTransformation.class);
OneInputTransformation<?, ?> processTransformation1 =
new OneInputTransformation(
sourceTransformation,
"Process1",
processOperator1,
processOperator1.getProducedType(),
2);
Transformation<?> processTransformation2 =
new OneInputTransformation(
processTransformation1,
"process2",
processOperator2,
processOperator2.getProducedType(),
2);
List<Transformation<?>> transformations = new ArrayList<>();
transformations.add(sourceTransformation);
transformations.add(processTransformation1);
transformations.add(processTransformation2);
List<Transformation<?>> optimized =
PythonOperatorChainingOptimizer.optimize(transformations);
assertThat(optimized).hasSize(2);
OneInputTransformation<?, ?> chainedTransformation =
(OneInputTransformation<?, ?>) optimized.get(1);
assertThat(sourceTransformation.getOutputType())
.isEqualTo(chainedTransformation.getInputType());
assertThat(processOperator2.getProducedType())
.isEqualTo(chainedTransformation.getOutputType());
OneInputStreamOperator<?, ?> chainedOperator = chainedTransformation.getOperator();
assertThat(chainedOperator).isInstanceOf(ExternalPythonProcessOperator.class);
validateChainedPythonFunctions(
((ExternalPythonProcessOperator<?, ?>) chainedOperator).getPythonFunctionInfo(),
"f2",
"f1");
}
|
@Nullable
@Override
public Message decode(@Nonnull RawMessage rawMessage) {
final byte[] payload = rawMessage.getPayload();
final JsonNode event;
try {
event = objectMapper.readTree(payload);
if (event == null || event.isMissingNode()) {
throw new IOException("null result");
}
} catch (IOException e) {
LOG.error("Couldn't decode raw message {}", rawMessage);
return null;
}
return parseEvent(event);
}
|
@Test
public void decodeMessagesHandlesGenericBeatWithCloudTencent() throws Exception {
final Message message = codec.decode(messageFromJson("generic-with-cloud-tencent.json"));
assertThat(message).isNotNull();
assertThat(message.getMessage()).isEqualTo("-");
assertThat(message.getSource()).isEqualTo("unknown");
assertThat(message.getTimestamp()).isEqualTo(new DateTime(2016, 4, 1, 0, 0, DateTimeZone.UTC));
assertThat(message.getField("beats_type")).isEqualTo("beat");
assertThat(message.getField("beat_foo")).isEqualTo("bar");
assertThat(message.getField("beat_meta_cloud_provider")).isEqualTo("qcloud");
assertThat(message.getField("beat_meta_cloud_instance_id")).isEqualTo("ins-qcloudv5");
assertThat(message.getField("beat_meta_cloud_region")).isEqualTo("china-south-gz");
assertThat(message.getField("beat_meta_cloud_availability_zone")).isEqualTo("gz-azone2");
}
|
public synchronized Counter findCounter(String group, String name) {
if (name.equals("MAP_INPUT_BYTES")) {
LOG.warn("Counter name MAP_INPUT_BYTES is deprecated. " +
"Use FileInputFormatCounters as group name and " +
" BYTES_READ as counter name instead");
return findCounter(FileInputFormatCounter.BYTES_READ);
}
String newGroupKey = getNewGroupKey(group);
if (newGroupKey != null) {
group = newGroupKey;
}
return getGroup(group).getCounterForName(name);
}
|
@SuppressWarnings("deprecation")
@Test
public void testCounterValue() {
Counters counters = new Counters();
final int NUMBER_TESTS = 100;
final int NUMBER_INC = 10;
final Random rand = new Random();
for (int i = 0; i < NUMBER_TESTS; i++) {
long initValue = rand.nextInt();
long expectedValue = initValue;
Counter counter = counters.findCounter("foo", "bar");
counter.setValue(initValue);
assertEquals("Counter value is not initialized correctly",
expectedValue, counter.getValue());
for (int j = 0; j < NUMBER_INC; j++) {
int incValue = rand.nextInt();
counter.increment(incValue);
expectedValue += incValue;
assertEquals("Counter value is not incremented correctly",
expectedValue, counter.getValue());
}
expectedValue = rand.nextInt();
counter.setValue(expectedValue);
assertEquals("Counter value is not set correctly",
expectedValue, counter.getValue());
}
}
|
public static void deleteIfExists(final File file)
{
try
{
Files.deleteIfExists(file.toPath());
}
catch (final IOException ex)
{
LangUtil.rethrowUnchecked(ex);
}
}
|
@Test
void deleteIfExistsErrorHandlerEmptyDirectory() throws IOException
{
final ErrorHandler errorHandler = mock(ErrorHandler.class);
final Path dir = tempDir.resolve("dir");
Files.createDirectory(dir);
IoUtil.deleteIfExists(dir.toFile(), errorHandler);
assertFalse(Files.exists(dir));
verifyNoInteractions(errorHandler);
}
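// A hedged sketch for the single-argument overload shown above (same tempDir
// fixture assumed): deleting a non-existent file should be a silent no-op.
@Test
void deleteIfExistsIgnoresMissingFile()
{
final Path missing = tempDir.resolve("missing.txt");
IoUtil.deleteIfExists(missing.toFile());
assertFalse(Files.exists(missing));
}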
|
@ScalarOperator(GREATER_THAN_OR_EQUAL)
@SqlType(StandardTypes.BOOLEAN)
public static boolean greaterThanOrEqual(@SqlType(StandardTypes.BOOLEAN) boolean left, @SqlType(StandardTypes.BOOLEAN) boolean right)
{
return left || !right;
}
|
@Test
public void testGreaterThanOrEqual()
{
assertFunction("true >= true", BOOLEAN, true);
assertFunction("true >= false", BOOLEAN, true);
assertFunction("false >= true", BOOLEAN, false);
assertFunction("false >= false", BOOLEAN, true);
}
|
@VisibleForTesting
WxMaService getWxMaService(Integer userType) {
// Step 1: query the DB config entry and obtain the corresponding WxMaService object
SocialClientDO client = socialClientMapper.selectBySocialTypeAndUserType(
SocialTypeEnum.WECHAT_MINI_APP.getType(), userType);
if (client != null && Objects.equals(client.getStatus(), CommonStatusEnum.ENABLE.getStatus())) {
return wxMaServiceCache.getUnchecked(client.getClientId() + ":" + client.getClientSecret());
}
// Step 2: no DB config entry exists, so use the WxMaService configured in application-*.yaml
return wxMaService;
}
|
@Test
public void testGetWxMaService_clientEnable() {
// prepare parameters
Integer userType = randomPojo(UserTypeEnum.class).getValue();
// mock data
SocialClientDO client = randomPojo(SocialClientDO.class, o -> o.setStatus(CommonStatusEnum.ENABLE.getStatus())
.setUserType(userType).setSocialType(SocialTypeEnum.WECHAT_MINI_APP.getType()));
socialClientMapper.insert(client);
// mock methods
WxMaProperties.ConfigStorage configStorage = mock(WxMaProperties.ConfigStorage.class);
when(wxMaProperties.getConfigStorage()).thenReturn(configStorage);
// call
WxMaService result = socialClientService.getWxMaService(userType);
// assert
assertNotSame(wxMaService, result);
assertEquals(client.getClientId(), result.getWxMaConfig().getAppid());
assertEquals(client.getClientSecret(), result.getWxMaConfig().getSecret());
}
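// A hedged fallback-path sketch (same fixtures assumed; assertSame is an
// assumed import): with no enabled DB client record, the yaml-configured
// default WxMaService should be returned.
@Test
public void testGetWxMaService_clientNotFound() {
// prepare parameters
Integer userType = randomPojo(UserTypeEnum.class).getValue();
// call
WxMaService result = socialClientService.getWxMaService(userType);
// assert
assertSame(wxMaService, result);
}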
|
@Override
public void open() throws Exception {
super.open();
final String operatorID = getRuntimeContext().getOperatorUniqueID();
this.workerPool =
ThreadPools.newWorkerPool("iceberg-worker-pool-" + operatorID, workerPoolSize);
}
|
@TestTemplate
public void testMaxContinuousEmptyCommits() throws Exception {
table.updateProperties().set(MAX_CONTINUOUS_EMPTY_COMMITS, "3").commit();
JobID jobId = new JobID();
long checkpointId = 0;
long timestamp = 0;
try (OneInputStreamOperatorTestHarness<WriteResult, Void> harness = createStreamSink(jobId)) {
harness.setup();
harness.open();
assertSnapshotSize(0);
for (int i = 1; i <= 9; i++) {
harness.snapshot(++checkpointId, ++timestamp);
harness.notifyOfCompletedCheckpoint(checkpointId);
assertSnapshotSize(i / 3);
}
}
}
|
@Override
public Class<QGChangeNotification> getNotificationClass() {
return QGChangeNotification.class;
}
|
@Test
public void getNotificationClass_is_QGChangeNotification() {
assertThat(underTest.getNotificationClass()).isEqualTo(QGChangeNotification.class);
}
|
@Deprecated
public static String getJwt(JwtClaims claims) throws JoseException {
String jwt;
RSAPrivateKey privateKey = (RSAPrivateKey) getPrivateKey(
jwtConfig.getKey().getFilename(),jwtConfig.getKey().getPassword(), jwtConfig.getKey().getKeyName());
// A JWT is a JWS and/or a JWE with JSON claims as the payload.
// In this example it is a JWS, so we create a JsonWebSignature object.
JsonWebSignature jws = new JsonWebSignature();
// The payload of the JWS is JSON content of the JWT Claims
jws.setPayload(claims.toJson());
// The JWT is signed using the sender's private key
jws.setKey(privateKey);
// Get the provider id from the security config file; it should be two digits.
// It is prepended to the kid in the token header, for example: 05100.
// If no provider id is configured, an empty prefix is used.
String provider_id = "";
if (jwtConfig.getProviderId() != null) {
provider_id = jwtConfig.getProviderId();
if (provider_id.length() == 1) {
provider_id = "0" + provider_id;
} else if (provider_id.length() > 2) {
logger.error("provider_id defined in the security.yml file is invalid; the length should be 2");
provider_id = provider_id.substring(0, 2);
}
}
jws.setKeyIdHeaderValue(provider_id + jwtConfig.getKey().getKid());
// Set the signature algorithm on the JWT/JWS that will integrity protect the claims
jws.setAlgorithmHeaderValue(AlgorithmIdentifiers.RSA_USING_SHA256);
// Sign the JWS and produce the compact serialization, which will be the inner JWT/JWS
// representation, which is a string consisting of three dot ('.') separated
// base64url-encoded parts in the form Header.Payload.Signature
jwt = jws.getCompactSerialization();
return jwt;
}
|
@Test
public void longlivedTransferJwt() throws Exception {
JwtClaims claims = ClaimsUtil.getTestClaims("steve", "EMPLOYEE", "f7d42348-c647-4efb-a52d-4c5787421e72", Arrays.asList("etransfer.r", "etransfer.w"), "user");
claims.setExpirationTimeMinutesInTheFuture(5256000);
String jwt = JwtIssuer.getJwt(claims, long_kid, KeyUtil.deserializePrivateKey(long_key, KeyUtil.RSA));
System.out.println("***Long lived token for etransfer***: " + jwt);
}
|
@Override
public boolean hasAgent(String uuid) {
return agents.stream().anyMatch(agent -> agent.hasUuid(uuid));
}
|
@Test
void shouldReturnFalseIfEnvDoesNotContainTheSpecifiedAgentUuid() {
assertThat(this.environmentConfig.hasAgent("uuid")).isFalse();
}
|
@Override
public Set<ApplicationId> getAppIds() {
return ImmutableSet.copyOf(registeredIds.asJavaMap().values());
}
|
@Test(expected = NullPointerException.class)
public void testEmpty() {
idStore = new DistributedApplicationIdStore();
idStore.getAppIds();
}
|
public abstract Map<String, String> properties(final Map<String, String> defaultProperties, final long additionalRetentionMs);
|
@Test
public void shouldSetCreateTimeByDefaultForWindowedChangelog() {
final WindowedChangelogTopicConfig topicConfig = new WindowedChangelogTopicConfig("name", Collections.emptyMap(), 10);
final Map<String, String> properties = topicConfig.properties(Collections.emptyMap(), 0);
assertEquals("CreateTime", properties.get(TopicConfig.MESSAGE_TIMESTAMP_TYPE_CONFIG));
}
|
public AdSession updateAdSession(AdSession session, Map<String, Object> body) throws AdValidationException {
session.setAuthenticationLevel((Integer) body.get("authentication_level"));
session.setAuthenticationStatus((String) body.get("authentication_status"));
session.setBsn((String) body.get("bsn"));
session.setPolymorphIdentity(valueToStringOrNull(body, "polymorph_identity"));
session.setPolymorphPseudonym(valueToStringOrNull(body, "polymorph_pseudonym"));
BeanPropertyBindingResult result = new BeanPropertyBindingResult(session, "adSession");
ValidationUtils.invokeValidator(new AdSessionValidator(), session, result);
if (result.hasErrors()) {
throw new AdValidationException("AdSession validation error", result);
}
adSessionRepository.save(session);
return session;
}
|
@Test
public void updateAdSessionInvalid() {
HashMap<String, Object> body = new HashMap<>();
body.put("authentication_level", 1);
body.put("authentication_status", "Pending");
body.put("bsn", "PPPPPPP");
AdValidationException exception = assertThrows(AdValidationException.class, () -> adService.updateAdSession(new AdSession(), body));
assertEquals("AdSession validation error", exception.getLocalizedMessage());
assertEquals(3, exception.getDetails().getErrorCount());
}
|
@Override
public RLock readLock() {
return new RedissonReadLock(commandExecutor, getName());
}
|
@Test
public void testReadLockExpirationRenewal() throws InterruptedException {
int threadCount = 50;
ExecutorService executorService = Executors.newFixedThreadPool(threadCount/5);
AtomicInteger exceptions = new AtomicInteger();
for (int i=0; i<threadCount; i++) {
executorService.submit(()-> {
try {
RReadWriteLock rw1 = redisson.getReadWriteLock("mytestlock");
RLock readLock = rw1.readLock();
readLock.lock();
try {
Thread.sleep(redisson.getConfig().getLockWatchdogTimeout() + 5000);
} catch (InterruptedException e) {
e.printStackTrace();
}
readLock.unlock();
} catch (Exception e) {
exceptions.incrementAndGet();
e.printStackTrace();
}
});
}
executorService.shutdown();
assertThat(executorService.awaitTermination(180, TimeUnit.SECONDS)).isTrue();
assertThat(exceptions.get()).isZero();
}
|
@Override
protected String buildUndoSQL() {
TableRecords beforeImage = sqlUndoLog.getBeforeImage();
List<Row> beforeImageRows = beforeImage.getRows();
if (CollectionUtils.isEmpty(beforeImageRows)) {
throw new ShouldNeverHappenException("Invalid UNDO LOG");
}
Row row = beforeImageRows.get(0);
List<Field> fields = new ArrayList<>(row.nonPrimaryKeys());
fields.addAll(getOrderedPkList(beforeImage,row,JdbcConstants.ORACLE));
// For a DELETE undo log, all before-image fields come from the table meta, so escaping must be added.
// see BaseTransactionalExecutor#buildTableRecords
String insertColumns = fields.stream()
.map(field -> ColumnUtils.addEscape(field.getName(), JdbcConstants.ORACLE))
.collect(Collectors.joining(", "));
String insertValues = fields.stream().map(field -> "?")
.collect(Collectors.joining(", "));
return String.format(INSERT_SQL_TEMPLATE, sqlUndoLog.getTableName(), insertColumns, insertValues);
}
|
@Test
public void buildUndoSQL() {
OracleUndoDeleteExecutor executor = upperCase();
String sql = executor.buildUndoSQL();
Assertions.assertNotNull(sql);
Assertions.assertTrue(sql.contains("INSERT"));
Assertions.assertTrue(sql.contains("ID"));
Assertions.assertTrue(sql.contains("TABLE_NAME"));
}
|
@Override
public void getBytes(int index, byte[] dst) {
getBytes(index, dst, 0, dst.length);
}
|
@Test
void getByteArrayBoundaryCheck1() {
Assertions.assertThrows(IndexOutOfBoundsException.class, () -> buffer.getBytes(-1, new byte[0]));
}
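// A hedged second boundary sketch (assuming the same buffer fixture and a
// capacity() accessor): reading past the end of the buffer must also fail
// the bounds check.
@Test
void getByteArrayBoundaryCheck2() {
Assertions.assertThrows(IndexOutOfBoundsException.class, () -> buffer.getBytes(buffer.capacity(), new byte[1]));
}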
|
static KiePMMLNormContinuous getKiePMMLNormContinuous(final NormContinuous normContinuous) {
final List<KiePMMLLinearNorm> linearNorms = normContinuous.hasLinearNorms() ?
getKiePMMLLinearNorms(normContinuous.getLinearNorms()) : Collections.emptyList();
final OUTLIER_TREATMENT_METHOD outlierTreatmentMethod = normContinuous.getOutliers() != null ? OUTLIER_TREATMENT_METHOD.byName(normContinuous.getOutliers().value()) : null;
return new KiePMMLNormContinuous(normContinuous.getField(), getKiePMMLExtensions(normContinuous.getExtensions()), linearNorms, outlierTreatmentMethod, normContinuous.getMapMissingTo());
}
|
@Test
void getKiePMMLNormContinuous() {
final NormContinuous toConvert = getRandomNormContinuous();
final KiePMMLNormContinuous retrieved =
KiePMMLNormContinuousInstanceFactory.getKiePMMLNormContinuous(toConvert);
commonVerifyKiePMMLNormContinuous(retrieved, toConvert);
}
|
@Override
public Map<String, Long> call() throws Exception {
Map<String, Long> result = new LinkedHashMap<>();
for (DownloadableItem item : items) {
InputStreamWrapper stream = connectionProvider.getInputStreamForItem(jobId, item);
long size = stream.getBytes();
if (size <= 0) {
size = computeSize(stream);
}
result.put(item.getIdempotentId(), size);
}
return result;
}
|
@Test
public void testSizeIsNotProvided() throws Exception {
DownloadableItem item = createItem("1-" + nextInt(100, 9999));
int size = nextInt(1, 1024 * 1024 * 42); // 42MB max
byte[] bytes = new byte[size];
Arrays.fill(bytes, (byte) 0);
InputStream inputStream = new ByteArrayInputStream(bytes);
when(connectionProvider.getInputStreamForItem(eq(jobId), eq(item)))
.thenReturn(new InputStreamWrapper(inputStream, -1L));
Map<String, Long> expected = Collections.singletonMap(item.getIdempotentId(), (long) size);
Map<String, Long> actual = new CallableSizeCalculator(jobId, connectionProvider,
Collections.singleton(item)).call();
Truth.assertThat(actual).containsExactlyEntriesIn(expected);
// Make sure the input stream was read in full
int nextByte = inputStream.read();
Truth.assertThat(nextByte).isEqualTo(-1);
}
|
public static IntArrayList permutation(int size, Random rnd) {
IntArrayList result = iota(size);
shuffle(result, rnd);
return result;
}
|
@Test
public void testPermutation() {
IntArrayList list = ArrayUtil.permutation(15, new Random());
assertEquals(15, list.buffer.length);
assertEquals(15, list.elementsCount);
assertEquals(14 / 2.0 * (14 + 1), Arrays.stream(list.buffer).sum());
assertTrue(ArrayUtil.isPermutation(list));
}
|
public Future<Collection<Integer>> resizeAndReconcilePvcs(KafkaStatus kafkaStatus, List<PersistentVolumeClaim> pvcs) {
Set<Integer> podIdsToRestart = new HashSet<>();
List<Future<Void>> futures = new ArrayList<>(pvcs.size());
for (PersistentVolumeClaim desiredPvc : pvcs) {
Future<Void> perPvcFuture = pvcOperator.getAsync(reconciliation.namespace(), desiredPvc.getMetadata().getName())
.compose(currentPvc -> {
if (currentPvc == null || currentPvc.getStatus() == null || !"Bound".equals(currentPvc.getStatus().getPhase())) {
// This branch handles the following conditions:
// * The PVC doesn't exist yet, we should create it
// * The PVC is not Bound, we should reconcile it
return pvcOperator.reconcile(reconciliation, reconciliation.namespace(), desiredPvc.getMetadata().getName(), desiredPvc)
.map((Void) null);
} else if (currentPvc.getStatus().getConditions().stream().anyMatch(cond -> "Resizing".equals(cond.getType()) && "true".equals(cond.getStatus().toLowerCase(Locale.ENGLISH)))) {
// The PVC is Bound, but it is already resizing => Nothing to do, we should let it resize
LOGGER.debugCr(reconciliation, "The PVC {} is resizing, nothing to do", desiredPvc.getMetadata().getName());
return Future.succeededFuture();
} else if (currentPvc.getStatus().getConditions().stream().anyMatch(cond -> "FileSystemResizePending".equals(cond.getType()) && "true".equals(cond.getStatus().toLowerCase(Locale.ENGLISH)))) {
// The PVC is Bound and resized but waiting for FS resizing => We need to restart the pod which is using it
podIdsToRestart.add(getPodIndexFromPvcName(desiredPvc.getMetadata().getName()));
LOGGER.infoCr(reconciliation, "The PVC {} is waiting for file system resizing and the pod using it might need to be restarted.", desiredPvc.getMetadata().getName());
return Future.succeededFuture();
} else {
// The PVC is Bound and resizing is not in progress => We should check if the SC supports resizing and check if size changed
Long currentSize = StorageUtils.convertToMillibytes(currentPvc.getSpec().getResources().getRequests().get("storage"));
Long desiredSize = StorageUtils.convertToMillibytes(desiredPvc.getSpec().getResources().getRequests().get("storage"));
if (!currentSize.equals(desiredSize)) {
// The sizes are different => we should resize (shrinking will be handled in StorageDiff, so we do not need to check that)
return resizePvc(kafkaStatus, currentPvc, desiredPvc);
} else {
// size didn't change, just reconcile
return pvcOperator.reconcile(reconciliation, reconciliation.namespace(), desiredPvc.getMetadata().getName(), desiredPvc)
.map((Void) null);
}
}
});
futures.add(perPvcFuture);
}
return Future.all(futures)
.map(podIdsToRestart);
}
|
@Test
public void testVolumesBoundWithoutStorageClass(VertxTestContext context) {
List<PersistentVolumeClaim> pvcs = List.of(
createPvc("data-pod-0"),
createPvc("data-pod-1"),
createPvc("data-pod-2")
);
ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);
// Mock the PVC Operator
PvcOperator mockPvcOps = supplier.pvcOperations;
when(mockPvcOps.getAsync(eq(NAMESPACE), ArgumentMatchers.startsWith("data-")))
.thenAnswer(invocation -> {
String pvcName = invocation.getArgument(1);
PersistentVolumeClaim currentPvc = pvcs.stream().filter(pvc -> pvcName.equals(pvc.getMetadata().getName())).findFirst().orElse(null);
if (currentPvc != null) {
PersistentVolumeClaim pvcWithStatus = new PersistentVolumeClaimBuilder(currentPvc)
.editSpec()
.withStorageClassName(null)
.withNewResources()
.withRequests(Map.of("storage", new Quantity("50Gi", null)))
.endResources()
.endSpec()
.withNewStatus()
.withPhase("Bound")
.withCapacity(Map.of("storage", new Quantity("50Gi", null)))
.endStatus()
.build();
return Future.succeededFuture(pvcWithStatus);
} else {
return Future.succeededFuture();
}
});
ArgumentCaptor<PersistentVolumeClaim> pvcCaptor = ArgumentCaptor.forClass(PersistentVolumeClaim.class);
when(mockPvcOps.reconcile(any(), anyString(), anyString(), pvcCaptor.capture())).thenReturn(Future.succeededFuture());
// Reconcile the PVCs
PvcReconciler reconciler = new PvcReconciler(
new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME),
mockPvcOps,
supplier.storageClassOperations
);
// Used to capture the warning condition
KafkaStatus kafkaStatus = new KafkaStatus();
Checkpoint async = context.checkpoint();
reconciler.resizeAndReconcilePvcs(kafkaStatus, pvcs)
.onComplete(res -> {
assertThat(res.succeeded(), is(true));
assertThat(res.result().size(), is(0));
assertThat(pvcCaptor.getAllValues().size(), is(0));
assertThat(kafkaStatus.getConditions().size(), is(3));
kafkaStatus.getConditions().stream().forEach(c -> {
assertThat(c.getReason(), is("PvcResizingWarning"));
assertThat(c.getMessage(), containsString("does not use any Storage Class and cannot be resized."));
});
async.flag();
});
}
|
public static Node build(final List<JoinInfo> joins) {
Node root = null;
for (final JoinInfo join : joins) {
if (root == null) {
root = new Leaf(join.getLeftSource());
}
if (root.containsSource(join.getRightSource()) && root.containsSource(join.getLeftSource())) {
throw new KsqlException("Cannot perform circular join - both " + join.getRightSource()
+ " and " + join.getLeftJoinExpression()
+ " are already included in the current join tree: " + root.debugString(0));
} else if (root.containsSource(join.getLeftSource())) {
root = new Join(root, new Leaf(join.getRightSource()), join);
} else if (root.containsSource(join.getRightSource())) {
root = new Join(root, new Leaf(join.getLeftSource()), join.flip());
} else {
throw new KsqlException(
"Cannot build JOIN tree; neither source in the join is the FROM source or included "
+ "in a previous JOIN: " + join + ". The current join tree is "
+ root.debugString(0)
);
}
}
return root;
}
|
@Test
public void shouldIgnoreOuterJoinsWhenComputingEquivalenceSets() {
// Given:
when(j1.getLeftSource()).thenReturn(a);
when(j1.getRightSource()).thenReturn(b);
when(j2.getLeftSource()).thenReturn(a);
when(j2.getRightSource()).thenReturn(c);
when(j1.getType()).thenReturn(JoinType.OUTER);
when(j2.getLeftJoinExpression()).thenReturn(e1);
when(j2.getRightJoinExpression()).thenReturn(e3);
final List<JoinInfo> joins = ImmutableList.of(j1, j2);
// When:
final Node root = JoinTree.build(joins);
// Then:
assertThat(root.joinEquivalenceSet(), containsInAnyOrder(e1, e3));
}
|
@Nullable static String lastStringHeader(Headers headers, String key) {
Header header = headers.lastHeader(key);
if (header == null || header.value() == null) return null;
return new String(header.value(), UTF_8);
}
|
@Test void lastStringHeader_null() {
assertThat(KafkaHeaders.lastStringHeader(record.headers(), "b3")).isNull();
}
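// A hedged positive-case sketch (assuming the record fixture above exposes
// mutable Kafka headers): the bytes of the last matching header should be
// decoded as UTF-8.
@Test void lastStringHeader_present() {
record.headers().add("b3", "1".getBytes(UTF_8));
assertThat(KafkaHeaders.lastStringHeader(record.headers(), "b3")).isEqualTo("1");
}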
|
@Override
public int getInt(final int columnIndex) throws SQLException {
return (int) ResultSetUtils.convertValue(mergeResultSet.getValue(columnIndex, int.class), int.class);
}
|
@Test
void assertGetIntWithColumnIndex() throws SQLException {
when(mergeResultSet.getValue(1, int.class)).thenReturn(1);
assertThat(shardingSphereResultSet.getInt(1), is(1));
}
|
public static String toHexStringNoPrefix(BigInteger value) {
return value.toString(16);
}
|
@Test
public void testToHexStringNoPrefix() {
assertEquals(Numeric.toHexStringNoPrefix(BigInteger.TEN), ("a"));
}
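// A hedged edge-case sketch: BigInteger.toString(16) renders zero as "0",
// with no padding or prefix.
@Test
public void testToHexStringNoPrefixZero() {
assertEquals(Numeric.toHexStringNoPrefix(BigInteger.ZERO), ("0"));
}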
|
@Override
public long getStartTs() {
return _startTs;
}
|
@Test
public void testGetStartTs() {
MinionProgressObserver observer = new MinionProgressObserver(3);
long ts1 = System.currentTimeMillis();
observer.notifyTaskStart(null);
long ts = observer.getStartTs();
long ts2 = System.currentTimeMillis();
assertTrue(ts1 <= ts);
assertTrue(ts2 >= ts);
}
|
@Description("encode binary data as hex")
@ScalarFunction
@SqlType(StandardTypes.VARCHAR)
public static Slice toHex(@SqlType(StandardTypes.VARBINARY) Slice slice)
{
String encoded;
if (slice.hasByteArray()) {
encoded = BaseEncoding.base16().encode(slice.byteArray(), slice.byteArrayOffset(), slice.length());
}
else {
encoded = BaseEncoding.base16().encode(slice.getBytes());
}
return Slices.utf8Slice(encoded);
}
|
@Test
public void testToHex()
{
assertFunction("to_hex(CAST('' AS VARBINARY))", VARCHAR, encodeHex(""));
assertFunction("to_hex(CAST('a' AS VARBINARY))", VARCHAR, encodeHex("a"));
assertFunction("to_hex(CAST('abc' AS VARBINARY))", VARCHAR, encodeHex("abc"));
assertFunction("to_hex(CAST('hello world' AS VARBINARY))", VARCHAR, "68656C6C6F20776F726C64");
}
|
@Override
public Connection connect(String url, Properties info) throws SQLException {
// calciteConnection is initialized with an empty Beam schema,
// we need to populate it with pipeline options, load table providers, etc
return JdbcConnection.initialize((CalciteConnection) super.connect(url, info));
}
|
@Test
public void testInternalConnect_bounded_limit() throws Exception {
ReadOnlyTableProvider tableProvider =
new ReadOnlyTableProvider(
"test",
ImmutableMap.of(
"test",
TestBoundedTable.of(
Schema.FieldType.INT32, "id",
Schema.FieldType.STRING, "name")
.addRows(1, "first")
.addRows(1, "second first")
.addRows(2, "second")));
CalciteConnection connection =
JdbcDriver.connect(tableProvider, PipelineOptionsFactory.create());
Statement statement = connection.createStatement();
ResultSet resultSet1 = statement.executeQuery("SELECT * FROM test LIMIT 5");
assertTrue(resultSet1.next());
assertTrue(resultSet1.next());
assertTrue(resultSet1.next());
assertFalse(resultSet1.next());
assertFalse(resultSet1.next());
ResultSet resultSet2 = statement.executeQuery("SELECT * FROM test LIMIT 1");
assertTrue(resultSet2.next());
assertFalse(resultSet2.next());
ResultSet resultSet3 = statement.executeQuery("SELECT * FROM test LIMIT 2");
assertTrue(resultSet3.next());
assertTrue(resultSet3.next());
assertFalse(resultSet3.next());
ResultSet resultSet4 = statement.executeQuery("SELECT * FROM test LIMIT 3");
assertTrue(resultSet4.next());
assertTrue(resultSet4.next());
assertTrue(resultSet4.next());
assertFalse(resultSet4.next());
}
|
@Override
public OUT nextRecord(OUT record) throws IOException {
OUT returnRecord = null;
do {
returnRecord = super.nextRecord(record);
} while (returnRecord == null && !reachedEnd());
return returnRecord;
}
|
@Test
void testPojoTypeWithTrailingEmptyFields() throws Exception {
final String fileContent = "123,,3.123,,\n456,BBB,3.23,,";
final FileInputSplit split = createTempFile(fileContent);
@SuppressWarnings("unchecked")
PojoTypeInfo<PrivatePojoItem> typeInfo =
(PojoTypeInfo<PrivatePojoItem>) TypeExtractor.createTypeInfo(PrivatePojoItem.class);
CsvInputFormat<PrivatePojoItem> inputFormat = new PojoCsvInputFormat<>(PATH, typeInfo);
inputFormat.configure(new Configuration());
inputFormat.open(split);
PrivatePojoItem item = new PrivatePojoItem();
inputFormat.nextRecord(item);
assertThat(item.field1).isEqualTo(123);
assertThat(item.field2).isEmpty();
assertThat(item.field3).isEqualTo(Double.valueOf(3.123));
assertThat(item.field4).isEmpty();
inputFormat.nextRecord(item);
assertThat(item.field1).isEqualTo(456);
assertThat(item.field2).isEqualTo("BBB");
assertThat(item.field3).isEqualTo(Double.valueOf(3.23));
assertThat(item.field4).isEmpty();
}
|
@VisibleForTesting
Object[] callHttpService( RowMetaInterface rowMeta, Object[] rowData ) throws KettleException {
HttpClientManager.HttpClientBuilderFacade clientBuilder = HttpClientManager.getInstance().createBuilder();
if ( data.realConnectionTimeout > -1 ) {
clientBuilder.setConnectionTimeout( data.realConnectionTimeout );
}
if ( data.realSocketTimeout > -1 ) {
clientBuilder.setSocketTimeout( data.realSocketTimeout );
}
if ( StringUtils.isNotBlank( data.realHttpLogin ) ) {
clientBuilder.setCredentials( data.realHttpLogin, data.realHttpPassword );
}
if ( StringUtils.isNotBlank( data.realProxyHost ) ) {
clientBuilder.setProxy( data.realProxyHost, data.realProxyPort );
}
CloseableHttpClient httpClient = clientBuilder.build();
// Prepare HTTP get
URI uri = null;
try {
URIBuilder uriBuilder = constructUrlBuilder( rowMeta, rowData );
uri = uriBuilder.build();
HttpGet method = new HttpGet( uri );
// Add Custom HTTP headers
if ( data.useHeaderParameters ) {
for ( int i = 0; i < data.header_parameters_nrs.length; i++ ) {
method.addHeader( data.headerParameters[ i ].getName(), data.inputRowMeta.getString( rowData,
data.header_parameters_nrs[ i ] ) );
if ( isDebug() ) {
log.logDebug( BaseMessages.getString( PKG, "HTTPDialog.Log.HeaderValue",
data.headerParameters[ i ].getName(), data.inputRowMeta
.getString( rowData, data.header_parameters_nrs[ i ] ) ) );
}
}
}
Object[] newRow = null;
if ( rowData != null ) {
newRow = rowData.clone();
}
// Execute request
CloseableHttpResponse httpResponse = null;
try {
// used for calculating the responseTime
long startTime = System.currentTimeMillis();
HttpHost target = new HttpHost( uri.getHost(), uri.getPort(), uri.getScheme() );
// Create AuthCache instance
AuthCache authCache = new BasicAuthCache();
// Generate BASIC scheme object and add it to the local
// auth cache
BasicScheme basicAuth = new BasicScheme();
authCache.put( target, basicAuth );
// Add AuthCache to the execution context
HttpClientContext localContext = HttpClientContext.create();
localContext.setAuthCache( authCache );
// Preemptive authentication
if ( StringUtils.isNotBlank( data.realProxyHost ) ) {
httpResponse = httpClient.execute( target, method, localContext );
} else {
httpResponse = httpClient.execute( method, localContext );
}
// calculate the responseTime
long responseTime = System.currentTimeMillis() - startTime;
if ( log.isDetailed() ) {
log.logDetailed( BaseMessages.getString( PKG, "HTTP.Log.ResponseTime", responseTime, uri ) );
}
int statusCode = requestStatusCode( httpResponse );
// The status code
if ( isDebug() ) {
logDebug( BaseMessages.getString( PKG, "HTTP.Log.ResponseStatusCode", "" + statusCode ) );
}
String body;
switch ( statusCode ) {
case HttpURLConnection.HTTP_UNAUTHORIZED:
throw new KettleStepException( BaseMessages
.getString( PKG, "HTTP.Exception.Authentication", data.realUrl ) );
case -1:
throw new KettleStepException( BaseMessages
.getString( PKG, "HTTP.Exception.IllegalStatusCode", data.realUrl ) );
case HttpURLConnection.HTTP_NO_CONTENT:
body = "";
break;
default:
HttpEntity entity = httpResponse.getEntity();
if ( entity != null ) {
body = StringUtils.isEmpty( meta.getEncoding() ) ? EntityUtils.toString( entity ) : EntityUtils.toString( entity, meta.getEncoding() );
} else {
body = "";
}
break;
}
Header[] headers = searchForHeaders( httpResponse );
JSONObject json = new JSONObject();
for ( Header header : headers ) {
Object previousValue = json.get( header.getName() );
if ( previousValue == null ) {
json.put( header.getName(), header.getValue() );
} else if ( previousValue instanceof List ) {
List<String> list = (List<String>) previousValue;
list.add( header.getValue() );
} else {
ArrayList<String> list = new ArrayList<String>();
list.add( (String) previousValue );
list.add( header.getValue() );
json.put( header.getName(), list );
}
}
String headerString = json.toJSONString();
int returnFieldsOffset = rowMeta.size();
if ( !Utils.isEmpty( meta.getFieldName() ) ) {
newRow = RowDataUtil.addValueData( newRow, returnFieldsOffset, body );
returnFieldsOffset++;
}
if ( !Utils.isEmpty( meta.getResultCodeFieldName() ) ) {
        newRow = RowDataUtil.addValueData( newRow, returnFieldsOffset, Long.valueOf( statusCode ) );
returnFieldsOffset++;
}
if ( !Utils.isEmpty( meta.getResponseTimeFieldName() ) ) {
        newRow = RowDataUtil.addValueData( newRow, returnFieldsOffset, Long.valueOf( responseTime ) );
returnFieldsOffset++;
}
if ( !Utils.isEmpty( meta.getResponseHeaderFieldName() ) ) {
newRow = RowDataUtil.addValueData( newRow, returnFieldsOffset, headerString );
}
} finally {
if ( httpResponse != null ) {
httpResponse.close();
}
// Release current connection to the connection pool once you are done
method.releaseConnection();
}
return newRow;
} catch ( UnknownHostException uhe ) {
throw new KettleException( BaseMessages.getString( PKG, "HTTP.Error.UnknownHostException", uhe.getMessage() ) );
} catch ( Exception e ) {
throw new KettleException( BaseMessages.getString( PKG, "HTTP.Log.UnableGetResult", uri ), e );
}
}
|
@Test
public void callHttpServiceWithUTF8Encoding() throws Exception {
try ( MockedStatic<HttpClientManager> httpClientManagerMockedStatic = mockStatic( HttpClientManager.class ) ) {
httpClientManagerMockedStatic.when( HttpClientManager::getInstance ).thenReturn( manager );
doReturn( "UTF-8" ).when( meta ).getEncoding();
assertEquals( DATA, http.callHttpService( rmi, new Object[] { 0 } )[ 0 ] );
}
}
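// A standalone sketch of the header-folding rule used in callHttpService above (hypothetical
// header values, not the author's test): the first occurrence of a header is stored as a
// scalar, later occurrences promote the entry to a JSON array.
@Test
public void foldsRepeatedHeadersIntoJsonArray() {
  JSONObject json = new JSONObject();
  for ( String value : new String[] { "a=1", "b=2" } ) {
    Object previous = json.get( "Set-Cookie" );
    if ( previous == null ) {
      json.put( "Set-Cookie", value );
    } else if ( previous instanceof List ) {
      ( (List<String>) previous ).add( value );
    } else {
      List<String> list = new ArrayList<>( Arrays.asList( (String) previous, value ) );
      json.put( "Set-Cookie", list );
    }
  }
  assertEquals( "{\"Set-Cookie\":[\"a=1\",\"b=2\"]}", json.toJSONString() );
}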
|
public final void addTransactionSigner(TransactionSigner signer) {
lock.lock();
try {
if (signer.isReady())
signers.add(signer);
else
throw new IllegalStateException("Signer instance is not ready to be added into Wallet: " + signer.getClass());
} finally {
lock.unlock();
}
}
|
@Test(expected = IllegalStateException.class)
public void shouldNotAddTransactionSignerThatIsNotReady() {
wallet.addTransactionSigner(new NopTransactionSigner(false));
}
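// Companion sketch (assumption: NopTransactionSigner(true) reports isReady() == true,
// mirroring the helper used above): a ready signer is accepted without throwing.
@Test
public void shouldAddTransactionSignerThatIsReady() {
    wallet.addTransactionSigner(new NopTransactionSigner(true));
}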
|
public void validate(List<String> values, String type, List<String> options) {
TypeValidation typeValidation = findByKey(type);
for (String value : values) {
typeValidation.validate(value, options);
}
}
|
@Test
public void fail_on_unknown_type() {
TypeValidation fakeTypeValidation = mock(TypeValidation.class);
when(fakeTypeValidation.key()).thenReturn("Fake");
try {
TypeValidations typeValidations = new TypeValidations(newArrayList(fakeTypeValidation));
typeValidations.validate("10", "Unknown", null);
fail();
} catch (Exception e) {
assertThat(e).isInstanceOf(BadRequestException.class);
BadRequestException badRequestException = (BadRequestException) e;
assertThat(badRequestException.getMessage()).isEqualTo("Type 'Unknown' is not valid.");
}
}
|
void handleStatement(final QueuedCommand queuedCommand) {
throwIfNotConfigured();
handleStatementWithTerminatedQueries(
queuedCommand.getAndDeserializeCommand(commandDeserializer),
queuedCommand.getAndDeserializeCommandId(),
queuedCommand.getStatus(),
Mode.EXECUTE,
queuedCommand.getOffset(),
false
);
}
|
@Test
public void restartsRuntimeWhenAlterSystemIsSuccessful() {
// Given:
final String alterSystemQuery = "ALTER SYSTEM 'TEST'='TEST';";
when(mockParser.parseSingleStatement(alterSystemQuery))
.thenReturn(statementParser.parseSingleStatement(alterSystemQuery));
final Command alterSystemCommand = new Command(
"ALTER SYSTEM 'TEST'='TEST';",
emptyMap(),
ksqlConfig.getAllConfigPropsWithSecretsObfuscated(),
Optional.empty()
);
// When:
handleStatement(statementExecutorWithMocks, alterSystemCommand, COMMAND_ID, Optional.empty(), 0L);
// Then:
verify(mockEngine).updateStreamsPropertiesAndRestartRuntime();
}
|
public static long write(InputStream is, OutputStream os) throws IOException {
return write(is, os, BUFFER_SIZE);
}
|
@Test
void testWrite2() throws Exception {
assertThat((int) IOUtils.write(reader, writer, 16), equalTo(TEXT.length()));
}
|
@Override
public DescriptiveUrl toDownloadUrl(final Path file, final Sharee sharee, CreateDownloadShareRequest options, final PasswordCallback callback) throws BackgroundException {
try {
if(log.isDebugEnabled()) {
log.debug(String.format("Create download share for %s", file));
}
if(null == options) {
options = new CreateDownloadShareRequest();
log.warn(String.format("Use default share options %s", options));
}
final Long fileid = Long.parseLong(nodeid.getVersionId(file));
final Host bookmark = session.getHost();
if(new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(file)) {
// get existing file key associated with the sharing user
final FileKey key = new NodesApi(session.getClient()).requestUserFileKey(fileid, null, null);
final EncryptedFileKey encFileKey = TripleCryptConverter.toCryptoEncryptedFileKey(key);
final UserKeyPairContainer keyPairContainer = session.getKeyPairForFileKey(encFileKey.getVersion());
final UserKeyPair userKeyPair = TripleCryptConverter.toCryptoUserKeyPair(keyPairContainer);
final Credentials passphrase = new TripleCryptKeyPair().unlock(callback, bookmark, userKeyPair);
final PlainFileKey plainFileKey = Crypto.decryptFileKey(encFileKey, userKeyPair.getUserPrivateKey(), passphrase.getPassword().toCharArray());
// encrypt file key with a new key pair
final UserKeyPair pair;
if(null == options.getPassword()) {
pair = Crypto.generateUserKeyPair(session.requiredKeyPairVersion(), callback.prompt(
bookmark, LocaleFactory.localizedString("Passphrase", "Cryptomator"),
LocaleFactory.localizedString("Provide additional login credentials", "Credentials"), new LoginOptions().icon(session.getHost().getProtocol().disk())
).getPassword().toCharArray());
}
else {
pair = Crypto.generateUserKeyPair(session.requiredKeyPairVersion(), options.getPassword().toCharArray());
}
final EncryptedFileKey encryptedFileKey = Crypto.encryptFileKey(plainFileKey, pair.getUserPublicKey());
options.setPassword(null);
options.setKeyPair(TripleCryptConverter.toSwaggerUserKeyPairContainer(pair));
options.setFileKey(TripleCryptConverter.toSwaggerFileKey(encryptedFileKey));
}
final DownloadShare share = new SharesApi(session.getClient()).createDownloadShare(
options.nodeId(fileid), StringUtils.EMPTY, null);
final String help;
if(null == share.getExpireAt()) {
help = MessageFormat.format(LocaleFactory.localizedString("{0} URL"), LocaleFactory.localizedString("Pre-Signed", "S3"));
}
else {
final long expiry = share.getExpireAt().getMillis();
help = MessageFormat.format(LocaleFactory.localizedString("{0} URL"), LocaleFactory.localizedString("Pre-Signed", "S3")) + " (" + MessageFormat.format(LocaleFactory.localizedString("Expires {0}", "S3") + ")",
UserDateFormatterFactory.get().getShortFormat(expiry * 1000)
);
}
final Matcher matcher = Pattern.compile(SDSSession.VERSION_REGEX).matcher(session.softwareVersion().getRestApiVersion());
if(matcher.matches()) {
if(new Version(matcher.group(1)).compareTo(new Version("4.26")) < 0) {
return new DescriptiveUrl(URI.create(String.format("%s://%s/#/public/shares-downloads/%s",
bookmark.getProtocol().getScheme(),
bookmark.getHostname(),
share.getAccessKey())),
DescriptiveUrl.Type.signed, help);
}
}
return new DescriptiveUrl(URI.create(String.format("%s://%s/public/download-shares/%s",
bookmark.getProtocol().getScheme(),
bookmark.getHostname(),
share.getAccessKey())),
DescriptiveUrl.Type.signed, help);
}
catch(ApiException e) {
throw new SDSExceptionMappingService(nodeid).map(e);
}
catch(CryptoException e) {
throw new TripleCryptExceptionMappingService().map(e);
}
}
|
@Test
public void testShareFile() throws Exception {
final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
final Path room = new SDSDirectoryFeature(session, nodeid).mkdir(new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final Path test = new SDSTouchFeature(session, nodeid).touch(new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
final DescriptiveUrl url = new SDSShareFeature(session, nodeid).toDownloadUrl(test,
Share.Sharee.world, new CreateDownloadShareRequest()
.expiration(new ObjectExpiration().enableExpiration(false))
.notifyCreator(false)
.sendMail(false)
.sendSms(false)
.password(null)
.mailRecipients(null)
.mailSubject(null)
.mailBody(null)
.maxDownloads(null), new DisabledPasswordCallback());
assertNotEquals(DescriptiveUrl.EMPTY, url);
assertEquals(DescriptiveUrl.Type.signed, url.getType());
assertTrue(url.getUrl().startsWith("https://duck.dracoon.com/public/download-shares/"));
new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
@Override
// Camel calls this method if the endpoint isSynchronous(), as the
// KafkaEndpoint creates a SynchronousDelegateProducer for it
public void process(Exchange exchange) throws Exception {
// is the message body a list or something that contains multiple values
Message message = exchange.getIn();
if (transactionId != null) {
startKafkaTransaction(exchange);
}
if (endpoint.getConfiguration().isUseIterator() && isIterable(message.getBody())) {
processIterableSync(exchange, message);
} else {
processSingleMessageSync(exchange, message);
}
}
|
@Test
public void processSendsMessageWithTopicHeaderAndNoTopicInEndPoint() throws Exception {
endpoint.getConfiguration().setTopic(null);
Mockito.when(exchange.getIn()).thenReturn(in);
in.setHeader(KafkaConstants.TOPIC, "anotherTopic");
Mockito.when(exchange.getMessage()).thenReturn(in);
producer.process(exchange);
verifySendMessage("sometopic");
assertRecordMetadataExists();
}
|
@Override
public SchemaAndValue toConnectData(String topic, byte[] value) {
JsonNode jsonValue;
// This handles a tombstone message
if (value == null) {
return SchemaAndValue.NULL;
}
try {
jsonValue = deserializer.deserialize(topic, value);
} catch (SerializationException e) {
throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e);
}
if (config.schemasEnabled() && (!jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME) || !jsonValue.has(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME)))
throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." +
" If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration.");
// The deserialized data should either be an envelope object containing the schema and the payload or the schema
// was stripped during serialization and we need to fill in an all-encompassing schema.
if (!config.schemasEnabled()) {
ObjectNode envelope = JSON_NODE_FACTORY.objectNode();
envelope.set(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME, null);
envelope.set(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME, jsonValue);
jsonValue = envelope;
}
Schema schema = asConnectSchema(jsonValue.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
return new SchemaAndValue(
schema,
convertToConnect(schema, jsonValue.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME), config)
);
}
|
@Test
public void intToConnect() {
assertEquals(new SchemaAndValue(Schema.INT32_SCHEMA, 12), converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"int32\" }, \"payload\": 12 }".getBytes()));
}
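// Tombstone sketch (uses the same converter instance as above): a null payload must map to
// SchemaAndValue.NULL rather than throwing, per the null check in toConnectData.
@Test
public void nullToConnect() {
    assertEquals(SchemaAndValue.NULL, converter.toConnectData(TOPIC, null));
}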
|
public void upgrade() {
viewService.streamAll().forEach(view -> {
final Optional<User> user = view.owner().map(userService::load);
if (user.isPresent() && !user.get().isLocalAdmin()) {
final GRNType grnType = ViewDTO.Type.DASHBOARD.equals(view.type()) ? GRNTypes.DASHBOARD : GRNTypes.SEARCH;
final GRN target = grnType.toGRN(view.id());
ensureGrant(user.get(), target);
}
});
}
|
@Test
@DisplayName("migrate existing owner")
void migrateExistingOwner() {
final GRN testuserGRN = GRNTypes.USER.toGRN("testuser");
final GRN dashboard = GRNTypes.DASHBOARD.toGRN("54e3deadbeefdeadbeef0002");
final User testuser = mock(User.class);
when(testuser.getName()).thenReturn("testuser");
when(testuser.getId()).thenReturn("testuser");
final User adminuser = mock(User.class);
when(adminuser.isLocalAdmin()).thenReturn(true);
when(userService.load("testuser")).thenReturn(testuser);
when(userService.load("admin")).thenReturn(adminuser);
migration.upgrade();
assertThat(grantService.hasGrantFor(testuserGRN, Capability.OWN, dashboard)).isTrue();
}
|
@Override
public String getGroupKeyString(int rowIndex, int groupKeyColumnIndex) {
throw new AssertionError("No grouping key for queries without a group by clause");
}
|
@Test(expectedExceptions = AssertionError.class)
public void testGetGroupKeyString() {
_aggregationResultSetUnderTest.getGroupKeyString(0, 0);
}
|
@Override
public JdbcDialect create() {
throw new UnsupportedOperationException(
"Can't create JdbcDialect without compatible mode for Hive");
}
|
@Test
public void testWithCompatibleMode() {
HiveDialectFactory hiveDialectFactory = new HiveDialectFactory();
JdbcDialect inceptorDialect = hiveDialectFactory.create("inceptor", "");
Assertions.assertTrue(inceptorDialect instanceof InceptorDialect);
JdbcDialect hiveDialect = hiveDialectFactory.create("", "");
Assertions.assertTrue(hiveDialect instanceof HiveDialect);
}
|
public boolean checkValid(String connectionId) {
return connections.containsKey(connectionId);
}
|
@Test
void testCheckValid() {
assertTrue(connectionManager.checkValid(connectId));
}
|
@Override
public boolean addAll(Collection<? extends E> c) {
checkNotNull(c, "The collection to be added cannot be null.");
boolean changed = false;
for (Object item : c) {
changed = items.add(serializer.encode(item)) || changed;
}
return changed;
}
|
@Test
public void testAddAll() throws Exception {
//Test multi-adds with change checking
Set<Integer> integersToCheck = Sets.newHashSet();
fillSet(10, integersToCheck);
assertFalse("Set should be empty and so integers to check should not be a subset.",
set.containsAll(integersToCheck));
assertTrue("The set should have changed as a result of add all.", set.addAll(integersToCheck));
assertFalse("The set should not have changed as a result of add all a second time.",
set.addAll(integersToCheck));
assertTrue("The sets should now be equivalent.", set.containsAll(integersToCheck));
assertTrue("The sets should now be equivalent.", integersToCheck.containsAll(set));
}
|
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
try {
if(file.getType().contains(Path.Type.upload)) {
return new NullInputStream(0L);
}
final HttpRange range = HttpRange.withStatus(status);
final RequestEntityRestStorageService client = session.getClient();
final Map<String, Object> requestHeaders = new HashMap<>();
final Map<String, String> requestParameters = new HashMap<>();
if(file.attributes().getVersionId() != null) {
requestParameters.put("versionId", file.attributes().getVersionId());
}
if(status.isAppend()) {
final String header;
if(TransferStatus.UNKNOWN_LENGTH == range.getEnd()) {
header = String.format("bytes=%d-", range.getStart());
}
else {
header = String.format("bytes=%d-%d", range.getStart(), range.getEnd());
}
if(log.isDebugEnabled()) {
log.debug(String.format("Add range header %s for file %s", header, file));
}
requestHeaders.put(HttpHeaders.RANGE, header);
}
final Path bucket = containerService.getContainer(file);
final HttpResponse response = client.performRestGet(bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(),
containerService.getKey(file), requestParameters, requestHeaders, new int[]{HttpStatus.SC_PARTIAL_CONTENT, HttpStatus.SC_OK});
return new HttpMethodReleaseInputStream(response, status);
}
catch(ServiceException e) {
throw new S3ExceptionMappingService().map("Download {0} failed", e, file);
}
catch(IOException e) {
throw new DefaultIOExceptionMappingService().map("Download {0} failed", e, file);
}
}
|
@Test
public void testReadGzipContentEncoding() throws Exception {
final ByteArrayOutputStream compressedStream = new ByteArrayOutputStream();
final byte[] rawContent = RandomUtils.nextBytes(1457);
final GZIPOutputStream gzipOutputStream = new GZIPOutputStream(compressedStream);
gzipOutputStream.write(rawContent);
        // Close to finish the GZIP stream and flush the trailer bytes
gzipOutputStream.close();
final byte[] compressedContent = compressedStream.toByteArray();
final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
final Path file = new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final TransferStatus status = new TransferStatus().withLength(compressedContent.length);
status.setMetadata(Collections.singletonMap(HttpHeaders.CONTENT_ENCODING, "gzip"));
assertNotEquals(TransferStatus.UNKNOWN_LENGTH, status.getLength());
status.setChecksum(new SHA256ChecksumCompute().compute(new ByteArrayInputStream(compressedContent), status));
final OutputStream out = new S3WriteFeature(session, new S3AccessControlListFeature(session)).write(file, status, new DisabledConnectionCallback());
new StreamCopier(new TransferStatus(), new TransferStatus()).transfer(new ByteArrayInputStream(compressedContent), out);
final InputStream in = new S3ReadFeature(session).read(file, status, new DisabledConnectionCallback());
assertNotNull(in);
assertEquals(TransferStatus.UNKNOWN_LENGTH, status.getLength());
final BytecountStreamListener count = new BytecountStreamListener();
final ByteArrayOutputStream received = new ByteArrayOutputStream();
new StreamCopier(status, status).withListener(count).transfer(in, received);
assertEquals(rawContent.length, count.getRecv());
assertArrayEquals(rawContent, received.toByteArray());
in.close();
new S3DefaultDeleteFeature(session).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
@Override
public HttpServletRequest readRequest(AwsProxyRequest request, SecurityContext securityContext, Context lambdaContext, ContainerConfig config)
throws InvalidRequestEventException {
// Expect the HTTP method and context to be populated. If they are not, we are handling an
// unsupported event type.
if (request.getHttpMethod() == null || request.getHttpMethod().equals("") || request.getRequestContext() == null) {
throw new InvalidRequestEventException(INVALID_REQUEST_ERROR);
}
request.setPath(stripBasePath(request.getPath(), config));
if (request.getMultiValueHeaders() != null && request.getMultiValueHeaders().getFirst(HttpHeaders.CONTENT_TYPE) != null) {
String contentType = request.getMultiValueHeaders().getFirst(HttpHeaders.CONTENT_TYPE);
// put single as we always expect to have one and only one content type in a request.
request.getMultiValueHeaders().putSingle(HttpHeaders.CONTENT_TYPE, getContentTypeWithCharset(contentType, config));
}
AwsProxyHttpServletRequest servletRequest = new AwsProxyHttpServletRequest(request, lambdaContext, securityContext, config);
servletRequest.setServletContext(servletContext);
servletRequest.setAttribute(API_GATEWAY_CONTEXT_PROPERTY, request.getRequestContext());
servletRequest.setAttribute(API_GATEWAY_STAGE_VARS_PROPERTY, request.getStageVariables());
servletRequest.setAttribute(API_GATEWAY_EVENT_PROPERTY, request);
servletRequest.setAttribute(ALB_CONTEXT_PROPERTY, request.getRequestContext().getElb());
servletRequest.setAttribute(LAMBDA_CONTEXT_PROPERTY, lambdaContext);
servletRequest.setAttribute(JAX_SECURITY_CONTEXT_PROPERTY, securityContext);
return servletRequest;
}
|
@Test
void readRequest_emptyHeaders_expectSuccess() {
AwsProxyRequest req = new AwsProxyRequestBuilder("/path", "GET").build();
try {
HttpServletRequest servletReq = reader.readRequest(req, null, null, ContainerConfig.defaultConfig());
String headerValue = servletReq.getHeader(HttpHeaders.CONTENT_TYPE);
assertNull(headerValue);
} catch (InvalidRequestEventException e) {
e.printStackTrace();
fail("Failed to read request with null headers");
}
}
|
private static void parseAliases(JsonNode schema, Schema result) {
  Set<String> aliases = parseAliases(schema);
  if (aliases != null) { // add aliases
    for (String alias : aliases) {
      result.addAlias(alias);
    }
  }
}
|
@Test
public void parseAliases() throws JsonProcessingException {
String s1 = "{ \"aliases\" : [\"a1\", \"b1\"]}";
ObjectMapper mapper = new ObjectMapper();
JsonNode j1 = mapper.readTree(s1);
Set<String> aliases = Schema.parseAliases(j1);
assertEquals(2, aliases.size());
assertTrue(aliases.contains("a1"));
assertTrue(aliases.contains("b1"));
String s2 = "{ \"aliases\" : {\"a1\": \"b1\"}}";
JsonNode j2 = mapper.readTree(s2);
SchemaParseException ex = assertThrows(SchemaParseException.class, () -> Schema.parseAliases(j2));
assertTrue(ex.getMessage().contains("aliases not an array"));
String s3 = "{ \"aliases\" : [11, \"b1\"]}";
JsonNode j3 = mapper.readTree(s3);
SchemaParseException ex3 = assertThrows(SchemaParseException.class, () -> Schema.parseAliases(j3));
assertTrue(ex3.getMessage().contains("alias not a string"));
}
|
public String registerCoder(Coder<?> coder) throws IOException {
String existing = coderIds.get(coder);
if (existing != null) {
return existing;
}
    // Unlike StructuredCoder, custom coders may not have proper implementations of hashCode() and
    // equals(); this leads to unnecessary duplication. In order to avoid this we examine the already
    // registered coders, and if we find a matching proto we consider them the same coder.
RunnerApi.Coder coderProto = CoderTranslation.toProto(coder, this);
if (coderProtoToId.containsKey(coderProto)) {
return coderProtoToId.get(coderProto);
}
String baseName = NameUtils.approximateSimpleName(coder);
String name = uniqify(baseName, coderIds.values());
coderIds.put(coder, name);
coderProtoToId.put(coderProto, name);
componentsBuilder.putCoders(name, coderProto);
return name;
}
|
@Test
public void registerCoder() throws IOException {
Coder<?> coder =
KvCoder.of(StringUtf8Coder.of(), IterableCoder.of(SetCoder.of(ByteArrayCoder.of())));
String id = components.registerCoder(coder);
assertThat(components.registerCoder(coder), equalTo(id));
assertThat(id, not(isEmptyOrNullString()));
Coder<?> equalCoder =
KvCoder.of(StringUtf8Coder.of(), IterableCoder.of(SetCoder.of(ByteArrayCoder.of())));
assertThat(components.registerCoder(equalCoder), equalTo(id));
Coder<?> otherCoder = VarLongCoder.of();
assertThat(components.registerCoder(otherCoder), not(equalTo(id)));
components.toComponents().getCodersOrThrow(id);
components.toComponents().getCodersOrThrow(components.registerCoder(otherCoder));
}
|
@Override
public boolean equals(final Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
final RedisConfigProperties that = (RedisConfigProperties) o;
return Objects.equals(database, that.database)
&& Objects.equals(master, that.master)
&& Objects.equals(mode, that.mode)
&& Objects.equals(url, that.url)
&& Objects.equals(password, that.password)
&& Objects.equals(maxIdle, that.maxIdle)
&& Objects.equals(minIdle, that.minIdle)
&& Objects.equals(maxActive, that.maxActive)
&& Objects.equals(maxWait, that.maxWait);
}
|
@Test
public void equalsTest() {
RedisConfigProperties defaultConfig = new RedisConfigProperties();
assertEquals(defaultConfig, this.redisConfigProperties);
defaultConfig.setMaster("master");
defaultConfig.setDatabase(2);
defaultConfig.setPassword("password");
defaultConfig.setMaxIdle(30);
defaultConfig.setMinIdle(10);
defaultConfig.setMaxActive(100);
defaultConfig.setMaxWait(-1);
defaultConfig.setUrl("url");
defaultConfig.setMode("mode");
RedisConfigProperties defaultConfig2 = new RedisConfigProperties();
defaultConfig2.setMaster("master");
defaultConfig2.setDatabase(2);
defaultConfig2.setPassword("password");
defaultConfig2.setMaxIdle(30);
defaultConfig2.setMinIdle(10);
defaultConfig2.setMaxActive(100);
defaultConfig2.setMaxWait(-1);
defaultConfig2.setUrl("url");
defaultConfig2.setMode("mode");
assertNotEquals(defaultConfig, this.redisConfigProperties);
assertEquals(defaultConfig, defaultConfig2);
assertEquals(defaultConfig, defaultConfig);
assertNotEquals(defaultConfig, "String");
assertNotEquals(defaultConfig, null);
assertEquals(defaultConfig.getMaster(), "master");
assertTrue(defaultConfig.hashCode() != 0);
}
|
public static boolean exceedsPushQueryCapacity(
final KsqlExecutionContext executionContext,
final KsqlRestConfig ksqlRestConfig
) {
return getNumLivePushQueries(executionContext) >= getPushQueryLimit(ksqlRestConfig);
}
|
@Test
public void shouldReportPushQueryAtCapacityLimit() {
// Given:
givenAllLiveQueries(10);
givenActivePersistentQueries(4);
givenPushQueryLimit(6);
// Then:
assertThat(QueryCapacityUtil.exceedsPushQueryCapacity(ksqlEngine, ksqlRestConfig),
equalTo(true));
}
|
@Override
@CacheEvict(cacheNames = RedisKeyConstants.SMS_TEMPLATE,
        allEntries = true) // allEntries evicts the whole cache: the code field may change, which makes a targeted eviction impractical
public void updateSmsTemplate(SmsTemplateSaveReqVO updateReqVO) {
    // Validate that the template exists
    validateSmsTemplateExists(updateReqVO.getId());
    // Validate the SMS channel
    SmsChannelDO channelDO = validateSmsChannel(updateReqVO.getChannelId());
    // Validate that the template code is not duplicated
    validateSmsTemplateCodeDuplicate(updateReqVO.getId(), updateReqVO.getCode());
    // Validate the template against the provider API
    validateApiTemplate(updateReqVO.getChannelId(), updateReqVO.getApiTemplateId());
    // Update the record
SmsTemplateDO updateObj = BeanUtils.toBean(updateReqVO, SmsTemplateDO.class);
updateObj.setParams(parseTemplateContentParams(updateObj.getContent()));
updateObj.setChannelCode(channelDO.getCode());
smsTemplateMapper.updateById(updateObj);
}
|
@Test
public void testUpdateSmsTemplate_notExists() {
    // Prepare parameters
    SmsTemplateSaveReqVO reqVO = randomPojo(SmsTemplateSaveReqVO.class);
    // Invoke and assert the expected exception
assertServiceException(() -> smsTemplateService.updateSmsTemplate(reqVO), SMS_TEMPLATE_NOT_EXISTS);
}
|
public static void checkContextPath(String contextPath) {
if (contextPath == null) {
return;
}
Matcher matcher = CONTEXT_PATH_MATCH.matcher(contextPath);
if (matcher.find()) {
throw new IllegalArgumentException("Illegal url path expression");
}
}
|
@Test
void testContextPathIllegal1() {
assertThrows(IllegalArgumentException.class, () -> {
String contextPath1 = "//nacos/";
ValidatorUtils.checkContextPath(contextPath1);
});
}
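// Companion sketch (assumption: CONTEXT_PATH_MATCH only matches repeated slashes, so a
// single-slash path is legal): valid paths and null must pass without an exception.
@Test
void testContextPathLegal() {
    ValidatorUtils.checkContextPath("/nacos");
    ValidatorUtils.checkContextPath(null); // null is explicitly tolerated by the method
}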
|
@Override
protected void validateDataImpl(TenantId tenantId, AssetProfile assetProfile) {
validateString("Asset profile name", assetProfile.getName());
if (assetProfile.getTenantId() == null) {
throw new DataValidationException("Asset profile should be assigned to tenant!");
} else {
if (!tenantService.tenantExists(assetProfile.getTenantId())) {
throw new DataValidationException("Asset profile is referencing to non-existent tenant!");
}
}
if (assetProfile.isDefault()) {
AssetProfile defaultAssetProfile = assetProfileService.findDefaultAssetProfile(tenantId);
if (defaultAssetProfile != null && !defaultAssetProfile.getId().equals(assetProfile.getId())) {
throw new DataValidationException("Another default asset profile is present in scope of current tenant!");
}
}
if (StringUtils.isNotEmpty(assetProfile.getDefaultQueueName())) {
Queue queue = queueService.findQueueByTenantIdAndName(tenantId, assetProfile.getDefaultQueueName());
if (queue == null) {
throw new DataValidationException("Asset profile is referencing to non-existent queue!");
}
}
if (assetProfile.getDefaultRuleChainId() != null) {
RuleChain ruleChain = ruleChainService.findRuleChainById(tenantId, assetProfile.getDefaultRuleChainId());
if (ruleChain == null) {
throw new DataValidationException("Can't assign non-existent rule chain!");
}
if (!ruleChain.getTenantId().equals(assetProfile.getTenantId())) {
throw new DataValidationException("Can't assign rule chain from different tenant!");
}
}
if (assetProfile.getDefaultDashboardId() != null) {
DashboardInfo dashboard = dashboardService.findDashboardInfoById(tenantId, assetProfile.getDefaultDashboardId());
if (dashboard == null) {
throw new DataValidationException("Can't assign non-existent dashboard!");
}
if (!dashboard.getTenantId().equals(assetProfile.getTenantId())) {
throw new DataValidationException("Can't assign dashboard from different tenant!");
}
}
}
|
@Test
void testValidateNameInvocation() {
AssetProfile assetProfile = new AssetProfile();
assetProfile.setName("prod");
assetProfile.setTenantId(tenantId);
validator.validateDataImpl(tenantId, assetProfile);
verify(validator).validateString("Asset profile name", assetProfile.getName());
}
|
@Override
public PageResult<ProductSpuDO> getSpuPage(ProductSpuPageReqVO pageReqVO) {
return productSpuMapper.selectPage(pageReqVO);
}
|
@Test
void getSpuPage_alarmStock() {
    // Prepare parameters: two SPUs with low stock values
    ArrayList<ProductSpuDO> createReqVOs = Lists.newArrayList(randomPojo(ProductSpuDO.class, o -> {
        o.setCategoryId(generateId());
        o.setBrandId(generateId());
        o.setDeliveryTemplateId(generateId());
        o.setSort(RandomUtil.randomInt(1, 100)); // constrain the sort range
        o.setGiveIntegral(generaInt()); // constrain to a positive integer
        o.setVirtualSalesCount(generaInt()); // constrain to a positive integer
        o.setPrice(generaInt()); // constrain to a positive integer
        o.setMarketPrice(generaInt()); // constrain to a positive integer
        o.setCostPrice(generaInt()); // constrain to a positive integer
        o.setStock(5); // low stock, should show up in the alert tab
        o.setGiveIntegral(generaInt()); // constrain to a positive integer
        o.setSalesCount(generaInt()); // constrain to a positive integer
        o.setBrowseCount(generaInt()); // constrain to a positive integer
    }), randomPojo(ProductSpuDO.class, o -> {
        o.setCategoryId(generateId());
        o.setBrandId(generateId());
        o.setDeliveryTemplateId(generateId());
        o.setSort(RandomUtil.randomInt(1, 100)); // constrain the sort range
        o.setGiveIntegral(generaInt()); // constrain to a positive integer
        o.setVirtualSalesCount(generaInt()); // constrain to a positive integer
        o.setPrice(generaInt()); // constrain to a positive integer
        o.setMarketPrice(generaInt()); // constrain to a positive integer
        o.setCostPrice(generaInt()); // constrain to a positive integer
        o.setStock(9); // low stock, should show up in the alert tab
        o.setGiveIntegral(generaInt()); // constrain to a positive integer
        o.setSalesCount(generaInt()); // constrain to a positive integer
        o.setBrowseCount(generaInt()); // constrain to a positive integer
    }));
    productSpuMapper.insertBatch(createReqVOs);
    // Invoke
ProductSpuPageReqVO productSpuPageReqVO = new ProductSpuPageReqVO();
productSpuPageReqVO.setTabType(ProductSpuPageReqVO.ALERT_STOCK);
PageResult<ProductSpuDO> spuPage = productSpuService.getSpuPage(productSpuPageReqVO);
assertEquals(createReqVOs.size(), spuPage.getTotal());
}
|
public static byte[] readBytes(ByteBuffer buffer, int offset, int length) {
byte[] dest = new byte[length];
if (buffer.hasArray()) {
System.arraycopy(buffer.array(), buffer.arrayOffset() + offset, dest, 0, length);
} else {
buffer.mark();
buffer.position(offset);
buffer.get(dest);
buffer.reset();
}
return dest;
}
|
@Test
public void testReadBytes() {
byte[] myvar = "Any String you want".getBytes();
ByteBuffer buffer = ByteBuffer.allocate(myvar.length);
buffer.put(myvar);
buffer.rewind();
this.subTest(buffer);
// test readonly buffer, different path
buffer = ByteBuffer.wrap(myvar).asReadOnlyBuffer();
this.subTest(buffer);
}
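// Direct sub-range sketch (assumption: readBytes is statically imported from the utility
// class above): the array-backed fast path and the mark/reset fallback return the same
// bytes, and the fallback restores the buffer position.
@Test
public void testReadBytesSubRange() {
    byte[] src = "Any String you want".getBytes();
    assertArrayEquals("String".getBytes(), readBytes(ByteBuffer.wrap(src), 4, 6));
    ByteBuffer readOnly = ByteBuffer.wrap(src).asReadOnlyBuffer();
    assertArrayEquals("String".getBytes(), readBytes(readOnly, 4, 6));
    assertEquals(0, readOnly.position()); // position restored by mark()/reset()
}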
|
@Override
public Optional<EfestoOutputPMML> evaluateInput(EfestoInputPMML toEvaluate, EfestoRuntimeContext context) {
return executeEfestoInputPMML(toEvaluate, context);
}
|
@Test
void evaluateEfestoRuntimeContext() {
modelLocalUriId = getModelLocalUriIdFromPmmlIdFactory(FILE_NAME, MODEL_NAME);
EfestoRuntimeContext runtimeContext =
EfestoRuntimeContextUtils.buildWithParentClassLoader(memoryCompilerClassLoader);
EfestoInputPMML inputPMML = new EfestoInputPMML(modelLocalUriId, getPMMLContext(FILE_NAME, MODEL_NAME,
memoryCompilerClassLoader));
Optional<EfestoOutputPMML> retrieved = kieRuntimeServicePMML.evaluateInput(inputPMML,
runtimeContext);
assertThat(retrieved).isNotNull().isPresent();
commonEvaluateEfestoOutputPMML(retrieved.get(), inputPMML);
}
|
@Override
public void write(int byteValue) throws IOException {
byte[] bytes = new byte[] {(byte) byteValue};
write(bytes);
}
|
@Test
public void shouldFailOnPartialWrite() throws IOException {
if (!closed) {
blobStorage.maxWriteCount = 1;
byte[] bytes = new byte[2];
random.nextBytes(bytes);
assertThrows(IOException.class, () -> fsDataOutputStream.write(bytes));
}
}
|
@Override
public void run() {
try (DbSession dbSession = dbClient.openSession(false)) {
List<AlmSettingDto> githubSettingsDtos = dbClient.almSettingDao().selectByAlm(dbSession, ALM.GITHUB);
if (githubSettingsDtos.isEmpty()) {
metrics.setGithubStatusToRed();
return;
}
validateGithub(githubSettingsDtos);
}
}
|
@Test
public void run_githubValidatorDoesntThrowException_setRedStatusInMetricsOnce() {
List<AlmSettingDto> dtos = generateDtos(5, ALM.GITHUB);
when(almSettingsDao.selectByAlm(any(), any())).thenReturn(dtos);
doThrow(new RuntimeException()).when(githubValidator).validate(any());
underTest.run();
verify(metrics, times(0)).setGithubStatusToGreen();
verify(metrics, times(1)).setGithubStatusToRed();
}
|
public SortedMap<String, HealthCheck.Result> runHealthChecks() {
return runHealthChecks(HealthCheckFilter.ALL);
}
|
@Test
public void runsRegisteredHealthChecksWithFilter() {
final Map<String, HealthCheck.Result> results = registry.runHealthChecks((name, healthCheck) -> "hc1".equals(name));
assertThat(results).containsOnly(entry("hc1", r1));
}
|
public static String[] csvReadFile(BufferedReader infile, char delim)
throws IOException {
int ch;
ParserState state = ParserState.INITIAL;
List<String> list = new ArrayList<>();
CharArrayWriter baos = new CharArrayWriter(200);
boolean push = false;
while (-1 != (ch = infile.read())) {
push = false;
switch (state) {
case INITIAL:
if (ch == QUOTING_CHAR) {
state = ParserState.QUOTED;
} else if (isDelimOrEOL(delim, ch)) {
push = true;
} else {
baos.write(ch);
state = ParserState.PLAIN;
}
break;
case PLAIN:
if (ch == QUOTING_CHAR) {
baos.write(ch);
throw new IOException(
"Cannot have quote-char in plain field:["
+ baos.toString() + "]");
} else if (isDelimOrEOL(delim, ch)) {
push = true;
state = ParserState.INITIAL;
} else {
baos.write(ch);
}
break;
case QUOTED:
if (ch == QUOTING_CHAR) {
state = ParserState.EMBEDDEDQUOTE;
} else {
baos.write(ch);
}
break;
case EMBEDDEDQUOTE:
if (ch == QUOTING_CHAR) {
baos.write(QUOTING_CHAR); // doubled quote => quote
state = ParserState.QUOTED;
} else if (isDelimOrEOL(delim, ch)) {
push = true;
state = ParserState.INITIAL;
} else {
baos.write(QUOTING_CHAR);
throw new IOException(
"Cannot have single quote-char in quoted field:["
+ baos.toString() + "]");
}
break;
} // switch(state)
if (push) {
if (ch == '\r') {// Remove following \n if present
infile.mark(1);
if (infile.read() != '\n') {
                    infile.reset(); // did not find \n, put the character back
}
}
String s = baos.toString();
list.add(s);
baos.reset();
}
if ((ch == '\n' || ch == '\r') && state != ParserState.QUOTED) {
break;
}
} // while not EOF
if (ch == -1) {// EOF (or end of string) so collect any remaining data
if (state == ParserState.QUOTED) {
throw new IOException("Missing trailing quote-char in quoted field:[\""
+ baos.toString() + "]");
}
// Do we have some data, or a trailing empty field?
if (baos.size() > 0 // we have some data
|| push // we've started a field
|| state == ParserState.EMBEDDEDQUOTE // Just seen ""
) {
list.add(baos.toString());
}
}
return list.toArray(new String[list.size()]);
}
|
@Test
public void testBlankLineQuoted() throws Exception {
BufferedReader br = new BufferedReader(new StringReader("\"\"\n"));
String[] out = CSVSaveService.csvReadFile(br, ',');
checkStrings(new String[]{""}, out);
assertEquals(-1, br.read(), "Expected to be at EOF");
}
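// Additional parse sketch (hypothetical input): a doubled quote inside a quoted field
// collapses to a single quote via the EMBEDDEDQUOTE state above.
@Test
public void testEmbeddedQuote() throws Exception {
    BufferedReader br = new BufferedReader(new StringReader("\"a\"\"b\",c\n"));
    String[] out = CSVSaveService.csvReadFile(br, ',');
    checkStrings(new String[]{"a\"b", "c"}, out);
}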
|
public ApplicationBuilder monitor(MonitorConfig monitor) {
this.monitor = monitor;
return getThis();
}
|
@Test
void monitor() {
MonitorConfig monitor = new MonitorConfig("monitor-addr");
ApplicationBuilder builder = new ApplicationBuilder();
builder.monitor(monitor);
Assertions.assertSame(monitor, builder.build().getMonitor());
Assertions.assertEquals("monitor-addr", builder.build().getMonitor().getAddress());
}
|
@NotNull @Override
public INode enrich(@NotNull INode node) {
if (node instanceof AES aes) {
return enrich(aes);
}
return node;
}
|
@Test
void ae() {
DetectionLocation testDetectionLocation =
new DetectionLocation("testfile", 1, 1, List.of("test"), () -> "SSL");
final AES aes =
new AES(
128,
new GCM(testDetectionLocation),
new PKCS1(testDetectionLocation),
testDetectionLocation);
this.logBefore(aes);
final AESEnricher aesEnricher = new AESEnricher();
final INode enriched = aesEnricher.enrich(aes);
this.logAfter(enriched);
assertThat(enriched.is(AuthenticatedEncryption.class)).isTrue();
assertThat(enriched).isInstanceOf(AES.class);
final AES enrichedAES = (AES) enriched;
assertThat(enrichedAES.hasChildOfType(Oid.class)).isPresent();
assertThat(enrichedAES.hasChildOfType(Oid.class).get().asString())
.isEqualTo("2.16.840.1.101.3.4.1.6");
}
|
public final synchronized List<E> getAllAddOns() {
Logger.d(mTag, "getAllAddOns has %d add on for %s", mAddOns.size(), getClass().getName());
if (mAddOns.size() == 0) {
loadAddOns();
}
Logger.d(
mTag, "getAllAddOns will return %d add on for %s", mAddOns.size(), getClass().getName());
return unmodifiableList(mAddOns);
}
|
@Test
public void testDoesNotFiltersDebugAddOnOnDebugBuilds() throws Exception {
TestableAddOnsFactory factory = new TestableAddOnsFactory(true);
List<TestAddOn> list = factory.getAllAddOns();
// right now, we have 3 themes that are marked as dev.
Assert.assertEquals(STABLE_THEMES_COUNT + UNSTABLE_THEMES_COUNT, list.size());
}
|
public static Future<Integer> authTlsHash(SecretOperator secretOperations, String namespace, KafkaClientAuthentication auth, List<CertSecretSource> certSecretSources) {
Future<Integer> tlsFuture;
if (certSecretSources == null || certSecretSources.isEmpty()) {
tlsFuture = Future.succeededFuture(0);
} else {
// get all TLS trusted certs, compute hash from each of them, sum hashes
tlsFuture = Future.join(certSecretSources.stream().map(certSecretSource ->
getCertificateAsync(secretOperations, namespace, certSecretSource)
.compose(cert -> Future.succeededFuture(cert.hashCode()))).collect(Collectors.toList()))
.compose(hashes -> Future.succeededFuture(hashes.list().stream().mapToInt(e -> (int) e).sum()));
}
if (auth == null) {
return tlsFuture;
} else {
// compute hash from Auth
if (auth instanceof KafkaClientAuthenticationScram) {
// only passwordSecret can be changed
return tlsFuture.compose(tlsHash -> getPasswordAsync(secretOperations, namespace, auth)
.compose(password -> Future.succeededFuture(password.hashCode() + tlsHash)));
} else if (auth instanceof KafkaClientAuthenticationPlain) {
// only passwordSecret can be changed
return tlsFuture.compose(tlsHash -> getPasswordAsync(secretOperations, namespace, auth)
.compose(password -> Future.succeededFuture(password.hashCode() + tlsHash)));
} else if (auth instanceof KafkaClientAuthenticationTls) {
// custom cert can be used (and changed)
return ((KafkaClientAuthenticationTls) auth).getCertificateAndKey() == null ? tlsFuture :
tlsFuture.compose(tlsHash -> getCertificateAndKeyAsync(secretOperations, namespace, (KafkaClientAuthenticationTls) auth)
.compose(crtAndKey -> Future.succeededFuture(crtAndKey.certAsBase64String().hashCode() + crtAndKey.keyAsBase64String().hashCode() + tlsHash)));
} else if (auth instanceof KafkaClientAuthenticationOAuth) {
List<Future<Integer>> futureList = ((KafkaClientAuthenticationOAuth) auth).getTlsTrustedCertificates() == null ?
new ArrayList<>() : ((KafkaClientAuthenticationOAuth) auth).getTlsTrustedCertificates().stream().map(certSecretSource ->
getCertificateAsync(secretOperations, namespace, certSecretSource)
.compose(cert -> Future.succeededFuture(cert.hashCode()))).collect(Collectors.toList());
futureList.add(tlsFuture);
futureList.add(addSecretHash(secretOperations, namespace, ((KafkaClientAuthenticationOAuth) auth).getAccessToken()));
futureList.add(addSecretHash(secretOperations, namespace, ((KafkaClientAuthenticationOAuth) auth).getClientSecret()));
futureList.add(addSecretHash(secretOperations, namespace, ((KafkaClientAuthenticationOAuth) auth).getRefreshToken()));
return Future.join(futureList)
.compose(hashes -> Future.succeededFuture(hashes.list().stream().mapToInt(e -> (int) e).sum()));
} else {
// unknown Auth type
return tlsFuture;
}
}
}
|
@Test
void getHashForPattern(VertxTestContext context) {
String namespace = "ns";
CertSecretSource cert1 = new CertSecretSourceBuilder()
.withSecretName("cert-secret")
.withPattern("*.crt")
.build();
CertSecretSource cert2 = new CertSecretSourceBuilder()
.withSecretName("cert-secret2")
.withPattern("*.crt")
.build();
CertSecretSource cert3 = new CertSecretSourceBuilder()
.withSecretName("cert-secret3")
.withCertificate("my.crt")
.build();
Secret secret = new SecretBuilder()
.withData(Map.of("ca.crt", "value", "ca2.crt", "value2"))
.build();
Secret secret2 = new SecretBuilder()
.withData(Map.of("ca3.crt", "value3", "ca4.crt", "value4"))
.build();
Secret secret3 = new SecretBuilder()
.withData(Map.of("my.crt", "value5"))
.build();
SecretOperator secretOps = mock(SecretOperator.class);
when(secretOps.getAsync(eq(namespace), eq("cert-secret"))).thenReturn(Future.succeededFuture(secret));
when(secretOps.getAsync(eq(namespace), eq("cert-secret2"))).thenReturn(Future.succeededFuture(secret2));
when(secretOps.getAsync(eq(namespace), eq("cert-secret3"))).thenReturn(Future.succeededFuture(secret3));
Checkpoint async = context.checkpoint();
VertxUtil.authTlsHash(secretOps, "ns", null, List.of(cert1, cert2, cert3)).onComplete(context.succeeding(res -> {
assertThat(res, is("valuevalue2".hashCode() + "value3value4".hashCode() + "value5".hashCode()));
async.flag();
}));
}
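// Degenerate-case sketch (no secrets need to be fetched when both inputs are empty): with
// null auth and no trusted certs the hash is simply 0, per the short-circuits above.
@Test
void getHashForNoAuthAndNoCerts(VertxTestContext context) {
    SecretOperator secretOps = mock(SecretOperator.class);
    Checkpoint async = context.checkpoint();
    VertxUtil.authTlsHash(secretOps, "ns", null, List.of()).onComplete(context.succeeding(res -> {
        assertThat(res, is(0));
        async.flag();
    }));
}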
|
@Override
public String getAuthenticationPassword() {
return this.authPassword;
}
|
@Test
public void testGetAuthenticationPassword() {
assertEquals(authPassword, v3SnmpConfiguration.getAuthenticationPassword());
}
|
public Notification setFieldValue(String field, @Nullable String value) {
fields.put(field, value);
return this;
}
|
@Test
void equals_whenTypeAndFieldsMatch_shouldReturnTrue() {
Notification notification1 = new Notification("type");
Notification notification2 = new Notification("type");
notification1.setFieldValue("key", "value");
notification2.setFieldValue("key", "value");
assertThat(notification1)
.hasSameHashCodeAs(notification2)
.isEqualTo(notification2);
}
|
private <T> RestResponse<T> get(final String path, final Class<T> type) {
return executeRequestSync(HttpMethod.GET,
path,
null,
r -> deserialize(r.getBody(), type),
Optional.empty());
}
|
@Test
public void shouldPostQueryRequest_chunkHandler_closeAfterFinish() {
ksqlTarget = new KsqlTarget(httpClient, socketAddress, localProperties, authHeader, HOST,
Collections.emptyMap(), RequestOptions.DEFAULT_TIMEOUT);
executor.submit(this::expectPostQueryRequestChunkHandler);
assertThatEventually(requestStarted::get, is(true));
handlerCaptor.getValue().handle(Buffer.buffer("{\"row\": {\"columns\": [1.0, 12.1]}},\n"));
handlerCaptor.getValue().handle(Buffer.buffer("{\"row\": {\"columns\": [5.0, 10.5]}},\n"));
endCaptor.getValue().handle(null);
closeConnection.complete(null);
assertThatEventually(response::get, notNullValue());
    assertThat(response.get().getResponse(), is(2));
    assertThat(rows.size(), is(2));
}
|
public static Criterion matchIPv6Dst(IpPrefix ip) {
return new IPCriterion(ip, Type.IPV6_DST);
}
|
@Test
public void testMatchIPv6DstMethod() {
Criterion matchIPv6Dst = Criteria.matchIPv6Dst(ipv61);
IPCriterion ipCriterion =
checkAndConvert(matchIPv6Dst,
Criterion.Type.IPV6_DST,
IPCriterion.class);
assertThat(ipCriterion.ip(), is(equalTo(ipv61)));
}
|
public static long tileToPixel(long tileNumber, int tileSize) {
return tileNumber * tileSize;
}
|
@Test
public void tileToPixelTest() {
for (int tileSize : TILE_SIZES) {
Assert.assertEquals(0, MercatorProjection.tileToPixel(0, tileSize));
Assert.assertEquals(tileSize, MercatorProjection.tileToPixel(1, tileSize));
Assert.assertEquals(tileSize * 2, MercatorProjection.tileToPixel(2, tileSize));
}
}
|
@Override
public TypeSerializerSchemaCompatibility<T> resolveSchemaCompatibility(
TypeSerializerSnapshot<T> oldSerializerSnapshot) {
if (!(oldSerializerSnapshot instanceof PojoSerializerSnapshot)) {
return TypeSerializerSchemaCompatibility.incompatible();
}
PojoSerializerSnapshot<T> previousPojoSerializerSnapshot =
(PojoSerializerSnapshot<T>) oldSerializerSnapshot;
final Class<T> previousPojoClass =
previousPojoSerializerSnapshot.snapshotData.getPojoClass();
final LinkedOptionalMap<Field, TypeSerializerSnapshot<?>> fieldSerializerSnapshots =
previousPojoSerializerSnapshot.snapshotData.getFieldSerializerSnapshots();
final LinkedOptionalMap<Class<?>, TypeSerializerSnapshot<?>>
registeredSubclassSerializerSnapshots =
previousPojoSerializerSnapshot.snapshotData
.getRegisteredSubclassSerializerSnapshots();
final LinkedOptionalMap<Class<?>, TypeSerializerSnapshot<?>>
nonRegisteredSubclassSerializerSnapshots =
previousPojoSerializerSnapshot.snapshotData
.getNonRegisteredSubclassSerializerSnapshots();
if (previousPojoClass != snapshotData.getPojoClass()) {
return TypeSerializerSchemaCompatibility.incompatible();
}
if (registeredSubclassSerializerSnapshots.hasAbsentKeysOrValues()) {
return TypeSerializerSchemaCompatibility.incompatible();
}
if (nonRegisteredSubclassSerializerSnapshots.hasAbsentKeysOrValues()) {
return TypeSerializerSchemaCompatibility.incompatible();
}
final IntermediateCompatibilityResult<T> preExistingFieldSerializersCompatibility =
getCompatibilityOfPreExistingFields(fieldSerializerSnapshots);
if (preExistingFieldSerializersCompatibility.isIncompatible()) {
return TypeSerializerSchemaCompatibility.incompatible();
}
final IntermediateCompatibilityResult<T> preExistingRegistrationsCompatibility =
getCompatibilityOfPreExistingRegisteredSubclasses(
registeredSubclassSerializerSnapshots);
if (preExistingRegistrationsCompatibility.isIncompatible()) {
return TypeSerializerSchemaCompatibility.incompatible();
}
if (newPojoSerializerIsCompatibleAfterMigration(
preExistingFieldSerializersCompatibility,
preExistingRegistrationsCompatibility,
fieldSerializerSnapshots)) {
return TypeSerializerSchemaCompatibility.compatibleAfterMigration();
}
if (newPojoSerializerIsCompatibleWithReconfiguredSerializer(
preExistingFieldSerializersCompatibility,
preExistingRegistrationsCompatibility,
registeredSubclassSerializerSnapshots,
nonRegisteredSubclassSerializerSnapshots)) {
return TypeSerializerSchemaCompatibility.compatibleWithReconfiguredSerializer(
constructReconfiguredPojoSerializer(
preExistingFieldSerializersCompatibility,
registeredSubclassSerializerSnapshots,
preExistingRegistrationsCompatibility,
nonRegisteredSubclassSerializerSnapshots));
}
return TypeSerializerSchemaCompatibility.compatibleAsIs();
}
|
@Test
void testResolveSchemaCompatibilityWithIncompatibleFieldSerializers() {
final PojoSerializerSnapshot<TestPojo> oldSnapshot =
buildTestSnapshot(
Arrays.asList(
ID_FIELD,
mockFieldSerializerSnapshot(
NAME_FIELD,
new SchemaCompatibilityTestingSerializer()
.snapshotConfiguration()),
HEIGHT_FIELD));
final PojoSerializerSnapshot<TestPojo> newSnapshot =
buildTestSnapshot(
Arrays.asList(
ID_FIELD,
mockFieldSerializerSnapshot(
NAME_FIELD,
SchemaCompatibilityTestingSnapshot
.thatIsIncompatibleWithTheLastSerializer()),
HEIGHT_FIELD));
final TypeSerializerSchemaCompatibility<TestPojo> resultCompatibility =
newSnapshot.resolveSchemaCompatibility(oldSnapshot);
assertThat(resultCompatibility.isIncompatible()).isTrue();
}
|
public EntrySet entrySet()
{
if (null == entrySet)
{
entrySet = new EntrySet();
}
return entrySet;
}
|
@Test
void removeIfOnEntrySetThrowsUnsupportedOperationException()
{
final Predicate<Map.Entry<Integer, String>> filter = (entry) -> true;
final UnsupportedOperationException exception =
assertThrowsExactly(UnsupportedOperationException.class, () -> cache.entrySet().removeIf(filter));
assertEquals("Cannot remove from EntrySet", exception.getMessage());
}
|
public List<String> toPrefix(String in) {
List<String> tokens = buildTokens(alignINClause(in));
List<String> output = new ArrayList<>();
List<String> stack = new ArrayList<>();
for (String token : tokens) {
if (isOperand(token)) {
if (token.equals(")")) {
while (openParanthesesFound(stack)) {
output.add(stack.remove(stack.size() - 1));
}
if (!stack.isEmpty()) {
                // temporary fix for issue #189: discard the matching "("
stack.remove(stack.size() - 1);
}
} else {
while (openParanthesesFound(stack) && !hasHigherPrecedence(token, stack.get(stack.size() - 1))) {
output.add(stack.remove(stack.size() - 1));
}
stack.add(token);
}
} else {
output.add(token);
}
}
while (!stack.isEmpty()) {
output.add(stack.remove(stack.size() - 1));
}
return output;
}
|
@Test
public void parseEmpty() {
List<String> list = parser.toPrefix("");
assertTrue(list.isEmpty());
}
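// Expression sketch (hypothetical input; assumes buildTokens splits on whitespace and AND
// binds weaker than =): the shunting-yard loop above emits each operator after its operands.
@Test
public void parseSimpleAnd() {
    List<String> list = parser.toPrefix("a = 1 AND b = 2");
    assertEquals(Arrays.asList("a", "1", "=", "b", "2", "=", "AND"), list);
}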
|
@Override
public List<Container> allocateContainers(ResourceBlacklistRequest blackList,
List<ResourceRequest> oppResourceReqs,
ApplicationAttemptId applicationAttemptId,
OpportunisticContainerContext opportContext, long rmIdentifier,
String appSubmitter) throws YarnException {
// Update black list.
updateBlacklist(blackList, opportContext);
// Add OPPORTUNISTIC requests to the outstanding ones.
opportContext.addToOutstandingReqs(oppResourceReqs);
Set<String> nodeBlackList = new HashSet<>(opportContext.getBlacklist());
Set<String> allocatedNodes = new HashSet<>();
List<Container> allocatedContainers = new ArrayList<>();
// Satisfy the outstanding OPPORTUNISTIC requests.
boolean continueLoop = true;
while (continueLoop) {
continueLoop = false;
List<Map<Resource, List<Allocation>>> allocations = new ArrayList<>();
for (SchedulerRequestKey schedulerKey :
opportContext.getOutstandingOpReqs().descendingKeySet()) {
// Allocated containers :
// Key = Requested Capability,
// Value = List of Containers of given cap (the actual container size
// might be different than what is requested, which is why
// we need the requested capability (key) to match against
// the outstanding reqs)
int remAllocs = -1;
int maxAllocationsPerAMHeartbeat = getMaxAllocationsPerAMHeartbeat();
if (maxAllocationsPerAMHeartbeat > 0) {
remAllocs =
maxAllocationsPerAMHeartbeat - allocatedContainers.size()
- getTotalAllocations(allocations);
if (remAllocs <= 0) {
LOG.info("Not allocating more containers as we have reached max "
+ "allocations per AM heartbeat {}",
maxAllocationsPerAMHeartbeat);
break;
}
}
Map<Resource, List<Allocation>> allocation = allocate(
rmIdentifier, opportContext, schedulerKey, applicationAttemptId,
appSubmitter, nodeBlackList, allocatedNodes, remAllocs);
if (allocation.size() > 0) {
allocations.add(allocation);
continueLoop = true;
}
}
matchAllocation(allocations, allocatedContainers, opportContext);
}
return allocatedContainers;
}
|
@Test
public void testMaxAllocationsPerAMHeartbeatWithNoLimit() throws Exception {
ResourceBlacklistRequest blacklistRequest =
ResourceBlacklistRequest.newInstance(
new ArrayList<>(), new ArrayList<>());
allocator.setMaxAllocationsPerAMHeartbeat(-1);
List<ResourceRequest> reqs = new ArrayList<>();
for (int i = 0; i < 20; i++) {
reqs.add(ResourceRequest.newBuilder().allocationRequestId(i + 1)
.priority(PRIORITY_NORMAL)
.resourceName("h1")
.capability(CAPABILITY_1GB)
.relaxLocality(true)
.executionType(ExecutionType.OPPORTUNISTIC).build());
}
ApplicationAttemptId appAttId = ApplicationAttemptId.newInstance(
ApplicationId.newInstance(0L, 1), 1);
oppCntxt.updateNodeList(
Arrays.asList(
RemoteNode.newInstance(
NodeId.newInstance("h1", 1234), "h1:1234", "/r1"),
RemoteNode.newInstance(
NodeId.newInstance("h2", 1234), "h2:1234", "/r1")));
List<Container> containers = allocator.allocateContainers(
blacklistRequest, reqs, appAttId, oppCntxt, 1L, "user1");
// all containers should be allocated in single heartbeat.
Assert.assertEquals(20, containers.size());
}
|
public static MySQLBinaryProtocolValue getBinaryProtocolValue(final BinaryColumnType binaryColumnType) {
Preconditions.checkArgument(BINARY_PROTOCOL_VALUES.containsKey(binaryColumnType), "Cannot find MySQL type '%s' in column type when process binary protocol value", binaryColumnType);
return BINARY_PROTOCOL_VALUES.get(binaryColumnType);
}
|
@Test
void assertGetBinaryProtocolValueWithMySQLTypeMediumBlob() {
assertThat(MySQLBinaryProtocolValueFactory.getBinaryProtocolValue(MySQLBinaryColumnType.MEDIUM_BLOB), instanceOf(MySQLByteLenencBinaryProtocolValue.class));
}
|
public void setTransacted(boolean transacted) {
if (transacted) {
setAcknowledgementMode(SessionAcknowledgementType.SESSION_TRANSACTED);
}
this.transacted = transacted;
}
|
@Test
public void testSetTransacted() {
Endpoint endpoint = context.getEndpoint("sjms:queue:test.SjmsEndpointTest?transacted=true");
assertNotNull(endpoint);
assertTrue(endpoint instanceof SjmsEndpoint);
SjmsEndpoint qe = (SjmsEndpoint) endpoint;
assertTrue(qe.isTransacted());
}
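// Side-effect sketch (assumption: SjmsEndpoint exposes a matching getAcknowledgementMode()
// accessor): enabling transactions also forces the SESSION_TRANSACTED acknowledgement mode.
@Test
public void testSetTransactedSwitchesAckMode() {
    SjmsEndpoint qe = (SjmsEndpoint) context.getEndpoint("sjms:queue:test.SjmsEndpointTest?transacted=true");
    assertEquals(SessionAcknowledgementType.SESSION_TRANSACTED, qe.getAcknowledgementMode());
}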
|
@Override
public Path mkdir(final Path folder, final TransferStatus status) throws BackgroundException {
try {
session.getClient().putDirectory(folder.getAbsolute());
return folder;
}
catch(MantaException e) {
throw new MantaExceptionMappingService().map("Cannot create folder {0}", e, folder);
}
catch(MantaClientHttpResponseException e) {
throw new MantaHttpExceptionMappingService().map("Cannot create folder {0}", e, folder);
}
catch(IOException e) {
throw new DefaultIOExceptionMappingService().map("Cannot create folder {0}", e, folder);
}
}
|
@Test
public void testWhitespaceMkdir() throws Exception {
final RandomStringService randomStringService = new AlphanumericRandomStringService();
final Path target = new MantaDirectoryFeature(session)
.mkdir(
new Path(
testPathPrefix,
String.format("%s %s", randomStringService.random(), randomStringService.random()),
EnumSet.of(Path.Type.directory)
), null);
final Attributes found = new MantaAttributesFinderFeature(session).find(target);
assertNull(found.getOwner());
assertNotEquals(Permission.EMPTY, found.getPermission());
new MantaDeleteFeature(session).delete(Collections.singletonList(target), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
public static long toUnsignedLong(short value) {
return Short.toUnsignedLong(value);
}
|
@Test
public void testShortToUnsignedLong() {
getShortTestData().forEach(val -> assertEquals(val.toString(), toUnsignedLongPreviousImplementation(val),
BitmapUtils.toUnsignedLong(val)));
}
|
public EndpointResponse get() {
return EndpointResponse.ok(new ServerInfo(
appVersion.get(),
kafkaClusterId.get(),
ksqlServiceId.get(),
serverStatus.get().toString()));
}
|
@Test
public void shouldGetKafkaClusterIdWithTimeout()
    throws InterruptedException, ExecutionException, TimeoutException {
// When:
serverInfoResource.get();
// Then:
verify(future).get(30, TimeUnit.SECONDS);
}
|
@Override
public ResultSet getSchemas() {
return null;
}
|
@Test
void assertGetSchemas() {
assertNull(metaData.getSchemas());
}
|
@Override
public Mono<Void> writeWith(final ServerWebExchange exchange, final ShenyuPluginChain chain) {
return chain.execute(exchange).then(Mono.defer(() -> {
Object result = exchange.getAttribute(Constants.RPC_RESULT);
if (Objects.isNull(result)) {
Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.SERVICE_RESULT_ERROR);
return WebFluxResultUtils.result(exchange, error);
}
Mono<Void> responseMono = WebFluxResultUtils.result(exchange, result);
exchange.getAttributes().put(Constants.RESPONSE_MONO, responseMono);
// watcher httpStatus
final Consumer<HttpStatusCode> consumer = exchange.getAttribute(Constants.WATCHER_HTTP_STATUS);
Optional.ofNullable(consumer).ifPresent(c -> c.accept(exchange.getResponse().getStatusCode()));
return responseMono;
}));
}
|
@Test
public void testExecuteWithNoResult() {
StepVerifier.create(rpcMessageWriter.writeWith(exchange, chain)).expectSubscription().verifyComplete();
}
|
@Udf(description = "Returns the portion of str from pos to the end of str")
public String substring(
@UdfParameter(description = "The source string.") final String str,
@UdfParameter(description = "The base-one position to start from.") final Integer pos
) {
if (str == null || pos == null) {
return null;
}
final int start = getStartIndex(str.length(), pos);
return str.substring(start);
}
|
@Test
public void shouldExtractFromEndForNegativePositionsOnBytes() {
assertThat(udf.substring(ByteBuffer.wrap(new byte[]{1,2,3,4}), -3),
is(ByteBuffer.wrap(new byte[]{2,3,4})));
assertThat(udf.substring(ByteBuffer.wrap(new byte[]{1,2,3,4}), -3, 3),
is(ByteBuffer.wrap(new byte[]{2,3,4})));
}
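// String-overload sketch matching the focal method above (assumption: getStartIndex treats
// pos as base-one and counts negative positions from the end, as in the bytes variant).
@Test
public void shouldExtractFromPositionOnStrings() {
    assertThat(udf.substring("stream", 3), is("ream"));
    assertThat(udf.substring("stream", -3), is("eam"));
    assertThat(udf.substring(null, 1), is(nullValue()));
}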
|
public static OffsetBasedPagination forOffset(int offset, int pageSize) {
checkArgument(offset >= 0, "offset must be >= 0");
checkArgument(pageSize >= 1, "page size must be >= 1");
return new OffsetBasedPagination(offset, pageSize);
}
|
@Test
void hashcode_whenDifferentOffset_shouldBeNotEquals() {
Assertions.assertThat(OffsetBasedPagination.forOffset(10, 20))
.doesNotHaveSameHashCodeAs(OffsetBasedPagination.forOffset(15, 20));
}
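// Guard-clause sketch (messages mirror the checkArgument calls in forOffset above): invalid
// offsets and page sizes are rejected eagerly.
@Test
void forOffset_whenArgumentsInvalid_shouldThrow() {
    Assertions.assertThatThrownBy(() -> OffsetBasedPagination.forOffset(-1, 20))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("offset must be >= 0");
    Assertions.assertThatThrownBy(() -> OffsetBasedPagination.forOffset(10, 0))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("page size must be >= 1");
}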
|
@Override
public List<Integer> applyTransforms(List<Integer> originalGlyphIds)
{
List<Integer> intermediateGlyphsFromGsub = originalGlyphIds;
for (String feature : FEATURES_IN_ORDER)
{
if (!gsubData.isFeatureSupported(feature))
{
LOG.debug("the feature {} was not found", feature);
continue;
}
LOG.debug("applying the feature {}", feature);
ScriptFeature scriptFeature = gsubData.getFeature(feature);
intermediateGlyphsFromGsub = applyGsubFeature(scriptFeature,
intermediateGlyphsFromGsub);
}
return Collections.unmodifiableList(repositionGlyphs(intermediateGlyphsFromGsub));
}
|
@Test
void testApplyTransforms_ou_kar()
{
// given
List<Integer> glyphsAfterGsub = Arrays.asList(108, 91, 114, 94);
// when
List<Integer> result = gsubWorkerForBengali.applyTransforms(getGlyphIds("মৌল"));
// then
assertEquals(glyphsAfterGsub, result);
}
|
@Override
public void validate(CruiseConfig cruiseConfig) {
ServerConfig serverConfig = cruiseConfig.server();
String artifactDir = serverConfig.artifactsDir();
if (isEmpty(artifactDir)) {
throw new RuntimeException("Please provide a not empty value for artifactsdir");
}
if (StringUtils.equals(".", artifactDir) || new File("").getAbsolutePath().equals(
new File(artifactDir).getAbsolutePath())) {
throw new RuntimeException("artifactsdir should not point to the root of sand box [" +
new File(artifactDir).getAbsolutePath()
+ "]");
}
}
|
@Test
public void shouldThrowExceptionWhenUserProvidesPathPointToServerSandBox() {
File file = new File("");
CruiseConfig cruiseConfig = new BasicCruiseConfig();
cruiseConfig.setServerConfig(new ServerConfig(file.getAbsolutePath(), null));
ArtifactDirValidator dirValidator = new ArtifactDirValidator();
try {
dirValidator.validate(cruiseConfig);
fail("should throw exception, see dot will make server check out the repository in the wrong place.");
} catch (Exception e) {
}
}
|
protected boolean isPassed() {
return !agentHealthHolder.hasLostContact();
}
|
@Test
void shouldReturnTrueIfAgentHasNotLostContact() {
AgentHealthHolder mock = mock(AgentHealthHolder.class);
when(mock.hasLostContact()).thenReturn(false);
IsConnectedToServerV1 handler = new IsConnectedToServerV1(mock);
assertThat(handler.isPassed()).isTrue();
}
|
public static <T> CompletableFuture<T> run(Callable<T> callable) {
CompletableFuture<T> result = new CompletableFuture<>();
CompletableFuture.runAsync(
() -> {
// we need to explicitly catch any exceptions,
// otherwise they will be silently discarded
try {
result.complete(callable.call());
} catch (Throwable e) {
result.completeExceptionally(e);
}
},
executor);
return result;
}
|
@Test
public void testRunException() {
assertThrows(
ExecutionException.class,
() -> {
Async.run(
() -> {
throw new RuntimeException("");
})
.get();
});
}
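
A short, self-contained sketch of the propagation behavior the test checks: the Callable's exception is captured via completeExceptionally and resurfaces as the cause of the ExecutionException thrown by get(). The common pool is used here for runnability; the snippet above submits to a pre-existing executor field.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

public class AsyncRunSketch {
    public static void main(String[] args) throws InterruptedException {
        CompletableFuture<String> result = new CompletableFuture<>();
        CompletableFuture.runAsync(() -> {
            try {
                result.complete(failingCall());
            } catch (Throwable e) {
                // Without this, the exception would be swallowed by runAsync's own future.
                result.completeExceptionally(e);
            }
        });
        try {
            result.get();
        } catch (ExecutionException e) {
            // The original RuntimeException is preserved as the cause.
            System.out.println(e.getCause()); // java.lang.RuntimeException: boom
        }
    }

    static String failingCall() {
        throw new RuntimeException("boom");
    }
}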
|
public static <
EventTypeT,
EventKeyTypeT,
ResultTypeT,
StateTypeT extends MutableState<EventTypeT, ResultTypeT>>
OrderedEventProcessor<EventTypeT, EventKeyTypeT, ResultTypeT, StateTypeT> create(
OrderedProcessingHandler<EventTypeT, EventKeyTypeT, StateTypeT, ResultTypeT> handler) {
return new AutoValue_OrderedEventProcessor<>(handler);
}
|
@Test
public void testSequenceGapProcessingInBufferedOutput() throws CannotProvideCoderException {
int maxResultsPerOutput = 3;
long[] sequences = new long[] {2, 3, 7, 8, 9, 10, 1, 4, 5, 6};
List<Event> events = new ArrayList<>(sequences.length);
List<KV<String, String>> expectedOutput = new ArrayList<>(sequences.length);
StringBuilder output = new StringBuilder();
String outputPerElement = ".";
String key = "id-1";
for (long sequence : sequences) {
events.add(Event.create(sequence, key, outputPerElement));
output.append(outputPerElement);
expectedOutput.add(KV.of(key, output.toString()));
}
int numberOfReceivedEvents = 0;
Collection<KV<String, OrderedProcessingStatus>> expectedStatuses = new ArrayList<>();
// First elements are out-of-sequence and they just get buffered. Earliest and latest sequence
// numbers keep changing.
expectedStatuses.add(
KV.of(
key,
OrderedProcessingStatus.create(
null, 1, 2L, 2L, ++numberOfReceivedEvents, 0L, 0, false)));
expectedStatuses.add(
KV.of(
key,
OrderedProcessingStatus.create(
null, 2, 2L, 3L, ++numberOfReceivedEvents, 0L, 0, false)));
expectedStatuses.add(
KV.of(
key,
OrderedProcessingStatus.create(
null, 3, 2L, 7L, ++numberOfReceivedEvents, 0L, 0, false)));
expectedStatuses.add(
KV.of(
key,
OrderedProcessingStatus.create(
null, 4, 2L, 8L, ++numberOfReceivedEvents, 0L, 0, false)));
expectedStatuses.add(
KV.of(
key,
OrderedProcessingStatus.create(
null, 5, 2L, 9L, ++numberOfReceivedEvents, 0L, 0, false)));
expectedStatuses.add(
KV.of(
key,
OrderedProcessingStatus.create(
null, 6, 2L, 10L, ++numberOfReceivedEvents, 0L, 0, false)));
// --- 1 has appeared and caused the batch to be sent out.
expectedStatuses.add(
KV.of(
key,
OrderedProcessingStatus.create(
3L, 4, 7L, 10L, ++numberOfReceivedEvents, 3L, 0, false)));
expectedStatuses.add(
KV.of(
key,
OrderedProcessingStatus.create(
4L, 4, 7L, 10L, ++numberOfReceivedEvents, 4L, 0, false)));
expectedStatuses.add(
KV.of(
key,
OrderedProcessingStatus.create(
5L, 4, 7L, 10L, ++numberOfReceivedEvents, 5L, 0, false)));
// --- 6 came and 6, 7, and 8 got output
expectedStatuses.add(
KV.of(
key,
OrderedProcessingStatus.create(
8L, 2, 9L, 10L, ++numberOfReceivedEvents, 8L, 0, false)));
    // The last timer run produces the final status. The number of received events doesn't
    // increase because this status is produced by timer processing, not by a new event.
expectedStatuses.add(
KV.of(
key,
OrderedProcessingStatus.create(
10L, 0, null, null, numberOfReceivedEvents, 10L, 0, false)));
testProcessing(
events.toArray(new Event[events.size()]),
expectedStatuses,
expectedOutput,
EMISSION_FREQUENCY_ON_EVERY_ELEMENT,
1L /* This dataset assumes 1 as the starting sequence */,
maxResultsPerOutput,
PRODUCE_STATUS_ON_EVERY_EVENT);
}
|