focal_method | test_case
---|---
public static List<HttpCookie> decodeCookies(List<String> cookieStrs)
{
List<HttpCookie> cookies = new ArrayList<>();
if (cookieStrs == null)
{
return cookies;
}
for (String cookieStr : cookieStrs)
{
if (cookieStr == null)
{
continue;
}
StringTokenizer tokenizer = new StringTokenizer(cookieStr, ";");
String nameValuePair;
HttpCookie cookieToBeAdd = null;
while (tokenizer.hasMoreTokens())
{
nameValuePair = tokenizer.nextToken();
int index = nameValuePair.indexOf('=');
if (index != -1)
{
String name = nameValuePair.substring(0, index).trim();
String value = stripOffSurrounding(nameValuePair.substring(index + 1).trim());
if (name.charAt(0) != '$')
{
if (cookieToBeAdd != null)
{
cookies.add(cookieToBeAdd);
}
cookieToBeAdd = new HttpCookie(name, value);
}
else if (cookieToBeAdd != null)
{
if (name.equals("$Path"))
{
cookieToBeAdd.setPath(value);
}
else if (name.equals("$Domain"))
{
cookieToBeAdd.setDomain(value);
}
else if (name.equals("$Port"))
{
cookieToBeAdd.setPortlist(value);
}
}
}
else
{
throw new IllegalArgumentException("Invalid cookie name-value pair");
}
}
if (cookieToBeAdd != null)
{
cookies.add(cookieToBeAdd);
}
}
return cookies;
}
|
@Test
public void testInvalidCookieFromClient()
{
cookieA.setComment("nothing important");
List<String> encodeStrs = Collections.singletonList("$Domain=.linkedin.com; $Port=80; $Path=/; $Version=0;");
List<HttpCookie> cookieList = CookieUtil.decodeCookies(encodeStrs);
Assert.assertEquals(0, cookieList.size());
}
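// Hedged usage sketch (not part of the source pair): a well-formed cookie
// string exercises the happy path the test above skips. java.net.HttpCookie is
// assumed; stripOffSurrounding is presumed to strip surrounding quotes.
List<HttpCookie> decoded = CookieUtil.decodeCookies(
Collections.singletonList("JSESSIONID=abc123; $Path=/; $Domain=.example.com"));
// decoded holds one HttpCookie named "JSESSIONID" whose path and domain were
// filled in by the trailing $-attributes.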
|
@Override
public void validatePostList(Collection<Long> ids) {
if (CollUtil.isEmpty(ids)) {
return;
}
// Load the post records
List<PostDO> posts = postMapper.selectBatchIds(ids);
Map<Long, PostDO> postMap = convertMap(posts, PostDO::getId);
// Validate each post
ids.forEach(id -> {
PostDO post = postMap.get(id);
if (post == null) {
throw exception(POST_NOT_FOUND);
}
if (!CommonStatusEnum.ENABLE.getStatus().equals(post.getStatus())) {
throw exception(POST_NOT_ENABLE, post.getName());
}
});
}
|
@Test
public void testValidatePostList_success() {
// mock data
PostDO postDO = randomPostDO().setStatus(CommonStatusEnum.ENABLE.getStatus());
postMapper.insert(postDO);
// prepare the parameters
List<Long> ids = singletonList(postDO.getId());
// invoke; no assertion needed
postService.validatePostList(ids);
}
|
public static <FnT extends DoFn<?, ?>> DoFnSignature signatureForDoFn(FnT fn) {
return getSignature(fn.getClass());
}
|
@Test
public void testGenericStatefulDoFn() throws Exception {
class DoFnForTestGenericStatefulDoFn<T> extends DoFn<KV<String, T>, Long> {
// Note that having a coder for T would require initialization in the constructor,
// but that isn't important for this test
@StateId("foo")
private final StateSpec<ValueState<T>> bizzle = null;
@ProcessElement
public void foo(ProcessContext context) {}
}
// Test classes at the bottom of the file
DoFn<KV<String, Integer>, Long> myDoFn = new DoFnForTestGenericStatefulDoFn<Integer>() {};
DoFnSignature sig = DoFnSignatures.signatureForDoFn(myDoFn);
assertThat(sig.stateDeclarations().size(), equalTo(1));
DoFnSignature.StateDeclaration decl = sig.stateDeclarations().get("foo");
assertThat(decl.id(), equalTo("foo"));
assertThat(
decl.field(), equalTo(DoFnForTestGenericStatefulDoFn.class.getDeclaredField("bizzle")));
assertThat(
decl.stateType(),
Matchers.<TypeDescriptor<?>>equalTo(new TypeDescriptor<ValueState<Integer>>() {}));
}
|
@Override
public Map<V, GeoPosition> pos(V... members) {
return get(posAsync(members));
}
|
@Test
public void testPos() {
RGeo<String> geo = redisson.getGeo("test");
geo.add(new GeoEntry(13.361389, 38.115556, "Palermo"), new GeoEntry(15.087269, 37.502669, "Catania"));
Map<String, GeoPosition> expected = new LinkedHashMap<>();
expected.put("Palermo", new GeoPosition(13.361389338970184, 38.115556395496299));
expected.put("Catania", new GeoPosition(15.087267458438873, 37.50266842333162));
assertThat(geo.pos("test2", "Palermo", "test3", "Catania", "test1")).isEqualTo(expected);
}
|
public static byte[] short2bytes(short num) {
byte[] result = new byte[2];
result[0] = (byte) (num >>> 8); // high-order 8 bits into index 0
result[1] = (byte) (num); // low-order 8 bits into index 1
return result;
}
|
@Test
public void short2bytes() {
short i = 0;
byte[] bs = CodecUtils.short2bytes(i);
Assert.assertArrayEquals(bs, new byte[] { 0, 0 });
i = 1000;
bs = CodecUtils.short2bytes(i);
Assert.assertArrayEquals(bs, new byte[] { 3, -24 });
short s = 258; // =1*256+2
bs = CodecUtils.short2bytes(s);
Assert.assertEquals(bs[0], 1);
Assert.assertEquals(bs[1], 2);
}
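// Hedged sketch of the bit arithmetic for a negative input, using only the
// casts shown in the focal method (no CodecUtils internals assumed):
short s2 = -2; // 0xFFFE
byte hi = (byte) (s2 >>> 8); // low byte of (0xFFFFFFFE >>> 8) is 0xFF -> -1
byte lo = (byte) s2; // low byte is 0xFE -> -2
// So short2bytes((short) -2) yields { -1, -2 }.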
|
public static <T> AsSingleton<T> asSingleton() {
return new AsSingleton<>();
}
|
@Test
@Category(ValidatesRunner.class)
public void testSingletonSideInput() {
final PCollectionView<Integer> view =
pipeline.apply("Create47", Create.of(47)).apply(View.asSingleton());
PCollection<Integer> output =
pipeline
.apply("Create123", Create.of(1, 2, 3))
.apply(
"OutputSideInputs",
ParDo.of(
new DoFn<Integer, Integer>() {
@ProcessElement
public void processElement(ProcessContext c) {
c.output(c.sideInput(view));
}
})
.withSideInputs(view));
PAssert.that(output).containsInAnyOrder(47, 47, 47);
pipeline.run();
}
|
public static String toOSStyleKey(String key) {
key = key.toUpperCase().replaceAll(DOT_REGEX, UNDERLINE_SEPARATOR);
if (!key.startsWith("DUBBO_")) {
key = "DUBBO_" + key;
}
return key;
}
|
@Test
void testToOSStyleKey() {
assertEquals("DUBBO_TAG1", StringUtils.toOSStyleKey("dubbo_tag1"));
assertEquals("DUBBO_TAG1", StringUtils.toOSStyleKey("dubbo.tag1"));
assertEquals("DUBBO_TAG1_TAG11", StringUtils.toOSStyleKey("dubbo.tag1.tag11"));
assertEquals("DUBBO_TAG1", StringUtils.toOSStyleKey("tag1"));
}
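// Hedged equivalent in plain Java, assuming DOT_REGEX matches "." and
// UNDERLINE_SEPARATOR is "_" (consistent with the assertions above):
String raw = "dubbo.labels.tag";
String os = raw.toUpperCase().replaceAll("\\.", "_"); // "DUBBO_LABELS_TAG"
// Already prefixed with "DUBBO_", so toOSStyleKey would return it unchanged.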
|
@Override
public boolean processArgument(final ShenyuRequest shenyuRequest, final Annotation annotation, final Object arg) {
String name = ANNOTATION.cast(annotation).value();
RequestTemplate requestTemplate = shenyuRequest.getRequestTemplate();
checkState(emptyToNull(name) != null, "RequestHeader.value() was empty on parameter %s", requestTemplate.getMethod().getName());
Map<String, Collection<String>> headers = shenyuRequest.getHeaders();
if (arg instanceof Map) {
((Map<?, ?>) arg).forEach((key, value) -> {
if (key instanceof String && value instanceof Collection) {
headers.put((String) key, (Collection) value);
shenyuRequest.setHeaders(headers);
} else if (key instanceof String && value instanceof String) {
headers.compute((String) key, (header, old) -> {
if (CollectionUtils.isEmpty(old)) {
return Lists.newArrayList((String) value);
}
old.add((String) value);
return old;
});
shenyuRequest.setHeaders(headers);
}
});
} else if (arg instanceof String) {
Collection<String> headerColl = Optional.ofNullable(headers.get(name)).orElseGet(ArrayList::new);
headerColl.add((String) arg);
headers.put(name, headerColl);
shenyuRequest.setHeaders(headers);
}
return true;
}
|
@Test
public void processArgumentEmptyTest() {
final RequestHeader header = spy(RequestHeader.class);
when(header.value()).thenReturn("");
assertThrows(IllegalStateException.class, () -> processor.processArgument(request, header, "value1"));
}
|
@Override
public void check( List<CheckResultInterface> remarks, TransMeta transMeta,
StepMeta stepMeta, RowMetaInterface prev, String[] input, String[] output,
RowMetaInterface info, VariableSpace space, Repository repository,
IMetaStore metaStore ) {
super.check( remarks, transMeta, stepMeta, prev, input, output, info, space, repository, metaStore );
StepOption.checkInteger( remarks, stepMeta, space, getString( PKG, "MQTTDialog.Options.KEEP_ALIVE_INTERVAL" ),
keepAliveInterval );
StepOption
.checkInteger( remarks, stepMeta, space, getString( PKG, "MQTTDialog.Options.MAX_INFLIGHT" ), maxInflight );
StepOption.checkInteger( remarks, stepMeta, space, getString( PKG, "MQTTDialog.Options.CONNECTION_TIMEOUT" ),
connectionTimeout );
StepOption
.checkBoolean( remarks, stepMeta, space, getString( PKG, "MQTTDialog.Options.CLEAN_SESSION" ), cleanSession );
checkVersion( remarks, stepMeta, space, mqttVersion );
StepOption.checkBoolean( remarks, stepMeta, space, getString( PKG, "MQTTDialog.Options.AUTOMATIC_RECONNECT" ),
automaticReconnect );
}
|
@Test
public void testCheckOptionsFail() {
List<CheckResultInterface> remarks = new ArrayList<>();
MQTTProducerMeta meta = new MQTTProducerMeta();
meta.mqttServer = "theserver:1883";
meta.clientId = "client100";
meta.topic = "newtopic";
meta.qos = "2";
meta.messageField = "Messages";
meta.username = "testuser";
meta.keepAliveInterval = "asdf";
meta.maxInflight = "asdf";
meta.connectionTimeout = "asdf";
meta.cleanSession = "asdf";
meta.automaticReconnect = "adsf";
meta.mqttVersion = "asdf";
meta.check( remarks, null, null, null, null, null, null, new Variables(), null, null );
assertEquals( 6, remarks.size() );
assertTrue( remarks.get( 0 ).getText()
.contains( BaseMessages.getString( PKG, "MQTTDialog.Options." + KEEP_ALIVE_INTERVAL ) ) );
assertTrue(
remarks.get( 1 ).getText().contains( BaseMessages.getString( PKG, "MQTTDialog.Options." + MAX_INFLIGHT ) ) );
assertTrue( remarks.get( 2 ).getText()
.contains( BaseMessages.getString( PKG, "MQTTDialog.Options." + CONNECTION_TIMEOUT ) ) );
assertTrue(
remarks.get( 3 ).getText().contains( BaseMessages.getString( PKG, "MQTTDialog.Options." + CLEAN_SESSION ) ) );
assertTrue(
remarks.get( 4 ).getText().contains( BaseMessages.getString( PKG, "MQTTDialog.Options." + MQTT_VERSION ) ) );
assertTrue( remarks.get( 5 ).getText()
.contains( BaseMessages.getString( PKG, "MQTTDialog.Options." + AUTOMATIC_RECONNECT ) ) );
}
|
public static double[] toDoubleArray(String name, Object value) {
try {
if (value instanceof BigDecimal[]) {
return Arrays.stream((BigDecimal[]) value).mapToDouble(BigDecimal::doubleValue).toArray();
} else if (value instanceof double[]) {
return (double[]) value;
} else if (value instanceof List) {
return ((List<?>) value)
.stream().mapToDouble(d -> new BigDecimal(String.valueOf(d)).doubleValue()).toArray();
} else {
throw new MaestroInternalError(
"Param [%s] has an invalid evaluated result [%s]", name, toTruncateString(value));
}
} catch (NumberFormatException nfe) {
throw new MaestroInternalError(
nfe,
"Invalid number format for evaluated result: %s for param [%s]",
toTruncateString(value),
name);
}
}
|
@Test
public void testDecimalArrayToDoubleArray() {
Object val =
new BigDecimal[] {new BigDecimal("1.2"), new BigDecimal("3.4"), new BigDecimal("5.6")};
double[] actual = ParamHelper.toDoubleArray("foo", val);
assertEquals(1.2, actual[0], 0.00000000);
assertEquals(3.4, actual[1], 0.00000000);
assertEquals(5.6, actual[2], 0.00000000);
}
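// Hedged sketch of the List branch: each element is round-tripped through
// new BigDecimal(String.valueOf(d)), so mixed numeric types convert cleanly.
double[] fromList = ParamHelper.toDoubleArray("bar", Arrays.asList(1, 2.5f, "3.5"));
// {1.0, 2.5, 3.5}; a non-numeric element would surface as a MaestroInternalError
// via the NumberFormatException handler.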
|
@Override
public int getOrder() {
return PluginEnum.RATE_LIMITER.getCode();
}
|
@Test
public void getOrderTest() {
assertEquals(PluginEnum.RATE_LIMITER.getCode(), rateLimiterPlugin.getOrder());
}
|
public static <T> byte[] write(Writer<T> writer, T value) {
byte[] result = new byte[writer.sizeInBytes(value)];
WriteBuffer b = WriteBuffer.wrap(result);
try {
writer.write(value, b);
} catch (RuntimeException e) {
int lengthWritten = result.length;
for (int i = 0; i < result.length; i++) {
if (result[i] == 0) {
lengthWritten = i;
break;
}
}
// Don't use value directly in the message, as its toString might be implemented using this
// method. If that's the case, we'd stack overflow. Instead, emit what we've written so far.
String message =
format(
"Bug found using %s to write %s as json. Wrote %s/%s bytes: %s",
writer.getClass().getSimpleName(),
value.getClass().getSimpleName(),
lengthWritten,
result.length,
new String(result, 0, lengthWritten, UTF_8));
throw Platform.get().assertionError(message, e);
}
return result;
}
|
@Test void doesntStackOverflowOnToBufferWriterBug_Overflow() {
// pretend there was a bug calculating size, e.g. it was incorrectly calculated as too small
class FooWriter implements WriteBuffer.Writer {
@Override public int sizeInBytes(Object value) {
return 2;
}
@Override public void write(Object value, WriteBuffer buffer) {
buffer.writeByte('a');
buffer.writeByte('b');
buffer.writeByte('c'); // wrote larger than size!
}
}
class Foo {
@Override public String toString() {
return new String(JsonWriter.write(new FooWriter(), this), UTF_8);
}
}
Foo foo = new Foo();
assertThatThrownBy(foo::toString)
.isInstanceOf(AssertionError.class)
.hasMessage("Bug found using FooWriter to write Foo as json. Wrote 2/2 bytes: ab");
}
|
@Override
public NacosUser authenticate(String username, String rawPassword) throws AccessException {
if (StringUtils.isBlank(username) || StringUtils.isBlank(rawPassword)) {
throw new AccessException("user not found!");
}
NacosUserDetails nacosUserDetails = (NacosUserDetails) userDetailsService.loadUserByUsername(username);
if (nacosUserDetails == null || !PasswordEncoderUtil.matches(rawPassword, nacosUserDetails.getPassword())) {
throw new AccessException("user not found!");
}
return new NacosUser(nacosUserDetails.getUsername(), jwtTokenManager.createToken(username));
}
|
@Test
void testAuthenticate5() {
assertThrows(AccessException.class, () -> {
abstractAuthenticationManager.authenticate("");
});
}
|
public static Object[] getMethodArguments(SofaRequest request) {
return request.getMethodArgs();
}
|
@Test
public void testGetMethodArguments() {
SofaRequest request = new SofaRequest();
request.setMethodArgs(new Object[]{"Sentinel", 2020});
Object[] arguments = SofaRpcUtils.getMethodArguments(request);
assertEquals(arguments.length, 2);
assertEquals("Sentinel", arguments[0]);
assertEquals(2020, arguments[1]);
}
|
public static synchronized Map<Pair<String, String>, String> getChanged() {
return CpeEcosystemCache.changed;
}
|
@Test
public void testGetChanged() {
Pair<String, String> key = new Pair<>("apache", "zookeeper");
Map<Pair<String, String>, String> map = new HashMap<>();
map.put(key, "java");
CpeEcosystemCache.setCache(map);
Map<Pair<String, String>, String> result = CpeEcosystemCache.getChanged();
assertTrue(result.isEmpty());
CpeEcosystemCache.getEcosystem("apache", "zookeeper", "java");
result = CpeEcosystemCache.getChanged();
assertTrue(result.isEmpty());
CpeEcosystemCache.getEcosystem("apache", "zookeeper", null);
result = CpeEcosystemCache.getChanged();
assertTrue(result.isEmpty());
// c++ differs from the cached java ecosystem, so the entry is recorded as changed
CpeEcosystemCache.getEcosystem("apache", "zookeeper", "c++");
result = CpeEcosystemCache.getChanged();
assertFalse(result.isEmpty());
}
|
@VisibleForTesting
static FlinkSecurityManager fromConfiguration(Configuration configuration) {
final ClusterOptions.UserSystemExitMode userSystemExitMode =
configuration.get(ClusterOptions.INTERCEPT_USER_SYSTEM_EXIT);
boolean haltOnSystemExit = configuration.get(ClusterOptions.HALT_ON_FATAL_ERROR);
// If no check is needed, return null so that the caller can skip installing a
// security manager and avoid any runtime cost.
if (userSystemExitMode == ClusterOptions.UserSystemExitMode.DISABLED && !haltOnSystemExit) {
return null;
}
LOG.info(
"FlinkSecurityManager is created with {} user system exit mode and {} exit",
userSystemExitMode,
haltOnSystemExit ? "forceful" : "graceful");
// Add more configuration parameters that need user security manager (currently only for
// system exit).
return new FlinkSecurityManager(userSystemExitMode, haltOnSystemExit);
}
|
@Test
void testHaltConfiguration() {
// Halt as forceful shutdown replacing graceful system exit
Configuration configuration = new Configuration();
configuration.set(ClusterOptions.HALT_ON_FATAL_ERROR, true);
FlinkSecurityManager flinkSecurityManager =
FlinkSecurityManager.fromConfiguration(configuration);
assertThat(flinkSecurityManager).isNotNull();
}
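// Hedged counterpart to the test above: assuming the defaults are
// UserSystemExitMode.DISABLED and HALT_ON_FATAL_ERROR=false, no check is
// needed and fromConfiguration returns null instead of a manager.
FlinkSecurityManager noop = FlinkSecurityManager.fromConfiguration(new Configuration());
// noop == null, so the caller installs nothing and pays no runtime cost.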
|
@Override
public void swap(int i, int j) {
final int segmentNumberI = i / this.indexEntriesPerSegment;
final int segmentOffsetI = (i % this.indexEntriesPerSegment) * this.indexEntrySize;
final int segmentNumberJ = j / this.indexEntriesPerSegment;
final int segmentOffsetJ = (j % this.indexEntriesPerSegment) * this.indexEntrySize;
swap(segmentNumberI, segmentOffsetI, segmentNumberJ, segmentOffsetJ);
}
|
@Test
void testSwap() throws Exception {
final int numSegments = MEMORY_SIZE / MEMORY_PAGE_SIZE;
final List<MemorySegment> memory =
this.memoryManager.allocatePages(new DummyInvokable(), numSegments);
NormalizedKeySorter<Tuple2<Integer, String>> sorter = newSortBuffer(memory);
TestData.TupleGenerator generator =
new TestData.TupleGenerator(
SEED, KEY_MAX, VALUE_LENGTH, KeyMode.RANDOM, ValueMode.RANDOM_LENGTH);
// write the records
Tuple2<Integer, String> record = new Tuple2<>();
int num = -1;
do {
generator.next(record);
num++;
} while (sorter.write(record));
// swap the records
int start = 0, end = num - 1;
while (start < end) {
sorter.swap(start++, end--);
}
// re-read the records
generator.reset();
Tuple2<Integer, String> readTarget = new Tuple2<>();
int i = num - 1;
while (i >= 0) {
generator.next(record);
readTarget = sorter.getRecord(readTarget, i--);
int rk = readTarget.f0;
int gk = record.f0;
String rv = readTarget.f1;
String gv = record.f1;
assertThat(rk).withFailMessage("The re-read key is wrong").isEqualTo(gk);
assertThat(rv).withFailMessage("The re-read value is wrong").isEqualTo(gv);
}
// release the memory occupied by the buffers
sorter.dispose();
this.memoryManager.release(memory);
}
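// Hedged sketch of the index arithmetic in the focal swap(int, int), with
// illustrative sizes (4 entries per segment, 8-byte entries):
int entriesPerSegment = 4, entrySize = 8;
int logicalIndex = 5;
int segmentNumber = logicalIndex / entriesPerSegment; // 1
int segmentOffset = (logicalIndex % entriesPerSegment) * entrySize; // 8
// Entry 5 therefore lives in segment 1 at byte offset 8.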
|
public void tryLock() {
try {
if (!lock.tryLock()) {
failAlreadyInProgress(null);
}
} catch (OverlappingFileLockException e) {
failAlreadyInProgress(e);
}
}
|
@Test
public void tryLock() {
Path lockFilePath = worDir.toPath().resolve(DirectoryLock.LOCK_FILE_NAME);
lock.tryLock();
assertThat(Files.exists(lockFilePath)).isTrue();
assertThat(Files.isRegularFile(lockFilePath)).isTrue();
lock.stop();
assertThat(Files.exists(lockFilePath)).isTrue();
}
|
@Override
public EurekaHttpResponse<InstanceInfo> sendHeartBeat(String appName, String id, InstanceInfo info, InstanceStatus overriddenStatus) {
String urlPath = "apps/" + appName + '/' + id;
Response response = null;
try {
WebTarget webResource = jerseyClient.target(serviceUrl)
.path(urlPath)
.queryParam("status", info.getStatus().toString())
.queryParam("lastDirtyTimestamp", info.getLastDirtyTimestamp().toString());
if (overriddenStatus != null) {
webResource = webResource.queryParam("overriddenstatus", overriddenStatus.name());
}
Builder requestBuilder = webResource.request();
addExtraHeaders(requestBuilder);
response = requestBuilder.accept(MediaType.APPLICATION_JSON_TYPE).put(Entity.entity("{}", MediaType.APPLICATION_JSON_TYPE)); // Jersey2 refuses to handle PUT with no body
InstanceInfo infoFromPeer = null;
if (response.getStatus() == Status.CONFLICT.getStatusCode() && response.hasEntity()) {
infoFromPeer = response.readEntity(InstanceInfo.class);
}
return anEurekaHttpResponse(response.getStatus(), infoFromPeer).type(MediaType.APPLICATION_JSON_TYPE).build();
} finally {
if (logger.isDebugEnabled()) {
logger.debug("[heartbeat] Jersey HTTP PUT {}; statusCode={}", urlPath, response == null ? "N/A" : response.getStatus());
}
if (response != null) {
response.close();
}
}
}
|
@Test
public void testHeartbeatReplicationWithNoResponseBody() throws Exception {
serverMockClient.when(
request()
.withMethod("PUT")
.withHeader(header(PeerEurekaNode.HEADER_REPLICATION, "true"))
.withPath("/eureka/v2/apps/" + instanceInfo.getAppName() + '/' + instanceInfo.getId())
).respond(
response().withStatusCode(200)
);
EurekaHttpResponse<InstanceInfo> response = replicationClient.sendHeartBeat(instanceInfo.getAppName(), instanceInfo.getId(), instanceInfo, InstanceStatus.DOWN);
assertThat(response.getStatusCode(), is(equalTo(200)));
assertThat(response.getEntity(), is(nullValue()));
}
|
@Override
public Path find() throws BackgroundException {
final String directory;
try {
directory = session.getClient().printWorkingDirectory();
if(null == directory) {
throw new FTPException(session.getClient().getReplyCode(), session.getClient().getReplyString());
}
return new Path(PathNormalizer.normalize(directory), directory.equals(String.valueOf(Path.DELIMITER)) ? EnumSet.of(Path.Type.volume, Path.Type.directory) : EnumSet.of(Path.Type.directory));
}
catch(IOException e) {
throw new FTPExceptionMappingService().map(e);
}
}
|
@Test
public void testDefaultPath() throws Exception {
assertEquals("/", new FTPWorkdirService(session).find().getAbsolute());
}
|
@Override
public PageData<Asset> findAssetsByTenantIdAndCustomerIdAndType(UUID tenantId, UUID customerId, String type, PageLink pageLink) {
return DaoUtil.toPageData(assetRepository
.findByTenantIdAndCustomerIdAndType(
tenantId,
customerId,
type,
pageLink.getTextSearch(),
DaoUtil.toPageable(pageLink)));
}
|
@Test
public void testFindAssetsByTenantIdAndCustomerIdAndType() {
String type = "TYPE_2";
String testLabel = "test_label";
assets.add(saveAsset(Uuids.timeBased(), tenantId2, customerId2, "TEST_ASSET", type, testLabel));
List<Asset> foundedAssetsByType = assetDao
.findAssetsByTenantIdAndCustomerIdAndType(tenantId2, customerId2, type, new PageLink(3)).getData();
compareFoundedAssetByType(foundedAssetsByType, type);
List<Asset> foundedAssetsByTypeAndLabel = assetDao
.findAssetsByTenantIdAndCustomerIdAndType(tenantId2, customerId2, type, new PageLink(3, 0, testLabel)).getData();
assertEquals(1, foundedAssetsByTypeAndLabel.size());
}
|
public static SchemaAndValue parseString(String value) {
if (value == null) {
return NULL_SCHEMA_AND_VALUE;
}
if (value.isEmpty()) {
return new SchemaAndValue(Schema.STRING_SCHEMA, value);
}
ValueParser parser = new ValueParser(new Parser(value));
return parser.parse(false);
}
|
@Test
public void shouldParseLongAsInt64() {
Long value = Long.MAX_VALUE;
SchemaAndValue schemaAndValue = Values.parseString(
String.valueOf(value)
);
assertEquals(Schema.INT64_SCHEMA, schemaAndValue.schema());
assertInstanceOf(Long.class, schemaAndValue.value());
assertEquals(value.longValue(), ((Long) schemaAndValue.value()).longValue());
value = Long.MIN_VALUE;
schemaAndValue = Values.parseString(
String.valueOf(value)
);
assertEquals(Schema.INT64_SCHEMA, schemaAndValue.schema());
assertInstanceOf(Long.class, schemaAndValue.value());
assertEquals(value.longValue(), ((Long) schemaAndValue.value()).longValue());
}
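// Hedged sketch of the two guard clauses in the focal method:
SchemaAndValue forNull = Values.parseString(null); // NULL_SCHEMA_AND_VALUE
SchemaAndValue forEmpty = Values.parseString(""); // STRING_SCHEMA wrapping ""
// Only non-empty input reaches the ValueParser, which the test exercises for longs.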
|
public static int fromLogical(Schema schema, java.util.Date value) {
if (!(LOGICAL_NAME.equals(schema.name())))
throw new DataException("Requested conversion of Date object but the schema does not match.");
Calendar calendar = Calendar.getInstance(UTC);
calendar.setTime(value);
if (calendar.get(Calendar.HOUR_OF_DAY) != 0 || calendar.get(Calendar.MINUTE) != 0 ||
calendar.get(Calendar.SECOND) != 0 || calendar.get(Calendar.MILLISECOND) != 0) {
throw new DataException("Kafka Connect Date type should not have any time fields set to non-zero values.");
}
long unixMillis = calendar.getTimeInMillis();
return (int) (unixMillis / MILLIS_PER_DAY);
}
|
@Test
public void testFromLogical() {
assertEquals(0, Date.fromLogical(Date.SCHEMA, EPOCH.getTime()));
assertEquals(10000, Date.fromLogical(Date.SCHEMA, EPOCH_PLUS_TEN_THOUSAND_DAYS.getTime()));
}
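// Hedged sketch mirroring EPOCH_PLUS_TEN_THOUSAND_DAYS: build a pure date in
// UTC (all time-of-day fields zero) and convert it to days since epoch. Date
// here is the Kafka Connect logical type, not java.util.Date.
Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
cal.setTimeInMillis(0); // epoch, midnight UTC
cal.add(Calendar.DATE, 10000);
int days = Date.fromLogical(Date.SCHEMA, cal.getTime()); // 10000
// Any non-zero time component would trigger the DataException above.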
|
@Override
public Set<Subnet> subnets() {
return osNetworkStore.subnets();
}
|
@Test
public void testGetSubnets() {
createBasicNetworks();
assertEquals("Number of subnet did not match", 1, target.subnets().size());
}
|
@Override
String getProperty(String key) {
String checkedKey = checkPropertyName(key);
if (checkedKey == null) {
final String upperCaseKey = key.toUpperCase();
if (!upperCaseKey.equals(key)) {
checkedKey = checkPropertyName(upperCaseKey);
}
}
if (checkedKey == null) {
return null;
}
return env.get(checkedKey);
}
|
@Test
void testGetEnvForUpperCaseKeyWithHyphenAndDot() {
assertEquals("value4", systemEnvPropertySource.getProperty("TEST_CASE.4"));
}
|
static MetricRegistry getOrCreateMetricRegistry(Registry camelRegistry, String registryName) {
LOG.debug("Looking up MetricRegistry from Camel Registry for name \"{}\"", registryName);
MetricRegistry result = getMetricRegistryFromCamelRegistry(camelRegistry, registryName);
if (result == null) {
LOG.debug("MetricRegistry not found from Camel Registry for name \"{}\"", registryName);
LOG.info("Creating new default MetricRegistry");
result = createMetricRegistry();
}
return result;
}
|
@Test
public void testGetOrCreateMetricRegistryFoundInCamelRegistry() {
when(camelRegistry.lookupByNameAndType("name", MetricRegistry.class)).thenReturn(metricRegistry);
MetricRegistry result = MetricsComponent.getOrCreateMetricRegistry(camelRegistry, "name");
assertThat(result, is(metricRegistry));
inOrder.verify(camelRegistry, times(1)).lookupByNameAndType("name", MetricRegistry.class);
inOrder.verifyNoMoreInteractions();
}
|
@Description("Returns the bounding rectangular polygon of a Geometry")
@ScalarFunction("ST_Envelope")
@SqlType(GEOMETRY_TYPE_NAME)
public static Slice stEnvelope(@SqlType(GEOMETRY_TYPE_NAME) Slice input)
{
Envelope envelope = deserializeEnvelope(input);
if (envelope.isEmpty()) {
return EMPTY_POLYGON;
}
return EsriGeometrySerde.serialize(envelope);
}
|
@Test
public void testSTEnvelope()
{
assertFunction("ST_AsText(ST_Envelope(ST_GeometryFromText('MULTIPOINT (1 2, 2 4, 3 6, 4 8)')))", VARCHAR, "POLYGON ((1 2, 1 8, 4 8, 4 2, 1 2))");
assertFunction("ST_AsText(ST_Envelope(ST_GeometryFromText('LINESTRING EMPTY')))", VARCHAR, "POLYGON EMPTY");
assertFunction("ST_AsText(ST_Envelope(ST_GeometryFromText('LINESTRING (1 1, 2 2, 1 3)')))", VARCHAR, "POLYGON ((1 1, 1 3, 2 3, 2 1, 1 1))");
assertFunction("ST_AsText(ST_Envelope(ST_GeometryFromText('LINESTRING (8 4, 5 7)')))", VARCHAR, "POLYGON ((5 4, 5 7, 8 7, 8 4, 5 4))");
assertFunction("ST_AsText(ST_Envelope(ST_GeometryFromText('MULTILINESTRING ((1 1, 5 1), (2 4, 4 4))')))", VARCHAR, "POLYGON ((1 1, 1 4, 5 4, 5 1, 1 1))");
assertFunction("ST_AsText(ST_Envelope(ST_GeometryFromText('POLYGON ((1 1, 4 1, 1 4, 1 1))')))", VARCHAR, "POLYGON ((1 1, 1 4, 4 4, 4 1, 1 1))");
assertFunction("ST_AsText(ST_Envelope(ST_GeometryFromText('MULTIPOLYGON (((1 1, 1 3, 3 3, 3 1, 1 1)), ((0 0, 0 2, 2 2, 2 0, 0 0)))')))", VARCHAR, "POLYGON ((0 0, 0 3, 3 3, 3 0, 0 0))");
assertFunction("ST_AsText(ST_Envelope(ST_GeometryFromText('GEOMETRYCOLLECTION (POINT (5 1), LINESTRING (3 4, 4 4))')))", VARCHAR, "POLYGON ((3 1, 3 4, 5 4, 5 1, 3 1))");
}
|
public final void registerGlobal(final Serializer serializer) {
registerGlobal(serializer, false);
}
|
@Test(expected = IllegalStateException.class)
public void testGlobalRegister_doubleRegistration() {
abstractSerializationService.registerGlobal(new StringBufferSerializer(true));
abstractSerializationService.registerGlobal(new StringBufferSerializer(true));
}
|
@Override
public void shutdown() throws NacosException {
NAMING_LOGGER.warn("[NamingHttpClientManager] Start destroying NacosRestTemplate");
try {
HttpClientBeanHolder.shutdownNacosSyncRest(HTTP_CLIENT_FACTORY.getClass().getName());
} catch (Exception ex) {
NAMING_LOGGER.error("[NamingHttpClientManager] An exception occurred when the HTTP client was closed : {}",
ExceptionUtil.getStackTrace(ex));
}
NAMING_LOGGER.warn("[NamingHttpClientManager] Destruction of the end");
}
|
@Test
void testShutdown() throws NoSuchFieldException, IllegalAccessException, NacosException, IOException {
//given
NamingHttpClientManager instance = NamingHttpClientManager.getInstance();
HttpClientRequest mockHttpClientRequest = Mockito.mock(HttpClientRequest.class);
Field requestClient = NacosRestTemplate.class.getDeclaredField("requestClient");
requestClient.setAccessible(true);
requestClient.set(instance.getNacosRestTemplate(), mockHttpClientRequest);
// when
NamingHttpClientManager.getInstance().shutdown();
// then
verify(mockHttpClientRequest, times(1)).close();
}
|
@Override
public void handle(SchedulerEvent event) {
switch (event.getType()) {
case NODE_ADDED:
if (!(event instanceof NodeAddedSchedulerEvent)) {
throw new RuntimeException("Unexpected event type: " + event);
}
NodeAddedSchedulerEvent nodeAddedEvent = (NodeAddedSchedulerEvent) event;
nodeMonitor.addNode(nodeAddedEvent.getContainerReports(),
nodeAddedEvent.getAddedRMNode());
break;
case NODE_REMOVED:
if (!(event instanceof NodeRemovedSchedulerEvent)) {
throw new RuntimeException("Unexpected event type: " + event);
}
NodeRemovedSchedulerEvent nodeRemovedEvent =
(NodeRemovedSchedulerEvent) event;
nodeMonitor.removeNode(nodeRemovedEvent.getRemovedRMNode());
break;
case NODE_UPDATE:
if (!(event instanceof NodeUpdateSchedulerEvent)) {
throw new RuntimeException("Unexpected event type: " + event);
}
NodeUpdateSchedulerEvent nodeUpdatedEvent = (NodeUpdateSchedulerEvent)
event;
nodeMonitor.updateNode(nodeUpdatedEvent.getRMNode());
break;
case NODE_RESOURCE_UPDATE:
if (!(event instanceof NodeResourceUpdateSchedulerEvent)) {
throw new RuntimeException("Unexpected event type: " + event);
}
NodeResourceUpdateSchedulerEvent nodeResourceUpdatedEvent =
(NodeResourceUpdateSchedulerEvent) event;
nodeMonitor.updateNodeResource(nodeResourceUpdatedEvent.getRMNode(),
nodeResourceUpdatedEvent.getResourceOption());
break;
// <-- IGNORED EVENTS : START -->
case APP_ADDED:
break;
case APP_REMOVED:
break;
case APP_ATTEMPT_ADDED:
break;
case APP_ATTEMPT_REMOVED:
break;
case CONTAINER_EXPIRED:
break;
case NODE_LABELS_UPDATE:
break;
case RELEASE_CONTAINER:
break;
case NODE_ATTRIBUTES_UPDATE:
break;
case KILL_RESERVED_CONTAINER:
break;
case MARK_CONTAINER_FOR_PREEMPTION:
break;
case MARK_CONTAINER_FOR_KILLABLE:
break;
case MARK_CONTAINER_FOR_NONKILLABLE:
break;
case MANAGE_QUEUE:
break;
// <-- IGNORED EVENTS : END -->
default:
LOG.error("Unknown event arrived at" +
"OpportunisticContainerAllocatorAMService: {}", event);
}
}
|
@Test(timeout = 60000)
public void testNodeRemovalDuringAllocate() throws Exception {
MockNM nm1 = new MockNM("h1:1234", 4096, rm.getResourceTrackerService());
MockNM nm2 = new MockNM("h2:1234", 4096, rm.getResourceTrackerService());
nm1.registerNode();
nm2.registerNode();
nm1.nodeHeartbeat(oppContainersStatus, true);
nm2.nodeHeartbeat(oppContainersStatus, true);
OpportunisticContainerAllocatorAMService amservice =
(OpportunisticContainerAllocatorAMService) rm
.getApplicationMasterService();
MockRMAppSubmissionData data =
MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm)
.withAppName("app")
.withUser("user")
.withAcls(null)
.withQueue("default")
.withUnmanagedAM(false)
.build();
RMApp app1 = MockRMAppSubmitter.submit(rm, data);
ApplicationAttemptId attemptId =
app1.getCurrentAppAttempt().getAppAttemptId();
MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2);
ResourceScheduler scheduler = rm.getResourceScheduler();
RMNode rmNode1 = rm.getRMContext().getRMNodes().get(nm1.getNodeId());
RMNode rmNode2 = rm.getRMContext().getRMNodes().get(nm2.getNodeId());
OpportunisticContainerContext ctxt = ((CapacityScheduler) scheduler)
.getApplicationAttempt(attemptId).getOpportunisticContainerContext();
// Both node 1 and node 2 will be applicable for scheduling.
nm1.nodeHeartbeat(oppContainersStatus, true);
nm2.nodeHeartbeat(oppContainersStatus, true);
for (int i = 0; i < 10; i++) {
am1.allocate(
Arrays.asList(ResourceRequest.newInstance(Priority.newInstance(1),
"*", Resources.createResource(1 * GB), 2)),
null);
if (ctxt.getNodeMap().size() == 2) {
break;
}
Thread.sleep(50);
}
Assert.assertEquals(2, ctxt.getNodeMap().size());
// Remove node from scheduler but not from AM Service.
scheduler.handle(new NodeRemovedSchedulerEvent(rmNode1));
// After removal of node 1, only 1 node will be applicable for scheduling.
for (int i = 0; i < 10; i++) {
try {
am1.allocate(
Arrays.asList(ResourceRequest.newInstance(Priority.newInstance(1),
"*", Resources.createResource(1 * GB), 2)),
null);
} catch (Exception e) {
Assert.fail("Allocate request should be handled on node removal");
}
if (ctxt.getNodeMap().size() == 1) {
break;
}
Thread.sleep(50);
}
Assert.assertEquals(1, ctxt.getNodeMap().size());
}
|
@Override
public Map<K, V> getAll(Set<K> keys) {
return cache.getAll(keys);
}
|
@Test
public void testGetAll() {
cache.put(23, "value-23");
cache.put(42, "value-42");
Map<Integer, String> expectedResult = new HashMap<>();
expectedResult.put(23, "value-23");
expectedResult.put(42, "value-42");
Map<Integer, String> result = adapter.getAll(expectedResult.keySet());
assertEquals(expectedResult, result);
}
|
@Override
public TreeNode<T> next() throws NoSuchElementException {
if (pathStack.isEmpty()) {
throw new NoSuchElementException();
}
var next = pathStack.pop();
pushPathToNextSmallest(next.getRight());
return next;
}
|
@Test
void nextOverEntirePopulatedTree() {
var iter = new BstIterator<>(nonEmptyRoot);
assertEquals(Integer.valueOf(1), iter.next().getVal(), "First Node is 1.");
assertEquals(Integer.valueOf(3), iter.next().getVal(), "Second Node is 3.");
assertEquals(Integer.valueOf(4), iter.next().getVal(), "Third Node is 4.");
assertEquals(Integer.valueOf(5), iter.next().getVal(), "Fourth Node is 5.");
assertEquals(Integer.valueOf(6), iter.next().getVal(), "Fifth Node is 6.");
assertEquals(Integer.valueOf(7), iter.next().getVal(), "Sixth Node is 7.");
}
|
public static String queryToFetchAllFieldsOf(final AbstractDescribedSObjectBase object) {
if (object == null) {
return null;
}
final SObjectDescription description = object.description();
final List<SObjectField> fields = description.getFields();
return fields.stream().map(SObjectField::getName)
.collect(Collectors.joining(", ", "SELECT ", " FROM " + description.getName()));
}
|
@Test
public void shouldGenerateQueryForAllFields() {
assertThat(QueryHelper.queryToFetchAllFieldsOf(new Account()))
.isEqualTo("SELECT Id, IsDeleted, MasterRecordId, Name, Type, ParentId, BillingStreet, BillingCity, "
+ "BillingState, BillingPostalCode, BillingCountry, BillingLatitude, BillingLongitude, "
+ "BillingGeocodeAccuracy, BillingAddress, ShippingStreet, ShippingCity, ShippingState, "
+ "ShippingPostalCode, ShippingCountry, ShippingLatitude, ShippingLongitude, "
+ "ShippingGeocodeAccuracy, ShippingAddress, Phone, Fax, AccountNumber, Website, "
+ "PhotoUrl, Sic, Industry, AnnualRevenue, NumberOfEmployees, Ownership, TickerSymbol, "
+ "Description, Rating, Site, OwnerId, CreatedDate, CreatedById, LastModifiedDate, "
+ "LastModifiedById, SystemModstamp, LastActivityDate, LastViewedDate, LastReferencedDate, "
+ "Jigsaw, JigsawCompanyId, CleanStatus, AccountSource, DunsNumber, Tradestyle, NaicsCode, "
+ "NaicsDesc, YearStarted, SicDesc, DandbCompanyId, OperatingHoursId, Shipping_Location__Latitude__s, "
+ "Shipping_Location__Longitude__s, Shipping_Location__c, External_Id__c FROM Account");
}
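// Hedged sketch of the joining collector the focal method relies on, with a
// hypothetical two-field object:
List<String> fieldNames = Arrays.asList("Id", "Name");
String soql = fieldNames.stream()
.collect(Collectors.joining(", ", "SELECT ", " FROM Contact"));
// "SELECT Id, Name FROM Contact"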
|
<K, V> ShareInFlightBatch<K, V> fetchRecords(final Deserializers<K, V> deserializers,
final int maxRecords,
final boolean checkCrcs) {
// Creating an empty ShareInFlightBatch
ShareInFlightBatch<K, V> inFlightBatch = new ShareInFlightBatch<>(partition);
if (cachedBatchException != null) {
// In the event that a CRC check fails, reject the entire record batch because it is corrupt.
rejectRecordBatch(inFlightBatch, currentBatch);
inFlightBatch.setException(cachedBatchException);
cachedBatchException = null;
return inFlightBatch;
}
if (cachedRecordException != null) {
inFlightBatch.addAcknowledgement(lastRecord.offset(), AcknowledgeType.RELEASE);
inFlightBatch.setException(cachedRecordException);
cachedRecordException = null;
return inFlightBatch;
}
if (isConsumed)
return inFlightBatch;
initializeNextAcquired();
try {
int recordsInBatch = 0;
while (recordsInBatch < maxRecords) {
lastRecord = nextFetchedRecord(checkCrcs);
if (lastRecord == null) {
// Any remaining acquired records are gaps
while (nextAcquired != null) {
inFlightBatch.addGap(nextAcquired.offset);
nextAcquired = nextAcquiredRecord();
}
break;
}
while (nextAcquired != null) {
if (lastRecord.offset() == nextAcquired.offset) {
// It's acquired, so we parse it and add it to the batch
Optional<Integer> leaderEpoch = maybeLeaderEpoch(currentBatch.partitionLeaderEpoch());
TimestampType timestampType = currentBatch.timestampType();
ConsumerRecord<K, V> record = parseRecord(deserializers, partition, leaderEpoch,
timestampType, lastRecord, nextAcquired.deliveryCount);
inFlightBatch.addRecord(record);
recordsRead++;
bytesRead += lastRecord.sizeInBytes();
recordsInBatch++;
nextAcquired = nextAcquiredRecord();
break;
} else if (lastRecord.offset() < nextAcquired.offset) {
// It's not acquired, so we skip it
break;
} else {
// It's acquired, but there's no non-control record at this offset, so it's a gap
inFlightBatch.addGap(nextAcquired.offset);
}
nextAcquired = nextAcquiredRecord();
}
}
} catch (SerializationException se) {
nextAcquired = nextAcquiredRecord();
if (inFlightBatch.isEmpty()) {
inFlightBatch.addAcknowledgement(lastRecord.offset(), AcknowledgeType.RELEASE);
inFlightBatch.setException(se);
} else {
cachedRecordException = se;
inFlightBatch.setHasCachedException(true);
}
} catch (CorruptRecordException e) {
if (inFlightBatch.isEmpty()) {
// In the event that a CRC check fails, reject the entire record batch because it is corrupt.
rejectRecordBatch(inFlightBatch, currentBatch);
inFlightBatch.setException(e);
} else {
cachedBatchException = e;
inFlightBatch.setHasCachedException(true);
}
}
return inFlightBatch;
}
|
@Test
public void testAcquireOddRecords() {
long firstMessageId = 5;
int startingOffset = 0;
int numRecords = 10; // Records for 0-9
// Acquiring all odd Records
List<ShareFetchResponseData.AcquiredRecords> acquiredRecords = new ArrayList<>();
for (long i = 1; i <= 9; i += 2) {
acquiredRecords.add(acquiredRecords(i, 1).get(0));
}
ShareFetchResponseData.PartitionData partitionData = new ShareFetchResponseData.PartitionData()
.setRecords(newRecords(startingOffset, numRecords, firstMessageId))
.setAcquiredRecords(acquiredRecords);
Deserializers<String, String> deserializers = newStringDeserializers();
ShareCompletedFetch completedFetch = newShareCompletedFetch(partitionData);
List<ConsumerRecord<String, String>> records = completedFetch.fetchRecords(deserializers, 10, true).getInFlightRecords();
assertEquals(5, records.size());
// The first offset should be 1
ConsumerRecord<String, String> record = records.get(0);
assertEquals(1L, record.offset());
assertEquals(Optional.of((short) 1), record.deliveryCount());
// The second offset should be 3
record = records.get(1);
assertEquals(3L, record.offset());
assertEquals(Optional.of((short) 1), record.deliveryCount());
records = completedFetch.fetchRecords(deserializers, 10, true).getInFlightRecords();
assertEquals(0, records.size());
}
|
@Override
public boolean isIn(String ipAddress) {
if (ipAddress == null || addressList == null) {
return false;
}
return addressList.includes(ipAddress);
}
|
@Test
public void testWithEmptyList() throws IOException {
String[] ips = {};
createFileWithEntries ("ips.txt", ips);
IPList ipl = new FileBasedIPList("ips.txt");
assertFalse("110.113.221.222 is in the list",
ipl.isIn("110.113.221.222"));
}
|
@Override
public Mono<GetUnversionedProfileResponse> getUnversionedProfile(final GetUnversionedProfileRequest request) {
final AuthenticatedDevice authenticatedDevice = AuthenticationUtil.requireAuthenticatedDevice();
final ServiceIdentifier targetIdentifier =
ServiceIdentifierUtil.fromGrpcServiceIdentifier(request.getServiceIdentifier());
return validateRateLimitAndGetAccount(authenticatedDevice.accountIdentifier(), targetIdentifier)
.map(targetAccount -> ProfileGrpcHelper.buildUnversionedProfileResponse(targetIdentifier,
authenticatedDevice.accountIdentifier(),
targetAccount,
profileBadgeConverter));
}
|
@Test
void getUnversionedProfileTargetAccountNotFound() {
when(accountsManager.getByServiceIdentifierAsync(any())).thenReturn(CompletableFuture.completedFuture(Optional.empty()));
final GetUnversionedProfileRequest request = GetUnversionedProfileRequest.newBuilder()
.setServiceIdentifier(ServiceIdentifier.newBuilder()
.setIdentityType(IdentityType.IDENTITY_TYPE_ACI)
.setUuid(ByteString.copyFrom(UUIDUtil.toBytes(UUID.randomUUID())))
.build())
.build();
assertStatusException(Status.NOT_FOUND, () -> authenticatedServiceStub().getUnversionedProfile(request));
}
|
@Override
@SuppressWarnings("unchecked")
public <K, V> List<Map<K, V>> toMaps(DataTable dataTable, Type keyType, Type valueType) {
requireNonNull(dataTable, "dataTable may not be null");
requireNonNull(keyType, "keyType may not be null");
requireNonNull(valueType, "valueType may not be null");
if (dataTable.isEmpty()) {
return emptyList();
}
DataTableType keyConverter = registry.lookupCellTypeByType(keyType);
DataTableType valueConverter = registry.lookupCellTypeByType(valueType);
List<String> problems = new ArrayList<>();
if (keyConverter == null) {
problems.add(problemNoTableCellTransformer(keyType));
}
if (valueConverter == null) {
problems.add(problemNoTableCellTransformer(valueType));
}
if (!problems.isEmpty()) {
throw mapsNoConverterDefined(keyType, valueType, problems);
}
DataTable header = dataTable.rows(0, 1);
List<Map<K, V>> result = new ArrayList<>();
List<K> keys = unpack((List<List<K>>) keyConverter.transform(header.cells()));
DataTable rows = dataTable.rows(1);
if (rows.isEmpty()) {
return emptyList();
}
List<List<V>> transform = (List<List<V>>) valueConverter.transform(rows.cells());
for (List<V> values : transform) {
result.add(createMap(keyType, keys, valueType, values));
}
return unmodifiableList(result);
}
|
@Test
void to_maps_cant_convert_table_with_duplicate_keys() {
DataTable table = parse("",
"| 1 | 1 | 1 |",
"| 4 | 5 | 6 |",
"| 7 | 8 | 9 |");
CucumberDataTableException exception = assertThrows(
CucumberDataTableException.class,
() -> converter.toMaps(table, Integer.class, Integer.class));
assertThat(exception.getMessage(), is(format("" +
"Can't convert DataTable to Map<%s, %s>.\n" +
"Encountered duplicate key 1 with values 4 and 5",
typeName(Integer.class), typeName(Integer.class))));
}
|
@Override
public TypeDefinition build(
ProcessingEnvironment processingEnv, DeclaredType type, Map<String, TypeDefinition> typeCache) {
TypeDefinition td = new TypeDefinition(type.toString());
return td;
}
|
@Test
void testBuild() {
buildAndAssertTypeDefinition(processingEnv, vField, builder);
buildAndAssertTypeDefinition(processingEnv, zField, builder);
buildAndAssertTypeDefinition(processingEnv, cField, builder);
buildAndAssertTypeDefinition(processingEnv, sField, builder);
buildAndAssertTypeDefinition(processingEnv, iField, builder);
buildAndAssertTypeDefinition(processingEnv, lField, builder);
buildAndAssertTypeDefinition(processingEnv, fField, builder);
buildAndAssertTypeDefinition(processingEnv, dField, builder);
buildAndAssertTypeDefinition(processingEnv, strField, builder);
buildAndAssertTypeDefinition(processingEnv, bdField, builder);
buildAndAssertTypeDefinition(processingEnv, biField, builder);
buildAndAssertTypeDefinition(processingEnv, dtField, builder);
}
|
public static boolean isPubKeyCompressed(byte[] encoded) {
if (encoded.length == 33 && (encoded[0] == 0x02 || encoded[0] == 0x03))
return true;
else if (encoded.length == 65 && encoded[0] == 0x04)
return false;
else
throw new IllegalArgumentException(ByteUtils.formatHex(encoded));
}
|
@Test(expected = IllegalArgumentException.class)
public void isPubKeyCompressed_illegalSign() {
ECKey.isPubKeyCompressed(ByteUtils.parseHex("0438746c59d46d5408bf8b1d0af5740fe1a6e1703fcb56b2953f0b965c740d256f"));
}
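// Hedged sketch of the two accepted encodings (header byte plus length):
byte[] compressed = new byte[33];
compressed[0] = 0x02; // or 0x03
ECKey.isPubKeyCompressed(compressed); // true
byte[] uncompressed = new byte[65];
uncompressed[0] = 0x04;
ECKey.isPubKeyCompressed(uncompressed); // false
// Anything else, like the 33-byte 0x04 key above, throws IllegalArgumentException.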
|
public UniVocityFixedDataFormat setFieldLengths(int[] fieldLengths) {
this.fieldLengths = fieldLengths;
return this;
}
|
@Test
public void shouldConfigureLineSeparator() {
UniVocityFixedDataFormat dataFormat = new UniVocityFixedDataFormat()
.setFieldLengths(new int[] { 1, 2, 3 })
.setLineSeparator("ls");
assertEquals("ls", dataFormat.getLineSeparator());
assertEquals("ls", dataFormat.createAndConfigureWriterSettings().getFormat().getLineSeparatorString());
assertEquals("ls", dataFormat.createAndConfigureParserSettings().getFormat().getLineSeparatorString());
}
|
@Override
@SuppressFBWarnings(value = "EI_EXPOSE_REP")
public KsqlConfig getKsqlConfig() {
return ksqlConfig;
}
|
@Test
public void shouldIgnoreRecordsWithUnparseableKey() {
// Given:
addPollResult(
"badkey".getBytes(StandardCharsets.UTF_8),
"whocares".getBytes(StandardCharsets.UTF_8));
addPollResult(KafkaConfigStore.CONFIG_MSG_KEY, serializer.serialize("", savedProperties));
expectRead(consumerBefore);
// When:
getKsqlConfig();
// Then:
verifyDrainLog(consumerBefore, 2);
inOrder.verifyNoMoreInteractions();
}
|
public PathSpecSet copyAndRemovePrefix(PathSpec prefix)
{
if (isAllInclusive() || isEmpty())
{
// allInclusive or empty projections stay the same
return this;
}
// if the set contains the exact prefix or any ancestor of it, the result is an all-inclusive set
PathSpec partialPrefix = prefix;
do
{
if (getPathSpecs().contains(partialPrefix))
{
return allInclusive();
}
partialPrefix = partialPrefix.getParent();
} while (!partialPrefix.isEmptyPath());
List<String> prefixPathComponents = prefix.getPathComponents();
int prefixPathLength = prefixPathComponents.size();
return PathSpecSet.of(
getPathSpecs().stream()
.filter(pathSpec -> {
List<String> pathComponents = pathSpec.getPathComponents();
return pathComponents.size() > prefixPathLength && prefixPathComponents.equals(pathComponents.subList(0, prefixPathLength));
})
.map(pathSpec -> new PathSpec(pathSpec.getPathComponents().subList(prefixPathLength, pathSpec.getPathComponents().size()).toArray(new String[0])))
.collect(Collectors.toSet()));
}
|
@Test(dataProvider = "copyAndRemovePrefixProvider")
public void testCopyAndRemovePrefix(PathSpecSet input, PathSpec prefix, PathSpecSet expected) {
Assert.assertEquals(input.copyAndRemovePrefix(prefix), expected);
}
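// Hedged sketch of the trimming behavior, assuming the varargs PathSpecSet.of
// and the PathSpec(String...) constructor used in the focal method:
PathSpecSet set = PathSpecSet.of(
new PathSpec("a", "b"), new PathSpec("a", "c"), new PathSpec("d"));
PathSpecSet trimmed = set.copyAndRemovePrefix(new PathSpec("a"));
// trimmed == {b, c}: "d" lacks the prefix; had the set contained "a" itself,
// the result would collapse to allInclusive().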
|
@Operation(summary = "batchMoveByCodes", description = "MOVE_PROCESS_DEFINITION_NOTES")
@Parameters({
@Parameter(name = "codes", description = "PROCESS_DEFINITION_CODES", required = true, schema = @Schema(implementation = String.class, example = "3,4")),
@Parameter(name = "targetProjectCode", description = "TARGET_PROJECT_CODE", required = true, schema = @Schema(implementation = long.class, example = "123"))
})
@PostMapping(value = "/batch-move")
@ResponseStatus(HttpStatus.OK)
@ApiException(BATCH_MOVE_PROCESS_DEFINITION_ERROR)
public Result moveProcessDefinition(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@Parameter(name = "projectCode", description = "PROJECT_CODE", required = true) @PathVariable long projectCode,
@RequestParam(value = "codes", required = true) String codes,
@RequestParam(value = "targetProjectCode", required = true) long targetProjectCode) {
return returnDataList(
processDefinitionService.batchMoveProcessDefinition(loginUser, projectCode, codes, targetProjectCode));
}
|
@Test
public void testBatchMoveProcessDefinition() {
long projectCode = 1L;
long targetProjectCode = 2L;
String id = "1";
Map<String, Object> result = new HashMap<>();
putMsg(result, Status.SUCCESS);
Mockito.when(processDefinitionService.batchMoveProcessDefinition(user, projectCode, id, targetProjectCode))
.thenReturn(result);
Result response = processDefinitionController.moveProcessDefinition(user, projectCode, id, targetProjectCode);
Assertions.assertTrue(response != null && response.isSuccess());
}
|
public void logFrameIn(
final DirectBuffer buffer, final int offset, final int frameLength, final InetSocketAddress dstAddress)
{
final int length = frameLength + socketAddressLength(dstAddress);
final int captureLength = captureLength(length);
final int encodedLength = encodedLength(captureLength);
final ManyToOneRingBuffer ringBuffer = this.ringBuffer;
final int index = ringBuffer.tryClaim(toEventCodeId(FRAME_IN), encodedLength);
if (index > 0)
{
try
{
encode((UnsafeBuffer)ringBuffer.buffer(), index, captureLength, length, buffer, offset, dstAddress);
}
finally
{
ringBuffer.commit(index);
}
}
}
|
@Test
void logFrameIn()
{
final int recordOffset = align(100, ALIGNMENT);
logBuffer.putLong(CAPACITY + TAIL_POSITION_OFFSET, recordOffset);
final int length = 10_000;
final int captureLength = MAX_CAPTURE_LENGTH;
final int srcOffset = 4;
buffer.setMemory(srcOffset, MAX_CAPTURE_LENGTH, (byte)3);
final int encodedSocketLength = 12;
logger.logFrameIn(buffer, srcOffset, length, new InetSocketAddress("localhost", 5555));
verifyLogHeader(logBuffer, recordOffset, toEventCodeId(FRAME_IN), captureLength, length + encodedSocketLength);
assertEquals(5555, logBuffer.getInt(encodedMsgOffset(recordOffset + LOG_HEADER_LENGTH), LITTLE_ENDIAN));
assertEquals(srcOffset,
logBuffer.getInt(encodedMsgOffset(recordOffset + LOG_HEADER_LENGTH + SIZE_OF_INT), LITTLE_ENDIAN));
for (int i = 0; i < captureLength - encodedSocketLength; i++)
{
assertEquals(3,
logBuffer.getByte(encodedMsgOffset(recordOffset + LOG_HEADER_LENGTH + encodedSocketLength + i)));
}
}
|
public static SlotManagerConfiguration fromConfiguration(
Configuration configuration, WorkerResourceSpec defaultWorkerResourceSpec)
throws ConfigurationException {
final Time rpcTimeout =
Time.fromDuration(configuration.get(RpcOptions.ASK_TIMEOUT_DURATION));
final Time taskManagerTimeout =
Time.fromDuration(configuration.get(ResourceManagerOptions.TASK_MANAGER_TIMEOUT));
final Duration requirementCheckDelay =
configuration.get(ResourceManagerOptions.REQUIREMENTS_CHECK_DELAY);
final Duration declareNeededResourceDelay =
configuration.get(ResourceManagerOptions.DECLARE_NEEDED_RESOURCE_DELAY);
boolean waitResultConsumedBeforeRelease =
configuration.get(ResourceManagerOptions.TASK_MANAGER_RELEASE_WHEN_RESULT_CONSUMED);
TaskManagerLoadBalanceMode taskManagerLoadBalanceMode =
TaskManagerLoadBalanceMode.loadFromConfiguration(configuration);
int numSlotsPerWorker = configuration.get(TaskManagerOptions.NUM_TASK_SLOTS);
int minSlotNum = configuration.get(ResourceManagerOptions.MIN_SLOT_NUM);
int maxSlotNum = configuration.get(ResourceManagerOptions.MAX_SLOT_NUM);
int redundantTaskManagerNum =
configuration.get(ResourceManagerOptions.REDUNDANT_TASK_MANAGER_NUM);
return new SlotManagerConfiguration(
rpcTimeout,
taskManagerTimeout,
requirementCheckDelay,
declareNeededResourceDelay,
waitResultConsumedBeforeRelease,
taskManagerLoadBalanceMode,
defaultWorkerResourceSpec,
numSlotsPerWorker,
minSlotNum,
maxSlotNum,
getMinTotalCpu(configuration, defaultWorkerResourceSpec, minSlotNum),
getMaxTotalCpu(configuration, defaultWorkerResourceSpec, maxSlotNum),
getMinTotalMem(configuration, defaultWorkerResourceSpec, minSlotNum),
getMaxTotalMem(configuration, defaultWorkerResourceSpec, maxSlotNum),
redundantTaskManagerNum);
}
|
@Test
void testComputeMinMaxSlotNumIsInvalid() {
final Configuration configuration = new Configuration();
final int minSlotNum = 10;
final int maxSlotNum = 11;
final int numSlots = 3;
configuration.set(ResourceManagerOptions.MIN_SLOT_NUM, minSlotNum);
configuration.set(ResourceManagerOptions.MAX_SLOT_NUM, maxSlotNum);
assertThatIllegalStateException()
.isThrownBy(
() ->
SlotManagerConfiguration.fromConfiguration(
configuration,
new WorkerResourceSpec.Builder()
.setNumSlots(numSlots)
.build()));
}
|
@Override
protected String getFolderSuffix() {
return FOLDER_SUFFIX;
}
|
@Test
public void testGetFolderSuffix() {
Assert.assertEquals("/", mCOSUnderFileSystem.getFolderSuffix());
}
|
public static RunRequest createInternalWorkflowRunRequest(
WorkflowSummary workflowSummary,
StepRuntimeSummary runtimeSummary,
List<Tag> tags,
Map<String, ParamDefinition> runParams,
String dedupKey) {
UpstreamInitiator initiator =
UpstreamInitiator.withType(Initiator.Type.valueOf(runtimeSummary.getType().name()));
UpstreamInitiator.Info parent = new UpstreamInitiator.Info();
parent.setWorkflowId(workflowSummary.getWorkflowId());
parent.setInstanceId(workflowSummary.getWorkflowInstanceId());
parent.setRunId(workflowSummary.getWorkflowRunId());
parent.setStepId(runtimeSummary.getStepId());
parent.setStepAttemptId(runtimeSummary.getStepAttemptId());
List<UpstreamInitiator.Info> ancestors = new ArrayList<>();
if (workflowSummary.getInitiator() instanceof UpstreamInitiator) {
ancestors.addAll(((UpstreamInitiator) workflowSummary.getInitiator()).getAncestors());
}
ancestors.add(parent);
initiator.setAncestors(ancestors);
// enforce the depth limit (e.g. 10); this also prevents workflow-subworkflow cycles
Checks.checkTrue(
initiator.getDepth() < Constants.WORKFLOW_DEPTH_LIMIT,
"Workflow initiator [%s] is not less than the depth limit: %s",
initiator,
Constants.WORKFLOW_DEPTH_LIMIT);
return RunRequest.builder()
.initiator(initiator)
.requestId(IdHelper.createUuid(dedupKey))
.requestTime(System.currentTimeMillis())
.currentPolicy(workflowSummary.getRunPolicy()) // default and might be updated
.runtimeTags(tags)
.correlationId(workflowSummary.getCorrelationId())
.instanceStepConcurrency(workflowSummary.getInstanceStepConcurrency()) // pass it down
.runParams(runParams)
.restartConfig(
copyRestartConfigWithClonedPath(
ObjectHelper.valueOrDefault(
runtimeSummary.getRestartConfig(),
workflowSummary.getRestartConfig()))) // to be updated
.build();
}
|
@Test
public void testErrorForTooDeepWorkflow() {
WorkflowSummary summary = new WorkflowSummary();
summary.setWorkflowId("test-workflow");
summary.setWorkflowInstanceId(123);
summary.setWorkflowRunId(2);
SubworkflowInitiator initiator = new SubworkflowInitiator();
UpstreamInitiator.Info info = new UpstreamInitiator.Info();
info.setWorkflowId("foo");
initiator.setAncestors(Arrays.asList(info, info, info, info, info, info, info, info, info));
summary.setInitiator(initiator);
StepRuntimeSummary runtimeSummary =
StepRuntimeSummary.builder()
.type(StepType.SUBWORKFLOW)
.stepId("bar")
.stepAttemptId(3)
.build();
AssertHelper.assertThrows(
"Workflow depth is not less than the depth limit",
IllegalArgumentException.class,
"is not less than the depth limit: 10",
() ->
StepHelper.createInternalWorkflowRunRequest(summary, runtimeSummary, null, null, null));
}
|
public ProviderBuilder ioThreads(Integer ioThreads) {
this.iothreads = ioThreads;
return getThis();
}
|
@Test
void ioThreads() {
ProviderBuilder builder = ProviderBuilder.newBuilder();
builder.ioThreads(25);
Assertions.assertEquals(25, builder.build().getIothreads());
}
|
public int replicaId() {
return data.replicaId();
}
|
@Test
public void testForConsumerRequiresVersion3() {
OffsetsForLeaderEpochRequest.Builder builder = OffsetsForLeaderEpochRequest.Builder.forConsumer(new OffsetForLeaderTopicCollection());
for (short version = 0; version < 3; version++) {
final short v = version;
assertThrows(UnsupportedVersionException.class, () -> builder.build(v));
}
for (short version = 3; version <= ApiKeys.OFFSET_FOR_LEADER_EPOCH.latestVersion(); version++) {
OffsetsForLeaderEpochRequest request = builder.build(version);
assertEquals(OffsetsForLeaderEpochRequest.CONSUMER_REPLICA_ID, request.replicaId());
}
}
|
@Override
@SuppressWarnings("nullness")
public List<Map<String, Object>> readTable(String tableName) {
LOG.info("Reading all rows from {}.{}", databaseName, tableName);
List<Map<String, Object>> result = runSQLQuery(String.format("SELECT * FROM %s", tableName));
LOG.info("Successfully loaded rows from {}.{}", databaseName, tableName);
return result;
}
|
@Test
public void testReadTableShouldThrowErrorWhenDriverFailsToEstablishConnection()
throws SQLException {
when(container.getHost()).thenReturn(HOST);
when(container.getMappedPort(JDBC_PORT)).thenReturn(MAPPED_PORT);
doThrow(SQLException.class).when(driver).getConnection(any(), any(), any());
assertThrows(JDBCResourceManagerException.class, () -> testManager.readTable(TABLE_NAME));
}
|
public static SegmentAssignmentStrategy getSegmentAssignmentStrategy(HelixManager helixManager,
TableConfig tableConfig, String assignmentType, InstancePartitions instancePartitions) {
String assignmentStrategy = null;
TableType currentTableType = tableConfig.getTableType();
// TODO: Handle segment assignment strategy for CONSUMING segments in a follow-up PR
// See https://github.com/apache/pinot/issues/9047
// Accommodate new changes for assignment strategy
Map<String, SegmentAssignmentConfig> segmentAssignmentConfigMap = tableConfig.getSegmentAssignmentConfigMap();
if (tableConfig.isDimTable()) {
// Segment Assignment Strategy for DIM tables
Preconditions.checkState(currentTableType == TableType.OFFLINE,
"All Servers Segment assignment Strategy is only applicable to Dim OfflineTables");
SegmentAssignmentStrategy segmentAssignmentStrategy = new AllServersSegmentAssignmentStrategy();
segmentAssignmentStrategy.init(helixManager, tableConfig);
return segmentAssignmentStrategy;
} else {
// Try to determine segment assignment strategy from table config
if (segmentAssignmentConfigMap != null) {
SegmentAssignmentConfig segmentAssignmentConfig;
// Use the predefined segment assignment strategy
segmentAssignmentConfig = segmentAssignmentConfigMap.get(assignmentType.toUpperCase());
// Segment assignment config is only applicable to offline tables and completed segments of real-time tables
if (segmentAssignmentConfig != null) {
assignmentStrategy = segmentAssignmentConfig.getAssignmentStrategy().toLowerCase();
}
}
}
// Use the existing information to determine segment assignment strategy
SegmentAssignmentStrategy segmentAssignmentStrategy;
if (assignmentStrategy == null) {
// Calculate numReplicaGroups and numPartitions to determine segment assignment strategy
Preconditions
.checkState(instancePartitions != null, "Failed to find instance partitions for segment assignment strategy");
int numReplicaGroups = instancePartitions.getNumReplicaGroups();
int numPartitions = instancePartitions.getNumPartitions();
if (numReplicaGroups == 1 && numPartitions == 1) {
segmentAssignmentStrategy = new BalancedNumSegmentAssignmentStrategy();
} else {
segmentAssignmentStrategy = new ReplicaGroupSegmentAssignmentStrategy();
}
} else {
// Set segment assignment strategy depending on strategy set in table config
switch (assignmentStrategy) {
case AssignmentStrategy.REPLICA_GROUP_SEGMENT_ASSIGNMENT_STRATEGY:
segmentAssignmentStrategy = new ReplicaGroupSegmentAssignmentStrategy();
break;
case AssignmentStrategy.BALANCE_NUM_SEGMENT_ASSIGNMENT_STRATEGY:
default:
segmentAssignmentStrategy = new BalancedNumSegmentAssignmentStrategy();
break;
}
}
segmentAssignmentStrategy.init(helixManager, tableConfig);
return segmentAssignmentStrategy;
}
|
@Test
  public void testBalancedNumSegmentAssignmentStrategyForOfflineTables() {
TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(RAW_TABLE_NAME).build();
InstancePartitions instancePartitions = new InstancePartitions(INSTANCE_PARTITIONS_NAME);
instancePartitions.setInstances(0, 0, INSTANCES);
SegmentAssignmentStrategy segmentAssignmentStrategy = SegmentAssignmentStrategyFactory
.getSegmentAssignmentStrategy(null, tableConfig, InstancePartitionsType.OFFLINE.toString(), instancePartitions);
Assert.assertNotNull(segmentAssignmentStrategy);
Assert.assertTrue(segmentAssignmentStrategy instanceof BalancedNumSegmentAssignmentStrategy);
}
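
  // A companion sketch, not part of the original pair: assuming the same fixtures
  // (RAW_TABLE_NAME, INSTANCE_PARTITIONS_NAME, INSTANCES) and that setInstances(partitionId,
  // replicaGroupId, instances) grows numReplicaGroups, more than one replica group should
  // select ReplicaGroupSegmentAssignmentStrategy per the branch in the focal method.
  @Test
  public void testReplicaGroupStrategyChosenForMultipleReplicaGroups() {
    TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(RAW_TABLE_NAME).build();
    InstancePartitions instancePartitions = new InstancePartitions(INSTANCE_PARTITIONS_NAME);
    // Two replica groups for partition 0
    instancePartitions.setInstances(0, 0, INSTANCES);
    instancePartitions.setInstances(0, 1, INSTANCES);
    SegmentAssignmentStrategy strategy = SegmentAssignmentStrategyFactory
        .getSegmentAssignmentStrategy(null, tableConfig, InstancePartitionsType.OFFLINE.toString(), instancePartitions);
    Assert.assertTrue(strategy instanceof ReplicaGroupSegmentAssignmentStrategy);
  }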
|
public static <T> Bounded<T> from(BoundedSource<T> source) {
return new Bounded<>(null, source);
}
|
@Test
public void succeedsWhenCustomBoundedSourceIsSerializable() {
Read.from(new SerializableBoundedSource());
}
|
@VisibleForTesting
HiveClientPool clientPool() {
return clientPoolCache.get(key, k -> new HiveClientPool(clientPoolSize, conf));
}
|
@Test
public void testHmsCatalog() {
Map<String, String> properties =
ImmutableMap.of(
String.valueOf(EVICTION_INTERVAL),
String.valueOf(Integer.MAX_VALUE),
ICEBERG_CATALOG_TYPE,
ICEBERG_CATALOG_TYPE_HIVE);
Configuration conf1 = new Configuration();
conf1.set(HiveCatalog.HIVE_CONF_CATALOG, "foo");
Configuration conf2 = new Configuration();
conf2.set(HiveCatalog.HIVE_CONF_CATALOG, "foo");
Configuration conf3 = new Configuration();
conf3.set(HiveCatalog.HIVE_CONF_CATALOG, "bar");
HiveCatalog catalog1 = (HiveCatalog) CatalogUtil.buildIcebergCatalog("1", properties, conf1);
HiveCatalog catalog2 = (HiveCatalog) CatalogUtil.buildIcebergCatalog("2", properties, conf2);
HiveCatalog catalog3 = (HiveCatalog) CatalogUtil.buildIcebergCatalog("3", properties, conf3);
HiveCatalog catalog4 =
(HiveCatalog) CatalogUtil.buildIcebergCatalog("4", properties, new Configuration());
HiveClientPool pool1 = ((CachedClientPool) catalog1.clientPool()).clientPool();
HiveClientPool pool2 = ((CachedClientPool) catalog2.clientPool()).clientPool();
HiveClientPool pool3 = ((CachedClientPool) catalog3.clientPool()).clientPool();
HiveClientPool pool4 = ((CachedClientPool) catalog4.clientPool()).clientPool();
assertThat(pool2).isSameAs(pool1);
assertThat(pool1).isNotSameAs(pool3);
assertThat(pool2).isNotSameAs(pool3);
assertThat(pool4).isNotSameAs(pool3);
assertThat(pool1).isNotSameAs(pool4);
assertThat(pool2).isNotSameAs(pool4);
assertThat(pool1.hiveConf().get(HiveCatalog.HIVE_CONF_CATALOG)).isEqualTo("foo");
assertThat(pool3.hiveConf().get(HiveCatalog.HIVE_CONF_CATALOG)).isEqualTo("bar");
assertThat(pool4.hiveConf().get(HiveCatalog.HIVE_CONF_CATALOG)).isNull();
pool1.close();
pool3.close();
pool4.close();
}
|
HashPMap<K, V> underlying() {
return underlying;
}
|
@Test
public void testUnderlying() {
assertSame(SINGLETON_MAP, new PCollectionsImmutableMap<>(SINGLETON_MAP).underlying());
}
|
@Override
public void setApplicationState(OrchestratorContext context, ApplicationInstanceId applicationId,
ClusterControllerNodeState wantedState) throws ApplicationStateChangeDeniedException {
try {
ClusterControllerClientTimeouts timeouts = context.getClusterControllerTimeouts();
Inspector response = client.send(strategy(hosts), Method.POST)
.at("cluster", "v2", clusterName)
.deadline(timeouts.readBudget())
.parameters(() -> deadline(timeouts))
.body(stateChangeRequestBytes(wantedState, Condition.FORCE, false))
.throwing(retryOnRedirect)
.read(SlimeUtils::jsonToSlime).get();
if ( ! response.field("wasModified").asBool()) {
throw new ApplicationStateChangeDeniedException("Failed to set application " + applicationId + ", cluster name " +
clusterName + " to cluster state " + wantedState + " due to: " +
response.field("reason").asString());
}
}
catch (ResponseException e) {
throw new ApplicationStateChangeDeniedException("Failed to set application " + applicationId + " cluster name " +
clusterName + " to cluster state " + wantedState + " due to: " + e.getMessage());
}
catch (UncheckedIOException e) {
throw new ApplicationStateChangeDeniedException("Failed communicating with cluster controllers " + hosts +
" with cluster ID " + clusterName + ": " + e.getCause().getMessage());
}
catch (UncheckedTimeoutException e) {
throw new ApplicationStateChangeDeniedException("Timed out while waiting for cluster controllers " + hosts +
" with cluster ID " + clusterName + ": " + e.getMessage());
}
}
|
@Test
public void verifySetApplicationState() {
wire.expect((url, body) -> {
assertEquals("http://host1:19050/cluster/v2/cc?timeout=299.6",
url.asURI().toString());
assertEquals("{\"state\":{\"user\":{\"reason\":\"Orchestrator\",\"state\":\"up\"}},\"condition\":\"FORCE\"}",
body);
return "{ \"message\": \":<\" }";
},
500);
assertEquals("Failed to set application app cluster name cc to cluster state UP due to: " +
"got status code 500 for POST http://host1:19050/cluster/v2/cc?timeout=299.6: :<",
assertThrows(ApplicationStateChangeDeniedException.class,
() -> client.setApplicationState(OrchestratorContext.createContextForAdminOp(clock),
new ApplicationInstanceId("app"),
UP))
.getMessage());
}
|
public void clearStatus(String service) {
healthService.clearStatus(service);
}
|
@Test
void clearStatus() {
String service = "serv1";
manager.setStatus(service, ServingStatus.SERVING);
ServingStatus stored = manager.getHealthService()
.check(HealthCheckRequest.newBuilder().setService(service).build())
.getStatus();
Assertions.assertEquals(ServingStatus.SERVING, stored);
manager.clearStatus(service);
try {
manager.getHealthService()
.check(HealthCheckRequest.newBuilder().setService(service).build());
fail();
} catch (StatusRpcException e) {
Assertions.assertEquals(Code.NOT_FOUND, e.getStatus().code);
}
}
|
public int getUnknown_002c() {
return unknown_002c;
}
|
@Test
public void testGetUnknown_002() {
assertEquals(TestParameters.VP_ITSP_UNKNOWN_002C, chmItspHeader.getUnknown_002c());
}
|
public void setMenuItemEnabledState( List<UIRepositoryObject> selectedRepoObjects ) {
try {
boolean result = false;
if ( selectedRepoObjects.size() == 1 && selectedRepoObjects.get( 0 ) instanceof UIRepositoryDirectory ) {
lockFileMenuItem.setDisabled( true );
deleteFileMenuItem.setDisabled( false );
renameFileMenuItem.setDisabled( false );
} else if ( selectedRepoObjects.size() == 1 && selectedRepoObjects.get( 0 ) instanceof ILockObject ) {
final UIRepositoryContent contentToLock = (UIRepositoryContent) selectedRepoObjects.get( 0 );
if ( ( (ILockObject) contentToLock ).isLocked() ) {
if ( repository instanceof PurRepository
                     // repository can be a proxy of the repository, in which case the first instanceof check fails
|| repository.getRepositoryMeta() instanceof PurRepositoryMeta ) {
result = service.canUnlockFileById( contentToLock.getObjectId() );
} else {
result =
( (ILockObject) contentToLock ).getRepositoryLock().getLogin().equalsIgnoreCase(
repository.getUserInfo().getLogin() );
}
lockFileMenuItem.setDisabled( !result );
deleteFileMenuItem.setDisabled( !result );
renameFileMenuItem.setDisabled( !result );
} else {
lockFileMenuItem.setDisabled( false );
deleteFileMenuItem.setDisabled( false );
renameFileMenuItem.setDisabled( false );
}
} else {
lockFileMenuItem.setDisabled( true );
deleteFileMenuItem.setDisabled( true );
renameFileMenuItem.setDisabled( true );
}
} catch ( Exception e ) {
throw new RuntimeException( e );
}
}
|
@Test
public void testBlockLock() throws Exception {
RepositoryLockController repositoryLockController = new RepositoryLockController();
List<UIRepositoryObject> selectedRepoObjects = new ArrayList<>();
UIEETransformation lockObject = Mockito.mock( UIEETransformation.class );
selectedRepoObjects.add( lockObject );
Mockito.when( lockObject.isLocked() ).thenReturn( true );
ObjectId objectId = Mockito.mock( ObjectId.class );
Mockito.when( lockObject.getObjectId() ).thenReturn( objectId );
XulMenuitem lockFileMenuItem = Mockito.mock( XulMenuitem.class );
Field lockFileMenuItemField = repositoryLockController.getClass().getDeclaredField( "lockFileMenuItem" );
lockFileMenuItemField.setAccessible( true );
lockFileMenuItemField.set( repositoryLockController, lockFileMenuItem );
XulMenuitem deleteFileMenuItem = Mockito.mock( XulMenuitem.class );
Field deleteFileMenuItemField = repositoryLockController.getClass().getDeclaredField( "deleteFileMenuItem" );
deleteFileMenuItemField.setAccessible( true );
deleteFileMenuItemField.set( repositoryLockController, deleteFileMenuItem );
XulMenuitem renameFileMenuItem = Mockito.mock( XulMenuitem.class );
Field renameFileMenuItemField = repositoryLockController.getClass().getDeclaredField( "renameFileMenuItem" );
renameFileMenuItemField.setAccessible( true );
renameFileMenuItemField.set( repositoryLockController, renameFileMenuItem );
Repository repository = Mockito.mock( Repository.class );
PurRepositoryMeta repositoryMeta = Mockito.mock( PurRepositoryMeta.class );
Mockito.when( repository.getRepositoryMeta() ).thenReturn( repositoryMeta );
Field repositoryField = repositoryLockController.getClass().getDeclaredField( "repository" );
repositoryField.setAccessible( true );
repositoryField.set( repositoryLockController, repository );
ILockService service = Mockito.mock( ILockService.class );
Mockito.when( service.canUnlockFileById( objectId ) ).thenReturn( true );
Field serviceField = repositoryLockController.getClass().getDeclaredField( "service" );
serviceField.setAccessible( true );
serviceField.set( repositoryLockController, service );
repositoryLockController.setMenuItemEnabledState( selectedRepoObjects );
Assert.assertFalse( lockFileMenuItem.isDisabled() );
Mockito.verify( lockFileMenuItem ).setDisabled( false );
}
|
@VisibleForTesting
List<String> getHighlights()
{
final String configNpcs = config.getNpcToHighlight();
if (configNpcs.isEmpty())
{
return Collections.emptyList();
}
return Text.fromCSV(configNpcs);
}
|
@Test
public void getHighlights()
{
when(npcIndicatorsConfig.getNpcToHighlight()).thenReturn("goblin, , zulrah , *wyvern, ,");
final List<String> highlightedNpcs = npcIndicatorsPlugin.getHighlights();
assertEquals("Length of parsed NPCs is incorrect", 3, highlightedNpcs.size());
final Iterator<String> iterator = highlightedNpcs.iterator();
assertEquals("goblin", iterator.next());
assertEquals("zulrah", iterator.next());
assertEquals("*wyvern", iterator.next());
}
|
@SuppressWarnings("unchecked")
@Override
public void configure(final Map<String, ?> configs, final boolean isKey) {
        // check whether the window size is set in the configs when it has already been set from the constructor
final Long configWindowSize;
if (configs.get(StreamsConfig.WINDOW_SIZE_MS_CONFIG) instanceof String) {
configWindowSize = Long.parseLong((String) configs.get(StreamsConfig.WINDOW_SIZE_MS_CONFIG));
} else {
configWindowSize = (Long) configs.get(StreamsConfig.WINDOW_SIZE_MS_CONFIG);
}
if (windowSize != null && configWindowSize != null) {
throw new IllegalArgumentException("Window size should not be set in both the time windowed deserializer constructor and the window.size.ms config");
} else if (windowSize == null && configWindowSize == null) {
throw new IllegalArgumentException("Window size needs to be set either through the time windowed deserializer " +
"constructor or the window.size.ms config but not both");
} else {
windowSize = windowSize == null ? configWindowSize : windowSize;
}
final String windowedInnerClassSerdeConfig = (String) configs.get(StreamsConfig.WINDOWED_INNER_CLASS_SERDE);
Serde<T> windowInnerClassSerde = null;
if (windowedInnerClassSerdeConfig != null) {
try {
windowInnerClassSerde = Utils.newInstance(windowedInnerClassSerdeConfig, Serde.class);
} catch (final ClassNotFoundException e) {
throw new ConfigException(StreamsConfig.WINDOWED_INNER_CLASS_SERDE, windowedInnerClassSerdeConfig,
"Serde class " + windowedInnerClassSerdeConfig + " could not be found.");
}
}
if (inner != null && windowedInnerClassSerdeConfig != null) {
if (!inner.getClass().getName().equals(windowInnerClassSerde.deserializer().getClass().getName())) {
throw new IllegalArgumentException("Inner class deserializer set using constructor "
+ "(" + inner.getClass().getName() + ")" +
" is different from the one set in windowed.inner.class.serde config " +
"(" + windowInnerClassSerde.deserializer().getClass().getName() + ").");
}
} else if (inner == null && windowedInnerClassSerdeConfig == null) {
throw new IllegalArgumentException("Inner class deserializer should be set either via constructor " +
"or via the windowed.inner.class.serde config");
} else if (inner == null)
inner = windowInnerClassSerde.deserializer();
}
|
@Test
public void shouldThrowErrorIfWindowedInnerClassDeserialiserIsNotSet() {
props.put(StreamsConfig.WINDOW_SIZE_MS_CONFIG, "500");
final TimeWindowedDeserializer<?> deserializer = new TimeWindowedDeserializer<>();
assertThrows(IllegalArgumentException.class, () -> deserializer.configure(props, false));
}
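
    // A minimal positive-path sketch, not from the original suite: supplying the window size via
    // config and the inner serde (Serdes.StringSerde, a standard Kafka serde, used here as an
    // illustrative choice) satisfies both checks in configure() and should not throw.
    @Test
    public void shouldConfigureWhenWindowSizeAndInnerSerdeAreProvided() {
        final Map<String, Object> config = new HashMap<>();
        config.put(StreamsConfig.WINDOW_SIZE_MS_CONFIG, "500");
        config.put(StreamsConfig.WINDOWED_INNER_CLASS_SERDE, Serdes.StringSerde.class.getName());
        final TimeWindowedDeserializer<String> deserializer = new TimeWindowedDeserializer<>();
        assertDoesNotThrow(() -> deserializer.configure(config, false));
    }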
|
@Override
public void doLimitForModifyRequest(ModifyRequest modifyRequest) throws SQLException {
if (null == modifyRequest || !enabledLimit) {
return;
}
doLimit(modifyRequest.getSql());
}
|
@Test
void testDoLimitForModifyRequestForDml() throws SQLException {
ModifyRequest insert = new ModifyRequest("insert into test(id,name) values(1,'test')");
ModifyRequest update = new ModifyRequest("update test set name='test' where id=1");
ModifyRequest delete = new ModifyRequest("delete from test where id=1");
List<ModifyRequest> modifyRequests = new LinkedList<>();
modifyRequests.add(insert);
modifyRequests.add(update);
modifyRequests.add(delete);
sqlLimiter.doLimitForModifyRequest(modifyRequests);
}
|
public static void main(String[] args) {
// get service
var userService = new UserService();
// use create service to add users
for (var user : generateSampleUsers()) {
var id = userService.createUser(user);
LOGGER.info("Add user" + user + "at" + id + ".");
}
// use list service to get users
var users = userService.listUser();
LOGGER.info(String.valueOf(users));
// use get service to get a user
var user = userService.getUser(1);
LOGGER.info(String.valueOf(user));
// change password of user 1
user.setPassword("new123");
// use update service to update user 1
userService.updateUser(1, user);
// use delete service to delete user 2
userService.deleteUser(2);
// close service
userService.close();
}
|
@Test
void shouldExecuteMetaMappingWithoutException() {
assertDoesNotThrow(() -> App.main(new String[]{}));
}
|
private static void shutdown() {
if (!ALREADY_SHUTDOWN.compareAndSet(false, true)) {
return;
}
LOGGER.warn("[HttpClientBeanHolder] Start destroying common HttpClient");
try {
shutdown(DefaultHttpClientFactory.class.getName());
} catch (Exception ex) {
LOGGER.error("An exception occurred when the common HTTP client was closed : {}",
ExceptionUtil.getStackTrace(ex));
}
LOGGER.warn("[HttpClientBeanHolder] Destruction of the end");
}
|
@Test
void shutdown() throws Exception {
HttpClientBeanHolder.getNacosRestTemplate((Logger) null);
HttpClientBeanHolder.getNacosAsyncRestTemplate((Logger) null);
assertEquals(1, restMap.size());
assertEquals(1, restAsyncMap.size());
HttpClientBeanHolder.shutdown(DefaultHttpClientFactory.class.getName());
assertEquals(0, restMap.size());
assertEquals(0, restAsyncMap.size());
}
|
@Operation(summary = "Request a new mijn digid session based on an app session")
@PostMapping(value = "/request_session", consumes = "application/json")
public ResponseEntity<?> requestSession(@RequestBody @Valid MijnDigidSessionRequest request){
if(request == null || request.getAppSessionId() == null) {
return ResponseEntity.badRequest().build();
}
String mijnDigiDSessionId = mijnDigiDSessionService.createSession(request.getAppSessionId()).getId();
return ResponseEntity
.ok()
.header(MijnDigidSession.MIJN_DIGID_SESSION_HEADER, mijnDigiDSessionId)
.build();
}
|
@Test
void validateValidRequest() {
MijnDigidSessionRequest request = new MijnDigidSessionRequest();
String appSessionId = "id";
request.setAppSessionId(appSessionId);
MijnDigidSession session = new MijnDigidSession(1L);
when(mijnDigiDSessionService.createSession(appSessionId)).thenReturn(session);
ResponseEntity<?> response = mijnDigiDSessionController.requestSession(request);
verify(mijnDigiDSessionService, times(1)).createSession(appSessionId);
assertEquals(HttpStatus.OK, response.getStatusCode());
assertEquals(session.getId(), response.getHeaders().get(MijnDigidSession.MIJN_DIGID_SESSION_HEADER).get(0) );
}
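
    // A negative-path sketch added for illustration, not from the original suite: a request
    // without an appSessionId should hit the controller's guard and return 400 Bad Request.
    @Test
    void rejectRequestWithoutAppSessionId() {
        MijnDigidSessionRequest request = new MijnDigidSessionRequest();
        // appSessionId intentionally left null
        ResponseEntity<?> response = mijnDigiDSessionController.requestSession(request);
        assertEquals(HttpStatus.BAD_REQUEST, response.getStatusCode());
        verify(mijnDigiDSessionService, never()).createSession(any());
    }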
|
public Map<String, String> getSessionVariables() {
return sessionVariables;
}
|
@Test
public void testGetSessionVariables() {
UserProperty userProperty = new UserProperty();
Map<String, String> sessionVariables = userProperty.getSessionVariables();
Assert.assertEquals(0, sessionVariables.size());
}
|
String name(String name) {
return sanitize(name, NAME_RESERVED);
}
|
@Test
public void replacesNonASCIICharacters() throws Exception {
assertThat(sanitize.name("M" + '\u00FC' + "nchen")).isEqualTo("M_nchen");
}
|
public void validatePassword(final String password) {
if (!this.password.equals(password)) {
throw new PasswordNotMatchedException();
}
}
|
@Test
void 패스워드가_다른_경우에_예외를_발생한다() {
// given
Member member = 일반_유저_생성();
String givenPassword = "wrongPassword";
// when & then
assertThatThrownBy(() -> member.validatePassword(givenPassword))
.isInstanceOf(PasswordNotMatchedException.class);
}
|
public IThrowableRenderer<ILoggingEvent> getThrowableRenderer() {
return throwableRenderer;
}
|
@Test
public void testAppendThrowable() throws Exception {
StringBuilder buf = new StringBuilder();
DummyThrowableProxy tp = new DummyThrowableProxy();
tp.setClassName("test1");
tp.setMessage("msg1");
StackTraceElement ste1 = new StackTraceElement("c1", "m1", "f1", 1);
StackTraceElement ste2 = new StackTraceElement("c2", "m2", "f2", 2);
StackTraceElementProxy[] stepArray = { new StackTraceElementProxy(ste1),
new StackTraceElementProxy(ste2) };
tp.setStackTraceElementProxyArray(stepArray);
DefaultThrowableRenderer renderer = (DefaultThrowableRenderer) layout
.getThrowableRenderer();
renderer.render(buf, tp);
System.out.println(buf.toString());
String[] result = buf.toString().split(CoreConstants.LINE_SEPARATOR);
System.out.println(result[0]);
assertEquals("test1: msg1", result[0]);
assertEquals(DefaultThrowableRenderer.TRACE_PREFIX + "at c1.m1(f1:1)", result[1]);
}
|
@VisibleForTesting
static void validateFips(final KsqlConfig config, final KsqlRestConfig restConfig) {
if (config.getBoolean(ConfluentConfigs.ENABLE_FIPS_CONFIG)) {
final FipsValidator fipsValidator = ConfluentConfigs.buildFipsValidator();
// validate cipher suites and TLS version
validateCipherSuites(fipsValidator, restConfig);
// validate broker
validateBroker(fipsValidator, config);
// validate ssl endpoint algorithm
validateSslEndpointAlgo(fipsValidator, restConfig);
// validate schema registry url
validateSrUrl(fipsValidator, restConfig);
// validate all listeners
validateListeners(fipsValidator, restConfig);
log.info("FIPS mode enabled for ksqlDB!");
}
}
|
@Test
public void shouldFailOnInvalidSSLEndpointIdentificationAlgorithm() {
// Given:
final KsqlConfig config = configWith(ImmutableMap.of(
ConfluentConfigs.ENABLE_FIPS_CONFIG, true,
CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_SSL.name
));
final KsqlRestConfig restConfig = new KsqlRestConfig(ImmutableMap.<String, Object>builder()
.put(KsqlRestConfig.SSL_CIPHER_SUITES_CONFIG,
Collections.singletonList("TLS_RSA_WITH_AES_256_CCM"))
.put(KsqlConfig.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, "http")
.build()
);
// When:
final Exception e = assertThrows(
SecurityException.class,
() -> KsqlServerMain.validateFips(config, restConfig)
);
// Then:
assertThat(e.getMessage(), containsString(
"FIPS 140-2 Configuration Error, invalid rest protocol: http"
+ "\nInvalid rest protocol for "
+ KsqlConfig.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG));
}
|
public static void delete(final File rootFile) throws IOException {
if (rootFile == null)
return;
Files.walkFileTree(rootFile.toPath(), new SimpleFileVisitor<Path>() {
@Override
public FileVisitResult visitFileFailed(Path path, IOException exc) throws IOException {
if (exc instanceof NoSuchFileException) {
if (path.toFile().equals(rootFile)) {
                        // If the root path did not exist, ignore the error and terminate.
return FileVisitResult.TERMINATE;
} else {
// Otherwise, just continue walking as the file might already be deleted by other threads.
return FileVisitResult.CONTINUE;
}
}
throw exc;
}
@Override
public FileVisitResult visitFile(Path path, BasicFileAttributes attrs) throws IOException {
Files.deleteIfExists(path);
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult postVisitDirectory(Path path, IOException exc) throws IOException {
// KAFKA-8999: if there's an exception thrown previously already, we should throw it
if (exc != null) {
throw exc;
}
Files.deleteIfExists(path);
return FileVisitResult.CONTINUE;
}
});
}
|
@SuppressWarnings("unchecked")
@Test
public void testRecursiveDeleteWithDeletedFile() throws IOException {
// Test recursive deletes, where the FileWalk is supplied with a deleted file path.
File rootDir = TestUtils.tempDirectory();
File subDir = TestUtils.tempDirectory(rootDir.toPath(), "a");
DirectoryStream<Path> mockDirectoryStream = (DirectoryStream<Path>) mock(DirectoryStream.class);
FileSystemProvider mockFileSystemProvider = mock(FileSystemProvider.class);
FileSystem mockFileSystem = mock(FileSystem.class);
Path mockRootPath = mock(Path.class);
BasicFileAttributes mockBasicFileAttributes = mock(BasicFileAttributes.class);
Iterator<Path> mockIterator = mock(Iterator.class);
File spyRootFile = spy(rootDir);
when(spyRootFile.toPath()).thenReturn(mockRootPath);
when(mockRootPath.getFileSystem()).thenReturn(mockFileSystem);
when(mockFileSystem.provider()).thenReturn(mockFileSystemProvider);
when(mockFileSystemProvider.readAttributes(any(), (Class<BasicFileAttributes>) any(), any())).thenReturn(mockBasicFileAttributes);
when(mockBasicFileAttributes.isDirectory()).thenReturn(true);
when(mockFileSystemProvider.newDirectoryStream(any(), any())).thenReturn(mockDirectoryStream);
when(mockDirectoryStream.iterator()).thenReturn(mockIterator);
// Here we pass the rootDir to the FileWalk which removes all Files recursively,
// and then we pass the subDir path again which is already deleted by this point.
when(mockIterator.next()).thenReturn(rootDir.toPath()).thenReturn(subDir.toPath());
when(mockIterator.hasNext()).thenReturn(true).thenReturn(true).thenReturn(false);
assertDoesNotThrow(() -> {
Utils.delete(spyRootFile);
});
assertFalse(Files.exists(rootDir.toPath()));
assertFalse(Files.exists(subDir.toPath()));
}
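
    // Companion sketch, not from the original suite: per the visitFileFailed handling above,
    // deleting a root path that never existed should be a silent no-op rather than an error.
    @Test
    public void testDeleteOfNonExistingRootDoesNotThrow() {
        File root = new File(TestUtils.tempDirectory(), "does-not-exist");
        assertFalse(root.exists());
        assertDoesNotThrow(() -> Utils.delete(root));
    }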
|
Capabilities getCapabilitiesFromResponseBody(String responseBody) {
final CapabilitiesDTO capabilitiesDTO = FORCED_EXPOSE_GSON.fromJson(responseBody, CapabilitiesDTO.class);
return capabilitiesConverterV5.fromDTO(capabilitiesDTO);
}
|
@Test
public void shouldGetCapabilitiesFromResponseBody() {
String responseBody = "{" +
" \"supports_plugin_status_report\":\"true\"," +
" \"supports_cluster_status_report\":\"true\"," +
" \"supports_agent_status_report\":\"true\"" +
"}";
Capabilities capabilities = new ElasticAgentExtensionConverterV5().getCapabilitiesFromResponseBody(responseBody);
assertTrue(capabilities.supportsPluginStatusReport());
assertTrue(capabilities.supportsClusterStatusReport());
assertTrue(capabilities.supportsAgentStatusReport());
}
|
@Override
@Nullable
public IdentifiedDataSerializable create(int typeId) {
if (typeId >= 0 && typeId < len) {
Supplier<IdentifiedDataSerializable> factory = constructors[typeId];
return factory != null ? factory.get() : null;
}
return null;
}
|
@Test
public void testCreateWithoutVersion() {
Supplier<IdentifiedDataSerializable>[] constructorFunctions = new Supplier[1];
Supplier<IdentifiedDataSerializable> function = mock(Supplier.class);
constructorFunctions[0] = function;
ArrayDataSerializableFactory factory = new ArrayDataSerializableFactory(constructorFunctions);
factory.create(0);
verify(function, times(1)).get();
}
|
public static Configuration adjustForLocalExecution(Configuration config) {
UNUSED_CONFIG_OPTIONS.forEach(
option -> warnAndRemoveOptionHasNoEffectIfSet(config, option));
setConfigOptionToPassedMaxIfNotSet(
config, TaskManagerOptions.CPU_CORES, LOCAL_EXECUTION_CPU_CORES);
setConfigOptionToPassedMaxIfNotSet(
config, TaskManagerOptions.TASK_HEAP_MEMORY, LOCAL_EXECUTION_TASK_MEMORY);
setConfigOptionToPassedMaxIfNotSet(
config, TaskManagerOptions.TASK_OFF_HEAP_MEMORY, LOCAL_EXECUTION_TASK_MEMORY);
adjustNetworkMemoryForLocalExecution(config);
setConfigOptionToDefaultIfNotSet(
config, TaskManagerOptions.MANAGED_MEMORY_SIZE, DEFAULT_MANAGED_MEMORY_SIZE);
// Set valid default values for unused config options which should have been removed.
config.set(
TaskManagerOptions.FRAMEWORK_HEAP_MEMORY,
TaskManagerOptions.FRAMEWORK_HEAP_MEMORY.defaultValue());
config.set(
TaskManagerOptions.FRAMEWORK_OFF_HEAP_MEMORY,
TaskManagerOptions.FRAMEWORK_OFF_HEAP_MEMORY.defaultValue());
config.set(
TaskManagerOptions.JVM_METASPACE, TaskManagerOptions.JVM_METASPACE.defaultValue());
config.set(
TaskManagerOptions.JVM_OVERHEAD_MAX,
TaskManagerOptions.JVM_OVERHEAD_MAX.defaultValue());
config.set(
TaskManagerOptions.JVM_OVERHEAD_MIN,
TaskManagerOptions.JVM_OVERHEAD_MAX.defaultValue());
return config;
}
|
@Test
void testUnusedOptionsAreIgnoredForLocalExecution() {
Configuration configuration = new Configuration();
configuration.set(TaskManagerOptions.TOTAL_PROCESS_MEMORY, MemorySize.ofMebiBytes(2024));
configuration.set(TaskManagerOptions.TOTAL_FLINK_MEMORY, MemorySize.ofMebiBytes(2024));
configuration.set(TaskManagerOptions.FRAMEWORK_HEAP_MEMORY, MemorySize.ofMebiBytes(2024));
configuration.set(
TaskManagerOptions.FRAMEWORK_OFF_HEAP_MEMORY, MemorySize.ofMebiBytes(2024));
configuration.set(TaskManagerOptions.JVM_METASPACE, MemorySize.ofMebiBytes(2024));
configuration.set(TaskManagerOptions.JVM_OVERHEAD_MIN, MemorySize.ofMebiBytes(2024));
configuration.set(TaskManagerOptions.JVM_OVERHEAD_MAX, MemorySize.ofMebiBytes(2024));
configuration.set(TaskManagerOptions.JVM_OVERHEAD_FRACTION, 2024.0f);
TaskExecutorResourceUtils.adjustForLocalExecution(configuration);
assertThat(configuration)
.isEqualTo(TaskExecutorResourceUtils.adjustForLocalExecution(new Configuration()));
}
|
@Override
public void loadData(Priority priority, DataCallback<? super T> callback) {
this.callback = callback;
serializer.startRequest(priority, url, this);
}
|
@Test
public void testRequestComplete_with200NotCancelledMatchingLength_callsCallbackWithValidData()
throws Exception {
String data = "data";
ByteBuffer expected = ByteBuffer.wrap(data.getBytes());
ArgumentCaptor<ByteBuffer> captor = ArgumentCaptor.forClass(ByteBuffer.class);
fetcher.loadData(Priority.LOW, callback);
succeed(
getInfo(expected.remaining(), 200),
urlRequestListenerCaptor.getValue(),
expected.duplicate());
verify(callback, timeout(1000)).onDataReady(captor.capture());
ByteBuffer received = captor.getValue();
assertThat(
new String(
received.array(),
received.arrayOffset() + received.position(),
received.remaining()))
.isEqualTo(data);
}
|
@Override
public void notifyTerminated() {
doAction(Executable::notifyTerminated);
}
|
@Test
public void shouldNotifyAllToShutdown() throws Exception {
// When:
multiExecutable.notifyTerminated();
    // Then:
final InOrder inOrder = Mockito.inOrder(executable1, executable2);
inOrder.verify(executable1).notifyTerminated();
inOrder.verify(executable2).notifyTerminated();
inOrder.verifyNoMoreInteractions();
}
|
@Override
public void askAccessPermissions(@NonNull final DecryptionContext context) {
this.context = context;
if (!DeviceAvailability.isPermissionsGranted(reactContext)) {
final CryptoFailedException failure = new CryptoFailedException(
"Could not start fingerprint Authentication. No permissions granted.");
onDecrypt(null, failure);
} else {
startAuthentication();
}
}
|
@Test(expected= NullPointerException.class)
@Config(sdk = Build.VERSION_CODES.M)
public void testBiometryAuthenticationErrorNoActivity() {
// GIVEN
final KeyguardManager keyguardManager = mock(KeyguardManager.class);
when(keyguardManager.isKeyguardSecure()).thenReturn(true);
final ReactApplicationContext mockContext = mock(ReactApplicationContext.class);
when(mockContext.getSystemService(Context.KEYGUARD_SERVICE)).thenReturn(keyguardManager);
when(mockContext.checkSelfPermission(Manifest.permission.USE_FINGERPRINT)).thenReturn(PERMISSION_GRANTED);
final CipherStorage storage = mock(CipherStorageBase.class);
final BiometricPrompt.PromptInfo promptInfo = mock(BiometricPrompt.PromptInfo.class);
final CipherStorage.DecryptionContext decryptionContext = mock(CipherStorage.DecryptionContext.class);
// WHEN
DecryptionResultHandlerInteractiveBiometric handler = new DecryptionResultHandlerInteractiveBiometric(mockContext, storage, promptInfo);
handler.askAccessPermissions(decryptionContext);
}
|
public static GoodTuring of(int[] r, int[] Nr) {
final double CONFID_FACTOR = 1.96;
if (r.length != Nr.length) {
throw new IllegalArgumentException("The sizes of r and Nr are not same.");
}
int len = r.length;
double[] p = new double[len];
double[] logR = new double[len];
double[] logZ = new double[len];
double[] Z = new double[len];
int N = 0;
for (int j = 0; j < len; ++j) {
N += r[j] * Nr[j];
}
int n1 = (r[0] != 1) ? 0 : Nr[0];
double p0 = n1 / (double) N;
for (int j = 0; j < len; ++j) {
int q = j == 0 ? 0 : r[j - 1];
int t = j == len - 1 ? 2 * r[j] - q : r[j + 1];
Z[j] = 2.0 * Nr[j] / (t - q);
logR[j] = Math.log(r[j]);
logZ[j] = Math.log(Z[j]);
}
// Simple linear regression.
double XYs = 0.0, Xsquares = 0.0, meanX = 0.0, meanY = 0.0;
for (int i = 0; i < len; ++i) {
meanX += logR[i];
meanY += logZ[i];
}
meanX /= len;
meanY /= len;
for (int i = 0; i < len; ++i) {
XYs += (logR[i] - meanX) * (logZ[i] - meanY);
Xsquares += MathEx.pow2(logR[i] - meanX);
}
double slope = XYs / Xsquares;
double intercept = meanY - slope * meanX;
boolean indiffValsSeen = false;
for (int j = 0; j < len; ++j) {
double y = (r[j] + 1) * smoothed(r[j] + 1, slope, intercept) / smoothed(r[j], slope, intercept);
if (row(r, r[j] + 1) < 0) {
indiffValsSeen = true;
}
if (!indiffValsSeen) {
int n = Nr[row(r, r[j] + 1)];
double x = (r[j] + 1) * n / (double) Nr[j];
if (Math.abs(x - y) <= CONFID_FACTOR * Math.sqrt(MathEx.pow2(r[j] + 1.0) * n / MathEx.pow2(Nr[j]) * (1 + n / (double) Nr[j]))) {
indiffValsSeen = true;
} else {
p[j] = x;
}
}
if (indiffValsSeen) {
p[j] = y;
}
}
double Nprime = 0.0;
for (int j = 0; j < len; ++j) {
Nprime += Nr[j] * p[j];
}
for (int j = 0; j < len; ++j) {
p[j] = (1 - p0) * p[j] / Nprime;
}
return new GoodTuring(p, p0);
}
|
@Test
public void test() {
System.out.println("GoodTuring");
int[] r = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12};
int[] Nr = {120, 40, 24, 13, 15, 5, 11, 2, 2, 1, 3};
double p0 = 0.2047782;
double[] p = {
0.0009267, 0.0024393, 0.0040945, 0.0058063, 0.0075464,
0.0093026, 0.0110689, 0.0128418, 0.0146194, 0.0164005, 0.0199696};
GoodTuring result = GoodTuring.of(r, Nr);
assertEquals(p0, result.p0, 1E-7);
for (int i = 0; i < r.length; i++) {
assertEquals(p[i], result.p[i], 1E-7);
}
}
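
    // Worked check of the expected p0, derived from the focal method (an illustrative note, not
    // part of the original test): p0 = n1 / N, where n1 = Nr[0] (since r[0] == 1) and
    // N = sum_j r[j] * Nr[j]. For the data above,
    // N = 1*120 + 2*40 + 3*24 + 4*13 + 5*15 + 6*5 + 7*11 + 8*2 + 9*2 + 10*1 + 12*3 = 586,
    // so p0 = 120 / 586 ≈ 0.2047782, matching the constant asserted above.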
|
static boolean shouldStoreMessage(final Message message) {
// XEP-0334: Implement the <no-store/> hint to override offline storage
if (message.getChildElement("no-store", "urn:xmpp:hints") != null) {
return false;
}
// OF-2083: Prevent storing offline message that is already stored
if (message.getChildElement("offline", "http://jabber.org/protocol/offline") != null) {
return false;
}
switch (message.getType()) {
case chat:
// XEP-0160: Messages with a 'type' attribute whose value is "chat" SHOULD be stored offline, with the exception of messages that contain only Chat State Notifications (XEP-0085) [7] content
// Iterate through the child elements to see if we can find anything that's not a chat state notification or
// real time text notification
Iterator<?> it = message.getElement().elementIterator();
while (it.hasNext()) {
Object item = it.next();
if (item instanceof Element) {
Element el = (Element) item;
if (Namespace.NO_NAMESPACE.equals(el.getNamespace())) {
continue;
}
if (!el.getNamespaceURI().equals("http://jabber.org/protocol/chatstates")
&& !(el.getQName().equals(QName.get("rtt", "urn:xmpp:rtt:0")))
) {
return true;
}
}
}
return message.getBody() != null && !message.getBody().isEmpty();
case groupchat:
case headline:
// XEP-0160: "groupchat" message types SHOULD NOT be stored offline
// XEP-0160: "headline" message types SHOULD NOT be stored offline
return false;
case error:
// XEP-0160: "error" message types SHOULD NOT be stored offline,
// although a server MAY store advanced message processing errors offline
if (message.getChildElement("amp", "http://jabber.org/protocol/amp") == null) {
return false;
}
break;
default:
// XEP-0160: Messages with a 'type' attribute whose value is "normal" (or messages with no 'type' attribute) SHOULD be stored offline.
break;
}
return true;
}
|
@Test
public void shouldStoreNonEmptyChatMessages() {
// XEP-0160: "chat" message types SHOULD be stored offline unless they only contain chat state notifications
Message message = new Message();
message.setType(Message.Type.chat);
message.setBody(" ");
assertTrue(OfflineMessageStore.shouldStoreMessage(message));
}
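
    // Companion sketch, assuming the org.xmpp.packet.Message API used above: a chat message
    // that carries only a chat state notification and no body should NOT be stored, per the
    // XEP-0085 carve-out in the focal method.
    @Test
    public void shouldNotStoreChatStateOnlyMessages() {
        Message message = new Message();
        message.setType(Message.Type.chat);
        message.addChildElement("composing", "http://jabber.org/protocol/chatstates");
        assertFalse(OfflineMessageStore.shouldStoreMessage(message));
    }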
|
public static void notNullOrEmpty(String string) {
notNullOrEmpty(string, String.format("string [%s] is null or empty", string));
}
|
@Test
public void testNotNull1NotEmpty3() {
assertThrows(IllegalArgumentException.class, () -> Precondition.notNullOrEmpty(" "));
}
|
@Override
public Map<String, String> contextLabels() {
return Collections.unmodifiableMap(contextLabels);
}
|
@Test
public void testKafkaMetricsContextLabelsAreImmutable() {
context = new KafkaMetricsContext(namespace, labels);
assertThrows(UnsupportedOperationException.class, () -> context.contextLabels().clear());
}
|
@Override
public boolean confirm(String key) {
repo.lock(key);
try {
return repo.replace(key, false, true);
} finally {
repo.unlock(key);
}
}
|
@Test
public void testConfirm() throws Exception {
// ADD first key and confirm
assertTrue(repo.add(key01));
assertTrue(repo.confirm(key01));
// try to confirm a key that isn't there
assertFalse(repo.confirm(key02));
}
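
    // Companion sketch, assuming replace(key, false, true) semantics as shown in the focal
    // method: a second confirm finds the value already true, so the replace fails and
    // confirm returns false.
    @Test
    public void testConfirmTwiceReturnsFalseTheSecondTime() throws Exception {
        assertTrue(repo.add(key01));
        assertTrue(repo.confirm(key01));
        assertFalse(repo.confirm(key01));
    }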
|
@Override
public ServerWebExchange convert(final JwtRuleHandle jwtRuleHandle, final ServerWebExchange exchange, final Map<String, Object> jwtBody) {
final DefaultJwtRuleHandle defaultJwtRuleHandle = (DefaultJwtRuleHandle) jwtRuleHandle;
if (CollectionUtils.isEmpty(defaultJwtRuleHandle.getConverter())) {
return exchange;
}
return convert(exchange, jwtBody, defaultJwtRuleHandle.getConverter());
}
|
@Test
public void testConvert() {
String handleJson = "{\"converter\":[{\"jwtVal\":\"sub\",\"headerVal\":\"id\"}]}";
DefaultJwtRuleHandle defaultJwtRuleHandle = defaultJwtConvertStrategy.parseHandleJson(handleJson);
ServerWebExchange newExchange = defaultJwtConvertStrategy
.convert(defaultJwtRuleHandle, exchange, jwtBody);
assertTrue(newExchange.getRequest().getHeaders().get("id").contains(jwtBody.get("sub")));
}
|
public URLConnection openConnection(URL url) throws IOException {
try {
return openConnection(url, false);
} catch (AuthenticationException e) {
// Unreachable
LOG.error("Open connection {} failed", url, e);
return null;
}
}
|
@Test
public void testConnConfiguratior() throws IOException {
final URL u = new URL("http://localhost");
final List<HttpURLConnection> conns = Lists.newArrayList();
URLConnectionFactory fc = new URLConnectionFactory(new ConnectionConfigurator() {
@Override
public HttpURLConnection configure(HttpURLConnection conn)
throws IOException {
Assert.assertEquals(u, conn.getURL());
conns.add(conn);
return conn;
}
});
fc.openConnection(u);
Assert.assertEquals(1, conns.size());
}
|
public static void checkNullOrNonNullNonEmptyEntries(
@Nullable Collection<String> values, String propertyName) {
if (values == null) {
// pass
return;
}
for (String value : values) {
Preconditions.checkNotNull(
value, "Property '" + propertyName + "' cannot contain null entries");
Preconditions.checkArgument(
!value.trim().isEmpty(), "Property '" + propertyName + "' cannot contain empty strings");
}
}
|
@Test
public void testCheckNullOrNonNullNonEmptyEntries_mapWithValuesPass() {
Validator.checkNullOrNonNullNonEmptyEntries(
ImmutableMap.of("key1", "val1", "key2", "val2"), "test");
// pass
}
|
@Override
public double distanceBtw(Point p1, Point p2) {
numCalls++;
confirmRequiredDataIsPresent(p1);
confirmRequiredDataIsPresent(p2);
        Duration timeDelta = Duration.between(p1.time(), p2.time()); // can be positive or negative
timeDelta = timeDelta.abs();
Double horizontalDistanceInNm = p1.distanceInNmTo(p2);
Double horizontalDistanceInFeet = horizontalDistanceInNm * Spherical.feetPerNM();
Double altitudeDifferenceInFeet = Math.abs(p1.altitude().inFeet() - p2.altitude().inFeet());
Double distInFeet = hypot(horizontalDistanceInFeet, altitudeDifferenceInFeet);
return (distanceCoef * distInFeet) + (timeCoef * timeDelta.toMillis());
}
|
@Test
public void testDistanceComputation_latitude() {
PointDistanceMetric metric1 = new PointDistanceMetric(1.0, 1.0);
PointDistanceMetric metric2 = new PointDistanceMetric(1.0, 2.0);
Point p1 = new PointBuilder()
.latLong(0.0, 0.0)
.altitude(Distance.ofFeet(0.0))
.time(Instant.EPOCH)
.build();
Point p2 = new PointBuilder()
.latLong(1.0, 1.0)
.altitude(Distance.ofFeet(0.0))
.time(Instant.EPOCH)
.build();
double TOL = 0.00001;
assertTrue(metric1.distanceBtw(p1, p2) != 0.0);
assertEquals(
            2.0 * metric1.distanceBtw(p1, p2), metric2.distanceBtw(p1, p2), TOL,
            "Metric2 applies a coefficient of 2 on distance, so metric1's result must be doubled to match"
        );
LatLong pair1 = new LatLong(0.0, 0.0);
LatLong pair2 = new LatLong(1.0, 1.0);
double DIST_IN_NM = pair1.distanceInNM(pair2);
double DIST_IN_FT = Spherical.feetPerNM() * DIST_IN_NM;
double SOME_TOL = DIST_IN_FT * 0.005;
assertEquals(
DIST_IN_FT, metric1.distanceBtw(p1, p2), SOME_TOL,
"The measure distance should be within 0.5% of the REAL distance"
);
}
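
    // Worked-number note, illustrative rather than from the original test: with metric1 (both
    // coefficients 1.0) and two points at identical lat/long and altitude but 1 second apart,
    // distInFeet = 0, so the result reduces to 1.0 * timeDelta.toMillis() = 1000.0; time
    // separation alone produces distance under this metric.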
|
public boolean initAndAddIssue(Issue issue) {
DefaultInputComponent inputComponent = (DefaultInputComponent) issue.primaryLocation().inputComponent();
if (noSonar(inputComponent, issue)) {
return false;
}
ActiveRule activeRule = activeRules.find(issue.ruleKey());
if (activeRule == null) {
// rule does not exist or is not enabled -> ignore the issue
return false;
}
ScannerReport.Issue rawIssue = createReportIssue(issue, inputComponent.scannerId(), activeRule.severity());
if (filters.accept(inputComponent, rawIssue)) {
write(inputComponent.scannerId(), rawIssue);
return true;
}
return false;
}
|
@Test
public void filter_issue() {
DefaultIssue issue = new DefaultIssue(project)
.at(new DefaultIssueLocation().on(file).at(file.selectLine(3)).message(""))
.forRule(JAVA_RULE_KEY);
when(filters.accept(any(InputComponent.class), any(ScannerReport.Issue.class))).thenReturn(false);
boolean added = moduleIssues.initAndAddIssue(issue);
assertThat(added).isFalse();
verifyNoInteractions(reportPublisher);
}
|
@Override
public int size() {
return values.length;
}
|
@Test
public void hasASize() {
assertThat(snapshot.size())
.isEqualTo(5);
}
|
@VisibleForTesting
Optional<Method> getGetSchedulerResourceTypesMethod() {
return getSchedulerResourceTypesMethod;
}
|
@Test
void testGetSchedulerResourceTypesMethodReflectiveHadoop26() {
final RegisterApplicationMasterResponseReflector
registerApplicationMasterResponseReflector =
new RegisterApplicationMasterResponseReflector(LOG);
assertThat(registerApplicationMasterResponseReflector.getGetSchedulerResourceTypesMethod())
.isPresent();
}
|
@Override
public boolean format() throws Exception {
// Clear underreplicated ledgers
store.deleteRecursive(PulsarLedgerUnderreplicationManager.getBasePath(ledgersRootPath)
+ BookKeeperConstants.DEFAULT_ZK_LEDGERS_ROOT_PATH)
.get(BLOCKING_CALL_TIMEOUT, MILLISECONDS);
// Clear underreplicatedledger locks
store.deleteRecursive(PulsarLedgerUnderreplicationManager.getUrLockPath(ledgersRootPath))
.get(BLOCKING_CALL_TIMEOUT, MILLISECONDS);
// Clear the cookies
store.deleteRecursive(cookiePath).get(BLOCKING_CALL_TIMEOUT, MILLISECONDS);
// Clear the INSTANCEID
if (store.exists(ledgersRootPath + "/" + BookKeeperConstants.INSTANCEID)
.get(BLOCKING_CALL_TIMEOUT, MILLISECONDS)) {
store.delete(ledgersRootPath + "/" + BookKeeperConstants.INSTANCEID, Optional.empty())
.get(BLOCKING_CALL_TIMEOUT, MILLISECONDS);
}
// create INSTANCEID
String instanceId = UUID.randomUUID().toString();
store.put(ledgersRootPath + "/" + BookKeeperConstants.INSTANCEID,
instanceId.getBytes(StandardCharsets.UTF_8), Optional.of(-1L))
.get(BLOCKING_CALL_TIMEOUT, MILLISECONDS);
log.info("Successfully formatted BookKeeper metadata");
return true;
}
|
@Test(dataProvider = "impl")
public void testFormatNonExistingCluster(String provider, Supplier<String> urlSupplier) throws Exception {
methodSetup(urlSupplier);
assertClusterNotExists();
assertTrue(registrationManager.format());
assertClusterExists();
}
|
public static String formatTime(long timestamp) {
return dateFmt.format(Instant.ofEpochMilli(timestamp));
}
|
@Test
public void testFormatTime() {
Assert.assertEquals("2019-06-15 12:13:14.000",
EagleEyeCoreUtils.formatTime(1560600794000L - TimeZone.getDefault().getRawOffset()));
}
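
    // Note, illustrative: 1560600794000L is 2019-06-15T12:13:14Z in epoch millis; subtracting
    // TimeZone.getDefault().getRawOffset() compensates for dateFmt formatting in the local zone,
    // so the assertion holds regardless of where the test runs (modulo DST edge cases).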
|
public static GeneratorResult run(String resolverPath,
String defaultPackage,
final boolean generateImported,
final boolean generateDataTemplates,
RestliVersion version,
RestliVersion deprecatedByVersion,
String targetDirectoryPath,
String[] sources)
throws IOException
{
return run(resolverPath,
defaultPackage,
null,
generateImported,
generateDataTemplates,
version,
deprecatedByVersion,
targetDirectoryPath,
sources);
}
|
@Test
public void testLowercasePathForGeneratedFileDoesNotEffectTargetDirectory() throws IOException
{
if (!isFileSystemCaseSensitive) {
// If system is case insensitive, then this test is a NOP.
return;
}
// Given: Path with upper case letters as part of the target directory's path.
final File root = ExporterTestUtils.createTmpDir();
final String pathWithUpperCase = "mainGenerated";
final String tmpPath = root.getPath() + FS + pathWithUpperCase;
final File tmpDir = new File(tmpPath);
tmpDir.mkdir();
// Given: spec files.
final String pegasusDir = moduleDir + FS + RESOURCES_DIR + FS + "pegasus";
final String restspec = "arrayDuplicateB.namespace.restspec.json";
final String file1 = moduleDir + FS + RESOURCES_DIR + FS + "idls" + FS + restspec;
// When: Generate the files defined by spec.
GeneratorResult r = RestRequestBuilderGenerator.run(pegasusDir,
null,
moduleDir,
true,
false,
RestliVersion.RESTLI_2_0_0,
null,
tmpPath,
new String[] { file1 },
true);
// Then: Validate generated files are created in the path without modifying the root path's case.
Assert.assertTrue(r.getModifiedFiles().size() > 0);
for (File f : r.getModifiedFiles()) {
Assert.assertTrue(f.getCanonicalPath().contains(pathWithUpperCase));
Assert.assertTrue(f.getAbsolutePath().contains(pathWithUpperCase));
}
// Clean up.
ExporterTestUtils.rmdir(root);
}
|
public static Point<AriaCsvHit> parsePointFromAriaCsv(String rawCsvText) {
AriaCsvHit ariaHit = AriaCsvHit.from(rawCsvText);
Position pos = new Position(ariaHit.time(), ariaHit.latLong(), ariaHit.altitude());
return new Point<>(pos, null, ariaHit.linkId(), ariaHit);
}
|
@Test
public void exampleParsing_failCorrectlyWhenOutOfBounds() {
String rawCsv = ",,2018-03-24T14:41:09.371Z,vehicleIdNumber,42.9525,-83.7056,2700";
Point<AriaCsvHit> pt = AriaCsvHits.parsePointFromAriaCsv(rawCsv);
assertThat("The entire rawCsv text is accessible from the parsed point", pt.rawData().rawCsvText(), is(rawCsv));
assertThrows(ArrayIndexOutOfBoundsException.class, () -> pt.rawData().token(7));
}
|
public void runExtractor(Message msg) {
try(final Timer.Context ignored = completeTimer.time()) {
final String field;
try (final Timer.Context ignored2 = conditionTimer.time()) {
// We can only work on Strings.
if (!(msg.getField(sourceField) instanceof String)) {
conditionMissesCounter.inc();
return;
}
field = (String) msg.getField(sourceField);
// Decide if to extract at all.
if (conditionType.equals(ConditionType.STRING)) {
if (field.contains(conditionValue)) {
conditionHitsCounter.inc();
} else {
conditionMissesCounter.inc();
return;
}
} else if (conditionType.equals(ConditionType.REGEX)) {
if (regexConditionPattern.matcher(field).find()) {
conditionHitsCounter.inc();
} else {
conditionMissesCounter.inc();
return;
}
}
}
try (final Timer.Context ignored2 = executionTimer.time()) {
Result[] results;
try {
results = run(field);
} catch (ExtractorException e) {
final String error = "Could not apply extractor <" + getTitle() + " (" + getId() + ")>";
msg.addProcessingError(new Message.ProcessingError(
ProcessingFailureCause.ExtractorException, error, ExceptionUtils.getRootCauseMessage(e)));
return;
}
if (results == null || results.length == 0 || Arrays.stream(results).anyMatch(result -> result.getValue() == null)) {
return;
} else if (results.length == 1 && results[0].target == null) {
                    // results[0].target is null if this extractor cannot produce multiple fields; use targetField in that case
msg.addField(targetField, results[0].getValue());
} else {
for (final Result result : results) {
msg.addField(result.getTarget(), result.getValue());
}
}
// Remove original from message?
if (cursorStrategy.equals(CursorStrategy.CUT) && !targetField.equals(sourceField) && !Message.RESERVED_FIELDS.contains(sourceField) && results[0].beginIndex != -1) {
final StringBuilder sb = new StringBuilder(field);
final List<Result> reverseList = Arrays.stream(results)
.sorted(Comparator.<Result>comparingInt(result -> result.endIndex).reversed())
.collect(Collectors.toList());
// remove all from reverse so that the indices still match
for (final Result result : reverseList) {
sb.delete(result.getBeginIndex(), result.getEndIndex());
}
final String builtString = sb.toString();
final String finalResult = builtString.trim().isEmpty() ? "fullyCutByExtractor" : builtString;
msg.removeField(sourceField);
// TODO don't add an empty field back, or rather don't add fullyCutByExtractor
msg.addField(sourceField, finalResult);
}
runConverters(msg);
}
}
}
|
@Test
public void testWithMultipleTargetValueResults() throws Exception {
final TestExtractor extractor = new TestExtractor.Builder()
.callback(new Callable<Result[]>() {
@Override
public Result[] call() throws Exception {
return new Result[]{
new Result(1, "one", -1, -1),
new Result("2", "two", -1, -1),
new Result(3, "three", -1, -1)
};
}
})
.build();
final Message msg = createMessage("the hello");
extractor.runExtractor(msg);
assertThat(msg.hasField("target")).isFalse();
assertThat(msg.getField("one")).isEqualTo(1);
assertThat(msg.getField("two")).isEqualTo("2");
assertThat(msg.getField("three")).isEqualTo(3);
}
|
@Override
public List<Catalogue> sort(List<Catalogue> catalogueTree, SortTypeEnum sortTypeEnum) {
log.debug(
"sort catalogue tree based on first letter. catalogueTree: {}, sortTypeEnum: {}",
catalogueTree,
sortTypeEnum);
Collator collator = Collator.getInstance(Locale.CHINA);
return recursionSortCatalogues(catalogueTree, sortTypeEnum, collator);
}
|
@Test
public void sortEmptyTest2() {
List<Catalogue> catalogueTree = null;
SortTypeEnum sortTypeEnum = SortTypeEnum.ASC;
List<Catalogue> resultList = catalogueTreeSortFirstLetterStrategyTest.sort(catalogueTree, sortTypeEnum);
assertEquals(Lists.newArrayList(), resultList);
}
|
@Override
public void profileSetOnce(JSONObject properties) {
}
|
@Test
public void profileSetOnce() {
mSensorsAPI.setTrackEventCallBack(new SensorsDataTrackEventCallBack() {
@Override
public boolean onTrackEvent(String eventName, JSONObject eventProperties) {
Assert.fail();
return false;
}
});
mSensorsAPI.profileSetOnce("abcde", "123");
}
|
@Override
public HttpResponse send(HttpRequest httpRequest) throws IOException {
return send(httpRequest, null);
}
|
@Test
public void send_whenHeadRequest_returnsHttpResponseWithoutBody() throws IOException {
String responseBody = "test response";
mockWebServer.enqueue(
new MockResponse()
.setResponseCode(HttpStatus.OK.code())
.setHeader(CONTENT_TYPE, MediaType.PLAIN_TEXT_UTF_8.toString())
.setBody(responseBody));
mockWebServer.start();
String requestUrl = mockWebServer.url("/test/head").toString();
HttpResponse response = httpClient.send(head(requestUrl).withEmptyHeaders().build());
assertThat(response)
.isEqualTo(
HttpResponse.builder()
.setStatus(HttpStatus.OK)
.setHeaders(
HttpHeaders.builder()
.addHeader(CONTENT_TYPE, MediaType.PLAIN_TEXT_UTF_8.toString())
// MockWebServer always adds this response header.
.addHeader(CONTENT_LENGTH, String.valueOf(responseBody.length()))
.build())
.setBodyBytes(Optional.empty())
.setResponseUrl(HttpUrl.parse(requestUrl))
.build());
}
|
@Override public Destination getDestination( JmsDelegate meta ) {
checkNotNull( meta.destinationName, getString( JmsConstants.PKG, "JmsWebsphereMQ.DestinationNameRequired" ) );
try {
String destName = meta.destinationName;
return isQueue( meta )
? new MQQueue( destName )
: new MQTopic( destName );
} catch ( JMSException e ) {
throw new IllegalStateException( e );
}
}
|
@Test
public void noDestinationNameSetCausesError() {
jmsDelegate.destinationType = QUEUE.name();
jmsDelegate.destinationName = null;
try {
jmsProvider.getDestination( jmsDelegate );
fail();
} catch ( Exception e ) {
assertTrue( e.getMessage().contains( "Destination name must be set." ) );
}
}
|
public long cardinality() {
switch (bitmapType) {
case EMPTY:
return 0;
case SINGLE_VALUE:
return 1;
case BITMAP_VALUE:
return bitmap.getLongCardinality();
case SET_VALUE:
return set.size();
}
return 0;
}
|
@Test
public void testCardinality() {
BitmapValue bitmapValue = new BitmapValue();
assertEquals(0, bitmapValue.cardinality());
bitmapValue.add(0);
bitmapValue.add(0);
bitmapValue.add(-1);
bitmapValue.add(-1);
bitmapValue.add(Integer.MAX_VALUE);
bitmapValue.add(Integer.MAX_VALUE);
bitmapValue.add(-Integer.MAX_VALUE);
bitmapValue.add(-Integer.MAX_VALUE);
bitmapValue.add(Long.MAX_VALUE);
bitmapValue.add(Long.MAX_VALUE);
bitmapValue.add(-Long.MAX_VALUE);
bitmapValue.add(-Long.MAX_VALUE);
assertEquals(6, bitmapValue.cardinality());
}
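
    // Companion sketch, using only the add()/cardinality() API shown above: cardinality tracks
    // distinct values as the internal representation grows from EMPTY to SINGLE_VALUE and on to
    // a multi-value form.
    @Test
    public void testCardinalityGrowsWithDistinctValues() {
        BitmapValue bitmapValue = new BitmapValue();
        assertEquals(0, bitmapValue.cardinality()); // EMPTY
        bitmapValue.add(1);
        assertEquals(1, bitmapValue.cardinality()); // SINGLE_VALUE
        bitmapValue.add(2);
        assertEquals(2, bitmapValue.cardinality()); // promoted to a multi-value representation
    }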
|
public static Write write() {
return new AutoValue_SnsIO_Write.Builder().build();
}
|
@Test
public void testRetries() throws Throwable {
thrown.expect(IOException.class);
thrown.expectMessage("Error writing to SNS");
thrown.expectMessage("No more attempts allowed");
final PublishRequest request1 = createSampleMessage("my message that will not be published");
final TupleTag<PublishResult> results = new TupleTag<>();
final AmazonSNS amazonSnsErrors = getAmazonSnsMockErrors();
p.apply(Create.of(request1))
.apply(
SnsIO.write()
.withTopicName(topicName)
.withRetryConfiguration(
SnsIO.RetryConfiguration.create(4, standardSeconds(10), millis(1)))
.withAWSClientsProvider(new Provider(amazonSnsErrors))
.withResultOutputTag(results));
try {
p.run();
} catch (final Pipeline.PipelineExecutionException e) {
// check 3 retries were initiated by inspecting the log before passing on the exception
snsWriterFnLogs.verifyWarn(
MessageFormatter.format(SnsIO.Write.SnsWriterFn.RETRY_ATTEMPT_LOG, 1).getMessage());
snsWriterFnLogs.verifyWarn(
MessageFormatter.format(SnsIO.Write.SnsWriterFn.RETRY_ATTEMPT_LOG, 2).getMessage());
snsWriterFnLogs.verifyWarn(
MessageFormatter.format(SnsIO.Write.SnsWriterFn.RETRY_ATTEMPT_LOG, 3).getMessage());
throw e.getCause();
}
}
|
@DELETE
@Produces(MediaType.APPLICATION_JSON)
@Path("/{device_id}")
@ChangesLinkedDevices
public void removeDevice(@Mutable @Auth AuthenticatedDevice auth, @PathParam("device_id") byte deviceId) {
if (auth.getAuthenticatedDevice().getId() != Device.PRIMARY_ID &&
auth.getAuthenticatedDevice().getId() != deviceId) {
throw new WebApplicationException(Response.Status.UNAUTHORIZED);
}
if (deviceId == Device.PRIMARY_ID) {
throw new ForbiddenException();
}
accounts.removeDevice(auth.getAccount(), deviceId).join();
}
|
@Test
void removeDevice() {
// this is a static mock, so it might have previous invocations
clearInvocations(AuthHelper.VALID_ACCOUNT);
final byte deviceId = 2;
when(accountsManager.removeDevice(AuthHelper.VALID_ACCOUNT, deviceId))
.thenReturn(CompletableFuture.completedFuture(AuthHelper.VALID_ACCOUNT));
final Response response = resources
.getJerseyTest()
.target("/v1/devices/" + deviceId)
.request()
.header("Authorization", AuthHelper.getAuthHeader(AuthHelper.VALID_UUID, AuthHelper.VALID_PASSWORD))
.header(HttpHeaders.USER_AGENT, "Signal-Android/5.42.8675309 Android/30")
.delete();
assertThat(response.getStatus()).isEqualTo(204);
assertThat(response.hasEntity()).isFalse();
verify(accountsManager).removeDevice(AuthHelper.VALID_ACCOUNT, deviceId);
}
|