focal_method (string, lengths 13–60.9k) | test_case (string, lengths 25–109k) |
---|---|
@Override
public int launch(AgentLaunchDescriptor descriptor) {
LogConfigurator logConfigurator = new LogConfigurator("agent-launcher-logback.xml");
return logConfigurator.runWithLogger(() -> doLaunch(descriptor));
} | @Test
@DisabledOnOs(OS.WINDOWS)
public void should_NOT_Download_TfsImplJar_IfTheCurrentJarIsUpToDate() throws Exception {
TEST_AGENT_LAUNCHER.copyTo(AGENT_LAUNCHER_JAR);
TEST_AGENT.copyTo(AGENT_BINARY_JAR);
TEST_TFS_IMPL.copyTo(TFS_IMPL_JAR);
assertTrue(TFS_IMPL_JAR.setLastModified(0));
new AgentLauncherImpl().launch(launchDescriptor());
assertThat(TFS_IMPL_JAR.lastModified(), is(0L));
} |
@Override
public Optional<Entity> exportEntity(EntityDescriptor entityDescriptor, EntityDescriptorIds entityDescriptorIds) {
final ModelId modelId = entityDescriptor.id();
final Optional<NotificationDto> notificationDto = notificationService.get(modelId.id());
if (!notificationDto.isPresent()) {
LOG.debug("Couldn't find notification {}", entityDescriptor);
return Optional.empty();
}
final NotificationEntity entity = (NotificationEntity) notificationDto.get().toContentPackEntity(entityDescriptorIds);
final JsonNode data = objectMapper.convertValue(entity, JsonNode.class);
return Optional.of(
EntityV1.builder()
.id(ModelId.of(entityDescriptorIds.getOrThrow(notificationDto.get().id(), ModelTypes.NOTIFICATION_V1)))
.type(ModelTypes.NOTIFICATION_V1)
.data(data)
.build());
} | @Test
@MongoDBFixtures("NotificationFacadeTest.json")
public void exportEntity() {
final ModelId id = ModelId.of("5d4d33753d27460ad18e0c4d");
final EntityDescriptor descriptor = EntityDescriptor.create(id, ModelTypes.NOTIFICATION_V1);
final EntityDescriptorIds entityDescriptorIds = EntityDescriptorIds.of(descriptor);
final Optional<Entity> entity = facade.exportEntity(descriptor, entityDescriptorIds);
assertThat(entity).isPresent();
final EntityV1 entityV1 = (EntityV1) entity.get();
final NotificationEntity notificationEntity = objectMapper.convertValue(entityV1.data(),
NotificationEntity.class);
assertThat(notificationEntity.title().asString()).isEqualTo("title");
assertThat(notificationEntity.description().asString()).isEqualTo("description");
assertThat(notificationEntity.config().type()).isEqualTo("email-notification-v1");
} |
@Override
public Mono<Long> delete(final long id) {
return Mono.zip(
dataSourceRepository.existsByNamespace(id),
collectorRepository.existsByNamespace(id),
termRepository.existsByNamespace(id),
dataEntityRepository.existsNonDeletedByNamespaceId(id)
)
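// the namespace is deletable only if no data source, collector, term, or non-deleted data entity still references it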
.map(t -> BooleanUtils.toBoolean(t.getT1())
|| BooleanUtils.toBoolean(t.getT2())
|| BooleanUtils.toBoolean(t.getT3())
|| BooleanUtils.toBoolean(t.getT4()))
.filter(exists -> !exists)
.switchIfEmpty(Mono.error(new CascadeDeleteException(
"Namespace cannot be deleted: there are still resources attached")))
.flatMap(ign -> namespaceRepository.delete(id))
.map(NamespacePojo::getId);
} | @Test
@DisplayName("Tries to delete a namespace which is tied with existing data entity and fails with an error")
public void testDeleteTiedNamespaceWithDataEntity() {
final long namespaceId = 1L;
when(collectorRepository.existsByNamespace(eq(namespaceId))).thenReturn(Mono.just(false));
when(dataSourceRepository.existsByNamespace(eq(namespaceId))).thenReturn(Mono.just(false));
when(termRepository.existsByNamespace(eq(namespaceId))).thenReturn(Mono.just(false));
when(dataEntityRepository.existsNonDeletedByNamespaceId(eq(namespaceId))).thenReturn(Mono.just(true));
namespaceService.delete(namespaceId)
.as(StepVerifier::create)
.verifyError(CascadeDeleteException.class);
verify(namespaceRepository, never()).delete(eq(namespaceId));
verify(dataSourceRepository, only()).existsByNamespace(eq(namespaceId));
verify(termRepository, only()).existsByNamespace(eq(namespaceId));
verify(collectorRepository, only()).existsByNamespace(eq(namespaceId));
verify(dataEntityRepository, only()).existsNonDeletedByNamespaceId(eq(namespaceId));
} |
public static Object construct(String className) throws JMeterException {
Object instance = null;
try {
instance = ClassUtils.getClass(className).getDeclaredConstructor().newInstance();
} catch (IllegalArgumentException | ReflectiveOperationException | SecurityException e) {
throw new JMeterException(e);
}
return instance;
} | @Test
public void testConstructStringString() throws JMeterException {
String dummy = (String) ClassTools.construct("java.lang.String",
"hello");
assertNotNull(dummy);
assertEquals("hello", dummy);
} |
@Override
public Boolean mSet(Map<byte[], byte[]> tuple) {
if (isQueueing() || isPipelined()) {
for (Entry<byte[], byte[]> entry: tuple.entrySet()) {
write(entry.getKey(), StringCodec.INSTANCE, RedisCommands.SET, entry.getKey(), entry.getValue());
}
return true;
}
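// otherwise, group all SETs into a single command batch and execute them together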
CommandBatchService es = new CommandBatchService(executorService);
for (Entry<byte[], byte[]> entry: tuple.entrySet()) {
es.writeAsync(entry.getKey(), StringCodec.INSTANCE, RedisCommands.SET, entry.getKey(), entry.getValue());
}
es.execute();
return true;
} | @Test
public void testMSet() {
testInCluster(connection -> {
Map<byte[], byte[]> map = new HashMap<>();
for (int i = 0; i < 10; i++) {
map.put(("test" + i).getBytes(), ("test" + i*100).getBytes());
}
connection.mSet(map);
for (Map.Entry<byte[], byte[]> entry : map.entrySet()) {
assertThat(connection.get(entry.getKey())).isEqualTo(entry.getValue());
}
});
} |
public static Version loadApiVersion(System2 system) {
return getVersion(system, SONAR_API_VERSION_FILE_PATH);
} | @Test
void throw_ISE_if_fail_to_load_version() throws Exception {
when(system.getResource(anyString())).thenReturn(new File("target/unknown").toURI().toURL());
assertThatThrownBy(() -> MetadataLoader.loadApiVersion(system))
.isInstanceOf(IllegalStateException.class)
.hasMessageContaining("Can not load /sonar-api-version.txt from classpath");
} |
public void readErrorOf(InputStream in) {
context.console().readErrorOf(in);
} | @Test
public void shouldDelegateReadErrorOfToConsole() {
InputStream inputStream = mock(InputStream.class);
consoleLogger.readErrorOf(inputStream);
verify(mockedConsole).readErrorOf(inputStream);
} |
private boolean checkForError(Message message, Response<?> response) {
if (response.hasError()) {
int code = response.getError().getCode();
String data = response.getError().getData();
String messages = response.getError().getMessage();
message.setHeader(Web3jConstants.ERROR_CODE, code);
message.setHeader(Web3jConstants.ERROR_DATA, data);
message.setHeader(Web3jConstants.ERROR_MESSAGE, messages);
message.getExchange().setException(new CamelExchangeException(
"Web3j failed. Error code: " + code + " data: " + data + " messages: " + messages, message.getExchange()));
return true;
} else {
return false;
}
} | @Test
public void checkForErrorTest() throws Exception {
Web3ClientVersion response = Mockito.mock(Web3ClientVersion.class);
Mockito.when(mockWeb3j.web3ClientVersion()).thenReturn(request);
Mockito.when(request.send()).thenReturn(response);
Mockito.when(response.hasError()).thenReturn(true);
Response.Error error = Mockito.mock(Response.Error.class);
Mockito.when(response.getError()).thenReturn(error);
Mockito.when(error.getCode()).thenReturn(1);
Mockito.when(error.getMessage()).thenReturn("error message");
Mockito.when(error.getData()).thenReturn("error data");
Exchange exchange = createExchangeWithBodyAndHeader(null, Web3jConstants.ID, Long.valueOf(2));
template.send(exchange);
assertEquals(Integer.valueOf(1), exchange.getIn().getHeader(Web3jConstants.ERROR_CODE, Integer.class));
assertEquals("error message", exchange.getIn().getHeader(Web3jConstants.ERROR_MESSAGE, String.class));
assertEquals("error data", exchange.getIn().getHeader(Web3jConstants.ERROR_DATA, String.class));
} |
@Override
public void handle(ContainerLauncherEvent event) {
try {
eventQueue.put(event);
} catch (InterruptedException e) {
throw new YarnRuntimeException(e);
}
} | @SuppressWarnings({ "rawtypes", "unchecked" })
@Test(timeout = 5000)
public void testContainerCleaned() throws Exception {
LOG.info("STARTING testContainerCleaned");
CyclicBarrier startLaunchBarrier = new CyclicBarrier(2);
CyclicBarrier completeLaunchBarrier = new CyclicBarrier(2);
AppContext mockContext = mock(AppContext.class);
EventHandler mockEventHandler = mock(EventHandler.class);
when(mockContext.getEventHandler()).thenReturn(mockEventHandler);
ContainerManagementProtocolClient mockCM =
new ContainerManagerForTest(startLaunchBarrier, completeLaunchBarrier);
ContainerLauncherImplUnderTest ut =
new ContainerLauncherImplUnderTest(mockContext, mockCM);
Configuration conf = new Configuration();
ut.init(conf);
ut.start();
try {
ContainerId contId = makeContainerId(0L, 0, 0, 1);
TaskAttemptId taskAttemptId = makeTaskAttemptId(0L, 0, 0, TaskType.MAP, 0);
String cmAddress = "127.0.0.1:8000";
StartContainersResponse startResp =
recordFactory.newRecordInstance(StartContainersResponse.class);
startResp.setAllServicesMetaData(serviceResponse);
LOG.info("inserting launch event");
ContainerRemoteLaunchEvent mockLaunchEvent =
mock(ContainerRemoteLaunchEvent.class);
when(mockLaunchEvent.getType())
.thenReturn(EventType.CONTAINER_REMOTE_LAUNCH);
when(mockLaunchEvent.getContainerID())
.thenReturn(contId);
when(mockLaunchEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
when(mockLaunchEvent.getContainerMgrAddress()).thenReturn(cmAddress);
when(mockLaunchEvent.getContainerToken()).thenReturn(
createNewContainerToken(contId, cmAddress));
ut.handle(mockLaunchEvent);
startLaunchBarrier.await();
LOG.info("inserting cleanup event");
ContainerLauncherEvent mockCleanupEvent =
mock(ContainerLauncherEvent.class);
when(mockCleanupEvent.getType())
.thenReturn(EventType.CONTAINER_REMOTE_CLEANUP);
when(mockCleanupEvent.getContainerID())
.thenReturn(contId);
when(mockCleanupEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
when(mockCleanupEvent.getContainerMgrAddress()).thenReturn(cmAddress);
ut.handle(mockCleanupEvent);
completeLaunchBarrier.await();
ut.waitForPoolToIdle();
ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
verify(mockEventHandler, atLeast(2)).handle(arg.capture());
boolean containerCleaned = false;
for (int i = 0; i < arg.getAllValues().size(); i++) {
LOG.info(arg.getAllValues().get(i).toString());
Event currentEvent = arg.getAllValues().get(i);
if (currentEvent.getType() == TaskAttemptEventType.TA_CONTAINER_CLEANED) {
containerCleaned = true;
}
}
assertTrue(containerCleaned);
} finally {
ut.stop();
}
} |
@Override
public Expression getExpression(String tableName, Alias tableAlias) {
// Only process data permissions when there is a logged-in user
LoginUser loginUser = SecurityFrameworkUtils.getLoginUser();
if (loginUser == null) {
return null;
}
// Only process data permissions for admin-type users
if (ObjectUtil.notEqual(loginUser.getUserType(), UserTypeEnum.ADMIN.getValue())) {
return null;
}
// Obtain the data permission
DeptDataPermissionRespDTO deptDataPermission = loginUser.getContext(CONTEXT_KEY, DeptDataPermissionRespDTO.class);
// If absent from the context, fetch it by invoking the permission API
if (deptDataPermission == null) {
deptDataPermission = permissionApi.getDeptDataPermission(loginUser.getId());
if (deptDataPermission == null) {
log.error("[getExpression][LoginUser({}) 获取数据权限为 null]", JsonUtils.toJsonString(loginUser));
throw new NullPointerException(String.format("LoginUser(%d) Table(%s/%s) 未返回数据权限",
loginUser.getId(), tableName, tableAlias.getName()));
}
// Store it in the context to avoid recomputation
loginUser.setContext(CONTEXT_KEY, deptDataPermission);
}
// Case 1: ALL means everything is visible, so no condition needs to be appended
if (deptDataPermission.getAll()) {
return null;
}
// Case 2: can view neither any department nor self, which means definitely no permission
if (CollUtil.isEmpty(deptDataPermission.getDeptIds())
&& Boolean.FALSE.equals(deptDataPermission.getSelf())) {
return new EqualsTo(null, null); // WHERE null = null guarantees the query returns no rows
}
// Case 3: build the Dept and User conditions, then combine them
Expression deptExpression = buildDeptExpression(tableName, tableAlias, deptDataPermission.getDeptIds());
Expression userExpression = buildUserExpression(tableName, tableAlias, deptDataPermission.getSelf(), loginUser.getId());
if (deptExpression == null && userExpression == null) {
// TODO 芋艿: when no condition can be built, do not throw an exception for now; return no data instead
log.warn("[getExpression][LoginUser({}) Table({}/{}) DeptDataPermission({}) 构建的条件为空]",
JsonUtils.toJsonString(loginUser), tableName, tableAlias, JsonUtils.toJsonString(deptDataPermission));
// throw new NullPointerException(String.format("LoginUser(%d) Table(%s/%s) 构建的条件为空",
// loginUser.getId(), tableName, tableAlias.getName()));
return EXPRESSION_NULL;
}
if (deptExpression == null) {
return userExpression;
}
if (userExpression == null) {
return deptExpression;
}
// Currently, when specific departments are visible and self is visible, combine with OR, i.e. WHERE (dept_id IN ? OR user_id = ?)
return new Parenthesis(new OrExpression(deptExpression, userExpression));
} | @Test // build the Dept and User conditions (neither column matches)
public void testGetExpression_noDeptColumn_noSelfColumn() {
try (MockedStatic<SecurityFrameworkUtils> securityFrameworkUtilsMock
= mockStatic(SecurityFrameworkUtils.class)) {
// Prepare parameters
String tableName = "t_user";
Alias tableAlias = new Alias("u");
// mock the method (LoginUser)
LoginUser loginUser = randomPojo(LoginUser.class, o -> o.setId(1L)
.setUserType(UserTypeEnum.ADMIN.getValue()));
securityFrameworkUtilsMock.when(SecurityFrameworkUtils::getLoginUser).thenReturn(loginUser);
// mock the method (DeptDataPermissionRespDTO)
DeptDataPermissionRespDTO deptDataPermission = new DeptDataPermissionRespDTO()
.setDeptIds(SetUtils.asSet(10L, 20L)).setSelf(true);
when(permissionApi.getDeptDataPermission(same(1L))).thenReturn(deptDataPermission);
// Invoke
Expression expression = rule.getExpression(tableName, tableAlias);
// Assert
assertSame(EXPRESSION_NULL, expression);
assertSame(deptDataPermission, loginUser.getContext(DeptDataPermissionRule.CONTEXT_KEY, DeptDataPermissionRespDTO.class));
}
} |
public static List<File> loopFiles(String path, FileFilter fileFilter) {
return loopFiles(file(path), fileFilter);
} | @Test
@Disabled
public void loopFilesTest() {
final List<File> files = FileUtil.loopFiles("d:/");
for (final File file : files) {
Console.log(file.getPath());
}
} |
public EtcdClient(final String url, final long ttl, final long timeout) {
this.client = Client.builder().endpoints(url.split(",")).build();
this.ttl = ttl;
this.timeout = timeout;
initLease();
} | @Test
public void etcdClientTest() {
try (MockedStatic<Client> clientMockedStatic = mockStatic(Client.class)) {
final ClientBuilder clientBuilder = mock(ClientBuilder.class);
clientMockedStatic.when(Client::builder).thenReturn(clientBuilder);
when(clientBuilder.endpoints(anyString())).thenReturn(clientBuilder);
final Client client = mock(Client.class);
when(clientBuilder.endpoints(anyString()).build()).thenReturn(client);
final Lease lease = mock(Lease.class);
when(client.getLeaseClient()).thenReturn(lease);
final CompletableFuture<LeaseGrantResponse> completableFuture = mock(CompletableFuture.class);
final LeaseGrantResponse leaseGrantResponse = mock(LeaseGrantResponse.class);
when(client.getLeaseClient().grant(anyLong())).thenReturn(completableFuture);
when(completableFuture.get()).thenReturn(leaseGrantResponse);
Assertions.assertDoesNotThrow(() -> new EtcdClient("url", 60L, 3000L));
List<StreamObserver<LeaseKeepAliveResponse>> observerList = new ArrayList<>();
doAnswer(invocation -> {
observerList.add(invocation.getArgument(1));
return lease;
}).when(lease).keepAlive(anyLong(), any());
Assertions.assertDoesNotThrow(() -> new EtcdClient("url", 60L, 3000L));
final LeaseKeepAliveResponse leaseKeepAliveResponse = mock(LeaseKeepAliveResponse.class);
observerList.forEach(streamObserver -> {
streamObserver.onCompleted();
streamObserver.onError(new ShenyuException("test"));
streamObserver.onNext(leaseKeepAliveResponse);
});
doThrow(new InterruptedException("error")).when(completableFuture).get();
Assertions.assertDoesNotThrow(() -> new EtcdClient("url", 60L, 3000L));
} catch (Exception e) {
throw new ShenyuException(e.getCause());
}
} |
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
try {
final EueApiClient client = new EueApiClient(session);
final UiFsModel response;
final String resourceId = fileid.getFileId(file);
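// the root and trash containers are looked up via the resource alias API; all other resources by resource id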
switch(resourceId) {
case EueResourceIdProvider.ROOT:
case EueResourceIdProvider.TRASH:
response = new ListResourceAliasApi(client).resourceAliasAliasGet(resourceId,
null, file.attributes().getETag(), null, null, null, null,
Collections.singletonList(OPTION_WIN_32_PROPS), null);
break;
default:
response = new ListResourceApi(client).resourceResourceIdGet(resourceId,
null, file.attributes().getETag(), null, null, null, null,
Collections.singletonList(OPTION_WIN_32_PROPS), null);
break;
}
switch(response.getUifs().getResourceType()) {
case "aliascontainer":
case "container":
if(file.isFile()) {
throw new NotfoundException(file.getAbsolute());
}
break;
default:
if(file.isDirectory()) {
throw new NotfoundException(file.getAbsolute());
}
break;
}
final PathAttributes attr = this.toAttributes(response.getUifs(), response.getUiwin32(),
EueShareFeature.findShareForResource(session.userShares(), resourceId));
if(client.getResponseHeaders().containsKey(HttpHeaders.ETAG)) {
attr.setETag(StringUtils.remove(client.getResponseHeaders().get(HttpHeaders.ETAG).stream().findFirst().orElse(null), '"'));
}
return attr;
}
catch(ApiException e) {
switch(e.getCode()) {
case HttpStatus.SC_NOT_MODIFIED:
if(log.isDebugEnabled()) {
log.debug(String.format("No changes for file %s with ETag %s", file, file.attributes().getETag()));
}
return file.attributes();
}
throw new EueExceptionMappingService().map("Failure to read attributes of {0}", e, file);
}
} | @Test
public void testRoot() throws Exception {
final EueResourceIdProvider fileid = new EueResourceIdProvider(session);
final PathAttributes attr = new EueAttributesFinderFeature(session, fileid).find(new Path("/", EnumSet.of(Path.Type.directory)));
assertNotEquals(PathAttributes.EMPTY, attr);
assertNotNull(attr.getETag());
} |
public static List<String> computeLangFromLocale(Locale locale) {
final List<String> resourceNames = new ArrayList<>(5);
if (StringUtils.isBlank(locale.getLanguage())) {
throw new IllegalArgumentException(
"Locale \"" + locale + "\" "
+ "cannot be used as it does not specify a language.");
}
resourceNames.add("default");
resourceNames.add(locale.getLanguage());
if (StringUtils.isNotBlank(locale.getCountry())) {
resourceNames.add(locale.getLanguage() + "_" + locale.getCountry());
}
if (StringUtils.isNotBlank(locale.getVariant())) {
resourceNames.add(
locale.getLanguage() + "_" + locale.getCountry() + "-" + locale.getVariant());
}
return resourceNames;
} | @Test
void computeLangFromLocaleWhenLanguageIsEmpty() {
assertThatThrownBy(() -> {
LanguageUtils.computeLangFromLocale(Locale.forLanguageTag(""));
}).isInstanceOf(IllegalArgumentException.class)
.hasMessage("Locale \"\" cannot be used as it does not specify a language.");
} |
public Generics(Fury fury) {
this.fury = fury;
} | @Test
public void testGenerics() throws NoSuchFieldException {
Fury fury = Fury.builder().withLanguage(Language.JAVA).build();
Generics generics = new Generics(fury);
{
GenericType genericType =
GenericType.build(Test4.class, Test2.class.getField("fromFieldNested").getGenericType());
// push generics in outer serialization.
generics.pushGenericType(genericType);
// increase serialization depth.
increaseFuryDepth(fury, 1);
// get generics in inner serialization.
GenericType genericType1 = generics.nextGenericType();
Assert.assertSame(genericType1, genericType);
increaseFuryDepth(fury, -1);
generics.popGenericType();
}
{
for (String fieldName : new String[] {"fromField2", "arrayWithTypeVar", "fromFieldNested"}) {
GenericType genericType =
GenericType.build(Test4.class, Test2.class.getField(fieldName).getGenericType());
generics.pushGenericType(genericType);
increaseFuryDepth(fury, 1);
}
for (String fieldName :
ImmutableList.of("fromField2", "arrayWithTypeVar", "fromFieldNested").reverse()) {
GenericType genericType =
GenericType.build(Test4.class, Test2.class.getField(fieldName).getGenericType());
GenericType genericType1 = generics.nextGenericType();
Assert.assertEquals(genericType1.typeRef, genericType.typeRef);
increaseFuryDepth(fury, -1);
generics.popGenericType();
}
}
Assert.assertEquals(TestUtils.getFieldValue(generics, "genericTypesSize"), Integer.valueOf(0));
GenericType[] genericTypes = TestUtils.getFieldValue(generics, "genericTypes");
Assert.assertNull(genericTypes[0]);
Assert.assertNull(genericTypes[1]);
} |
@Override
public Map<String, StepTransition> translate(WorkflowInstance workflowInstance) {
WorkflowInstance instance = objectMapper.convertValue(workflowInstance, WorkflowInstance.class);
if (instance.getRunConfig() != null) {
if (instance.getRunConfig().getPolicy() == RunPolicy.RESTART_FROM_INCOMPLETE
|| instance.getRunConfig().getPolicy() == RunPolicy.RESTART_FROM_SPECIFIC) {
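// on restart, recompute the start steps from the aggregated step statuses:
// pick steps that are not complete but are terminal or were never created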
Map<String, StepInstance.Status> statusMap =
instance.getAggregatedInfo().getStepAggregatedViews().entrySet().stream()
.collect(
Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().getStatus()));
if (!statusMap.isEmpty()) {
instance
.getRunConfig()
.setStartStepIds(
statusMap.entrySet().stream()
.filter(
entry ->
!entry.getValue().isComplete()
&& (entry.getValue().isTerminal()
|| entry.getValue() == StepInstance.Status.NOT_CREATED))
.map(Map.Entry::getKey)
.collect(Collectors.toList()));
}
// handle the special case of restarting from a completed step
if (instance.getRunConfig().getPolicy() == RunPolicy.RESTART_FROM_SPECIFIC) {
String restartStepId =
RunRequest.getCurrentNode(instance.getRunConfig().getRestartConfig()).getStepId();
if (!instance.getRunConfig().getStartStepIds().contains(restartStepId)) {
instance.getRunConfig().getStartStepIds().add(restartStepId);
}
}
} else {
if (workflowInstance.getRunConfig().getStartStepIds() != null) {
instance
.getRunConfig()
.setStartStepIds(new ArrayList<>(workflowInstance.getRunConfig().getStartStepIds()));
}
if (workflowInstance.getRunConfig().getEndStepIds() != null) {
instance
.getRunConfig()
.setEndStepIds(new ArrayList<>(workflowInstance.getRunConfig().getEndStepIds()));
}
}
}
List<String> startStepIds =
instance.getRunConfig() != null && instance.getRunConfig().getStartStepIds() != null
? instance.getRunConfig().getStartStepIds()
: null;
List<String> endStepIds =
instance.getRunConfig() != null && instance.getRunConfig().getEndStepIds() != null
? instance.getRunConfig().getEndStepIds()
: null;
return WorkflowGraph.computeDag(instance.getRuntimeWorkflow(), startStepIds, endStepIds);
} | @Test
public void testTranslateForRestartFromSpecificWithCompleteBranch() {
instance.getRuntimeWorkflow().getSteps().get(2).getTransition().getSuccessors().remove("job.2");
instance
.getAggregatedInfo()
.getStepAggregatedViews()
.put("job.2", StepAggregatedView.builder().status(StepInstance.Status.STOPPED).build());
instance.getRunConfig().setPolicy(RunPolicy.RESTART_FROM_SPECIFIC);
instance
.getRunConfig()
.setRestartConfig(
RestartConfig.builder()
.addRestartNode("sample-dag-test-3", 1, "job1")
.restartPolicy(RunPolicy.RESTART_FROM_SPECIFIC)
.build());
Map<String, StepTransition> dag = translator.translate(instance);
Assert.assertEquals(
new HashSet<>(Arrays.asList("job1", "job.2", "job3", "job4")), dag.keySet());
} |
public void setAttribute(final String key, Object value) {
setAttributeObject(key, value);
} | @Test(expected=AttributeAlreadySetException.class)
public void cannotSetDifferentAttributeValue() {
Entry entry = new Entry();
entry.setAttribute("key", "value");
entry.setAttribute("key", "value2");
} |
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
if(directory.isRoot()) {
return new DeepBoxesListService().list(directory, listener);
}
if(containerService.isDeepbox(directory)) { // in DeepBox
return new BoxesListService().list(directory, listener);
}
if(containerService.isBox(directory)) { // in Box
return new BoxListService().list(directory, listener);
}
final String deepBoxNodeId = fileid.getDeepBoxNodeId(directory);
final String boxNodeId = fileid.getBoxNodeId(directory);
if(containerService.isThirdLevel(directory)) { // in Inbox/Documents/Trash
// N.B. although Documents and Trash have a nodeId, calling the listFiles1/listTrash1 API with
// parentNode may fail!
if(containerService.isInInbox(directory)) {
return new NodeListService(new Contents() {
@Override
public NodeContent getNodes(final int offset) throws ApiException {
return new BoxRestControllerApi(session.getClient()).listQueue(deepBoxNodeId,
boxNodeId,
null,
offset, chunksize, "displayName asc");
}
}).list(directory, listener);
}
if(containerService.isInDocuments(directory)) {
return new NodeListService(new Contents() {
@Override
public NodeContent getNodes(final int offset) throws ApiException {
return new BoxRestControllerApi(session.getClient()).listFiles(
deepBoxNodeId,
boxNodeId,
offset, chunksize, "displayName asc");
}
}).list(directory, listener);
}
if(containerService.isInTrash(directory)) {
return new NodeListService(new Contents() {
@Override
public NodeContent getNodes(final int offset) throws ApiException {
return new BoxRestControllerApi(session.getClient()).listTrash(
deepBoxNodeId,
boxNodeId,
offset, chunksize, "displayName asc");
}
}).list(directory, listener);
}
}
// in subfolder of Documents/Trash (Inbox has no subfolders)
final String nodeId = fileid.getFileId(directory);
if(containerService.isInTrash(directory)) {
return new NodeListService(new Contents() {
@Override
public NodeContent getNodes(final int offset) throws ApiException {
return new BoxRestControllerApi(session.getClient()).listTrash1(
deepBoxNodeId,
boxNodeId,
nodeId,
offset, chunksize, "displayName asc");
}
}).list(directory, listener);
}
return new NodeListService(new Contents() {
@Override
public NodeContent getNodes(final int offset) throws ApiException {
return new BoxRestControllerApi(session.getClient()).listFiles1(
deepBoxNodeId,
boxNodeId,
nodeId,
offset, chunksize, "displayName asc");
}
}).list(directory, listener);
} | @Test
public void testListInbox() throws Exception {
final DeepboxIdProvider nodeid = new DeepboxIdProvider(session);
final Path queue = new Path("/ORG 4 - DeepBox Desktop App/ORG3:Box1/Inbox", EnumSet.of(Path.Type.directory));
final AttributedList<Path> list = new DeepboxListService(session, nodeid).list(queue, new DisabledListProgressListener());
assertNotEquals(AttributedList.emptyList(), list);
assertFalse(list.isEmpty());
for(final Path f : list) {
assertSame(queue, f.getParent());
assertFalse(f.getName().contains(String.valueOf(Path.DELIMITER)));
assertTrue(f.attributes().getModificationDate() > 0);
assertTrue(f.attributes().getCreationDate() > 0);
assertNotNull(nodeid.getFileId(new Path(f).withAttributes(new PathAttributes())));
assertEquals(f.attributes(), new DeepboxAttributesFinderFeature(session, nodeid).find(new Path(f.getAbsolute(), f.getType())));
}
} |
static <T extends Type> String buildMethodSignature(
String methodName, List<TypeReference<T>> parameters) {
StringBuilder result = new StringBuilder();
result.append(methodName);
result.append("(");
String params =
parameters.stream().map(Utils::getTypeName).collect(Collectors.joining(","));
result.append(params);
result.append(")");
return result.toString();
} | @Test
void testBuildMethodSignatureWithDynamicStructs() {
assertEquals(
"nazzEvent((((string,string)[])[],uint256),(string,string))",
EventEncoder.buildMethodSignature(
AbiV2TestFixture.nazzEvent.getName(),
AbiV2TestFixture.nazzEvent.getParameters()));
} |
public AccessPrivilege getAccessPrivilege(InetAddress addr) {
return getAccessPrivilege(addr.getHostAddress(),
addr.getCanonicalHostName());
} | @Test
public void testCidrLongRO() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
"192.168.0.0/255.255.252.0");
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege(address1, hostname1));
Assert.assertEquals(AccessPrivilege.NONE,
matcher.getAccessPrivilege(address2, hostname1));
} |
@Override
public LookupResult<BrokerKey> handleResponse(Set<BrokerKey> keys, AbstractResponse abstractResponse) {
validateLookupKeys(keys);
MetadataResponse response = (MetadataResponse) abstractResponse;
MetadataResponseData.MetadataResponseBrokerCollection brokers = response.data().brokers();
if (brokers.isEmpty()) {
log.debug("Metadata response contained no brokers. Will backoff and retry");
return LookupResult.empty();
} else {
log.debug("Discovered all brokers {} to send requests to", brokers);
}
Map<BrokerKey, Integer> brokerKeys = brokers.stream().collect(Collectors.toMap(
broker -> new BrokerKey(OptionalInt.of(broker.nodeId())),
MetadataResponseData.MetadataResponseBroker::nodeId
));
return new LookupResult<>(
Collections.singletonList(ANY_BROKER),
Collections.emptyMap(),
brokerKeys
);
} | @Test
public void testHandleResponseWithInvalidLookupKeys() {
AllBrokersStrategy strategy = new AllBrokersStrategy(logContext);
AllBrokersStrategy.BrokerKey key1 = new AllBrokersStrategy.BrokerKey(OptionalInt.empty());
AllBrokersStrategy.BrokerKey key2 = new AllBrokersStrategy.BrokerKey(OptionalInt.of(1));
MetadataResponse response = new MetadataResponse(new MetadataResponseData(), ApiKeys.METADATA.latestVersion());
assertThrows(IllegalArgumentException.class, () -> strategy.handleResponse(mkSet(key1), response));
assertThrows(IllegalArgumentException.class, () -> strategy.handleResponse(mkSet(key2), response));
assertThrows(IllegalArgumentException.class, () -> strategy.handleResponse(mkSet(key1, key2), response));
Set<AllBrokersStrategy.BrokerKey> keys = new HashSet<>(AllBrokersStrategy.LOOKUP_KEYS);
keys.add(key2);
assertThrows(IllegalArgumentException.class, () -> strategy.handleResponse(keys, response));
} |
@Override
public void start() {
builtInQProfileRepository.initialize();
} | @Test
void start_initializes_DefinedQProfileRepository() {
underTest.start();
assertThat(builtInQProfileRepositoryRule.isInitialized()).isTrue();
} |
public void runMigrations() {
List<SqlMigration> migrationsToRun = getMigrations()
.filter(migration -> migration.getFileName().endsWith(".sql"))
.sorted(comparing(SqlMigration::getFileName))
.filter(this::isNewMigration)
.collect(toList());
runMigrations(migrationsToRun);
} | @Test
void testH2ValidateWithTablesInWrongSchema() {
final JdbcDataSource dataSource = createH2DataSource("jdbc:h2:/tmp/test;INIT=CREATE SCHEMA IF NOT EXISTS schema1\\;CREATE SCHEMA IF NOT EXISTS schema2");
final DatabaseCreator databaseCreatorForSchema1 = new DatabaseCreator(dataSource, "schema1.prefix_", H2StorageProvider.class);
databaseCreatorForSchema1.runMigrations();
final DatabaseCreator databaseCreatorForSchema2 = new DatabaseCreator(dataSource, "schema2.prefix_", H2StorageProvider.class);
assertThatThrownBy(databaseCreatorForSchema2::validateTables).isInstanceOf(JobRunrException.class);
} |
@Override
public void check(final EncryptRule encryptRule, final ShardingSphereSchema schema, final SQLStatementContext sqlStatementContext) {
ShardingSpherePreconditions.checkState(JoinConditionsEncryptorComparator.isSame(((WhereAvailable) sqlStatementContext).getJoinConditions(), encryptRule),
() -> new UnsupportedSQLOperationException("Can not use different encryptor in join condition"));
check(encryptRule, schema, (WhereAvailable) sqlStatementContext);
} | @Test
void assertGenerateSQLTokensWhenJoinConditionUseDifferentEncryptor() {
assertThrows(UnsupportedSQLOperationException.class,
() -> new EncryptPredicateColumnSupportedChecker().check(EncryptGeneratorFixtureBuilder.createEncryptRule(), null, EncryptGeneratorFixtureBuilder.createSelectStatementContext()));
} |
@Override
public Set<EntityExcerpt> listEntityExcerpts() {
return lookupTableService.findAll().stream()
.map(this::createExcerpt)
.collect(Collectors.toSet());
} | @Test
@MongoDBFixtures("LookupTableFacadeTest.json")
public void listEntityExcerpts() {
final EntityExcerpt expectedEntityExcerpt = EntityExcerpt.builder()
.id(ModelId.of("5adf24dd4b900a0fdb4e530d"))
.type(ModelTypes.LOOKUP_TABLE_V1)
.title("HTTP DSV without Cache")
.build();
final Set<EntityExcerpt> entityExcerpts = facade.listEntityExcerpts();
assertThat(entityExcerpts).containsOnly(expectedEntityExcerpt);
} |
public static byte[] toArray(ByteBuffer buffer) {
return toArray(buffer, 0, buffer.remaining());
} | @Test
public void toArrayDirectByteBuffer() {
byte[] input = {0, 1, 2, 3, 4};
ByteBuffer buffer = ByteBuffer.allocateDirect(5);
buffer.put(input);
buffer.rewind();
assertArrayEquals(input, Utils.toArray(buffer));
assertEquals(0, buffer.position());
assertArrayEquals(new byte[] {1, 2}, Utils.toArray(buffer, 1, 2));
assertEquals(0, buffer.position());
buffer.position(2);
assertArrayEquals(new byte[] {2, 3, 4}, Utils.toArray(buffer));
assertEquals(2, buffer.position());
} |
public static HttpRequest newJDiscRequest(CurrentContainer container, HttpServletRequest servletRequest) {
try {
var jettyRequest = (Request) servletRequest;
var jdiscHttpReq = HttpRequest.newServerRequest(
container,
getUri(servletRequest),
getMethod(servletRequest),
HttpRequest.Version.fromString(servletRequest.getProtocol()),
new InetSocketAddress(servletRequest.getRemoteAddr(), servletRequest.getRemotePort()),
getConnection(jettyRequest).getCreatedTimeStamp(),
jettyRequest.getTimeStamp());
jdiscHttpReq.context().put(RequestUtils.JDISC_REQUEST_X509CERT, getCertChain(servletRequest));
jdiscHttpReq.context().put(RequestUtils.JDICS_REQUEST_PORT, servletRequest.getLocalPort());
SSLSession sslSession = (SSLSession) servletRequest.getAttribute(RequestUtils.JETTY_REQUEST_SSLSESSION);
jdiscHttpReq.context().put(RequestUtils.JDISC_REQUEST_SSLSESSION, sslSession);
servletRequest.setAttribute(HttpRequest.class.getName(), jdiscHttpReq);
return jdiscHttpReq;
} catch (Utf8Appendable.NotUtf8Exception e) {
throw createBadQueryException(e);
}
} | @Test
final void illegal_host_throws_requestexception3() {
try {
HttpRequestFactory.newJDiscRequest(
new MockContainer(),
createMockRequest("http", "*", "/foo", ""));
fail("Above statement should throw");
} catch (RequestException e) {
assertThat(e.getResponseStatus(), is(Response.Status.BAD_REQUEST));
}
} |
@Override
public V load(K key) {
awaitSuccessfulInit();
try (SqlResult queryResult = sqlService.execute(queries.load(), key)) {
Iterator<SqlRow> it = queryResult.iterator();
V value = null;
if (it.hasNext()) {
SqlRow sqlRow = it.next();
if (it.hasNext()) {
throw new IllegalStateException("multiple matching rows for a key " + key);
}
// If there is a single column as the value, return that column as the value
if (queryResult.getRowMetadata().getColumnCount() == 2 && genericMapStoreProperties.singleColumnAsValue) {
value = sqlRow.getObject(1);
} else {
//noinspection unchecked
value = (V) toGenericRecord(sqlRow, genericMapStoreProperties);
}
}
return value;
}
} | @Test
public void whenMapLoaderInitCalledOnNonMaster_thenInitAndLoadValue() {
ObjectSpec spec = objectProvider.createObject(mapName, false);
objectProvider.insertItems(spec, 1);
mapLoader = createMapLoader(instances()[1]);
GenericRecord genericRecord = mapLoader.load(0);
assertThat(genericRecord).isNotNull();
} |
@Override
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
ReflectionUtils.doWithMethods(bean.getClass(), recurringJobFinderMethodCallback);
return bean;
} | @Test
void beansWithoutMethodsAnnotatedWithRecurringAnnotationWillNotBeHandled() {
// GIVEN
final RecurringJobPostProcessor recurringJobPostProcessor = getRecurringJobPostProcessor();
// WHEN
recurringJobPostProcessor.postProcessAfterInitialization(new MyServiceWithoutRecurringAnnotation(), "not important");
// THEN
verifyNoInteractions(jobScheduler);
} |
public static long quorumPosition(final ClusterMember[] members, final long[] rankedPositions)
{
final int length = rankedPositions.length;
for (int i = 0; i < length; i++)
{
rankedPositions[i] = 0;
}
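// insert each member's log position into the descending ranking; after all members
// are processed, the last slot holds the position reached by a quorum of members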
for (final ClusterMember member : members)
{
long newPosition = member.logPosition;
for (int i = 0; i < length; i++)
{
final long rankedPosition = rankedPositions[i];
if (newPosition > rankedPosition)
{
rankedPositions[i] = newPosition;
newPosition = rankedPosition;
}
}
}
return rankedPositions[length - 1];
} | @Test
void shouldRankClusterStart()
{
assertThat(quorumPosition(members, rankedPositions), is(0L));
} |
@Override
public V load(K k) throws CacheLoaderException {
long startNanos = Timer.nanos();
try {
return delegate.get().load(k);
} finally {
loadProbe.recordValue(Timer.nanosElapsed(startNanos));
}
} | @Test
public void load() {
String key = "key";
String value = "value";
when(delegate.load(key)).thenReturn(value);
String result = cacheLoader.load(key);
assertSame(value, result);
assertProbeCalledOnce("load");
} |
@Override
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse, FilterChain filterChain)
throws IOException, ServletException {
final HttpServletRequest httpServletRequest = (HttpServletRequest) servletRequest;
final HttpServletResponse response = (HttpServletResponse) servletResponse;
Method method = controllerMethodsCache.getMethod(httpServletRequest);
try {
if (method != null && method.isAnnotationPresent(TpsControl.class)
&& TpsControlConfig.isTpsControlEnabled()) {
TpsControl tpsControl = method.getAnnotation(TpsControl.class);
String pointName = tpsControl.pointName();
String parserName = StringUtils.isBlank(tpsControl.name()) ? pointName : tpsControl.name();
HttpTpsCheckRequestParser parser = HttpTpsCheckRequestParserRegistry.getParser(parserName);
TpsCheckRequest httpTpsCheckRequest = null;
if (parser != null) {
httpTpsCheckRequest = parser.parse(httpServletRequest);
}
if (httpTpsCheckRequest == null) {
httpTpsCheckRequest = new TpsCheckRequest();
}
if (StringUtils.isBlank(httpTpsCheckRequest.getPointName())) {
httpTpsCheckRequest.setPointName(pointName);
}
initTpsControlManager();
TpsCheckResponse checkResponse = tpsControlManager.check(httpTpsCheckRequest);
if (!checkResponse.isSuccess()) {
AsyncContext asyncContext = httpServletRequest.startAsync();
asyncContext.setTimeout(0);
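// respond with 503 after a one-second delay via the async context instead of rejecting immediately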
RpcScheduledExecutor.CONTROL_SCHEDULER.schedule(
() -> generate503Response(httpServletRequest, response, checkResponse.getMessage(),
asyncContext), 1000L, TimeUnit.MILLISECONDS);
return;
}
}
} catch (Throwable throwable) {
Loggers.TPS.warn("Fail to http tps check", throwable);
}
filterChain.doFilter(httpServletRequest, response);
} | @Test
void testTpsCheckException() throws Exception {
HttpTpsCheckRequestParserRegistry.register(new HttpTpsCheckRequestParser() {
@Override
public TpsCheckRequest parse(HttpServletRequest httpServletRequest) {
return new TpsCheckRequest();
}
@Override
public String getPointName() {
return "HealthCheck";
}
@Override
public String getName() {
return "HealthCheck";
}
});
when(tpsControlManager.check(any(TpsCheckRequest.class))).thenThrow(new RuntimeException("324565"));
//mock http tps control method
Method method = HealthCheckRequestHandler.class.getMethod("handle", Request.class, RequestMeta.class);
HttpServletRequest httpServletRequest = Mockito.mock(HttpServletRequest.class);
HttpServletResponse httpServletResponse = Mockito.mock(HttpServletResponse.class);
MockFilterChain filterChain = Mockito.mock(MockFilterChain.class);
when(controllerMethodsCache.getMethod(eq(httpServletRequest))).thenReturn(method);
//execute test.
nacosHttpTpsFilter.doFilter(httpServletRequest, httpServletResponse, filterChain);
//verify
Mockito.verify(filterChain, Mockito.times(1)).doFilter(httpServletRequest, httpServletResponse);
} |
public Optional<Example<T>> generateExample(ColumnarIterator.Row row, boolean outputRequired) {
List<String> responseValues = responseProcessor.getFieldNames().stream()
.map(f -> row.getRowData().getOrDefault(f, ""))
.collect(Collectors.toList());
Optional<T> labelOpt = responseProcessor.process(responseValues);
if (!labelOpt.isPresent() && outputRequired) {
return Optional.empty();
}
List<ColumnarFeature> features = generateFeatures(row.getRowData());
if (features.isEmpty()) {
logger.warning(String.format("Row %d empty of features, omitting", row.getIndex()));
return Optional.empty();
} else {
T label = labelOpt.orElse(responseProcessor.getOutputFactory().getUnknownOutput());
Map<String, Object> metadata = generateMetadata(row);
Example<T> example;
if (weightExtractor == null) {
example = new ArrayExample<>(label, metadata);
} else {
example = new ArrayExample<>(label,
weightExtractor.extract(row).orElse(Example.DEFAULT_WEIGHT),
metadata);
}
example.addAll(features);
return Optional.of(example);
}
} | @Test
public void replaceNewlinesWithSpacesTest() {
final Pattern BLANK_LINES = Pattern.compile("(\n[\\s-]*\n)+");
final Function<CharSequence, CharSequence> newLiner = (CharSequence charSequence) -> {
if (charSequence == null || charSequence.length() == 0) {
return charSequence;
}
return BLANK_LINES.splitAsStream(charSequence).collect(Collectors.joining(" *\n\n"));
};
Tokenizer tokenizer = new MungingTokenizer(new BreakIteratorTokenizer(Locale.US), newLiner);
TokenPipeline textPipeline = new TokenPipeline(tokenizer, 2, false);
final Map<String, FieldProcessor> fieldProcessors = new HashMap<>();
fieldProcessors.put("order_text", new TextFieldProcessor("order_text", textPipeline));
MockResponseProcessor response = new MockResponseProcessor("Label");
Map<String,String> row = new HashMap<>();
row.put("order_text", "Jimmy\n\n\n\nHoffa");
row.put("Label", "Sheep");
RowProcessor<MockOutput> processor = new RowProcessor<>(Collections.emptyList(),null,response,fieldProcessors,Collections.emptyMap(),Collections.emptySet(), false);
Example<MockOutput> example = processor.generateExample(row,true).get();
// Check example is extracted correctly
assertEquals(5, example.size());
assertEquals("Sheep", example.getOutput().label);
Iterator<Feature> featureIterator = example.iterator();
Feature a = featureIterator.next();
assertEquals("order_text@1-N=*", a.getName());
assertEquals(1.0, a.getValue());
a = featureIterator.next();
assertEquals("order_text@1-N=Hoffa", a.getName());
a = featureIterator.next();
assertEquals("order_text@1-N=Jimmy", a.getName());
a = featureIterator.next();
assertEquals("order_text@2-N=*/Hoffa", a.getName());
a = featureIterator.next();
assertEquals("order_text@2-N=Jimmy/*", a.getName());
assertFalse(featureIterator.hasNext());
// same input with replaceNewlinesWithSpacesTest=true (the default) produces different features
processor = new RowProcessor<>(Collections.emptyList(),null,response,fieldProcessors,Collections.emptyMap(),Collections.emptySet(), true);
example = processor.generateExample(row,true).get();
// Check example is extracted correctly
assertEquals(3, example.size());
assertEquals("Sheep", example.getOutput().label);
featureIterator = example.iterator();
a = featureIterator.next();
assertEquals("order_text@1-N=Hoffa", a.getName());
assertEquals(1.0, a.getValue());
a = featureIterator.next();
assertEquals("order_text@1-N=Jimmy", a.getName());
a = featureIterator.next();
assertEquals("order_text@2-N=Jimmy/Hoffa", a.getName());
assertFalse(featureIterator.hasNext());
} |
@Override
public void close() throws IOException {
if (closed) {
return;
}
super.close();
closeStream();
closed = true;
} | @Test
public void testClose() throws Exception {
OSSURI uri = new OSSURI(location("closed.dat"));
SeekableInputStream closed = new OSSInputStream(ossClient().get(), uri);
closed.close();
assertThatThrownBy(() -> closed.seek(0))
.isInstanceOf(IllegalStateException.class)
.hasMessageContaining("Cannot seek: already closed");
} |
public void resetPositionsIfNeeded() {
Map<TopicPartition, Long> offsetResetTimestamps = offsetFetcherUtils.getOffsetResetTimestamp();
if (offsetResetTimestamps.isEmpty())
return;
resetPositionsAsync(offsetResetTimestamps);
} | @Test
public void testUpdateFetchPositionResetToDefaultOffset() {
buildFetcher();
assignFromUser(singleton(tp0));
subscriptions.requestOffsetReset(tp0);
client.prepareResponse(listOffsetRequestMatcher(ListOffsetsRequest.EARLIEST_TIMESTAMP,
validLeaderEpoch), listOffsetResponse(Errors.NONE, 1L, 5L));
offsetFetcher.resetPositionsIfNeeded();
consumerClient.pollNoWakeup();
assertFalse(subscriptions.isOffsetResetNeeded(tp0));
assertTrue(subscriptions.isFetchable(tp0));
assertEquals(5, subscriptions.position(tp0).offset);
} |
@ApiOperation(value = "Delete a deployment", tags = { "Deployment" }, code = 204)
@ApiResponses(value = {
@ApiResponse(code = 204, message = "Indicates the deployment was found and has been deleted. Response-body is intentionally empty."),
@ApiResponse(code = 404, message = "Indicates the requested deployment was not found.")
})
@DeleteMapping(value = "/repository/deployments/{deploymentId}", produces = "application/json")
@ResponseStatus(HttpStatus.NO_CONTENT)
public void deleteDeployment(@ApiParam(name = "deploymentId") @PathVariable String deploymentId, @RequestParam(value = "cascade", required = false, defaultValue = "false") Boolean cascade) {
Deployment deployment = repositoryService.createDeploymentQuery().deploymentId(deploymentId).singleResult();
if (deployment == null) {
throw new FlowableObjectNotFoundException("Could not find a deployment with id '" + deploymentId + "'.", Deployment.class);
}
if (restApiInterceptor != null) {
restApiInterceptor.deleteDeployment(deployment);
}
if (cascade) {
repositoryService.deleteDeployment(deploymentId, true);
} else {
repositoryService.deleteDeployment(deploymentId);
}
} | @Test
public void testPostNewDeploymentBarFile() throws Exception {
try {
// Create zip with bpmn-file and resource
ByteArrayOutputStream zipOutput = new ByteArrayOutputStream();
ZipOutputStream zipStream = new ZipOutputStream(zipOutput);
// Add bpmn-xml
zipStream.putNextEntry(new ZipEntry("oneTaskProcess.bpmn20.xml"));
IOUtils.copy(ReflectUtil.getResourceAsStream("org/flowable/rest/service/api/repository/oneTaskProcess.bpmn20.xml"), zipStream);
zipStream.closeEntry();
// Add text-resource
zipStream.putNextEntry(new ZipEntry("test.txt"));
IOUtils.write("Testing REST-deployment with tenant", zipStream, StandardCharsets.UTF_8);
zipStream.closeEntry();
zipStream.close();
// Upload a bar-file using multipart-data
HttpPost httpPost = new HttpPost(SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_DEPLOYMENT_COLLECTION));
httpPost.setEntity(
HttpMultipartHelper.getMultiPartEntity("test-deployment.bar", "application/zip", new ByteArrayInputStream(zipOutput.toByteArray()), null));
CloseableHttpResponse response = executeBinaryRequest(httpPost, HttpStatus.SC_CREATED);
// Check deployment
JsonNode responseNode = objectMapper.readTree(response.getEntity().getContent());
closeResponse(response);
String deploymentId = responseNode.get("id").textValue();
assertThatJson(responseNode)
.when(Option.IGNORING_EXTRA_FIELDS)
.isEqualTo("{"
+ "id: '${json-unit.any-string}',"
+ "name: 'test-deployment',"
+ "url: '" + SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_DEPLOYMENT, deploymentId) + "',"
+ "category: null,"
+ "deploymentTime: '${json-unit.any-string}',"
+ "tenantId: ''"
+ "}");
assertThat(repositoryService.createDeploymentQuery().deploymentId(deploymentId).count()).isEqualTo(1);
// Check if both resources are deployed and process is actually
// deployed in the deployment
List<String> resources = repositoryService.getDeploymentResourceNames(deploymentId);
assertThat(resources).hasSize(2);
assertThat(repositoryService.createProcessDefinitionQuery().deploymentId(deploymentId).count()).isEqualTo(1);
} finally {
// Always cleanup any created deployments, even if the test failed
List<Deployment> deployments = repositoryService.createDeploymentQuery().list();
for (Deployment deployment : deployments) {
repositoryService.deleteDeployment(deployment.getId(), true);
}
}
} |
DateRange getRange(String dateRangeString) throws ParseException {
if (dateRangeString == null || dateRangeString.isEmpty())
return null;
String[] dateArr = dateRangeString.split("-");
if (dateArr.length > 2 || dateArr.length < 1)
return null;
// throw new IllegalArgumentException("Only Strings containing two Date separated by a '-' or a single Date are allowed");
ParsedCalendar from = parseDateString(dateArr[0]);
ParsedCalendar to;
if (dateArr.length == 2)
to = parseDateString(dateArr[1]);
else
// faster and safe?
// to = new ParsedCalendar(from.parseType, (Calendar) from.parsedCalendar.clone());
to = parseDateString(dateArr[0]);
try {
return new DateRange(from, to);
} catch (IllegalArgumentException ex) {
return null;
}
} | @Test
public void testParseReverseDateRangeWithoutYearAndDay() throws ParseException {
DateRange dateRange = dateRangeParser.getRange("Sep-Mar");
assertFalse(dateRange.isInRange(getCalendar(2014, Calendar.AUGUST, 31)));
assertTrue(dateRange.isInRange(getCalendar(2014, Calendar.SEPTEMBER, 1)));
assertTrue(dateRange.isInRange(getCalendar(2014, Calendar.DECEMBER, 24)));
assertTrue(dateRange.isInRange(getCalendar(2014, Calendar.JANUARY, 24)));
assertTrue(dateRange.isInRange(getCalendar(2014, Calendar.MARCH, 31)));
assertFalse(dateRange.isInRange(getCalendar(2014, Calendar.APRIL, 1)));
} |
@VisibleForTesting
protected static CacheQuota generateCacheQuota(HiveSplit hiveSplit)
{
Optional<DataSize> quota = hiveSplit.getCacheQuotaRequirement().getQuota();
switch (hiveSplit.getCacheQuotaRequirement().getCacheQuotaScope()) {
case GLOBAL:
return new CacheQuota(".", quota);
case SCHEMA:
return new CacheQuota(hiveSplit.getDatabase(), quota);
case TABLE:
return new CacheQuota(hiveSplit.getDatabase() + "." + hiveSplit.getTable(), quota);
case PARTITION:
return new CacheQuota(hiveSplit.getDatabase() + "." + hiveSplit.getTable() + "." + hiveSplit.getPartitionName(), quota);
default:
throw new PrestoException(HIVE_UNKNOWN_ERROR, format("%s is not supported", quota));
}
} | @Test
public void testGenerateCacheQuota()
{
HiveClientConfig config = new HiveClientConfig();
HiveFileSplit fileSplit = new HiveFileSplit("file://test",
0,
10,
10,
Instant.now().toEpochMilli(),
Optional.empty(),
ImmutableMap.of(),
0);
HiveSplit split = new HiveSplit(
fileSplit,
SCHEMA_NAME,
TABLE_NAME,
PARTITION_NAME,
new Storage(
StorageFormat.create(config.getHiveStorageFormat().getSerDe(), config.getHiveStorageFormat().getInputFormat(), config.getHiveStorageFormat().getOutputFormat()),
"location",
Optional.empty(),
false,
ImmutableMap.of(),
ImmutableMap.of()),
ImmutableList.of(),
ImmutableList.of(),
OptionalInt.empty(),
OptionalInt.empty(),
NO_PREFERENCE,
getColumnHandles().size(),
TableToPartitionMapping.empty(),
Optional.empty(),
false,
NO_CACHE_REQUIREMENT,
Optional.empty(),
ImmutableSet.of(),
SplitWeight.standard(),
Optional.empty());
CacheQuota cacheQuota = HivePageSourceProvider.generateCacheQuota(split);
CacheQuota expectedCacheQuota = new CacheQuota(".", Optional.empty());
assertEquals(cacheQuota, expectedCacheQuota);
split = new HiveSplit(
fileSplit,
SCHEMA_NAME,
TABLE_NAME,
PARTITION_NAME,
new Storage(
StorageFormat.create(config.getHiveStorageFormat().getSerDe(), config.getHiveStorageFormat().getInputFormat(), config.getHiveStorageFormat().getOutputFormat()),
"location",
Optional.empty(),
false,
ImmutableMap.of(),
ImmutableMap.of()),
ImmutableList.of(),
ImmutableList.of(),
OptionalInt.empty(),
OptionalInt.empty(),
NO_PREFERENCE,
getColumnHandles().size(),
TableToPartitionMapping.empty(),
Optional.empty(),
false,
new CacheQuotaRequirement(PARTITION, Optional.of(DataSize.succinctDataSize(1, DataSize.Unit.MEGABYTE))),
Optional.empty(),
ImmutableSet.of(),
SplitWeight.standard(),
Optional.empty());
cacheQuota = HivePageSourceProvider.generateCacheQuota(split);
expectedCacheQuota = new CacheQuota(SCHEMA_NAME + "." + TABLE_NAME + "." + PARTITION_NAME, Optional.of(DataSize.succinctDataSize(1, DataSize.Unit.MEGABYTE)));
assertEquals(cacheQuota, expectedCacheQuota);
} |
public void publishRestartEvents(Pod pod, RestartReasons reasons) {
MicroTime k8sEventTime = new MicroTime(K8S_MICROTIME.format(ZonedDateTime.now(clock)));
ObjectReference podReference = createPodReference(pod);
try {
for (RestartReason reason : reasons) {
String note = maybeTruncated(reasons.getNoteFor(reason));
String type = "Normal";
String k8sFormattedReason = reason.pascalCased();
LOG.debug("Publishing K8s event, time {}, type, {}, reason, {}, note, {}, pod, {}",
k8sEventTime, type, k8sFormattedReason, note, podReference);
publishEvent(k8sEventTime, podReference, k8sFormattedReason, type, note);
}
} catch (Exception e) {
LOG.error("Exception on K8s event publication", e);
}
} | @Test
void testOneEventPublishedPerReason() {
Pod mockPod = Mockito.mock(Pod.class);
ObjectMeta mockPodMeta = new ObjectMetaBuilder().withName("pod").withNamespace("ns").build();
when(mockPod.getMetadata()).thenReturn(mockPodMeta);
KubernetesClient client = mock(KubernetesClient.class);
Set<String> capturedReasons = new HashSet<>();
KubernetesRestartEventPublisher capturingPublisher = new KubernetesRestartEventPublisher(client, "op") {
@Override
protected void publishEvent(MicroTime eventTime, ObjectReference podReference, String reason, String type, String note) {
capturedReasons.add(reason);
}
};
Set<String> expectedReasons = Set.of("ClientCaCertKeyReplaced", "ClusterCaCertKeyReplaced");
RestartReasons reasons = new RestartReasons().add(RestartReason.CLIENT_CA_CERT_KEY_REPLACED)
.add(RestartReason.CLUSTER_CA_CERT_KEY_REPLACED);
capturingPublisher.publishRestartEvents(mockPod, reasons);
assertThat(capturedReasons, is(expectedReasons));
} |
public static PostgreSQLBinaryProtocolValue getBinaryProtocolValue(final BinaryColumnType binaryColumnType) {
Preconditions.checkArgument(BINARY_PROTOCOL_VALUES.containsKey(binaryColumnType), "Cannot find PostgreSQL type '%s' in column type when process binary protocol value", binaryColumnType);
return BINARY_PROTOCOL_VALUES.get(binaryColumnType);
} | @Test
void assertGetDoubleBinaryProtocolValue() {
PostgreSQLBinaryProtocolValue binaryProtocolValue = PostgreSQLBinaryProtocolValueFactory.getBinaryProtocolValue(PostgreSQLColumnType.FLOAT8);
assertThat(binaryProtocolValue, instanceOf(PostgreSQLDoubleBinaryProtocolValue.class));
} |
public CompletableFuture<Integer> read(ByteBuffer buf, long offset, long len, FileId fileId,
String ufsPath, UfsReadOptions options) {
Objects.requireNonNull(buf);
if (offset < 0 || len < 0 || len > buf.remaining()) {
throw new OutOfRangeRuntimeException(String.format(
"offset is negative, len is negative, or len is greater than buf remaining. "
+ "offset: %s, len: %s, buf remaining: %s", offset, len, buf.remaining()));
}
if (mReadQueue.size() >= READ_CAPACITY) {
throw new ResourceExhaustedRuntimeException("UFS read at capacity", true);
}
CompletableFuture<Integer> future = new CompletableFuture<>();
if (len == 0) {
future.complete(0);
return future;
}
Meter meter = mUfsBytesReadThroughputMetrics.computeIfAbsent(mUfsClient.getUfsMountPointUri(),
uri -> MetricsSystem.meterWithTags(MetricKey.WORKER_BYTES_READ_UFS_THROUGHPUT.getName(),
MetricKey.WORKER_BYTES_READ_UFS_THROUGHPUT.isClusterAggregated(), MetricInfo.TAG_UFS,
MetricsSystem.escape(mUfsClient.getUfsMountPointUri()), MetricInfo.TAG_USER,
options.getTag()));
mReadQueue.add(new ReadTask(buf, ufsPath, fileId, offset,
len, options, future, meter));
return future;
} | @Test
public void readPartialBlock() throws Exception {
mUfsIOManager.read(TEST_BUF, 0, TEST_BLOCK_SIZE - 1, FIRST_BLOCK_ID, mTestFilePath,
UfsReadOptions.getDefaultInstance()).get();
assertTrue(checkBuf(0, (int) TEST_BLOCK_SIZE - 1, TEST_BUF));
TEST_BUF.clear();
} |
public final List<E> findAll(E key) {
if (key == null || size() == 0) {
return Collections.emptyList();
}
ArrayList<E> results = new ArrayList<>();
int slot = slot(elements, key);
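// linear probing: scan forward from the key's home slot until an empty slot ends the probe sequence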
for (int seen = 0; seen < elements.length; seen++) {
Element element = elements[slot];
if (element == null) {
break;
}
if (key.elementKeysAreEqual(element)) {
@SuppressWarnings("unchecked")
E result = (E) elements[slot];
results.add(result);
}
slot = (slot + 1) % elements.length;
}
return results;
} | @Test
public void testFindFindAllContainsRemoveOnEmptyCollection() {
ImplicitLinkedHashMultiCollection<TestElement> coll = new ImplicitLinkedHashMultiCollection<>();
assertNull(coll.find(new TestElement(2)));
assertFalse(coll.contains(new TestElement(2)));
assertFalse(coll.remove(new TestElement(2)));
assertTrue(coll.findAll(new TestElement(2)).isEmpty());
} |
@Bean
public MetaDataHandler apacheDubboMetaDataHandler() {
return new ApacheDubboMetaDataHandler();
} | @Test
public void testApacheDubboMetaDataHandler() {
applicationContextRunner.run(context -> {
MetaDataHandler handler = context.getBean("apacheDubboMetaDataHandler", MetaDataHandler.class);
assertNotNull(handler);
}
);
} |
@Override
public void collect(MetricsEmitter metricsEmitter) {
for (Map.Entry<MetricKey, KafkaMetric> entry : ledger.getMetrics()) {
MetricKey metricKey = entry.getKey();
KafkaMetric metric = entry.getValue();
try {
collectMetric(metricsEmitter, metricKey, metric);
} catch (Exception e) {
// catch and log to continue processing remaining metrics
log.error("Error processing Kafka metric {}", metricKey, e);
}
}
} | @Test
public void testMeasurableWithException() {
metrics.addMetric(metricName, null, (config, now) -> {
throw new RuntimeException();
});
collector.collect(testEmitter);
List<SinglePointMetric> result = testEmitter.emittedMetrics();
// Verify that only the global count of metrics exists
assertEquals(1, result.size());
// Group is registered as kafka-metrics-count
assertEquals("test.domain.kafka.count.count", result.get(0).builder().build().getName());
// Verify that metrics whose measure() method throws an exception are not returned
assertFalse(result.stream()
.flatMap(metrics -> Stream.of(metrics.builder().build()))
.anyMatch(metric -> metric.getName().equals("test.domain.group1.name1")));
} |
@Override
public final int position() {
return pos;
} | @Test
public void testPosition() {
assertEquals(0, in.position());
} |
@Override
public String toString() {
return String.format("%s,,", getType());
} | @Test
void assertToString() {
assertThat(new UnsupportedKeyIngestPosition().toString(), is("u,,"));
} |
@LiteralParameters("x")
@ScalarOperator(EQUAL)
@SqlType(StandardTypes.BOOLEAN)
@SqlNullable
public static Boolean equal(@SqlType("char(x)") Slice left, @SqlType("char(x)") Slice right)
{
return left.equals(right);
} | @Test
public void testEqual()
{
assertFunction("cast('foo' as char(3)) = cast('foo' as char(5))", BOOLEAN, true);
assertFunction("cast('foo' as char(3)) = cast('foo' as char(3))", BOOLEAN, true);
assertFunction("cast('foo' as char(3)) = cast('bar' as char(3))", BOOLEAN, false);
assertFunction("cast('bar' as char(3)) = cast('foo' as char(3))", BOOLEAN, false);
assertFunction("cast('bar' as char(5)) = 'bar'", BOOLEAN, true);
assertFunction("cast('bar' as char(5)) = 'bar '", BOOLEAN, true);
assertFunction("cast('a' as char(2)) = cast('a ' as char(2))", BOOLEAN, true);
assertFunction("cast('a ' as char(2)) = cast('a' as char(2))", BOOLEAN, true);
assertFunction("cast('a' as char(3)) = cast('a' as char(2))", BOOLEAN, true);
assertFunction("cast('' as char(3)) = cast('' as char(2))", BOOLEAN, true);
assertFunction("cast('' as char(2)) = cast('' as char(2))", BOOLEAN, true);
} |
@Override
public void open() throws Exception {
mainInputActivityClock = new PausableRelativeClock(getProcessingTimeService().getClock());
TaskIOMetricGroup taskIOMetricGroup =
getContainingTask().getEnvironment().getMetricGroup().getIOMetricGroup();
taskIOMetricGroup.registerBackPressureListener(mainInputActivityClock);
initReader();
// in the future when this one is migrated to the "eager initialization" operator
// (StreamOperatorV2), we should evaluate this during operator construction.
if (emitProgressiveWatermarks) {
eventTimeLogic =
TimestampsAndWatermarks.createProgressiveEventTimeLogic(
watermarkStrategy,
sourceMetricGroup,
getProcessingTimeService(),
getExecutionConfig().getAutoWatermarkInterval(),
mainInputActivityClock,
getProcessingTimeService().getClock(),
taskIOMetricGroup);
} else {
eventTimeLogic =
TimestampsAndWatermarks.createNoOpEventTimeLogic(
watermarkStrategy, sourceMetricGroup, mainInputActivityClock);
}
// restore the state if necessary.
final List<SplitT> splits = CollectionUtil.iterableToList(readerState.get());
if (!splits.isEmpty()) {
LOG.info("Restoring state for {} split(s) to reader.", splits.size());
splitsToInitializeOutput.addAll(splits);
sourceReader.addSplits(splits);
}
// Register the reader to the coordinator.
registerReader();
sourceMetricGroup.idlingStarted();
// Start the reader after registration, sending messages in start is allowed.
sourceReader.start();
eventTimeLogic.startPeriodicWatermarkEmits();
} | @Test
void testOpen() throws Exception {
// Initialize the operator.
operator.initializeState(context.createStateContext());
// Open the operator.
operator.open();
// The source reader should have been assigned a split.
assertThat(mockSourceReader.getAssignedSplits())
.containsExactly(SourceOperatorTestContext.MOCK_SPLIT);
// The source reader should have started.
assertThat(mockSourceReader.isStarted()).isTrue();
// A ReaderRegistrationRequest should have been sent.
assertThat(mockGateway.getEventsSent()).hasSize(1);
OperatorEvent operatorEvent = mockGateway.getEventsSent().get(0);
assertThat(operatorEvent).isInstanceOf(ReaderRegistrationEvent.class);
assertThat(((ReaderRegistrationEvent) operatorEvent).subtaskId())
.isEqualTo(SourceOperatorTestContext.SUBTASK_INDEX);
} |
public OpenConfigTransceiverHandler addConfig(OpenConfigConfigOfTransceiverHandler config) {
modelObject.config(config.getModelObject());
return this;
} | @Test
public void testAddConfig() {
// test Handler
OpenConfigTransceiverHandler transceiver = new OpenConfigTransceiverHandler(parent);
// call addConfig
OpenConfigConfigOfTransceiverHandler config = new OpenConfigConfigOfTransceiverHandler(transceiver);
// expected ModelObject
DefaultTransceiver modelObject = new DefaultTransceiver();
DefaultConfig con = new DefaultConfig();
modelObject.config(con);
assertEquals("[NG]addConfig:ModelObject(Config added) is not an expected one.\n",
modelObject, transceiver.getModelObject());
} |
@Override
public int getAttemptCount(int subtaskIndex) {
Preconditions.checkArgument(subtaskIndex >= 0);
if (subtaskIndex >= attemptCounts.size()) {
return 0;
}
return attemptCounts.get(subtaskIndex);
} | @Test
void testGetAttemptCount() {
final List<Integer> initialAttemptCounts = Arrays.asList(1, 2, 3);
final DefaultSubtaskAttemptNumberStore subtaskAttemptNumberStore =
new DefaultSubtaskAttemptNumberStore(initialAttemptCounts);
assertThat(subtaskAttemptNumberStore.getAttemptCount(1))
.isEqualTo(initialAttemptCounts.get(1));
} |
@Override
public CompletableFuture<Optional<BrokerLookupData>> assign(Optional<ServiceUnitId> topic,
ServiceUnitId serviceUnit,
LookupOptions options) {
final String bundle = serviceUnit.toString();
return dedupeLookupRequest(bundle, k -> {
final CompletableFuture<Optional<String>> owner;
// Assign the bundle to the channel owner if it is an internal topic, to avoid circular references.
if (topic.isPresent() && isInternalTopic(topic.get().toString())) {
owner = serviceUnitStateChannel.getChannelOwnerAsync();
} else {
owner = getHeartbeatOrSLAMonitorBrokerId(serviceUnit).thenCompose(candidateBrokerId -> {
if (candidateBrokerId != null) {
return CompletableFuture.completedFuture(Optional.of(candidateBrokerId));
}
return getOrSelectOwnerAsync(serviceUnit, bundle, options).thenApply(Optional::ofNullable);
});
}
return getBrokerLookupData(owner, bundle);
});
} | @Test
public void testAssign() throws Exception {
Pair<TopicName, NamespaceBundle> topicAndBundle = getBundleIsNotOwnByChangeEventTopic("test-assign");
TopicName topicName = topicAndBundle.getLeft();
NamespaceBundle bundle = topicAndBundle.getRight();
Optional<BrokerLookupData> brokerLookupData = primaryLoadManager.assign(Optional.empty(), bundle,
LookupOptions.builder().build()).get();
assertTrue(brokerLookupData.isPresent());
log.info("Assign the bundle {} to {}", bundle, brokerLookupData);
// Should get owner info from channel.
Optional<BrokerLookupData> brokerLookupData1 = secondaryLoadManager.assign(Optional.empty(), bundle,
LookupOptions.builder().build()).get();
assertEquals(brokerLookupData, brokerLookupData1);
Optional<LookupResult> lookupResult = pulsar2.getNamespaceService()
.getBrokerServiceUrlAsync(topicName, LookupOptions.builder().build()).get();
assertTrue(lookupResult.isPresent());
assertEquals(lookupResult.get().getLookupData().getHttpUrl(), brokerLookupData.get().getWebServiceUrl());
Optional<URL> webServiceUrl = pulsar2.getNamespaceService()
.getWebServiceUrl(bundle, LookupOptions.builder().requestHttps(false).build());
assertTrue(webServiceUrl.isPresent());
assertEquals(webServiceUrl.get().toString(), brokerLookupData.get().getWebServiceUrl());
} |
@SuppressWarnings("unchecked")
boolean contains(DiscreteResource resource) {
return resource.valueAs(Object.class)
.map(x -> codec.encode(x))
.map(rangeSet::contains)
.orElse(false);
} | @Test
public void testContains() {
DiscreteResource res1 = Resources.discrete(DID, PN, VID1).resource();
DiscreteResource res2 = Resources.discrete(DID, PN, VID2).resource();
DiscreteResource res3 = Resources.discrete(DID, PN, VID3).resource();
Set<DiscreteResource> resources = ImmutableSet.of(res1, res2);
EncodedDiscreteResources sut = EncodedDiscreteResources.of(resources, new VlanIdCodec());
assertThat(sut.contains(res1), is(true));
assertThat(sut.contains(res3), is(false));
} |
@CanIgnoreReturnValue
@Override
public JsonWriter value(String value) throws IOException {
if (value == null) {
return nullValue();
}
put(new JsonPrimitive(value));
return this;
} | @Test
public void testBoolValue() throws Exception {
JsonTreeWriter writer = new JsonTreeWriter();
boolean bool = true;
assertThat(writer.value(bool)).isEqualTo(writer);
} |
public static String getTagValue( Node n, KettleAttributeInterface code ) {
return getTagValue( n, code.getXmlCode() );
} | @Test
public void getTagValueWithNullNode() {
assertNull( XMLHandler.getTagValue( null, "text" ) );
} |
@Override
public void marshal(final Exchange exchange, final Object graph, final OutputStream stream) throws Exception {
// ask for a mandatory type conversion to avoid a possible NPE beforehand, as we copy from the InputStream
final InputStream is = exchange.getContext().getTypeConverter().mandatoryConvertTo(InputStream.class, exchange, graph);
final Deflater deflater = new Deflater(compressionLevel);
final DeflaterOutputStream zipOutput = new DeflaterOutputStream(stream, deflater);
try {
IOHelper.copy(is, zipOutput);
} finally {
IOHelper.close(is, zipOutput);
/*
* As we create the Deflater our self and do not use the stream default
* (see {@link java.util.zip.DeflaterOutputStream#usesDefaultDeflater})
* we need to close the Deflater to not risk an OutOfMemoryException
* in native code parts (see {@link java.util.zip.Deflater#end})
*/
deflater.end();
}
} | @Test
public void testMarshalMandatoryConversionFailed() throws Exception {
DataFormat dataFormat = new ZipDeflaterDataFormat();
try {
dataFormat.marshal(new DefaultExchange(context), new Object(), new ByteArrayOutputStream());
fail("Should have thrown an exception");
} catch (NoTypeConversionAvailableException e) {
// expected
}
} |
public final void setStrictness(Strictness strictness) {
Objects.requireNonNull(strictness);
this.strictness = strictness;
} | @Test
public void testCapitalizedNullFailWhenStrict() {
JsonReader reader = new JsonReader(reader("NULL"));
reader.setStrictness(Strictness.STRICT);
IOException expected = assertThrows(IOException.class, reader::nextNull);
assertThat(expected)
.hasMessageThat()
.startsWith(
"Use JsonReader.setStrictness(Strictness.LENIENT) to accept malformed JSON"
+ " at line 1 column 1 path $\n");
reader = new JsonReader(reader("nulL"));
reader.setStrictness(Strictness.STRICT);
expected = assertThrows(IOException.class, reader::nextNull);
assertThat(expected)
.hasMessageThat()
.startsWith(
"Use JsonReader.setStrictness(Strictness.LENIENT) to accept malformed JSON"
+ " at line 1 column 1 path $\n");
} |
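The failures above come from JSON's case-sensitive literals: only lowercase null is well-formed, so a strict reader rejects NULL and nulL. A small usage sketch of the accepted form, assuming a Gson version where the Strictness enum is available, as the test implies:

import com.google.gson.Strictness;
import com.google.gson.stream.JsonReader;
import java.io.IOException;
import java.io.StringReader;

public class StrictNullSketch {
    public static void main(String[] args) throws IOException {
        JsonReader reader = new JsonReader(new StringReader("null"));
        reader.setStrictness(Strictness.STRICT);
        reader.nextNull(); // succeeds: lowercase "null" is the only casing JSON allows
        System.out.println("consumed a strict null");
    }
}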
@Override
public boolean supportSchemaVersioning() {
return true;
} | @Test
public void testSupportMultiVersioningSupportByDefault() {
Assert.assertTrue(writerSchema.supportSchemaVersioning());
Assert.assertTrue(readerSchema.supportSchemaVersioning());
} |
public PeriodStats plus(PeriodStats toAdd) {
PeriodStats result = new PeriodStats();
result.messagesSent += this.messagesSent;
result.messageSendErrors += this.messageSendErrors;
result.bytesSent += this.bytesSent;
result.messagesReceived += this.messagesReceived;
result.bytesReceived += this.bytesReceived;
result.totalMessagesSent += this.totalMessagesSent;
result.totalMessageSendErrors += this.totalMessageSendErrors;
result.totalMessagesReceived += this.totalMessagesReceived;
result.publishLatency.add(this.publishLatency);
result.publishDelayLatency.add(this.publishDelayLatency);
result.endToEndLatency.add(this.endToEndLatency);
result.messagesSent += toAdd.messagesSent;
result.messageSendErrors += toAdd.messageSendErrors;
result.bytesSent += toAdd.bytesSent;
result.messagesReceived += toAdd.messagesReceived;
result.bytesReceived += toAdd.bytesReceived;
result.totalMessagesSent += toAdd.totalMessagesSent;
result.totalMessageSendErrors += toAdd.totalMessageSendErrors;
result.totalMessagesReceived += toAdd.totalMessagesReceived;
result.publishLatency.add(toAdd.publishLatency);
result.publishDelayLatency.add(toAdd.publishDelayLatency);
result.endToEndLatency.add(toAdd.endToEndLatency);
return result;
} | @Test
void plus() {
PeriodStats one = new PeriodStats();
one.messagesSent = 1;
one.messageSendErrors = 2;
one.bytesSent = 3;
one.messagesReceived = 4;
one.bytesReceived = 5;
one.totalMessagesSent = 6;
one.totalMessageSendErrors = 7;
one.totalMessagesReceived = 8;
PeriodStats two = new PeriodStats();
two.messagesSent = 10;
two.messageSendErrors = 20;
two.bytesSent = 30;
two.messagesReceived = 40;
two.bytesReceived = 50;
two.totalMessagesSent = 60;
two.totalMessageSendErrors = 70;
two.totalMessagesReceived = 80;
PeriodStats result = one.plus(two);
assertThat(result)
.satisfies(
r -> {
assertThat(r.messagesSent).isEqualTo(11);
assertThat(r.messageSendErrors).isEqualTo(22);
assertThat(r.bytesSent).isEqualTo(33);
assertThat(r.messagesReceived).isEqualTo(44);
assertThat(r.bytesReceived).isEqualTo(55);
assertThat(r.totalMessagesSent).isEqualTo(66);
assertThat(r.totalMessageSendErrors).isEqualTo(77);
assertThat(r.totalMessagesReceived).isEqualTo(88);
two.publishLatency.add(one.publishLatency);
two.publishDelayLatency.add(one.publishDelayLatency);
two.endToEndLatency.add(one.endToEndLatency);
assertThat(r.publishLatency).isEqualTo(two.publishLatency);
assertThat(r.publishDelayLatency).isEqualTo(two.publishDelayLatency);
assertThat(r.endToEndLatency).isEqualTo(two.endToEndLatency);
});
} |
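plus() above is a pure combine: it allocates a fresh PeriodStats, folds in this, then folds in toAdd, leaving both operands untouched, which is why the test can still merge the histograms of one and two afterwards. A minimal sketch of the same copy-then-accumulate shape reduced to two counters (the real class also merges latency recorders):

public class StatsCombineSketch {
    static final class Stats {
        long messagesSent;
        long bytesSent;

        Stats plus(Stats other) {
            Stats result = new Stats();
            result.messagesSent = this.messagesSent + other.messagesSent;
            result.bytesSent = this.bytesSent + other.bytesSent;
            return result; // neither operand is mutated
        }
    }

    public static void main(String[] args) {
        Stats a = new Stats();
        a.messagesSent = 1;
        a.bytesSent = 3;
        Stats b = new Stats();
        b.messagesSent = 10;
        b.bytesSent = 30;
        Stats sum = a.plus(b);
        System.out.println(sum.messagesSent + " " + sum.bytesSent); // 11 33
    }
}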
public synchronized Topology build() {
return build(null);
} | @Test
public void shouldNotThrowNullPointerIfOptimizationsNotSpecified() {
final Properties properties = new Properties();
final StreamsBuilder builder = new StreamsBuilder();
builder.build(properties);
} |
public void isNotIn(@Nullable Iterable<?> iterable) {
checkNotNull(iterable);
if (Iterables.contains(iterable, actual)) {
failWithActual("expected not to be any of", iterable);
}
} | @Test
public void isNotInFailure() {
expectFailure.whenTesting().that("b").isNotIn(oneShotIterable("a", "b", "c"));
assertFailureKeys("expected not to be any of", "but was");
assertFailureValue("expected not to be any of", "[a, b, c]");
} |
public static HCatSchema getHCatSchemaFromTypeString(String typeString) throws HCatException {
return getHCatSchema(TypeInfoUtils.getTypeInfoFromTypeString(typeString));
} | @Test
public void testSimpleOperation() throws Exception {
String typeString = "struct<name:string,studentid:int,"
+ "contact:struct<phNo:string,email:string>,"
+ "currently_registered_courses:array<string>,"
+ "current_grades:map<string,string>,"
+ "phNos:array<struct<phNo:string,type:string>>,blah:array<int>>";
TypeInfo ti = TypeInfoUtils.getTypeInfoFromTypeString(typeString);
HCatSchema hsch = HCatSchemaUtils.getHCatSchemaFromTypeString(typeString);
LOG.info("Type name : {}", ti.getTypeName());
LOG.info("HCatSchema : {}", hsch);
assertEquals(hsch.size(), 1);
// Looks like HCatFieldSchema.getTypeString() lower-cases its results
assertEquals(ti.getTypeName().toLowerCase(), hsch.get(0).getTypeString());
assertEquals(hsch.get(0).getTypeString(), typeString.toLowerCase());
} |
@Override
public AppToken createAppToken(long appId, String privateKey) {
Algorithm algorithm = readApplicationPrivateKey(appId, privateKey);
LocalDateTime now = LocalDateTime.now(clock);
// Expiration period is configurable and could be greater if needed.
// See https://developer.github.com/apps/building-github-apps/authenticating-with-github-apps/#authenticating-as-a-github-app
LocalDateTime expiresAt = now.plus(AppToken.EXPIRATION_PERIOD_IN_MINUTES, ChronoUnit.MINUTES);
ZoneOffset offset = clock.getZone().getRules().getOffset(now);
Date nowDate = Date.from(now.toInstant(offset));
Date expiresAtDate = Date.from(expiresAt.toInstant(offset));
JWTCreator.Builder builder = JWT.create()
.withIssuer(String.valueOf(appId))
.withIssuedAt(nowDate)
.withExpiresAt(expiresAtDate);
return new AppToken(builder.sign(algorithm));
} | @Test
public void createAppToken_fails_with_IAE_if_privateKey_PKCS8_content_is_corrupted() {
String corruptedPrivateKey = "-----BEGIN RSA PRIVATE KEY-----\n" +
"MIIEowIBAAKCAQEA6C29ZdvrwHOu7Eewv+xvUd4inCnACTzAHukHKTSY4R16+lRI\n" +
"YC5qZ8Xo304J7lLhN4/d4Xnof3lDXZOHthVbJKik4fOuEGbTXTIcuFs3hdJtrJsb\n" +
"antv8SOl5iR4fYRAf2AILMdtZI4iMSicBLIIttR+wVXo6NJYMjpj1OuAU3uN8eET\n" +
"Gge09oJT3QOUBem7N8uaYi/p5uAfsf2/SVNsoMPV624X4kgNcyj/TMa6BosFJ8Y3\n" +
"oeg0Aguk2yuHhAnixDVGoz6N7Go0QjEipVNix2JOOJwpFH4k2iZfM6n+8sJTLilq\n" +
"yzT53JW/XI+M5AXVj4OjBJ/2yMPi3RFMNTdgRwIDAQABAoIBACcYBIsRI7oNAIgi\n" +
"bh1y1y+mwpce5Inpo8PQovcKNy+4gguCg4lGZ34/sb1f64YoiGmNnOOpXj+QkIpC\n" +
"HBjJscYTa2fsWwPB/Jb1qCZWnZu32eW1XEFqtWeaBAYjX/JqgV2xMs8vaTkEQbeb\n" +
// "SeH0hEkcsJcnOwdw247hjAu+96WWlyt10ZGgQaWPfXsdtelbaoaturNAVAJHdl9e\n" +
// "TIknCIbtLlbz/FtzjtCtdeiWr8gbKdVkshGtA8SKVhXGQwDwENjUkAUtSJ0aXR1t\n" +
// "+UjQcTISk7LiiYs0MrJ/CKoJ7mShwx7+YF3hgyqQ0qaqHwt9Yyd7wzWdCgdM5Eha\n" +
// "ccioIskCgYEA+EDJmcM5NGu5AYpZ1ogmG6jzsefAlr2NG1PQ/U03twal/B+ygAQb\n" +
// "5dholrq+aF+45Hrzfxije3Zrvpb08vxzKAs20lOlJsKftx2zkLR+mNvWTAORuO16\n" +
// "lG0c0cgYAKA1ld4R8KB8NmbuNb1w4LYZuyuFIEVmm2B3ca141WNHBwMCgYEA72yK\n" +
// "B4+xxomZn6dtbCGQZxziaI9WH/KEfDemKO5cfPlynQjmmMkiDpcyHa7mvdU+PGh3\n" +
"g+OmQxORXMmBkHEnYS1fl3ac3U5sLiHAQBmTKKcLuVQlIU4oDu/K6WEGL9DdPtaK\n" +
"gyOOWtSnfHTbT0bZ4IMm+gzdc4bCuEjvYyUhzG0CgYAEN011MAyTqFSvAwN9kjhb\n" +
"deYVmmL57GQuF6FP+/S7RgChpIQqimdS4vb7wFYlfaKtNq1V9jwoh51S0kt8qO7n\n" +
"ujEHJ2aBnwKJYJbBGV+hBvK/vbvG0TmotaWspmJJ+G6QigHx/Te+0Maw4PO+zTjo\n" +
"pdeP8b3JW70LkC+iKBp3swKBgFL/nm32m1tHEjFtehpVHFkSg05Z+jJDATiKlhh0\n" +
"YS2Vz+yuTDpE54CFW4M8wZKnXNbWJDBdd6KjIu42kKrA/zTJ5Ox92u1BJXFsk9fk\n" +
"xcX++qp5iBGepXZgHEiBMQLcdgY1m3jQl6XXOGSFog0+c4NIE/f1A8PrwI7gAdSt\n" +
"56SVAoGBAJp214Fo0oheMTTYKVtXuGiH/v3JNG1jKFgsmHqndf4wy7U6bbNctEzc\n" +
"ZXNIacuhWmko6YejMrWNhE57sX812MhXGZq6y0sYZGKtp7oDv8G3rWD6bpZywpcV\n" +
"kTtMJxm8J64u6bAkpWG3BocJP9qbXeAbILo1wuXgYqABBrpA9nnc\n" +
"-----END RSA PRIVATE KEY-----";
GithubAppConfiguration githubAppConfiguration = createAppConfigurationForPrivateKey(corruptedPrivateKey);
assertThatThrownBy(() -> underTest.createAppToken(githubAppConfiguration.getId(), githubAppConfiguration.getPrivateKey()))
.isInstanceOf(IllegalArgumentException.class)
.hasCauseInstanceOf(InvalidKeySpecException.class);
} |
public MailConfiguration getConfiguration() {
if (configuration == null) {
configuration = new MailConfiguration(getCamelContext());
}
return configuration;
} | @Test
public void testMailEndpointsAreConfiguredProperlyWhenUsingPop() {
MailEndpoint endpoint = checkEndpoint("pop3://james@myhost:110/subject");
MailConfiguration config = endpoint.getConfiguration();
assertEquals("pop3", config.getProtocol(), "getProtocol()");
assertEquals("myhost", config.getHost(), "getHost()");
assertEquals(110, config.getPort(), "getPort()");
assertEquals("james", config.getUsername(), "getUsername()");
assertEquals("james@myhost", config.getRecipients().get(Message.RecipientType.TO),
"getRecipients().get(Message.RecipientType.TO)");
assertEquals("INBOX", config.getFolderName(), "folder");
assertFalse(config.isDebugMode());
} |
@Override
public void saveProperty(DbSession session, PropertyDto property, @Nullable String userLogin,
@Nullable String projectKey, @Nullable String projectName, @Nullable String qualifier) {
// do nothing
} | @Test
public void insertProperty() {
underTest.saveProperty(dbSession, propertyDto, null, null, null, null);
assertNoInteraction();
} |
public Resource getQueueResource(String queueName, Set<String> queueLabels,
Resource clusterResource) {
readLock.lock();
try {
if (queueLabels.contains(ANY)) {
return clusterResource;
}
Queue q = queueCollections.get(queueName);
if (null == q) {
return Resources.none();
}
return q.resource;
} finally {
readLock.unlock();
}
} | @Test(timeout=5000)
public void testGetQueueResource() throws Exception {
Resource clusterResource = Resource.newInstance(9999, 1);
/*
* Node->Labels:
* host1 : red
* host2 : blue
* host3 : yellow
* host4 :
*/
mgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("red", "blue", "yellow"));
mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("host1"), toSet("red")));
mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("host2"), toSet("blue")));
mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("host3"), toSet("yellow")));
// activate one NM on each of the four hosts, all with the small resource
mgr.activateNode(NodeId.newInstance("host1", 1), SMALL_RESOURCE);
mgr.activateNode(NodeId.newInstance("host2", 1), SMALL_RESOURCE);
mgr.activateNode(NodeId.newInstance("host3", 1), SMALL_RESOURCE);
mgr.activateNode(NodeId.newInstance("host4", 1), SMALL_RESOURCE);
// reinitialize queue
Set<String> q1Label = toSet("red", "blue");
Set<String> q2Label = toSet("blue", "yellow");
Set<String> q3Label = toSet("yellow");
Set<String> q4Label = RMNodeLabelsManager.EMPTY_STRING_SET;
Set<String> q5Label = toSet(RMNodeLabelsManager.ANY);
Map<String, Set<String>> queueToLabels = new HashMap<String, Set<String>>();
queueToLabels.put("Q1", q1Label);
queueToLabels.put("Q2", q2Label);
queueToLabels.put("Q3", q3Label);
queueToLabels.put("Q4", q4Label);
queueToLabels.put("Q5", q5Label);
mgr.reinitializeQueueLabels(queueToLabels);
// check resource
Assert.assertEquals(Resources.multiply(SMALL_RESOURCE, 3),
mgr.getQueueResource("Q1", q1Label, clusterResource));
Assert.assertEquals(Resources.multiply(SMALL_RESOURCE, 3),
mgr.getQueueResource("Q2", q2Label, clusterResource));
Assert.assertEquals(Resources.multiply(SMALL_RESOURCE, 2),
mgr.getQueueResource("Q3", q3Label, clusterResource));
Assert.assertEquals(Resources.multiply(SMALL_RESOURCE, 1),
mgr.getQueueResource("Q4", q4Label, clusterResource));
Assert.assertEquals(clusterResource,
mgr.getQueueResource("Q5", q5Label, clusterResource));
mgr.removeLabelsFromNode(ImmutableMap.of(toNodeId("host2"), toSet("blue")));
/*
* Check resource after changes some labels
* Node->Labels:
* host1 : red
* host2 : (was: blue)
* host3 : yellow
* host4 :
*/
// check resource
Assert.assertEquals(Resources.multiply(SMALL_RESOURCE, 3),
mgr.getQueueResource("Q1", q1Label, clusterResource));
Assert.assertEquals(Resources.multiply(SMALL_RESOURCE, 3),
mgr.getQueueResource("Q2", q2Label, clusterResource));
Assert.assertEquals(Resources.multiply(SMALL_RESOURCE, 3),
mgr.getQueueResource("Q3", q3Label, clusterResource));
Assert.assertEquals(Resources.multiply(SMALL_RESOURCE, 2),
mgr.getQueueResource("Q4", q4Label, clusterResource));
Assert.assertEquals(clusterResource,
mgr.getQueueResource("Q5", q5Label, clusterResource));
/*
* Check resource after deactive/active some nodes
* Node->Labels:
* (deactivated) host1 : red
* host2 :
* (deactivated and then activated) host3 : yellow
* host4 :
*/
mgr.deactivateNode(NodeId.newInstance("host1", 1));
mgr.deactivateNode(NodeId.newInstance("host3", 1));
mgr.activateNode(NodeId.newInstance("host3", 1), SMALL_RESOURCE);
// check resource
Assert.assertEquals(Resources.multiply(SMALL_RESOURCE, 2),
mgr.getQueueResource("Q1", q1Label, clusterResource));
Assert.assertEquals(Resources.multiply(SMALL_RESOURCE, 3),
mgr.getQueueResource("Q2", q2Label, clusterResource));
Assert.assertEquals(Resources.multiply(SMALL_RESOURCE, 3),
mgr.getQueueResource("Q3", q3Label, clusterResource));
Assert.assertEquals(Resources.multiply(SMALL_RESOURCE, 2),
mgr.getQueueResource("Q4", q4Label, clusterResource));
Assert.assertEquals(clusterResource,
mgr.getQueueResource("Q5", q5Label, clusterResource));
/*
* Check resource after refresh queue:
* Q1: blue
* Q2: red, blue
* Q3: red
* Q4:
* Q5: ANY
*/
q1Label = toSet("blue");
q2Label = toSet("blue", "red");
q3Label = toSet("red");
q4Label = RMNodeLabelsManager.EMPTY_STRING_SET;
q5Label = toSet(RMNodeLabelsManager.ANY);
queueToLabels.clear();
queueToLabels.put("Q1", q1Label);
queueToLabels.put("Q2", q2Label);
queueToLabels.put("Q3", q3Label);
queueToLabels.put("Q4", q4Label);
queueToLabels.put("Q5", q5Label);
mgr.reinitializeQueueLabels(queueToLabels);
// check resource
Assert.assertEquals(Resources.multiply(SMALL_RESOURCE, 2),
mgr.getQueueResource("Q1", q1Label, clusterResource));
Assert.assertEquals(Resources.multiply(SMALL_RESOURCE, 2),
mgr.getQueueResource("Q2", q2Label, clusterResource));
Assert.assertEquals(Resources.multiply(SMALL_RESOURCE, 2),
mgr.getQueueResource("Q3", q3Label, clusterResource));
Assert.assertEquals(Resources.multiply(SMALL_RESOURCE, 2),
mgr.getQueueResource("Q4", q4Label, clusterResource));
Assert.assertEquals(clusterResource,
mgr.getQueueResource("Q5", q5Label, clusterResource));
/*
* Activate NMs on nodes that already have NMs
* Node->Labels:
* host2 :
* host3 : yellow (3 NMs)
* host4 : (2 NMs)
*/
mgr.activateNode(NodeId.newInstance("host3", 2), SMALL_RESOURCE);
mgr.activateNode(NodeId.newInstance("host3", 3), SMALL_RESOURCE);
mgr.activateNode(NodeId.newInstance("host4", 2), SMALL_RESOURCE);
// check resource
Assert.assertEquals(Resources.multiply(SMALL_RESOURCE, 3),
mgr.getQueueResource("Q1", q1Label, clusterResource));
Assert.assertEquals(Resources.multiply(SMALL_RESOURCE, 3),
mgr.getQueueResource("Q2", q2Label, clusterResource));
Assert.assertEquals(Resources.multiply(SMALL_RESOURCE, 3),
mgr.getQueueResource("Q3", q3Label, clusterResource));
Assert.assertEquals(Resources.multiply(SMALL_RESOURCE, 3),
mgr.getQueueResource("Q4", q4Label, clusterResource));
Assert.assertEquals(clusterResource,
mgr.getQueueResource("Q5", q5Label, clusterResource));
/*
* Deactivate NMs on nodes that already have NMs
* Node->Labels:
* host2 :
* host3 : yellow (2 NMs)
* host4 : (0 NMs)
*/
mgr.deactivateNode(NodeId.newInstance("host3", 3));
mgr.deactivateNode(NodeId.newInstance("host4", 2));
mgr.deactivateNode(NodeId.newInstance("host4", 1));
// check resource
Assert.assertEquals(Resources.multiply(SMALL_RESOURCE, 1),
mgr.getQueueResource("Q1", q1Label, clusterResource));
Assert.assertEquals(Resources.multiply(SMALL_RESOURCE, 1),
mgr.getQueueResource("Q2", q2Label, clusterResource));
Assert.assertEquals(Resources.multiply(SMALL_RESOURCE, 1),
mgr.getQueueResource("Q3", q3Label, clusterResource));
Assert.assertEquals(Resources.multiply(SMALL_RESOURCE, 1),
mgr.getQueueResource("Q4", q4Label, clusterResource));
Assert.assertEquals(clusterResource,
mgr.getQueueResource("Q5", q5Label, clusterResource));
} |
public static String getMasterForEntry(JournalEntry entry) {
if (entry.hasAddMountPoint()
|| entry.hasAsyncPersistRequest()
|| entry.hasAddSyncPoint()
|| entry.hasActiveSyncTxId()
|| entry.hasCompleteFile()
|| entry.hasDeleteFile()
|| entry.hasDeleteMountPoint()
|| entry.hasInodeDirectory()
|| entry.hasInodeDirectoryIdGenerator()
|| entry.hasInodeFile()
|| entry.hasInodeLastModificationTime()
|| entry.hasNewBlock()
|| entry.hasPersistDirectory()
|| entry.hasRemoveSyncPoint()
|| entry.hasRename()
|| entry.hasSetAcl()
|| entry.hasSetAttribute()
|| entry.hasUpdateUfsMode()
|| entry.hasUpdateInode()
|| entry.hasUpdateInodeDirectory()
|| entry.hasUpdateInodeFile()
|| entry.hasLoadJob()
|| entry.hasCopyJob()
|| entry.hasMoveJob()) {
return Constants.FILE_SYSTEM_MASTER_NAME;
}
if (entry.hasBlockContainerIdGenerator()
|| entry.hasDeleteBlock()
|| entry.hasBlockInfo()) {
return Constants.BLOCK_MASTER_NAME;
}
if (entry.hasClusterInfo()
|| entry.hasPathProperties()
|| entry.hasRemovePathProperties()) {
return Constants.META_MASTER_NAME;
}
if (entry.hasPolicyDefinition()
|| entry.hasPolicyRemove()) {
return Constants.POLICY_ENGINE_NAME;
}
throw new IllegalStateException("Unrecognized journal entry: " + entry);
} | @Test
public void testUnknown() {
mThrown.expect(IllegalStateException.class);
JournalEntryAssociation.getMasterForEntry(JournalEntry.getDefaultInstance());
} |
@Override
public void consume(Update update) {
super.consume(update);
} | @Test
void canProcessRepliesRegisteredInCollection() {
Update firstUpdate = mock(Update.class);
Message firstMessage = mock(Message.class);
when(firstMessage.getText()).thenReturn(DefaultBot.FIRST_REPLY_KEY_MESSAGE);
when(firstMessage.getChatId()).thenReturn(1L);
Update secondUpdate = mock(Update.class);
Message secondMessage = mock(Message.class);
when(secondMessage.getText()).thenReturn(DefaultBot.SECOND_REPLY_KEY_MESSAGE);
when(secondMessage.getChatId()).thenReturn(1L);
mockUser(firstUpdate, firstMessage, USER);
mockUser(secondUpdate, secondMessage, USER);
bot.consume(firstUpdate);
bot.consume(secondUpdate);
verify(silent, times(2)).send(anyString(), anyLong());
verify(silent, times(1)).send("first reply answer", 1);
verify(silent, times(1)).send("second reply answer", 1);
} |
public synchronized int sendFetches() {
final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
sendFetchesInternal(
fetchRequests,
(fetchTarget, data, clientResponse) -> {
synchronized (Fetcher.this) {
handleFetchSuccess(fetchTarget, data, clientResponse);
}
},
(fetchTarget, data, error) -> {
synchronized (Fetcher.this) {
handleFetchFailure(fetchTarget, data, error);
}
});
return fetchRequests.size();
} | @Test
public void testFetcherIgnoresControlRecords() {
buildFetcher();
assignFromUser(singleton(tp0));
subscriptions.seek(tp0, 0);
// normal fetch
assertEquals(1, sendFetches());
assertFalse(fetcher.hasCompletedFetches());
long producerId = 1;
short producerEpoch = 0;
int baseSequence = 0;
int partitionLeaderEpoch = 0;
ByteBuffer buffer = ByteBuffer.allocate(1024);
MemoryRecordsBuilder builder = MemoryRecords.idempotentBuilder(buffer, Compression.NONE, 0L, producerId,
producerEpoch, baseSequence);
builder.append(0L, "key".getBytes(), null);
builder.close();
MemoryRecords.writeEndTransactionalMarker(buffer, 1L, time.milliseconds(), partitionLeaderEpoch, producerId, producerEpoch,
new EndTransactionMarker(ControlRecordType.ABORT, 0));
buffer.flip();
client.prepareResponse(fullFetchResponse(tidp0, MemoryRecords.readableRecords(buffer), Errors.NONE, 100L, 0));
consumerClient.poll(time.timer(0));
assertTrue(fetcher.hasCompletedFetches());
Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords = fetchRecords();
assertTrue(partitionRecords.containsKey(tp0));
List<ConsumerRecord<byte[], byte[]>> records = partitionRecords.get(tp0);
assertEquals(1, records.size());
assertEquals(2L, subscriptions.position(tp0).offset);
ConsumerRecord<byte[], byte[]> record = records.get(0);
assertArrayEquals("key".getBytes(), record.key());
} |
@Override
public void executeUpdate(final SetComputeNodeStateStatement sqlStatement, final ContextManager contextManager) {
if ("DISABLE".equals(sqlStatement.getState())) {
checkDisablingIsValid(contextManager, sqlStatement.getInstanceId());
} else {
checkEnablingIsValid(contextManager, sqlStatement.getInstanceId());
}
contextManager.getPersistServiceFacade().getComputeNodePersistService().updateComputeNodeState(sqlStatement.getInstanceId(),
"DISABLE".equals(sqlStatement.getState()) ? InstanceState.CIRCUIT_BREAK : InstanceState.OK);
} | @Test
void assertExecuteUpdateWithCurrentUsingInstance() {
ContextManager contextManager = mock(ContextManager.class, RETURNS_DEEP_STUBS);
when(contextManager.getComputeNodeInstanceContext().getInstance().getMetaData().getId()).thenReturn("instanceID");
assertThrows(UnsupportedSQLOperationException.class, () -> executor.executeUpdate(new SetComputeNodeStateStatement("DISABLE", "instanceID"), contextManager));
} |
@Override
public boolean matchesJdbcUrl(String jdbcConnectionURL) {
return StringUtils.startsWithIgnoreCase(jdbcConnectionURL, "jdbc:postgresql:");
} | @Test
void matchesJdbcURL() {
assertThat(underTest.matchesJdbcUrl("jdbc:postgresql://localhost/sonar")).isTrue();
assertThat(underTest.matchesJdbcUrl("jdbc:hsql:foo")).isFalse();
} |
int getCalculatedScale(String value) {
int index = value.indexOf(".");
return index == -1 ? 0 : value.length() - index - 1;
} | @Test
public void testGetCalculatedScale() {
PinotResultSet pinotResultSet = new PinotResultSet();
int calculatedResult;
calculatedResult = pinotResultSet.getCalculatedScale("1");
Assert.assertEquals(calculatedResult, 0);
calculatedResult = pinotResultSet.getCalculatedScale("1.0");
Assert.assertEquals(calculatedResult, 1);
calculatedResult = pinotResultSet.getCalculatedScale("1.2");
Assert.assertEquals(calculatedResult, 1);
calculatedResult = pinotResultSet.getCalculatedScale("1.23");
Assert.assertEquals(calculatedResult, 2);
calculatedResult = pinotResultSet.getCalculatedScale("1.234");
Assert.assertEquals(calculatedResult, 3);
calculatedResult = pinotResultSet.getCalculatedScale("-1.234");
Assert.assertEquals(calculatedResult, 3);
} |
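The helper above computes the scale from the raw string without allocating a numeric object; java.math.BigDecimal derives the same value from its string constructor, which makes a convenient cross-check for the cases the test covers:

import java.math.BigDecimal;

public class ScaleCrossCheck {
    public static void main(String[] args) {
        System.out.println(new BigDecimal("1").scale());      // 0
        System.out.println(new BigDecimal("1.0").scale());    // 1
        System.out.println(new BigDecimal("1.23").scale());   // 2
        System.out.println(new BigDecimal("-1.234").scale()); // 3
    }
}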
static void checkValidTableId(String idToCheck) {
if (idToCheck.length() < MIN_TABLE_ID_LENGTH) {
throw new IllegalArgumentException("Table ID cannot be empty. ");
}
if (idToCheck.length() > MAX_TABLE_ID_LENGTH) {
throw new IllegalArgumentException(
"Table ID "
+ idToCheck
+ " cannot be longer than "
+ MAX_TABLE_ID_LENGTH
+ " characters.");
}
if (ILLEGAL_TABLE_CHARS.matcher(idToCheck).find()) {
throw new IllegalArgumentException(
"Table ID "
+ idToCheck
+ " is not a valid ID. Only letters, numbers, hyphens and underscores are allowed.");
}
} | @Test
public void testCheckValidTableIdWhenIdIsTooLong() {
char[] chars = new char[1025];
Arrays.fill(chars, 'a');
String s = new String(chars);
assertThrows(IllegalArgumentException.class, () -> checkValidTableId(s));
} |
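The length window plus character whitelist above could also be expressed as a single anchored regex. A hedged sketch follows; the 1..1024 bounds are assumptions, since MIN_TABLE_ID_LENGTH and MAX_TABLE_ID_LENGTH are not shown in the snippet (the test only implies the maximum is below 1025):

import java.util.regex.Pattern;

public class TableIdCheckSketch {
    // Assumed bounds: at least 1 and at most 1024 characters.
    private static final Pattern VALID_ID = Pattern.compile("[A-Za-z0-9_-]{1,1024}");

    static void checkValidTableId(String id) {
        if (!VALID_ID.matcher(id).matches()) {
            throw new IllegalArgumentException("Invalid table ID: " + id);
        }
    }

    public static void main(String[] args) {
        checkValidTableId("my-table_01"); // ok
        checkValidTableId("bad name");    // throws: space is not in the whitelist
    }
}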
@Override
public List<PostDO> getPostList(Collection<Long> ids) {
if (CollUtil.isEmpty(ids)) {
return Collections.emptyList();
}
return postMapper.selectBatchIds(ids);
} | @Test
public void testGetPostList_idsAndStatus() {
// mock data
PostDO postDO01 = randomPojo(PostDO.class, o -> o.setStatus(CommonStatusEnum.ENABLE.getStatus()));
postMapper.insert(postDO01);
// test a status that does not match
PostDO postDO02 = randomPojo(PostDO.class, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus()));
postMapper.insert(postDO02);
// prepare parameters
List<Long> ids = Arrays.asList(postDO01.getId(), postDO02.getId());
// invoke
List<PostDO> list = postService.getPostList(ids, singletonList(CommonStatusEnum.ENABLE.getStatus()));
// assert
assertEquals(1, list.size());
assertPojoEquals(postDO01, list.get(0));
} |
public boolean overlaps(final BoundingBox pBoundingBox, double pZoom) {
//FIXME this is a total hack but it works around a number of issues related to vertical map
//replication and horizontal replication that can cause polygons to completely disappear when
//panning
if (pZoom < 3)
return true;
boolean latMatch = false;
boolean lonMatch = false;
//vertical wrapping detection
if (pBoundingBox.mLatSouth <= mLatNorth &&
pBoundingBox.mLatSouth >= mLatSouth)
latMatch = true;
//normal case, non overlapping
if (mLonWest >= pBoundingBox.mLonWest && mLonWest <= pBoundingBox.mLonEast)
lonMatch = true;
//normal case, non overlapping
if (mLonEast >= pBoundingBox.mLonWest && mLonWest <= pBoundingBox.mLonEast)
lonMatch = true;
//special case for when *this completely surrounds the pBoundbox
if (mLonWest <= pBoundingBox.mLonWest &&
mLonEast >= pBoundingBox.mLonEast &&
mLatNorth >= pBoundingBox.mLatNorth &&
mLatSouth <= pBoundingBox.mLatSouth)
return true;
//normal case, non overlapping
if (mLatNorth >= pBoundingBox.mLatSouth && mLatNorth <= pBoundingBox.mLatNorth)
latMatch = true;
//normal case, non overlapping
if (mLatSouth >= pBoundingBox.mLatSouth && mLatSouth <= pBoundingBox.mLatNorth)
latMatch = true;
if (mLonWest > mLonEast) {
//the date line is included in the bounding box
//we want to match lon from the dateline to the eastern bounds of the box
//and the dateline to the western bounds of the box
if (mLonEast <= pBoundingBox.mLonEast && pBoundingBox.mLonWest >= mLonWest)
lonMatch = true;
if (mLonWest >= pBoundingBox.mLonEast &&
mLonEast <= pBoundingBox.mLonEast) {
lonMatch = true;
if (pBoundingBox.mLonEast < mLonWest &&
pBoundingBox.mLonWest < mLonWest)
lonMatch = false;
if (pBoundingBox.mLonEast > mLonEast &&
pBoundingBox.mLonWest > mLonEast)
lonMatch = false;
}
if (mLonWest >= pBoundingBox.mLonEast &&
mLonEast >= pBoundingBox.mLonEast) {
lonMatch = true;
}
/*
//that is completely within this
if (mLonWest>= pBoundingBox.mLonEast &&
mLonEast<= pBoundingBox.mLonEast) {
lonMatch = true;
if (pBoundingBox.mLonEast < mLonWest &&
pBoundingBox.mLonWest < mLonWest)
lonMatch = false;
if (pBoundingBox.mLonEast > mLonEast &&
pBoundingBox.mLonWest > mLonEast )
lonMatch = false;
}
if (mLonWest>= pBoundingBox.mLonEast &&
mLonEast>= pBoundingBox.mLonEast) {
lonMatch = true;
}*/
}
return latMatch && lonMatch;
} | @Test
public void testDrawSetupLowZoom2() {
BoundingBox view = new BoundingBox(83.17404, 142.74437, -18.14585, 7.73437);
//in some tests, this was disappearing when panning left (westard)
BoundingBox drawing = new BoundingBox(69.65708, 112.85162, 48.45835, 76.64063);
Assert.assertTrue(view.overlaps(drawing, 4));
BoundingBox brokenView = new BoundingBox(83.18311, -167.51953, -18.31281, 57.48046);
//this should be partially offscreen but still within the view and should still draw.
Assert.assertTrue(brokenView.overlaps(drawing, 3));
} |
@SuppressWarnings("checkstyle:npathcomplexity")
public PartitionServiceState getPartitionServiceState() {
PartitionServiceState state = getPartitionTableState();
if (state != SAFE) {
return state;
}
if (!checkAndTriggerReplicaSync()) {
return REPLICA_NOT_SYNC;
}
return SAFE;
} | @Test
public void shouldNotBeSafe_whenMissingReplicasPresent() {
TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory();
HazelcastInstance hz = factory.newHazelcastInstance();
InternalPartitionServiceImpl partitionService = getNode(hz).partitionService;
partitionService.firstArrangement();
PartitionStateManager partitionStateManager = partitionService.getPartitionStateManager();
InternalPartitionImpl partition = partitionStateManager.getPartitionImpl(0);
PartitionReplica[] members = partition.replicas();
partition.setReplicas(new PartitionReplica[members.length]);
PartitionReplicaStateChecker replicaStateChecker = partitionService.getPartitionReplicaStateChecker();
assertEquals(PartitionServiceState.REPLICA_NOT_OWNED, replicaStateChecker.getPartitionServiceState());
partition.setReplicas(members);
assertEquals(PartitionServiceState.SAFE, replicaStateChecker.getPartitionServiceState());
} |
Converter<E> compile() {
head = tail = null;
for (Node n = top; n != null; n = n.next) {
switch (n.type) {
case Node.LITERAL:
addToList(new LiteralConverter<E>((String) n.getValue()));
break;
case Node.COMPOSITE_KEYWORD:
CompositeNode cn = (CompositeNode) n;
CompositeConverter<E> compositeConverter = createCompositeConverter(cn);
if (compositeConverter == null) {
addError("Failed to create converter for [%" + cn.getValue() + "] keyword");
addToList(new LiteralConverter<E>("%PARSER_ERROR[" + cn.getValue() + "]"));
break;
}
compositeConverter.setFormattingInfo(cn.getFormatInfo());
compositeConverter.setOptionList(cn.getOptions());
Compiler<E> childCompiler = new Compiler<E>(cn.getChildNode(), converterMap);
childCompiler.setContext(context);
Converter<E> childConverter = childCompiler.compile();
compositeConverter.setChildConverter(childConverter);
addToList(compositeConverter);
break;
case Node.SIMPLE_KEYWORD:
SimpleKeywordNode kn = (SimpleKeywordNode) n;
DynamicConverter<E> dynaConverter = createConverter(kn);
if (dynaConverter != null) {
dynaConverter.setFormattingInfo(kn.getFormatInfo());
dynaConverter.setOptionList(kn.getOptions());
addToList(dynaConverter);
} else {
// if the appropriate dynaconverter cannot be found, then replace
// it with a dummy LiteralConverter indicating an error.
Converter<E> errConverter = new LiteralConverter<E>("%PARSER_ERROR[" + kn.getValue() + "]");
addStatus(new ErrorStatus("[" + kn.getValue() + "] is not a valid conversion word", this));
addToList(errConverter);
}
}
}
return head;
} | @Test
public void testLiteral() throws Exception {
Parser<Object> p = new Parser<Object>("hello");
Node t = p.parse();
Converter<Object> head = p.compile(t, converterMap);
String result = write(head, new Object());
assertEquals("hello", result);
} |
@Override
public Address getCaller() {
throw new UnsupportedOperationException();
} | @Test(expected = UnsupportedOperationException.class)
public void testGetCaller() {
dataEvent.getCaller();
} |
public static boolean isSystemClass(String name, List<String> systemClasses) {
boolean result = false;
if (systemClasses != null) {
String canonicalName = name.replace('/', '.');
while (canonicalName.startsWith(".")) {
canonicalName=canonicalName.substring(1);
}
for (String c : systemClasses) {
boolean shouldInclude = true;
if (c.startsWith("-")) {
c = c.substring(1);
shouldInclude = false;
}
if (canonicalName.startsWith(c)) {
if ( c.endsWith(".") // package
|| canonicalName.length() == c.length() // class
|| canonicalName.length() > c.length() // nested
&& canonicalName.charAt(c.length()) == '$' ) {
if (shouldInclude) {
result = true;
} else {
return false;
}
}
}
}
}
return result;
} | @Test
public void testIsSystemClass() {
testIsSystemClassInternal("");
} |
static void unTarUsingJava(File inFile, File untarDir,
boolean gzipped) throws IOException {
InputStream inputStream = null;
TarArchiveInputStream tis = null;
try {
if (gzipped) {
inputStream =
new GZIPInputStream(Files.newInputStream(inFile.toPath()));
} else {
inputStream = Files.newInputStream(inFile.toPath());
}
inputStream = new BufferedInputStream(inputStream);
tis = new TarArchiveInputStream(inputStream);
for (TarArchiveEntry entry = tis.getNextTarEntry(); entry != null;) {
unpackEntries(tis, entry, untarDir);
entry = tis.getNextTarEntry();
}
} finally {
IOUtils.cleanupWithLogger(LOG, tis, inputStream);
}
} | @Test(timeout = 30000)
public void testUntarMissingFileThroughJava() throws Throwable {
File dataDir = GenericTestUtils.getTestDir();
File tarFile = new File(dataDir, "missing; true");
File untarDir = new File(dataDir, "untarDir");
// java8 on unix throws java.nio.file.NoSuchFileException here;
// leaving as an IOE intercept in case windows throws something
// else.
intercept(IOException.class, () ->
FileUtil.unTarUsingJava(tarFile, untarDir, false));
} |
@SuppressWarnings("unchecked")
@Override
public void configure(Map<String, ?> configs) throws KafkaException {
if (sslEngineFactory != null) {
throw new IllegalStateException("SslFactory was already configured.");
}
this.endpointIdentification = (String) configs.get(SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG);
// The input map must be a mutable RecordingMap in production.
Map<String, Object> nextConfigs = (Map<String, Object>) configs;
if (clientAuthConfigOverride != null) {
nextConfigs.put(BrokerSecurityConfigs.SSL_CLIENT_AUTH_CONFIG, clientAuthConfigOverride);
}
SslEngineFactory builder = instantiateSslEngineFactory(nextConfigs);
if (keystoreVerifiableUsingTruststore) {
try {
SslEngineValidator.validate(builder, builder);
} catch (Exception e) {
throw new ConfigException("A client SSLEngine created with the provided settings " +
"can't connect to a server SSLEngine created with those settings.", e);
}
}
this.sslEngineFactory = builder;
} | @Test
public void testUntrustedKeyStoreValidationFails() throws Exception {
File trustStoreFile1 = TestUtils.tempFile("truststore1", ".jks");
File trustStoreFile2 = TestUtils.tempFile("truststore2", ".jks");
Map<String, Object> sslConfig1 = sslConfigsBuilder(ConnectionMode.SERVER)
.createNewTrustStore(trustStoreFile1)
.build();
Map<String, Object> sslConfig2 = sslConfigsBuilder(ConnectionMode.SERVER)
.createNewTrustStore(trustStoreFile2)
.build();
SslFactory sslFactory = new SslFactory(ConnectionMode.SERVER, null, true);
for (String key : Arrays.asList(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG,
SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG,
SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG,
SslConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG)) {
sslConfig1.put(key, sslConfig2.get(key));
}
try {
sslFactory.configure(sslConfig1);
fail("Validation did not fail with untrusted truststore");
} catch (ConfigException e) {
// Expected exception
}
} |
@Override
@SuppressWarnings("unchecked")
public void onApplicationEvent(@NotNull final DataChangedEvent event) {
for (DataChangedListener listener : listeners) {
if ((!(listener instanceof AbstractDataChangedListener))
&& clusterProperties.isEnabled()
&& Objects.nonNull(shenyuClusterSelectMasterService)
&& !shenyuClusterSelectMasterService.isMaster()) {
LOG.info("received DataChangedEvent, not master, pass");
return;
}
if (LOG.isDebugEnabled()) {
LOG.debug("received DataChangedEvent, dispatching, event:{}", JsonUtils.toJson(event));
}
switch (event.getGroupKey()) {
case APP_AUTH:
listener.onAppAuthChanged((List<AppAuthData>) event.getSource(), event.getEventType());
break;
case PLUGIN:
listener.onPluginChanged((List<PluginData>) event.getSource(), event.getEventType());
break;
case RULE:
listener.onRuleChanged((List<RuleData>) event.getSource(), event.getEventType());
break;
case SELECTOR:
listener.onSelectorChanged((List<SelectorData>) event.getSource(), event.getEventType());
break;
case META_DATA:
listener.onMetaDataChanged((List<MetaData>) event.getSource(), event.getEventType());
break;
case PROXY_SELECTOR:
listener.onProxySelectorChanged((List<ProxySelectorData>) event.getSource(), event.getEventType());
break;
case DISCOVER_UPSTREAM:
listener.onDiscoveryUpstreamChanged((List<DiscoverySyncData>) event.getSource(), event.getEventType());
applicationContext.getBean(LoadServiceDocEntry.class).loadDocOnUpstreamChanged((List<DiscoverySyncData>) event.getSource(), event.getEventType());
break;
default:
throw new IllegalStateException("Unexpected value: " + event.getGroupKey());
}
}
} | @Test
public void onApplicationEventWithAppAuthConfigGroupTest() {
when(clusterProperties.isEnabled()).thenReturn(true);
when(shenyuClusterSelectMasterService.isMaster()).thenReturn(true);
ConfigGroupEnum configGroupEnum = ConfigGroupEnum.APP_AUTH;
DataChangedEvent dataChangedEvent = new DataChangedEvent(configGroupEnum, null, new ArrayList<>());
dataChangedEventDispatcher.onApplicationEvent(dataChangedEvent);
verify(httpLongPollingDataChangedListener, times(1)).onAppAuthChanged(anyList(), any());
verify(nacosDataChangedListener, times(1)).onAppAuthChanged(anyList(), any());
verify(websocketDataChangedListener, times(1)).onAppAuthChanged(anyList(), any());
verify(zookeeperDataChangedListener, times(1)).onAppAuthChanged(anyList(), any());
} |
public static boolean containsIgnoreCase(List<String> list, String str) {
for (String i : list) {
if (i.equalsIgnoreCase(str)) {
return true;
}
}
return false;
} | @Test
void testContainsIgnoreCase() {
List<String> list = Arrays.asList("foo", "bar");
assertTrue(StringUtils.containsIgnoreCase(list, "foo"));
assertTrue(StringUtils.containsIgnoreCase(list, "Foo"));
assertFalse(StringUtils.containsIgnoreCase(list, "baz"));
} |
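An equivalent stream one-liner for the loop above; this sketch additionally guards against a null list, which the original would NPE on:

import java.util.Arrays;
import java.util.List;

public class ContainsIgnoreCaseSketch {
    static boolean containsIgnoreCase(List<String> list, String str) {
        return list != null && list.stream().anyMatch(s -> s.equalsIgnoreCase(str));
    }

    public static void main(String[] args) {
        List<String> list = Arrays.asList("foo", "bar");
        System.out.println(containsIgnoreCase(list, "Foo")); // true
        System.out.println(containsIgnoreCase(null, "foo")); // false, instead of an NPE
    }
}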
@Override
public Host filter(final KsqlHostInfo host) {
if (!heartbeatAgent.isPresent()) {
return Host.include(host);
}
final Map<KsqlHostInfo, HostStatus> allHostsStatus = heartbeatAgent.get().getHostsStatus();
final HostStatus status = allHostsStatus.get(host);
if (status == null) {
return Host.include(host);
}
if (status.isHostAlive()) {
return Host.include(host);
} else {
return Host.exclude(host, "Host is not alive as of time " + status.getLastStatusUpdateMs());
}
} | @Test
public void shouldFilterStandbyAlive() {
// Given:
allHostsStatus = ImmutableMap.of(
activeHost, HOST_DEAD,
standByHost1, HOST_ALIVE,
standByHost2, HOST_DEAD
);
when(heartbeatAgent.getHostsStatus()).thenReturn(allHostsStatus);
// When:
final Host filterActive = livenessFilter.filter(activeHost);
final Host filterStandby1 = livenessFilter.filter(standByHost1);
final Host filterStandby2 = livenessFilter.filter(standByHost2);
// Then:
assertThat(filterActive.isSelected(), is(false));
assertThat(filterActive.getReasonNotSelected(), is("Host is not alive as of time 0"));
assertThat(filterStandby1.isSelected(), is(true));
assertThat(filterStandby2.isSelected(), is(false));
assertThat(filterActive.getReasonNotSelected(), is("Host is not alive as of time 0"));
} |
public static HealthConfig load(String configName) {
return new HealthConfig(configName);
} | @Test
public void testLoad() {
HealthConfig config = HealthConfig.load();
assert(config != null);
} |
public static String formatXml(String xml){
try {
TransformerFactory factory = TransformerFactory.newInstance();
factory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
Transformer serializer= factory.newTransformer();
serializer.setOutputProperty(OutputKeys.INDENT, "yes");
serializer.setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "2");
Source xmlSource = new SAXSource(new InputSource(new StringReader(xml)));
StringWriter stringWriter = new StringWriter();
StreamResult res = new StreamResult(stringWriter);
serializer.transform(xmlSource, res);
return stringWriter.toString();
} catch (Exception e) {
return xml;
}
} | @Test
public void testFormatXmlInvalid() {
PrintStream origErr = System.err;
try {
// The parser will print an error, so let it go where we will not see it
System.setErr(new PrintStream(new OutputStream() {
@Override
public void write(int b) throws IOException {
// ignore output
}
}));
assertThat("No well formed xml here", CoreMatchers
.is(XPathUtil.formatXml("No well formed xml here")));
} finally {
System.setErr(origErr);
}
} |
public static long doubleHashCode(double value)
{
// canonicalize +0 and -0 to a single value
value = value == -0 ? 0 : value;
// doubleToLongBits converts all NaNs to the same representation
return AbstractLongType.hash(doubleToLongBits(value));
} | @Test
public void testDoubleHashCode()
{
assertEquals(doubleHashCode(0), doubleHashCode(Double.parseDouble("-0")));
//0x7ff8123412341234L is a different representation of NaN
assertEquals(doubleHashCode(Double.NaN), doubleHashCode(longBitsToDouble(0x7ff8123412341234L)));
} |
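The two normalizations above are load-bearing: +0.0 and -0.0 differ only in the sign bit, and IEEE 754 has many distinct NaN encodings, so hashing raw bits would break the rule that equal values hash equally. A small demo of the underlying bit patterns:

public class DoubleBitsDemo {
    public static void main(String[] args) {
        System.out.println(Double.doubleToRawLongBits(0.0));  // 0
        System.out.println(Double.doubleToRawLongBits(-0.0)); // -9223372036854775808 (sign bit set)
        // doubleToLongBits collapses every NaN to one canonical pattern
        System.out.println(Double.doubleToLongBits(Double.longBitsToDouble(0x7ff8123412341234L))
                == Double.doubleToLongBits(Double.NaN));       // true
    }
}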
public <T> Serializer<T> getSerializer(Class<T> c) {
Serialization<T> serializer = getSerialization(c);
if (serializer != null) {
return serializer.getSerializer(c);
}
return null;
} | @Test
public void testGetSerializer() {
// Test that a valid serializer class is returned when its present
assertNotNull("A valid class must be returned for default Writable SerDe",
factory.getSerializer(Writable.class));
// Test that a null is returned when none can be found.
assertNull("A null should be returned if there are no serializers found.",
factory.getSerializer(TestSerializationFactory.class));
} |
public FileIO getFileIO(StorageType.Type storageType) throws IllegalArgumentException {
Supplier<? extends RuntimeException> exceptionSupplier =
() -> new IllegalArgumentException(storageType.getValue() + " is not configured");
if (HDFS.equals(storageType)) {
return Optional.ofNullable(hdfsFileIO).orElseThrow(exceptionSupplier);
} else if (LOCAL.equals(storageType)) {
return Optional.ofNullable(localFileIO).orElseThrow(exceptionSupplier);
} else if (S3.equals(storageType)) {
return Optional.ofNullable(s3FileIO).orElseThrow(exceptionSupplier);
} else if (ADLS.equals(storageType)) {
return Optional.ofNullable(adlsFileIO).orElseThrow(exceptionSupplier);
} else {
throw new IllegalArgumentException("FileIO not supported for storage type: " + storageType);
}
} | @Test
public void testGetLocalFileIO() {
// local storage is configured
Assertions.assertNotNull(fileIOManager.getFileIO(StorageType.LOCAL));
} |
@Udf(description = "Converts a string representation of a date in the given format"
+ " into a DATE value.")
public Date parseDate(
@UdfParameter(
description = "The string representation of a date.") final String formattedDate,
@UdfParameter(
description = "The format pattern should be in the format expected by"
+ " java.text.SimpleDateFormat.") final String formatPattern) {
if (formattedDate == null || formatPattern == null) {
return null;
}
try {
final long time = formatters.get(formatPattern).parse(formattedDate).getTime();
if (time % MILLIS_IN_DAY != 0) {
throw new KsqlFunctionException("Date format contains time field.");
}
return new Date(time);
} catch (final ExecutionException | RuntimeException | ParseException e) {
throw new KsqlFunctionException("Failed to parse date '" + formattedDate
+ "' with formatter '" + formatPattern
+ "': " + e.getMessage(), e);
}
} | @Test
public void shouldConvertYearMonthToDate() {
// When:
final Date result = udf.parseDate("2021-12", "yyyy-MM");
// Then:
assertThat(result.getTime(), is(1638316800000L));
} |
public boolean setNewAssignee(DefaultIssue issue, @Nullable UserIdDto userId, IssueChangeContext context) {
if (userId == null) {
return false;
}
checkState(issue.assignee() == null, "It's not possible to update the assignee with this method, please use assign()");
issue.setFieldChange(context, ASSIGNEE, UNUSED, userId.getUuid());
issue.setAssigneeUuid(userId.getUuid());
issue.setAssigneeLogin(userId.getLogin());
issue.setUpdateDate(context.date());
issue.setChanged(true);
issue.setSendNotifications(true);
return true;
} | @Test
void set_new_assignee() {
boolean updated = underTest.setNewAssignee(issue, new UserIdDto("user_uuid", "user_login"), context);
assertThat(updated).isTrue();
assertThat(issue.assignee()).isEqualTo("user_uuid");
assertThat(issue.assigneeLogin()).isEqualTo("user_login");
assertThat(issue.mustSendNotifications()).isTrue();
FieldDiffs.Diff diff = issue.currentChange().get(ASSIGNEE);
assertThat(diff.oldValue()).isEqualTo(UNUSED);
assertThat(diff.newValue()).isEqualTo("user_uuid");
} |
@Override
public synchronized T getValue(int index) {
BarSeries series = getBarSeries();
if (series == null) {
// Series is null; the indicator doesn't need cache.
// (e.g. simple computation of the value)
// --> Calculating the value
T result = calculate(index);
if (log.isTraceEnabled()) {
log.trace("{}({}): {}", this, index, result);
}
return result;
}
// Series is not null
final int removedBarsCount = series.getRemovedBarsCount();
final int maximumResultCount = series.getMaximumBarCount();
T result;
if (index < removedBarsCount) {
// Result already removed from cache
if (log.isTraceEnabled()) {
log.trace("{}: result from bar {} already removed from cache, use {}-th instead",
getClass().getSimpleName(), index, removedBarsCount);
}
increaseLengthTo(removedBarsCount, maximumResultCount);
highestResultIndex = removedBarsCount;
result = results.get(0);
if (result == null) {
// It should be "result = calculate(removedBarsCount);".
// We use "result = calculate(0);" as a workaround
// to fix issue #120 (https://github.com/mdeverdelhan/ta4j/issues/120).
result = calculate(0);
results.set(0, result);
}
} else {
if (index == series.getEndIndex()) {
// Don't cache result if last bar
result = calculate(index);
} else {
increaseLengthTo(index, maximumResultCount);
if (index > highestResultIndex) {
// Result not calculated yet
highestResultIndex = index;
result = calculate(index);
results.set(results.size() - 1, result);
} else {
// Result covered by current cache
int resultInnerIndex = results.size() - 1 - (highestResultIndex - index);
result = results.get(resultInnerIndex);
if (result == null) {
result = calculate(index);
results.set(resultInnerIndex, result);
}
}
}
}
if (log.isTraceEnabled()) {
log.trace("{}({}): {}", this, index, result);
}
return result;
} | @Test // should not be null
public void getValueWithNullBarSeries() {
ConstantIndicator<Num> constant = new ConstantIndicator<>(
new BaseBarSeriesBuilder().withNumTypeOf(numFunction).build(), numFunction.apply(10));
assertEquals(numFunction.apply(10), constant.getValue(0));
assertEquals(numFunction.apply(10), constant.getValue(100));
assertNotNull(constant.getBarSeries());
SMAIndicator sma = new SMAIndicator(constant, 10);
assertEquals(numFunction.apply(10), sma.getValue(0));
assertEquals(numFunction.apply(10), sma.getValue(100));
assertNotNull(sma.getBarSeries());
} |
public T getResult() {
return result;
} | @Test
public void testEthSyncingInProgress() {
buildResponse(
"{\n"
+ " \"id\":1,\n"
+ " \"jsonrpc\": \"2.0\",\n"
+ " \"result\": {\n"
+ " \"startingBlock\": \"0x384\",\n"
+ " \"currentBlock\": \"0x386\",\n"
+ " \"highestBlock\": \"0x454\"\n"
+ " }\n"
+ "}");
// Response received from Geth node
// "{\"jsonrpc\":\"2.0\",\"id\":1,\"result\":{\"currentBlock\":\"0x117a\",
// \"highestBlock\":\"0x21dab4\",\"knownStates\":\"0x0\",\"pulledStates\":\"0x0\",
// \"startingBlock\":\"0xa51\"}}"
EthSyncing ethSyncing = deserialiseResponse(EthSyncing.class);
assertEquals(
ethSyncing.getResult(),
(new EthSyncing.Syncing("0x384", "0x386", "0x454", null, null)));
} |
@SuppressWarnings("unchecked")
public static void validateFormat(Object offsetData) {
if (offsetData == null)
return;
if (!(offsetData instanceof Map))
throw new DataException("Offsets must be specified as a Map");
validateFormat((Map<Object, Object>) offsetData);
} | @Test
public void testValidateFormatMapWithNonStringKeys() {
Map<Object, Object> offsetData = new HashMap<>();
offsetData.put("k1", "v1");
offsetData.put(1, "v2");
DataException e = assertThrows(DataException.class, () -> OffsetUtils.validateFormat(offsetData));
assertThat(e.getMessage(), containsString("Offsets may only use String keys"));
} |
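// --- Hedged sketch, not part of the dataset row above ---
// The two failure modes the focal method and test exercise, restated without
// Connect dependencies (IllegalArgumentException stands in for DataException;
// the message strings mirror the ones asserted above):
import java.util.HashMap;
import java.util.Map;

public class OffsetFormatSketch {
    static void validateFormat(Object offsetData) {
        if (offsetData == null) {
            return; // null offsets are permitted
        }
        if (!(offsetData instanceof Map)) {
            throw new IllegalArgumentException("Offsets must be specified as a Map");
        }
        for (Object key : ((Map<?, ?>) offsetData).keySet()) {
            if (!(key instanceof String)) {
                throw new IllegalArgumentException("Offsets may only use String keys");
            }
        }
    }

    public static void main(String[] args) {
        Map<Object, Object> offsets = new HashMap<>();
        offsets.put("k1", "v1");
        offsets.put(1, "v2"); // non-String key, as in the test
        try {
            validateFormat(offsets);
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}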
public static boolean containsObject(Object obj) {
if (obj == null) {
return false;
}
// get object set
Set<Object> objectSet = OBJECT_SET_LOCAL.get();
if (objectSet.isEmpty()) {
return false;
}
return objectSet.contains(getUniqueSubstituteObject(obj));
} | @Test
public void testContainsObject() {
Assertions.assertFalse(CycleDependencyHandler.containsObject(null));
} |
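// --- Hedged sketch, not part of the dataset row above ---
// The pattern behind containsObject, approximated standalone: a thread-local
// identity set tracks objects on the current traversal path, so seeing one
// twice signals a cycle. The real handler's getUniqueSubstituteObject wrapper
// is replaced here by plain reference identity:
import java.util.Collections;
import java.util.IdentityHashMap;
import java.util.Set;

public class CycleTrackerSketch {
    private static final ThreadLocal<Set<Object>> SEEN =
            ThreadLocal.withInitial(() -> Collections.newSetFromMap(new IdentityHashMap<>()));

    static boolean enter(Object obj) {
        return obj != null && !SEEN.get().add(obj); // true => already on path => cycle
    }

    static void exit(Object obj) {
        if (obj != null) {
            SEEN.get().remove(obj);
        }
    }

    public static void main(String[] args) {
        Object node = new Object();
        System.out.println(enter(node)); // false: first visit
        System.out.println(enter(node)); // true: revisit => cycle detected
        exit(node);
    }
}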
Future<Boolean> canRollController(int nodeId) {
LOGGER.debugCr(reconciliation, "Determining whether controller pod {} can be rolled", nodeId);
return describeMetadataQuorum().map(info -> {
boolean canRoll = isQuorumHealthyWithoutNode(nodeId, info);
if (!canRoll) {
LOGGER.debugCr(reconciliation, "Not restarting controller pod {}. Restart would affect the quorum health", nodeId);
}
return canRoll;
}).recover(error -> {
LOGGER.warnCr(reconciliation, "Error determining whether it is safe to restart controller pod {}", nodeId, error);
return Future.failedFuture(error);
});
} | @Test
public void shouldTestDynamicTimeoutValue(VertxTestContext context) {
Map<Integer, OptionalLong> controllers = new HashMap<>();
controllers.put(1, OptionalLong.of(10000L));
controllers.put(2, OptionalLong.of(9950L)); // Edge case close to the timeout
Admin admin = setUpMocks(1, controllers);
long dynamicTimeout = 100L; // Dynamic timeout value
KafkaQuorumCheck quorumCheck = new KafkaQuorumCheck(Reconciliation.DUMMY_RECONCILIATION, admin, vertx, dynamicTimeout);
quorumCheck.canRollController(1).onComplete(context.succeeding(result -> {
context.verify(() -> assertTrue(result));
context.completeNow();
}));
} |
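// --- Hedged sketch, not part of the dataset row above ---
// The majority rule isQuorumHealthyWithoutNode is named after, restated
// standalone (the real Strimzi check has additional special cases): ignoring
// the node to restart, a majority of all voters must have fetched from the
// leader within the quorum fetch timeout, and the leader counts as caught up
// by definition. The ids, timestamps, and timeout below are invented:
import java.util.Map;

public class QuorumHealthSketch {
    static boolean canRollWithoutNode(int nodeToRoll, int leaderId, long fetchTimeoutMs,
                                      Map<Integer, Long> lastCaughtUpMs) {
        long leaderTs = lastCaughtUpMs.get(leaderId);
        long caughtUp = lastCaughtUpMs.entrySet().stream()
                .filter(e -> e.getKey() != nodeToRoll)
                .filter(e -> e.getKey() == leaderId
                        || leaderTs - e.getValue() < fetchTimeoutMs)
                .count();
        return caughtUp >= (lastCaughtUpMs.size() / 2) + 1; // majority of all voters
    }

    public static void main(String[] args) {
        Map<Integer, Long> voters = Map.of(1, 10000L, 2, 9950L, 3, 9800L);
        // Node 3 lags beyond the 100 ms window, so restarting it is safe...
        System.out.println(canRollWithoutNode(3, 1, 100L, voters)); // true
        // ...but restarting the leader leaves only one caught-up voter of three.
        System.out.println(canRollWithoutNode(1, 1, 100L, voters)); // false
    }
}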
public static Predicate parse(String expression)
{
final Stack<Predicate> predicateStack = new Stack<>();
final Stack<Character> operatorStack = new Stack<>();
final String trimmedExpression = TRIMMER_PATTERN.matcher(expression).replaceAll("");
final StringTokenizer tokenizer = new StringTokenizer(trimmedExpression, OPERATORS, true);
boolean isTokenMode = true;
while (true)
{
final Character operator;
final String token;
if (isTokenMode)
{
if (tokenizer.hasMoreTokens())
{
token = tokenizer.nextToken();
}
else
{
break;
}
if (OPERATORS.contains(token))
{
operator = token.charAt(0);
}
else
{
operator = null;
}
}
else
{
operator = operatorStack.pop();
token = null;
}
isTokenMode = true;
if (operator == null)
{
try
{
predicateStack.push(Class.forName(token).asSubclass(Predicate.class).getDeclaredConstructor().newInstance());
}
catch (ClassCastException e)
{
throw new RuntimeException(token + " must implement " + Predicate.class.getName(), e);
}
catch (Exception e)
{
throw new RuntimeException(e);
}
}
else
{
if (operatorStack.empty() || operator == '(')
{
operatorStack.push(operator);
}
else if (operator == ')')
{
while (operatorStack.peek() != '(')
{
evaluate(predicateStack, operatorStack);
}
operatorStack.pop();
}
else
{
if (OPERATOR_PRECEDENCE.get(operator) < OPERATOR_PRECEDENCE.get(operatorStack.peek()))
{
evaluate(predicateStack, operatorStack);
isTokenMode = false;
}
operatorStack.push(operator);
}
}
}
while (!operatorStack.empty())
{
evaluate(predicateStack, operatorStack);
}
if (predicateStack.size() > 1)
{
throw new RuntimeException("Invalid logical expression");
}
return predicateStack.pop();
} | @Test
public void testNotParenAnd()
{
final Predicate parsed = PredicateExpressionParser.parse("!(com.linkedin.data.it.AlwaysTruePredicate & com.linkedin.data.it.AlwaysFalsePredicate)");
Assert.assertEquals(parsed.getClass(), NotPredicate.class);
final Predicate intermediate = ((NotPredicate) parsed).getChildPredicate();
Assert.assertEquals(intermediate.getClass(), AndPredicate.class);
final List<Predicate> children = ((AndPredicate) intermediate).getChildPredicates();
Assert.assertEquals(children.get(0).getClass(), AlwaysTruePredicate.class);
Assert.assertEquals(children.get(1).getClass(), AlwaysFalsePredicate.class);
} |
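// --- Hedged sketch, not part of the dataset row above ---
// The focal method is a shunting-yard parser over predicate class names. The
// same control flow is easier to follow on boolean literals; this stand-in
// assumes the usual precedence ! > & > | and evaluates directly instead of
// building predicate objects:
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.Map;

public class BoolExprSketch {
    private static final Map<Character, Integer> PRECEDENCE = Map.of('!', 3, '&', 2, '|', 1);

    static boolean eval(String expr) {
        Deque<Boolean> operands = new ArrayDeque<>();
        Deque<Character> operators = new ArrayDeque<>();
        for (char c : expr.replaceAll("\\s+", "").toCharArray()) {
            if (c == 'T' || c == 'F') {
                operands.push(c == 'T');
            } else if (c == '(') {
                operators.push(c);
            } else if (c == ')') {
                while (operators.peek() != '(') {
                    apply(operands, operators.pop());
                }
                operators.pop(); // discard '('
            } else { // '!', '&' or '|'
                while (!operators.isEmpty() && operators.peek() != '('
                        && PRECEDENCE.get(operators.peek()) > PRECEDENCE.get(c)) {
                    apply(operands, operators.pop());
                }
                operators.push(c);
            }
        }
        while (!operators.isEmpty()) {
            apply(operands, operators.pop());
        }
        return operands.pop();
    }

    private static void apply(Deque<Boolean> operands, char op) {
        if (op == '!') {
            operands.push(!operands.pop());
        } else {
            boolean right = operands.pop();
            boolean left = operands.pop();
            operands.push(op == '&' ? left & right : left | right);
        }
    }

    public static void main(String[] args) {
        System.out.println(eval("!(T & F)")); // true: the shape of the test above
        System.out.println(eval("!T & F"));   // false: '!' binds tighter than '&'
    }
}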
public FloatArrayAsIterable usingTolerance(double tolerance) {
return new FloatArrayAsIterable(tolerance(tolerance), iterableSubject());
} | @Test
public void usingTolerance_containsAtLeast_primitiveFloatArray_success() {
assertThat(array(1.1f, TOLERABLE_2POINT2, 3.3f))
.usingTolerance(DEFAULT_TOLERANCE)
.containsAtLeast(array(2.2f, 1.1f));
} |
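// --- Hedged sketch, not part of the dataset row above ---
// The fuzzy correspondence behind usingTolerance, restated standalone: two
// floats match when their absolute difference is within the tolerance. The
// constants below are invented stand-ins for DEFAULT_TOLERANCE and
// TOLERABLE_2POINT2:
public class ToleranceSketch {
    static boolean withinTolerance(float actual, float expected, double tolerance) {
        return Math.abs((double) actual - (double) expected) <= tolerance;
    }

    public static void main(String[] args) {
        float tolerable = 2.2f + 1e-6f; // just inside the window around 2.2f
        System.out.println(withinTolerance(tolerable, 2.2f, 1e-5)); // true
        System.out.println(withinTolerance(3.3f, 2.2f, 1e-5));      // false
    }
}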
public static ValueLabel formatPacketRate(long packets) {
return new ValueLabel(packets, PACKETS_UNIT).perSec();
} | @Test
public void formatPacketRateKilo2() {
vl = TopoUtils.formatPacketRate(1034);
assertEquals(AM_WL, "1.01 Kpps", vl.toString());
} |
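// --- Hedged sketch, not part of the dataset row above ---
// The conversion the assertion implies: 1034 pps rendering as "1.01 Kpps"
// means a divisor of 1024 with two-decimal rounding (a decimal 1000 would
// give "1.03 Kpps"). Only the Kpps range is sketched; the real formatter
// presumably continues to Mpps and beyond:
public class PacketRateSketch {
    static String formatPacketRate(long packets) {
        if (packets < 1024) {
            return packets + " pps";
        }
        return String.format("%.2f Kpps", packets / 1024.0);
    }

    public static void main(String[] args) {
        System.out.println(formatPacketRate(1034)); // 1.01 Kpps
    }
}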
public static String toSQL(ParseNode statement) {
return new AST2SQLBuilderVisitor(false, false, true).visit(statement);
} | @Test
public void testInsertFromFiles() {
String sql = "insert into t0 (v1, v2)" +
"select * from files('path' = 's3://xxx/zzz', 'format' = 'parquet', 'aws.s3.access_key' = 'ghi', " +
"'aws.s3.secret_key' = 'jkl', 'aws.s3.region' = 'us-west-1')";
StatementBase stmt = SqlParser.parseSingleStatement(sql, SqlModeHelper.MODE_DEFAULT);
Assert.assertEquals(
"INSERT INTO `t0` (`v1`,`v2`) " +
"SELECT *\nFROM FILES(\"aws.s3.access_key\" = \"***\", \"aws.s3.region\" = \"us-west-1\", " +
"\"aws.s3.secret_key\" = \"***\", \"format\" = \"parquet\", \"path\" = \"s3://xxx/zzz\")",
AstToSQLBuilder.toSQL(stmt));
} |
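// --- Hedged sketch, not part of the dataset row above ---
// The masking the expected SQL shows: FILES() properties are printed back
// sorted by key, with credential-bearing values replaced by "***". The rule
// for which keys count as credentials is an assumption here:
import java.util.Map;
import java.util.TreeMap;
import java.util.stream.Collectors;

public class CredentialMaskSketch {
    static String printProperties(Map<String, String> props) {
        return new TreeMap<>(props).entrySet().stream()
                .map(e -> "\"" + e.getKey() + "\" = \""
                        + (e.getKey().endsWith("access_key") || e.getKey().endsWith("secret_key")
                                ? "***" : e.getValue())
                        + "\"")
                .collect(Collectors.joining(", ", "FILES(", ")"));
    }

    public static void main(String[] args) {
        System.out.println(printProperties(Map.of(
                "path", "s3://xxx/zzz",
                "format", "parquet",
                "aws.s3.access_key", "ghi",
                "aws.s3.secret_key", "jkl",
                "aws.s3.region", "us-west-1")));
        // FILES("aws.s3.access_key" = "***", "aws.s3.region" = "us-west-1", ...)
    }
}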
static List<ClassLoader> selectClassLoaders(ClassLoader classLoader) {
// list prevents reordering!
List<ClassLoader> classLoaders = new ArrayList<>();
if (classLoader != null) {
classLoaders.add(classLoader);
}
// check if TCCL is same as given classLoader
ClassLoader tccl = Thread.currentThread().getContextClassLoader();
if (tccl != null && tccl != classLoader) {
classLoaders.add(tccl);
}
// Hazelcast core classLoader
ClassLoader coreClassLoader = ServiceLoader.class.getClassLoader();
if (coreClassLoader != classLoader && coreClassLoader != tccl) {
classLoaders.add(coreClassLoader);
}
// Hazelcast client classLoader
try {
Class<?> hzClientClass = Class.forName("com.hazelcast.client.HazelcastClient");
ClassLoader clientClassLoader = hzClientClass.getClassLoader();
if (clientClassLoader != classLoader && clientClassLoader != tccl && clientClassLoader != coreClassLoader) {
classLoaders.add(clientClassLoader);
}
} catch (ClassNotFoundException ignore) {
// ignore since we may not have the HazelcastClient in the classpath
ignore(ignore);
}
return classLoaders;
} | @Test
public void selectingSameTcclAndGivenClassLoader() {
ClassLoader same = new URLClassLoader(new URL[0]);
Thread currentThread = Thread.currentThread();
ClassLoader tccl = currentThread.getContextClassLoader();
currentThread.setContextClassLoader(same);
List<ClassLoader> classLoaders = ServiceLoader.selectClassLoaders(same);
currentThread.setContextClassLoader(tccl);
assertEquals(2, classLoaders.size());
} |
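// --- Hedged sketch, not part of the dataset row above ---
// The deduplication the test relies on, standalone: candidate loaders are
// added in priority order and identical references are skipped, so making the
// TCCL equal to the given loader yields one fewer entry (the Hazelcast client
// loader check is omitted here):
import java.net.URL;
import java.net.URLClassLoader;
import java.util.ArrayList;
import java.util.List;

public class ClassLoaderSelectionSketch {
    static List<ClassLoader> select(ClassLoader given) {
        List<ClassLoader> loaders = new ArrayList<>();
        if (given != null) {
            loaders.add(given);
        }
        ClassLoader tccl = Thread.currentThread().getContextClassLoader();
        if (tccl != null && tccl != given) {
            loaders.add(tccl); // skipped when TCCL == given
        }
        ClassLoader core = ClassLoaderSelectionSketch.class.getClassLoader();
        if (core != given && core != tccl) {
            loaders.add(core);
        }
        return loaders;
    }

    public static void main(String[] args) throws Exception {
        Thread t = Thread.currentThread();
        ClassLoader old = t.getContextClassLoader();
        try (URLClassLoader same = new URLClassLoader(new URL[0])) {
            t.setContextClassLoader(same);
            System.out.println(select(same).size()); // 2: given + this class's loader
        } finally {
            t.setContextClassLoader(old);
        }
    }
}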