focal_method | test_case |
---|---|
public static void main(String[] args) throws Exception {
// construct a new executor that will run async tasks
var executor = new ThreadAsyncExecutor();
// start a few async tasks with varying processing times, the last two with callback handlers
final var asyncResult1 = executor.startProcess(lazyval(10, 500));
final var asyncResult2 = executor.startProcess(lazyval("test", 300));
final var asyncResult3 = executor.startProcess(lazyval(50L, 700));
final var asyncResult4 = executor.startProcess(lazyval(20, 400),
callback("Deploying lunar rover"));
final var asyncResult5 =
executor.startProcess(lazyval("callback", 600), callback("Deploying lunar rover"));
// emulate processing in the current thread while async tasks are running in their own threads
Thread.sleep(350); // Oh boy, we are working hard here
log("Mission command is sipping coffee");
// wait for completion of the tasks
final var result1 = executor.endProcess(asyncResult1);
final var result2 = executor.endProcess(asyncResult2);
final var result3 = executor.endProcess(asyncResult3);
asyncResult4.await();
asyncResult5.await();
// log the results of the tasks, callbacks log immediately when complete
log(String.format(ROCKET_LAUNCH_LOG_PATTERN, result1));
log(String.format(ROCKET_LAUNCH_LOG_PATTERN, result2));
log(String.format(ROCKET_LAUNCH_LOG_PATTERN, result3));
} | @Test
void shouldExecuteApplicationWithoutException() {
assertDoesNotThrow(() -> App.main(new String[]{}));
} |
public static SchemaPairCompatibility checkReaderWriterCompatibility(final Schema reader, final Schema writer) {
final SchemaCompatibilityResult compatibility = new ReaderWriterCompatibilityChecker().getCompatibility(reader,
writer);
final String message;
switch (compatibility.getCompatibility()) {
case INCOMPATIBLE: {
message = String.format(
"Data encoded using writer schema:%n%s%n" + "will or may fail to decode using reader schema:%n%s%n",
writer.toString(true), reader.toString(true));
break;
}
case COMPATIBLE: {
message = READER_WRITER_COMPATIBLE_MESSAGE;
break;
}
default:
throw new AvroRuntimeException("Unknown compatibility: " + compatibility);
}
return new SchemaPairCompatibility(compatibility, reader, writer, message);
} | @Test
void validateSchemaPairMissingSecondField() {
final List<Schema.Field> readerFields = list(new Schema.Field("oldfield2", STRING_SCHEMA, null, null));
final Schema reader = Schema.createRecord(null, null, null, false, readerFields);
final SchemaCompatibility.SchemaPairCompatibility expectedResult = new SchemaCompatibility.SchemaPairCompatibility(
SchemaCompatibility.SchemaCompatibilityResult.compatible(), reader, WRITER_SCHEMA,
SchemaCompatibility.READER_WRITER_COMPATIBLE_MESSAGE);
// Test omitting the other field.
assertEquals(expectedResult, checkReaderWriterCompatibility(reader, WRITER_SCHEMA));
} |
@Override
public void getClient(Request request, RequestContext requestContext, Callback<TransportClient> clientCallback)
{
URI uri = request.getURI();
debug(_log, "get client for uri: ", uri);
if (!D2_SCHEME_NAME.equalsIgnoreCase(uri.getScheme()))
{
throw new IllegalArgumentException("Unsupported scheme in URI " + uri);
}
// get the service for this uri
String extractedServiceName = LoadBalancerUtil.getServiceNameFromUri(uri);
listenToServiceAndCluster(extractedServiceName, Callbacks.handle(service -> {
String serviceName = service.getServiceName();
String clusterName = service.getClusterName();
try
{
ClusterProperties cluster = getClusterProperties(serviceName, clusterName);
// Check if we want to override the service URL and bypass choosing among the existing
// tracker clients. This is useful when the service we want is not announcing itself to
// the cluster, i.e. a private service for a set of clients. This mechanism is deprecated;
// use host override list instead.
@SuppressWarnings("deprecation")
URI targetService = LoadBalancerUtil.TargetHints.getRequestContextTargetService(requestContext);
// Checks if we have a host override list provided in the request context. If present,
// get the available override URI for the current cluster and service names.
HostOverrideList overrides = (HostOverrideList) requestContext.getLocalAttr(HOST_OVERRIDE_LIST);
URI override = overrides == null ? null : overrides.getOverride(clusterName, serviceName);
if (targetService == null && override == null)
{
LoadBalancerStateItem<UriProperties> uriItem = getUriItem(serviceName, clusterName, cluster);
UriProperties uris = uriItem.getProperty();
List<LoadBalancerState.SchemeStrategyPair> orderedStrategies =
_state.getStrategiesForService(serviceName, service.getPrioritizedSchemes());
TrackerClient trackerClient = null;
// Use client provided by CustomURIAffinityRoutingProvider when it's enabled
CustomAffinityRoutingURIProvider customAffinityRoutingURIProvider =
(CustomAffinityRoutingURIProvider) requestContext.getLocalAttr(CustomAffinityRoutingURIProvider.CUSTOM_AFFINITY_ROUTING_URI_PROVIDER);
boolean enableCustomAffinityRouting = isCustomAffinityRoutingEnabled(requestContext, customAffinityRoutingURIProvider);
if (enableCustomAffinityRouting)
{
trackerClient = customAffinityRoutingURIProvider.getTargetHostURI(clusterName)
.map(targetHost -> _state.getClient(serviceName, targetHost))
.orElse(null);
}
if (trackerClient == null)
{
trackerClient =
chooseTrackerClient(request, requestContext, serviceName, clusterName, cluster, uriItem, uris,
orderedStrategies, service);
// Set the host URI for the cluster so that, for the same inbound request, any later downstream request
// to the same cluster with custom affinity routing enabled goes to the same box.
if (enableCustomAffinityRouting)
{
customAffinityRoutingURIProvider.setTargetHostURI(clusterName, trackerClient.getUri());
}
}
String clusterAndServiceUriString = trackerClient.getUri() + service.getPath();
_serviceAvailableStats.inc();
clientCallback.onSuccess(new RewriteLoadBalancerClient(serviceName,
URI.create(clusterAndServiceUriString),
trackerClient));
}
else
{
URI target = override == null ? targetService : URI.create(override + service.getPath());
if (targetService != null && override != null)
{
_log.warn("Both TargetHints and HostOverrideList are found. HostOverList will take precedence %s.", target);
}
if (_log.isDebugEnabled())
{
_log.debug("Rewrite URI as specified in the TargetHints/HostOverrideList {} for cluster {} and service {}.",
target, clusterName, serviceName);
}
TransportClient transportClient = _state.getClient(serviceName, target.getScheme());
if (transportClient == null)
{
throw new ServiceUnavailableException(serviceName, String.format(
"PEGA_1001. Cannot find transportClient for service %s and scheme %s with URI specified in"
+ "TargetHints/HostOverrideList %s", serviceName, target.getScheme(), target));
}
clientCallback.onSuccess(new RewriteLoadBalancerClient(serviceName, target, transportClient));
}
}
catch (ServiceUnavailableException e)
{
clientCallback.onError(e);
}
}, clientCallback));
} | @Test (expectedExceptions = ServiceUnavailableException.class)
@SuppressWarnings("deprecation")
public void testGetClient() throws Exception
{
Map<String, LoadBalancerStrategyFactory<? extends LoadBalancerStrategy>> loadBalancerStrategyFactories =
new HashMap<>();
Map<String, TransportClientFactory> clientFactories = new HashMap<>();
List<String> prioritizedSchemes = new ArrayList<>();
MockStore<ServiceProperties> serviceRegistry = new MockStore<>();
MockStore<ClusterProperties> clusterRegistry = new MockStore<>();
MockStore<UriProperties> uriRegistry = new MockStore<>();
ScheduledExecutorService executorService = new SynchronousExecutorService();
//loadBalancerStrategyFactories.put("rr", new RandomLoadBalancerStrategyFactory());
loadBalancerStrategyFactories.put("degrader", new DegraderLoadBalancerStrategyFactoryV3());
// PrpcClientFactory();
clientFactories.put(PropertyKeys.HTTPS_SCHEME, new DoNothingClientFactory()); // new
// HttpClientFactory();
SimpleLoadBalancerState state =
new SimpleLoadBalancerState(executorService,
uriRegistry,
clusterRegistry,
serviceRegistry,
clientFactories,
loadBalancerStrategyFactories);
SimpleLoadBalancer loadBalancer =
new SimpleLoadBalancer(state, 5, TimeUnit.SECONDS, _d2Executor);
FutureCallback<None> balancerCallback = new FutureCallback<>();
loadBalancer.start(balancerCallback);
balancerCallback.get(5, TimeUnit.SECONDS);
Map<URI, Map<Integer, PartitionData>> uriData = new HashMap<>(3);
prioritizedSchemes.add(PropertyKeys.HTTPS_SCHEME);
clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1"));
serviceRegistry.put("foo", new ServiceProperties("foo",
"cluster-1",
"/foo", Collections.singletonList("degrader"),
Collections.<String,Object>emptyMap(),
null,
null,
prioritizedSchemes,
null));
uriRegistry.put("cluster-1", new UriProperties("cluster-1", uriData));
URI uri = URI.create("http://test.qd.com:1234/foo");
RequestContext requestContextWithHint = new RequestContext();
LoadBalancerUtil.TargetHints.setRequestContextTargetService(requestContextWithHint, uri);
URIRequest uriRequest = new URIRequest("d2://foo");
loadBalancer.getClient(uriRequest, requestContextWithHint);
} |
@Override
public String toString() {
return ", " + column;
} | @Test
void assertGeneratedKeyInsertColumnTokenTest() {
assertThat(generatedKeyInsertColumnToken.toString(), is(", id"));
} |
@Override
public String buildContext() {
final String selector = ((Collection<?>) getSource())
.stream()
.map(s -> ((ResourceDO) s).getTitle())
.collect(Collectors.joining(","));
return String.format("the resource [%s] is %s", selector, StringUtils.lowerCase(getType().getType().toString()));
} | @Test
public void resourceCreateBuildContextTest() {
BatchResourceCreatedEvent createdEvent = new BatchResourceCreatedEvent(Arrays.asList(one, two), "test-operator");
String context = String.format("the resource [%s] is %s",
one.getTitle() + "," + two.getTitle(), StringUtils.lowerCase(EventTypeEnum.RESOURCE_CREATE.getType().toString()));
assertEquals(context, createdEvent.buildContext());
} |
void prepareAndDumpMetadata(String taskId) {
Map<String, String> metadata = new LinkedHashMap<>();
metadata.put("projectKey", moduleHierarchy.root().key());
metadata.put("serverUrl", server.getPublicRootUrl());
metadata.put("serverVersion", server.getVersion());
properties.branch().ifPresent(branch -> metadata.put("branch", branch));
URL dashboardUrl = buildDashboardUrl(server.getPublicRootUrl(), moduleHierarchy.root().key());
metadata.put("dashboardUrl", dashboardUrl.toExternalForm());
URL taskUrl = HttpUrl.parse(server.getPublicRootUrl()).newBuilder()
.addPathSegment("api").addPathSegment("ce").addPathSegment("task")
.addQueryParameter(ID, taskId)
.build()
.url();
metadata.put("ceTaskId", taskId);
metadata.put("ceTaskUrl", taskUrl.toExternalForm());
ceTaskReportDataHolder.init(taskId, taskUrl.toExternalForm(), dashboardUrl.toExternalForm());
dumpMetadata(metadata);
} | @Test
public void dump_information_to_custom_path() {
underTest.prepareAndDumpMetadata("TASK-123");
assertThat(properties.metadataFilePath()).exists();
assertThat(logTester.logs(Level.DEBUG)).contains("Report metadata written to " + properties.metadataFilePath());
} |
protected String changeInboundMessage(String currentEnvelope){
return currentEnvelope;
} | @Test
public void changeEnvelopMessageInbound() throws IOException {
Message message = new MessageImpl();
ByteArrayInputStream inputStream = new ByteArrayInputStream("Test".getBytes(StandardCharsets.UTF_8));
message.setContent(InputStream.class, inputStream);
Exchange exchange = new ExchangeImpl();
message.setExchange(exchange);
DebugChangeMessage debugChangeMessage = spy(new DebugChangeMessage());
debugChangeMessage.handleMessage(message);
assertEquals("Test", IOUtils.toString(message.getContent(InputStream.class), StandardCharsets.UTF_8));
verify(debugChangeMessage).changeInboundMessage(anyString());
} |
public List<T> findCycle() {
resetState();
for (T vertex : graph.getVertices()) {
if (colors.get(vertex) == WHITE) {
if (visitDepthFirst(vertex, new ArrayList<>(List.of(vertex)))) {
if (cycle == null) throw new IllegalStateException("Null cycle - this should never happen");
if (cycle.isEmpty()) throw new IllegalStateException("Empty cycle - this should never happen");
log.log(FINE, () -> "Cycle detected: " + cycle);
return cycle;
}
}
}
return new ArrayList<>();
} | @Test
void leading_nodes_are_stripped_from_cycle() {
var graph = new Graph<Vertices>();
graph.edge(A, B);
graph.edge(B, C);
graph.edge(C, B);
var cycleFinder = new CycleFinder<>(graph);
assertTrue(cycleFinder.findCycle().containsAll(List.of(B, C, B)));
} |
public List<String[]> getPathHostMappings() {
return pathHostMappings;
} | @Test
public void testHostMapping() {
ExternalServiceConfig config = new ExternalServiceConfig();
assert config.getPathHostMappings().size() == 4;
if (config.getPathHostMappings() != null) {
for (String[] parts : config.getPathHostMappings()) {
if ("/sharepoint".startsWith(parts[0])) {
String endpoint = parts[0] + "@" + "post";
assert endpoint.equals("/sharepoint@post");
}
}
}
} |
static PendingRestarts triggerPendingRestarts(Function<Set<String>, ServiceListResponse> convergenceChecker,
BiConsumer<ApplicationId, Set<String>> restarter,
ApplicationId id,
PendingRestarts restarts,
Logger log) {
Set<String> restartingHosts = restarts.hostnames();
if (restartingHosts.isEmpty()) return restarts;
ServiceListResponse convergence = convergenceChecker.apply(restartingHosts);
long lowestGeneration = convergence.currentGeneration;
Set<String> nodesToRestart = restarts.restartsReadyAt(lowestGeneration);
if (nodesToRestart.isEmpty()) {
log.info(String.format("Cannot yet restart nodes of %s, as some services are still on generation %d:\n\t%s",
id.toFullString(),
lowestGeneration,
convergence.services().stream()
.filter(service -> service.currentGeneration == lowestGeneration)
.map(service -> service.serviceInfo.getHostName() + ":" + service.serviceInfo.getServiceName())
.collect(Collectors.joining("\n\t"))));
return restarts;
}
restarter.accept(id, nodesToRestart);
log.info(String.format("Scheduled restart of %d nodes after observing generation %d: %s",
nodesToRestart.size(), lowestGeneration, nodesToRestart.stream().sorted().collect(Collectors.joining(", "))));
return restarts.withoutPreviousGenerations(lowestGeneration);
} | @Test
void testMaintenance() {
// Nothing happens with no pending restarts.
assertSame(PendingRestarts.empty(),
triggerPendingRestarts(hosts -> { fail("Should not be called"); return null; },
(id, hosts) -> { fail("Should not be called"); },
ApplicationId.defaultId(),
PendingRestarts.empty(),
log));
// Nothing happens when services are on a too low generation.
assertEquals(Map.of(1L, Set.of("a", "b"), 2L, Set.of("c")),
triggerPendingRestarts(hosts -> new ServiceListResponse(Map.of(), 3, 0),
(id, hosts) -> { fail("Should not be called"); },
ApplicationId.defaultId(),
PendingRestarts.empty()
.withRestarts(1, List.of("a", "b"))
.withRestarts(2, List.of("c")),
log)
.generationsForRestarts());
// Only the first hosts are restarted before the second generation is reached.
assertEquals(Map.of(2L, Set.of("c")),
triggerPendingRestarts(hosts -> new ServiceListResponse(Map.of(), 3, 1),
(id, hosts) -> {
assertEquals(ApplicationId.defaultId(), id);
assertEquals(Set.of("a", "b"), hosts);
},
ApplicationId.defaultId(),
PendingRestarts.empty()
.withRestarts(1, List.of("a", "b"))
.withRestarts(2, List.of("c")),
log)
.generationsForRestarts());
// All hosts are restarted when the second generation is reached.
assertEquals(Map.of(),
triggerPendingRestarts(hosts -> new ServiceListResponse(Map.of(), 3, 2),
(id, hosts) -> {
assertEquals(ApplicationId.defaultId(), id);
assertEquals(Set.of("a", "b", "c"), hosts);
},
ApplicationId.defaultId(),
PendingRestarts.empty()
.withRestarts(1, List.of("a", "b"))
.withRestarts(2, List.of("c")),
log)
.generationsForRestarts());
} |
public RuntimeOptionsBuilder parse(String... args) {
return parse(Arrays.asList(args));
} | @Test
void creates_html_formatter() {
RuntimeOptions options = parser
.parse("--plugin", "html:target/deeply/nested.html", "--glue", "somewhere")
.build();
Plugins plugins = new Plugins(new PluginFactory(), options);
plugins.setEventBusOnEventListenerPlugins(new TimeServiceEventBus(Clock.systemUTC(), UUID::randomUUID));
assertThat(plugins.getPlugins().get(0).getClass().getName(), is("io.cucumber.core.plugin.HtmlFormatter"));
} |
public static boolean del(Path path) throws IORuntimeException {
if (Files.notExists(path)) {
return true;
}
try {
if (isDirectory(path)) {
Files.walkFileTree(path, DelVisitor.INSTANCE);
} else {
delFile(path);
}
} catch (IOException e) {
throw new IORuntimeException(e);
}
return true;
} | @Test
@Disabled
public void delDirTest(){
PathUtil.del(Paths.get("d:/test/looly"));
} |
@Around("@annotation(com.linecorp.flagship4j.javaflagr.annotations.ControllerFeatureToggle)")
public Object processControllerFeatureToggleAnnotation(ProceedingJoinPoint joinPoint) throws Throwable {
log.info("start processing controllerFeatureToggle annotation");
MethodSignature signature = (MethodSignature) joinPoint.getSignature();
Method method = signature.getMethod();
Annotation[][] parameterAnnotations = method.getParameterAnnotations();
Object[] args = joinPoint.getArgs();
ControllerFeatureToggle featureToggle = method.getAnnotation(ControllerFeatureToggle.class);
Boolean isFlagOn = flagrService.isFeatureFlagOn(featureToggle.value());
if (Boolean.FALSE.equals(isFlagOn)) {
throw new FlagrApiNotFoundException();
}
outerlabel:
for (int argIndex = 0; argIndex < args.length; argIndex++) {
for (Annotation annotation : parameterAnnotations[argIndex]) {
if (annotation instanceof VariantKey) {
args[argIndex] = isFlagOn;
break outerlabel;
}
}
}
return joinPoint.proceed(args);
} | @Test
public void processFlagrMethodWithControllerFeatureToggleTestWhenReturnedFlagKeyIsFalseFlagrApiNotFoundException() {
String methodName = "methodWithControllerFeatureToggleWithoutVariantKey";
FlagrAnnotationTest flagrAnnotationTest = new FlagrAnnotationTest();
Method method = Arrays.stream(flagrAnnotationTest.getClass().getMethods()).filter(m -> m.getName().equals(methodName)).findFirst().get();
when(joinPoint.getSignature()).thenReturn(signature);
when(signature.getMethod()).thenReturn(method);
when(joinPoint.getArgs()).thenReturn(args);
when(flagrService.isFeatureFlagOn(any(String.class)))
.thenReturn(givenPostEvaluationResponse().getVariantKey().equals(EffectiveVariant.OFF.toValue()));
assertThrows(FlagrApiNotFoundException.class, () -> featureToggleAspect.processControllerFeatureToggleAnnotation(joinPoint));
verify(joinPoint, times(1)).getSignature();
verify(signature, times(1)).getMethod();
verify(joinPoint, times(1)).getArgs();
verify(signature, times(1)).getMethod();
verify(flagrService, times(1)).isFeatureFlagOn(any(String.class));
} |
static String getRelativeFileInternal(File canonicalBaseFile, File canonicalFileToRelativize) {
List<String> basePath = getPathComponents(canonicalBaseFile);
List<String> pathToRelativize = getPathComponents(canonicalFileToRelativize);
//if the roots aren't the same (i.e. different drives on a windows machine), we can't construct a relative
//path from one to the other, so just return the canonical file
if (!basePath.get(0).equals(pathToRelativize.get(0))) {
return canonicalFileToRelativize.getPath();
}
int commonDirs;
StringBuilder sb = new StringBuilder();
for (commonDirs=1; commonDirs<basePath.size() && commonDirs<pathToRelativize.size(); commonDirs++) {
if (!basePath.get(commonDirs).equals(pathToRelativize.get(commonDirs))) {
break;
}
}
boolean first = true;
for (int i=commonDirs; i<basePath.size(); i++) {
if (!first) {
sb.append(File.separatorChar);
} else {
first = false;
}
sb.append("..");
}
first = true;
for (int i=commonDirs; i<pathToRelativize.size(); i++) {
if (first) {
if (sb.length() != 0) {
sb.append(File.separatorChar);
}
first = false;
} else {
sb.append(File.separatorChar);
}
sb.append(pathToRelativize.get(i));
}
if (sb.length() == 0) {
return ".";
}
return sb.toString();
} | @Test
public void pathUtilTest13() {
File[] roots = File.listRoots();
File basePath = new File(roots[0] + "some");
File relativePath = new File(roots[0] + "some" + File.separatorChar + "dir" + File.separatorChar + "dir2" + File.separatorChar);
String path = PathUtil.getRelativeFileInternal(basePath, relativePath);
Assert.assertEquals(path, "dir" + File.separatorChar + "dir2");
} |
@Override
public <Request extends RequestCommand> boolean serializeContent(Request request, InvokeContext invokeContext)
throws SerializationException {
if (request instanceof RpcRequestCommand) {
RpcRequestCommand requestCommand = (RpcRequestCommand) request;
RpcInvokeContext.getContext().put(INTERNAL_KEY_RPC_REQUEST_COMMAND, requestCommand);
Object requestObject = requestCommand.getRequestObject();
byte serializerCode = requestCommand.getSerializer();
long serializeStartTime = System.nanoTime();
try {
Map<String, String> header = (Map<String, String>) requestCommand.getRequestHeader();
if (header == null) {
header = new HashMap<String, String>();
}
putKV(header, RemotingConstants.HEAD_GENERIC_TYPE,
(String) invokeContext.get(RemotingConstants.HEAD_GENERIC_TYPE));
Serializer rpcSerializer = com.alipay.sofa.rpc.codec.SerializerFactory
.getSerializer(serializerCode);
AbstractByteBuf byteBuf = rpcSerializer.encode(requestObject, header);
request.setContent(byteBuf.array());
return true;
} catch (Exception ex) {
throw new SerializationException(ex.getMessage(), ex);
} finally {
// R5: record request serialization time
recordSerializeRequest(requestCommand, invokeContext, serializeStartTime);
RpcInvokeContext.getContext().remove(INTERNAL_KEY_RPC_REQUEST_COMMAND);
}
}
return false;
} | @Test
public void serializeResponseContent() {
String traceId = "traceId";
String rpcId = "rpcId";
RpcInternalContext.getContext().setAttachment("_trace_id", traceId);
RpcInternalContext.getContext().setAttachment("_span_id", rpcId);
RpcResponseCommand command = new RpcResponseCommand();
SofaRpcSerialization sofaRpcSerialization = new SofaRpcSerialization();
boolean exp = false;
try {
sofaRpcSerialization.serializeContent(command);
} catch (SerializationException e) {
exp = true;
Assert.assertTrue(e.getMessage().contains("traceId=" + traceId + ", rpcId=" + rpcId));
}
Assert.assertTrue(exp);
} |
private static void connectAllToAll(
ExecutionJobVertex jobVertex,
IntermediateResult result,
JobVertexInputInfo jobVertexInputInfo) {
// check the vertex input info is legal
jobVertexInputInfo
.getExecutionVertexInputInfos()
.forEach(
executionVertexInputInfo -> {
IndexRange partitionRange =
executionVertexInputInfo.getPartitionIndexRange();
checkArgument(partitionRange.getStartIndex() == 0);
checkArgument(
partitionRange.getEndIndex()
== (result.getNumberOfAssignedPartitions() - 1));
});
connectInternal(
Arrays.asList(jobVertex.getTaskVertices()),
Arrays.asList(result.getPartitions()),
result.getResultType(),
jobVertex.getGraph().getEdgeManager());
} | @Test
void testConnectAllToAll() throws Exception {
int upstream = 3;
int downstream = 2;
// use dynamic graph to specify the vertex input info
ExecutionGraph eg = setupExecutionGraph(upstream, downstream, POINTWISE, true);
List<ExecutionVertexInputInfo> executionVertexInputInfos = new ArrayList<>();
for (int i = 0; i < downstream; i++) {
executionVertexInputInfos.add(
new ExecutionVertexInputInfo(
i,
new IndexRange(0, upstream - 1),
// the subpartition range will not be used in edge manager, so set (0,
// 0)
new IndexRange(0, 0)));
}
final JobVertexInputInfo jobVertexInputInfo =
new JobVertexInputInfo(executionVertexInputInfos);
final Iterator<ExecutionJobVertex> vertexIterator =
eg.getVerticesTopologically().iterator();
final ExecutionJobVertex producer = vertexIterator.next();
final ExecutionJobVertex consumer = vertexIterator.next();
// initialize producer and consumer
eg.initializeJobVertex(producer, 1L, Collections.emptyMap());
eg.initializeJobVertex(
consumer,
1L,
Collections.singletonMap(
producer.getProducedDataSets()[0].getId(), jobVertexInputInfo));
IntermediateResult result =
Objects.requireNonNull(eg.getJobVertex(producer.getJobVertexId()))
.getProducedDataSets()[0];
IntermediateResultPartition partition1 = result.getPartitions()[0];
IntermediateResultPartition partition2 = result.getPartitions()[1];
IntermediateResultPartition partition3 = result.getPartitions()[2];
ExecutionVertex vertex1 = consumer.getTaskVertices()[0];
ExecutionVertex vertex2 = consumer.getTaskVertices()[1];
// check consumers of the partitions
ConsumerVertexGroup consumerVertexGroup = partition1.getConsumerVertexGroups().get(0);
assertThat(consumerVertexGroup).containsExactlyInAnyOrder(vertex1.getID(), vertex2.getID());
assertThat(partition2.getConsumerVertexGroups().get(0)).isEqualTo(consumerVertexGroup);
assertThat(partition3.getConsumerVertexGroups().get(0)).isEqualTo(consumerVertexGroup);
// check inputs of the execution vertices
ConsumedPartitionGroup consumedPartitionGroup = vertex1.getConsumedPartitionGroup(0);
assertThat(consumedPartitionGroup)
.containsExactlyInAnyOrder(
partition1.getPartitionId(),
partition2.getPartitionId(),
partition3.getPartitionId());
assertThat(vertex2.getConsumedPartitionGroup(0)).isEqualTo(consumedPartitionGroup);
// check the consumerVertexGroup and consumedPartitionGroup are set to each other
assertThat(consumerVertexGroup.getConsumedPartitionGroup())
.isEqualTo(consumedPartitionGroup);
assertThat(consumedPartitionGroup.getConsumerVertexGroup()).isEqualTo(consumerVertexGroup);
} |
static Map<Integer, List<Integer>> parseReplicaAssignment(String replicaAssignmentList) {
String[] partitionList = replicaAssignmentList.split(",");
Map<Integer, List<Integer>> ret = new LinkedHashMap<>();
for (int i = 0; i < partitionList.length; i++) {
List<Integer> brokerList = Arrays.stream(partitionList[i].split(":"))
.map(String::trim)
.mapToInt(Integer::parseInt)
.boxed()
.collect(Collectors.toList());
Collection<Integer> duplicateBrokers = ToolsUtils.duplicates(brokerList);
if (!duplicateBrokers.isEmpty()) {
throw new AdminCommandFailedException("Partition replica lists may not contain duplicate entries: " +
duplicateBrokers.stream()
.map(Object::toString)
.collect(Collectors.joining(","))
);
}
ret.put(i, brokerList);
if (ret.get(i).size() != ret.get(0).size()) {
throw new AdminOperationException("Partition " + i + " has different replication factor: " + brokerList);
}
}
return ret;
} | @Test
public void testParseAssignmentDuplicateEntries() {
assertThrows(AdminCommandFailedException.class, () -> TopicCommand.parseReplicaAssignment("5:5"));
} |
@Override
public Optional<SimpleLock> lock(LockConfiguration lockConfiguration) {
if (lockConfiguration.getLockAtMostFor().compareTo(minimalLockAtMostFor) < 0) {
throw new IllegalArgumentException(
"Can not use KeepAliveLockProvider with lockAtMostFor shorter than " + minimalLockAtMostFor);
}
Optional<SimpleLock> lock = wrapped.lock(lockConfiguration);
return lock.map(simpleLock -> new KeepAliveLock(lockConfiguration, simpleLock, executorService));
} | @Test
void shouldFailForShortLockAtMostFor() {
assertThatThrownBy(() -> provider.lock(new LockConfiguration(now(), "short", ofMillis(100), ZERO)))
.isInstanceOf(IllegalArgumentException.class);
} |
@Override
public int choosePartition(Message<?> msg, TopicMetadata metadata) {
// If the message has a key, it supersedes the single partition routing policy
if (msg.hasKey()) {
return signSafeMod(hash.makeHash(msg.getKey()), metadata.numPartitions());
}
return partitionIndex;
} | @Test
public void testChoosePartitionWithKey() {
String key1 = "key1";
String key2 = "key2";
Message<?> msg1 = mock(Message.class);
when(msg1.hasKey()).thenReturn(true);
when(msg1.getKey()).thenReturn(key1);
Message<?> msg2 = mock(Message.class);
when(msg2.hasKey()).thenReturn(true);
when(msg2.getKey()).thenReturn(key2);
SinglePartitionMessageRouterImpl router = new SinglePartitionMessageRouterImpl(1234, HashingScheme.JavaStringHash);
TopicMetadataImpl metadata = new TopicMetadataImpl(100);
assertEquals(key1.hashCode() % 100, router.choosePartition(msg1, metadata));
assertEquals(key2.hashCode() % 100, router.choosePartition(msg2, metadata));
} |
public static Slice add(Slice left, Slice right)
{
Slice result = unscaledDecimal();
add(left, right, result);
return result;
} | @Test
public void testAdd()
{
assertEquals(add(unscaledDecimal(0), unscaledDecimal(0)), unscaledDecimal(0));
assertEquals(add(unscaledDecimal(1), unscaledDecimal(0)), unscaledDecimal(1));
assertEquals(add(unscaledDecimal(1), unscaledDecimal(1)), unscaledDecimal(2));
assertEquals(add(unscaledDecimal(1L << 32), unscaledDecimal(0)), unscaledDecimal(1L << 32));
assertEquals(add(unscaledDecimal(1L << 31), unscaledDecimal(1L << 31)), unscaledDecimal(1L << 32));
assertEquals(add(unscaledDecimal(1L << 32), unscaledDecimal(1L << 33)), unscaledDecimal((1L << 32) + (1L << 33)));
} |
public static RuleDescriptionSectionContextDto of(String key, String displayName) {
return new RuleDescriptionSectionContextDto(key, displayName);
} | @Test
void equals_with_one_null_objet_should_return_false() {
RuleDescriptionSectionContextDto context1 = RuleDescriptionSectionContextDto.of(CONTEXT_KEY, CONTEXT_DISPLAY_NAME);
assertThat(context1).isNotEqualTo(null);
} |
public CompletableFuture<Void> commitAsync(final Map<TopicPartition, OffsetAndMetadata> offsets) {
if (offsets.isEmpty()) {
log.debug("Skipping commit of empty offsets");
return CompletableFuture.completedFuture(null);
}
OffsetCommitRequestState commitRequest = createOffsetCommitRequest(offsets, Long.MAX_VALUE);
pendingRequests.addOffsetCommitRequest(commitRequest);
CompletableFuture<Void> asyncCommitResult = new CompletableFuture<>();
commitRequest.future.whenComplete((committedOffsets, error) -> {
if (error != null) {
asyncCommitResult.completeExceptionally(commitAsyncExceptionForError(error));
} else {
asyncCommitResult.complete(null);
}
});
return asyncCommitResult;
} | @Test
public void testAsyncCommitWhileCoordinatorUnknownIsSentOutWhenCoordinatorDiscovered() {
CommitRequestManager commitRequestManager = create(false, 0);
assertPoll(false, 0, commitRequestManager);
Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
offsets.put(new TopicPartition("t1", 0), new OffsetAndMetadata(0));
commitRequestManager.commitAsync(offsets);
assertPoll(false, 0, commitRequestManager);
assertPoll(true, 1, commitRequestManager);
} |
void appendValuesClause(StringBuilder sb) {
sb.append("VALUES ");
appendValues(sb, jdbcTable.dbFieldNames().size());
} | @Test
void appendValuesClause() {
MSSQLUpsertQueryBuilder builder = new MSSQLUpsertQueryBuilder(jdbcTable, dialect);
StringBuilder sb = new StringBuilder();
builder.appendValuesClause(sb);
String valuesClause = sb.toString();
assertThat(valuesClause).isEqualTo("VALUES (?, ?)");
} |
public OpenAPI read(Class<?> cls) {
return read(cls, resolveApplicationPath(), null, false, null, null, new LinkedHashSet<String>(), new ArrayList<Parameter>(), new HashSet<Class<?>>());
} | @Test(description = "Responses with ref")
public void testResponseWithRef() {
Components components = new Components();
components.addResponses("invalidJWT", new ApiResponse().description("when JWT token invalid/expired"));
OpenAPI oas = new OpenAPI()
.info(new Info().description("info"))
.components(components);
Reader reader = new Reader(oas);
OpenAPI openAPI = reader.read(RefResponsesResource.class);
String yaml = "openapi: 3.0.1\n" +
"info:\n" +
" description: info\n" +
"paths:\n" +
" /:\n" +
" get:\n" +
" summary: Simple get operation\n" +
" description: Defines a simple get operation with no inputs and a complex output\n" +
" object\n" +
" operationId: getWithPayloadResponse\n" +
" responses:\n" +
" \"200\":\n" +
" description: voila!\n" +
" content:\n" +
" application/json:\n" +
" schema:\n" +
" $ref: '#/components/schemas/SampleResponseSchema'\n" +
" default:\n" +
" description: boo\n" +
" content:\n" +
" '*/*':\n" +
" schema:\n" +
" $ref: '#/components/schemas/GenericError'\n" +
" \"401\":\n" +
" $ref: '#/components/responses/invalidJWT'\n" +
" deprecated: true\n" +
"components:\n" +
" schemas:\n" +
" GenericError:\n" +
" type: object\n" +
" SampleResponseSchema:\n" +
" type: object\n" +
" responses:\n" +
" invalidJWT:\n" +
" description: when JWT token invalid/expired";
SerializationMatchers.assertEqualsToYaml(openAPI, yaml);
} |
@Override
public Path move(final Path source, final Path target, final TransferStatus status, final Delete.Callback callback,
final ConnectionCallback connectionCallback) throws BackgroundException {
if(containerService.isContainer(source)) {
if(new SimplePathPredicate(source.getParent()).test(target.getParent())) {
// Rename only
return proxy.move(source, target, status, callback, connectionCallback);
}
}
if(new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(source) ^ new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(containerService.getContainer(target))) {
// Moving into or from an encrypted room
final Copy copy = new SDSDelegatingCopyFeature(session, nodeid, new SDSCopyFeature(session, nodeid));
if(log.isDebugEnabled()) {
log.debug(String.format("Move %s to %s using copy feature %s", source, target, copy));
}
final Path c = copy.copy(source, target, status, connectionCallback, new DisabledStreamListener());
// Delete source file after copy is complete
final Delete delete = new SDSDeleteFeature(session, nodeid);
if(delete.isSupported(source)) {
log.warn(String.format("Delete source %s copied to %s", source, target));
delete.delete(Collections.singletonMap(source, status), connectionCallback, callback);
}
return c;
}
else {
return proxy.move(source, target, status, callback, connectionCallback);
}
} | @Test
public void testMoveFromEncryptedDataRoom() throws Exception {
final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
final Path room1 = new SDSDirectoryFeature(session, nodeid).createRoom(
new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), true);
room1.setAttributes(new SDSAttributesFinderFeature(session, nodeid).find(room1));
final Path room2 = new SDSDirectoryFeature(session, nodeid).mkdir(new Path(
new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
room2.setAttributes(new SDSAttributesFinderFeature(session, nodeid).find(room2));
final byte[] content = RandomUtils.nextBytes(32769);
final TransferStatus status = new TransferStatus();
status.setLength(content.length);
final Path test = new Path(room1, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final SDSEncryptionBulkFeature bulk = new SDSEncryptionBulkFeature(session, nodeid);
bulk.pre(Transfer.Type.upload, Collections.singletonMap(new TransferItem(test), status), new DisabledConnectionCallback());
final TripleCryptWriteFeature writer = new TripleCryptWriteFeature(session, nodeid, new SDSDirectS3MultipartWriteFeature(session, nodeid));
final StatusOutputStream<Node> out = writer.write(test, status, new DisabledConnectionCallback());
new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out);
final Path target = new Path(room2, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new SDSDelegatingMoveFeature(session, nodeid, new SDSMoveFeature(session, nodeid)).move(test, target, new TransferStatus().withLength(content.length), new Delete.DisabledCallback(), new DisabledConnectionCallback() {
@Override
public void warn(final Host bookmark, final String title, final String message, final String defaultButton, final String cancelButton, final String preference) {
//
}
@Override
public Credentials prompt(final Host bookmark, final String title, final String reason, final LoginOptions options) {
return new VaultCredentials("eth[oh8uv4Eesij");
}
});
test.attributes().setVersionId(null);
assertFalse(new SDSFindFeature(session, nodeid).find(test));
assertTrue(new SDSFindFeature(session, nodeid).find(target));
final byte[] compare = new byte[content.length];
final InputStream stream = new SDSReadFeature(session, nodeid).read(target, new TransferStatus().withLength(content.length), new DisabledConnectionCallback() {
@Override
public void warn(final Host bookmark, final String title, final String message, final String defaultButton, final String cancelButton, final String preference) {
//
}
@Override
public Credentials prompt(final Host bookmark, final String title, final String reason, final LoginOptions options) {
return new VaultCredentials("eth[oh8uv4Eesij");
}
});
IOUtils.readFully(stream, compare);
stream.close();
assertArrayEquals(content, compare);
new SDSDeleteFeature(session, nodeid).delete(Arrays.asList(room1, room2), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
private int getVolumeReport(String[] argv, int i) throws IOException {
ClientDatanodeProtocol datanode = getDataNodeProxy(argv[i]);
List<DatanodeVolumeInfo> volumeReport = datanode
.getVolumeReport();
System.out.println("Active Volumes : " + volumeReport.size());
for (DatanodeVolumeInfo info : volumeReport) {
System.out.println("\n" + info.getDatanodeVolumeReport());
}
return 0;
} | @Test(timeout = 30000)
public void testGetVolumeReport() throws Exception {
redirectStream();
final DFSAdmin dfsAdmin = new DFSAdmin(conf);
for (int i = 0; i < cluster.getDataNodes().size(); i++) {
resetStream();
final DataNode dn = cluster.getDataNodes().get(i);
final String addr = String.format("%s:%d", dn.getXferAddress()
.getHostString(), dn.getIpcPort());
final int ret = ToolRunner.run(dfsAdmin, new String[] {
"-getVolumeReport", addr });
assertEquals(0, ret);
/* collect outputs */
final List<String> outs = Lists.newArrayList();
scanIntoList(out, outs);
assertEquals(outs.get(0), "Active Volumes : 2");
}
} |
public String getQuery() throws Exception {
return getQuery(weatherConfiguration.getLocation());
} | @Test
public void testBoxedQuery() throws Exception {
WeatherConfiguration weatherConfiguration = new WeatherConfiguration();
weatherConfiguration.setLon("4");
weatherConfiguration.setLat("52");
weatherConfiguration.setRightLon("6");
weatherConfiguration.setTopLat("54");
weatherConfiguration.setZoom(8);
weatherConfiguration.setUnits(WeatherUnits.METRIC);
weatherConfiguration.setAppid(APPID);
WeatherQuery weatherQuery = new WeatherQuery(weatherConfiguration);
weatherConfiguration.setGeoLocationProvider(geoLocationProvider);
String query = weatherQuery.getQuery();
assertThat(query, is(
"http://api.openweathermap.org/data/2.5/box/city?bbox=4,52,6,54,8&cluster=yes&lang=en&units=metric&APPID=9162755b2efa555823cfe0451d7fff38"));
} |
static Optional<File> getFileFromURL(URL retrieved) {
logger.debug("getFileFromURL {}", retrieved);
logger.debug("retrieved.getProtocol() {}", retrieved.getProtocol());
if (logger.isDebugEnabled()) {
debugURLContent(retrieved);
}
logger.debug("retrieved.getPath() {}", retrieved.getPath());
switch (retrieved.getProtocol()) {
case "jar":
return getOptionalFileFromJar(retrieved);
case "resource":
return getOptionalFileFromResource(retrieved);
default:
return getOptionalFileFromURLFile(retrieved);
}
} | @Test
void getFileFromURL() throws IOException {
URL url = getJarUrl();
assertThat(url).isNotNull();
Optional<File> retrieved = MemoryFileUtils.getFileFromURL(url);
assertThat(retrieved).isNotNull().isPresent();
assertThat(retrieved.get()).isInstanceOf(MemoryFile.class);
assertThat(retrieved.get()).canRead();
url = getResourceUrl();
assertThat(url).isNotNull();
retrieved = MemoryFileUtils.getFileFromURL(url);
assertThat(retrieved).isNotNull().isPresent();
assertThat(retrieved.get()).isInstanceOf(File.class);
assertThat(retrieved.get()).canRead();
} |
public static void sortMessages(Message[] messages, final SortTerm[] sortTerm) {
final List<SortTermWithDescending> sortTermsWithDescending = getSortTermsWithDescending(sortTerm);
sortMessages(messages, sortTermsWithDescending);
} | @Test
public void testSortMessagesWithTie() {
Message[] given = new Message[] { MESSAGES[2], TIE_BREAKER };
// Sort according to the whole list. Only the last element breaks the tie
Message[] actual1 = given.clone();
MailSorter.sortMessages(actual1, POSSIBLE_TERMS);
assertArrayEquals(actual1, new Message[] { TIE_BREAKER, MESSAGES[2] });
// now reverse the last element (the tie breaker)
SortTerm[] reversed = new SortTerm[POSSIBLE_TERMS.length + 1];
System.arraycopy(POSSIBLE_TERMS, 0, reversed, 0, POSSIBLE_TERMS.length - 1);
reversed[reversed.length - 2] = SortTerm.REVERSE;
reversed[reversed.length - 1] = POSSIBLE_TERMS[POSSIBLE_TERMS.length - 1];
// And check again
Message[] actual2 = given.clone();
MailSorter.sortMessages(actual2, reversed);
assertArrayEquals(actual2, new Message[] { MESSAGES[2], TIE_BREAKER });
} |
@Override
public String toString() {
return toHexString();
} | @Test
public void shouldCreateFromHexString() {
String value = "12FF841344567899";
ByteArray byteArray = new ByteArray(value);
assertThat(byteArray.toString(), is(value.toLowerCase()));
} |
@Override
public Tuple apply(Object input) {
checkArgument(fieldsOrProperties != null, "The names of the fields/properties to read should not be null");
checkArgument(fieldsOrProperties.length > 0, "The names of the fields/properties to read should not be empty");
checkArgument(input != null, "The object to extract fields/properties from should not be null");
List<Function<Object, Object>> extractors = buildExtractors();
List<Object> values = extractValues(input, extractors);
return new Tuple(values.toArray());
} | @Test
void should_extract_tuples_from_fields_or_properties() {
// GIVEN
ByNameMultipleExtractor underTest = new ByNameMultipleExtractor("id", "age");
// WHEN
Tuple result = underTest.apply(YODA);
// THEN
then(result).isEqualTo(tuple(1L, 800));
} |
@Override
public void store(Measure newMeasure) {
saveMeasure(newMeasure.inputComponent(), (DefaultMeasure<?>) newMeasure);
} | @Test
public void shouldFailIfUnknownMetric() {
InputFile file = new TestInputFileBuilder("foo", "src/Foo.php").build();
assertThatThrownBy(() -> underTest.store(new DefaultMeasure()
.on(file)
.forMetric(CoreMetrics.LINES)
.withValue(10)))
.isInstanceOf(UnsupportedOperationException.class)
.hasMessage("Unknown metric: lines");
} |
static JarFileWithEntryClass findOnlyEntryClass(Iterable<File> jarFiles) throws IOException {
List<JarFileWithEntryClass> jarsWithEntryClasses = new ArrayList<>();
for (File jarFile : jarFiles) {
findEntryClass(jarFile)
.ifPresent(
entryClass ->
jarsWithEntryClasses.add(
new JarFileWithEntryClass(jarFile, entryClass)));
}
int size = jarsWithEntryClasses.size();
if (size == 0) {
throw new NoSuchElementException("No JAR with manifest attribute for entry class");
}
if (size == 1) {
return jarsWithEntryClasses.get(0);
}
// else: size > 1
throw new IllegalArgumentException(
"Multiple JARs with manifest attribute for entry class: " + jarsWithEntryClasses);
} | @Test
void testFindOnlyEntryClassMultipleJarsWithMultipleManifestEntries() throws IOException {
assertThatThrownBy(
() -> {
File jarFile = TestJob.getTestJobJar();
JarManifestParser.findOnlyEntryClass(
ImmutableList.of(jarFile, jarFile, jarFile));
})
.isInstanceOf(IllegalArgumentException.class);
} |
@Override
public Collection<Identity> getIdentities() {
if(null == proxy) {
log.warn("Missing proxy reference");
return Collections.emptyList();
}
if(log.isDebugEnabled()) {
log.debug(String.format("Retrieve identities from proxy %s", proxy));
}
final List<Identity> identities = Arrays.asList(proxy.getIdentities());
if(log.isDebugEnabled()) {
log.debug(String.format("Found %d identities", identities.size()));
}
return identities;
} | @Test(expected = AgentProxyException.class)
public void testGetIdentities() throws Exception {
assumeTrue(Factory.Platform.getDefault().equals(Factory.Platform.Name.mac));
final OpenSSHAgentAuthenticator authenticator = new OpenSSHAgentAuthenticator(StringUtils.EMPTY);
final Collection<Identity> identities = authenticator.getIdentities();
assertNotNull(authenticator.getProxy());
assertFalse(identities.isEmpty());
} |
@Override
public ParamCheckResponse checkParamInfoList(List<ParamInfo> paramInfos) {
ParamCheckResponse paramCheckResponse = new ParamCheckResponse();
if (paramInfos == null) {
paramCheckResponse.setSuccess(true);
return paramCheckResponse;
}
for (ParamInfo paramInfo : paramInfos) {
paramCheckResponse = checkParamInfoFormat(paramInfo);
if (!paramCheckResponse.isSuccess()) {
return paramCheckResponse;
}
}
paramCheckResponse.setSuccess(true);
return paramCheckResponse;
} | @Test
void testCheckParamInfoForClusters() {
ParamInfo paramInfo = new ParamInfo();
ArrayList<ParamInfo> paramInfos = new ArrayList<>();
paramInfos.add(paramInfo);
// Max Length
String cluster = buildStringLength(65);
paramInfo.setClusters(cluster + "," + cluster);
ParamCheckResponse actual = paramChecker.checkParamInfoList(paramInfos);
assertFalse(actual.isSuccess());
assertEquals("Param 'cluster' is illegal, the param length should not exceed 64.", actual.getMessage());
// Pattern
paramInfo.setClusters("@hsbfkj$@@!#khdkad啊@@");
actual = paramChecker.checkParamInfoList(paramInfos);
assertFalse(actual.isSuccess());
assertEquals("Param 'cluster' is illegal, illegal characters should not appear in the param.", actual.getMessage());
// Success
paramInfo.setClusters("0-9a-zA-Z-_,DEFAULT_abc-100");
actual = paramChecker.checkParamInfoList(paramInfos);
assertTrue(actual.isSuccess());
} |
public static int getClusterControllerIndex(ConfigId configId) {
Matcher matcher = CONTROLLER_INDEX_PATTERN.matcher(configId.s());
if (!matcher.matches()) {
throw new IllegalArgumentException("Unable to extract cluster controller index from config ID " + configId);
}
return Integer.parseInt(matcher.group(1));
} | @Test
public void testGetClusterControllerIndexWithStandaloneClusterController() {
ConfigId configId = new ConfigId("fantasy_sports/standalone/fantasy_sports-controllers/1");
assertEquals(1, VespaModelUtil.getClusterControllerIndex(configId));
} |
public static UReturn create(UExpression expression) {
return new AutoValue_UReturn(expression);
} | @Test
public void serialization() {
SerializableTester.reserializeAndAssert(UReturn.create(ULiteral.stringLit("foo")));
} |
public static CompositeEvictionChecker newCompositeEvictionChecker(CompositionOperator compositionOperator,
EvictionChecker... evictionCheckers) {
Preconditions.isNotNull(compositionOperator, "composition");
Preconditions.isNotNull(evictionCheckers, "evictionCheckers");
if (evictionCheckers.length == 0) {
throw new IllegalArgumentException("EvictionCheckers cannot be empty!");
}
switch (compositionOperator) {
case AND:
return new CompositeEvictionCheckerWithAndComposition(evictionCheckers);
case OR:
return new CompositeEvictionCheckerWithOrComposition(evictionCheckers);
default:
throw new IllegalArgumentException("Invalid composition operator: " + compositionOperator);
}
} | @Test
public void resultShouldReturnFalse_whenOneIsFalse_withAndCompositionOperator() {
EvictionChecker evictionChecker1ReturnsTrue = mock(EvictionChecker.class);
EvictionChecker evictionChecker2ReturnsFalse = mock(EvictionChecker.class);
when(evictionChecker1ReturnsTrue.isEvictionRequired()).thenReturn(true);
when(evictionChecker2ReturnsFalse.isEvictionRequired()).thenReturn(false);
CompositeEvictionChecker compositeEvictionChecker =
CompositeEvictionChecker.newCompositeEvictionChecker(
CompositeEvictionChecker.CompositionOperator.AND, evictionChecker1ReturnsTrue,
evictionChecker2ReturnsFalse);
assertFalse(compositeEvictionChecker.isEvictionRequired());
} |
public static <T> Write<T> write() {
return Write.<T>builder(MutationType.WRITE).build();
} | @Test
public void testWrite() {
ArrayList<ScientistWrite> data = new ArrayList<>();
for (int i = 0; i < NUM_ROWS; i++) {
ScientistWrite scientist = new ScientistWrite();
scientist.id = i;
scientist.name = "Name " + i;
scientist.department = "bio";
data.add(scientist);
}
pipeline
.apply(Create.of(data))
.apply(
CassandraIO.<ScientistWrite>write()
.withHosts(Collections.singletonList(CASSANDRA_HOST))
.withPort(cassandraPort)
.withKeyspace(CASSANDRA_KEYSPACE)
.withEntity(ScientistWrite.class));
// the table to write to is specified in the entity's @Table annotation (in this case
// scientist_write)
pipeline.run();
List<Row> results = getRows(CASSANDRA_TABLE_WRITE);
assertEquals(NUM_ROWS, results.size());
for (Row row : results) {
assertTrue(row.getString("person_name").matches("Name (\\d*)"));
}
} |
@Override
public String rebootOnu(String target) {
DriverHandler handler = handler();
NetconfController controller = handler.get(NetconfController.class);
MastershipService mastershipService = handler.get(MastershipService.class);
DeviceId ncDeviceId = handler.data().deviceId();
checkNotNull(controller, "Netconf controller is null");
String reply = null;
String[] onuId = null;
if (!mastershipService.isLocalMaster(ncDeviceId)) {
log.warn("Not master for {} Use {} to execute command",
ncDeviceId,
mastershipService.getMasterFor(ncDeviceId));
return null;
}
onuId = checkIdString(target, TWO);
if (onuId == null) {
log.error("Invalid ONU identifier {}", target);
return null;
}
try {
StringBuilder request = new StringBuilder();
request.append(ANGLE_LEFT + ONU_REBOOT + SPACE);
request.append(VOLT_NE_NAMESPACE + ANGLE_RIGHT + NEW_LINE);
request.append(buildStartTag(PONLINK_ID, false))
.append(onuId[FIRST_PART])
.append(buildEndTag(PONLINK_ID))
.append(buildStartTag(ONU_ID, false))
.append(onuId[SECOND_PART])
.append(buildEndTag(ONU_ID))
.append(buildEndTag(ONU_REBOOT));
reply = controller
.getDevicesMap()
.get(ncDeviceId)
.getSession()
.doWrappedRpc(request.toString());
} catch (NetconfException e) {
log.error("Cannot communicate to device {} exception {}", ncDeviceId, e);
}
return reply;
} | @Test
public void testValidRebootOnu() throws Exception {
String reply;
String target;
for (int i = ZERO; i < VALID_REBOOT_TCS.length; i++) {
target = VALID_REBOOT_TCS[i];
currentKey = i;
reply = voltConfig.rebootOnu(target);
assertNotNull("Incorrect response for VALID_REBOOT_TCS", reply);
}
} |
@Override
public void execute( RunConfiguration runConfiguration, ExecutionConfiguration executionConfiguration,
AbstractMeta meta, VariableSpace variableSpace, Repository repository ) throws KettleException {
DefaultRunConfiguration defaultRunConfiguration = (DefaultRunConfiguration) runConfiguration;
if ( executionConfiguration instanceof TransExecutionConfiguration ) {
configureTransExecution( (TransExecutionConfiguration) executionConfiguration, defaultRunConfiguration,
variableSpace, meta, repository );
}
if ( executionConfiguration instanceof JobExecutionConfiguration ) {
configureJobExecution( (JobExecutionConfiguration) executionConfiguration, defaultRunConfiguration, variableSpace,
meta, repository );
}
variableSpace.setVariable( "engine", null );
variableSpace.setVariable( "engine.remote", null );
variableSpace.setVariable( "engine.scheme", null );
variableSpace.setVariable( "engine.url", null );
} | @Test
public void testExecuteLocalTrans() throws Exception {
DefaultRunConfiguration defaultRunConfiguration = new DefaultRunConfiguration();
defaultRunConfiguration.setName( "Default Configuration" );
defaultRunConfiguration.setLocal( true );
TransExecutionConfiguration transExecutionConfiguration = new TransExecutionConfiguration();
defaultRunConfigurationExecutor
.execute( defaultRunConfiguration, transExecutionConfiguration, abstractMeta, variableSpace, null );
assertTrue( transExecutionConfiguration.isExecutingLocally() );
} |
@Override
public FileSystem create(URI fsUri) throws IOException {
checkNotNull(fsUri, "fsUri");
final String scheme = fsUri.getScheme();
checkArgument(scheme != null, "file system has null scheme");
// from here on, we need to handle errors due to missing optional
// dependency classes
try {
// -- (1) get the loaded Hadoop config (or fall back to one loaded from the classpath)
final org.apache.hadoop.conf.Configuration hadoopConfig;
if (this.hadoopConfig != null) {
hadoopConfig = this.hadoopConfig;
} else if (flinkConfig != null) {
hadoopConfig = HadoopUtils.getHadoopConfiguration(flinkConfig);
this.hadoopConfig = hadoopConfig;
} else {
LOG.warn(
"Hadoop configuration has not been explicitly initialized prior to loading a Hadoop file system."
+ " Using configuration from the classpath.");
hadoopConfig = new org.apache.hadoop.conf.Configuration();
}
// -- (2) get the Hadoop file system class for that scheme
final Class<? extends org.apache.hadoop.fs.FileSystem> fsClass;
try {
fsClass = org.apache.hadoop.fs.FileSystem.getFileSystemClass(scheme, hadoopConfig);
} catch (IOException e) {
throw new UnsupportedFileSystemSchemeException(
"Hadoop File System abstraction does not support scheme '"
+ scheme
+ "'. "
+ "Either no file system implementation exists for that scheme, "
+ "or the relevant classes are missing from the classpath.",
e);
}
// -- (3) instantiate the Hadoop file system
LOG.debug(
"Instantiating for file system scheme {} Hadoop File System {}",
scheme,
fsClass.getName());
final org.apache.hadoop.fs.FileSystem hadoopFs = fsClass.newInstance();
// -- (4) create the proper URI to initialize the file system
final URI initUri;
if (fsUri.getAuthority() != null) {
initUri = fsUri;
} else {
if (LOG.isDebugEnabled()) {
LOG.debug(
"URI {} does not specify file system authority, trying to load default authority (fs.defaultFS)",
fsUri);
}
String configEntry = hadoopConfig.get("fs.defaultFS", null);
if (configEntry == null) {
// fs.default.name deprecated as of hadoop 2.2.0 - see
// http://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/DeprecatedProperties.html
configEntry = hadoopConfig.get("fs.default.name", null);
}
if (LOG.isDebugEnabled()) {
LOG.debug("Hadoop's 'fs.defaultFS' is set to {}", configEntry);
}
if (configEntry == null) {
throw new IOException(
getMissingAuthorityErrorPrefix(fsUri)
+ "Hadoop configuration did not contain an entry for the default file system ('fs.defaultFS').");
} else {
try {
initUri = URI.create(configEntry);
} catch (IllegalArgumentException e) {
throw new IOException(
getMissingAuthorityErrorPrefix(fsUri)
+ "The configuration contains an invalid file system default name "
+ "('fs.default.name' or 'fs.defaultFS'): "
+ configEntry);
}
if (initUri.getAuthority() == null) {
throw new IOException(
getMissingAuthorityErrorPrefix(fsUri)
+ "Hadoop configuration for default file system ('fs.default.name' or 'fs.defaultFS') "
+ "contains no valid authority component (like hdfs namenode, S3 host, etc)");
}
}
}
// -- (5) configure the Hadoop file system
try {
hadoopFs.initialize(initUri, hadoopConfig);
} catch (UnknownHostException e) {
String message =
"The Hadoop file system's authority ("
+ initUri.getAuthority()
+ "), specified by either the file URI or the configuration, cannot be resolved.";
throw new IOException(message, e);
}
HadoopFileSystem fs = new HadoopFileSystem(hadoopFs);
// create the Flink file system, optionally limiting the open connections
if (flinkConfig != null) {
return limitIfConfigured(fs, scheme, flinkConfig);
} else {
return fs;
}
} catch (ReflectiveOperationException | LinkageError e) {
throw new UnsupportedFileSystemSchemeException(
"Cannot support file system for '"
+ fsUri.getScheme()
+ "' via Hadoop, because Hadoop is not in the classpath, or some classes "
+ "are missing from the classpath.",
e);
} catch (IOException e) {
throw e;
} catch (Exception e) {
throw new IOException("Cannot instantiate file system for URI: " + fsUri, e);
}
} | @Test
public void testCreateHadoopFsWithMissingAuthority() throws Exception {
final URI uri = URI.create("hdfs:///my/path");
HadoopFsFactory factory = new HadoopFsFactory();
try {
factory.create(uri);
fail("should have failed with an exception");
} catch (IOException e) {
assertTrue(e.getMessage().contains("authority"));
}
} |
@Override
public Map<String, Object> assembleFrom(OAuth2AccessTokenEntity accessToken, UserInfo userInfo, Set<String> authScopes) {
Map<String, Object> result = newLinkedHashMap();
OAuth2Authentication authentication = accessToken.getAuthenticationHolder().getAuthentication();
result.put(ACTIVE, true);
if (accessToken.getPermissions() != null && !accessToken.getPermissions().isEmpty()) {
Set<Object> permissions = Sets.newHashSet();
for (Permission perm : accessToken.getPermissions()) {
Map<String, Object> o = newLinkedHashMap();
o.put("resource_set_id", perm.getResourceSet().getId().toString());
Set<String> scopes = Sets.newHashSet(perm.getScopes());
o.put("scopes", scopes);
permissions.add(o);
}
result.put("permissions", permissions);
} else {
Set<String> scopes = Sets.intersection(authScopes, accessToken.getScope());
result.put(SCOPE, Joiner.on(SCOPE_SEPARATOR).join(scopes));
}
if (accessToken.getExpiration() != null) {
try {
result.put(EXPIRES_AT, dateFormat.valueToString(accessToken.getExpiration()));
result.put(EXP, accessToken.getExpiration().getTime() / 1000L);
} catch (ParseException e) {
logger.error("Parse exception in token introspection", e);
}
}
if (userInfo != null) {
// if we have a UserInfo, use that for the subject
result.put(SUB, userInfo.getSub());
} else {
// otherwise, use the authentication's username
result.put(SUB, authentication.getName());
}
if(authentication.getUserAuthentication() != null) {
result.put(USER_ID, authentication.getUserAuthentication().getName());
}
result.put(CLIENT_ID, authentication.getOAuth2Request().getClientId());
result.put(TOKEN_TYPE, accessToken.getTokenType());
return result;
} | @Test
public void shouldAssembleExpectedResultForAccessTokenWithoutExpiry() {
// given
OAuth2AccessTokenEntity accessToken = accessToken(null, scopes("foo", "bar"), null, "Bearer",
oauth2AuthenticationWithUser(oauth2Request("clientId"), "name"));
UserInfo userInfo = userInfo("sub");
Set<String> authScopes = scopes("foo", "bar", "baz");
// when
Map<String, Object> result = assembler.assembleFrom(accessToken, userInfo, authScopes);
// then
Map<String, Object> expected = new ImmutableMap.Builder<String, Object>()
.put("sub", "sub")
.put("scope", "bar foo")
.put("active", Boolean.TRUE)
.put("user_id", "name")
.put("client_id", "clientId")
.put("token_type", "Bearer")
.build();
assertThat(result, is(equalTo(expected)));
} |
public static long readUint32(ByteBuffer buf) throws BufferUnderflowException {
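    // Read 4 bytes in little-endian order and zero-extend the result into a long.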
return Integer.toUnsignedLong(buf.order(ByteOrder.LITTLE_ENDIAN).getInt());
} | @Test(expected = ArrayIndexOutOfBoundsException.class)
public void testReadUint32ThrowsException1() {
ByteUtils.readUint32(new byte[]{1, 2, 3}, 2);
} |
public List<Analyzer> getAnalyzers() {
return getAnalyzers(AnalysisPhase.values());
} | @Test
public void testGetExperimentalAnalyzers() {
AnalyzerService instance = new AnalyzerService(Thread.currentThread().getContextClassLoader(), getSettings());
List<Analyzer> result = instance.getAnalyzers();
String experimental = "CMake Analyzer";
boolean found = false;
boolean retiredFound = false;
for (Analyzer a : result) {
if (experimental.equals(a.getName())) {
found = true;
}
}
assertFalse("Experimental analyzer loaded when set to false", found);
assertFalse("Retired analyzer loaded when set to false", retiredFound);
getSettings().setBoolean(Settings.KEYS.ANALYZER_EXPERIMENTAL_ENABLED, true);
instance = new AnalyzerService(Thread.currentThread().getContextClassLoader(), getSettings());
result = instance.getAnalyzers();
found = false;
retiredFound = false;
for (Analyzer a : result) {
if (experimental.equals(a.getName())) {
found = true;
}
}
assertTrue("Experimental analyzer not loaded when set to true", found);
assertFalse("Retired analyzer loaded when set to false", retiredFound);
getSettings().setBoolean(Settings.KEYS.ANALYZER_EXPERIMENTAL_ENABLED, false);
getSettings().setBoolean(Settings.KEYS.ANALYZER_RETIRED_ENABLED, true);
instance = new AnalyzerService(Thread.currentThread().getContextClassLoader(), getSettings());
result = instance.getAnalyzers();
found = false;
for (Analyzer a : result) {
if (experimental.equals(a.getName())) {
found = true;
}
}
assertFalse("Experimental analyzer loaded when set to false", found);
} |
public List<QueryMetadata> sql(final String sql) {
return sql(sql, Collections.emptyMap());
} | @Test
public void shouldInferTopicWithValidArgs() {
// Given:
when(schemaInjector.inject(any())).thenAnswer(inv -> inv.getArgument(0));
// When:
ksqlContext.sql("Some SQL", SOME_PROPERTIES);
// Then:
verify(topicInjector, times(2) /* once to validate, once to execute */)
.inject(CFG_STMT_0);
} |
public static NetworkMultiplexer shared(Network net) {
return new NetworkMultiplexer(net, true);
} | @Test
void testShared() {
MockNetwork net = new MockNetwork();
MockOwner owner1 = new MockOwner();
MockOwner owner2 = new MockOwner();
NetworkMultiplexer shared = NetworkMultiplexer.shared(net);
assertEquals(Set.of(shared), net.attached);
assertEquals(Set.of(), net.registered);
assertFalse(net.shutDown.get());
shared.attach(owner1);
shared.registerSession("s1", owner1, true);
try {
shared.registerSession("s1", owner1, true);
fail("Illegal to register same session multiple times with the same owner");
}
catch (IllegalArgumentException expected) {
assertEquals("Session 's1' with owner 'mock owner' already registered with network multiplexer with owners: [mock owner], sessions: {s1=[mock owner]} and destructible: false",
expected.getMessage());
}
assertEquals(Set.of("s1"), net.registered);
shared.attach(owner2);
shared.registerSession("s2", owner2, true);
shared.registerSession("s3", owner2, false);
assertEquals(Set.of("s1", "s2"), net.registered);
Utf8String name = new Utf8String("protocol");
Protocol protocol1 = new SimpleProtocol();
Protocol protocol2 = new SimpleProtocol();
owner1.protocols.put(name, protocol1);
assertEquals(protocol1, shared.getProtocol(name));
owner2.protocols.put(name, protocol2);
assertEquals(protocol2, shared.getProtocol(name));
Message message1 = new SimpleMessage("one");
Message message2 = new SimpleMessage("two");
Message message3 = new SimpleMessage("three");
Message message4 = new SimpleMessage("four");
Message message5 = new SimpleMessage("five");
shared.deliverMessage(message1, "s1");
shared.deliverMessage(message2, "s2");
// New "s1" owner connects, and should have new requests.
shared.registerSession("s1", owner2, true);
shared.deliverMessage(message3, "s1");
shared.deliverMessage(message4, "s3");
shared.unregisterSession("s1", owner1, true);
shared.deliverMessage(message5, "s1");
assertEquals(Map.of("s1", List.of(message1)), owner1.messages);
assertEquals(Map.of("s2", List.of(message2), "s1", List.of(message3, message5), "s3", List.of(message4)), owner2.messages);
shared.detach(owner1);
assertEquals(protocol2, shared.getProtocol(name));
shared.detach(owner2);
assertFalse(net.shutDown.get());
shared.attach(owner2);
shared.disown();
assertFalse(net.shutDown.get());
shared.detach(owner2);
assertTrue(net.shutDown.get());
} |
@Override
public TradePriceCalculateRespBO calculatePrice(TradePriceCalculateReqBO calculateReqBO) {
        // 1.1 Fetch the product SKU list
List<ProductSkuRespDTO> skuList = checkSkuList(calculateReqBO);
        // 1.2 Fetch the product SPU list
List<ProductSpuRespDTO> spuList = checkSpuList(skuList);
        // 2.1 Calculate the price
TradePriceCalculateRespBO calculateRespBO = TradePriceCalculatorHelper
.buildCalculateResp(calculateReqBO, spuList, skuList);
priceCalculators.forEach(calculator -> calculator.calculate(calculateReqBO, calculateRespBO));
        // 2.2 If the final pay price is less than or equal to 0, throw a business exception
if (calculateRespBO.getPrice().getPayPrice() <= 0) {
            log.error("[calculatePrice][Price calculation is incorrect, request calculateReqDTO({}), result priceCalculate({})]",
                    calculateReqBO, calculateRespBO);
throw exception(PRICE_CALCULATE_PAY_PRICE_ILLEGAL);
}
return calculateRespBO;
} | @Test
public void testCalculatePrice() {
        // Prepare parameters
TradePriceCalculateReqBO calculateReqBO = new TradePriceCalculateReqBO()
.setUserId(10L)
.setCouponId(20L).setAddressId(30L)
.setItems(Arrays.asList(
new TradePriceCalculateReqBO.Item().setSkuId(100L).setCount(1).setSelected(true),
new TradePriceCalculateReqBO.Item().setSkuId(200L).setCount(3).setSelected(true),
new TradePriceCalculateReqBO.Item().setSkuId(300L).setCount(6).setCartId(233L).setSelected(false)
));
        // Mock methods
List<ProductSkuRespDTO> skuList = Arrays.asList(
new ProductSkuRespDTO().setId(100L).setStock(500).setPrice(1000).setPicUrl("https://t.cn/1.png").setSpuId(1001L)
.setProperties(singletonList(new ProductPropertyValueDetailRespDTO().setPropertyId(1L).setPropertyName("颜色")
.setValueId(2L).setValueName("红色"))),
new ProductSkuRespDTO().setId(200L).setStock(400).setPrice(2000).setPicUrl("https://t.cn/2.png").setSpuId(1001L)
.setProperties(singletonList(new ProductPropertyValueDetailRespDTO().setPropertyId(1L).setPropertyName("颜色")
.setValueId(3L).setValueName("黄色"))),
new ProductSkuRespDTO().setId(300L).setStock(600).setPrice(3000).setPicUrl("https://t.cn/3.png").setSpuId(1001L)
.setProperties(singletonList(new ProductPropertyValueDetailRespDTO().setPropertyId(1L).setPropertyName("颜色")
.setValueId(4L).setValueName("黑色")))
);
when(productSkuApi.getSkuList(Mockito.eq(asSet(100L, 200L, 300L)))).thenReturn(skuList);
when(productSpuApi.getSpuList(Mockito.eq(asSet(1001L))))
.thenReturn(singletonList(new ProductSpuRespDTO().setId(1001L).setName("小菜").setCategoryId(666L)
.setStatus(ProductSpuStatusEnum.ENABLE.getStatus())));
        // Invoke
TradePriceCalculateRespBO calculateRespBO = tradePriceService.calculatePrice(calculateReqBO);
        // Assert
assertEquals(TradeOrderTypeEnum.NORMAL.getType(), calculateRespBO.getType());
assertEquals(0, calculateRespBO.getPromotions().size());
assertNull(calculateRespBO.getCouponId());
        // Assert: order price
assertEquals(7000, calculateRespBO.getPrice().getTotalPrice());
assertEquals(0, calculateRespBO.getPrice().getDiscountPrice());
assertEquals(0, calculateRespBO.getPrice().getDeliveryPrice());
assertEquals(0, calculateRespBO.getPrice().getCouponPrice());
assertEquals(0, calculateRespBO.getPrice().getPointPrice());
assertEquals(7000, calculateRespBO.getPrice().getPayPrice());
        // Assert: SKU 1
assertEquals(1001L, calculateRespBO.getItems().get(0).getSpuId());
assertEquals(100L, calculateRespBO.getItems().get(0).getSkuId());
assertEquals(1, calculateRespBO.getItems().get(0).getCount());
assertNull(calculateRespBO.getItems().get(0).getCartId());
assertTrue(calculateRespBO.getItems().get(0).getSelected());
assertEquals(1000, calculateRespBO.getItems().get(0).getPrice());
assertEquals(0, calculateRespBO.getItems().get(0).getDiscountPrice());
assertEquals(0, calculateRespBO.getItems().get(0).getDeliveryPrice());
assertEquals(0, calculateRespBO.getItems().get(0).getCouponPrice());
assertEquals(0, calculateRespBO.getItems().get(0).getPointPrice());
assertEquals(1000, calculateRespBO.getItems().get(0).getPayPrice());
assertEquals("小菜", calculateRespBO.getItems().get(0).getSpuName());
assertEquals("https://t.cn/1.png", calculateRespBO.getItems().get(0).getPicUrl());
assertEquals(666L, calculateRespBO.getItems().get(0).getCategoryId());
assertEquals(skuList.get(0).getProperties(), calculateRespBO.getItems().get(0).getProperties());
        // Assert: SKU 2
assertEquals(1001L, calculateRespBO.getItems().get(1).getSpuId());
assertEquals(200L, calculateRespBO.getItems().get(1).getSkuId());
assertEquals(3, calculateRespBO.getItems().get(1).getCount());
assertNull(calculateRespBO.getItems().get(1).getCartId());
assertTrue(calculateRespBO.getItems().get(1).getSelected());
assertEquals(2000, calculateRespBO.getItems().get(1).getPrice());
assertEquals(0, calculateRespBO.getItems().get(1).getDiscountPrice());
assertEquals(0, calculateRespBO.getItems().get(1).getDeliveryPrice());
assertEquals(0, calculateRespBO.getItems().get(1).getCouponPrice());
assertEquals(0, calculateRespBO.getItems().get(1).getPointPrice());
assertEquals(6000, calculateRespBO.getItems().get(1).getPayPrice());
assertEquals("小菜", calculateRespBO.getItems().get(1).getSpuName());
assertEquals("https://t.cn/2.png", calculateRespBO.getItems().get(1).getPicUrl());
assertEquals(666L, calculateRespBO.getItems().get(1).getCategoryId());
assertEquals(skuList.get(1).getProperties(), calculateRespBO.getItems().get(1).getProperties());
        // Assert: SKU 3
assertEquals(1001L, calculateRespBO.getItems().get(2).getSpuId());
assertEquals(300L, calculateRespBO.getItems().get(2).getSkuId());
assertEquals(6, calculateRespBO.getItems().get(2).getCount());
assertEquals(233L, calculateRespBO.getItems().get(2).getCartId());
assertFalse(calculateRespBO.getItems().get(2).getSelected());
assertEquals(3000, calculateRespBO.getItems().get(2).getPrice());
assertEquals(0, calculateRespBO.getItems().get(2).getDiscountPrice());
assertEquals(0, calculateRespBO.getItems().get(2).getDeliveryPrice());
assertEquals(0, calculateRespBO.getItems().get(2).getCouponPrice());
assertEquals(0, calculateRespBO.getItems().get(2).getPointPrice());
assertEquals(18000, calculateRespBO.getItems().get(2).getPayPrice());
assertEquals("小菜", calculateRespBO.getItems().get(2).getSpuName());
assertEquals("https://t.cn/3.png", calculateRespBO.getItems().get(2).getPicUrl());
assertEquals(666L, calculateRespBO.getItems().get(2).getCategoryId());
assertEquals(skuList.get(2).getProperties(), calculateRespBO.getItems().get(2).getProperties());
} |
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || !(o instanceof Set))
return false;
if (o instanceof RangeSet) {
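            // Another RangeSet with the same size is treated as equal; only sizes are compared here.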
RangeSet integers = (RangeSet) o;
return size == integers.size;
} else {
Set set = (Set) o;
return size == set.size() && containsAll(set);
}
} | @Test
public void equals() throws Exception {
RangeSet rs = new RangeSet(4);
RangeSet rs2 = new RangeSet(5);
// Verify equals both ways
assertNotEquals(rs, rs2);
assertNotEquals(rs2, rs);
RangeSet rs3 = new RangeSet(4);
assertEquals(rs3, rs);
assertEquals(rs, rs3);
} |
public double calcOrientation(double lat1, double lon1, double lat2, double lon2) {
return calcOrientation(lat1, lon1, lat2, lon2, true);
} | @Test
public void testOrientationExact() {
assertEquals(90.0, Math.toDegrees(AC.calcOrientation(0, 0, 1, 0)), 0.01);
assertEquals(45.0, Math.toDegrees(AC.calcOrientation(0, 0, 1, 1)), 0.01);
assertEquals(0.0, Math.toDegrees(AC.calcOrientation(0, 0, 0, 1)), 0.01);
assertEquals(-45.0, Math.toDegrees(AC.calcOrientation(0, 0, -1, 1)), 0.01);
assertEquals(-135.0, Math.toDegrees(AC.calcOrientation(0, 0, -1, -1)), 0.01);
// is symmetric?
assertEquals(90 - 32.76, Math.toDegrees(AC.calcOrientation(49.942, 11.580, 49.944, 11.582)), 0.01);
assertEquals(-90 - 32.76, Math.toDegrees(AC.calcOrientation(49.944, 11.582, 49.942, 11.580)), 0.01);
} |
@Override
public void onNewResourcesAvailable() {
checkDesiredOrSufficientResourcesAvailable();
} | @Test
void testSchedulingWithSufficientResourcesAfterStabilizationTimeout() {
Duration initialResourceTimeout = Duration.ofMillis(-1);
Duration stabilizationTimeout = Duration.ofMillis(50_000L);
WaitingForResources wfr =
new WaitingForResources(
ctx,
LOG,
initialResourceTimeout,
stabilizationTimeout,
ctx.getClock(),
null);
// sufficient resources available
ctx.setHasDesiredResources(() -> false);
ctx.setHasSufficientResources(() -> true);
// notify about sufficient resources
wfr.onNewResourcesAvailable();
ctx.setExpectCreatingExecutionGraph();
// execute all runnables and trigger expected state transition
final Duration afterStabilizationTimeout = stabilizationTimeout.plusMillis(1);
ctx.advanceTimeByMillis(afterStabilizationTimeout.toMillis());
ctx.runScheduledTasks(afterStabilizationTimeout.toMillis());
assertThat(ctx.hasStateTransition()).isTrue();
} |
@Override
protected <R> EurekaHttpResponse<R> execute(RequestExecutor<R> requestExecutor) {
List<EurekaEndpoint> candidateHosts = null;
int endpointIdx = 0;
for (int retry = 0; retry < numberOfRetries; retry++) {
EurekaHttpClient currentHttpClient = delegate.get();
EurekaEndpoint currentEndpoint = null;
if (currentHttpClient == null) {
if (candidateHosts == null) {
candidateHosts = getHostCandidates();
if (candidateHosts.isEmpty()) {
throw new TransportException("There is no known eureka server; cluster server list is empty");
}
}
if (endpointIdx >= candidateHosts.size()) {
throw new TransportException("Cannot execute request on any known server");
}
currentEndpoint = candidateHosts.get(endpointIdx++);
currentHttpClient = clientFactory.newClient(currentEndpoint);
}
try {
EurekaHttpResponse<R> response = requestExecutor.execute(currentHttpClient);
if (serverStatusEvaluator.accept(response.getStatusCode(), requestExecutor.getRequestType())) {
delegate.set(currentHttpClient);
if (retry > 0) {
logger.info("Request execution succeeded on retry #{}", retry);
}
return response;
}
logger.warn("Request execution failure with status code {}; retrying on another server if available", response.getStatusCode());
} catch (Exception e) {
logger.warn("Request execution failed with message: {}", e.getMessage()); // just log message as the underlying client should log the stacktrace
}
// Connection error or 5xx from the server that must be retried on another server
delegate.compareAndSet(currentHttpClient, null);
if (currentEndpoint != null) {
quarantineSet.add(currentEndpoint);
}
}
throw new TransportException("Retry limit reached; giving up on completing the request");
} | @Test
public void testRequestsReuseSameConnectionIfThereIsNoError() throws Exception {
when(clientFactory.newClient(Matchers.<EurekaEndpoint>anyVararg())).thenReturn(clusterDelegates.get(0));
when(requestExecutor.execute(clusterDelegates.get(0))).thenReturn(EurekaHttpResponse.status(200));
    // First request creates the delegate; subsequent requests reuse it
for (int i = 0; i < 3; i++) {
EurekaHttpResponse<Void> httpResponse = retryableClient.execute(requestExecutor);
assertThat(httpResponse.getStatusCode(), is(equalTo(200)));
}
verify(clientFactory, times(1)).newClient(Matchers.<EurekaEndpoint>anyVararg());
verify(requestExecutor, times(3)).execute(clusterDelegates.get(0));
} |
public void runPickle(Pickle pickle) {
try {
StepTypeRegistry stepTypeRegistry = createTypeRegistryForPickle(pickle);
snippetGenerators = createSnippetGeneratorsForPickle(stepTypeRegistry);
// Java8 step definitions will be added to the glue here
buildBackendWorlds();
glue.prepareGlue(stepTypeRegistry);
TestCase testCase = createTestCaseForPickle(pickle);
testCase.run(bus);
} finally {
glue.removeScenarioScopedGlue();
disposeBackendWorlds();
}
} | @Test
void backends_are_asked_for_snippets_for_undefined_steps() {
Backend backend = mock(Backend.class);
when(backend.getSnippet()).thenReturn(new TestSnippet());
ObjectFactory objectFactory = mock(ObjectFactory.class);
Runner runner = new Runner(bus, singletonList(backend), objectFactory, runtimeOptions);
runner.runPickle(createPicklesWithSteps());
verify(backend).getSnippet();
} |
public static PathSpecSet empty()
{
return EMPTY;
} | @Test
public void testEmpty() {
PathSpecSet pathSpecSet = PathSpecSet.empty();
Assert.assertEquals(pathSpecSet.getPathSpecs(), Collections.emptySet());
Assert.assertFalse(pathSpecSet.isAllInclusive());
Assert.assertTrue(pathSpecSet.isEmpty());
Assert.assertEquals(pathSpecSet.toArray(), new PathSpec[0]);
} |
@Override
public int deduceNumPartitions() {
// for source rdd, the partitioner is None
final Optional<Partitioner> partitioner = rddData.partitioner();
if (partitioner.isPresent()) {
int partPartitions = partitioner.get().numPartitions();
if (partPartitions > 0) {
return partPartitions;
}
}
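    // Fall back in order: Spark SQL shuffle partitions, then spark.default.parallelism, then the RDD's own partition count.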
if (SQLConf.get().contains(SQLConf.SHUFFLE_PARTITIONS().key())) {
return Integer.parseInt(SQLConf.get().getConfString(SQLConf.SHUFFLE_PARTITIONS().key()));
} else if (rddData.context().conf().contains("spark.default.parallelism")) {
return rddData.context().defaultParallelism();
} else {
return rddData.getNumPartitions();
}
} | @Test
public void testDeduceNumPartitions() {
int numPartitions = 100;
jsc.sc().conf().remove("spark.default.parallelism");
SQLConf.get().unsetConf("spark.sql.shuffle.partitions");
// rdd parallelize
SQLConf.get().setConfString("spark.sql.shuffle.partitions", "5");
HoodieData<Integer> rddData = HoodieJavaRDD.of(jsc.parallelize(
IntStream.rangeClosed(0, 100).boxed().collect(Collectors.toList()), numPartitions));
assertEquals(5, rddData.deduceNumPartitions());
// sql parallelize
SQLConf.get().unsetConf("spark.sql.shuffle.partitions");
jsc.sc().conf().set("spark.default.parallelism", "6");
rddData = HoodieJavaRDD.of(jsc.parallelize(
IntStream.rangeClosed(0, 100).boxed().collect(Collectors.toList()), numPartitions));
assertEquals(6, rddData.deduceNumPartitions());
// use partitioner num
HoodiePairData<Integer, Integer> shuffleRDD = rddData.mapToPair(key -> Pair.of(key, 1))
.reduceByKey((p1, p2) -> p1, 11);
assertEquals(11, shuffleRDD.deduceNumPartitions());
} |
public Optional<AbsoluteUnixPath> getAppRoot() {
if (appRoot == null) {
return Optional.empty();
}
return Optional.of(AbsoluteUnixPath.fromPath(appRoot));
} | @Test
public void testParse_appRoot() {
War warCommand =
CommandLine.populateCommand(
new War(), "--target=test-image-ref", "--app-root=/path/to/app", "my-app.war");
assertThat(warCommand.getAppRoot()).hasValue(AbsoluteUnixPath.get("/path/to/app"));
} |
public Set<String> indexNamesForStreamsInTimeRange(final Set<String> streamIds,
final TimeRange timeRange) {
Set<String> dataStreamIndices = streamIds.stream()
.filter(s -> s.startsWith(Stream.DATASTREAM_PREFIX))
.map(s -> s.substring(Stream.DATASTREAM_PREFIX.length()))
.collect(Collectors.toSet());
final Set<String> candidateIndices = indexRangesForStreamsInTimeRange(streamIds, timeRange).stream().map(IndexRange::indexName).collect(Collectors.toSet());
return Sets.union(dataStreamIndices, candidateIndices);
} | @Test
void findsIndicesBelongingToStreamsInTimeRange() {
final IndexRange indexRange1 = mockIndexRange("index1");
final IndexRange indexRange2 = mockIndexRange("index2");
final SortedSet<IndexRange> indexRanges = sortedSetOf(indexRange1, indexRange2);
final IndexLookup sut = new IndexLookup(
mockIndexRangeService(indexRanges, timeRangeWithMatchingIndexRange),
mockStreamService(streamIds),
mockIndexRangeContains(indexRange1));
Set<String> result = sut.indexNamesForStreamsInTimeRange(streamIds, timeRangeWithMatchingIndexRange);
assertThat(result).containsExactly(indexRange1.indexName());
} |
public WorkflowInstanceActionResponse stop(
String workflowId, long workflowInstanceId, long workflowRunId, User caller) {
return terminate(
workflowId, workflowInstanceId, workflowRunId, Actions.WorkflowInstanceAction.STOP, caller);
} | @Test
public void testInvalidRunId() {
when(instance.getWorkflowRunId()).thenReturn(2L);
when(instance.getStatus()).thenReturn(WorkflowInstance.Status.IN_PROGRESS);
AssertHelper.assertThrows(
"run id has to be the latest one",
MaestroBadRequestException.class,
"Cannot STOP the workflow instance run [1] as it is not the latest run [2]",
() -> actionHandler.stop("test-workflow", 1, 1, user));
} |
@Override
public TransformResultMetadata getResultMetadata() {
return _resultMetadata;
} | @Test
public void testArrayIndexOfAllLong() {
ExpressionContext expression = RequestContextUtils.getExpression(
String.format("array_indexes_of_long(%s, 1)", LONG_MV_COLUMN_2));
TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
assertTrue(transformFunction instanceof ScalarTransformFunctionWrapper);
assertEquals(transformFunction.getResultMetadata().getDataType(), DataType.INT);
assertFalse(transformFunction.getResultMetadata().isSingleValue());
int[][] expectedValues = new int[NUM_ROWS][];
for (int i = 0; i < NUM_ROWS; i++) {
int len = _longMV2Values[i].length;
int[] expectedValue = new int[len];
for (int j = 0; j < len; j++) {
expectedValue[j] = j;
}
expectedValues[i] = expectedValue;
}
testTransformFunctionMV(transformFunction, expectedValues);
} |
public Certificate add(CvCertificate cert) {
final Certificate db = Certificate.from(cert);
if (repository.countByIssuerAndSubject(db.getIssuer(), db.getSubject()) > 0) {
throw new ClientException(String.format(
"Certificate of subject %s and issuer %s already exists", db.getSubject(), db.getIssuer()));
}
// Special case for first CVCA certificate for this document type
if (db.getType() == Certificate.Type.CVCA
&& repository.countByDocumentTypeAndType(db.getDocumentType(), db.getType()) == 0) {
signatureService.verify(cert, cert.getBody().getPublicKey(), cert.getBody().getPublicKey().getParams());
logger.warn("Added first CVCA certificate for {}, set trusted flag manually", db.getDocumentType());
} else {
verify(cert);
if (db.getType() == Certificate.Type.AT) {
verifyPublicKey(cert);
}
}
return repository.saveAndFlush(db);
} | @Test
public void shouldNotAddCertificateIfFirstButInvalidSignature() throws Exception {
final byte[] der = readFixture("rdw/acc/cvca.cvcert");
der[461] = 2;
ClientException thrown = assertThrows(ClientException.class, () -> service.add(mapper.read(der, CvCertificate.class)));
assertEquals("Invalid signature", thrown.getMessage());
} |
@Override
public DistroDataResponse handle(DistroDataRequest request, RequestMeta meta) throws NacosException {
try {
switch (request.getDataOperation()) {
case VERIFY:
return handleVerify(request.getDistroData(), meta);
case SNAPSHOT:
return handleSnapshot();
case ADD:
case CHANGE:
case DELETE:
return handleSyncData(request.getDistroData());
case QUERY:
return handleQueryData(request.getDistroData());
default:
return new DistroDataResponse();
}
} catch (Exception e) {
Loggers.DISTRO.error("[DISTRO-FAILED] distro handle with exception", e);
DistroDataResponse result = new DistroDataResponse();
result.setErrorCode(ResponseCode.FAIL.getCode());
result.setMessage("handle distro request with exception");
return result;
}
} | @Test
void testHandle() throws NacosException {
Mockito.when(distroProtocol.onVerify(Mockito.any(), Mockito.anyString())).thenReturn(false);
DistroDataRequest distroDataRequest = new DistroDataRequest();
distroDataRequest.setDataOperation(VERIFY);
RequestMeta requestMeta = new RequestMeta();
DistroDataResponse response = distroDataRequestHandler.handle(distroDataRequest, requestMeta);
assertEquals(response.getErrorCode(), ResponseCode.FAIL.getCode());
DistroData distroData = new DistroData();
Mockito.when(distroProtocol.onSnapshot(Mockito.any())).thenReturn(distroData);
distroDataRequest.setDataOperation(SNAPSHOT);
DistroDataResponse response1 = distroDataRequestHandler.handle(distroDataRequest, requestMeta);
assertEquals(response1.getDistroData(), distroData);
distroDataRequest.setDataOperation(DELETE);
Mockito.when(distroProtocol.onReceive(Mockito.any())).thenReturn(false);
DistroDataResponse response2 = distroDataRequestHandler.handle(distroDataRequest, requestMeta);
assertEquals(response2.getErrorCode(), ResponseCode.FAIL.getCode());
distroDataRequest.setDataOperation(QUERY);
Mockito.when(distroProtocol.onQuery(Mockito.any())).thenReturn(distroData);
distroDataRequest.setDistroData(new DistroData());
DistroDataResponse response3 = distroDataRequestHandler.handle(distroDataRequest, requestMeta);
assertEquals(response3.getDistroData(), distroData);
distroDataRequest.setDataOperation(ADD);
DistroDataResponse response4 = distroDataRequestHandler.handle(distroDataRequest, requestMeta);
assertNull(response4.getDistroData());
} |
public void isEmpty() {
if (actual == null) {
failWithActual(simpleFact("expected an empty string"));
} else if (!actual.isEmpty()) {
failWithActual(simpleFact("expected to be empty"));
}
} | @Test
public void stringIsEmpty() {
assertThat("").isEmpty();
} |
@Override
public OverlayData createOverlayData(ComponentName remoteApp) {
if (!OS_SUPPORT_FOR_ACCENT) {
return EMPTY;
}
try {
final ActivityInfo activityInfo =
mLocalContext
.getPackageManager()
.getActivityInfo(remoteApp, PackageManager.GET_META_DATA);
final Context context =
mLocalContext.createPackageContext(remoteApp.getPackageName(), CONTEXT_IGNORE_SECURITY);
context.setTheme(activityInfo.getThemeResource());
fetchRemoteColors(mCurrentOverlayData, context);
Logger.d(
"OverlyDataCreatorForAndroid",
"For component %s we fetched %s",
remoteApp,
mCurrentOverlayData);
return mCurrentOverlayData;
} catch (Exception e) {
Logger.w("OverlyDataCreatorForAndroid", e, "Failed to fetch colors for %s", remoteApp);
return EMPTY;
}
} | @Test
public void testGetRawColorsHappyPath() throws Exception {
setupReturnedColors(R.style.HappyPathRawColors);
final OverlayData overlayData = mUnderTest.createOverlayData(mComponentName);
Assert.assertEquals(Color.parseColor("#ffcc9900"), overlayData.getPrimaryColor());
    // notice: we are also changing the alpha channel
Assert.assertEquals(Color.parseColor("#ffcc9911"), overlayData.getPrimaryDarkColor());
Assert.assertEquals(Color.parseColor("#ff0099cc"), overlayData.getPrimaryTextColor());
Assert.assertTrue(overlayData.isValid());
} |
@Override
public void addConfigListener(String dataId, ConfigurationChangeListener listener) {
if (StringUtils.isBlank(dataId) || listener == null) {
return;
}
configListenersMap.computeIfAbsent(dataId, key -> ConcurrentHashMap.newKeySet())
.add(listener);
listenedConfigMap.put(dataId, ConfigurationFactory.getInstance().getConfig(dataId));
// Start config change listener for the dataId.
fileListener.addListener(dataId, listener);
} | @Test
void addConfigListener() throws InterruptedException {
logger.info("addConfigListener");
ConfigurationFactory.reload();
Configuration fileConfig = ConfigurationFactory.getInstance();
CountDownLatch countDownLatch = new CountDownLatch(1);
String dataId = "service.disableGlobalTransaction";
boolean value = fileConfig.getBoolean(dataId);
fileConfig.addConfigListener(dataId, (CachedConfigurationChangeListener)event -> {
logger.info("before dataId: {}, oldValue: {}, newValue: {}", event.getDataId(), event.getOldValue(),
event.getNewValue());
Assertions.assertEquals(Boolean.parseBoolean(event.getNewValue()),
!Boolean.parseBoolean(event.getOldValue()));
logger.info("after dataId: {}, oldValue: {}, newValue: {}", event.getDataId(), event.getOldValue(),
event.getNewValue());
countDownLatch.countDown();
});
System.setProperty(dataId, String.valueOf(!value));
logger.info(System.currentTimeMillis()+", dataId: {}, oldValue: {}", dataId, value);
countDownLatch.await(60,TimeUnit.SECONDS);
logger.info(System.currentTimeMillis()+", dataId: {}, currenValue: {}", dataId, fileConfig.getBoolean(dataId));
Assertions.assertNotEquals(fileConfig.getBoolean(dataId), value);
//wait for loop safety, loop time is LISTENER_CONFIG_INTERVAL=1s
CountDownLatch countDownLatch2 = new CountDownLatch(1);
fileConfig.addConfigListener("file.listener.enabled", (CachedConfigurationChangeListener)event -> {
if (!Boolean.parseBoolean(event.getNewValue())) {
countDownLatch2.countDown();
}
});
System.setProperty("file.listener.enabled", "false");
countDownLatch2.await(10, TimeUnit.SECONDS);
System.setProperty(dataId, String.valueOf(value));
        //sleep for a period of time to simulate waiting for a cache refresh. Actually, it doesn't trigger.
Thread.sleep(1000);
boolean currentValue = fileConfig.getBoolean(dataId);
Assertions.assertNotEquals(value, currentValue);
System.setProperty(dataId, String.valueOf(!value));
} |
public Map<String, Collection<Class<? extends ShardingSphereRule>>> getInUsedStorageUnitNameAndRulesMap() {
Map<String, Collection<Class<? extends ShardingSphereRule>>> result = new LinkedHashMap<>();
for (ShardingSphereRule each : rules) {
Optional<DataSourceMapperRuleAttribute> ruleAttribute = each.getAttributes().findAttribute(DataSourceMapperRuleAttribute.class);
if (ruleAttribute.isPresent()) {
mergeInUsedStorageUnitNameAndRules(result, getInUsedStorageUnitNameAndRulesMap(each, getInUsedStorageUnitNames(ruleAttribute.get())));
continue;
}
each.getAttributes().findAttribute(DataNodeRuleAttribute.class)
.ifPresent(optional -> mergeInUsedStorageUnitNameAndRules(result, getInUsedStorageUnitNameAndRulesMap(each, getInUsedStorageUnitNames(optional))));
}
return result;
} | @Test
void assertGetInUsedStorageUnitNameAndRulesMapWhenRulesAreEmpty() {
assertTrue(new RuleMetaData(Collections.emptyList()).getInUsedStorageUnitNameAndRulesMap().isEmpty());
} |
static BytecodeExpression greaterThanOrEqual(BytecodeExpression left, BytecodeExpression right)
{
checkArgumentTypes(left, right);
OpCode comparisonInstruction;
OpCode noMatchJumpInstruction;
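        // ints use a fused compare-and-branch opcode (IF_ICMPLT), while long/float/double first push a
        // comparison result (LCMP/FCMPL/DCMPL) and then branch on it with IFLT.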
Class<?> type = left.getType().getPrimitiveType();
if (type == int.class) {
comparisonInstruction = null;
noMatchJumpInstruction = IF_ICMPLT;
}
else if (type == long.class) {
comparisonInstruction = LCMP;
noMatchJumpInstruction = IFLT;
}
else if (type == float.class) {
comparisonInstruction = FCMPL;
noMatchJumpInstruction = IFLT;
}
else if (type == double.class) {
comparisonInstruction = DCMPL;
noMatchJumpInstruction = IFLT;
}
else {
throw new IllegalArgumentException("Greater than or equal does not support " + type);
}
return new ComparisonBytecodeExpression(">=", comparisonInstruction, noMatchJumpInstruction, left, right);
} | @Test
public void testGreaterThanOrEqual()
throws Exception
{
assertBytecodeExpression(greaterThanOrEqual(constantInt(3), constantInt(7)), 3 >= 7, "(3 >= 7)");
assertBytecodeExpression(greaterThanOrEqual(constantInt(7), constantInt(3)), 7 >= 3, "(7 >= 3)");
assertBytecodeExpression(greaterThanOrEqual(constantInt(7), constantInt(7)), 7 >= 7, "(7 >= 7)");
assertBytecodeExpression(greaterThanOrEqual(constantLong(3L), constantLong(7L)), 3L >= 7L, "(3L >= 7L)");
assertBytecodeExpression(greaterThanOrEqual(constantLong(7L), constantLong(3L)), 7L >= 3L, "(7L >= 3L)");
assertBytecodeExpression(greaterThanOrEqual(constantLong(7L), constantLong(7L)), 7L >= 7L, "(7L >= 7L)");
assertBytecodeExpression(greaterThanOrEqual(constantFloat(3.3f), constantFloat(7.7f)), 3.3f >= 7.7f, "(3.3f >= 7.7f)");
assertBytecodeExpression(greaterThanOrEqual(constantFloat(7.7f), constantFloat(3.3f)), 7.7f >= 3.3f, "(7.7f >= 3.3f)");
assertBytecodeExpression(greaterThanOrEqual(constantFloat(7.7f), constantFloat(7.7f)), 7.7f >= 7.7f, "(7.7f >= 7.7f)");
assertBytecodeExpression(greaterThanOrEqual(constantFloat(Float.NaN), constantFloat(7.7f)), Float.NaN >= 7.7f, "(NaNf >= 7.7f)");
assertBytecodeExpression(greaterThanOrEqual(constantFloat(7.7f), constantFloat(Float.NaN)), 7.7f >= Float.NaN, "(7.7f >= NaNf)");
assertBytecodeExpression(greaterThanOrEqual(constantDouble(3.3), constantDouble(7.7)), 3.3 >= 7.7, "(3.3 >= 7.7)");
assertBytecodeExpression(greaterThanOrEqual(constantDouble(7.7), constantDouble(3.3)), 7.7 >= 3.3, "(7.7 >= 3.3)");
assertBytecodeExpression(greaterThanOrEqual(constantDouble(7.7), constantDouble(7.7)), 7.7 >= 7.7, "(7.7 >= 7.7)");
assertBytecodeExpression(greaterThanOrEqual(constantDouble(Double.NaN), constantDouble(7.7)), Double.NaN >= 7.7, "(NaN >= 7.7)");
assertBytecodeExpression(greaterThanOrEqual(constantDouble(7.7), constantDouble(Double.NaN)), 7.7 >= Double.NaN, "(7.7 >= NaN)");
} |
public MultiMap<Value, T, List<T>> search() {
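        // A negated matcher returns every entry except those mapped to the matcher's value (when present);
        // otherwise only the exact-match submap [value, value] is returned.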
if (matcher.isNegate()) {
if (map.containsKey(matcher.getValue())) {
return MultiMap.merge(map.subMap(map.firstKey(), true,
matcher.getValue(), false),
map.subMap(matcher.getValue(), false,
map.lastKey(), true));
} else {
return map;
}
} else {
return map.subMap(matcher.getValue(), true,
matcher.getValue(), true);
}
} | @Test
void testNegatedNullSearch() throws Exception {
search = new ExactMatcherSearch<>(new ExactMatcher(KeyDefinition.newKeyDefinition().withId("value").build(),
null,
true),
map);
MultiMap<Value, Object, List<Object>> search1 = search.search();
assertThat(search1.get(new Value("helloKey")).get(0)).isEqualTo("hello");
} |
public static Stream<MediaType> parseList(String mediaTypeList) {
if (mediaTypeList == null || mediaTypeList.isEmpty()) throw CONTAINER.missingMediaType();
Matcher matcher = TREE_PATTERN.matcher(mediaTypeList);
List<MediaType> list = new ArrayList<>();
while (true) {
MediaType mediaType = parseSingleMediaType(mediaTypeList, matcher, true);
list.add(mediaType);
if (matcher.end() == mediaTypeList.length())
break;
matcher.region(matcher.end(), mediaTypeList.length());
}
list.sort(Comparator.comparingDouble(MediaType::getWeight).reversed());
return list.stream();
} | @Test
public void testParseList() {
List<MediaType> mediaTypes = MediaType.parseList("text/html, image/png,*/*").collect(Collectors.toList());
assertEquals(asList(MediaType.TEXT_HTML, MediaType.IMAGE_PNG, MediaType.MATCH_ALL), mediaTypes);
Exceptions.expectException(EncodingException.class, () -> MediaType.parseList("text/html,"));
Exceptions.expectException(EncodingException.class, () -> MediaType.parseList("text/html;a=b,"));
Exceptions.expectException(EncodingException.class, () -> MediaType.parseList("text/html;a=b,;b=c"));
Exceptions.expectException(EncodingException.class, () -> MediaType.parseList("text/html;a=b,image/png/"));
List<MediaType> mediaTypes2 = MediaType.parseList("text/html;a=\"b,\" ,*/*").collect(Collectors.toList());
assertEquals(asList(MediaType.TEXT_HTML.withParameter("a", "\"b,\""), MediaType.MATCH_ALL), mediaTypes2);
} |
@Override
public void deleteCategory(Long id) {
        // Verify the category exists
validateProductCategoryExists(id);
        // Verify there are no remaining child categories
if (productCategoryMapper.selectCountByParentId(id) > 0) {
throw exception(CATEGORY_EXISTS_CHILDREN);
}
        // Verify the category has no bound SPUs
Long spuCount = productSpuService.getSpuCountByCategoryId(id);
if (spuCount > 0) {
throw exception(CATEGORY_HAVE_BIND_SPU);
}
        // Delete
productCategoryMapper.deleteById(id);
} | @Test
public void testDeleteCategory_success() {
        // Mock data
ProductCategoryDO dbCategory = randomPojo(ProductCategoryDO.class);
        productCategoryMapper.insert(dbCategory); // @Sql: insert an existing record first
        // Prepare parameters
Long id = dbCategory.getId();
        // Invoke
productCategoryService.deleteCategory(id);
        // Verify the data no longer exists
assertNull(productCategoryMapper.selectById(id));
} |
static JobManagerProcessSpec processSpecFromConfig(Configuration config) {
return createMemoryProcessSpec(PROCESS_MEMORY_UTILS.memoryProcessSpecFromConfig(config));
} | @Test
void testConfigJvmHeapMemory() {
MemorySize jvmHeapSize = MemorySize.parse("50m");
Configuration conf = new Configuration();
conf.set(JobManagerOptions.JVM_HEAP_MEMORY, jvmHeapSize);
JobManagerProcessSpec jobManagerProcessSpec =
JobManagerProcessUtils.processSpecFromConfig(conf);
assertThat(jobManagerProcessSpec.getJvmHeapMemorySize()).isEqualTo(jvmHeapSize);
} |
@VisibleForTesting
Iterable<SnapshotContext> getSnapshots(Iterable<Snapshot> snapshots, SnapshotContext since) {
List<SnapshotContext> result = Lists.newArrayList();
boolean foundSince = Objects.isNull(since);
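    // Walk the snapshot history in order: skip entries until 'since' is found, then collect everything after it.
    // If 'since' was given but never found, return an empty list.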
for (Snapshot snapshot : snapshots) {
if (!foundSince) {
if (snapshot.snapshotId() == since.getSnapshotId()) {
foundSince = true;
}
} else {
result.add(toSnapshotContext(snapshot));
}
}
return foundSince ? result : Collections.emptyList();
} | @Test
public void testGetSnapshotContextsReturnsEmptyIterableWhenTableIsEmptyAndGivenSnapShotIsNull() {
HiveIcebergStorageHandler storageHandler = new HiveIcebergStorageHandler();
Iterable<SnapshotContext> result = storageHandler.getSnapshots(Collections.emptyList(), null);
assertThat(result.iterator().hasNext(), is(false));
} |
public void writeUbyte(int value) throws IOException {
if (value < 0 || value > 0xFF) {
throw new ExceptionWithContext("Unsigned byte value out of range: %d", value);
}
write(value);
} | @Test
public void testWriteUbyte() throws IOException {
writer.writeUbyte(0);
writer.writeUbyte(1);
writer.writeUbyte(0x12);
writer.writeUbyte(0xFF);
expectData(0x00, 0x01, 0x12, 0xFF);
} |
@Override
public int choosePartition(Message msg, TopicMetadata metadata) {
// if key is specified, we should use key as routing;
// if key is not specified and no sequence id is provided, not an effectively-once publish, use the default
// round-robin routing.
if (msg.hasKey() || msg.getSequenceId() < 0) {
// TODO: the message key routing is problematic at this moment.
// https://github.com/apache/pulsar/pull/1029 is fixing that.
return super.choosePartition(msg, metadata);
}
// if there is no key and sequence id is provided, it is an effectively-once publish, we need to ensure
// for a given message it always go to one partition, so we use sequence id to do a deterministic routing.
return (int) (msg.getSequenceId() % metadata.numPartitions());
} | @Test
public void testChoosePartitionWithoutKeySequenceId() {
TopicMetadata topicMetadata = mock(TopicMetadata.class);
when(topicMetadata.numPartitions()).thenReturn(5);
Clock clock = mock(Clock.class);
FunctionResultRouter router = new FunctionResultRouter(0, clock);
for (int i = 0; i < 10; i++) {
Message<?> msg = mock(Message.class);
when(msg.hasKey()).thenReturn(false);
when(msg.getKey()).thenReturn(null);
when(msg.getSequenceId()).thenReturn((long) (2 * i));
assertEquals((2 * i) % 5, router.choosePartition(msg, topicMetadata));
}
} |
@Override
    @DataPermission(enable = false) // No data permission check is needed when sending SMS
public Long sendSingleSmsToAdmin(String mobile, Long userId, String templateCode, Map<String, Object> templateParams) {
        // If mobile is empty, load the mobile number of the given user id
if (StrUtil.isEmpty(mobile)) {
AdminUserDO user = adminUserService.getUser(userId);
if (user != null) {
mobile = user.getMobile();
}
}
        // Perform the send
return sendSingleSms(mobile, userId, UserTypeEnum.ADMIN.getValue(), templateCode, templateParams);
} | @Test
public void testSendSingleSmsToAdmin() {
        // Prepare parameters
Long userId = randomLongId();
String templateCode = randomString();
Map<String, Object> templateParams = MapUtil.<String, Object>builder().put("code", "1234")
.put("op", "login").build();
        // Mock the adminUserService methods
AdminUserDO user = randomPojo(AdminUserDO.class, o -> o.setMobile("15601691300"));
when(adminUserService.getUser(eq(userId))).thenReturn(user);
        // Mock the SmsTemplateService methods
SmsTemplateDO template = randomPojo(SmsTemplateDO.class, o -> {
o.setStatus(CommonStatusEnum.ENABLE.getStatus());
o.setContent("验证码为{code}, 操作为{op}");
o.setParams(Lists.newArrayList("code", "op"));
});
when(smsTemplateService.getSmsTemplateByCodeFromCache(eq(templateCode))).thenReturn(template);
String content = randomString();
when(smsTemplateService.formatSmsTemplateContent(eq(template.getContent()), eq(templateParams)))
.thenReturn(content);
        // Mock the SmsChannelService methods
SmsChannelDO smsChannel = randomPojo(SmsChannelDO.class, o -> o.setStatus(CommonStatusEnum.ENABLE.getStatus()));
when(smsChannelService.getSmsChannel(eq(template.getChannelId()))).thenReturn(smsChannel);
        // Mock the SmsLogService methods
Long smsLogId = randomLongId();
when(smsLogService.createSmsLog(eq(user.getMobile()), eq(userId), eq(UserTypeEnum.ADMIN.getValue()), eq(Boolean.TRUE), eq(template),
eq(content), eq(templateParams))).thenReturn(smsLogId);
        // Invoke
Long resultSmsLogId = smsSendService.sendSingleSmsToAdmin(null, userId, templateCode, templateParams);
        // Assert
assertEquals(smsLogId, resultSmsLogId);
        // Assert invocations
verify(smsProducer).sendSmsSendMessage(eq(smsLogId), eq(user.getMobile()),
eq(template.getChannelId()), eq(template.getApiTemplateId()),
eq(Lists.newArrayList(new KeyValue<>("code", "1234"), new KeyValue<>("op", "login"))));
} |
@Override public Message receive() {
Message message = delegate.receive();
handleReceive(message);
return message;
} | @Test void receive_continues_parent_trace_single_header() throws Exception {
ActiveMQTextMessage message = new ActiveMQTextMessage();
message.setStringProperty("b3", B3SingleFormat.writeB3SingleFormatWithoutParentId(parent));
receive(message);
    // Ensure the current span is on the message, not the parent
MutableSpan consumer = testSpanHandler.takeRemoteSpan(CONSUMER);
assertChildOf(consumer, parent);
TraceContext messageContext = parseB3SingleFormat(message.getStringProperty("b3")).context();
assertThat(messageContext.traceIdString()).isEqualTo(consumer.traceId());
assertThat(messageContext.spanIdString()).isEqualTo(consumer.id());
} |
@Override
public double getDoubleValue() {
checkValueType(DOUBLE);
return measure.getDoubleValue();
} | @Test
public void fail_with_ISE_when_not_double_value() {
assertThatThrownBy(() -> {
MeasureImpl measure = new MeasureImpl(Measure.newMeasureBuilder().create(1));
measure.getDoubleValue();
})
.isInstanceOf(IllegalStateException.class)
.hasMessage("Value can not be converted to double because current value type is a INT");
} |
@Override
public void commitJob(JobContext jobContext) throws IOException {
Configuration conf = jobContext.getConfiguration();
syncFolder = conf.getBoolean(DistCpConstants.CONF_LABEL_SYNC_FOLDERS, false);
overwrite = conf.getBoolean(DistCpConstants.CONF_LABEL_OVERWRITE, false);
updateRoot =
conf.getBoolean(CONF_LABEL_UPDATE_ROOT, false);
targetPathExists = conf.getBoolean(
DistCpConstants.CONF_LABEL_TARGET_PATH_EXISTS, true);
ignoreFailures = conf.getBoolean(
DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(), false);
if (blocksPerChunk > 0) {
concatFileChunks(conf);
}
super.commitJob(jobContext);
cleanupTempFiles(jobContext);
try {
if (conf.getBoolean(DistCpConstants.CONF_LABEL_DELETE_MISSING, false)) {
deleteMissing(conf);
} else if (conf.getBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY, false)) {
commitData(conf);
} else if (conf.get(CONF_LABEL_TRACK_MISSING) != null) {
// save missing information to a directory
trackMissing(conf);
}
// for HDFS-14621, should preserve status after -delete
String attributes = conf.get(DistCpConstants.CONF_LABEL_PRESERVE_STATUS);
final boolean preserveRawXattrs = conf.getBoolean(
DistCpConstants.CONF_LABEL_PRESERVE_RAWXATTRS, false);
if ((attributes != null && !attributes.isEmpty()) || preserveRawXattrs) {
preserveFileAttributesForDirectories(conf);
}
taskAttemptContext.setStatus("Commit Successful");
}
finally {
cleanup(conf);
}
} | @Test
public void testAtomicCommitExistingFinal() throws IOException {
TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
JobContext jobContext = new JobContextImpl(taskAttemptContext.getConfiguration(),
taskAttemptContext.getTaskAttemptID().getJobID());
Configuration conf = jobContext.getConfiguration();
String workPath = "/tmp1/" + String.valueOf(rand.nextLong());
String finalPath = "/tmp1/" + String.valueOf(rand.nextLong());
FileSystem fs = null;
try {
OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
fs = FileSystem.get(conf);
fs.mkdirs(new Path(workPath));
fs.mkdirs(new Path(finalPath));
conf.set(CONF_LABEL_TARGET_WORK_PATH, workPath);
conf.set(CONF_LABEL_TARGET_FINAL_PATH, finalPath);
conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY, true);
assertPathExists(fs, "Work path", new Path(workPath));
assertPathExists(fs, "Final path", new Path(finalPath));
try {
committer.commitJob(jobContext);
Assert.fail("Should not be able to atomic-commit to pre-existing path.");
} catch(Exception exception) {
assertPathExists(fs, "Work path", new Path(workPath));
assertPathExists(fs, "Final path", new Path(finalPath));
LOG.info("Atomic-commit Test pass.");
}
} finally {
TestDistCpUtils.delete(fs, workPath);
TestDistCpUtils.delete(fs, finalPath);
conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY, false);
}
} |
@Override
public String getName() {
if (_distinctResult == 1) {
return TransformFunctionType.IS_DISTINCT_FROM.getName();
}
return TransformFunctionType.IS_NOT_DISTINCT_FROM.getName();
} | @Test
public void testDistinctFromRightNull()
throws Exception {
ExpressionContext expression =
RequestContextUtils.getExpression(String.format(_expression, INT_SV_COLUMN, INT_SV_NULL_COLUMN));
TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
Assert.assertEquals(transformFunction.getName(), _isDistinctFrom ? "is_distinct_from" : "is_not_distinct_from");
boolean[] expectedIntValues = new boolean[NUM_ROWS];
for (int i = 0; i < NUM_ROWS; i++) {
if (isEqualRow(i)) {
expectedIntValues[i] = !_isDistinctFrom;
} else if (isNotEqualRow(i)) {
expectedIntValues[i] = _isDistinctFrom;
} else if (isNullRow(i)) {
expectedIntValues[i] = _isDistinctFrom;
}
}
testTransformFunction(expression, expectedIntValues, _projectionBlock, _dataSourceMap);
} |
@Override
@SuppressWarnings("unchecked")
public int run() throws IOException {
RewriteOptions options = buildOptionsOrFail();
ParquetRewriter rewriter = new ParquetRewriter(options);
rewriter.processBlocks();
rewriter.close();
return 0;
} | @Test
public void testRewriteCommandWithOverwrite() throws IOException {
File file = parquetFile();
RewriteCommand command = new RewriteCommand(createLogger());
command.inputs = Arrays.asList(file.getAbsolutePath());
File output = new File(getTempFolder(), "converted.parquet");
command.output = output.getAbsolutePath();
command.overwrite = true;
command.setConf(new Configuration());
Files.createFile(output.toPath());
Assert.assertEquals(0, command.run());
Assert.assertTrue(output.exists());
} |
public void save(final ScheduleTask scheduleTask) {
String sql = "INSERT INTO schedule_task (task_id, execution_time, status) VALUES (:taskId, :executionTime, :status)";
MapSqlParameterSource parameters = new MapSqlParameterSource()
.addValue("taskId", scheduleTask.getTaskId())
.addValue("executionTime", scheduleTask.getExecutionTime())
.addValue("status", scheduleTask.getStatus().toString());
namedParameterJdbcTemplate.update(sql, parameters);
} | @Test
void 태스크를_저장한다() {
// when & then
Assertions.assertDoesNotThrow(() -> scheduleTaskJdbcRepository.save(스케줄_생성_진행중()));
} |
@Override
public Path move(final Path source, final Path target, final TransferStatus status, final Delete.Callback callback,
final ConnectionCallback connectionCallback) throws BackgroundException {
if(containerService.isContainer(source)) {
if(new SimplePathPredicate(source.getParent()).test(target.getParent())) {
// Rename only
return proxy.move(source, target, status, callback, connectionCallback);
}
}
if(new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(source) ^ new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(containerService.getContainer(target))) {
// Moving into or from an encrypted room
final Copy copy = new SDSDelegatingCopyFeature(session, nodeid, new SDSCopyFeature(session, nodeid));
if(log.isDebugEnabled()) {
log.debug(String.format("Move %s to %s using copy feature %s", source, target, copy));
}
final Path c = copy.copy(source, target, status, connectionCallback, new DisabledStreamListener());
// Delete source file after copy is complete
final Delete delete = new SDSDeleteFeature(session, nodeid);
if(delete.isSupported(source)) {
log.warn(String.format("Delete source %s copied to %s", source, target));
delete.delete(Collections.singletonMap(source, status), connectionCallback, callback);
}
return c;
}
else {
return proxy.move(source, target, status, callback, connectionCallback);
}
} | @Test
public void testMoveBetweenEncryptedDataRooms() throws Exception {
final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
final Path room1 = new SDSDirectoryFeature(session, nodeid).createRoom(
new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), true);
room1.setAttributes(new SDSAttributesFinderFeature(session, nodeid).find(room1));
final Path room2 = new SDSDirectoryFeature(session, nodeid).mkdir(new Path(
new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final EncryptRoomRequest encrypt = new EncryptRoomRequest().isEncrypted(true);
new NodesApi(session.getClient()).encryptRoom(encrypt, Long.parseLong(new SDSNodeIdProvider(session).getVersionId(room2)), StringUtils.EMPTY, null);
room2.attributes().withCustom(KEY_ENCRYPTED, String.valueOf(true));
final byte[] content = RandomUtils.nextBytes(32769);
final TransferStatus status = new TransferStatus();
status.setLength(content.length);
final Path test = new Path(room1, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final SDSEncryptionBulkFeature bulk = new SDSEncryptionBulkFeature(session, nodeid);
bulk.pre(Transfer.Type.upload, Collections.singletonMap(new TransferItem(test), status), new DisabledConnectionCallback());
final TripleCryptWriteFeature writer = new TripleCryptWriteFeature(session, nodeid, new SDSDirectS3MultipartWriteFeature(session, nodeid));
final StatusOutputStream<Node> out = writer.write(test, status, new DisabledConnectionCallback());
new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out);
final Path target = new Path(room2, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new SDSDelegatingMoveFeature(session, nodeid, new SDSMoveFeature(session, nodeid)).move(test, target, new TransferStatus().withLength(content.length), new Delete.DisabledCallback(), new DisabledConnectionCallback() {
@Override
public void warn(final Host bookmark, final String title, final String message, final String defaultButton, final String cancelButton, final String preference) {
//
}
@Override
public Credentials prompt(final Host bookmark, final String title, final String reason, final LoginOptions options) {
return new VaultCredentials("eth[oh8uv4Eesij");
}
});
test.attributes().setVersionId(null);
assertFalse(new SDSFindFeature(session, nodeid).find(test));
assertTrue(new SDSFindFeature(session, nodeid).find(target));
final byte[] compare = new byte[content.length];
final InputStream stream = new TripleCryptReadFeature(session, nodeid, new SDSReadFeature(session, nodeid)).read(target, new TransferStatus().withLength(content.length), new DisabledConnectionCallback() {
@Override
public void warn(final Host bookmark, final String title, final String message, final String defaultButton, final String cancelButton, final String preference) {
//
}
@Override
public Credentials prompt(final Host bookmark, final String title, final String reason, final LoginOptions options) {
return new VaultCredentials("eth[oh8uv4Eesij");
}
});
IOUtils.readFully(stream, compare);
stream.close();
assertArrayEquals(content, compare);
new SDSDeleteFeature(session, nodeid).delete(Arrays.asList(room1, room2), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
@Udf(description = "Returns a new string with all occurrences of oldStr in str replaced with newStr")
public String replace(
@UdfParameter(
description = "The source string. If null, then function returns null.") final String str,
@UdfParameter(
description = "The substring to replace."
+ " If null, then function returns null.") final String oldStr,
@UdfParameter(
description = "The string to replace the old substrings with."
+ " If null, then function returns null.") final String newStr) {
if (str == null || oldStr == null || newStr == null) {
return null;
}
return str.replace(oldStr, newStr);
} | @Test
public void shouldHandleNull() {
assertThat(udf.replace(null, "foo", "bar"), isEmptyOrNullString());
assertThat(udf.replace("foo", null, "bar"), isEmptyOrNullString());
assertThat(udf.replace("foo", "bar", null), isEmptyOrNullString());
} |
Callable<Path> download(Path destDirPath, LocalResource rsrc,
UserGroupInformation ugi) throws IOException {
// For private localization FsDownload creates folder in destDirPath. Parent
// directories till user filecache folder is created here.
if (rsrc.getVisibility() == LocalResourceVisibility.PRIVATE) {
createParentDirs(destDirPath);
}
diskValidator
.checkStatus(new File(destDirPath.getParent().toUri().getRawPath()));
return new FSDownloadWrapper(lfs, ugi, conf, destDirPath, rsrc);
} | @Test(timeout = 10000)
public void testUserCacheDirPermission() throws Exception {
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
FileContext lfs = FileContext.getLocalFSFileContext(conf);
Path fileCacheDir = lfs.makeQualified(new Path(basedir, "filecache"));
lfs.mkdir(fileCacheDir, FsPermission.getDefault(), true);
RecordFactory recordFactory = mock(RecordFactory.class);
ContainerLocalizer localizer = new ContainerLocalizer(lfs,
UserGroupInformation.getCurrentUser().getUserName(), "application_01",
"container_01", String.format(ContainerExecutor.TOKEN_FILE_NAME_FMT,
"container_01"), new ArrayList<>(), recordFactory);
LocalResource rsrc = mock(LocalResource.class);
when(rsrc.getVisibility()).thenReturn(LocalResourceVisibility.PRIVATE);
Path destDirPath = new Path(fileCacheDir, "0/0/85");
//create one of the parent directories with the wrong permissions first
FsPermission wrongPerm = new FsPermission((short) 0700);
lfs.mkdir(destDirPath.getParent().getParent(), wrongPerm, false);
lfs.mkdir(destDirPath.getParent(), wrongPerm, false);
//Localize and check the directory permission are correct.
localizer
.download(destDirPath, rsrc, UserGroupInformation.getCurrentUser());
Assert
.assertEquals("Cache directory permissions filecache/0/0 is incorrect",
USERCACHE_DIR_PERM,
lfs.getFileStatus(destDirPath.getParent()).getPermission());
Assert.assertEquals("Cache directory permissions filecache/0 is incorrect",
USERCACHE_DIR_PERM,
lfs.getFileStatus(destDirPath.getParent().getParent()).getPermission());
} |
public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, Timer timer) {
invokeCompletedOffsetCommitCallbacks();
if (offsets.isEmpty()) {
// We guarantee that the callbacks for all commitAsync() will be invoked when
// commitSync() completes, even if the user tries to commit empty offsets.
return invokePendingAsyncCommits(timer);
}
long attempts = 0L;
do {
if (coordinatorUnknownAndUnreadySync(timer)) {
return false;
}
RequestFuture<Void> future = sendOffsetCommitRequest(offsets);
client.poll(future, timer);
// We may have had in-flight offset commits when the synchronous commit began. If so, ensure that
// the corresponding callbacks are invoked prior to returning in order to preserve the order that
// the offset commits were applied.
invokeCompletedOffsetCommitCallbacks();
if (future.succeeded()) {
if (interceptors != null)
interceptors.onCommit(offsets);
return true;
}
if (future.failed() && !future.isRetriable())
throw future.exception();
timer.sleep(retryBackoff.backoff(attempts++));
} while (timer.notExpired());
return false;
} | @Test
public void testCommitOffsetIllegalGeneration() {
// we cannot retry if a rebalance occurs before the commit completed
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.ILLEGAL_GENERATION);
assertThrows(CommitFailedException.class, () -> coordinator.commitOffsetsSync(singletonMap(t1p,
new OffsetAndMetadata(100L, "metadata")), time.timer(Long.MAX_VALUE)));
} |
public ServiceBusConfiguration getConfiguration() {
return configuration;
} | @Test
void testCreateEndpointWithAzureIdentity() throws Exception {
final String uri = "azure-servicebus://testTopicOrQueue";
final String remaining = "testTopicOrQueue";
final String fullyQualifiedNamespace = "namespace.servicebus.windows.net";
final TokenCredential credential = new DefaultAzureCredentialBuilder().build();
final Map<String, Object> params = new HashMap<>();
params.put("serviceBusType", ServiceBusType.topic);
params.put("prefetchCount", 10);
params.put("fullyQualifiedNamespace", fullyQualifiedNamespace);
params.put("credentialType", CredentialType.AZURE_IDENTITY);
final ServiceBusEndpoint endpoint
= (ServiceBusEndpoint) context.getComponent("azure-servicebus", ServiceBusComponent.class)
.createEndpoint(uri, remaining, params);
assertEquals(ServiceBusType.topic, endpoint.getConfiguration().getServiceBusType());
assertEquals("testTopicOrQueue", endpoint.getConfiguration().getTopicOrQueueName());
assertEquals(10, endpoint.getConfiguration().getPrefetchCount());
assertEquals(fullyQualifiedNamespace, endpoint.getConfiguration().getFullyQualifiedNamespace());
assertNull(endpoint.getConfiguration().getTokenCredential());
} |
public void wrap(final byte[] buffer)
{
capacity = buffer.length;
addressOffset = ARRAY_BASE_OFFSET;
byteBuffer = null;
wrapAdjustment = 0;
if (buffer != byteArray)
{
byteArray = buffer;
}
} | @Test
void shouldExposePositionAtWhichHeapByteBufferGetsWrapped()
{
final ByteBuffer wibbleByteBuffer = ByteBuffer.wrap(wibbleBytes);
shouldExposePositionAtWhichByteBufferGetsWrapped(wibbleByteBuffer);
} |
@CanDistro
@PutMapping("/beat")
@TpsControl(pointName = "HttpHealthCheck", name = "HttpHealthCheck")
@Secured(action = ActionTypes.WRITE)
@ExtractorManager.Extractor(httpExtractor = NamingInstanceBeatHttpParamExtractor.class)
public ObjectNode beat(@RequestParam(defaultValue = Constants.DEFAULT_NAMESPACE_ID) String namespaceId,
@RequestParam String serviceName, @RequestParam(defaultValue = StringUtils.EMPTY) String ip,
@RequestParam(defaultValue = UtilsAndCommons.DEFAULT_CLUSTER_NAME) String clusterName,
@RequestParam(defaultValue = "0") Integer port, @RequestParam(defaultValue = StringUtils.EMPTY) String beat)
throws Exception {
ObjectNode result = JacksonUtils.createEmptyJsonNode();
result.put(SwitchEntry.CLIENT_BEAT_INTERVAL, switchDomain.getClientBeatInterval());
RsInfo clientBeat = null;
if (StringUtils.isNotBlank(beat)) {
clientBeat = JacksonUtils.toObj(beat, RsInfo.class);
}
if (clientBeat != null) {
if (StringUtils.isNotBlank(clientBeat.getCluster())) {
clusterName = clientBeat.getCluster();
} else {
// fix #2533
clientBeat.setCluster(clusterName);
}
ip = clientBeat.getIp();
port = clientBeat.getPort();
}
NamingUtils.checkServiceNameFormat(serviceName);
Loggers.SRV_LOG.debug("[CLIENT-BEAT] full arguments: beat: {}, serviceName: {}, namespaceId: {}", clientBeat,
serviceName, namespaceId);
BeatInfoInstanceBuilder builder = BeatInfoInstanceBuilder.newBuilder();
int resultCode = instanceServiceV2.handleBeat(namespaceId, serviceName, ip, port, clusterName, clientBeat,
builder);
result.put(CommonParams.CODE, resultCode);
result.put(SwitchEntry.CLIENT_BEAT_INTERVAL,
instanceServiceV2.getHeartBeatInterval(namespaceId, serviceName, ip, port, clusterName));
result.put(SwitchEntry.LIGHT_BEAT_ENABLED, switchDomain.isLightBeatEnabled());
return result;
} | @Test
void beat() throws Exception {
MockHttpServletRequestBuilder builder = MockMvcRequestBuilders.put(
UtilsAndCommons.DEFAULT_NACOS_NAMING_CONTEXT_V2 + UtilsAndCommons.NACOS_NAMING_INSTANCE_CONTEXT + "/beat")
.param("namespaceId", TEST_NAMESPACE).param("serviceName", TEST_SERVICE_NAME).param("ip", TEST_IP)
.param("clusterName", "clusterName").param("port", "0").param("beat", "");
String actualValue = mockmvc.perform(builder).andReturn().getResponse().getContentAsString();
assertNotNull(actualValue);
} |
@Override
public Set<NodeHealth> readAll() {
long clusterTime = hzMember.getClusterTime();
long timeout = clusterTime - TIMEOUT_30_SECONDS;
Map<UUID, TimestampedNodeHealth> sqHealthState = readReplicatedMap();
Set<UUID> hzMemberUUIDs = hzMember.getMemberUuids();
Set<NodeHealth> existingNodeHealths = sqHealthState.entrySet().stream()
.filter(outOfDate(timeout))
.filter(ofNonExistentMember(hzMemberUUIDs))
.map(entry -> entry.getValue().getNodeHealth())
.collect(Collectors.toSet());
if (LOG.isTraceEnabled()) {
LOG.trace("Reading {} and keeping {}", new HashMap<>(sqHealthState), existingNodeHealths);
}
return ImmutableSet.copyOf(existingNodeHealths);
} | @Test
public void readAll_logs_map_sq_health_state_content_and_the_content_effectively_returned_if_TRACE() {
logging.setLevel(Level.TRACE);
Map<UUID, TimestampedNodeHealth> map = new HashMap<>();
UUID uuid = UUID.randomUUID();
NodeHealth nodeHealth = randomNodeHealth();
map.put(uuid, new TimestampedNodeHealth(nodeHealth, clusterTime - 1));
when(hazelcastMember.getClusterTime()).thenReturn(clusterTime);
when(hazelcastMember.getMemberUuids()).thenReturn(singleton(uuid));
doReturn(map).when(hazelcastMember).getReplicatedMap(MAP_SQ_HEALTH_STATE);
underTest.readAll();
assertThat(logging.getLogs()).hasSize(1);
assertThat(logging.hasLog(Level.TRACE, "Reading " + new HashMap<>(map) + " and keeping " + singleton(nodeHealth))).isTrue();
} |
@VisibleForTesting
void clearCurrentDirectoryChangedListenersWhenImporting( boolean importfile, JobMeta jobMeta ) {
if ( importfile ) {
jobMeta.clearCurrentDirectoryChangedListeners();
}
} | @Test
public void testClearCurrentDirectoryChangedListenersWhenNotImporting() {
JobMeta jm = mock( JobMeta.class );
jobFileListener.clearCurrentDirectoryChangedListenersWhenImporting( false, jm );
verify( jm, times( 0 ) ).clearCurrentDirectoryChangedListeners();
} |
@Override
public AverageCounter createNewCounter() {
return new AverageCounter();
} | @Test
public void check_new_counter_class() {
assertThat(BASIC_AVERAGE_FORMULA.createNewCounter().getClass()).isEqualTo(AverageFormula.AverageCounter.class);
} |
public static <K, V> WithKeys<K, V> of(SerializableFunction<V, K> fn) {
checkNotNull(
fn, "WithKeys constructed with null function. Did you mean WithKeys.of((Void) null)?");
return new WithKeys<>(fn, null);
} | @Test
public void testWithKeysGetName() {
assertEquals("WithKeys", WithKeys.<Integer, String>of(100).getName());
} |
public ServerHealthState trump(ServerHealthState otherServerHealthState) {
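        // Keep this state only when its healthStateLevel compares strictly greater; on a tie or lower level, keep the other state.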
int result = healthStateLevel.compareTo(otherServerHealthState.healthStateLevel);
return result > 0 ? this : otherServerHealthState;
} | @Test
public void shouldNotTrumpWarningIfCurrentIsSuccess() {
assertThat(WARNING_SERVER_HEALTH_STATE.trump(SUCCESS_SERVER_HEALTH_STATE), is(WARNING_SERVER_HEALTH_STATE));
} |
public void start() {
if (stateUpdaterThread == null) {
if (!restoredActiveTasks.isEmpty() || !exceptionsAndFailedTasks.isEmpty()) {
throw new IllegalStateException("State updater started with non-empty output queues. "
+ BUG_ERROR_MESSAGE);
}
stateUpdaterThread = new StateUpdaterThread(name, metrics, changelogReader);
stateUpdaterThread.start();
shutdownGate = new CountDownLatch(1);
            // initialize the last commit to now so that the first commit does not happen immediately
this.lastCommitMs = time.milliseconds();
}
} | @Test
public void shouldNotPausingNonExistTasks() throws Exception {
stateUpdater.start();
when(topologyMetadata.isPaused(null)).thenReturn(true);
verifyPausedTasks();
verifyRestoredActiveTasks();
verifyUpdatingTasks();
verifyExceptionsAndFailedTasks();
} |
@Override
public boolean put(PageId pageId, ByteBuffer page, CacheContext cacheContext) {
LOG.debug("put({},{} bytes) enters", pageId, page.remaining());
if (mState.get() != READ_WRITE) {
Metrics.PUT_NOT_READY_ERRORS.inc();
Metrics.PUT_ERRORS.inc();
return false;
}
int originPosition = page.position();
if (!mOptions.isAsyncWriteEnabled()) {
boolean ok = putInternal(pageId, page, cacheContext);
LOG.debug("put({},{} bytes) exits: {}", pageId, page.position() - originPosition, ok);
if (!ok) {
Metrics.PUT_ERRORS.inc();
}
return ok;
}
if (!mPendingRequests.add(pageId)) { // already queued
return false;
}
try {
mAsyncCacheExecutor.get().submit(() -> {
try {
boolean ok = putInternal(pageId, page, cacheContext);
if (!ok) {
Metrics.PUT_ERRORS.inc();
}
} finally {
mPendingRequests.remove(pageId);
}
});
} catch (RejectedExecutionException e) { // queue is full, skip
      // RejectedExecutionException may be thrown in extreme cases under
      // highly concurrent caching workloads. In these cases, return false
mPendingRequests.remove(pageId);
Metrics.PUT_ASYNC_REJECTION_ERRORS.inc();
Metrics.PUT_ERRORS.inc();
LOG.debug("put({},{} bytes) fails due to full queue", pageId,
page.position() - originPosition);
return false;
}
LOG.debug("put({},{} bytes) exits with async write", pageId, page.position() - originPosition);
return true;
} | @Test
public void evictBigPagesByPutSmallPage() throws Exception {
mConf.set(PropertyKey.USER_CLIENT_CACHE_SIZE, String.valueOf(PAGE_SIZE_BYTES));
mCacheManager = createLocalCacheManager();
PageId bigPageId = pageId(-1, 0);
assertTrue(mCacheManager.put(bigPageId, page(0, PAGE_SIZE_BYTES)));
int smallPageLen = 8;
long numPages = mConf.getBytes(PropertyKey.USER_CLIENT_CACHE_PAGE_SIZE) / smallPageLen;
byte[] smallPage = BufferUtils.getIncreasingByteArray(smallPageLen);
for (int i = 0; i < numPages; i++) {
PageId id = pageId(i, 0);
assertTrue(mCacheManager.put(id, smallPage));
}
} |
public static int MAXIM(@NonNull final byte[] data, final int offset, final int length) {
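        // CRC-16/MAXIM: polynomial 0x8005, initial value 0x0000, input and output reflected, final XOR 0xFFFF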
return CRC(0x8005, 0x0000, data, offset, length, true, true, 0xFFFF);
} | @Test
public void MAXIM_A() {
final byte[] data = new byte[] { 'A' };
assertEquals(0xCF3F, CRC16.MAXIM(data, 0, 1));
} |
@Override
public void setConfiguration(final Path file, final LoggingConfiguration configuration) throws BackgroundException {
final Path bucket = containerService.getContainer(file);
try {
final Storage.Buckets.Patch request = session.getClient().buckets().patch(bucket.getName(),
new Bucket().setLogging(new Bucket.Logging()
.setLogObjectPrefix(configuration.isEnabled() ? new HostPreferences(session.getHost()).getProperty("google.logging.prefix") : null)
.setLogBucket(StringUtils.isNotBlank(configuration.getLoggingTarget()) ? configuration.getLoggingTarget() : bucket.getName()))
);
if(bucket.attributes().getCustom().containsKey(GoogleStorageAttributesFinderFeature.KEY_REQUESTER_PAYS)) {
request.setUserProject(session.getHost().getCredentials().getUsername());
}
request.execute();
}
catch(IOException e) {
throw new GoogleStorageExceptionMappingService().map("Failure to write attributes of {0}", e, file);
}
} | @Test(expected = NotfoundException.class)
public void testWriteNotFound() throws Exception {
new GoogleStorageLoggingFeature(session).setConfiguration(
new Path(new AsciiRandomStringService().random(), EnumSet.of(Path.Type.directory)), new LoggingConfiguration(false)
);
} |
static Map<Integer, Schema.Field> mapFieldPositions(CSVFormat format, Schema schema) {
List<String> header = Arrays.asList(format.getHeader());
Map<Integer, Schema.Field> indexToFieldMap = new HashMap<>();
for (Schema.Field field : schema.getFields()) {
int index = getIndex(header, field);
if (index >= 0) {
indexToFieldMap.put(index, field);
}
}
return indexToFieldMap;
} | @Test
public void testHeaderWithComments() {
String[] comments = {"first line", "second line", "third line"};
Schema schema =
Schema.builder().addStringField("a_string").addStringField("another_string").build();
ImmutableMap<Integer, Schema.Field> want =
ImmutableMap.of(0, schema.getField("a_string"), 1, schema.getField("another_string"));
Map<Integer, Schema.Field> got =
CsvIOParseHelpers.mapFieldPositions(
csvFormat()
.withHeader("a_string", "another_string")
.withHeaderComments((Object) comments),
schema);
assertEquals(want, got);
} |
public final boolean isDifficultyTransitionPoint(final int previousHeight) {
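        // A transition point is reached when the height of the next block (previousHeight + 1) is a multiple of the retarget interval.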
return ((previousHeight + 1) % this.getInterval()) == 0;
} | @Test
public void isDifficultyTransitionPoint() {
assertFalse(BITCOIN_PARAMS.isDifficultyTransitionPoint(2014));
assertTrue(BITCOIN_PARAMS.isDifficultyTransitionPoint(2015));
assertFalse(BITCOIN_PARAMS.isDifficultyTransitionPoint(2016));
} |
@Override
public void listenToCluster(final String clusterName,
final LoadBalancerStateListenerCallback callback)
{
trace(_log, "listenToCluster: ", clusterName);
// wrap the callback since we need to wait for both uri and cluster listeners to
// onInit before letting the callback know that we're done.
final LoadBalancerStateListenerCallback wrappedCallback =
new LoadBalancerStateListenerCallback()
{
private final AtomicInteger _count = new AtomicInteger(2);
@Override
public void done(int type, String name)
{
if (_count.decrementAndGet() <= 0)
{
callback.done(type, clusterName);
}
}
};
_clusterSubscriber.ensureListening(clusterName, wrappedCallback);
_uriSubscriber.ensureListening(clusterName, wrappedCallback);
} | @Test(groups = { "small", "back-end" })
public void testListenToCluster() throws URISyntaxException,
InterruptedException
{
reset();
List<String> schemes = new ArrayList<>();
schemes.add("http");
assertFalse(_state.isListeningToCluster("cluster-1"));
assertNull(_state.getClusterProperties("cluster-1"));
final CountDownLatch latch = new CountDownLatch(1);
LoadBalancerStateListenerCallback callback = new LoadBalancerStateListenerCallback()
{
@Override
public void done(int type, String name)
{
latch.countDown();
}
};
_state.listenToCluster("cluster-1", callback);
if (!latch.await(5, TimeUnit.SECONDS))
{
fail("didn't get callback when listenToCluster was called");
}
assertTrue(_state.isListeningToCluster("cluster-1"));
assertNotNull(_state.getClusterProperties("cluster-1"));
assertNull(_state.getClusterProperties("cluster-1").getProperty());
ClusterProperties property = new ClusterProperties("cluster-1", schemes);
_clusterRegistry.put("cluster-1", property);
assertTrue(_state.isListeningToCluster("cluster-1"));
assertNotNull(_state.getClusterProperties("cluster-1"));
assertEquals(_state.getClusterProperties("cluster-1").getProperty(), property);
} |
@Override
public void process(Exchange exchange) throws Exception {
@SuppressWarnings("unchecked")
Map<String, Object> body = exchange.getIn().getBody(Map.class);
LdapOperation operation = endpoint.getOperation();
if (null == operation) {
throw new UnsupportedOperationException("LDAP operation must not be empty, but you provided an empty operation");
}
LdapTemplate ldapTemplate = endpoint.getLdapTemplate();
String dn = (String) body.get(DN);
boolean dnSetOnLdapTemplate = false;
ContextSource contextSource = ldapTemplate.getContextSource();
if (contextSource instanceof BaseLdapPathContextSource) {
if (ObjectHelper.isNotEmpty(((BaseLdapPathContextSource) contextSource).getBaseLdapPathAsString())) {
dn = ""; // DN already set on the ldapTemplate
dnSetOnLdapTemplate = true;
}
}
if (operation != LdapOperation.FUNCTION_DRIVEN && ObjectHelper.isEmpty(dn) && !dnSetOnLdapTemplate) {
throw new UnsupportedOperationException("DN must not be empty, but you provided an empty DN");
}
switch (operation) {
case SEARCH:
String filter = (String) body.get(FILTER);
exchange.getIn().setBody(ldapTemplate.search(dn, filter, endpoint.scopeValue(), mapper));
break;
case BIND:
Attributes attributes = (Attributes) body.get(ATTRIBUTES);
ldapTemplate.bind(dn, null, attributes);
break;
case UNBIND:
ldapTemplate.unbind(dn);
break;
case AUTHENTICATE:
ldapTemplate.authenticate(LdapQueryBuilder.query().base(dn).filter((String) body.get(FILTER)),
(String) body.get(PASSWORD));
break;
case MODIFY_ATTRIBUTES:
ModificationItem[] modificationItems = (ModificationItem[]) body.get(MODIFICATION_ITEMS);
ldapTemplate.modifyAttributes(dn, modificationItems);
break;
case FUNCTION_DRIVEN:
BiFunction<LdapOperations, Object, ?> ldapOperationFunction
= (BiFunction<LdapOperations, Object, ?>) body.get(FUNCTION);
Object ldapOperationRequest = body.get(REQUEST);
exchange.getIn().setBody(ldapOperationFunction.apply(ldapTemplate, ldapOperationRequest));
break;
default:
throw new UnsupportedOperationException(
"Bug in the Spring-LDAP component. Despite of all assertions, you managed to call an unsupported operation '"
+ operation
+ "'");
}
} | @Test
public void testEmptyExchange() throws Exception {
Exchange exchange = new DefaultExchange(context);
assertThrows(UnsupportedOperationException.class,
() -> ldapProducer.process(exchange));
} |