focal_method | test_case
---|---
@Description("Infinity")
@ScalarFunction
@SqlType(StandardTypes.DOUBLE)
public static double infinity()
{
return Double.POSITIVE_INFINITY;
}
|
@Test
public void testInfinity()
{
assertFunction("infinity()", DOUBLE, Double.POSITIVE_INFINITY);
assertFunction("-rand() / 0.0", DOUBLE, Double.NEGATIVE_INFINITY);
}
|
public static boolean equal(Number lhs, Number rhs) {
Class lhsClass = lhs.getClass();
Class rhsClass = rhs.getClass();
assert lhsClass != rhsClass;
if (isDoubleRepresentable(lhsClass)) {
if (isDoubleRepresentable(rhsClass)) {
return equalDoubles(lhs.doubleValue(), rhs.doubleValue());
} else if (isLongRepresentable(rhsClass)) {
return equalLongAndDouble(rhs.longValue(), lhs.doubleValue());
}
} else if (isLongRepresentable(lhsClass)) {
if (isDoubleRepresentable(rhsClass)) {
return equalLongAndDouble(lhs.longValue(), rhs.doubleValue());
} else if (isLongRepresentable(rhsClass)) {
return lhs.longValue() == rhs.longValue();
}
}
return lhs.equals(rhs);
}
|
@Test
public void testEqual() {
assertNotEqual(1L, 2);
assertEqual(1, 1L);
assertEqual(1, (short) 1);
assertEqual(1, (byte) 1);
assertNotEqual(new AtomicLong(1), new AtomicInteger(1));
assertNotEqual(1, 1.1);
// 1.100000000000000088817841970012523233890533447265625 != 1.10000002384185791015625
assertNotEqual(1.1, 1.1F);
assertEqual(1, 1.0);
assertEqual(1, 1.0F);
assertEqual(1.0F, 1.0);
assertEqual(1.5F, 1.5);
assertEqual(1.1F, (double) 1.1F);
assertEqual(0, 0.0);
assertNotEqual(0, -0.0);
assertNotEqual(Long.MIN_VALUE, Double.NEGATIVE_INFINITY);
assertNotEqual(Long.MAX_VALUE, Double.POSITIVE_INFINITY);
assertNotEqual(0, Double.NaN);
assertNotEqual(Long.MAX_VALUE, Long.MAX_VALUE + 5000.0);
assertNotEqual(Long.MIN_VALUE, Long.MIN_VALUE - 5000.0);
assertEqual(1L << 53, 0x1p53);
// with Double, all things are possible
assertEqual(1L << 53, 0x1p53 + 1);
assertNotEqual(1L << 53, 0x1p53 - 1);
assertEqual((1L << 53) - 1, 0x1p53 - 1);
assertEqual((1L << 53) + 2, 0x1p53 + 2);
assertEqual(-(1L << 53), -0x1p53);
assertEqual(-(1L << 53), -0x1p53 - 1);
assertNotEqual(-(1L << 53), -0x1p53 + 1);
assertEqual(-(1L << 53) + 1, -0x1p53 + 1);
assertEqual(-(1L << 53) - 2, -0x1p53 - 2);
assertNotEqual(Integer.MAX_VALUE, Long.MAX_VALUE);
assertNotEqual(Integer.MIN_VALUE, Long.MIN_VALUE);
}
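The helper predicates used by equal above are not included in this pair. The following is a minimal hypothetical sketch of isDoubleRepresentable, isLongRepresentable, equalDoubles and equalLongAndDouble, written only so that the assertions in testEqual pass; the names come from the focal method, but these bodies are assumptions, not the original implementation.
// Hypothetical sketch (assumption): helper bodies inferred from the assertions in testEqual.
private static boolean isDoubleRepresentable(Class<?> clazz) {
    return clazz == Double.class || clazz == Float.class;
}
private static boolean isLongRepresentable(Class<?> clazz) {
    return clazz == Long.class || clazz == Integer.class || clazz == Short.class || clazz == Byte.class;
}
private static boolean equalDoubles(double lhs, double rhs) {
    // Bit-wise comparison distinguishes 0.0 from -0.0, as required by assertNotEqual(0, -0.0).
    return Double.doubleToLongBits(lhs) == Double.doubleToLongBits(rhs);
}
private static boolean equalLongAndDouble(long lhs, double rhs) {
    // Doubles at or beyond 2^63 in magnitude (e.g. Long.MAX_VALUE + 5000.0, which rounds to 2^63,
    // or the infinities) cannot equal any long.
    if (rhs < -0x1p63 || rhs >= 0x1p63) {
        return false;
    }
    // Truncate, then convert back and compare bit-wise: this rejects fractional values, NaN and -0.0,
    // while values like 0x1p53 + 1 (which rounds to 2^53) still compare equal to 1L << 53.
    return (long) rhs == lhs && equalDoubles((double) lhs, rhs);
}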
|
public void generateResponse(HttpServletResponse response, ArtifactResolveRequest artifactResolveRequest) throws SamlParseException {
try {
final var context = new MessageContext();
final var signType = determineSignType(artifactResolveRequest.getSamlSession());
String entityId = determineEntityId(signType);
context.setMessage(buildArtifactResponse(artifactResolveRequest, entityId, signType));
SAMLBindingSupport.setRelayState(context, artifactResolveRequest.getSamlSession().getRelayState());
final var encoder = new HTTPSOAP11Encoder();
encoder.setMessageContext(context);
encoder.setHttpServletResponse(response);
encoder.prepareContext();
encoder.initialize();
encoder.encode();
} catch (MessageEncodingException e) {
throw new SamlParseException("ArtifactResolveRequest soap11 decode exception", e);
} catch (ComponentInitializationException e) {
throw new SamlParseException("ArtifactResolveRequest initialization exception", e);
} catch (ValidationException e) {
throw new SamlParseException("Failed to sign request", e);
} catch (InstantiationException | ArtifactBuildException e) {
throw new SamlParseException("Failed to build artifact response", e);
} catch (BvdException e) {
throw new SamlParseException("Failed to connect to BVD", e);
}
}
|
@Test
void generateResponseBVD() throws SamlParseException, MetadataException, BvdException, JsonProcessingException, UnsupportedEncodingException {
when(bvdClientMock.retrieveRepresentationAffirmations(anyString())).thenReturn(getBvdResponse());
when(bvdMetadataServiceMock.generateMetadata()).thenReturn(getEntityDescriptor(BVD_ENTITY_ID));
var artifactResolveRequest = getArtifactResolveRequest("success", false, true, SAML_ROUTERINGSDIENST, EncryptionType.BSN, BVD_ENTITY_ID);
artifactResolveRequest.getSamlSession().setRequesterId(BVD_ENTITY_ID);
MockHttpServletResponse response = new MockHttpServletResponse();
artifactResponseService.generateResponse(response, artifactResolveRequest);
verify(bvdClientMock, times(1)).retrieveRepresentationAffirmations(anyString());
verify(bvdMetadataServiceMock, times(1)).generateMetadata();
}
|
@Deprecated
@VisibleForTesting
static native void nativeVerifyChunkedSums(
int bytesPerSum, int checksumType,
ByteBuffer sums, int sumsOffset,
ByteBuffer data, int dataOffset, int dataLength,
String fileName, long basePos) throws ChecksumException;
|
@Test
@SuppressWarnings("deprecation")
public void testNativeVerifyChunkedSumsFail() {
allocateDirectByteBuffers();
fillDataAndInvalidChecksums();
assertThrows(ChecksumException.class,
() -> NativeCrc32.nativeVerifyChunkedSums(bytesPerChecksum,
checksumType.id, checksums, checksums.position(), data,
data.position(), data.remaining(), fileName, BASE_POSITION));
}
|
@Override
public V load(K key) {
awaitSuccessfulInit();
try (SqlResult queryResult = sqlService.execute(queries.load(), key)) {
Iterator<SqlRow> it = queryResult.iterator();
V value = null;
if (it.hasNext()) {
SqlRow sqlRow = it.next();
if (it.hasNext()) {
throw new IllegalStateException("multiple matching rows for a key " + key);
}
// If there is a single column as the value, return that column as the value
if (queryResult.getRowMetadata().getColumnCount() == 2 && genericMapStoreProperties.singleColumnAsValue) {
value = sqlRow.getObject(1);
} else {
//noinspection unchecked
value = (V) toGenericRecord(sqlRow, genericMapStoreProperties);
}
}
return value;
}
}
|
@Test
public void whenSetNonExistingColumn_thenFailToInitialize() {
ObjectSpec spec = objectProvider.createObject(mapName, false);
objectProvider.insertItems(spec, 1);
Properties properties = new Properties();
properties.setProperty(DATA_CONNECTION_REF_PROPERTY, TEST_DATABASE_REF);
properties.setProperty("columns", "name,age");
mapLoader = createMapLoader(properties, hz);
assertThatThrownBy(() -> mapLoader.load(0))
.isInstanceOf(HazelcastException.class)
.hasStackTraceContaining("Column 'age' not found");
}
|
public String getName() {
return name;
}
|
@Test
public void getName() {
JobScheduleParam jobScheduleParam = mock( JobScheduleParam.class );
when( jobScheduleParam.getName() ).thenCallRealMethod();
String name = "hitachi";
ReflectionTestUtils.setField( jobScheduleParam, "name", name );
Assert.assertEquals( name, jobScheduleParam.getName() );
}
|
@Override
public void destroy() {
if (this.pubSubClient != null) {
try {
this.pubSubClient.shutdown();
this.pubSubClient.awaitTermination(1, TimeUnit.SECONDS);
} catch (Exception e) {
log.error("Failed to shutdown PubSub client during destroy()", e);
}
}
}
|
@Test
public void givenPubSubClientIsNull_whenDestroy_thenDoNothing() {
ReflectionTestUtils.setField(node, "pubSubClient", null);
node.destroy();
then(pubSubClientMock).shouldHaveNoInteractions();
}
|
public static <T> T getBean(Class<T> interfaceClass, Class typeClass) {
Object object = serviceMap.get(interfaceClass.getName() + "<" + typeClass.getName() + ">");
if(object == null) return null;
if(object instanceof Object[]) {
return (T)Array.get(object, 0);
} else {
return (T)object;
}
}
|
@Test
public void testObjectNotDefined() {
Dummy dummy = SingletonServiceFactory.getBean(Dummy.class);
Assert.assertNull(dummy);
}
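The test calls a single-argument getBean overload rather than the two-argument variant shown as the focal method. A plausible sketch of that overload, mirroring the lookup pattern above (an assumption, not the actual SingletonServiceFactory source), is:
// Hypothetical sketch (assumption): single-argument lookup mirroring the two-argument variant above.
public static <T> T getBean(Class<T> interfaceClass) {
    Object object = serviceMap.get(interfaceClass.getName());
    if (object == null) return null; // e.g. Dummy is not registered, so testObjectNotDefined expects null
    if (object instanceof Object[]) {
        return (T) Array.get(object, 0);
    } else {
        return (T) object;
    }
}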
|
public int getPartitionId() {
return partitionId;
}
|
@Test
public void testGetPartitionId() {
assertEquals(42, dataEvent.getPartitionId());
assertEquals(42, objectEvent.getPartitionId());
}
|
@Override
public DataflowPipelineJob run(Pipeline pipeline) {
// Multi-language pipelines and pipelines that include upgrades should automatically be upgraded
// to Runner v2.
if (DataflowRunner.isMultiLanguagePipeline(pipeline) || includesTransformUpgrades(pipeline)) {
List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList());
if (!experiments.contains("use_runner_v2")) {
LOG.info(
"Automatically enabling Dataflow Runner v2 since the pipeline used cross-language"
+ " transforms or pipeline needed a transform upgrade.");
options.setExperiments(
ImmutableList.<String>builder().addAll(experiments).add("use_runner_v2").build());
}
}
if (useUnifiedWorker(options)) {
if (hasExperiment(options, "disable_runner_v2")
|| hasExperiment(options, "disable_runner_v2_until_2023")
|| hasExperiment(options, "disable_prime_runner_v2")) {
throw new IllegalArgumentException(
"Runner V2 both disabled and enabled: at least one of ['beam_fn_api', 'use_unified_worker', 'use_runner_v2', 'use_portable_job_submission'] is set and also one of ['disable_runner_v2', 'disable_runner_v2_until_2023', 'disable_prime_runner_v2'] is set.");
}
List<String> experiments =
new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true
if (!experiments.contains("use_runner_v2")) {
experiments.add("use_runner_v2");
}
if (!experiments.contains("use_unified_worker")) {
experiments.add("use_unified_worker");
}
if (!experiments.contains("beam_fn_api")) {
experiments.add("beam_fn_api");
}
if (!experiments.contains("use_portable_job_submission")) {
experiments.add("use_portable_job_submission");
}
options.setExperiments(ImmutableList.copyOf(experiments));
}
logWarningIfPCollectionViewHasNonDeterministicKeyCoder(pipeline);
logWarningIfBigqueryDLQUnused(pipeline);
if (shouldActAsStreaming(pipeline)) {
options.setStreaming(true);
if (useUnifiedWorker(options)) {
options.setEnableStreamingEngine(true);
List<String> experiments =
new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true
if (!experiments.contains("enable_streaming_engine")) {
experiments.add("enable_streaming_engine");
}
if (!experiments.contains("enable_windmill_service")) {
experiments.add("enable_windmill_service");
}
}
}
if (!ExperimentalOptions.hasExperiment(options, "disable_projection_pushdown")) {
ProjectionPushdownOptimizer.optimize(pipeline);
}
LOG.info(
"Executing pipeline on the Dataflow Service, which will have billing implications "
+ "related to Google Compute Engine usage and other Google Cloud Services.");
DataflowPipelineOptions dataflowOptions = options.as(DataflowPipelineOptions.class);
String workerHarnessContainerImageURL = DataflowRunner.getContainerImageForJob(dataflowOptions);
// This incorrectly puns the worker harness container image (which implements v1beta3 API)
// with the SDK harness image (which implements Fn API).
//
// The same Environment is used in different and contradictory ways, depending on whether
// it is a v1 or v2 job submission.
RunnerApi.Environment defaultEnvironmentForDataflow =
Environments.createDockerEnvironment(workerHarnessContainerImageURL);
// The SdkComponents for portable and non-portable job submission must be kept distinct. Both
// need the default environment.
SdkComponents portableComponents = SdkComponents.create();
portableComponents.registerEnvironment(
defaultEnvironmentForDataflow
.toBuilder()
.addAllDependencies(getDefaultArtifacts())
.addAllCapabilities(Environments.getJavaCapabilities())
.build());
RunnerApi.Pipeline portablePipelineProto =
PipelineTranslation.toProto(pipeline, portableComponents, false);
// Note that `stageArtifacts` has to be called before `resolveArtifact` because
// `resolveArtifact` updates local paths to staged paths in pipeline proto.
portablePipelineProto = resolveAnyOfEnvironments(portablePipelineProto);
List<DataflowPackage> packages = stageArtifacts(portablePipelineProto);
portablePipelineProto = resolveArtifacts(portablePipelineProto);
portablePipelineProto = applySdkEnvironmentOverrides(portablePipelineProto, options);
if (LOG.isDebugEnabled()) {
LOG.debug(
"Portable pipeline proto:\n{}",
TextFormat.printer().printToString(portablePipelineProto));
}
// Stage the portable pipeline proto, retrieving the staged pipeline path, then update
// the options on the new job
// TODO: add an explicit `pipeline` parameter to the submission instead of pipeline options
LOG.info("Staging portable pipeline proto to {}", options.getStagingLocation());
byte[] serializedProtoPipeline = portablePipelineProto.toByteArray();
DataflowPackage stagedPipeline =
options.getStager().stageToFile(serializedProtoPipeline, PIPELINE_FILE_NAME);
dataflowOptions.setPipelineUrl(stagedPipeline.getLocation());
if (useUnifiedWorker(options)) {
LOG.info("Skipping v1 transform replacements since job will run on v2.");
} else {
// Now rewrite things to be as needed for v1 (mutates the pipeline)
// This way the job submitted is valid for v1 and v2, simultaneously
replaceV1Transforms(pipeline);
}
// Capture the SdkComponents for look up during step translations
SdkComponents dataflowV1Components = SdkComponents.create();
dataflowV1Components.registerEnvironment(
defaultEnvironmentForDataflow
.toBuilder()
.addAllDependencies(getDefaultArtifacts())
.addAllCapabilities(Environments.getJavaCapabilities())
.build());
// No need to perform transform upgrading for the Runner v1 proto.
RunnerApi.Pipeline dataflowV1PipelineProto =
PipelineTranslation.toProto(pipeline, dataflowV1Components, true, false);
if (LOG.isDebugEnabled()) {
LOG.debug(
"Dataflow v1 pipeline proto:\n{}",
TextFormat.printer().printToString(dataflowV1PipelineProto));
}
// Set a unique client_request_id in the CreateJob request.
// This is used to ensure idempotence of job creation across retried
// attempts to create a job. Specifically, if the service returns a job with
// a different client_request_id, it means the returned one is a different
// job previously created with the same job name, and that the job creation
// has been effectively rejected. The SDK should return
// Error::Already_Exists to user in that case.
int randomNum = new Random().nextInt(9000) + 1000;
String requestId =
DateTimeFormat.forPattern("YYYYMMddHHmmssmmm")
.withZone(DateTimeZone.UTC)
.print(DateTimeUtils.currentTimeMillis())
+ "_"
+ randomNum;
JobSpecification jobSpecification =
translator.translate(
pipeline, dataflowV1PipelineProto, dataflowV1Components, this, packages);
if (!isNullOrEmpty(dataflowOptions.getDataflowWorkerJar()) && !useUnifiedWorker(options)) {
List<String> experiments =
firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList());
if (!experiments.contains("use_staged_dataflow_worker_jar")) {
dataflowOptions.setExperiments(
ImmutableList.<String>builder()
.addAll(experiments)
.add("use_staged_dataflow_worker_jar")
.build());
}
}
Job newJob = jobSpecification.getJob();
try {
newJob
.getEnvironment()
.setSdkPipelineOptions(
MAPPER.readValue(MAPPER_WITH_MODULES.writeValueAsBytes(options), Map.class));
} catch (IOException e) {
throw new IllegalArgumentException(
"PipelineOptions specified failed to serialize to JSON.", e);
}
newJob.setClientRequestId(requestId);
DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo();
String version = dataflowRunnerInfo.getVersion();
checkState(
!"${pom.version}".equals(version),
"Unable to submit a job to the Dataflow service with unset version ${pom.version}");
LOG.info("Dataflow SDK version: {}", version);
newJob.getEnvironment().setUserAgent((Map) dataflowRunnerInfo.getProperties());
// The Dataflow Service may write to the temporary directory directly, so
// must be verified.
if (!isNullOrEmpty(options.getGcpTempLocation())) {
newJob
.getEnvironment()
.setTempStoragePrefix(
dataflowOptions.getPathValidator().verifyPath(options.getGcpTempLocation()));
}
newJob.getEnvironment().setDataset(options.getTempDatasetId());
if (options.getWorkerRegion() != null) {
newJob.getEnvironment().setWorkerRegion(options.getWorkerRegion());
}
if (options.getWorkerZone() != null) {
newJob.getEnvironment().setWorkerZone(options.getWorkerZone());
}
if (options.getFlexRSGoal()
== DataflowPipelineOptions.FlexResourceSchedulingGoal.COST_OPTIMIZED) {
newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_COST_OPTIMIZED");
} else if (options.getFlexRSGoal()
== DataflowPipelineOptions.FlexResourceSchedulingGoal.SPEED_OPTIMIZED) {
newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_SPEED_OPTIMIZED");
}
// Represent the minCpuPlatform pipeline option as an experiment, if not already present.
if (!isNullOrEmpty(dataflowOptions.getMinCpuPlatform())) {
List<String> experiments =
firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList());
List<String> minCpuFlags =
experiments.stream()
.filter(p -> p.startsWith("min_cpu_platform"))
.collect(Collectors.toList());
if (minCpuFlags.isEmpty()) {
dataflowOptions.setExperiments(
ImmutableList.<String>builder()
.addAll(experiments)
.add("min_cpu_platform=" + dataflowOptions.getMinCpuPlatform())
.build());
} else {
LOG.warn(
"Flag min_cpu_platform is defined in both top level PipelineOption, "
+ "as well as under experiments. Proceed using {}.",
minCpuFlags.get(0));
}
}
newJob
.getEnvironment()
.setExperiments(
ImmutableList.copyOf(
firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList())));
// Set the Docker container image that executes Dataflow worker harness, residing in Google
// Container Registry. Translator is guaranteed to create a worker pool prior to this point.
// For runner_v1, only worker_harness_container is set.
// For runner_v2, both worker_harness_container and sdk_harness_container are set to the same
// value.
String containerImage = getContainerImageForJob(options);
for (WorkerPool workerPool : newJob.getEnvironment().getWorkerPools()) {
workerPool.setWorkerHarnessContainerImage(containerImage);
}
configureSdkHarnessContainerImages(options, portablePipelineProto, newJob);
newJob.getEnvironment().setVersion(getEnvironmentVersion(options));
if (hooks != null) {
hooks.modifyEnvironmentBeforeSubmission(newJob.getEnvironment());
}
// enable upload_graph when the graph is too large
byte[] jobGraphBytes = DataflowPipelineTranslator.jobToString(newJob).getBytes(UTF_8);
int jobGraphByteSize = jobGraphBytes.length;
if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES
&& !hasExperiment(options, "upload_graph")
&& !useUnifiedWorker(options)) {
List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList());
options.setExperiments(
ImmutableList.<String>builder().addAll(experiments).add("upload_graph").build());
LOG.info(
"The job graph size ({} in bytes) is larger than {}. Automatically add "
+ "the upload_graph option to experiments.",
jobGraphByteSize,
CREATE_JOB_REQUEST_LIMIT_BYTES);
}
if (hasExperiment(options, "upload_graph") && useUnifiedWorker(options)) {
ArrayList<String> experiments = new ArrayList<>(options.getExperiments());
while (experiments.remove("upload_graph")) {}
options.setExperiments(experiments);
LOG.warn(
"The upload_graph experiment was specified, but it does not apply "
+ "to runner v2 jobs. Option has been automatically removed.");
}
// Upload the job to GCS and remove the graph object from the API call. The graph
// will be downloaded from GCS by the service.
if (hasExperiment(options, "upload_graph")) {
DataflowPackage stagedGraph =
options.getStager().stageToFile(jobGraphBytes, DATAFLOW_GRAPH_FILE_NAME);
newJob.getSteps().clear();
newJob.setStepsLocation(stagedGraph.getLocation());
}
if (!isNullOrEmpty(options.getDataflowJobFile())
|| !isNullOrEmpty(options.getTemplateLocation())) {
boolean isTemplate = !isNullOrEmpty(options.getTemplateLocation());
if (isTemplate) {
checkArgument(
isNullOrEmpty(options.getDataflowJobFile()),
"--dataflowJobFile and --templateLocation are mutually exclusive.");
}
String fileLocation =
firstNonNull(options.getTemplateLocation(), options.getDataflowJobFile());
checkArgument(
fileLocation.startsWith("/") || fileLocation.startsWith("gs://"),
"Location must be local or on Cloud Storage, got %s.",
fileLocation);
ResourceId fileResource = FileSystems.matchNewResource(fileLocation, false /* isDirectory */);
String workSpecJson = DataflowPipelineTranslator.jobToString(newJob);
try (PrintWriter printWriter =
new PrintWriter(
new BufferedWriter(
new OutputStreamWriter(
Channels.newOutputStream(FileSystems.create(fileResource, MimeTypes.TEXT)),
UTF_8)))) {
printWriter.print(workSpecJson);
LOG.info("Printed job specification to {}", fileLocation);
} catch (IOException ex) {
String error = String.format("Cannot create output file at %s", fileLocation);
if (isTemplate) {
throw new RuntimeException(error, ex);
} else {
LOG.warn(error, ex);
}
}
if (isTemplate) {
LOG.info("Template successfully created.");
return new DataflowTemplateJob();
}
}
String jobIdToUpdate = null;
if (options.isUpdate()) {
jobIdToUpdate = getJobIdFromName(options.getJobName());
newJob.setTransformNameMapping(options.getTransformNameMapping());
newJob.setReplaceJobId(jobIdToUpdate);
}
if (options.getCreateFromSnapshot() != null && !options.getCreateFromSnapshot().isEmpty()) {
newJob.setTransformNameMapping(options.getTransformNameMapping());
newJob.setCreatedFromSnapshotId(options.getCreateFromSnapshot());
}
Job jobResult;
try {
jobResult = dataflowClient.createJob(newJob);
} catch (GoogleJsonResponseException e) {
String errorMessages = "Unexpected errors";
if (e.getDetails() != null) {
if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES) {
errorMessages =
"The size of the serialized JSON representation of the pipeline "
+ "exceeds the allowable limit. "
+ "For more information, please see the documentation on job submission:\n"
+ "https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#jobs";
} else {
errorMessages = e.getDetails().getMessage();
}
}
throw new RuntimeException("Failed to create a workflow job: " + errorMessages, e);
} catch (IOException e) {
throw new RuntimeException("Failed to create a workflow job", e);
}
// Use a raw client for post-launch monitoring, as status calls may fail
// regularly and need not be retried automatically.
DataflowPipelineJob dataflowPipelineJob =
new DataflowPipelineJob(
DataflowClient.create(options),
jobResult.getId(),
options,
jobSpecification != null ? jobSpecification.getStepNames() : Collections.emptyMap(),
portablePipelineProto);
// If the service returned client request id, the SDK needs to compare it
// with the original id generated in the request, if they are not the same
// (i.e., the returned job is not created by this request), throw
// DataflowJobAlreadyExistsException or DataflowJobAlreadyUpdatedException
// depending on whether this is a reload or not.
if (jobResult.getClientRequestId() != null
&& !jobResult.getClientRequestId().isEmpty()
&& !jobResult.getClientRequestId().equals(requestId)) {
// If updating a job.
if (options.isUpdate()) {
throw new DataflowJobAlreadyUpdatedException(
dataflowPipelineJob,
String.format(
"The job named %s with id: %s has already been updated into job id: %s "
+ "and cannot be updated again.",
newJob.getName(), jobIdToUpdate, jobResult.getId()));
} else {
throw new DataflowJobAlreadyExistsException(
dataflowPipelineJob,
String.format(
"There is already an active job named %s with id: %s. If you want to submit a"
+ " second job, try again by setting a different name using --jobName.",
newJob.getName(), jobResult.getId()));
}
}
LOG.info(
"To access the Dataflow monitoring console, please navigate to {}",
MonitoringUtil.getJobMonitoringPageURL(
options.getProject(), options.getRegion(), jobResult.getId()));
LOG.info("Submitted job: {}", jobResult.getId());
LOG.info(
"To cancel the job using the 'gcloud' tool, run:\n> {}",
MonitoringUtil.getGcloudCancelCommand(options, jobResult.getId()));
return dataflowPipelineJob;
}
|
@Test
public void testRunBatchJobThatFails() throws Exception {
Pipeline p = TestPipeline.create(options);
PCollection<Integer> pc = p.apply(Create.of(1, 2, 3));
PAssert.that(pc).containsInAnyOrder(1, 2, 3);
DataflowPipelineJob mockJob = Mockito.mock(DataflowPipelineJob.class);
when(mockJob.getState()).thenReturn(State.FAILED);
when(mockJob.getProjectId()).thenReturn("test-project");
when(mockJob.getJobId()).thenReturn("test-job");
DataflowRunner mockRunner = Mockito.mock(DataflowRunner.class);
when(mockRunner.run(any(Pipeline.class))).thenReturn(mockJob);
TestDataflowRunner runner = TestDataflowRunner.fromOptionsAndClient(options, mockClient);
when(mockClient.getJobMetrics(anyString()))
.thenReturn(generateMockMetricResponse(true /* success */, false /* tentative */));
expectedException.expect(RuntimeException.class);
runner.run(p, mockRunner);
// fail() throws an AssertionError, so it is only reached if runner.run() does not
// throw the RuntimeException expected above.
fail("AssertionError expected");
}
|
public void removePublisherIndexesByEmptyService(Service service) {
if (publisherIndexes.containsKey(service) && publisherIndexes.get(service).isEmpty()) {
publisherIndexes.remove(service);
}
}
|
@Test
void testRemovePublisherIndexesByEmptyService() throws NoSuchFieldException, IllegalAccessException {
clientServiceIndexesManager.removePublisherIndexesByEmptyService(service);
Class<ClientServiceIndexesManager> clientServiceIndexesManagerClass = ClientServiceIndexesManager.class;
Field publisherIndexesField = clientServiceIndexesManagerClass.getDeclaredField("publisherIndexes");
publisherIndexesField.setAccessible(true);
ConcurrentMap<Service, Set<String>> publisherIndexes = (ConcurrentMap<Service, Set<String>>) publisherIndexesField.get(
clientServiceIndexesManager);
assertEquals(1, publisherIndexes.size());
}
|
public static long parseBytesToLong(List<Byte> data) {
return parseBytesToLong(data, 0);
}
|
@Test
public void parseBytesToLong() {
byte[] longValByte = {64, -101, 4, -79, 12, -78, -107, -22};
Assertions.assertEquals(longVal, TbUtils.parseBytesToLong(longValByte, 0, 8));
Bytes.reverse(longValByte);
Assertions.assertEquals(longVal, TbUtils.parseBytesToLong(longValByte, 0, 8, false));
List<Byte> longVaList = Bytes.asList(longValByte);
Assertions.assertEquals(longVal, TbUtils.parseBytesToLong(longVaList, 0, 8, false));
long longValRev = 0xEA95B20CB1049B40L;
Assertions.assertEquals(longValRev, TbUtils.parseBytesToLong(longVaList, 0, 8));
}
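Only the zero-offset delegating overload is shown as the focal method, while the test exercises overloads that also take a length and an endianness flag. A rough sketch of how such an overload could fold the bytes (assumed behavior, not the actual TbUtils code) is:
// Hypothetical sketch (assumption): the (data, offset, length, bigEndian) overload exercised by the test.
public static long parseBytesToLong(List<Byte> data, int offset, int length, boolean bigEndian) {
    long result = 0;
    for (int i = 0; i < length; i++) {
        // Big-endian consumes bytes left to right; little-endian consumes them right to left.
        int index = bigEndian ? offset + i : offset + length - 1 - i;
        result = (result << 8) | (data.get(index) & 0xFF);
    }
    return result;
}
With this reading, reversing the byte array and parsing it little-endian yields the same value as parsing the original array big-endian, which is exactly what the test asserts.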
|
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
if(file.isRoot()) {
return PathAttributes.EMPTY;
}
if(new DefaultPathContainerService().isContainer(file)) {
return PathAttributes.EMPTY;
}
final Path query;
if(file.isPlaceholder()) {
query = new Path(file.getParent(), FilenameUtils.removeExtension(file.getName()), file.getType(), file.attributes());
}
else {
query = file;
}
final AttributedList<Path> list;
if(new SimplePathPredicate(DriveHomeFinderService.SHARED_DRIVES_NAME).test(file.getParent())) {
list = new DriveTeamDrivesListService(session, fileid).list(file.getParent(), listener);
}
else {
list = new FileidDriveListService(session, fileid, query).list(file.getParent(), listener);
}
final Path found = list.find(new ListFilteringFeature.ListFilteringPredicate(session.getCaseSensitivity(), file));
if(null == found) {
throw new NotfoundException(file.getAbsolute());
}
return found.attributes();
}
|
@Test
public void testFindRoot() throws Exception {
final DriveAttributesFinderFeature f = new DriveAttributesFinderFeature(session, new DriveFileIdProvider(session));
assertEquals(PathAttributes.EMPTY, f.find(new Path("/", EnumSet.of(Path.Type.volume, Path.Type.directory))));
}
|
@Override
public List<Plugin> plugins() {
List<Plugin> plugins = configurationParameters.get(PLUGIN_PROPERTY_NAME, s -> Arrays.stream(s.split(","))
.map(String::trim)
.map(PluginOption::parse)
.map(pluginOption -> (Plugin) pluginOption)
.collect(Collectors.toList()))
.orElseGet(ArrayList::new);
getPublishPlugin()
.ifPresent(plugins::add);
return plugins;
}
|
@Test
void getPluginNamesWithPublishQuietEnabled() {
ConfigurationParameters config = new MapConfigurationParameters(
Constants.PLUGIN_PUBLISH_QUIET_PROPERTY_NAME, "true");
assertThat(new CucumberEngineOptions(config).plugins().stream()
.map(Options.Plugin::pluginString)
.collect(toList()),
empty());
}
|
public static String getUserId() {
ServletRequestAttributes attributes = (ServletRequestAttributes) RequestContextHolder.getRequestAttributes();
if (attributes != null) {
HttpServletRequest request = attributes.getRequest();
return request.getHeader(CommonConstants.USER_ID_HEADER);
}
return "";
}
|
@Test
public void testGetUserIdFromHeader() {
// Set up the expected user ID in the mock request object
String expectedUserId = "12345";
when(request.getHeader(CommonConstants.USER_ID_HEADER)).thenReturn(expectedUserId);
// Call the method under test
String userId = HeaderUtil.getUserId();
// Assert the expected result
assertEquals(expectedUserId, userId);
}
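For the static HeaderUtil.getUserId() to see the mocked request, the test class presumably binds the mock into Spring's RequestContextHolder before the test runs; a minimal sketch of that wiring (assumed setup, not shown in this pair) is:
// Assumed setup (not shown above): uses org.springframework.web.context.request.RequestContextHolder
// and ServletRequestAttributes so that RequestContextHolder.getRequestAttributes() returns the mock request.
@BeforeEach
void setUpRequestContext() {
    RequestContextHolder.setRequestAttributes(new ServletRequestAttributes(request));
}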
|
public LongValue increment(long increment) {
this.value += increment;
this.set = true;
return this;
}
|
@Test
public void multiples_calls_to_increment_long_increment_the_value() {
LongValue variationValue = new LongValue()
.increment(10L)
.increment(95L);
verifySetVariationValue(variationValue, 105L);
}
|
@Nullable
@Override
public Message decode(@Nonnull RawMessage rawMessage) {
final byte[] payload = rawMessage.getPayload();
final Map<String, Object> event;
try {
event = objectMapper.readValue(payload, TypeReferences.MAP_STRING_OBJECT);
} catch (IOException e) {
LOG.error("Couldn't decode raw message {}", rawMessage);
return null;
}
return parseEvent(event);
}
|
@Test
public void decodeMessagesHandlesGenericBeatWithKubernetes() throws Exception {
final Message message = codec.decode(messageFromJson("generic-with-kubernetes.json"));
assertThat(message).isNotNull();
assertThat(message.getMessage()).isEqualTo("null");
assertThat(message.getSource()).isEqualTo("unknown");
assertThat(message.getTimestamp()).isEqualTo(new DateTime(2016, 4, 1, 0, 0, DateTimeZone.UTC));
assertThat(message.getField("facility")).isEqualTo("genericbeat");
assertThat(message.getField("beat_foo")).isEqualTo("bar");
assertThat(message.getField("beat_kubernetes_pod_name")).isEqualTo("testpod");
assertThat(message.getField("beat_kubernetes_namespace")).isEqualTo("testns");
assertThat(message.getField("beat_kubernetes_labels_labelkey")).isEqualTo("labelvalue");
}
|
public synchronized void reset() {
if (trackedThread != null) {
CURRENT_TRACKERS.remove(trackedThread.getId());
trackedThread = null;
}
currentState = null;
numTransitions = 0;
millisSinceLastTransition = 0;
transitionsAtLastSample = 0;
nextLullReportMs = LULL_REPORT_MS;
}
|
@Test
public void testReset() throws Exception {
ExecutionStateTracker tracker = createTracker();
try (Closeable c1 = tracker.activate(new Thread())) {
try (Closeable c2 = tracker.enterState(testExecutionState)) {
sampler.doSampling(400);
assertThat(testExecutionState.totalMillis, equalTo(400L));
}
}
tracker.reset();
assertThat(tracker.getTrackedThread(), equalTo(null));
assertThat(tracker.getCurrentState(), equalTo(null));
assertThat(tracker.getNumTransitions(), equalTo(0L));
assertThat(tracker.getMillisSinceLastTransition(), equalTo(0L));
assertThat(tracker.getTransitionsAtLastSample(), equalTo(0L));
assertThat(tracker.getNextLullReportMs(), equalTo(TimeUnit.MINUTES.toMillis(5)));
}
|
@Override
public void commit(Commit commit) {
commitQueue.put(commit);
}
|
@Test
public void testCommit() {
List<CompleteCommit> completeCommits = new ArrayList<>();
workCommitter = createWorkCommitter(completeCommits::add);
List<Commit> commits = new ArrayList<>();
for (int i = 1; i <= 5; i++) {
Work work = createMockWork(i);
Windmill.WorkItemCommitRequest commitRequest =
Windmill.WorkItemCommitRequest.newBuilder()
.setKey(work.getWorkItem().getKey())
.setShardingKey(work.getWorkItem().getShardingKey())
.setWorkToken(work.getWorkItem().getWorkToken())
.setCacheToken(work.getWorkItem().getCacheToken())
.build();
commits.add(Commit.create(commitRequest, createComputationState("computationId-" + i), work));
}
workCommitter.start();
commits.forEach(workCommitter::commit);
Map<Long, Windmill.WorkItemCommitRequest> committed =
fakeWindmillServer.waitForAndGetCommits(commits.size());
for (Commit commit : commits) {
Windmill.WorkItemCommitRequest request =
committed.get(commit.work().getWorkItem().getWorkToken());
assertNotNull(request);
assertThat(request).isEqualTo(commit.request());
}
assertThat(completeCommits).hasSize(commits.size());
assertThat(completeCommits)
.comparingElementsUsing(
Correspondence.from(
(CompleteCommit completeCommit, Commit commit) ->
completeCommit.computationId().equals(commit.computationId())
&& completeCommit.status() == Windmill.CommitStatus.OK
&& completeCommit.workId().equals(commit.work().id())
&& completeCommit
.shardedKey()
.equals(
ShardedKey.create(
commit.request().getKey(), commit.request().getShardingKey())),
"expected to equal"))
.containsExactlyElementsIn(commits);
}
|
public static StructType convert(Schema schema) {
return (StructType) TypeUtil.visit(schema, new TypeToSparkType());
}
|
@Test
public void testSchemaConversionWithMetaDataColumnSchema() {
StructType structType = SparkSchemaUtil.convert(TEST_SCHEMA_WITH_METADATA_COLS);
List<AttributeReference> attrRefs =
scala.collection.JavaConverters.seqAsJavaList(DataTypeUtils.toAttributes(structType));
for (AttributeReference attrRef : attrRefs) {
if (MetadataColumns.isMetadataColumn(attrRef.name())) {
assertThat(MetadataAttribute.unapply(attrRef).isDefined())
.as("metadata columns should have __metadata_col in attribute metadata")
.isTrue();
} else {
assertThat(MetadataAttribute.unapply(attrRef).isDefined())
.as("non metadata columns should not have __metadata_col in attribute metadata")
.isFalse();
}
}
}
|
@Override
public boolean apply(InputFile f) {
if (path == null) {
return false;
}
return path.equals(f.relativePath());
}
|
@Test
public void returns_true_if_matches() {
RelativePathPredicate predicate = new RelativePathPredicate("path");
InputFile inputFile = mock(InputFile.class);
when(inputFile.relativePath()).thenReturn("path");
assertThat(predicate.apply(inputFile)).isTrue();
}
|
public static <T> CompressedSource<T> from(FileBasedSource<T> sourceDelegate) {
return new CompressedSource<>(sourceDelegate, CompressionMode.AUTO);
}
|
@Test
public void testGzipProgress() throws IOException {
int numRecords = 3;
File tmpFile = tmpFolder.newFile("nonempty.gz");
String filename = tmpFile.toPath().toString();
writeFile(tmpFile, new byte[numRecords], Compression.GZIP);
PipelineOptions options = PipelineOptionsFactory.create();
CompressedSource<Byte> source = CompressedSource.from(new ByteSource(filename, 1));
try (BoundedReader<Byte> readerOrig = source.createReader(options)) {
assertThat(readerOrig, instanceOf(CompressedReader.class));
CompressedReader<Byte> reader = (CompressedReader<Byte>) readerOrig;
// before starting
assertEquals(0.0, reader.getFractionConsumed(), delta);
assertEquals(0, reader.getSplitPointsConsumed());
assertEquals(1, reader.getSplitPointsRemaining());
// confirm has three records
for (int i = 0; i < numRecords; ++i) {
if (i == 0) {
assertTrue(reader.start());
} else {
assertTrue(reader.advance());
}
assertEquals(0, reader.getSplitPointsConsumed());
assertEquals(1, reader.getSplitPointsRemaining());
}
assertFalse(reader.advance());
// after reading source
assertEquals(1.0, reader.getFractionConsumed(), delta);
assertEquals(1, reader.getSplitPointsConsumed());
assertEquals(0, reader.getSplitPointsRemaining());
}
}
|
public static URI parse(String featureIdentifier) {
requireNonNull(featureIdentifier, "featureIdentifier may not be null");
if (featureIdentifier.isEmpty()) {
throw new IllegalArgumentException("featureIdentifier may not be empty");
}
// Legacy from the Cucumber Eclipse plugin
// Older versions of Cucumber allowed it.
if (CLASSPATH_SCHEME_PREFIX.equals(featureIdentifier)) {
return rootPackageUri();
}
if (nonStandardPathSeparatorInUse(featureIdentifier)) {
String standardized = replaceNonStandardPathSeparator(featureIdentifier);
return parseAssumeFileScheme(standardized);
}
if (isWindowsOS() && pathContainsWindowsDrivePattern(featureIdentifier)) {
return parseAssumeFileScheme(featureIdentifier);
}
if (probablyURI(featureIdentifier)) {
return parseProbableURI(featureIdentifier);
}
return parseAssumeFileScheme(featureIdentifier);
}
|
@Test
@EnabledOnOs(WINDOWS)
void can_parse_windows_absolute_path_form() {
URI uri = FeaturePath.parse("C:\\path\\to\\file.feature");
assertAll(
() -> assertThat(uri.getScheme(), is("file")),
() -> assertThat(uri.getSchemeSpecificPart(), is("/C:/path/to/file.feature")));
}
|
@Override
public InterpreterResult interpret(final String st, final InterpreterContext context)
throws InterpreterException {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("st:\n{}", st);
}
final FormType form = getFormType();
RemoteInterpreterProcess interpreterProcess = null;
try {
interpreterProcess = getOrCreateInterpreterProcess();
} catch (IOException e) {
throw new InterpreterException(e);
}
if (!interpreterProcess.isRunning()) {
return new InterpreterResult(InterpreterResult.Code.ERROR,
"Interpreter process is not running\n" + interpreterProcess.getErrorMessage());
}
return interpreterProcess.callRemoteFunction(client -> {
RemoteInterpreterResult remoteResult = client.interpret(
sessionId, className, st, convert(context));
Map<String, Object> remoteConfig = (Map<String, Object>) GSON.fromJson(
remoteResult.getConfig(), new TypeToken<Map<String, Object>>() {
}.getType());
context.getConfig().clear();
if (remoteConfig != null) {
context.getConfig().putAll(remoteConfig);
}
GUI currentGUI = context.getGui();
GUI currentNoteGUI = context.getNoteGui();
if (form == FormType.NATIVE) {
GUI remoteGui = GUI.fromJson(remoteResult.getGui());
GUI remoteNoteGui = GUI.fromJson(remoteResult.getNoteGui());
currentGUI.clear();
currentGUI.setParams(remoteGui.getParams());
currentGUI.setForms(remoteGui.getForms());
currentNoteGUI.setParams(remoteNoteGui.getParams());
currentNoteGUI.setForms(remoteNoteGui.getForms());
} else if (form == FormType.SIMPLE) {
final Map<String, Input> currentForms = currentGUI.getForms();
final Map<String, Object> currentParams = currentGUI.getParams();
final GUI remoteGUI = GUI.fromJson(remoteResult.getGui());
final Map<String, Input> remoteForms = remoteGUI.getForms();
final Map<String, Object> remoteParams = remoteGUI.getParams();
currentForms.putAll(remoteForms);
currentParams.putAll(remoteParams);
}
return convert(remoteResult);
}
);
}
|
@Test
void testFailToLaunchInterpreterProcess_Timeout() {
try {
System.setProperty(ZeppelinConfiguration.ConfVars.ZEPPELIN_INTERPRETER_REMOTE_RUNNER.getVarName(),
zeppelinHome.getAbsolutePath() + "/zeppelin-zengine/src/test/resources/bin/interpreter_timeout.sh");
System.setProperty(ZeppelinConfiguration.ConfVars.ZEPPELIN_INTERPRETER_CONNECT_TIMEOUT.getVarName(), "10s");
final Interpreter interpreter1 = interpreterSetting.getInterpreter("user1", note1Id, "sleep");
final InterpreterContext context1 = createDummyInterpreterContext();
// the interpret call below launches the RemoteInterpreterProcess, whose creation is
// expected to time out because of the interpreter_timeout.sh runner configured above.
try {
interpreter1.interpret("1", context1);
fail("Should not be able to launch interpreter process");
} catch (InterpreterException e) {
assertTrue(ExceptionUtils.getStackTrace(e).contains("Interpreter Process creation is time out"));
}
} finally {
System.clearProperty(ZeppelinConfiguration.ConfVars.ZEPPELIN_INTERPRETER_REMOTE_RUNNER.getVarName());
System.clearProperty(ZeppelinConfiguration.ConfVars.ZEPPELIN_INTERPRETER_CONNECT_TIMEOUT.getVarName());
}
}
|
public static None createNoneHealthChecker() {
return new None();
}
|
@Test
void testCreateNoneHealthChecker() {
assertEquals(AbstractHealthChecker.None.class, HealthCheckerFactory.createNoneHealthChecker().getClass());
}
|
@Config("failure-resolver.enabled")
public FailureResolverConfig setEnabled(boolean enabled)
{
this.enabled = enabled;
return this;
}
|
@Test
public void testDefault()
{
assertRecordedDefaults(recordDefaults(FailureResolverConfig.class)
.setEnabled(true));
}
|
@Override
public <VAgg> KTable<K, VAgg> aggregate(final Initializer<VAgg> initializer,
final Aggregator<? super K, ? super V, VAgg> adder,
final Aggregator<? super K, ? super V, VAgg> subtractor,
final Materialized<K, VAgg, KeyValueStore<Bytes, byte[]>> materialized) {
return aggregate(initializer, adder, subtractor, NamedInternal.empty(), materialized);
}
|
@Test
public void shouldNotAllowNullInitializerOnAggregate() {
assertThrows(NullPointerException.class, () -> groupedTable.aggregate(
null,
MockAggregator.TOSTRING_ADDER,
MockAggregator.TOSTRING_REMOVER,
Materialized.as("store")));
}
|
public void setSortKey(SortKey sortkey) {
if (Objects.equals(this.sortkey, sortkey)) {
return;
}
invalidate();
if (sortkey != null) {
int column = sortkey.getColumn();
if (valueComparators[column] == null) {
throw new IllegalArgumentException(
format("Can't sort column %s, it is mapped to type %s and this one have no natural order. So an explicit one must be specified",
column, model.getColumnClass(column)));
}
}
this.sortkey = sortkey;
this.comparator = null;
}
|
@Test
public void sortValueDescending() {
sorter.setSortKey(new SortKey(1, SortOrder.DESCENDING));
assertRowOrderAndIndexes(asList(d4(), a3(), b2(), c1()));
}
|
@Override
public AWSCredentials getCredentials() {
Credentials sessionCredentials = credentials.get();
if (Duration.between(clock.instant(), sessionCredentials.expiry).compareTo(REFRESH_INTERVAL) < 0) {
refresh();
sessionCredentials = credentials.get();
}
return sessionCredentials;
}
|
@Test
void deserializes_credentials() throws IOException {
Instant originalExpiry = clock.instant().plus(Duration.ofHours(12));
writeCredentials(credentialsPath, originalExpiry);
VespaAwsCredentialsProvider credentialsProvider = new VespaAwsCredentialsProvider(credentialsPath, clock);
AWSCredentials credentials = credentialsProvider.getCredentials();
assertExpiryEquals(originalExpiry, credentials);
Assertions.assertEquals("awsAccessKey", credentials.getAWSAccessKeyId());
Assertions.assertEquals("awsSecretKey", credentials.getAWSSecretKey());
Assertions.assertEquals("sessionToken", ((VespaAwsCredentialsProvider.Credentials)credentials).getSessionToken());
}
|
public static Schema create(Type type) {
switch (type) {
case STRING:
return new StringSchema();
case BYTES:
return new BytesSchema();
case INT:
return new IntSchema();
case LONG:
return new LongSchema();
case FLOAT:
return new FloatSchema();
case DOUBLE:
return new DoubleSchema();
case BOOLEAN:
return new BooleanSchema();
case NULL:
return new NullSchema();
default:
throw new AvroRuntimeException("Can't create a: " + type);
}
}
|
@Test
void validLongAsIntDefaultValue() {
Schema.Field field = new Schema.Field("myField", Schema.create(Schema.Type.INT), "doc", 1L);
assertTrue(field.hasDefaultValue());
assertEquals(1, field.defaultVal());
assertEquals(1, GenericData.get().getDefaultValue(field));
field = new Schema.Field("myField", Schema.create(Schema.Type.INT), "doc", Long.valueOf(Integer.MIN_VALUE));
assertTrue(field.hasDefaultValue());
assertEquals(Integer.MIN_VALUE, field.defaultVal());
assertEquals(Integer.MIN_VALUE, GenericData.get().getDefaultValue(field));
field = new Schema.Field("myField", Schema.create(Schema.Type.INT), "doc", Long.valueOf(Integer.MAX_VALUE));
assertTrue(field.hasDefaultValue());
assertEquals(Integer.MAX_VALUE, field.defaultVal());
assertEquals(Integer.MAX_VALUE, GenericData.get().getDefaultValue(field));
}
|
@Override
public String doSharding(final Collection<String> availableTargetNames, final PreciseShardingValue<Comparable<?>> shardingValue) {
ShardingSpherePreconditions.checkNotNull(shardingValue.getValue(), NullShardingValueException::new);
return doSharding(availableTargetNames, Range.singleton(shardingValue.getValue())).stream().findFirst().orElse(null);
}
|
@Test
void assertLowerHalfRangeDoSharding() {
Collection<String> actual = shardingAlgorithmByQuarter.doSharding(availableTablesForQuarterDataSources,
new RangeShardingValue<>("t_order", "create_time", DATA_NODE_INFO, Range.atLeast("2018-10-15 10:59:08")));
assertThat(actual.size(), is(9));
}
|
@Override
public KeyValue<Bytes, byte[]> next() {
if (!hasNext()) {
throw new NoSuchElementException();
}
return currentIterator.next();
}
|
@Test
public void shouldThrowNoSuchElementOnNextIfNoNext() {
iterator = new SegmentIterator<>(
Arrays.asList(segmentOne, segmentTwo).iterator(),
hasNextCondition,
Bytes.wrap("f".getBytes()),
Bytes.wrap("h".getBytes()),
true);
assertThrows(NoSuchElementException.class, () -> iterator.next());
}
|
public SmppMessage createSmppMessage(CamelContext camelContext, AlertNotification alertNotification) {
SmppMessage smppMessage = new SmppMessage(camelContext, alertNotification, configuration);
smppMessage.setHeader(SmppConstants.MESSAGE_TYPE, SmppMessageType.AlertNotification.toString());
smppMessage.setHeader(SmppConstants.SEQUENCE_NUMBER, alertNotification.getSequenceNumber());
smppMessage.setHeader(SmppConstants.COMMAND_ID, alertNotification.getCommandId());
smppMessage.setHeader(SmppConstants.COMMAND_STATUS, alertNotification.getCommandStatus());
smppMessage.setHeader(SmppConstants.SOURCE_ADDR, alertNotification.getSourceAddr());
smppMessage.setHeader(SmppConstants.SOURCE_ADDR_NPI, alertNotification.getSourceAddrNpi());
smppMessage.setHeader(SmppConstants.SOURCE_ADDR_TON, alertNotification.getSourceAddrTon());
smppMessage.setHeader(SmppConstants.ESME_ADDR, alertNotification.getEsmeAddr());
smppMessage.setHeader(SmppConstants.ESME_ADDR_NPI, alertNotification.getEsmeAddrNpi());
smppMessage.setHeader(SmppConstants.ESME_ADDR_TON, alertNotification.getEsmeAddrTon());
return smppMessage;
}
|
@Test
public void createSmppMessageFromDeliveryReceiptShouldReturnASmppMessage() throws Exception {
DeliverSm deliverSm = new DeliverSm();
deliverSm.setSmscDeliveryReceipt();
deliverSm.setShortMessage(
"id:2 sub:001 dlvrd:001 submit date:0908312310 done date:0908312311 stat:DELIVRD err:xxx Text:Hello SMPP world!"
.getBytes());
SmppMessage smppMessage = binding.createSmppMessage(camelContext, deliverSm);
assertEquals("Hello SMPP world!", smppMessage.getBody());
assertEquals(8, smppMessage.getHeaders().size());
assertEquals("2", smppMessage.getHeader(SmppConstants.ID));
assertEquals(1, smppMessage.getHeader(SmppConstants.DELIVERED));
// To avoid test failures when running in a different time zone
//assertEquals(new Date(1251753060000L), smppMessage.getHeader(SmppConstants.DONE_DATE));
assertEquals("xxx", smppMessage.getHeader(SmppConstants.ERROR));
//assertEquals(new Date(1251753000000L), smppMessage.getHeader(SmppConstants.SUBMIT_DATE));
assertEquals(1, smppMessage.getHeader(SmppConstants.SUBMITTED));
assertEquals(DeliveryReceiptState.DELIVRD, smppMessage.getHeader(SmppConstants.FINAL_STATUS));
assertEquals(SmppMessageType.DeliveryReceipt.toString(), smppMessage.getHeader(SmppConstants.MESSAGE_TYPE));
assertNull(smppMessage.getHeader(SmppConstants.OPTIONAL_PARAMETERS));
}
|
@Override
public void run() {
final Instant now = time.get();
try {
final Collection<PersistentQueryMetadata> queries = engine.getPersistentQueries();
final Optional<Double> saturation = queries.stream()
.collect(Collectors.groupingBy(PersistentQueryMetadata::getQueryApplicationId))
.entrySet()
.stream()
.map(e -> measure(now, e.getKey(), e.getValue()))
.max(PersistentQuerySaturationMetrics::compareSaturation)
.orElse(Optional.of(0.0));
saturation.ifPresent(s -> report(now, s));
final Set<String> appIds = queries.stream()
.map(PersistentQueryMetadata::getQueryApplicationId)
.collect(Collectors.toSet());
for (final String appId
: Sets.difference(new HashSet<>(perKafkaStreamsStats.keySet()), appIds)) {
perKafkaStreamsStats.get(appId).cleanup(reporter);
perKafkaStreamsStats.remove(appId);
}
} catch (final RuntimeException e) {
LOGGER.error("Error collecting saturation", e);
throw e;
}
}
|
@Test
public void shouldIgnoreSamplesOutsideMargin() {
// Given:
final Instant start = Instant.now();
when(clock.get()).thenReturn(start);
givenMetrics(kafkaStreams1)
.withThreadStartTime("t1", start.minus(WINDOW.multipliedBy(2)))
.withBlockedTime("t1", Duration.ofMinutes(2));
collector.run();
when(clock.get()).thenReturn(start.plus(WINDOW.plus(SAMPLE_MARGIN.multipliedBy(2))));
givenMetrics(kafkaStreams1)
.withThreadStartTime("t1", start.minus(WINDOW.multipliedBy(2)))
.withBlockedTime("t1", Duration.ofMinutes(3));
// When:
collector.run();
// Then:
verifyNoDataPoints("max-node-query-saturation", Collections.emptyMap());
}
|
public SubscriptionGroupConfig getSubscriptionGroupConfig(final String brokerAddr, String group,
long timeoutMillis) throws InterruptedException,
RemotingTimeoutException, RemotingSendRequestException, RemotingConnectException, MQBrokerException {
GetSubscriptionGroupConfigRequestHeader header = new GetSubscriptionGroupConfigRequestHeader();
header.setGroup(group);
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_SUBSCRIPTIONGROUP_CONFIG, header);
RemotingCommand response = this.remotingClient
.invokeSync(MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), brokerAddr), request, timeoutMillis);
assert response != null;
switch (response.getCode()) {
case ResponseCode.SUCCESS: {
return RemotingSerializable.decode(response.getBody(), SubscriptionGroupConfig.class);
}
default:
break;
}
throw new MQBrokerException(response.getCode(), response.getRemark(), brokerAddr);
}
|
@Test
public void assertGetAllSubscriptionGroupForSubscriptionGroupConfig() throws RemotingException, InterruptedException, MQBrokerException {
mockInvokeSync();
SubscriptionGroupConfig responseBody = new SubscriptionGroupConfig();
responseBody.setGroupName(group);
responseBody.setBrokerId(MixAll.MASTER_ID);
setResponseBody(responseBody);
SubscriptionGroupConfig actual = mqClientAPI.getSubscriptionGroupConfig(defaultBrokerAddr, group, defaultTimeout);
assertNotNull(actual);
assertEquals(group, actual.getGroupName());
assertEquals(MixAll.MASTER_ID, actual.getBrokerId());
}
|
@Override
public Map<String, String> loadNamespaceMetadata(Namespace namespace)
throws NoSuchNamespaceException {
if (!namespaceExists(namespace)) {
throw new NoSuchNamespaceException("Namespace does not exist: %s", namespace);
}
Map<String, String> properties = Maps.newHashMap();
properties.putAll(fetchProperties(namespace));
if (!properties.containsKey("location")) {
properties.put("location", defaultNamespaceLocation(namespace));
}
properties.remove(NAMESPACE_EXISTS_PROPERTY); // do not return reserved existence property
return ImmutableMap.copyOf(properties);
}
|
@Test
public void testLoadNamespaceMeta() {
TableIdentifier tbl1 = TableIdentifier.of("db", "ns1", "ns2", "metadata");
TableIdentifier tbl2 = TableIdentifier.of("db", "ns2", "ns3", "tbl2");
TableIdentifier tbl3 = TableIdentifier.of("db", "ns3", "tbl4");
TableIdentifier tbl4 = TableIdentifier.of("db", "metadata");
Lists.newArrayList(tbl1, tbl2, tbl3, tbl4)
.forEach(t -> catalog.createTable(t, SCHEMA, PartitionSpec.unpartitioned()));
assertThat(catalog.loadNamespaceMetadata(Namespace.of("db"))).containsKey("location");
assertThatThrownBy(() -> catalog.loadNamespaceMetadata(Namespace.of("db", "db2", "ns2")))
.isInstanceOf(NoSuchNamespaceException.class)
.hasMessage("Namespace does not exist: db.db2.ns2");
}
|
public static <T> void swapTo(List<T> list, T element, Integer targetIndex) {
if (CollUtil.isNotEmpty(list)) {
final int index = list.indexOf(element);
if (index >= 0) {
Collections.swap(list, index, targetIndex);
}
}
}
|
@Test
public void swapIndex() {
final List<Integer> list = Arrays.asList(7, 2, 8, 9);
ListUtil.swapTo(list, 8, 1);
assertEquals(8, (int) list.get(1));
}
|
@Override
public boolean tableExists(String dbName, String tblName) {
return hmsOps.tableExists(dbName, tblName);
}
|
@Test
public void testTableExists() {
boolean exists = hudiMetadata.tableExists("db1", "table1");
Assert.assertTrue(exists);
}
|
@Override
public double d(double[] x, double[] y) {
return cor.applyAsDouble(x, y);
}
|
@Test
public void testDistance() {
System.out.println("distance");
double[] x = {1.0, 2.0, 3.0, 4.0};
double[] y = {4.0, 3.0, 2.0, 1.0};
double[] z = {4.0, 2.0, 3.0, 1.0};
double[] w = {-2.1968219, -0.9559913, -0.0431738, 1.0567679, 0.3853515};
double[] v = {-1.7781325, -0.6659839, 0.9526148, -0.9460919, -0.3925300};
CorrelationDistance cor = new CorrelationDistance();
assertEquals(0.0, cor.d(x, x), 1E-5);
assertEquals(2.0, cor.d(x, y), 1E-5);
assertEquals(0.2, cor.d(y, z), 1E-5);
assertEquals(0.5313153, cor.d(w, v), 1E-7);
}
|
public static ResourceModel processResource(final Class<?> resourceClass)
{
return processResource(resourceClass, null);
}
|
@Test(expectedExceptions = ResourceConfigException.class)
public void failsOnInconsistentMethodWithTooManyCallbackParams() {
@RestLiCollection(name = "tooManyCallbacks")
class LocalClass extends CollectionResourceTemplate<Long, EmptyRecord>
{
@Action(name = "tooManyCallbacks")
public void tooManyCallbacks(@CallbackParam Callback<EmptyRecord> callback1, @CallbackParam Callback<EmptyRecord> callback2) {
}
}
RestLiAnnotationReader.processResource(LocalClass.class);
Assert.fail("#getParamIndex should fail throwing a ResourceConfigException");
}
|
@Override
public boolean expireEntryIfNotSet(K key, Duration ttl) {
return get(expireEntryIfNotSetAsync(key, ttl));
}
|
@Test
public void testExpireEntryIfNotSet() {
RMapCacheNative<String, String> testMap = redisson.getMapCacheNative("map");
testMap.put("key", "value");
testMap.expireEntryIfNotSet("key", Duration.ofMillis(20000));
assertThat(testMap.remainTimeToLive("key")).isBetween(19800L, 20000L);
}
|
public void pushWithCallback(String connectionId, ServerRequest request, PushCallBack requestCallBack,
Executor executor) {
Connection connection = connectionManager.getConnection(connectionId);
if (connection != null) {
try {
connection.asyncRequest(request, new AbstractRequestCallBack(requestCallBack.getTimeout()) {
@Override
public Executor getExecutor() {
return executor;
}
@Override
public void onResponse(Response response) {
if (response.isSuccess()) {
requestCallBack.onSuccess();
} else {
requestCallBack.onFail(new NacosException(response.getErrorCode(), response.getMessage()));
}
}
@Override
public void onException(Throwable e) {
requestCallBack.onFail(e);
}
});
} catch (ConnectionAlreadyClosedException e) {
connectionManager.unregister(connectionId);
requestCallBack.onSuccess();
} catch (Exception e) {
Loggers.REMOTE_DIGEST
.error("error to send push response to connectionId ={},push response={}", connectionId,
request, e);
requestCallBack.onFail(e);
}
} else {
requestCallBack.onSuccess();
}
}
|
@Test
void testPushWithCallback() {
try {
Mockito.when(connectionManager.getConnection(Mockito.any())).thenReturn(null);
rpcPushService.pushWithCallback(connectId, null, new PushCallBack() {
@Override
public long getTimeout() {
return 0;
}
@Override
public void onSuccess() {
System.out.println("success");
}
@Override
public void onFail(Throwable e) {
e.printStackTrace();
fail(e.getMessage());
}
}, null);
} catch (Exception e) {
e.printStackTrace();
fail(e.getMessage());
}
}
|
public String toJson()
{
return JsonCodec.jsonCodec(DruidIngestTask.class).toJson(this);
}
|
@Test
public void testDruidIngestTaskToJson()
{
DruidIngestTask ingestTask = new DruidIngestTask.Builder()
.withDataSource("test_table_name")
.withInputSource(new Path("file://test_path"), Collections.emptyList())
.withTimestampColumn("__time")
.withDimensions(ImmutableList.of(
new DruidIngestTask.DruidIngestDimension("string", "__time"),
new DruidIngestTask.DruidIngestDimension("double", "test_double_column")))
.withAppendToExisting(true)
.build();
assertEquals(ingestTask.toJson(), "{\n" +
" \"type\" : \"index_parallel\",\n" +
" \"spec\" : {\n" +
" \"dataSchema\" : {\n" +
" \"dataSource\" : \"test_table_name\",\n" +
" \"timestampSpec\" : {\n" +
" \"column\" : \"__time\"\n" +
" },\n" +
" \"dimensionsSpec\" : {\n" +
" \"dimensions\" : [ {\n" +
" \"type\" : \"string\",\n" +
" \"name\" : \"__time\"\n" +
" }, {\n" +
" \"type\" : \"double\",\n" +
" \"name\" : \"test_double_column\"\n" +
" } ]\n" +
" }\n" +
" },\n" +
" \"ioConfig\" : {\n" +
" \"type\" : \"index_parallel\",\n" +
" \"inputSource\" : {\n" +
" \"type\" : \"local\",\n" +
" \"baseDir\" : \"file://test_path\",\n" +
" \"filter\" : \"*.json.gz\"\n" +
" },\n" +
" \"inputFormat\" : {\n" +
" \"type\" : \"json\"\n" +
" },\n" +
" \"appendToExisting\" : true\n" +
" }\n" +
" }\n" +
"}");
}
|
public void set(int index, Object val) {
values[index] = val;
}
|
@Test
public void testSerialization() {
HeapRow original = new HeapRow(2);
original.set(0, 1);
original.set(1, new SqlCustomClass(1));
HeapRow restored = serializeAndCheck(original, JetSqlSerializerHook.ROW_HEAP);
checkEquals(original, restored, true);
}
|
public static long parseTimeoutMs(Property property, String value) {
long l = Long.parseLong(value);
checkState(l >= 1, "value of %s must be >= 1", property);
return l;
}
|
@Test
public void parseTimeoutMs_throws_ISE_if_value_is_0() {
assertThatThrownBy(() -> parseTimeoutMs(ProcessProperties.Property.WEB_GRACEFUL_STOP_TIMEOUT, 0 + ""))
.isInstanceOf(IllegalStateException.class)
.hasMessage("value of WEB_GRACEFUL_STOP_TIMEOUT must be >= 1");
}
|
@Override
public void stdOutput(String line) {
consumer.stdOutput(cropLongLine(line));
}
|
@Test
public void shouldNotCropShortLines() {
InMemoryStreamConsumer actualConsumer = ProcessOutputStreamConsumer.inMemoryConsumer();
BoundedOutputStreamConsumer streamConsumer = new BoundedOutputStreamConsumer(actualConsumer, 30);
streamConsumer.stdOutput("A short line");
assertThat(actualConsumer.getAllOutput(), is("A short line\n"));
}
|
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source, Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables, Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles, FEELTypeRegistry typeRegistry) {
CharStream input = CharStreams.fromString(source);
FEEL_1_1Lexer lexer = new FEEL_1_1Lexer( input );
CommonTokenStream tokens = new CommonTokenStream( lexer );
FEEL_1_1Parser parser = new FEEL_1_1Parser( tokens );
ParserHelper parserHelper = new ParserHelper(eventsManager);
additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol()));
parser.setHelper(parserHelper);
parser.setErrorHandler( new FEELErrorHandler() );
parser.removeErrorListeners(); // removes the error listener that prints to the console
parser.addErrorListener( new FEELParserErrorListener( eventsManager ) );
// pre-loads the parser with symbols
defineVariables( inputVariableTypes, inputVariables, parser );
if (typeRegistry != null) {
parserHelper.setTypeRegistry(typeRegistry);
}
return parser;
}
|
@Test
void power2() {
String inputExpression = "(y * 5) ** 3";
BaseNode infix = parse( inputExpression, mapOf(entry("y", BuiltInType.NUMBER)) );
assertThat( infix).isInstanceOf(InfixOpNode.class);
assertThat( infix.getResultType()).isEqualTo(BuiltInType.NUMBER);
assertThat( infix.getText()).isEqualTo(inputExpression);
InfixOpNode exp = (InfixOpNode) infix;
assertThat( exp.getLeft()).isInstanceOf(InfixOpNode.class);
assertThat( exp.getLeft().getText()).isEqualTo( "y * 5");
assertThat( exp.getOperator()).isEqualTo(InfixOperator.POW);
assertThat( exp.getRight()).isInstanceOf(NumberNode.class);
assertThat( exp.getRight().getText()).isEqualTo("3");
InfixOpNode mult = (InfixOpNode) exp.getLeft();
assertThat( mult.getLeft()).isInstanceOf(NameRefNode.class);
assertThat( mult.getLeft().getText()).isEqualTo("y");
assertThat( mult.getOperator()).isEqualTo(InfixOperator.MULT);
assertThat( mult.getRight()).isInstanceOf(NumberNode.class);
assertThat( mult.getRight().getText()).isEqualTo("5");
}
|
public static PDImageXObject createFromImage(PDDocument document, BufferedImage image)
throws IOException
{
if (isGrayImage(image))
{
return createFromGrayImage(image, document);
}
// We try to encode the image with predictor
if (USE_PREDICTOR_ENCODER)
{
PDImageXObject pdImageXObject = new PredictorEncoder(document, image).encode();
if (pdImageXObject != null)
{
if (pdImageXObject.getColorSpace() == PDDeviceRGB.INSTANCE &&
pdImageXObject.getBitsPerComponent() < 16 &&
image.getWidth() * image.getHeight() <= 50 * 50)
{
// also create classic compressed image, compare sizes
PDImageXObject pdImageXObjectClassic = createFromRGBImage(image, document);
if (pdImageXObjectClassic.getCOSObject().getLength() <
pdImageXObject.getCOSObject().getLength())
{
pdImageXObject.getCOSObject().close();
return pdImageXObjectClassic;
}
else
{
pdImageXObjectClassic.getCOSObject().close();
}
}
return pdImageXObject;
}
}
// Fallback: We export the image as 8-bit sRGB and might lose color information
return createFromRGBImage(image, document);
}
|
@Test
void testCreateLosslessFromImageRGB() throws IOException
{
PDDocument document = new PDDocument();
BufferedImage image = ImageIO.read(this.getClass().getResourceAsStream("png.png"));
PDImageXObject ximage1 = LosslessFactory.createFromImage(document, image);
validate(ximage1, 8, image.getWidth(), image.getHeight(), "png", PDDeviceRGB.INSTANCE.getName());
checkIdent(image, ximage1.getImage());
// Create a grayscale image
BufferedImage grayImage = new BufferedImage(image.getWidth(), image.getHeight(), BufferedImage.TYPE_BYTE_GRAY);
Graphics g = grayImage.getGraphics();
g.drawImage(image, 0, 0, null);
g.dispose();
PDImageXObject ximage2 = LosslessFactory.createFromImage(document, grayImage);
validate(ximage2, 8, grayImage.getWidth(), grayImage.getHeight(), "png", PDDeviceGray.INSTANCE.getName());
checkIdent(grayImage, ximage2.getImage());
// Create a bitonal image
BufferedImage bitonalImage = new BufferedImage(image.getWidth(), image.getHeight(), BufferedImage.TYPE_BYTE_BINARY);
// avoid multiple of 8 to test padding
assertNotEquals(0, bitonalImage.getWidth() % 8);
g = bitonalImage.getGraphics();
g.drawImage(image, 0, 0, null);
g.dispose();
PDImageXObject ximage3 = LosslessFactory.createFromImage(document, bitonalImage);
validate(ximage3, 1, bitonalImage.getWidth(), bitonalImage.getHeight(), "png", PDDeviceGray.INSTANCE.getName());
checkIdent(bitonalImage, ximage3.getImage());
// This part isn't really needed because this test doesn't break
// if the mask has the wrong colorspace (PDFBOX-2057), but it is still useful
// if something goes wrong in the future and we want to have a PDF to open.
PDPage page = new PDPage();
document.addPage(page);
PDPageContentStream contentStream = new PDPageContentStream(document, page, AppendMode.APPEND, false);
contentStream.drawImage(ximage1, 200, 300, ximage1.getWidth() / 2, ximage1.getHeight() / 2);
contentStream.drawImage(ximage2, 200, 450, ximage2.getWidth() / 2, ximage2.getHeight() / 2);
contentStream.drawImage(ximage3, 200, 600, ximage3.getWidth() / 2, ximage3.getHeight() / 2);
contentStream.close();
File pdfFile = new File(TESTRESULTSDIR, "misc.pdf");
document.save(pdfFile);
document.close();
document = Loader.loadPDF(pdfFile, (String) null);
new PDFRenderer(document).renderImage(0);
document.close();
}
|
public String[] getSupportedExtensions() {
return new String[] { "ktr", "xml" };
}
|
@Test
public void testGetSupportedExtensions() throws Exception {
String[] extensions = transFileListener.getSupportedExtensions();
assertNotNull( extensions );
assertEquals( 2, extensions.length );
assertEquals( "ktr", extensions[0] );
assertEquals( "xml", extensions[1] );
}
|
@Override
public Result detect(ChannelBuffer in) {
int prefaceLen = Preface.readableBytes();
int bytesRead = min(in.readableBytes(), prefaceLen);
if (bytesRead == 0 || !ChannelBuffers.prefixEquals(in, Preface, bytesRead)) {
return Result.UNRECOGNIZED;
}
if (bytesRead == prefaceLen) {
return Result.RECOGNIZED;
}
return Result.NEED_MORE_DATA;
}
|
@Test
void testDetect_NeedMoreData() {
DubboDetector detector = new DubboDetector();
ChannelBuffer in = ChannelBuffers.wrappedBuffer(new byte[] {(byte) 0xda});
assertEquals(DubboDetector.Result.NEED_MORE_DATA, detector.detect(in));
}
|
public static long getTileIndex(final int pZoom, final int pX, final int pY) {
checkValues(pZoom, pX, pY);
return (((long) pZoom) << (mMaxZoomLevel * 2))
+ (((long) pX) << mMaxZoomLevel)
+ (long) pY;
}
|
@Test
public void testIndex() {
final int iterations = 1000;
for (int i = 0; i < iterations; i++) {
final int zoom = getRandomZoom();
final int x = getRandomXY(zoom);
final int y = getRandomXY(zoom);
final long index = MapTileIndex.getTileIndex(zoom, x, y);
checkIndex(index, zoom, x, y);
}
}
|
public Schema getSchema() {
return context.getSchema();
}
|
@Test
public void testWktMessageSchema() {
ProtoDynamicMessageSchema schemaProvider = schemaFromDescriptor(WktMessage.getDescriptor());
Schema schema = schemaProvider.getSchema();
assertEquals(WKT_MESSAGE_SCHEMA, schema);
}
|
@Override
protected Map<String, Object> getBeans(final ApplicationContext context) {
// Filter out beans that are not controllers
if (Boolean.TRUE.equals(isFull)) {
LOG.info("init spring websocket client success with isFull mode");
publisher.publishEvent(buildURIRegisterDTO(context, Collections.emptyMap()));
return Collections.emptyMap();
}
Map<String, Object> endpointBeans = context.getBeansWithAnnotation(ShenyuServerEndpoint.class);
registerEndpointsBeans(context, endpointBeans);
return context.getBeansWithAnnotation(ShenyuSpringWebSocketClient.class);
}
|
@Test
public void testGetBeans() {
Map<String, Object> beans = eventListener.getBeans(applicationContext);
assertNotNull(beans);
verify(publisher, never()).publishEvent(any());
}
|
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
if(file.isRoot()) {
return PathAttributes.EMPTY;
}
try {
if(containerService.isContainer(file)) {
final Storage.Buckets.Get request = session.getClient().buckets().get(containerService.getContainer(file).getName());
if(containerService.getContainer(file).attributes().getCustom().containsKey(GoogleStorageAttributesFinderFeature.KEY_REQUESTER_PAYS)) {
request.setUserProject(session.getHost().getCredentials().getUsername());
}
return this.toAttributes(request.execute());
}
else {
final Storage.Objects.Get get = session.getClient().objects().get(
containerService.getContainer(file).getName(), containerService.getKey(file));
if(containerService.getContainer(file).attributes().getCustom().containsKey(GoogleStorageAttributesFinderFeature.KEY_REQUESTER_PAYS)) {
get.setUserProject(session.getHost().getCredentials().getUsername());
}
final VersioningConfiguration versioning = null != session.getFeature(Versioning.class) ? session.getFeature(Versioning.class).getConfiguration(
containerService.getContainer(file)
) : VersioningConfiguration.empty();
if(versioning.isEnabled()) {
if(StringUtils.isNotBlank(file.attributes().getVersionId())) {
get.setGeneration(Long.parseLong(file.attributes().getVersionId()));
}
}
final PathAttributes attributes;
try {
attributes = this.toAttributes(get.execute());
}
catch(IOException e) {
if(file.isDirectory()) {
final BackgroundException failure = new GoogleStorageExceptionMappingService().map("Failure to read attributes of {0}", e, file);
if(failure instanceof NotfoundException) {
if(log.isDebugEnabled()) {
log.debug(String.format("Search for common prefix %s", file));
}
// File may be marked as placeholder but no placeholder file exists. Check for common prefix returned.
try {
new GoogleStorageObjectListService(session).list(file, new CancellingListProgressListener(), String.valueOf(Path.DELIMITER), 1, VersioningConfiguration.empty());
}
catch(ListCanceledException l) {
// Found common prefix
return PathAttributes.EMPTY;
}
catch(NotfoundException n) {
throw e;
}
// Found common prefix
return PathAttributes.EMPTY;
}
}
throw e;
}
if(versioning.isEnabled()) {
// Determine if latest version
try {
// Duplicate if not latest version
final Storage.Objects.Get request = session.getClient().objects().get(
containerService.getContainer(file).getName(), containerService.getKey(file));
if(containerService.getContainer(file).attributes().getCustom().containsKey(GoogleStorageAttributesFinderFeature.KEY_REQUESTER_PAYS)) {
request.setUserProject(session.getHost().getCredentials().getUsername());
}
final String latest = this.toAttributes(request.execute()).getVersionId();
if(null != latest) {
attributes.setDuplicate(!latest.equals(attributes.getVersionId()));
}
}
catch(IOException e) {
// Noncurrent versions only appear in requests that explicitly call for object versions to be included
final BackgroundException failure = new GoogleStorageExceptionMappingService().map("Failure to read attributes of {0}", e, file);
if(failure instanceof NotfoundException) {
// The latest version is a delete marker
attributes.setDuplicate(true);
}
else {
throw failure;
}
}
}
return attributes;
}
}
catch(IOException e) {
throw new GoogleStorageExceptionMappingService().map("Failure to read attributes of {0}", e, file);
}
}
|
@Test(expected = NotfoundException.class)
public void testDeleted() throws Exception {
final Path container = new Path("cyberduck-test-eu", EnumSet.of(Path.Type.directory, Path.Type.volume));
final Path test = new GoogleStorageTouchFeature(session).touch(new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
assertNotNull(test.attributes().getVersionId());
assertNotEquals(PathAttributes.EMPTY, new GoogleStorageAttributesFinderFeature(session).find(test));
new GoogleStorageDeleteFeature(session).delete(Collections.singletonList(test), new DisabledPasswordCallback(), new Delete.DisabledCallback());
try {
new GoogleStorageAttributesFinderFeature(session).find(test);
fail();
}
catch(NotfoundException e) {
throw e;
}
}
|
public static ScanReport fromJson(String json) {
return JsonUtil.parse(json, ScanReportParser::fromJson);
}
|
@Test
public void extraFields() {
ScanMetrics scanMetrics = ScanMetrics.of(new DefaultMetricsContext());
scanMetrics.totalPlanningDuration().record(10, TimeUnit.MINUTES);
scanMetrics.resultDataFiles().increment(5L);
scanMetrics.resultDeleteFiles().increment(5L);
scanMetrics.scannedDataManifests().increment(5L);
scanMetrics.skippedDataManifests().increment(5L);
scanMetrics.totalFileSizeInBytes().increment(1024L);
scanMetrics.totalDataManifests().increment(5L);
scanMetrics.totalFileSizeInBytes().increment(45L);
scanMetrics.totalDeleteFileSizeInBytes().increment(23L);
scanMetrics.skippedDataFiles().increment(3L);
scanMetrics.skippedDeleteFiles().increment(3L);
scanMetrics.scannedDeleteManifests().increment(3L);
scanMetrics.skippedDeleteManifests().increment(3L);
scanMetrics.indexedDeleteFiles().increment(10L);
scanMetrics.positionalDeleteFiles().increment(6L);
scanMetrics.equalityDeleteFiles().increment(4L);
String tableName = "roundTripTableName";
ScanReport scanReport =
ImmutableScanReport.builder()
.tableName(tableName)
.schemaId(4)
.addProjectedFieldIds(1, 2, 3)
.addProjectedFieldNames("c1", "c2", "c3")
.snapshotId(23L)
.filter(Expressions.alwaysTrue())
.scanMetrics(ScanMetricsResult.fromScanMetrics(scanMetrics))
.build();
assertThat(
ScanReportParser.fromJson(
"{\"table-name\":\"roundTripTableName\",\"snapshot-id\":23,"
+ "\"filter\":true,\"schema-id\": 4,\"projected-field-ids\": [ 1, 2, 3 ],\"projected-field-names\": [ \"c1\", \"c2\", \"c3\" ],"
+ "\"metrics\":{\"total-planning-duration\":{\"count\":1,\"time-unit\":\"nanoseconds\",\"total-duration\":600000000000},"
+ "\"result-data-files\":{\"unit\":\"count\",\"value\":5},"
+ "\"result-delete-files\":{\"unit\":\"count\",\"value\":5},"
+ "\"total-data-manifests\":{\"unit\":\"count\",\"value\":5},"
+ "\"total-delete-manifests\":{\"unit\":\"count\",\"value\":0},"
+ "\"scanned-data-manifests\":{\"unit\":\"count\",\"value\":5},"
+ "\"skipped-data-manifests\":{\"unit\":\"count\",\"value\":5},"
+ "\"total-file-size-in-bytes\":{\"unit\":\"bytes\",\"value\":1069},"
+ "\"total-delete-file-size-in-bytes\":{\"unit\":\"bytes\",\"value\":23},"
+ "\"skipped-data-files\":{\"unit\":\"count\",\"value\":3},"
+ "\"skipped-delete-files\":{\"unit\":\"count\",\"value\":3},"
+ "\"scanned-delete-manifests\":{\"unit\":\"count\",\"value\":3},"
+ "\"skipped-delete-manifests\":{\"unit\":\"count\",\"value\":3},"
+ "\"indexed-delete-files\":{\"unit\":\"count\",\"value\":10},"
+ "\"equality-delete-files\":{\"unit\":\"count\",\"value\":4},"
+ "\"positional-delete-files\":{\"unit\":\"count\",\"value\":6},"
+ "\"extra-metric\":\"extra-val\"},"
+ "\"extra\":\"extraVal\"}"))
.isEqualTo(scanReport);
}
|
@Override
public ListOffsetsResult listOffsets(Map<TopicPartition, OffsetSpec> topicPartitionOffsets,
ListOffsetsOptions options) {
AdminApiFuture.SimpleAdminApiFuture<TopicPartition, ListOffsetsResultInfo> future =
ListOffsetsHandler.newFuture(topicPartitionOffsets.keySet());
Map<TopicPartition, Long> offsetQueriesByPartition = topicPartitionOffsets.entrySet().stream()
.collect(Collectors.toMap(Map.Entry::getKey, e -> getOffsetFromSpec(e.getValue())));
ListOffsetsHandler handler = new ListOffsetsHandler(offsetQueriesByPartition, options, logContext);
invokeDriver(handler, future, options.timeoutMs);
return new ListOffsetsResult(future.all());
}
|
@Test
public void testListOffsetsPartialResponse() throws Exception {
Node node0 = new Node(0, "localhost", 8120);
Node node1 = new Node(1, "localhost", 8121);
List<Node> nodes = asList(node0, node1);
List<PartitionInfo> pInfos = new ArrayList<>();
pInfos.add(new PartitionInfo("foo", 0, node0, new Node[]{node0, node1}, new Node[]{node0, node1}));
pInfos.add(new PartitionInfo("foo", 1, node0, new Node[]{node0, node1}, new Node[]{node0, node1}));
final Cluster cluster =
new Cluster(
"mockClusterId",
nodes,
pInfos,
Collections.emptySet(),
Collections.emptySet(),
node0);
final TopicPartition tp0 = new TopicPartition("foo", 0);
final TopicPartition tp1 = new TopicPartition("foo", 1);
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -2L, 123L, 456);
ListOffsetsResponseData data = new ListOffsetsResponseData()
.setThrottleTimeMs(0)
.setTopics(singletonList(t0));
env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(data), node0);
Map<TopicPartition, OffsetSpec> partitions = new HashMap<>();
partitions.put(tp0, OffsetSpec.latest());
partitions.put(tp1, OffsetSpec.latest());
ListOffsetsResult result = env.adminClient().listOffsets(partitions);
assertNotNull(result.partitionResult(tp0).get());
TestUtils.assertFutureThrows(result.partitionResult(tp1), ApiException.class);
TestUtils.assertFutureThrows(result.all(), ApiException.class);
}
}
|
public Matrix getStateTransitionProbabilities() {
return a;
}
|
@Test
public void testGetStateTransitionProbabilities() {
System.out.println("getStateTransitionProbabilities");
HMM hmm = new HMM(pi, Matrix.of(a), Matrix.of(b));
Matrix result = hmm.getStateTransitionProbabilities();
for (int i = 0; i < a.length; i++) {
for (int j = 0; j < a[i].length; j++) {
assertEquals(a[i][j], result.get(i, j), 1E-7);
}
}
}
|
private void clear() {
garbageCollectionTypeForwardReference = null;
gcCauseForwardReference = GCCause.UNKNOWN_GCCAUSE;
fullGCTimeStamp = null;
scavengeTimeStamp = null;
youngMemoryPoolSummaryForwardReference = null;
tenuredForwardReference = null;
heapForwardReference = null;
scavengeDurationForwardReference = 0.0;
scavengeCPUSummaryForwardReference = null;
referenceGCForwardReference = null;
totalFreeSpaceForwardReference = 0;
maxChunkSizeForwardReference = 0;
numberOfBlocksForwardReference = 0;
averageBlockSizeForwardReference = 0;
treeHeightForwardReference = 0;
}
|
@Test
public void testThat2CMFDoNotHappen() {
String[][] lines = new String[][]{
{
"57721.729: [GC [1 CMS-initial-mark: 763361K(786432K)] 767767K(1022400K), 0.0022735 secs] [Times: user=0.00 sys=0.00, real=0.00 secs]",
"57721.732: [CMS-concurrent-mark-start]",
"57722.918: [GC 57722.918: [ParNew: 209792K->26176K(235968K), 0.0618431 secs] 973153K->802453K(1022400K), 0.0620347 secs] [Times: user=0.38 sys=0.00, real=0.06 secs]",
"57724.218: [Full GC 57724.218: [CMS2010-04-21T10:45:33.367+0100: 57724.319: [CMS-concurrent-mark: 2.519/2.587 secs] [Times: user=12.58 sys=0.09, real=2.59 secs]",
"(concurrent mode failure): 776277K->770654K(786432K), 6.0499857 secs] 1012245K->770654K(1022400K), [CMS Perm : 23211K->23211K(38736K)], 6.0501617 secs] [Times: user=6.09 sys=0.00, real=6.05 secs]"
},
{
"58272.354: [GC [1 CMS-initial-mark: 786431K(786432K)] 794666K(1022400K), 0.0088514 secs] [Times: user=0.01 sys=0.00, real=0.02 secs]",
"58272.363: [CMS-concurrent-mark-start]",
"58273.778: [Full GC 58273.778: [CMS2010-04-21T10:54:43.688+0100: 58274.663: [CMS-concurrent-mark: 2.299/2.300 secs] [Times: user=8.69 sys=0.11, real=2.30 secs]",
"(concurrent mode failure): 786431K->785452K(786432K), 6.5738696 secs] 1022399K->785452K(1022400K), [CMS Perm : 23211K->23211K(38736K)], 6.5740517 secs] [Times: user=7.44 sys=0.00, real=6.56 secs]"
}
};
Class[][] eventTypes = {
{InitialMark.class, ConcurrentMark.class, ParNew.class, ConcurrentModeFailure.class},
{InitialMark.class, ConcurrentMark.class, ConcurrentModeFailure.class}
};
for (int i = 0; i < lines.length; i++) {
List<JVMEvent> jvmEvents = feedParser(lines[i]);
for (int j = 0; j < eventTypes[i].length; j++) {
assertEquals(jvmEvents.get(j).getClass(), eventTypes[i][j]);
}
jvmEvents.clear();
}
}
|
@Override
public AppendFiles appendManifest(ManifestFile manifest) {
Preconditions.checkArgument(
!manifest.hasExistingFiles(), "Cannot append manifest with existing files");
Preconditions.checkArgument(
!manifest.hasDeletedFiles(), "Cannot append manifest with deleted files");
Preconditions.checkArgument(
manifest.snapshotId() == null || manifest.snapshotId() == -1,
"Snapshot id must be assigned during commit");
Preconditions.checkArgument(
manifest.sequenceNumber() == -1, "Sequence must be assigned during commit");
add(manifest);
return this;
}
|
@TestTemplate
public void testMergedAppendManifestCleanupWithSnapshotIdInheritance() throws IOException {
table.updateProperties().set(TableProperties.SNAPSHOT_ID_INHERITANCE_ENABLED, "true").commit();
assertThat(listManifestFiles()).isEmpty();
assertThat(readMetadata().lastSequenceNumber()).isEqualTo(0);
TableMetadata base = readMetadata();
assertThat(base.currentSnapshot()).isNull();
table.updateProperties().set(TableProperties.MANIFEST_MIN_MERGE_COUNT, "1").commit();
ManifestFile manifest1 = writeManifestWithName("manifest-file-1.avro", FILE_A, FILE_B);
Snapshot snap1 = commit(table, table.newAppend().appendManifest(manifest1), branch);
long commitId1 = snap1.snapshotId();
validateSnapshot(null, snap1, 1, FILE_A, FILE_B);
assertThat(snap1.allManifests(table.io())).hasSize(1);
validateManifest(
snap1.allManifests(table.io()).get(0),
dataSeqs(1L, 1L),
fileSeqs(1L, 1L),
ids(commitId1, commitId1),
files(FILE_A, FILE_B),
statuses(Status.ADDED, Status.ADDED));
assertThat(new File(manifest1.path())).exists();
ManifestFile manifest2 = writeManifestWithName("manifest-file-2.avro", FILE_C, FILE_D);
Snapshot snap2 = commit(table, table.newAppend().appendManifest(manifest2), branch);
long commitId2 = snap2.snapshotId();
V2Assert.assertEquals("Snapshot sequence number should be 2", 2, snap2.sequenceNumber());
V2Assert.assertEquals(
"Last sequence number should be 2", 2, readMetadata().lastSequenceNumber());
V1Assert.assertEquals(
"Table should end with last-sequence-number 0", 0, readMetadata().lastSequenceNumber());
assertThat(snap2.allManifests(table.io())).hasSize(1);
validateManifest(
latestSnapshot(table, branch).allManifests(table.io()).get(0),
dataSeqs(2L, 2L, 1L, 1L),
fileSeqs(2L, 2L, 1L, 1L),
ids(commitId2, commitId2, commitId1, commitId1),
files(FILE_C, FILE_D, FILE_A, FILE_B),
statuses(Status.ADDED, Status.ADDED, Status.EXISTING, Status.EXISTING));
assertThat(new File(manifest2.path())).doesNotExist();
}
|
@Nonnull
public static Number and(@Nonnull Number first, @Nonnull Number second) {
// Check for widest types first, go down the type list to narrower types until reaching int.
if (second instanceof Long || first instanceof Long) {
return first.longValue() & second.longValue();
} else {
return first.intValue() & second.intValue();
}
}
|
@Test
void testAnd() {
assertEquals(0b00100, NumberUtil.and(0b11100, 0b00111));
assertEquals(0b00100L, NumberUtil.and(0b11100, 0b00111L));
}
|
public static short translateBucketAcl(GSAccessControlList acl, String userId) {
short mode = (short) 0;
for (GrantAndPermission gp : acl.getGrantAndPermissions()) {
Permission perm = gp.getPermission();
GranteeInterface grantee = gp.getGrantee();
if (perm.equals(Permission.PERMISSION_READ)) {
if (isUserIdInGrantee(grantee, userId)) {
// If the bucket is readable by the user, add r and x to the owner mode.
mode |= (short) 0500;
}
} else if (perm.equals(Permission.PERMISSION_WRITE)) {
if (isUserIdInGrantee(grantee, userId)) {
// If the bucket is writable by the user, +w to the owner mode.
mode |= (short) 0200;
}
} else if (perm.equals(Permission.PERMISSION_FULL_CONTROL)) {
if (isUserIdInGrantee(grantee, userId)) {
// If the user has full control to the bucket, +rwx to the owner mode.
mode |= (short) 0700;
}
}
}
return mode;
}
|
@Test
public void translateEveryoneWritePermission() {
GroupGrantee allUsersGrantee = GroupGrantee.ALL_USERS;
mAcl.grantPermission(allUsersGrantee, Permission.PERMISSION_WRITE);
assertEquals((short) 0200, GCSUtils.translateBucketAcl(mAcl, ID));
assertEquals((short) 0200, GCSUtils.translateBucketAcl(mAcl, OTHER_ID));
}
|
public boolean matches(@Nullable final String hostOrIp) {
if (pattern == null) {
LOG.debug("No proxy host pattern defined");
return false;
}
if (isNullOrEmpty(hostOrIp)) {
LOG.debug("Host or IP address <{}> doesn't match <{}>", hostOrIp, noProxyHosts);
return false;
}
if (pattern.matcher(hostOrIp.toLowerCase(Locale.ROOT)).matches()) {
LOG.debug("Host or IP address <{}> matches <{}>", hostOrIp, noProxyHosts);
return true;
} else {
LOG.debug("Host or IP address <{}> doesn't match <{}>", hostOrIp, noProxyHosts);
return false;
}
}
|
@Test
public void matches() {
assertPattern(null, "127.0.0.1").isFalse();
assertPattern("", "127.0.0.1").isFalse();
assertPattern(",,", "127.0.0.1").isFalse();
assertPattern("127.0.0.1", "127.0.0.1").isTrue();
assertPattern("127.0.0.1", "127.0.0.2").isFalse();
assertPattern("127.0.0.*", "127.0.0.1").isTrue();
assertPattern("127.0.*", "127.0.0.1").isTrue();
assertPattern("127.0.*,10.0.0.*", "127.0.0.1").isTrue();
assertPattern("node0.graylog.example.com", "node0.graylog.example.com").isTrue();
assertPattern("node0.graylog.example.com", "node1.graylog.example.com").isFalse();
assertPattern("*.graylog.example.com", "node0.graylog.example.com").isTrue();
assertPattern("*.graylog.example.com", "node1.graylog.example.com").isTrue();
assertPattern("node0.graylog.example.*", "node0.GRAYLOG.example.com").isTrue();
assertPattern("node0.graylog.example.*,127.0.0.1,*.graylog.example.com", "node1.graylog.example.com").isTrue();
// Wildcard is only supported at beginning or end of the pattern
assertPattern("127.0.*.1", "127.0.0.1").isFalse();
assertPattern("node0.*.example.com", "node0.graylog.example.com").isFalse();
assertPattern("*.0.0.*", "127.0.0.1").isFalse();
}
|
@Override
public void start() {
this.executorService.scheduleAtFixedRate(this::tryBroadcastEvents, getInitialDelay(), getPeriod(), TimeUnit.SECONDS);
}
|
@Test
public void scheduler_should_be_resilient_to_failures() {
when(clientsRegistry.getClients()).thenThrow(new RuntimeException("I have a bad feelings about this"));
var underTest = new PushEventPollScheduler(executorService, clientsRegistry, db.getDbClient(), system2, config);
underTest.start();
assertThatCode(executorService::runCommand)
.doesNotThrowAnyException();
verify(clientsRegistry, times(0)).broadcastMessage(any(SonarLintPushEvent.class));
}
|
void reset() {
int count = 0;
for (int i = 0; i < table.length; i++) {
count += Long.bitCount(table[i] & ONE_MASK);
table[i] = (table[i] >>> 1) & RESET_MASK;
}
size = (size - (count >>> 2)) >>> 1;
}
|
@Test
public void reset() {
boolean reset = false;
var sketch = new FrequencySketch<Integer>();
sketch.ensureCapacity(64);
for (int i = 1; i < 20 * sketch.table.length; i++) {
sketch.increment(i);
if (sketch.size != i) {
reset = true;
break;
}
}
assertThat(reset).isTrue();
assertThat(sketch.size).isAtMost(sketch.sampleSize / 2);
}
|
public Encoding getEncoding() {
return encoding;
}
|
@Test
public void testNonAsciiEncoding() {
MetaStringEncoder encoder = new MetaStringEncoder('_', '$');
String testString = "こんにちは"; // Non-ASCII string
MetaString encodedMetaString = encoder.encode(testString);
assertEquals(encodedMetaString.getEncoding(), MetaString.Encoding.UTF_8);
}
|
public String getLegacyColumnName( DatabaseMetaData dbMetaData, ResultSetMetaData rsMetaData, int index ) throws KettleDatabaseException {
if ( dbMetaData == null ) {
throw new KettleDatabaseException( BaseMessages.getString( PKG, "MySQLDatabaseMeta.Exception.LegacyColumnNameNoDBMetaDataException" ) );
}
if ( rsMetaData == null ) {
throw new KettleDatabaseException( BaseMessages.getString( PKG, "MySQLDatabaseMeta.Exception.LegacyColumnNameNoRSMetaDataException" ) );
}
try {
return dbMetaData.getDriverMajorVersion() > 3 ? rsMetaData.getColumnLabel( index ) : rsMetaData.getColumnName( index );
} catch ( Exception e ) {
throw new KettleDatabaseException( String.format( "%s: %s", BaseMessages.getString( PKG, "MySQLDatabaseMeta.Exception.LegacyColumnNameException" ), e.getMessage() ), e );
}
}
|
@Test
public void testGetLegacyColumnNameDriverGreaterThanThreeFieldFirstName() throws Exception {
DatabaseMetaData databaseMetaData = mock( DatabaseMetaData.class );
doReturn( 5 ).when( databaseMetaData ).getDriverMajorVersion();
assertEquals( "FIRST_NAME", new MySQLDatabaseMeta().getLegacyColumnName( databaseMetaData, getResultSetMetaData(), 4 ) );
}
|
private DbProviderConfig() {
this(CONFIG_NAME);
}
|
@Test
public void testDbProviderConfig() {
}
|
public COM verifyCom(byte[] data, Class<? extends COM> type) throws RdaException {
final COM com = read(data, type);
if (!com.getDataGroups().containsAll(com.getRdaDataGroups())) {
throw new RdaException(
RdaError.COM, String.format("Not all data groups are available: %s", com.getDataGroups())
);
}
return com;
}
|
@Test
public void shouldThrowErrorIfBasicDataGroupsAreMissingFromDrivingLicence() throws Exception {
final CardVerifier verifier = verifier(null, null);
final byte[] com = readFixture("dl2/efCom");
com[1] -= 2;
com[8] -= 2;
Exception exception = assertThrows(RdaException.class, () -> {
verifier.verifyCom(com, DrivingLicenceCOM.class);
});
assertEquals(RdaError.COM, ((RdaException) exception).error);
assertEquals("Not all data groups are available: [1, 5, 6, 11, 12]", exception.getMessage());
}
|
@VisibleForTesting
Object evaluate(final GenericRow row) {
return term.getValue(new TermEvaluationContext(row));
}
|
@Test
public void shouldEvaluateComparisons_bytes() {
// Given:
final Expression expression1 = new ComparisonExpression(
ComparisonExpression.Type.GREATER_THAN,
BYTESCOL,
new BytesLiteral(ByteBuffer.wrap(new byte[] {123}))
);
final Expression expression2 = new ComparisonExpression(
ComparisonExpression.Type.LESS_THAN,
BYTESCOL,
new BytesLiteral(ByteBuffer.wrap(new byte[] {123}))
);
// When:
InterpretedExpression interpreter1 = interpreter(expression1);
InterpretedExpression interpreter2 = interpreter(expression2);
// Then:
assertThat(interpreter1.evaluate(make(14, ByteBuffer.wrap(new byte[] {123}))), is(false));
assertThat(interpreter2.evaluate(make(14, ByteBuffer.wrap(new byte[] {110}))), is(true));
}
|
@Override
public Optional<String> validate(String password) {
return password.matches(DIGIT_REGEX)
? Optional.empty()
: Optional.of(DIGIT_REASONING);
}
|
@Test
public void testValidateFailure() {
Optional<String> result = digitValidator.validate("Password");
Assert.assertTrue(result.isPresent());
Assert.assertEquals(result.get(), "must contain at least one digit between 0 and 9");
}
|
public synchronized int sendFetches() {
final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
sendFetchesInternal(
fetchRequests,
(fetchTarget, data, clientResponse) -> {
synchronized (Fetcher.this) {
handleFetchSuccess(fetchTarget, data, clientResponse);
}
},
(fetchTarget, data, error) -> {
synchronized (Fetcher.this) {
handleFetchFailure(fetchTarget, data, error);
}
});
return fetchRequests.size();
}
|
@Test
public void testFetcherMetricsTemplates() {
Map<String, String> clientTags = Collections.singletonMap("client-id", "clientA");
buildFetcher(new MetricConfig().tags(clientTags), OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(),
new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED);
// Fetch from topic to generate topic metrics
assignFromUser(singleton(tp0));
subscriptions.seek(tp0, 0);
assertEquals(1, sendFetches());
client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L, 0));
consumerClient.poll(time.timer(0));
assertTrue(fetcher.hasCompletedFetches());
Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords = fetchRecords();
assertTrue(partitionRecords.containsKey(tp0));
// Verify that all metrics except metrics-count have registered templates
Set<MetricNameTemplate> allMetrics = new HashSet<>();
for (MetricName n : metrics.metrics().keySet()) {
String name = n.name().replaceAll(tp0.toString(), "{topic}-{partition}");
if (!n.group().equals("kafka-metrics-count"))
allMetrics.add(new MetricNameTemplate(name, n.group(), "", n.tags().keySet()));
}
TestUtils.checkEquals(allMetrics, new HashSet<>(metricsRegistry.getAllTemplates()), "metrics", "templates");
}
|
private static String getHost(String contMgrAddress) {
String host = contMgrAddress;
String[] hostport = host.split(":");
if (hostport.length == 2) {
host = hostport[0];
}
return host;
}
|
@Test
public void testBlackListedNodesWithSchedulingToThatNode() throws Exception {
LOG.info("Running testBlackListedNodesWithSchedulingToThatNode");
Configuration conf = new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_NODE_BLACKLISTING_ENABLE, true);
conf.setInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, 1);
conf.setInt(
MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT, -1);
MyResourceManager rm = new MyResourceManager(conf);
rm.start();
// Submit the application
RMApp app = MockRMAppSubmitter.submitWithMemory(1024, rm);
rm.drainEvents();
MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
amNodeManager.nodeHeartbeat(true);
rm.drainEvents();
ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
.getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
rm.drainEvents();
JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
Job mockJob = mock(Job.class);
when(mockJob.getReport()).thenReturn(
MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
appAttemptId, mockJob);
// add resources to scheduler
MockNM nodeManager1 = rm.registerNode("h1:1234", 10240);
MockNM nodeManager3 = rm.registerNode("h3:1234", 10240);
rm.drainEvents();
LOG.info("Requesting 1 Containers _1 on H1");
// create the container request
ContainerRequestEvent event1 =
ContainerRequestCreator.createRequest(jobId, 1,
Resource.newInstance(1024, 1),
new String[] {"h1"});
allocator.sendRequest(event1);
LOG.info("RM Heartbeat (to send the container requests)");
// this tells the scheduler about the requests
// as nodes are not added, no allocations
List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
rm.drainEvents();
Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
LOG.info("h1 Heartbeat (To actually schedule the containers)");
// update resources in scheduler
nodeManager1.nodeHeartbeat(true); // Node heartbeat
rm.drainEvents();
LOG.info("RM Heartbeat (To process the scheduled containers)");
assigned = allocator.schedule();
rm.drainEvents();
assertBlacklistAdditionsAndRemovals(0, 0, rm);
Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
LOG.info("Failing container _1 on H1 (should blacklist the node)");
// Send events to blacklist nodes h1 and h2
ContainerFailedEvent f1 = createFailEvent(jobId, 1, "h1", false);
allocator.sendFailure(f1);
//At this stage, a request should be created for a fast fail map
//Create a FAST_FAIL request for a previously failed map.
ContainerRequestEvent event1f = createRequest(jobId, 1,
Resource.newInstance(1024, 1),
new String[] {"h1"}, true, false);
allocator.sendRequest(event1f);
//Update the Scheduler with the new requests.
assigned = allocator.schedule();
rm.drainEvents();
assertBlacklistAdditionsAndRemovals(1, 0, rm);
Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
// send another request with different resource and priority
ContainerRequestEvent event3 =
ContainerRequestCreator.createRequest(jobId, 3,
Resource.newInstance(1024, 1),
new String[] {"h1", "h3"});
allocator.sendRequest(event3);
//Allocator is aware of prio:5 container, and prio:20 (h1+h3) container.
//RM is only aware of the prio:5 container
LOG.info("h1 Heartbeat (To actually schedule the containers)");
// update resources in scheduler
nodeManager1.nodeHeartbeat(true); // Node heartbeat
rm.drainEvents();
LOG.info("RM Heartbeat (To process the scheduled containers)");
assigned = allocator.schedule();
rm.drainEvents();
assertBlacklistAdditionsAndRemovals(0, 0, rm);
Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
//RMContainerAllocator gets assigned a p:5 on a blacklisted node.
//Send a release for the p:5 container + another request.
LOG.info("RM Heartbeat (To process the re-scheduled containers)");
assigned = allocator.schedule();
rm.drainEvents();
assertBlacklistAdditionsAndRemovals(0, 0, rm);
Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
//Heartbeat from H3 to schedule on this host.
LOG.info("h3 Heartbeat (To re-schedule the containers)");
nodeManager3.nodeHeartbeat(true); // Node heartbeat
rm.drainEvents();
LOG.info("RM Heartbeat (To process the re-scheduled containers for H3)");
assigned = allocator.schedule();
assertBlacklistAdditionsAndRemovals(0, 0, rm);
rm.drainEvents();
// For debugging
for (TaskAttemptContainerAssignedEvent assig : assigned) {
LOG.info(assig.getTaskAttemptID() +
" assgined to " + assig.getContainer().getId() +
" with priority " + assig.getContainer().getPriority());
}
Assert.assertEquals("No of assignments must be 2", 2, assigned.size());
// validate that all containers are assigned to h3
for (TaskAttemptContainerAssignedEvent assig : assigned) {
Assert.assertEquals("Assigned container " + assig.getContainer().getId()
+ " host not correct", "h3", assig.getContainer().getNodeId().getHost());
}
}
|
@Override
public Iterable<K> get() {
return StateFetchingIterators.readAllAndDecodeStartingFrom(
cache, beamFnStateClient, keysRequest, keyCoder);
}
|
@Test
public void testGet() throws Exception {
FakeBeamFnStateClient fakeBeamFnStateClient =
new FakeBeamFnStateClient(
ImmutableMap.of(
keysStateKey(), KV.of(ByteArrayCoder.of(), asList(A, B)),
key(A), KV.of(StringUtf8Coder.of(), asList("A1", "A2", "A3")),
key(B), KV.of(StringUtf8Coder.of(), asList("B1", "B2"))));
MultimapSideInput<byte[], String> multimapSideInput =
new MultimapSideInput<>(
Caches.noop(),
fakeBeamFnStateClient,
"instructionId",
keysStateKey(),
ByteArrayCoder.of(),
StringUtf8Coder.of(),
true);
assertArrayEquals(
new String[] {"A1", "A2", "A3"}, Iterables.toArray(multimapSideInput.get(A), String.class));
assertArrayEquals(
new String[] {"B1", "B2"}, Iterables.toArray(multimapSideInput.get(B), String.class));
assertArrayEquals(
new String[] {}, Iterables.toArray(multimapSideInput.get(UNKNOWN), String.class));
assertArrayEquals(
new byte[][] {A, B}, Iterables.toArray(multimapSideInput.get(), byte[].class));
}
|
public static void unloadAll() {
InnerEnhancedServiceLoader.removeAllServiceLoader();
}
|
@Test
public void testUnloadAll() throws NoSuchFieldException, IllegalAccessException {
Hello hello = EnhancedServiceLoader.load(Hello.class);
assertThat(hello).isInstanceOf(Hello.class);
Hello2 hello2 = EnhancedServiceLoader.load(Hello2.class, "JapaneseHello", new Object[]{"msg"});
assertThat(hello2).isInstanceOf(Hello2.class);
EnhancedServiceLoader.unloadAll();
Class<EnhancedServiceLoader> clazz = EnhancedServiceLoader.class;
Field serviceLoadersField = clazz.getDeclaredField("SERVICE_LOADERS");
serviceLoadersField.setAccessible(true);
Map<Class<?>, Object> serviceLoaders = (Map<Class<?>, Object>)serviceLoadersField.get(null);
assertThat(CollectionUtils.isEmpty(serviceLoaders)).isTrue();
}
|
StringBuilder codeForScalarFieldExtraction(Descriptors.FieldDescriptor desc, String fieldNameInCode, int indent) {
StringBuilder code = new StringBuilder();
if (desc.isRepeated()) {
code.append(addIndent(String.format("if (msg.%s() > 0) {", getCountMethodName(fieldNameInCode)), indent));
code.append(completeLine(
putFieldInMsgMapCode(desc.getName(),
getProtoFieldListMethodName(fieldNameInCode) + "().toArray", null, null),
++indent));
code.append(addIndent("}", --indent));
} else if (desc.hasPresence()) {
code.append(addIndent(String.format("if (msg.%s()) {", hasPresenceMethodName(fieldNameInCode)), indent));
code.append(completeLine(
putFieldInMsgMapCode(desc.getName(), getProtoFieldMethodName(fieldNameInCode), null, null),
++indent));
code.append(addIndent("}", --indent));
} else {
code.append(completeLine(
putFieldInMsgMapCode(desc.getName(), getProtoFieldMethodName(fieldNameInCode), null, null), indent));
}
return code;
}
|
@Test
public void testCodeForScalarFieldExtraction() {
MessageCodeGen messageCodeGen = new MessageCodeGen();
// Simple field
Descriptors.FieldDescriptor fd = ComplexTypes.TestMessage.getDescriptor().findFieldByName(STRING_FIELD);
String fieldNameInCode = ProtobufInternalUtils.underScoreToCamelCase(fd.getName(), true);
assertEquals(messageCodeGen.codeForScalarFieldExtraction(fd, fieldNameInCode, 1).toString(),
" msgMap.put(\"string_field\", msg.getStringField());\n");
// Nullable field Or Has Presence
fd = ComplexTypes.TestMessage.getDescriptor().findFieldByName(NULLABLE_STRING_FIELD);
fieldNameInCode = ProtobufInternalUtils.underScoreToCamelCase(fd.getName(), true);
assertEquals(messageCodeGen.codeForScalarFieldExtraction(fd, fieldNameInCode, 1).toString(),
" if (msg.hasNullableStringField()) {\n"
+ " msgMap.put(\"nullable_string_field\", msg.getNullableStringField());\n" + " }\n");
// Repeated field
fd = ComplexTypes.TestMessage.getDescriptor().findFieldByName(REPEATED_STRINGS);
fieldNameInCode = ProtobufInternalUtils.underScoreToCamelCase(fd.getName(), true);
assertEquals(messageCodeGen.codeForScalarFieldExtraction(fd, fieldNameInCode, 1).toString(),
" if (msg.getRepeatedStringsCount() > 0) {\n"
+ " msgMap.put(\"repeated_strings\", msg.getRepeatedStringsList().toArray());\n" + " }\n");
}
|
public Future<KafkaVersionChange> reconcile() {
return getVersionFromController()
.compose(i -> getPods())
.compose(this::detectToAndFromVersions)
.compose(i -> prepareVersionChange());
}
|
@Test
public void testDowngradeWithAllVersions(VertxTestContext context) {
String oldKafkaVersion = KafkaVersionTestUtils.LATEST_KAFKA_VERSION;
String oldInterBrokerProtocolVersion = KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION;
String oldLogMessageFormatVersion = KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION;
String kafkaVersion = KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION;
String interBrokerProtocolVersion = KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION;
String logMessageFormatVersion = KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION;
VersionChangeCreator vcc = mockVersionChangeCreator(
mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion),
mockNewCluster(
null,
mockSps(oldKafkaVersion),
mockUniformPods(oldKafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion)
)
);
Checkpoint async = context.checkpoint();
vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> {
assertThat(c.from(), is(VERSIONS.version(oldKafkaVersion)));
assertThat(c.to(), is(VERSIONS.version(kafkaVersion)));
assertThat(c.interBrokerProtocolVersion(), nullValue());
assertThat(c.logMessageFormatVersion(), nullValue());
async.flag();
})));
}
|
@Override
public void abort(long checkpointId, Throwable cause, boolean cleanup) {
LOG.debug("{} aborting, checkpoint {}", taskName, checkpointId);
enqueue(
ChannelStateWriteRequest.abort(jobVertexID, subtaskIndex, checkpointId, cause),
true); // abort already started
enqueue(
ChannelStateWriteRequest.abort(jobVertexID, subtaskIndex, checkpointId, cause),
false); // abort enqueued but not started
if (cleanup) {
results.remove(checkpointId);
}
}
|
@Test
void testAbort() throws Exception {
NetworkBuffer buffer = getBuffer();
executeCallbackWithSyncWorker(
(writer, worker) -> {
callStart(writer);
ChannelStateWriteResult result = writer.getAndRemoveWriteResult(CHECKPOINT_ID);
callAddInputData(writer, buffer);
callAbort(writer);
worker.processAllRequests();
assertThat(result.isDone()).isTrue();
assertThat(buffer.isRecycled()).isTrue();
});
}
|
@Override
public LispLcafAddress mapMappingAddress(ExtensionMappingAddress mappingAddress) {
ExtensionMappingAddressType type = mappingAddress.type();
if (type.equals(LIST_ADDRESS.type())) {
LispListAddress listAddress = (LispListAddress) mappingAddress;
LispAfiAddress ipv4 = mapping2afi(listAddress.getIpv4());
LispAfiAddress ipv6 = mapping2afi(listAddress.getIpv6());
if (ipv4 != null && ipv6 != null) {
return new LispListLcafAddress(ImmutableList.of(ipv4, ipv6));
} else {
return new LispListLcafAddress(ImmutableList.of());
}
}
if (type.equals(SEGMENT_ADDRESS.type())) {
LispSegmentAddress segmentAddress = (LispSegmentAddress) mappingAddress;
return new LispSegmentLcafAddress.SegmentAddressBuilder()
.withInstanceId(segmentAddress.getInstanceId())
.withAddress(getAfiAddress(segmentAddress.getAddress()))
.build();
}
if (type.equals(AS_ADDRESS.type())) {
LispAsAddress asAddress = (LispAsAddress) mappingAddress;
return new LispAsLcafAddress.AsAddressBuilder()
.withAsNumber(asAddress.getAsNumber())
.withAddress(getAfiAddress(asAddress.getAddress()))
.build();
}
if (type.equals(APPLICATION_DATA_ADDRESS.type())) {
LispAppDataAddress appDataAddress = (LispAppDataAddress) mappingAddress;
return new LispAppDataLcafAddress.AppDataAddressBuilder()
.withProtocol(appDataAddress.getProtocol())
.withIpTos(appDataAddress.getIpTos())
.withLocalPortLow(appDataAddress.getLocalPortLow())
.withLocalPortHigh(appDataAddress.getLocalPortHigh())
.withRemotePortLow(appDataAddress.getRemotePortLow())
.withRemotePortHigh(appDataAddress.getRemotePortHigh())
.withAddress(getAfiAddress(appDataAddress.getAddress()))
.build();
}
if (type.equals(GEO_COORDINATE_ADDRESS.type())) {
LispGcAddress gcAddress = (LispGcAddress) mappingAddress;
return new LispGeoCoordinateLcafAddress.GeoCoordinateAddressBuilder()
.withIsNorth(gcAddress.isNorth())
.withLatitudeDegree(gcAddress.getLatitudeDegree())
.withLatitudeMinute(gcAddress.getLatitudeMinute())
.withLatitudeSecond(gcAddress.getLatitudeSecond())
.withIsEast(gcAddress.isEast())
.withLongitudeDegree(gcAddress.getLongitudeDegree())
.withLongitudeMinute(gcAddress.getLongitudeMinute())
.withLongitudeSecond(gcAddress.getLongitudeSecond())
.withAltitude(gcAddress.getAltitude())
.withAddress(getAfiAddress(gcAddress.getAddress()))
.build();
}
if (type.equals(NAT_ADDRESS.type())) {
LispNatAddress natAddress = (LispNatAddress) mappingAddress;
List<LispAfiAddress> aas = Lists.newArrayList();
natAddress.getRtrRlocAddresses()
.forEach(rtr -> aas.add(getAfiAddress(rtr)));
return new LispNatLcafAddress.NatAddressBuilder()
.withMsUdpPortNumber(natAddress.getMsUdpPortNumber())
.withEtrUdpPortNumber(natAddress.getEtrUdpPortNumber())
.withMsRlocAddress(getAfiAddress(natAddress.getMsRlocAddress()))
.withGlobalEtrRlocAddress(
getAfiAddress(natAddress.getGlobalEtrRlocAddress()))
.withPrivateEtrRlocAddress(
getAfiAddress(natAddress.getPrivateEtrRlocAddress()))
.withRtrRlocAddresses(aas)
.build();
}
if (type.equals(NONCE_ADDRESS.type())) {
LispNonceAddress nonceAddress = (LispNonceAddress) mappingAddress;
return new LispNonceLcafAddress.NonceAddressBuilder()
.withNonce(nonceAddress.getNonce())
.withAddress(getAfiAddress(nonceAddress.getAddress()))
.build();
}
if (type.equals(MULTICAST_ADDRESS.type())) {
LispMulticastAddress multicastAddress = (LispMulticastAddress) mappingAddress;
return new LispMulticastLcafAddress.MulticastAddressBuilder()
.withInstanceId(multicastAddress.getInstanceId())
.withSrcAddress(getAfiAddress(multicastAddress.getSrcAddress()))
.withSrcMaskLength(multicastAddress.getSrcMaskLength())
.withGrpAddress(getAfiAddress(multicastAddress.getGrpAddress()))
.withGrpMaskLength(multicastAddress.getGrpMaskLength())
.build();
}
if (type.equals(TRAFFIC_ENGINEERING_ADDRESS.type())) {
LispTeAddress teAddress = (LispTeAddress) mappingAddress;
List<LispTeRecord> records = Lists.newArrayList();
teAddress.getTeRecords().forEach(record -> {
LispTeRecord teRecord =
new LispTeRecord.TeRecordBuilder()
.withIsLookup(record.isLookup())
.withIsRlocProbe(record.isRlocProbe())
.withIsStrict(record.isStrict())
.withRtrRlocAddress(getAfiAddress(
record.getAddress()))
.build();
records.add(teRecord);
});
return new LispTeLcafAddress.TeAddressBuilder()
.withTeRecords(records)
.build();
}
if (type.equals(SOURCE_DEST_ADDRESS.type())) {
LispSrcDstAddress srcDstAddress = (LispSrcDstAddress) mappingAddress;
return new LispSourceDestLcafAddress.SourceDestAddressBuilder()
.withSrcPrefix(getAfiAddress(srcDstAddress.getSrcPrefix()))
.withSrcMaskLength(srcDstAddress.getSrcMaskLength())
.withDstPrefix(getAfiAddress(srcDstAddress.getDstPrefix()))
.withDstMaskLength(srcDstAddress.getDstMaskLength())
.build();
}
log.error("Unsupported extension mapping address type {}", mappingAddress.type());
return null;
}
|
@Test
public void testMapMappingAddress() {
new EqualsTester()
.addEqualityGroup(listLcafAddress, interpreter.mapMappingAddress(listExtAddress))
.addEqualityGroup(segmentLcafAddress, interpreter.mapMappingAddress(segmentExtAddress))
.addEqualityGroup(asLcafAddress, interpreter.mapMappingAddress(asExtAddress))
.addEqualityGroup(appDataLcafAddress, interpreter.mapMappingAddress(appDataExtAddress))
.addEqualityGroup(gcLcafAddress, interpreter.mapMappingAddress(gcExtAddress))
.addEqualityGroup(natLcafAddress, interpreter.mapMappingAddress(natExtAddress))
.addEqualityGroup(nonceLcafAddress, interpreter.mapMappingAddress(nonceExtAddress))
.addEqualityGroup(multicastLcafAddress, interpreter.mapMappingAddress(multicastExtAddress))
.addEqualityGroup(teLcafAddress, interpreter.mapMappingAddress(teExtAddress))
.addEqualityGroup(srcDstLcafAddress, interpreter.mapMappingAddress(srcDstExtAddress))
.testEquals();
}
|
public JWTValidator validateAlgorithm() throws ValidateException {
return validateAlgorithm(null);
}
|
@Test
public void validateAlgorithmTest() {
final String token = JWT.create()
.setNotBefore(DateUtil.date())
.setKey("123456".getBytes())
.sign();
// Verify the signing algorithm
JWTValidator.of(token).validateAlgorithm(JWTSignerUtil.hs256("123456".getBytes()));
}
|
@Override
public void handleSavepointCreation(
CompletedCheckpoint completedSavepoint, Throwable throwable) {
if (throwable != null) {
checkArgument(
completedSavepoint == null,
"No savepoint should be provided if a throwable is passed.");
handleSavepointCreationFailure(throwable);
} else {
handleSavepointCreationSuccess(checkNotNull(completedSavepoint));
}
}
|
@Test
void testSavepointCreationParameterBothNull() {
assertThatThrownBy(
() ->
createTestInstanceFailingOnGlobalFailOver()
.handleSavepointCreation(null, null))
.isInstanceOf(NullPointerException.class);
}
|
public static <T> T convert(Class<T> type, Object value) throws ConvertException {
return convert((Type) type, value);
}
|
@Test
public void toHashtableTest() {
final Map<String, String> map = MapUtil.newHashMap();
map.put("a1", "v1");
map.put("a2", "v2");
map.put("a3", "v3");
@SuppressWarnings("unchecked") final Hashtable<String, String> hashtable = Convert.convert(Hashtable.class, map);
assertEquals("v1", hashtable.get("a1"));
assertEquals("v2", hashtable.get("a2"));
assertEquals("v3", hashtable.get("a3"));
}
|
public PGReplicationStream createReplicationStream(final Connection connection, final String slotName, final BaseLogSequenceNumber startPosition) throws SQLException {
return connection.unwrap(PGConnection.class).getReplicationAPI()
.replicationStream()
.logical()
.withStartPosition((LogSequenceNumber) startPosition.get())
.withSlotName(slotName)
.withSlotOption("include-xids", true)
.withSlotOption("skip-empty-xacts", true)
.start();
}
|
@Test
void assertCreateReplicationStreamFailure() throws SQLException {
when(connection.unwrap(PGConnection.class)).thenThrow(new SQLException(""));
assertThrows(SQLException.class, () -> logicalReplication.createReplicationStream(connection, "", new PostgreSQLLogSequenceNumber(LogSequenceNumber.valueOf(100L))));
}
|
@Override
public Runner get() {
return runners.get();
}
|
@Test
void should_create_a_runner_per_thread() throws InterruptedException {
final Runner[] runners = new Runner[2];
Thread thread0 = new Thread(() -> runners[0] = runnerSupplier.get());
Thread thread1 = new Thread(() -> runners[1] = runnerSupplier.get());
thread0.start();
thread1.start();
thread0.join();
thread1.join();
assertAll(
() -> assertThat(runners[0], is(not(equalTo(runners[1])))),
() -> assertThat(runners[1], is(not(equalTo(runners[0])))));
}
|
public boolean compatibleVersion(String acceptableVersionRange, String actualVersion) {
V pluginVersion = parseVersion(actualVersion);
// Treat a single version "1.4" as a left bound, equivalent to "[1.4,)"
if (acceptableVersionRange.matches(VERSION_REGEX)) {
return ge(pluginVersion, parseVersion(acceptableVersionRange));
}
// Otherwise ensure it is a version range with bounds
Matcher matcher = INTERVAL_PATTERN.matcher(acceptableVersionRange);
Preconditions.checkArgument(matcher.matches(), "invalid version range");
String leftBound = matcher.group("left");
String rightBound = matcher.group("right");
Preconditions.checkArgument(
leftBound != null || rightBound != null, "left and right bounds cannot both be empty");
BiPredicate<V, V> leftComparator =
acceptableVersionRange.startsWith("[") ? VersionChecker::ge : VersionChecker::gt;
BiPredicate<V, V> rightComparator =
acceptableVersionRange.endsWith("]") ? VersionChecker::le : VersionChecker::lt;
if (leftBound != null && !leftComparator.test(pluginVersion, parseVersion(leftBound))) {
return false;
}
if (rightBound != null && !rightComparator.test(pluginVersion, parseVersion(rightBound))) {
return false;
}
return true;
}
|
@Test
public void testRange_rightOpen_exact() {
Assert.assertFalse(checker.compatibleVersion("[2.3,4.3)", "4.3"));
Assert.assertFalse(checker.compatibleVersion("(2.3,4.3)", "4.3"));
Assert.assertFalse(checker.compatibleVersion("[,4.3)", "4.3"));
Assert.assertFalse(checker.compatibleVersion("(,4.3)", "4.3"));
}
|
public static String encode(Event event) {
String methodSignature = buildMethodSignature(event.getName(), event.getParameters());
return buildEventSignature(methodSignature);
}
|
@Test
public void testEncode() {
Event event =
new Event(
"Notify",
Arrays.<TypeReference<?>>asList(
new TypeReference<Uint256>() {}, new TypeReference<Uint256>() {}));
assertEquals(
EventEncoder.encode(event),
"0x71e71a8458267085d5ab16980fd5f114d2d37f232479c245d523ce8d23ca40ed");
}
|
List<Set<UiNode>> splitByLayer(List<String> layerTags,
Set<? extends UiNode> nodes) {
final int nLayers = layerTags.size();
if (!layerTags.get(nLayers - 1).equals(LAYER_DEFAULT)) {
throw new IllegalArgumentException(E_DEF_NOT_LAST);
}
List<Set<UiNode>> splitList = new ArrayList<>(layerTags.size());
Map<String, Set<UiNode>> byLayer = new HashMap<>(layerTags.size());
for (String tag : layerTags) {
Set<UiNode> set = new HashSet<>();
byLayer.put(tag, set);
splitList.add(set);
}
for (UiNode n : nodes) {
String which = n.layer();
if (!layerTags.contains(which)) {
which = LAYER_DEFAULT;
}
byLayer.get(which).add(n);
}
return splitList;
}
|
@Test
public void threeLayers() {
title("threeLayers()");
List<Set<UiNode>> result = t2.splitByLayer(ALL_TAGS, NODES);
print(result);
assertEquals("wrong split size", 3, result.size());
Set<UiNode> opt = result.get(0);
Set<UiNode> pkt = result.get(1);
Set<UiNode> def = result.get(2);
assertEquals("opt bad size", 2, opt.size());
assertEquals("missing node A", true, opt.contains(NODE_A));
assertEquals("missing node C", true, opt.contains(NODE_C));
assertEquals("pkt bad size", 2, pkt.size());
assertEquals("missing node B", true, pkt.contains(NODE_B));
assertEquals("missing node E", true, pkt.contains(NODE_E));
assertEquals("def bad size", 2, def.size());
assertEquals("missing node D", true, def.contains(NODE_D));
assertEquals("missing node F", true, def.contains(NODE_F));
}
|
@Override
public String getName() {
return "CircleCI";
}
|
@Test
public void getName() {
assertThat(underTest.getName()).isEqualTo("CircleCI");
}
|
@Override
protected ReadableByteChannel open(LocalResourceId resourceId) throws IOException {
LOG.debug("opening file {}", resourceId);
@SuppressWarnings("resource") // The caller is responsible for closing the channel.
FileInputStream inputStream = new FileInputStream(resourceId.getPath().toFile());
// Use this method for creating the channel (rather than new FileChannel) so that we get
// regular FileNotFoundException. Closing the underlying channel will close the inputStream.
return inputStream.getChannel();
}
|
@Test
public void testReadWithExistingFile() throws Exception {
String expected = "my test string";
File existingFile = temporaryFolder.newFile();
Files.asCharSink(existingFile, StandardCharsets.UTF_8).write(expected);
String data;
try (Reader reader =
Channels.newReader(
localFileSystem.open(
LocalResourceId.fromPath(existingFile.toPath(), false /* isDirectory */)),
StandardCharsets.UTF_8.name())) {
data = new LineReader(reader).readLine();
}
assertEquals(expected, data);
}
|
public static IntervalSet intersect(List<IntervalSet> intervalSets) {
if (intervalSets.isEmpty()) {
return IntervalSet.NEVER;
}
if (intervalSets.size() == 1) {
return intervalSets.get(0);
}
// at least 2 lists of intervals
IntervalSet intersection = intervalSets.get(0);
// intersect with each remaining interval set, starting from the second
for (int i = 1; i < intervalSets.size(); i++) {
intersection = intersection.intersect(intervalSets.get(i));
}
return intersection;
}
|
@Test
public void intersect() {
IntervalSet s1;
IntervalSet s2;
Set<Interval> s;
s1 = new IntervalSet(Arrays.asList(Interval.between(1, 3), Interval.between(11, 13)));
s = Sets.newHashSet(
IntervalUtils.intersect(Arrays.asList(s1, IntervalSet.NEVER)).getIntervals());
Assert.assertEquals(1, s.size());
Assert.assertTrue(s.contains(Interval.NEVER));
s1 = new IntervalSet(Arrays.asList(Interval.between(1, 3), Interval.between(11, 13)));
s2 = new IntervalSet(Arrays.asList(Interval.between(22, 24), Interval.between(32, 34)));
s = Sets.newHashSet(IntervalUtils.intersect(Arrays.asList(s1, s2)).getIntervals());
Assert.assertEquals(1, s.size());
Assert.assertTrue(s.contains(Interval.NEVER));
s1 = new IntervalSet(Arrays.asList(Interval.between(1, 3), Interval.between(11, 13)));
s2 = new IntervalSet(Arrays.asList(Interval.between(2, 4), Interval.between(12, 14)));
s = Sets.newHashSet(IntervalUtils.intersect(Arrays.asList(s1, s2)).getIntervals());
Assert.assertEquals(2, s.size());
Assert
.assertTrue(s.containsAll(Arrays.asList(Interval.between(2, 3), Interval.between(12, 13))));
s1 = new IntervalSet(Arrays.asList(Interval.between(11, 13), Interval.between(1, 3)));
s2 = new IntervalSet(Arrays.asList(Interval.between(12, 14), Interval.between(2, 4)));
s = Sets.newHashSet(IntervalUtils.intersect(Arrays.asList(s1, s2)).getIntervals());
Assert.assertEquals(2, s.size());
Assert
.assertTrue(s.containsAll(Arrays.asList(Interval.between(2, 3), Interval.between(12, 13))));
s1 = new IntervalSet(
Arrays.asList(Interval.between(11, 13), Interval.between(1, 3), Interval.between(21, 23)));
s2 = new IntervalSet(
Arrays.asList(Interval.between(12, 14), Interval.between(2, 4), Interval.between(31, 33)));
s = Sets.newHashSet(IntervalUtils.intersect(Arrays.asList(s1, s2)).getIntervals());
Assert.assertEquals(2, s.size());
Assert
.assertTrue(s.containsAll(Arrays.asList(Interval.between(2, 3), Interval.between(12, 13))));
s1 = new IntervalSet(Arrays.asList(Interval.after(8), Interval.after(18)));
s2 = new IntervalSet(Arrays.asList(Interval.before(10), Interval.before(2)));
s = Sets.newHashSet(IntervalUtils.intersect(Arrays.asList(s1, s2)).getIntervals());
Assert.assertEquals(1, s.size());
Assert.assertTrue(s.containsAll(Arrays.asList(Interval.between(8, 10))));
}
|
public static ScmInfo create(ScannerReport.Changesets changesets) {
requireNonNull(changesets);
Changeset[] lineChangesets = new Changeset[changesets.getChangesetIndexByLineCount()];
LineIndexToChangeset lineIndexToChangeset = new LineIndexToChangeset(changesets);
for (int i = 0; i < changesets.getChangesetIndexByLineCount(); i++) {
lineChangesets[i] = lineIndexToChangeset.apply(i);
}
return new ScmInfoImpl(lineChangesets);
}
|
@Test
public void return_changeset_for_a_given_line() {
ScmInfo scmInfo = ReportScmInfo.create(ScannerReport.Changesets.newBuilder()
.setComponentRef(FILE_REF)
.addChangeset(ScannerReport.Changesets.Changeset.newBuilder()
.setAuthor("john")
.setDate(123456789L)
.setRevision("rev-1")
.build())
.addChangeset(ScannerReport.Changesets.Changeset.newBuilder()
.setAuthor("henry")
.setDate(1234567810L)
.setRevision("rev-2")
.build())
.addChangesetIndexByLine(0)
.addChangesetIndexByLine(1)
.addChangesetIndexByLine(1)
.addChangesetIndexByLine(0)
.build());
assertThat(scmInfo.getAllChangesets()).hasSize(4);
Changeset changeset = scmInfo.getChangesetForLine(4);
assertThat(changeset.getAuthor()).isEqualTo("john");
assertThat(changeset.getDate()).isEqualTo(123456789L);
assertThat(changeset.getRevision()).isEqualTo("rev-1");
}
|
public static void trim(String[] strs) {
if (null == strs) {
return;
}
String str;
for (int i = 0; i < strs.length; i++) {
str = strs[i];
if (null != str) {
strs[i] = trim(str);
}
}
}
|
@Test
public void trimNewLineTest() {
String str = "\r\naaa";
assertEquals("aaa", StrUtil.trim(str));
str = "\raaa";
assertEquals("aaa", StrUtil.trim(str));
str = "\naaa";
assertEquals("aaa", StrUtil.trim(str));
str = "\r\n\r\naaa";
assertEquals("aaa", StrUtil.trim(str));
}
|
public static boolean isValidRootUrl(String url) {
UrlValidator validator = new CustomUrlValidator();
return validator.isValid(url);
}
|
@Test
@Issue("JENKINS-31661")
public void regularCases() {
assertTrue(UrlHelper.isValidRootUrl("http://www.google.com"));
// trailing slash is optional
assertTrue(UrlHelper.isValidRootUrl("http://www.google.com/"));
// path is allowed
assertTrue(UrlHelper.isValidRootUrl("http://www.google.com/jenkins"));
// a port may also be specified
assertTrue(UrlHelper.isValidRootUrl("http://www.google.com:8080"));
assertTrue(UrlHelper.isValidRootUrl("http://www.google.com:8080/jenkins"));
// http and https are the only valid schemes
assertTrue(UrlHelper.isValidRootUrl("https://www.google.com:8080/jenkins"));
// also accepted in their UPPERCASE form
assertTrue(UrlHelper.isValidRootUrl("HTTP://www.google.com:8080/jenkins"));
assertTrue(UrlHelper.isValidRootUrl("HTTPS://www.google.com:8080/jenkins"));
assertTrue(UrlHelper.isValidRootUrl("http://localhost:8080/jenkins"));
assertTrue(UrlHelper.isValidRootUrl("http://localhost:8080/jenkins/"));
assertTrue(UrlHelper.isValidRootUrl("http://my_server:8080/jenkins"));
assertTrue(UrlHelper.isValidRootUrl("http://MY_SERVER_IN_PRIVATE_NETWORK:8080/jenkins"));
assertTrue(UrlHelper.isValidRootUrl("http://jenkins"));
assertTrue(UrlHelper.isValidRootUrl("http://j"));
assertTrue(UrlHelper.isValidRootUrl("http://j.io"));
assertFalse(UrlHelper.isValidRootUrl("http://jenkins::"));
assertFalse(UrlHelper.isValidRootUrl("http://jenkins::80"));
// scheme must be correctly spelled (missing :)
assertFalse(UrlHelper.isValidRootUrl("http//jenkins"));
// scheme is mandatory
assertFalse(UrlHelper.isValidRootUrl("com."));
// spaces are forbidden
assertFalse(UrlHelper.isValidRootUrl("http:// "));
// examples not passing with a simple `new URL(url).toURI()` check
assertFalse(UrlHelper.isValidRootUrl("http://jenkins//context"));
assertFalse(UrlHelper.isValidRootUrl("http:/jenkins"));
assertFalse(UrlHelper.isValidRootUrl("http://.com"));
assertFalse(UrlHelper.isValidRootUrl("http:/:"));
assertFalse(UrlHelper.isValidRootUrl("http://..."));
assertFalse(UrlHelper.isValidRootUrl("http://::::@example.com"));
assertFalse(UrlHelper.isValidRootUrl("ftp://jenkins"));
}
|
public static UserOperatorConfig buildFromMap(Map<String, String> map) {
Map<String, String> envMap = new HashMap<>(map);
envMap.keySet().retainAll(UserOperatorConfig.keyNames());
Map<String, Object> generatedMap = ConfigParameter.define(envMap, CONFIG_VALUES);
return new UserOperatorConfig(generatedMap);
}
|
@Test
public void testInvalidOperationTimeout() {
Map<String, String> envVars = new HashMap<>(UserOperatorConfigTest.ENV_VARS);
envVars.put(UserOperatorConfig.OPERATION_TIMEOUT_MS.key(), "abcdefg");
assertThrows(InvalidConfigurationException.class, () -> UserOperatorConfig.buildFromMap(envVars));
}
|
public static String substVars(String val, PropertyContainer pc1) throws ScanException {
return substVars(val, pc1, null);
}
|
@Test
public void testSubstVarsVariableNotClosed() throws ScanException {
String noSubst = "testing if ${v1 works";
try {
@SuppressWarnings("unused")
String result = OptionHelper.substVars(noSubst, context);
fail();
} catch (IllegalArgumentException e) {
// ok
}
}
|
@Override
public String getSessionId() {
return sessionID;
}
|
@Test
public void testGetRequest() {
log.info("Starting get async");
assertNotNull("Incorrect sessionId", session1.getSessionId());
try {
assertTrue("NETCONF get running command failed. ",
GET_REPLY_PATTERN.matcher(session1.get(SAMPLE_REQUEST, null)).matches());
} catch (NetconfException e) {
e.printStackTrace();
fail("NETCONF get test failed: " + e.getMessage());
}
log.info("Finishing get async");
}
|
@Override
public TaskAttemptID acquireTaskAttemptIdLock(Configuration conf, int taskId) {
String jobJtIdentifier = getJobJtIdentifier(conf);
JobID jobId = HadoopFormats.getJobId(conf);
int taskAttemptCandidate = 0;
boolean taskAttemptAcquired = false;
while (!taskAttemptAcquired) {
taskAttemptCandidate++;
Path path =
new Path(
locksDir,
String.format(
LOCKS_DIR_TASK_ATTEMPT_PATTERN, jobJtIdentifier, taskId, taskAttemptCandidate));
taskAttemptAcquired = tryCreateFile(conf, path);
}
return HadoopFormats.createTaskAttemptID(jobId, taskId, taskAttemptCandidate);
}
|
@Test
public void testTaskAttemptIdAcquire() {
int tasksCount = 100;
int taskId = 25;
for (int i = 0; i < tasksCount; i++) {
TaskAttemptID taskAttemptID = tested.acquireTaskAttemptIdLock(configuration, taskId);
assertTrue(isFileExists(getTaskAttemptIdPath(taskId, taskAttemptID.getId())));
}
}
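The acquisition loop above relies on atomic create-if-absent file creation: attempt ids are tried in increasing order until a lock file can be created. A hedged, standalone sketch of the same pattern using java.nio on the local filesystem (paths and names are illustrative, not the Hadoop FileSystem API used above):

import java.io.IOException;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.Files;
import java.nio.file.Path;

public class AttemptLockSketch {
static int acquireAttemptId(Path locksDir, int taskId) throws IOException {
int candidate = 0;
while (true) {
candidate++;
Path lock = locksDir.resolve("task-" + taskId + "-attempt-" + candidate + ".lock");
try {
// Atomic create-if-absent: exactly one caller can win each candidate id.
Files.createFile(lock);
return candidate;
} catch (FileAlreadyExistsException e) {
// Someone else already holds this id; try the next one.
}
}
}

public static void main(String[] args) throws IOException {
Path dir = Files.createTempDirectory("locks");
System.out.println(acquireAttemptId(dir, 25)); // 1
System.out.println(acquireAttemptId(dir, 25)); // 2
}
}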
|
public String getString(HazelcastProperty property) {
String value = properties.getProperty(property.getName());
if (value != null) {
return value;
}
value = property.getSystemProperty();
if (value != null) {
return value;
}
HazelcastProperty parent = property.getParent();
if (parent != null) {
return getString(parent);
}
String deprecatedName = property.getDeprecatedName();
if (deprecatedName != null) {
value = get(deprecatedName);
if (value == null) {
value = System.getProperty(deprecatedName);
}
if (value != null) {
// We don't have a logger available here because the Logging service is constructed after the Properties are created.
System.err.print("Don't use deprecated '" + deprecatedName + "' "
+ "but use '" + property.getName() + "' instead. "
+ "The former name will be removed in the next Hazelcast release.");
return value;
}
}
Function<HazelcastProperties, ?> function = property.getFunction();
if (function != null) {
return "" + function.apply(this);
}
return property.getDefaultValue();
}
|
@Test
public void setProperty_ensureUsageOfSystemProperty() {
ENTERPRISE_LICENSE_KEY.setSystemProperty("systemValue");
HazelcastProperties hazelcastProperties = new HazelcastProperties(config);
String value = hazelcastProperties.getString(ENTERPRISE_LICENSE_KEY);
System.clearProperty(ENTERPRISE_LICENSE_KEY.getName());
assertEquals("systemValue", value);
}
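The getString lookup above resolves a property in a fixed order: explicitly configured value, then the JVM system property, then the parent property (recursively), then the deprecated name, then the property's function, then the default. A hypothetical, trimmed-down sketch of the core chain (the deprecated-name and function steps are omitted), showing that a child's own default is skipped once a parent exists:

import java.util.Properties;

public class PropertyLookupSketch {
record Prop(String name, Prop parent, String defaultValue) { }

static String getString(Properties configured, Prop p) {
String value = configured.getProperty(p.name());
if (value != null) {
return value;
}
value = System.getProperty(p.name());
if (value != null) {
return value;
}
if (p.parent() != null) {
// Delegating to the parent means the child's own default is never consulted.
return getString(configured, p.parent());
}
return p.defaultValue();
}

public static void main(String[] args) {
Prop parent = new Prop("hz.parent", null, "parent-default");
Prop child = new Prop("hz.child", parent, "child-default");
// Nothing configured and no system properties set: prints "parent-default".
System.out.println(getString(new Properties(), child));
}
}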
|
private ContentType getContentType(Exchange exchange) throws ParseException {
String contentTypeStr = ExchangeHelper.getContentType(exchange);
if (contentTypeStr == null) {
contentTypeStr = DEFAULT_CONTENT_TYPE;
}
ContentType contentType = new ContentType(contentTypeStr);
String contentEncoding = ExchangeHelper.getContentEncoding(exchange);
// add a charset parameter for text subtypes
if (contentEncoding != null && contentType.match("text/*")) {
contentType.setParameter("charset", MimeUtility.mimeCharset(contentEncoding));
}
return contentType;
}
|
@Test
@Disabled("Fails on CI servers and some platforms - possibly due to locale settings")
public void roundtripWithTextAttachmentsAndSpecialCharacters() throws IOException {
String attContentType = "text/plain";
String attText = "Attachment Text with special characters: \u00A9";
String attFileName = "Attachment File Name with special characters: \u00A9";
in.setBody("Body text with special characters: \u00A9");
in.setHeader(Exchange.CONTENT_TYPE, "text/plain");
in.setHeader(Exchange.CONTENT_ENCODING, "UTF8");
addAttachment(attContentType, attText, attFileName);
Exchange result = template.send("direct:roundtrip", exchange);
AttachmentMessage out = result.getMessage(AttachmentMessage.class);
assertEquals("Body text with special characters: \u00A9", out.getBody(String.class));
assertTrue(out.getHeader(Exchange.CONTENT_TYPE, String.class).startsWith("text/plain"));
assertEquals("UTF8", out.getHeader(Exchange.CONTENT_ENCODING));
assertTrue(out.hasAttachments());
assertEquals(1, out.getAttachmentNames().size());
assertTrue(out.getAttachmentNames().contains(attFileName));
DataHandler dh = out.getAttachment(attFileName);
assertNotNull(dh);
assertEquals(attContentType, dh.getContentType());
InputStream is = dh.getInputStream();
ByteArrayOutputStream os = new ByteArrayOutputStream();
IOHelper.copyAndCloseInput(is, os);
assertEquals(attText, new String(os.toByteArray()));
}
|
public static Set<String> fetchBrokerNameByClusterName(final MQAdminExt adminExt, final String clusterName)
throws Exception {
ClusterInfo clusterInfoSerializeWrapper = adminExt.examineBrokerClusterInfo();
Set<String> brokerNameSet = clusterInfoSerializeWrapper.getClusterAddrTable().get(clusterName);
if (brokerNameSet == null || brokerNameSet.isEmpty()) {
throw new Exception(ERROR_MESSAGE);
}
return brokerNameSet;
}
|
@Test
public void testFetchBrokerNameByClusterName() throws Exception {
Set<String> result = CommandUtil.fetchBrokerNameByClusterName(defaultMQAdminExtImpl, "default-cluster");
assertThat(result.contains("default-broker")).isTrue();
assertThat(result.contains("default-broker-one")).isTrue();
assertThat(result.size()).isEqualTo(2);
}
|
public CompletableFuture<Triple<MessageExt, String, Boolean>> getMessageAsync(String topic, long offset, int queueId, String brokerName, boolean deCompressBody) {
MessageStore messageStore = brokerController.getMessageStoreByBrokerName(brokerName);
if (messageStore != null) {
return messageStore.getMessageAsync(innerConsumerGroupName, topic, queueId, offset, 1, null)
.thenApply(result -> {
if (result == null) {
LOG.warn("getMessageResult is null , innerConsumerGroupName {}, topic {}, offset {}, queueId {}", innerConsumerGroupName, topic, offset, queueId);
return Triple.of(null, "getMessageResult is null", false); // local store, so no retry
}
List<MessageExt> list = decodeMsgList(result, deCompressBody);
if (list == null || list.isEmpty()) {
// OFFSET_FOUND_NULL returned by TieredMessageStore indicates an exception occurred
boolean needRetry = GetMessageStatus.OFFSET_FOUND_NULL.equals(result.getStatus())
&& messageStore instanceof TieredMessageStore;
LOG.warn("Can not get msg , topic {}, offset {}, queueId {}, needRetry {}, result is {}",
topic, offset, queueId, needRetry, result);
return Triple.of(null, "Can not get msg", needRetry);
}
return Triple.of(list.get(0), "", false);
});
} else {
return getMessageFromRemoteAsync(topic, offset, queueId, brokerName);
}
}
|
@Test
public void getMessageAsyncTest_localStore_message_found() throws Exception {
when(brokerController.getMessageStoreByBrokerName(any())).thenReturn(defaultMessageStore);
when(defaultMessageStore.getMessageAsync(anyString(), anyString(), anyInt(), anyLong(), anyInt(), any()))
.thenReturn(CompletableFuture.completedFuture(mockGetMessageResult(2, TEST_TOPIC, "HW".getBytes())));
Triple<MessageExt, String, Boolean> rst = escapeBridge.getMessageAsync(TEST_TOPIC, 0, DEFAULT_QUEUE_ID, BROKER_NAME, false).join();
Assert.assertNotNull(rst.getLeft());
Assert.assertEquals(0, rst.getLeft().getQueueOffset());
Assert.assertTrue(Arrays.equals("HW".getBytes(), rst.getLeft().getBody()));
Assert.assertFalse(rst.getRight());
}
|