public static Builder withMaximumSizeBytes(long maxBloomFilterSizeBytes) { checkArgument(maxBloomFilterSizeBytes > 0, "Expected Bloom filter size limit to be positive."); long optimalNumberOfElements = optimalNumInsertions(maxBloomFilterSizeBytes, DEFAULT_FALSE_POSITIVE_PROBABILITY); checkArgument( optimalNumberOfElements <= MAX_ELEMENTS, "The specified size limit would attempt to create a Bloom filter builder larger than " + "the maximum supported size of 2^63."); return withMaximumNumberOfInsertionsForOptimalBloomFilter(optimalNumberOfElements); }
@Test public void testBuilderWithMaxSize() throws Exception { ScalableBloomFilter.Builder builder = ScalableBloomFilter.withMaximumSizeBytes(MAX_SIZE); int maxValue = insertAndVerifyContents(builder, (int) (MAX_SIZE * 1.1)); ScalableBloomFilter bloomFilter = builder.build(); // Verify that the decoded value contains all the values and that it is much smaller // than the maximum size. verifyCoder(bloomFilter, maxValue, MAX_SIZE); }
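For context, a minimal sketch of the textbook Bloom filter sizing formula that a helper like optimalNumInsertions(bytes, p) presumably implements (its body is not shown above, so this is an assumption): n = -m(ln 2)^2 / ln p, where m is the filter size in bits and p the false-positive probability.

// Sketch only: assumes optimalNumInsertions applies the standard formula
// n = -m * (ln 2)^2 / ln(p), with m the filter size in bits.
public final class BloomSizingSketch {
    static long optimalNumInsertions(long sizeBytes, double falsePositiveProbability) {
        double bits = sizeBytes * 8.0;
        return (long) (-bits * Math.log(2) * Math.log(2) / Math.log(falsePositiveProbability));
    }

    public static void main(String[] args) {
        // With a 1 MiB limit and a 3% false-positive rate, roughly 1.1 million insertions fit.
        System.out.println(optimalNumInsertions(1 << 20, 0.03));
    }
}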
public static Map<String, InstanceInfo> selectInstancesMappedById(Application application) { Map<String, InstanceInfo> result = new HashMap<>(); for (InstanceInfo instance : application.getInstances()) { result.put(instance.getId(), instance); } return result; }
@Test public void testSelectInstancesMappedByIdIfNotNullReturnMapOfInstances() { Application application = createSingleInstanceApp("foo", "foo", InstanceInfo.ActionType.ADDED); HashMap<String, InstanceInfo> hashMap = new HashMap<>(); hashMap.put("foo", application.getByInstanceId("foo")); Assert.assertEquals(hashMap, EurekaEntityFunctions.selectInstancesMappedById(application)); }
@Nonnull public <K, V> KafkaProducer<K, V> getProducer(@Nullable String transactionalId) { if (getConfig().isShared()) { if (transactionalId != null) { throw new IllegalArgumentException("Cannot use transactions with shared " + "KafkaProducer for DataConnection " + getConfig().getName()); } retain(); //noinspection unchecked return (KafkaProducer<K, V>) producerSupplier.get(); } else { if (transactionalId != null) { @SuppressWarnings({"rawtypes", "unchecked"}) Map<String, Object> castProperties = (Map) getConfig().getProperties(); Map<String, Object> copy = new HashMap<>(castProperties); copy.put("transactional.id", transactionalId); return new KafkaProducer<>(copy); } else { return new KafkaProducer<>(getConfig().getProperties()); } } }
@Test public void shared_data_connection_should_return_same_producer() { kafkaDataConnection = createKafkaDataConnection(kafkaTestSupport); try (Producer<Object, Object> p1 = kafkaDataConnection.getProducer(null); Producer<Object, Object> p2 = kafkaDataConnection.getProducer(null)) { assertThat(p1).isSameAs(p2); } }
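The test above relies on the shared branch handing back one producer instance on every call. A minimal sketch of the memoizing-supplier pattern such a producerSupplier could use (an illustration of the idea, not the Hazelcast implementation):

import java.util.function.Supplier;

// Lazily creates the delegate's value once and returns the same instance thereafter.
final class SharedSupplier<T> implements Supplier<T> {
    private final Supplier<T> delegate;
    private volatile T instance;

    SharedSupplier(Supplier<T> delegate) { this.delegate = delegate; }

    @Override
    public T get() {
        T result = instance;
        if (result == null) {
            synchronized (this) {
                result = instance;
                if (result == null) {
                    instance = result = delegate.get(); // created at most once
                }
            }
        }
        return result;
    }
}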
public ArrayList<AnalysisResult<T>> getOutliers(Track<T> track) { // the stream is wonky due to the raw type, probably could be improved return track.points().stream() .map(point -> analyzePoint(point, track)) .filter(analysisResult -> analysisResult.isOutlier()) .collect(toCollection(ArrayList::new)); }
@Test public void testBuggedOutlier() { /* * This track contains an "outlier" that shouldn't really be an outlier. This test ensures * that future editions of VerticalOutlierDetector do not flag this track. */ Track<NopHit> testTrack = createTrackFromResource( VerticalOutlierDetector.class, "outlierBug-A11-HAG75A-7.txt" ); /* * An imperfect outlier detector can consider this point an Outlier: * * [RH],STARS,A11,10/18/2016,02:38:16.518,,,,2261,213,217,110,061.36202,-153.26353,0038,0000,-93.3379,13.3411,,,,A11,,,,,,ACT,IFR,,00000,,,,,,,1,,0,{RH} */ Collection<AnalysisResult<NopHit>> outliers = (new VerticalOutlierDetector<NopHit>()).getOutliers(testTrack); confirmExactlyTheseOutliers( outliers, "[RH],STARS,A11,10/18/2016,02:25:16.460,,,,2261,000,000,xxx,061.64543,-154.94138,0038,0000,-140.2090,33.3177,,,,A11,,,,,,ACT,IFR,,00000,,,,,,,1,,0,{RH}" ); }
@Override protected ConfigData<MetaData> fromJson(final JsonObject data) { return GsonUtils.getGson().fromJson(data, new TypeToken<ConfigData<MetaData>>() { }.getType()); }
@Test public void testFromJson() { ConfigData<MetaData> metaDataConfigData = new ConfigData<>(); MetaData metaData = new MetaData(); metaDataConfigData.setData(Collections.singletonList(metaData)); JsonObject jsonObject = GsonUtils.getGson().fromJson(GsonUtils.getGson().toJson(metaDataConfigData), JsonObject.class); assertThat(mockMetaDataRefresh.fromJson(jsonObject), is(metaDataConfigData)); }
@Override public Num calculate(BarSeries series, Position position) { if (position.isClosed()) { Num profit = excludeCosts ? position.getGrossProfit() : position.getProfit(); return profit.isPositive() ? profit : series.zero(); } return series.zero(); }
@Test public void calculateComparingIncludingVsExcludingCosts() { MockBarSeries series = new MockBarSeries(numFunction, 100, 105, 100, 80, 85, 120); FixedTransactionCostModel transactionCost = new FixedTransactionCostModel(1); ZeroCostModel holdingCost = new ZeroCostModel(); TradingRecord tradingRecord = new BaseTradingRecord(Trade.TradeType.BUY, transactionCost, holdingCost); // entry price = 100 (cost = 1) => netPrice = 101, grossPrice = 100 tradingRecord.enter(0, series.getBar(0).getClosePrice(), numOf(1)); // exit price = 105 (cost = 1) => netPrice = 104, grossPrice = 105 tradingRecord.exit(1, series.getBar(1).getClosePrice(), tradingRecord.getCurrentPosition().getEntry().getAmount()); // entry price = 100 (cost = 1) => netPrice = 101, grossPrice = 100 tradingRecord.enter(2, series.getBar(2).getClosePrice(), numOf(1)); // exit price = 120 (cost = 1) => netPrice = 119, grossPrice = 120 tradingRecord.exit(5, series.getBar(5).getClosePrice(), tradingRecord.getCurrentPosition().getEntry().getAmount()); // include costs, i.e. profit - costs: // [(104 - 101)] + [(119 - 101)] = 3 + 18 = +21 profit // [(105 - 100)] + [(120 - 100)] = 5 + 20 = +25 profit - 4 = +21 profit AnalysisCriterion profitIncludingCosts = getCriterion(false); assertNumEquals(21, profitIncludingCosts.calculate(series, tradingRecord)); // exclude costs, i.e. costs are not contained: // [(105 - 100)] + [(120 - 100)] = 5 + 20 = +25 profit AnalysisCriterion profitExcludingCosts = getCriterion(true); assertNumEquals(25, profitExcludingCosts.calculate(series, tradingRecord)); }
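The expected values in the test can be checked with a few lines of arithmetic, assuming a fixed cost of 1 per trade and two trades (entry plus exit) per position:

// Arithmetic check of the +25 gross / +21 net figures asserted above.
public final class ProfitCheck {
    public static void main(String[] args) {
        double[][] positions = { {100, 105}, {100, 120} }; // {entry, exit} prices
        double costPerTrade = 1;
        double gross = 0, net = 0;
        for (double[] p : positions) {
            gross += p[1] - p[0];
            net += p[1] - p[0] - 2 * costPerTrade; // entry cost + exit cost
        }
        System.out.println(gross); // 25.0, profit excluding costs
        System.out.println(net);   // 21.0, profit including costs
    }
}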
@Override public CEFParserResult evaluate(FunctionArgs args, EvaluationContext context) { final String cef = valueParam.required(args, context); final boolean useFullNames = useFullNamesParam.optional(args, context).orElse(false); final CEFParser parser = CEFParserFactory.create(); if (cef == null || cef.isEmpty()) { LOG.debug("NULL or empty parameter passed to CEF parser function. Not evaluating."); return null; } LOG.debug("Running CEF parser for [{}].", cef); final MappedMessage message; try (Timer.Context timer = parseTime.time()) { message = new MappedMessage(parser.parse(cef.trim()), useFullNames); } catch (Exception e) { LOG.error("Error while parsing CEF message: {}", cef, e); return null; } final Map<String, Object> fields = new HashMap<>(); /* * Add all CEF standard fields. We are prefixing with cef_ to avoid overwriting existing fields or to be * overwritten ourselves later in the processing. The user is encouraged to run another pipeline function * to clean up field names if desired. */ fields.put("cef_version", message.cefVersion()); fields.put("device_vendor", message.deviceVendor()); fields.put("device_product", message.deviceProduct()); fields.put("device_version", message.deviceVersion()); fields.put("device_event_class_id", message.deviceEventClassId()); fields.put("name", message.name()); fields.put("severity", message.severity()); // Add all custom CEF fields. fields.putAll(message.mappedExtensions()); return new CEFParserResult(fields); }
@Test public void evaluate_returns_result_for_valid_CEF_string_with_full_names() throws Exception { final CEFParserFunction function = new CEFParserFunction(new MetricRegistry()); final Map<String, Expression> arguments = ImmutableMap.of( CEFParserFunction.VALUE, new StringExpression(new CommonToken(0), "CEF:0|vendor|product|1.0|id|name|low|dvc=example.com msg=Foobar"), CEFParserFunction.USE_FULL_NAMES, new BooleanExpression(new CommonToken(0), true) ); final FunctionArgs functionArgs = new FunctionArgs(function, arguments); final Message message = messageFactory.createMessage("__dummy", "__dummy", DateTime.parse("2010-07-30T16:03:25Z")); final EvaluationContext evaluationContext = new EvaluationContext(message); final CEFParserResult result = function.evaluate(functionArgs, evaluationContext); assertNotNull(result); assertEquals(0, result.get("cef_version")); assertEquals("vendor", result.get("device_vendor")); assertEquals("product", result.get("device_product")); assertEquals("1.0", result.get("device_version")); assertEquals("id", result.get("device_event_class_id")); assertEquals("low", result.get("severity")); assertEquals("example.com", result.get("deviceAddress")); assertEquals("Foobar", result.get("message")); }
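For reference, the seven pipe-delimited CEF header fields map onto the prefixed keys the function emits. A standalone sketch of that split (illustrative only, not the Graylog parser itself):

// Splits a CEF line into its seven header fields plus the extension string.
public final class CefHeaderSketch {
    public static void main(String[] args) {
        String cef = "CEF:0|vendor|product|1.0|id|name|low|dvc=example.com msg=Foobar";
        String[] parts = cef.split("\\|", 8);
        System.out.println("cef_version=" + parts[0].substring("CEF:".length()));
        System.out.println("device_vendor=" + parts[1]);
        System.out.println("device_product=" + parts[2]);
        System.out.println("device_version=" + parts[3]);
        System.out.println("device_event_class_id=" + parts[4]);
        System.out.println("name=" + parts[5]);
        System.out.println("severity=" + parts[6]);
        System.out.println("extensions=" + parts[7]); // key=value pairs, mapped separately
    }
}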
@Override public HostToKeyMapper<Integer> getAllPartitionsMultipleHosts(URI serviceUri, int numHostPerPartition) throws ServiceUnavailableException { return getHostToKeyMapper(serviceUri, null, numHostPerPartition, null); }
@Test(dataProvider = "ringFactories") public void testAllPartitionMultipleHostsStickKey(RingFactory<URI> ringFactory) throws URISyntaxException, ServiceUnavailableException { int numHost = 2; URI serviceURI = new URI("d2://articles"); ConsistentHashKeyMapper mapper = getConsistentHashKeyMapper(ringFactory); String myStickyKey = "sticky"; HostToKeyMapper<Integer> result = mapper.getAllPartitionsMultipleHosts(serviceURI, numHost, myStickyKey); Map<Integer, List<URI>> originalOrderingOfHost = getOrderingOfHostsForEachKey(result, numHost); // repeat 100 times. The ordering of the hosts should always be the same because of sticky key int numOfMatch = 0; for (int i = 0; i < 100; i++) { result = mapper.getAllPartitionsMultipleHosts(serviceURI, numHost, myStickyKey); Map<Integer, List<URI>> newOrderingOfHost = getOrderingOfHostsForEachKey(result, numHost); if (newOrderingOfHost.equals(originalOrderingOfHost)) { numOfMatch++; } } Assert.assertEquals(100, numOfMatch); }
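The stickiness the test verifies comes from ranking hosts by a deterministic function of the sticky key, so the same key always yields the same ordering. A minimal sketch of that idea (names here are illustrative, not the d2 API):

import java.util.*;

// Deterministic host ordering per sticky key: same key, same order, every time.
public final class StickyOrderingSketch {
    static List<String> order(List<String> hosts, String stickyKey) {
        List<String> sorted = new ArrayList<>(hosts);
        sorted.sort(Comparator.comparingInt(h -> (stickyKey + "|" + h).hashCode()));
        return sorted;
    }

    public static void main(String[] args) {
        List<String> hosts = List.of("h1:1234", "h2:1234");
        // Repeated calls with the same key give the same ordering.
        System.out.println(order(hosts, "sticky").equals(order(hosts, "sticky"))); // true
    }
}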
public boolean removeAll(Collection<?> c) { throw e; } // 'e' is presumably a pre-built ReadOnlyException instance held by the enclosing read-only NodeVector
@Test void require_that_removeAll_throws_exception() { assertThrows(NodeVector.ReadOnlyException.class, () -> new TestNodeVector("foo").removeAll(null)); }
public MessageListener messageListener(MessageListener messageListener, boolean addConsumerSpan) { if (messageListener instanceof TracingMessageListener) return messageListener; return new TracingMessageListener(messageListener, this, addConsumerSpan); }
@Test void messageListener_doesntDoubleWrap() { MessageListener wrapped = jmsTracing.messageListener(mock(MessageListener.class), false); assertThat(jmsTracing.messageListener(wrapped, false)) .isSameAs(wrapped); }
@Override public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) { super.onDataReceived(device, data); if (data.size() < 2) { onInvalidDataReceived(device, data); return; } // Read the Op Code final int opCode = data.getIntValue(Data.FORMAT_UINT8, 0); // Estimate the expected operand size based on the Op Code int expectedOperandSize; switch (opCode) { case OP_CODE_COMMUNICATION_INTERVAL_RESPONSE -> // UINT8 expectedOperandSize = 1; case OP_CODE_CALIBRATION_VALUE_RESPONSE -> // Calibration Value expectedOperandSize = 10; case OP_CODE_PATIENT_HIGH_ALERT_LEVEL_RESPONSE, OP_CODE_PATIENT_LOW_ALERT_LEVEL_RESPONSE, OP_CODE_HYPO_ALERT_LEVEL_RESPONSE, OP_CODE_HYPER_ALERT_LEVEL_RESPONSE, OP_CODE_RATE_OF_DECREASE_ALERT_LEVEL_RESPONSE, OP_CODE_RATE_OF_INCREASE_ALERT_LEVEL_RESPONSE -> // SFLOAT expectedOperandSize = 2; case OP_CODE_RESPONSE_CODE -> // Request Op Code (UINT8), Response Code Value (UINT8) expectedOperandSize = 2; default -> { onInvalidDataReceived(device, data); return; } } // Verify packet length if (data.size() != 1 + expectedOperandSize && data.size() != 1 + expectedOperandSize + 2) { onInvalidDataReceived(device, data); return; } // Verify CRC if present final boolean crcPresent = data.size() == 1 + expectedOperandSize + 2; // opCode + expected operand + CRC if (crcPresent) { final int expectedCrc = data.getIntValue(Data.FORMAT_UINT16_LE, 1 + expectedOperandSize); final int actualCrc = CRC16.MCRF4XX(data.getValue(), 0, 1 + expectedOperandSize); if (expectedCrc != actualCrc) { onCGMSpecificOpsResponseReceivedWithCrcError(device, data); return; } } switch (opCode) { case OP_CODE_COMMUNICATION_INTERVAL_RESPONSE -> { final int interval = data.getIntValue(Data.FORMAT_UINT8, 1); onContinuousGlucoseCommunicationIntervalReceived(device, interval, crcPresent); return; } case OP_CODE_CALIBRATION_VALUE_RESPONSE -> { final float glucoseConcentrationOfCalibration = data.getFloatValue(Data.FORMAT_SFLOAT, 1); final int calibrationTime = data.getIntValue(Data.FORMAT_UINT16_LE, 3); final int calibrationTypeAndSampleLocation = data.getIntValue(Data.FORMAT_UINT8, 5); @SuppressLint("WrongConstant") final int calibrationType = calibrationTypeAndSampleLocation & 0x0F; final int calibrationSampleLocation = calibrationTypeAndSampleLocation >> 4; final int nextCalibrationTime = data.getIntValue(Data.FORMAT_UINT16_LE, 6); final int calibrationDataRecordNumber = data.getIntValue(Data.FORMAT_UINT16_LE, 8); final int calibrationStatus = data.getIntValue(Data.FORMAT_UINT8, 10); onContinuousGlucoseCalibrationValueReceived(device, glucoseConcentrationOfCalibration, calibrationTime, nextCalibrationTime, calibrationType, calibrationSampleLocation, calibrationDataRecordNumber, new CGMCalibrationStatus(calibrationStatus), crcPresent); return; } case OP_CODE_RESPONSE_CODE -> { final int requestCode = data.getIntValue(Data.FORMAT_UINT8, 1); // ignore final int responseCode = data.getIntValue(Data.FORMAT_UINT8, 2); if (responseCode == CGM_RESPONSE_SUCCESS) { onCGMSpecificOpsOperationCompleted(device, requestCode, crcPresent); } else { onCGMSpecificOpsOperationError(device, requestCode, responseCode, crcPresent); } return; } } // Read SFLOAT value final float value = data.getFloatValue(Data.FORMAT_SFLOAT, 1); switch (opCode) { case OP_CODE_PATIENT_HIGH_ALERT_LEVEL_RESPONSE -> onContinuousGlucosePatientHighAlertReceived(device, value, crcPresent); case OP_CODE_PATIENT_LOW_ALERT_LEVEL_RESPONSE -> onContinuousGlucosePatientLowAlertReceived(device, value, crcPresent); case 
OP_CODE_HYPO_ALERT_LEVEL_RESPONSE -> onContinuousGlucoseHypoAlertReceived(device, value, crcPresent); case OP_CODE_HYPER_ALERT_LEVEL_RESPONSE -> onContinuousGlucoseHyperAlertReceived(device, value, crcPresent); case OP_CODE_RATE_OF_DECREASE_ALERT_LEVEL_RESPONSE -> onContinuousGlucoseRateOfDecreaseAlertReceived(device, value, crcPresent); case OP_CODE_RATE_OF_INCREASE_ALERT_LEVEL_RESPONSE -> onContinuousGlucoseRateOfIncreaseAlertReceived(device, value, crcPresent); } }
@Test public void onContinuousGlucoseRateOfDecreaseAlertReceived() { final Data data = new Data(new byte[] { 21, 1, 16}); callback.onDataReceived(null, data); assertEquals("Level", 10.0f, rateOfDecreaseAlertLevel, 0.00); assertFalse(secured); }
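The expected 10.0 follows from how IEEE-11073 16-bit SFLOAT values decode. A minimal decoder sketch (not the BLE library's implementation) applied to the operand bytes {1, 16} from the test: little-endian 0x1001 splits into a 4-bit exponent (1) and a 12-bit mantissa (1), giving 1 * 10^1.

// Decodes a little-endian SFLOAT: 12-bit signed mantissa, 4-bit signed base-10 exponent.
public final class SfloatSketch {
    static float decode(byte lo, byte hi) {
        int raw = ((hi & 0xFF) << 8) | (lo & 0xFF);
        int mantissa = raw & 0x0FFF;
        if (mantissa >= 0x0800) mantissa -= 0x1000; // sign-extend 12 bits
        int exponent = raw >> 12;
        if (exponent >= 0x8) exponent -= 0x10;      // sign-extend 4 bits
        return (float) (mantissa * Math.pow(10, exponent));
    }

    public static void main(String[] args) {
        System.out.println(decode((byte) 1, (byte) 16)); // 10.0
    }
}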
public static DeploymentDescriptor merge(List<DeploymentDescriptor> descriptorHierarchy, MergeMode mode) { if (descriptorHierarchy == null || descriptorHierarchy.isEmpty()) { throw new IllegalArgumentException("Descriptor hierarchy list cannot be empty"); } if (descriptorHierarchy.size() == 1) { return descriptorHierarchy.get(0); } Deque<DeploymentDescriptor> stack = new ArrayDeque<>(); descriptorHierarchy.forEach(stack::push); while (stack.size() > 1) { stack.push(merge(stack.pop(), stack.pop(), mode)); } // last element from the stack is the one that contains all merged descriptors return stack.pop(); }
@Test public void testDeploymentDescriptorMergeHierarchy() { DeploymentDescriptor primary = new DeploymentDescriptorImpl("org.jbpm.domain"); primary.getBuilder() .addMarshalingStrategy(new ObjectModel("org.jbpm.test.CustomStrategy", new Object[]{"param2"})); assertThat(primary).isNotNull(); assertThat(primary.getPersistenceUnit()).isEqualTo("org.jbpm.domain"); assertThat(primary.getAuditPersistenceUnit()).isEqualTo("org.jbpm.domain"); assertThat(primary.getAuditMode()).isEqualTo(AuditMode.JPA); assertThat(primary.getPersistenceMode()).isEqualTo(PersistenceMode.JPA); assertThat(primary.getRuntimeStrategy()).isEqualTo(RuntimeStrategy.SINGLETON); assertThat(primary.getMarshallingStrategies().size()).isEqualTo(1); assertThat(primary.getConfiguration().size()).isEqualTo(0); assertThat(primary.getEnvironmentEntries().size()).isEqualTo(0); assertThat(primary.getEventListeners().size()).isEqualTo(0); assertThat(primary.getGlobals().size()).isEqualTo(0); assertThat(primary.getTaskEventListeners().size()).isEqualTo(0); assertThat(primary.getWorkItemHandlers().size()).isEqualTo(0); DeploymentDescriptor secondary = new DeploymentDescriptorImpl("org.jbpm.domain"); secondary.getBuilder() .auditMode(AuditMode.NONE) .persistenceMode(PersistenceMode.JPA) .persistenceUnit("my.custom.unit") .auditPersistenceUnit("my.custom.unit2"); assertThat(secondary).isNotNull(); assertThat(secondary.getPersistenceUnit()).isEqualTo("my.custom.unit"); assertThat(secondary.getAuditPersistenceUnit()).isEqualTo("my.custom.unit2"); assertThat(secondary.getAuditMode()).isEqualTo(AuditMode.NONE); assertThat(secondary.getPersistenceMode()).isEqualTo(PersistenceMode.JPA); assertThat(secondary.getRuntimeStrategy()).isEqualTo(RuntimeStrategy.SINGLETON); assertThat(secondary.getMarshallingStrategies().size()).isEqualTo(0); assertThat(secondary.getConfiguration().size()).isEqualTo(0); assertThat(secondary.getEnvironmentEntries().size()).isEqualTo(0); assertThat(secondary.getEventListeners().size()).isEqualTo(0); assertThat(secondary.getGlobals().size()).isEqualTo(0); assertThat(secondary.getTaskEventListeners().size()).isEqualTo(0); assertThat(secondary.getWorkItemHandlers().size()).isEqualTo(0); DeploymentDescriptor third = new DeploymentDescriptorImpl("org.jbpm.domain"); third.getBuilder() .auditMode(AuditMode.JMS) .persistenceMode(PersistenceMode.JPA) .persistenceUnit("my.custom.unit2") .auditPersistenceUnit("my.custom.altered") .runtimeStrategy(RuntimeStrategy.PER_PROCESS_INSTANCE) .addEnvironmentEntry(new NamedObjectModel("IS_JTA", "java.lang.Boolean", new Object[]{"false"})); assertThat(third).isNotNull(); assertThat(third.getPersistenceUnit()).isEqualTo("my.custom.unit2"); assertThat(third.getAuditPersistenceUnit()).isEqualTo("my.custom.altered"); assertThat(third.getAuditMode()).isEqualTo(AuditMode.JMS); assertThat(third.getPersistenceMode()).isEqualTo(PersistenceMode.JPA); assertThat(third.getRuntimeStrategy()).isEqualTo(RuntimeStrategy.PER_PROCESS_INSTANCE); assertThat(third.getMarshallingStrategies().size()).isEqualTo(0); assertThat(third.getConfiguration().size()).isEqualTo(0); assertThat(third.getEnvironmentEntries().size()).isEqualTo(1); assertThat(third.getEventListeners().size()).isEqualTo(0); assertThat(third.getGlobals().size()).isEqualTo(0); assertThat(third.getTaskEventListeners().size()).isEqualTo(0); assertThat(third.getWorkItemHandlers().size()).isEqualTo(0); // assemble hierarchy List<DeploymentDescriptor> hierarchy = new ArrayList<DeploymentDescriptor>(); hierarchy.add(third); hierarchy.add(secondary);
hierarchy.add(primary); // and now let's merge them DeploymentDescriptor outcome = DeploymentDescriptorMerger.merge(hierarchy, MergeMode.MERGE_COLLECTIONS); assertThat(outcome).isNotNull(); assertThat(outcome.getPersistenceUnit()).isEqualTo("my.custom.unit2"); assertThat(outcome.getAuditPersistenceUnit()).isEqualTo("my.custom.altered"); assertThat(outcome.getAuditMode()).isEqualTo(AuditMode.JMS); assertThat(outcome.getPersistenceMode()).isEqualTo(PersistenceMode.JPA); assertThat(outcome.getRuntimeStrategy()).isEqualTo(RuntimeStrategy.PER_PROCESS_INSTANCE); assertThat(outcome.getMarshallingStrategies().size()).isEqualTo(1); assertThat(outcome.getConfiguration().size()).isEqualTo(0); assertThat(outcome.getEnvironmentEntries().size()).isEqualTo(1); assertThat(outcome.getEventListeners().size()).isEqualTo(0); assertThat(outcome.getGlobals().size()).isEqualTo(0); assertThat(outcome.getTaskEventListeners().size()).isEqualTo(0); assertThat(outcome.getWorkItemHandlers().size()).isEqualTo(0); }
public static long toUnsignedLong(short value) { return Short.toUnsignedLong(value); }
@Test public void testIntToUnsignedLong() { getIntegerTestData().forEach(val -> assertEquals(val.toString(), toUnsignedLongPreviousImplementation(val), BitmapUtils.toUnsignedLong(val))); }
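Short.toUnsignedLong is equivalent to masking with 0xFFFF: the value is zero-extended rather than sign-extended, which is why a negative short maps to a large positive long.

// Demonstrates the masking equivalence used by the conversion above.
public final class UnsignedDemo {
    public static void main(String[] args) {
        short s = (short) -1;
        System.out.println(Short.toUnsignedLong(s)); // 65535
        System.out.println(s & 0xFFFFL);             // 65535, same result
    }
}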
public static String getTimestampFromFile(String filename) { return filename.split("\\.")[0]; }
@Test public void testGetTimestamp() { Assertions.assertTrue(HoodieConsistentHashingMetadata.getTimestampFromFile("0000.hashing_metadata").equals("0000")); Assertions.assertTrue(HoodieConsistentHashingMetadata.getTimestampFromFile("1234.hashing_metadata").equals("1234")); }
public static FuryBuilder builder() { return new FuryBuilder(); }
@Test public void testPkgAccessLevelParentClass() { Fury fury = Fury.builder().withRefTracking(true).requireClassRegistration(false).build(); HashBasedTable<Object, Object, Object> table = HashBasedTable.create(2, 4); table.put("r", "c", 100); serDeCheckSerializer(fury, table, "Codec"); }
public static List<String> splitPlainTextParagraphs( List<String> lines, int maxTokensPerParagraph) { return internalSplitTextParagraphs( lines, maxTokensPerParagraph, (text) -> internalSplitLines( text, maxTokensPerParagraph, false, s_plaintextSplitOptions)); }
@Test public void canSplitTextParagraphsEvenly() { List<String> input = Arrays.asList( "This is a test of the emergency broadcast system. This is only a test.", "We repeat, this is only a test. A unit test.", "A small note. And another. And once again. Seriously, this is the end." + " We're finished. All set. Bye.", "Done."); List<String> expected = Arrays.asList( "This is a test of the emergency broadcast system.", "This is only a test.", "We repeat, this is only a test. A unit test.", "A small note. And another. And once again.", "Seriously, this is the end. We're finished. All set. Bye. Done."); List<String> result = TextChunker.splitPlainTextParagraphs(input, 15); Assertions.assertEquals(expected, result); }
public String getLocation() { String location = properties.getProperty(NACOS_LOGGING_CONFIG_PROPERTY); if (StringUtils.isBlank(location)) { if (isDefaultLocationEnabled()) { return defaultLocation; } return null; } return location; }
@Test void testGetLocationWithDefault() { assertEquals("classpath:test.xml", loggingProperties.getLocation()); }
public boolean isUnknown() { return this.major == 0 && this.minor == 0 && this.patch == 0; }
@Test public void testIsUnknown() { assertTrue(MemberVersion.UNKNOWN.isUnknown()); assertFalse(MemberVersion.of(VERSION_3_8_SNAPSHOT_STRING).isUnknown()); assertFalse(MemberVersion.of(VERSION_3_8_1_RC1_STRING).isUnknown()); assertFalse(MemberVersion.of(VERSION_3_8_1_BETA_1_STRING).isUnknown()); assertFalse(MemberVersion.of(VERSION_3_8_BETA_2_STRING).isUnknown()); assertFalse(MemberVersion.of(VERSION_3_8_2_STRING).isUnknown()); }
static Optional<String> globalResponseError(Optional<ClientResponse> response) { if (!response.isPresent()) { return Optional.of("Timeout"); } if (response.get().authenticationException() != null) { return Optional.of("AuthenticationException"); } if (response.get().wasTimedOut()) { return Optional.of("Disconnected[Timeout]"); } if (response.get().wasDisconnected()) { return Optional.of("Disconnected"); } if (response.get().versionMismatch() != null) { return Optional.of("UnsupportedVersionException"); } if (response.get().responseBody() == null) { return Optional.of("EmptyResponse"); } if (!(response.get().responseBody() instanceof AssignReplicasToDirsResponse)) { return Optional.of("ClassCastException"); } AssignReplicasToDirsResponseData data = ((AssignReplicasToDirsResponse) response.get().responseBody()).data(); Errors error = Errors.forCode(data.errorCode()); if (error != Errors.NONE) { return Optional.of("Response-level error: " + error.name()); } return Optional.empty(); }
@Test public void testGlobalResponseErrorResponseLevelError() { assertEquals(Optional.of("Response-level error: INVALID_REQUEST"), AssignmentsManager.globalResponseError(Optional.of( new ClientResponse(null, null, "", 0, 0, false, false, null, null, new AssignReplicasToDirsResponse( new AssignReplicasToDirsResponseData(). setErrorCode(Errors.INVALID_REQUEST.code())))))); }
@Override public boolean tryLock(String name) { return tryLock(name, DEFAULT_LOCK_DURATION_SECONDS); }
@Test @UseDataProvider("randomValidDuration") public void tryLock_with_duration_delegates_to_InternalPropertiesDao_and_commits(int randomValidDuration) { String lockName = "foo"; boolean expected = new Random().nextBoolean(); when(internalPropertiesDao.tryLock(dbSession, lockName, randomValidDuration)) .thenReturn(expected); assertThat(underTest.tryLock(lockName, randomValidDuration)).isEqualTo(expected); verify(dbClient).openSession(false); verify(internalPropertiesDao).tryLock(dbSession, lockName, randomValidDuration); verify(dbSession).commit(); verifyNoMoreInteractions(internalPropertiesDao); }
@InvokeOnHeader(Web3jConstants.ETH_SUBMIT_WORK) void ethSubmitWork(Message message) throws IOException { String nonce = message.getHeader(Web3jConstants.NONCE, configuration::getNonce, String.class); String headerPowHash = message.getHeader(Web3jConstants.HEADER_POW_HASH, configuration::getHeaderPowHash, String.class); String mixDigest = message.getHeader(Web3jConstants.MIX_DIGEST, configuration::getMixDigest, String.class); Request<?, EthSubmitWork> request = web3j.ethSubmitWork(nonce, headerPowHash, mixDigest); setRequestId(message, request); EthSubmitWork response = request.send(); boolean hasError = checkForError(message, response); if (!hasError) { message.setBody(response.solutionValid()); } }
@Test public void ethSubmitWorkTest() throws Exception { EthSubmitWork response = Mockito.mock(EthSubmitWork.class); Mockito.when(mockWeb3j.ethSubmitWork(any(), any(), any())).thenReturn(request); Mockito.when(request.send()).thenReturn(response); Mockito.when(response.solutionValid()).thenReturn(Boolean.TRUE); Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.ETH_SUBMIT_WORK); template.send(exchange); Boolean body = exchange.getIn().getBody(Boolean.class); assertTrue(body); }
@SneakyThrows public static String readUtf8String(String path) { String resultReadStr; ClassPathResource classPathResource = new ClassPathResource(path); try ( InputStream inputStream = classPathResource.getInputStream(); BufferedInputStream bis = new BufferedInputStream(inputStream); ByteArrayOutputStream buf = new ByteArrayOutputStream()) { int result = bis.read(); while (result != ERROR_CODE) { buf.write((byte) result); result = bis.read(); } resultReadStr = buf.toString("UTF-8"); } return resultReadStr; }
@Test public void assertReadUtf8String() { String testFilePath = "test/test_utf8.txt"; String contentByFileUtil = FileUtil.readUtf8String(testFilePath); Assert.assertFalse(contentByFileUtil.isEmpty()); }
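On JDK 9+ the manual byte-by-byte loop (where ERROR_CODE is presumably the -1 end-of-stream marker) can be replaced with InputStream.readAllBytes. A sketch under that assumption, approximating the classpath-resource semantics with the plain class loader:

import java.io.InputStream;
import java.nio.charset.StandardCharsets;

// Simplified alternative: read the whole classpath resource in one call.
public final class ReadUtf8Sketch {
    public static String readUtf8String(String path) throws Exception {
        try (InputStream in = ReadUtf8Sketch.class.getClassLoader().getResourceAsStream(path)) {
            if (in == null) throw new IllegalArgumentException("Resource not found: " + path);
            return new String(in.readAllBytes(), StandardCharsets.UTF_8);
        }
    }
}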
@CanIgnoreReturnValue public final Ordered containsExactlyEntriesIn(Multimap<?, ?> expectedMultimap) { checkNotNull(expectedMultimap, "expectedMultimap"); checkNotNull(actual); ListMultimap<?, ?> missing = difference(expectedMultimap, actual); ListMultimap<?, ?> extra = difference(actual, expectedMultimap); // TODO(kak): Possible enhancement: Include "[1 copy]" if the element does appear in // the subject but not enough times. Similarly for unexpected extra items. if (!missing.isEmpty()) { if (!extra.isEmpty()) { boolean addTypeInfo = hasMatchingToStringPair(missing.entries(), extra.entries()); // Note: The usage of countDuplicatesAndAddTypeInfo() below causes entries no longer to be // grouped by key in the 'missing' and 'unexpected items' parts of the message (we still // show the actual and expected multimaps in the standard format). String missingDisplay = addTypeInfo ? countDuplicatesAndAddTypeInfo(annotateEmptyStringsMultimap(missing).entries()) : countDuplicatesMultimap(annotateEmptyStringsMultimap(missing)); String extraDisplay = addTypeInfo ? countDuplicatesAndAddTypeInfo(annotateEmptyStringsMultimap(extra).entries()) : countDuplicatesMultimap(annotateEmptyStringsMultimap(extra)); failWithActual( fact("missing", missingDisplay), fact("unexpected", extraDisplay), simpleFact("---"), fact("expected", annotateEmptyStringsMultimap(expectedMultimap))); return ALREADY_FAILED; } else { failWithActual( fact("missing", countDuplicatesMultimap(annotateEmptyStringsMultimap(missing))), simpleFact("---"), fact("expected", annotateEmptyStringsMultimap(expectedMultimap))); return ALREADY_FAILED; } } else if (!extra.isEmpty()) { failWithActual( fact("unexpected", countDuplicatesMultimap(annotateEmptyStringsMultimap(extra))), simpleFact("---"), fact("expected", annotateEmptyStringsMultimap(expectedMultimap))); return ALREADY_FAILED; } return new MultimapInOrder(/* allowUnexpected = */ false, expectedMultimap); }
@Test public void containsExactlyEmpty() { ImmutableListMultimap<Integer, String> actual = ImmutableListMultimap.of(); ImmutableSetMultimap<Integer, String> expected = ImmutableSetMultimap.of(); assertThat(actual).containsExactlyEntriesIn(expected); assertThat(actual).containsExactlyEntriesIn(expected).inOrder(); }
@VisibleForTesting protected void copyFromHost(MapHost host) throws IOException { // reset retryStartTime for a new host retryStartTime = 0; // Get completed maps on 'host' List<TaskAttemptID> maps = scheduler.getMapsForHost(host); // Sanity check to catch hosts with only 'OBSOLETE' maps, // especially at the tail of large jobs if (maps.size() == 0) { return; } if (LOG.isDebugEnabled()) { LOG.debug("Fetcher " + id + " going to fetch from " + host + " for: " + maps); } // List of maps to be fetched yet Set<TaskAttemptID> remaining = new HashSet<TaskAttemptID>(maps); // Construct the url and connect URL url = getMapOutputURL(host, maps); DataInputStream input = null; try { input = openShuffleUrl(host, remaining, url); if (input == null) { return; } // Loop through available map-outputs and fetch them // On any error, failedTasks is not null and we exit // after putting back the remaining maps to the // yet_to_be_fetched list and marking the failed tasks. TaskAttemptID[] failedTasks = null; while (!remaining.isEmpty() && failedTasks == null) { try { failedTasks = copyMapOutput(host, input, remaining, fetchRetryEnabled); } catch (IOException e) { IOUtils.cleanupWithLogger(LOG, input); // Setup connection again if disconnected by NM connection.disconnect(); // Get map output from remaining tasks only. url = getMapOutputURL(host, remaining); input = openShuffleUrl(host, remaining, url); if (input == null) { return; } } } if (failedTasks != null && failedTasks.length > 0) { LOG.warn("copyMapOutput failed for tasks " + Arrays.toString(failedTasks)); scheduler.hostFailed(host.getHostName()); for (TaskAttemptID left : failedTasks) { scheduler.copyFailed(left, host, true, false); } } // Sanity check if (failedTasks == null && !remaining.isEmpty()) { throw new IOException("server didn't return all expected map outputs: " + remaining.size() + " left."); } input.close(); input = null; } finally { if (input != null) { IOUtils.cleanupWithLogger(LOG, input); input = null; } for (TaskAttemptID left : remaining) { scheduler.putBackKnownMapOutput(host, left); } } }
@Test public void testCopyFromHostBogusHeader() throws Exception { Fetcher<Text,Text> underTest = new FakeFetcher<Text,Text>(job, id, ss, mm, r, metrics, except, key, connection); String replyHash = SecureShuffleUtils.generateHash(encHash.getBytes(), key); when(connection.getResponseCode()).thenReturn(200); when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME)) .thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_NAME); when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION)) .thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION); when(connection.getHeaderField(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH)) .thenReturn(replyHash); ByteArrayInputStream in = new ByteArrayInputStream( "\u00010 BOGUS DATA\nBOGUS DATA\nBOGUS DATA\n".getBytes()); when(connection.getInputStream()).thenReturn(in); underTest.copyFromHost(host); verify(connection).addRequestProperty( SecureShuffleUtils.HTTP_HEADER_URL_HASH, encHash); verify(allErrs).increment(1); verify(ss).copyFailed(map1ID, host, true, false); verify(ss).copyFailed(map2ID, host, true, false); verify(ss).putBackKnownMapOutput(any(MapHost.class), eq(map1ID)); verify(ss).putBackKnownMapOutput(any(MapHost.class), eq(map2ID)); }
@Override public <T> Mono<T> run(final Mono<T> run, final Function<Throwable, Mono<T>> fallback, final Resilience4JConf resilience4JConf) { RateLimiter rateLimiter = Resilience4JRegistryFactory.rateLimiter(resilience4JConf.getId(), resilience4JConf.getRateLimiterConfig()); CircuitBreaker circuitBreaker = Resilience4JRegistryFactory.circuitBreaker(resilience4JConf.getId(), resilience4JConf.getCircuitBreakerConfig()); final Duration timeoutDuration = resilience4JConf.getTimeLimiterConfig().getTimeoutDuration(); Mono<T> to = run.transformDeferred(CircuitBreakerOperator.of(circuitBreaker)) .transformDeferred(RateLimiterOperator.of(rateLimiter)) .timeout(timeoutDuration, Mono.error(() -> new TimeoutException("Response took longer than timeout: " + timeoutDuration))) .doOnError(TimeoutException.class, t -> circuitBreaker.onError( resilience4JConf.getTimeLimiterConfig().getTimeoutDuration().toMillis(), TimeUnit.MILLISECONDS, t)); if (Objects.nonNull(fallback)) { to = to.onErrorResume(fallback); } return to; }
@Test public void errorTest() { Resilience4JConf conf = mock(Resilience4JConf.class); when(conf.getId()).thenReturn("SHENYU"); when(conf.getRateLimiterConfig()).thenReturn(RateLimiterConfig.ofDefaults()); when(conf.getTimeLimiterConfig()).thenReturn(TimeLimiterConfig.ofDefaults()); when(conf.getCircuitBreakerConfig()).thenReturn(CircuitBreakerConfig.ofDefaults()); StepVerifier.create(combinedExecutor.run(Mono.error(new RuntimeException()), Mono::error, conf)) .expectSubscription() .expectError(RuntimeException.class) .verify(); }
public CompletableFuture<Void> storeKemOneTimePreKeys(final UUID identifier, final byte deviceId, final List<KEMSignedPreKey> preKeys) { return pqPreKeys.store(identifier, deviceId, preKeys); }
@Test void storeKemOneTimePreKeys() { assertEquals(0, keysManager.getPqCount(ACCOUNT_UUID, DEVICE_ID).join(), "Initial pre-key count for an account should be zero"); keysManager.storeKemOneTimePreKeys(ACCOUNT_UUID, DEVICE_ID, List.of(generateTestKEMSignedPreKey(1))).join(); assertEquals(1, keysManager.getPqCount(ACCOUNT_UUID, DEVICE_ID).join()); keysManager.storeKemOneTimePreKeys(ACCOUNT_UUID, DEVICE_ID, List.of(generateTestKEMSignedPreKey(1))).join(); assertEquals(1, keysManager.getPqCount(ACCOUNT_UUID, DEVICE_ID).join()); }
@Override public long transferTo(long position, long count, WritableByteChannel target) throws IOException { checkNotNull(target); Util.checkNotNegative(position, "position"); Util.checkNotNegative(count, "count"); checkOpen(); checkReadable(); long transferred = 0; // will definitely either be assigned or an exception will be thrown // no need to synchronize here; this method does not make use of the channel's position boolean completed = false; try { if (!beginBlocking()) { return 0; // AsynchronousCloseException will be thrown } file.readLock().lockInterruptibly(); try { transferred = file.transferTo(position, count, target); file.setLastAccessTime(fileSystemState.now()); completed = true; } finally { file.readLock().unlock(); } } catch (InterruptedException e) { Thread.currentThread().interrupt(); } finally { endBlocking(completed); } return transferred; }
@Test public void testTransferToNegative() throws IOException { FileChannel channel = channel(regularFile(0), READ, WRITE); try { channel.transferTo(-1, 0, new ByteBufferChannel(10)); fail(); } catch (IllegalArgumentException expected) { } try { channel.transferTo(0, -1, new ByteBufferChannel(10)); fail(); } catch (IllegalArgumentException expected) { } }
public void close(long timeoutMs) { ThreadUtils.shutdownExecutorServiceQuietly(commitExecutorService, timeoutMs, TimeUnit.MILLISECONDS); }
@Test public void testCloseTimeout() throws Exception { long timeoutMs = 1000; // Normal termination, where termination times out. when(executor.awaitTermination(timeoutMs, TimeUnit.MILLISECONDS)).thenReturn(false); try (LogCaptureAppender logCaptureAppender = LogCaptureAppender.createAndRegister(ThreadUtils.class)) { committer.close(timeoutMs); assertTrue(logCaptureAppender.getEvents().stream().anyMatch(e -> e.getLevel().equals("ERROR"))); } verify(executor).shutdown(); }
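The ERROR-level log the test expects comes from the shutdown helper timing out. A common shape for such a helper, sketched here as an assumption about what ThreadUtils.shutdownExecutorServiceQuietly does (not its actual source):

import java.util.concurrent.*;

// Shut down an executor, force-cancel on timeout, and never propagate exceptions.
public final class QuietShutdownSketch {
    static void shutdownQuietly(ExecutorService executor, long timeout, TimeUnit unit) {
        executor.shutdown(); // stop accepting new tasks
        try {
            if (!executor.awaitTermination(timeout, unit)) {
                executor.shutdownNow(); // timed out: interrupt whatever is still running
                System.err.println("Executor did not terminate within the timeout");
            }
        } catch (InterruptedException e) {
            executor.shutdownNow();
            Thread.currentThread().interrupt(); // preserve interrupt status
        }
    }
}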
public static boolean isInvalidStanzaSentPriorToResourceBinding(final Packet stanza, final ClientSession session) { // Openfire sets 'authenticated' only after resource binding. if (session.getStatus() == Session.Status.AUTHENTICATED) { return false; } // Beware, the 'to' address in the stanza will have been overwritten by the final JID intendedRecipient = stanza.getTo(); final JID serverDomain = new JID(XMPPServer.getInstance().getServerInfo().getXMPPDomain()); // If there's no 'to' address, then the stanza is implicitly addressed at the user itself. if (intendedRecipient == null) { return false; } // TODO: after authentication (but prior to resource binding), it should be possible to verify that the // intended recipient's bare JID corresponds with the authorized user. Openfire currently does not have an API // that can be used to obtain the authorized username, prior to resource binding. if (intendedRecipient.equals(serverDomain)) { return false; } return true; }
@Test public void testIsInvalid_addressedAtDomain_authenticated() throws Exception { // Setup test fixture. final Packet stanza = new Message(); stanza.setTo(XMPPServer.getInstance().getServerInfo().getXMPPDomain()); final LocalClientSession session = mock(LocalClientSession.class, withSettings().strictness(Strictness.LENIENT)); when(session.getStatus()).thenReturn(Session.Status.AUTHENTICATED); // Openfire sets 'AUTHENTICATED' only after resource binding has been done. // Execute system under test. final boolean result = SessionPacketRouter.isInvalidStanzaSentPriorToResourceBinding(stanza, session); // Verify results. assertFalse(result); }
@Override public Map<String, Object> load(String configKey) { if (targetFilePath != null) { try { Map<String, Object> raw = (Map<String, Object>) Utils.readYamlFile(targetFilePath); if (raw != null) { return (Map<String, Object>) raw.get(configKey); } } catch (Exception e) { LOG.error("Failed to load from file {}", targetFilePath, e); } } return null; }
@Test public void testInvalidConfig() { Config conf = new Config(); FileConfigLoader testLoader = new FileConfigLoader(conf); Map<String, Object> result = testLoader.load(DaemonConfig.MULTITENANT_SCHEDULER_USER_POOLS); assertNull(result, "Unexpectedly returned a map"); }
public static List<String> findVariablesForEncodedValuesString(CustomModel model, NameValidator nameValidator, ClassHelper classHelper) { Set<String> variables = new LinkedHashSet<>(); // avoid parsing exception for backward_xy or in_xy ... NameValidator nameValidatorIntern = s -> { // some literals are no variables and would throw an exception (encoded value not found) if (Character.isUpperCase(s.charAt(0)) || s.startsWith(BACKWARD_PREFIX) || s.startsWith(IN_AREA_PREFIX)) return true; if (nameValidator.isValid(s)) { variables.add(s); return true; } return false; }; findVariablesForEncodedValuesString(model.getPriority(), nameValidatorIntern, classHelper); findVariablesForEncodedValuesString(model.getSpeed(), nameValidatorIntern, classHelper); return new ArrayList<>(variables); }
@Test public void findVariablesForEncodedValueString() { CustomModel customModel = new CustomModel(); customModel.addToPriority(If("backward_car_access != car_access", MULTIPLY, "0.5")); List<String> variables = findVariablesForEncodedValuesString(customModel, s -> new DefaultImportRegistry().createImportUnit(s) != null, s -> ""); assertEquals(List.of("car_access"), variables); customModel = new CustomModel(); customModel.addToPriority(If("!foot_access && (hike_rating < 4 || road_access == PRIVATE)", MULTIPLY, "0")); variables = findVariablesForEncodedValuesString(customModel, s -> new DefaultImportRegistry().createImportUnit(s) != null, s -> ""); assertEquals(List.of("foot_access", "hike_rating", "road_access"), variables); }
@Activate public void activate() { providerService = providerRegistry.register(this); // listens all LISP router related events controller.addRouterListener(listener); // listens all LISP control message controller.addMessageListener(listener); log.info("Started"); }
@Test public void activate() throws Exception { assertEquals("Provider should be registered", 1, providerRegistry.getProviders().size()); assertTrue("LISP device provider should be registered", providerRegistry.getProviders().contains(provider.id())); assertEquals("Incorrect provider service", providerService, provider.providerService); assertEquals("LISP router listener should be registered", 1, routerListeners.size()); }
public WithJsonPath(JsonPath jsonPath, Matcher<T> resultMatcher) { this.jsonPath = jsonPath; this.resultMatcher = resultMatcher; }
@Test public void shouldMatchJsonPathEvaluatedToIntegerValue() { assertThat(BOOKS_JSON, withJsonPath(compile("$.expensive"), equalTo(10))); assertThat(BOOKS_JSON, withJsonPath("$.expensive", equalTo(10))); }
@Override public List<Container> allocateContainers(ResourceBlacklistRequest blackList, List<ResourceRequest> oppResourceReqs, ApplicationAttemptId applicationAttemptId, OpportunisticContainerContext opportContext, long rmIdentifier, String appSubmitter) throws YarnException { // Update black list. updateBlacklist(blackList, opportContext); // Add OPPORTUNISTIC requests to the outstanding ones. opportContext.addToOutstandingReqs(oppResourceReqs); Set<String> nodeBlackList = new HashSet<>(opportContext.getBlacklist()); Set<String> allocatedNodes = new HashSet<>(); List<Container> allocatedContainers = new ArrayList<>(); // Satisfy the outstanding OPPORTUNISTIC requests. boolean continueLoop = true; while (continueLoop) { continueLoop = false; List<Map<Resource, List<Allocation>>> allocations = new ArrayList<>(); for (SchedulerRequestKey schedulerKey : opportContext.getOutstandingOpReqs().descendingKeySet()) { // Allocated containers : // Key = Requested Capability, // Value = List of Containers of given cap (the actual container size // might be different than what is requested, which is why // we need the requested capability (key) to match against // the outstanding reqs) int remAllocs = -1; int maxAllocationsPerAMHeartbeat = getMaxAllocationsPerAMHeartbeat(); if (maxAllocationsPerAMHeartbeat > 0) { remAllocs = maxAllocationsPerAMHeartbeat - allocatedContainers.size() - getTotalAllocations(allocations); if (remAllocs <= 0) { LOG.info("Not allocating more containers as we have reached max " + "allocations per AM heartbeat {}", maxAllocationsPerAMHeartbeat); break; } } Map<Resource, List<Allocation>> allocation = allocate( rmIdentifier, opportContext, schedulerKey, applicationAttemptId, appSubmitter, nodeBlackList, allocatedNodes, remAllocs); if (allocation.size() > 0) { allocations.add(allocation); continueLoop = true; } } matchAllocation(allocations, allocatedContainers, opportContext); } return allocatedContainers; }
@Test public void testBlacklistRejection() throws Exception { ResourceBlacklistRequest blacklistRequest = ResourceBlacklistRequest.newInstance( Arrays.asList("h1", "h2"), new ArrayList<>()); List<ResourceRequest> reqs = Arrays.asList(ResourceRequest.newInstance(PRIORITY_NORMAL, "*", CAPABILITY_1GB, 1, true, null, OPPORTUNISTIC_REQ)); ApplicationAttemptId appAttId = ApplicationAttemptId.newInstance( ApplicationId.newInstance(0L, 1), 1); oppCntxt.updateNodeList( Arrays.asList( RemoteNode.newInstance( NodeId.newInstance("h1", 1234), "h1:1234", "/r1"), RemoteNode.newInstance( NodeId.newInstance("h2", 1234), "h2:1234", "/r2"))); List<Container> containers = allocator.allocateContainers( blacklistRequest, reqs, appAttId, oppCntxt, 1L, "luser"); Assert.assertEquals(0, containers.size()); Assert.assertEquals(1, oppCntxt.getOutstandingOpReqs().size()); }
@VisibleForTesting public void validateSmsTemplateCodeDuplicate(Long id, String code) { SmsTemplateDO template = smsTemplateMapper.selectByCode(code); if (template == null) { return; } // If id is null, there is no need to check whether the match is the same record, so the code is a duplicate if (id == null) { throw exception(SMS_TEMPLATE_CODE_DUPLICATE, code); } if (!template.getId().equals(id)) { throw exception(SMS_TEMPLATE_CODE_DUPLICATE, code); } }
@Test public void testValidateSmsTemplateCodeDuplicate_success() { // Invoke; should pass without throwing smsTemplateService.validateSmsTemplateCodeDuplicate(randomLongId(), randomString()); }
List<StatisticsEntry> takeStatistics() { if (reporterEnabled) throw new IllegalStateException("Cannot take consistent snapshot while reporter is enabled"); var ret = new ArrayList<StatisticsEntry>(); consume((metric, value) -> ret.add(new StatisticsEntry(metric, value))); return ret; }
@Test void request_type_can_be_set_explicitly() { testRequest("http", 200, "GET", "/search", com.yahoo.jdisc.Request.RequestType.WRITE); var stats = collector.takeStatistics(); assertStatisticsEntry(stats, "http", "GET", MetricDefinitions.RESPONSES_2XX, "write", 200, 1L); }
@Override public long tick() throws InterruptedException { long now = mClock.millis(); mSleeper.sleep( () -> Duration.ofMillis(mIntervalSupplier.getNextInterval(mPreviousTickedMs, now))); mPreviousTickedMs = mClock.millis(); return mIntervalSupplier.getRunLimit(mPreviousTickedMs); }
@Test public void warnWhenExecutionTakesLongerThanInterval() throws Exception { SleepingTimer timer = new SleepingTimer(THREAD_NAME, mMockLogger, mFakeClock, new SteppingThreadSleeper(mMockSleeper, mFakeClock), () -> new FixedIntervalSupplier(INTERVAL_MS, mMockLogger)); timer.tick(); mFakeClock.addTimeMs(5 * INTERVAL_MS); timer.tick(); verify(mMockLogger).warn(anyString(), anyString(), Mockito.anyLong(), Mockito.anyLong()); }
@Override public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException { final List<Path> containers = new ArrayList<Path>(); for(Path file : files.keySet()) { if(containerService.isContainer(file)) { containers.add(file); } else { callback.delete(file); try { final BlobRequestOptions options = new BlobRequestOptions(); session.getClient().getContainerReference(containerService.getContainer(file).getName()) .getBlockBlobReference(containerService.getKey(file)).delete( DeleteSnapshotsOption.INCLUDE_SNAPSHOTS, AccessCondition.generateEmptyCondition(), options, context); } catch(StorageException e) { switch(e.getHttpStatusCode()) { case HttpStatus.SC_NOT_FOUND: if(file.isPlaceholder()) { // Ignore failure with no placeholder object found return; } } throw new AzureExceptionMappingService().map("Cannot delete {0}", e, file); } catch(URISyntaxException e) { throw new NotfoundException(e.getMessage(), e); } } } for(Path file : containers) { callback.delete(file); try { final BlobRequestOptions options = new BlobRequestOptions(); session.getClient().getContainerReference(containerService.getContainer(file).getName()).delete( AccessCondition.generateEmptyCondition(), options, context); } catch(StorageException e) { throw new AzureExceptionMappingService().map("Cannot delete {0}", e, file); } catch(URISyntaxException e) { throw new NotfoundException(e.getMessage(), e); } } }
@Test public void testDeletePlaceholder() throws Exception { final Path container = new Path("cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume)); final Path test = new AzureDirectoryFeature(session, null).mkdir(new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus()); assertTrue(new AzureFindFeature(session, null).find(test)); new AzureDeleteFeature(session, null).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback()); assertFalse(new AzureFindFeature(session, null).find(test)); }
public Analysis analyze(Statement statement) { return analyze(statement, false); }
@Test public void testLambdaInAggregationContext() { analyze("SELECT apply(sum(x), i -> i * i) FROM (VALUES 1, 2, 3, 4, 5) t(x)"); analyze("SELECT apply(x, i -> i - 1), sum(y) FROM (VALUES (1, 10), (1, 20), (2, 50)) t(x,y) group by x"); analyze("SELECT x, apply(sum(y), i -> i * 10) FROM (VALUES (1, 10), (1, 20), (2, 50)) t(x,y) group by x"); analyze("SELECT apply(8, x -> x + 1) FROM (VALUES (1, 2)) t(x,y) GROUP BY y"); assertFails( MUST_BE_AGGREGATE_OR_GROUP_BY, ".* must be an aggregate expression or appear in GROUP BY clause", "SELECT apply(sum(x), i -> i * x) FROM (VALUES 1, 2, 3, 4, 5) t(x)"); assertFails( MUST_BE_AGGREGATE_OR_GROUP_BY, ".* must be an aggregate expression or appear in GROUP BY clause", "SELECT apply(1, y -> x) FROM (VALUES (1,2)) t(x,y) GROUP BY y"); assertFails( MUST_BE_AGGREGATE_OR_GROUP_BY, ".* must be an aggregate expression or appear in GROUP BY clause", "SELECT apply(1, y -> x.someField) FROM (VALUES (CAST(ROW(1) AS ROW(someField BIGINT)), 2)) t(x,y) GROUP BY y"); analyze("SELECT apply(CAST(ROW(1) AS ROW(someField BIGINT)), x -> x.someField) FROM (VALUES (1,2)) t(x,y) GROUP BY y"); analyze("SELECT apply(sum(x), x -> x * x) FROM (VALUES 1, 2, 3, 4, 5) t(x)"); // nested lambda expression uses the same variable name analyze("SELECT apply(sum(x), x -> apply(x, x -> x * x)) FROM (VALUES 1, 2, 3, 4, 5) t(x)"); // illegal use of a column whose name is the same as a lambda variable name assertFails( MUST_BE_AGGREGATE_OR_GROUP_BY, ".* must be an aggregate expression or appear in GROUP BY clause", "SELECT apply(sum(x), x -> x * x) + x FROM (VALUES 1, 2, 3, 4, 5) t(x)"); assertFails( MUST_BE_AGGREGATE_OR_GROUP_BY, ".* must be an aggregate expression or appear in GROUP BY clause", "SELECT apply(sum(x), x -> apply(x, x -> x * x)) + x FROM (VALUES 1, 2, 3, 4, 5) t(x)"); // x + y within lambda should not be treated as group expression assertFails( MUST_BE_AGGREGATE_OR_GROUP_BY, ".* must be an aggregate expression or appear in GROUP BY clause", "SELECT apply(1, y -> x + y) FROM (VALUES (1,2)) t(x, y) GROUP BY x+y"); }
@Override public byte[] get(byte[] key) { return read(key, ByteArrayCodec.INSTANCE, RedisCommands.GET, key); }
@Test public void testGeo() { RedisTemplate<String, String> redisTemplate = new RedisTemplate<>(); redisTemplate.setConnectionFactory(new RedissonConnectionFactory(redisson)); redisTemplate.afterPropertiesSet(); String key = "test_geo_key"; Point point = new Point(116.401001, 40.119499); redisTemplate.opsForGeo().add(key, point, "a"); point = new Point(111.545998, 36.133499); redisTemplate.opsForGeo().add(key, point, "b"); point = new Point(111.483002, 36.030998); redisTemplate.opsForGeo().add(key, point, "c"); Circle within = new Circle(116.401001, 40.119499, 80000); RedisGeoCommands.GeoRadiusCommandArgs args = RedisGeoCommands.GeoRadiusCommandArgs.newGeoRadiusArgs().includeCoordinates(); GeoResults<RedisGeoCommands.GeoLocation<String>> res = redisTemplate.opsForGeo().radius(key, within, args); assertThat(res.getContent().get(0).getContent().getName()).isEqualTo("a"); }
public static Map<String, AdvertisedListener> validateAndAnalysisAdvertisedListener(ServiceConfiguration config) { if (StringUtils.isBlank(config.getAdvertisedListeners())) { return Collections.emptyMap(); } Optional<String> firstListenerName = Optional.empty(); Map<String, List<String>> listeners = new LinkedHashMap<>(); for (final String str : StringUtils.split(config.getAdvertisedListeners(), ",")) { int index = str.indexOf(":"); if (index <= 0) { throw new IllegalArgumentException("the configuration entry `advertisedListeners` is invalid because " + str + " does not contain a listener name"); } String listenerName = StringUtils.trim(str.substring(0, index)); if (!firstListenerName.isPresent()) { firstListenerName = Optional.of(listenerName); } String value = StringUtils.trim(str.substring(index + 1)); listeners.computeIfAbsent(listenerName, k -> new ArrayList<>(2)); listeners.get(listenerName).add(value); } if (StringUtils.isBlank(config.getInternalListenerName())) { config.setInternalListenerName(firstListenerName.get()); } if (!listeners.containsKey(config.getInternalListenerName())) { throw new IllegalArgumentException("the `advertisedListeners` configuration does not contain the " + "`internalListenerName` entry"); } final Map<String, AdvertisedListener> result = new LinkedHashMap<>(); final Map<String, Set<String>> reverseMappings = new LinkedHashMap<>(); for (final Map.Entry<String, List<String>> entry : listeners.entrySet()) { if (entry.getValue().size() > 2) { throw new IllegalArgumentException("there is redundant configuration for listener `" + entry.getKey() + "`"); } URI pulsarAddress = null, pulsarSslAddress = null, pulsarHttpAddress = null, pulsarHttpsAddress = null; for (final String strUri : entry.getValue()) { try { URI uri = URI.create(strUri); if (StringUtils.equalsIgnoreCase(uri.getScheme(), "pulsar")) { if (pulsarAddress == null) { pulsarAddress = uri; } else { throw new IllegalArgumentException("there is redundant configuration for listener `" + entry.getKey() + "`"); } } else if (StringUtils.equalsIgnoreCase(uri.getScheme(), "pulsar+ssl")) { if (pulsarSslAddress == null) { pulsarSslAddress = uri; } else { throw new IllegalArgumentException("there is redundant configuration for listener `" + entry.getKey() + "`"); } } else if (StringUtils.equalsIgnoreCase(uri.getScheme(), "http")) { if (pulsarHttpAddress == null) { pulsarHttpAddress = uri; } else { throw new IllegalArgumentException("there is redundant configuration for listener `" + entry.getKey() + "`"); } } else if (StringUtils.equalsIgnoreCase(uri.getScheme(), "https")) { if (pulsarHttpsAddress == null) { pulsarHttpsAddress = uri; } else { throw new IllegalArgumentException("there is redundant configuration for listener `" + entry.getKey() + "`"); } } String hostPort = String.format("%s:%d", uri.getHost(), uri.getPort()); Set<String> sets = reverseMappings.computeIfAbsent(hostPort, k -> new TreeSet<>()); sets.add(entry.getKey()); if (sets.size() > 1) { throw new IllegalArgumentException("must not assign `" + hostPort + "` to different listeners."); } } catch (Throwable cause) { throw new IllegalArgumentException("the value " + strUri + " in the `advertisedListeners` " + "configuration is invalid", cause); } } result.put(entry.getKey(), AdvertisedListener.builder() .brokerServiceUrl(pulsarAddress) .brokerServiceUrlTls(pulsarSslAddress) .brokerHttpUrl(pulsarHttpAddress) .brokerHttpsUrl(pulsarHttpsAddress) .build()); } return result; }
@Test(expectedExceptions = IllegalArgumentException.class) public void testListenerDuplicate_2() { ServiceConfiguration config = new ServiceConfiguration(); config.setAdvertisedListeners(" internal:pulsar://127.0.0.1:6660," + " internal:pulsar://192.168.1.11:6660"); config.setInternalListenerName("internal"); MultipleListenerValidator.validateAndAnalysisAdvertisedListener(config); }
public static @NonNull String printThrowable(@CheckForNull Throwable t) { if (t == null) { return Messages.Functions_NoExceptionDetails(); } StringBuilder s = new StringBuilder(); doPrintStackTrace(s, t, null, "", new HashSet<>()); return s.toString(); }
@Issue("JDK-6507809") @Test public void printThrowable() { // Basics: a single exception. No change. assertPrintThrowable(new Stack("java.lang.NullPointerException: oops", "p.C.method1:17", "m.Main.main:1"), "java.lang.NullPointerException: oops\n" + "\tat p.C.method1(C.java:17)\n" + "\tat m.Main.main(Main.java:1)\n", "java.lang.NullPointerException: oops\n" + "\tat p.C.method1(C.java:17)\n" + "\tat m.Main.main(Main.java:1)\n"); // try {…} catch (Exception x) {throw new IllegalStateException(x);} assertPrintThrowable(new Stack("java.lang.IllegalStateException: java.lang.NullPointerException: oops", "p.C.method1:19", "m.Main.main:1"). cause(new Stack("java.lang.NullPointerException: oops", "p.C.method2:23", "p.C.method1:17", "m.Main.main:1")), "java.lang.IllegalStateException: java.lang.NullPointerException: oops\n" + "\tat p.C.method1(C.java:19)\n" + "\tat m.Main.main(Main.java:1)\n" + "Caused by: java.lang.NullPointerException: oops\n" + "\tat p.C.method2(C.java:23)\n" + "\tat p.C.method1(C.java:17)\n" + "\t... 1 more\n", "java.lang.NullPointerException: oops\n" + "\tat p.C.method2(C.java:23)\n" + "\tat p.C.method1(C.java:17)\n" + "Caused: java.lang.IllegalStateException\n" + "\tat p.C.method1(C.java:19)\n" + "\tat m.Main.main(Main.java:1)\n"); // try {…} catch (Exception x) {throw new IllegalStateException("more info");} assertPrintThrowable(new Stack("java.lang.IllegalStateException: more info", "p.C.method1:19", "m.Main.main:1"). cause(new Stack("java.lang.NullPointerException: oops", "p.C.method2:23", "p.C.method1:17", "m.Main.main:1")), "java.lang.IllegalStateException: more info\n" + "\tat p.C.method1(C.java:19)\n" + "\tat m.Main.main(Main.java:1)\n" + "Caused by: java.lang.NullPointerException: oops\n" + "\tat p.C.method2(C.java:23)\n" + "\tat p.C.method1(C.java:17)\n" + "\t... 1 more\n", "java.lang.NullPointerException: oops\n" + "\tat p.C.method2(C.java:23)\n" + "\tat p.C.method1(C.java:17)\n" + "Caused: java.lang.IllegalStateException: more info\n" + "\tat p.C.method1(C.java:19)\n" + "\tat m.Main.main(Main.java:1)\n"); // try {…} catch (Exception x) {throw new IllegalStateException("more info: " + x);} assertPrintThrowable(new Stack("java.lang.IllegalStateException: more info: java.lang.NullPointerException: oops", "p.C.method1:19", "m.Main.main:1"). cause(new Stack("java.lang.NullPointerException: oops", "p.C.method2:23", "p.C.method1:17", "m.Main.main:1")), "java.lang.IllegalStateException: more info: java.lang.NullPointerException: oops\n" + "\tat p.C.method1(C.java:19)\n" + "\tat m.Main.main(Main.java:1)\n" + "Caused by: java.lang.NullPointerException: oops\n" + "\tat p.C.method2(C.java:23)\n" + "\tat p.C.method1(C.java:17)\n" + "\t... 1 more\n", "java.lang.NullPointerException: oops\n" + "\tat p.C.method2(C.java:23)\n" + "\tat p.C.method1(C.java:17)\n" + "Caused: java.lang.IllegalStateException: more info\n" + "\tat p.C.method1(C.java:19)\n" + "\tat m.Main.main(Main.java:1)\n"); // Synthetic stack showing an exception made elsewhere, such as happens with hudson.remoting.Channel.attachCallSiteStackTrace. 
Throwable t = new Stack("remote.Exception: oops", "remote.Place.method:17", "remote.Service.run:9"); StackTraceElement[] callSite = new Stack("wrapped.Exception", "local.Side.call:11", "local.Main.main:1").getStackTrace(); StackTraceElement[] original = t.getStackTrace(); StackTraceElement[] combined = new StackTraceElement[original.length + 1 + callSite.length]; System.arraycopy(original, 0, combined, 0, original.length); combined[original.length] = new StackTraceElement(".....", "remote call", null, -2); System.arraycopy(callSite, 0, combined, original.length + 1, callSite.length); t.setStackTrace(combined); assertPrintThrowable(t, "remote.Exception: oops\n" + "\tat remote.Place.method(Place.java:17)\n" + "\tat remote.Service.run(Service.java:9)\n" + "\tat ......remote call(Native Method)\n" + "\tat local.Side.call(Side.java:11)\n" + "\tat local.Main.main(Main.java:1)\n", "remote.Exception: oops\n" + "\tat remote.Place.method(Place.java:17)\n" + "\tat remote.Service.run(Service.java:9)\n" + "\tat ......remote call(Native Method)\n" + "\tat local.Side.call(Side.java:11)\n" + "\tat local.Main.main(Main.java:1)\n"); // Same but now using a cause on the remote side. t = new Stack("remote.Wrapper: remote.Exception: oops", "remote.Place.method2:19", "remote.Service.run:9").cause(new Stack("remote.Exception: oops", "remote.Place.method1:11", "remote.Place.method2:17", "remote.Service.run:9")); callSite = new Stack("wrapped.Exception", "local.Side.call:11", "local.Main.main:1").getStackTrace(); original = t.getStackTrace(); combined = new StackTraceElement[original.length + 1 + callSite.length]; System.arraycopy(original, 0, combined, 0, original.length); combined[original.length] = new StackTraceElement(".....", "remote call", null, -2); System.arraycopy(callSite, 0, combined, original.length + 1, callSite.length); t.setStackTrace(combined); assertPrintThrowable(t, "remote.Wrapper: remote.Exception: oops\n" + "\tat remote.Place.method2(Place.java:19)\n" + "\tat remote.Service.run(Service.java:9)\n" + "\tat ......remote call(Native Method)\n" + "\tat local.Side.call(Side.java:11)\n" + "\tat local.Main.main(Main.java:1)\n" + "Caused by: remote.Exception: oops\n" + "\tat remote.Place.method1(Place.java:11)\n" + "\tat remote.Place.method2(Place.java:17)\n" + "\tat remote.Service.run(Service.java:9)\n", "remote.Exception: oops\n" + "\tat remote.Place.method1(Place.java:11)\n" + "\tat remote.Place.method2(Place.java:17)\n" + "\tat remote.Service.run(Service.java:9)\n" + // we do not know how to elide the common part in this case "Caused: remote.Wrapper\n" + "\tat remote.Place.method2(Place.java:19)\n" + "\tat remote.Service.run(Service.java:9)\n" + "\tat ......remote call(Native Method)\n" + "\tat local.Side.call(Side.java:11)\n" + "\tat local.Main.main(Main.java:1)\n"); // Suppressed exceptions: assertPrintThrowable(new Stack("java.lang.IllegalStateException: java.lang.NullPointerException: oops", "p.C.method1:19", "m.Main.main:1"). cause(new Stack("java.lang.NullPointerException: oops", "p.C.method2:23", "p.C.method1:17", "m.Main.main:1")). suppressed(new Stack("java.io.IOException: could not close", "p.C.close:99", "p.C.method1:18", "m.Main.main:1"), new Stack("java.io.IOException: java.lang.NullPointerException", "p.C.flush:77", "p.C.method1:18", "m.Main.main:1"). 
cause(new Stack("java.lang.NullPointerException", "p.C.findFlushee:70", "p.C.flush:75", "p.C.method1:18", "m.Main.main:1"))), "java.lang.IllegalStateException: java.lang.NullPointerException: oops\n" + "\tat p.C.method1(C.java:19)\n" + "\tat m.Main.main(Main.java:1)\n" + "\tSuppressed: java.io.IOException: could not close\n" + "\t\tat p.C.close(C.java:99)\n" + "\t\tat p.C.method1(C.java:18)\n" + "\t\t... 1 more\n" + "\tSuppressed: java.io.IOException: java.lang.NullPointerException\n" + "\t\tat p.C.flush(C.java:77)\n" + "\t\tat p.C.method1(C.java:18)\n" + "\t\t... 1 more\n" + "\tCaused by: java.lang.NullPointerException\n" + "\t\tat p.C.findFlushee(C.java:70)\n" + "\t\tat p.C.flush(C.java:75)\n" + "\t\t... 2 more\n" + "Caused by: java.lang.NullPointerException: oops\n" + "\tat p.C.method2(C.java:23)\n" + "\tat p.C.method1(C.java:17)\n" + "\t... 1 more\n", "java.lang.NullPointerException: oops\n" + "\tat p.C.method2(C.java:23)\n" + "\tat p.C.method1(C.java:17)\n" + "Also: java.io.IOException: could not close\n" + "\t\tat p.C.close(C.java:99)\n" + "\t\tat p.C.method1(C.java:18)\n" + "Also: java.lang.NullPointerException\n" + "\t\tat p.C.findFlushee(C.java:70)\n" + "\t\tat p.C.flush(C.java:75)\n" + "\tCaused: java.io.IOException\n" + "\t\tat p.C.flush(C.java:77)\n" + "\t\tat p.C.method1(C.java:18)\n" + "Caused: java.lang.IllegalStateException\n" + "\tat p.C.method1(C.java:19)\n" + "\tat m.Main.main(Main.java:1)\n"); // Custom printStackTrace implementations: assertPrintThrowable(new Throwable() { @Override public void printStackTrace(PrintWriter s) { s.println("Some custom exception"); } }, "Some custom exception\n", "Some custom exception\n"); // Circular references: Stack stack1 = new Stack("p.Exc1", "p.C.method1:17"); Stack stack2 = new Stack("p.Exc2", "p.C.method2:27"); stack1.cause(stack2); stack2.cause(stack1); //Format changed in 11.0.9 / 8.0.272 (JDK-8226809 / JDK-8252444 / JDK-8252489) if (getVersion().isNewerThanOrEqualTo(new VersionNumber("11.0.9")) || (getVersion().getDigitAt(0) == 8 && getVersion().isNewerThanOrEqualTo(new VersionNumber("8.0.272")))) { assertPrintThrowable(stack1, "p.Exc1\n" + "\tat p.C.method1(C.java:17)\n" + "Caused by: p.Exc2\n" + "\tat p.C.method2(C.java:27)\n" + "Caused by: [CIRCULAR REFERENCE: p.Exc1]\n", "<cycle to p.Exc1>\n" + "Caused: p.Exc2\n" + "\tat p.C.method2(C.java:27)\n" + "Caused: p.Exc1\n" + "\tat p.C.method1(C.java:17)\n"); } else { assertPrintThrowable(stack1, "p.Exc1\n" + "\tat p.C.method1(C.java:17)\n" + "Caused by: p.Exc2\n" + "\tat p.C.method2(C.java:27)\n" + "\t[CIRCULAR REFERENCE:p.Exc1]\n", "<cycle to p.Exc1>\n" + "Caused: p.Exc2\n" + "\tat p.C.method2(C.java:27)\n" + "Caused: p.Exc1\n" + "\tat p.C.method1(C.java:17)\n"); } }
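The helper above inverts the JDK's usual ordering so the root cause prints first instead of last. A minimal JDK-only sketch of the default ordering it works around; class and message names are illustrative:

```java
import java.io.PrintWriter;
import java.io.StringWriter;

public class CauseOrderDemo {
    public static void main(String[] args) {
        Throwable root = new NullPointerException("oops");
        Throwable wrapper = new IllegalStateException("more info", root);

        StringWriter sw = new StringWriter();
        wrapper.printStackTrace(new PrintWriter(sw, true));

        String jdkOutput = sw.toString();
        // JDK order: the wrapper's stack prints first, the root cause last
        // under "Caused by:" -- the opposite of what printThrowable emits.
        System.out.println(jdkOutput.indexOf("IllegalStateException")
                < jdkOutput.indexOf("Caused by: java.lang.NullPointerException")); // true
    }
}
```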
public void write(D datum, Encoder out) throws IOException { Objects.requireNonNull(out, "Encoder cannot be null"); try { write(root, datum, out); } catch (TracingNullPointException | TracingClassCastException | TracingAvroTypeException e) { throw e.summarize(root); } }
@Test void write() throws IOException { String json = "{\"type\": \"record\", \"name\": \"r\", \"fields\": [" + "{ \"name\": \"f1\", \"type\": \"long\" }" + "]}"; Schema s = new Schema.Parser().parse(json); GenericRecord r = new GenericData.Record(s); r.put("f1", 100L); ByteArrayOutputStream bao = new ByteArrayOutputStream(); GenericDatumWriter<GenericRecord> w = new GenericDatumWriter<>(s); Encoder e = EncoderFactory.get().jsonEncoder(s, bao); w.write(r, e); e.flush(); Object o = new GenericDatumReader<GenericRecord>(s).read(null, DecoderFactory.get().jsonDecoder(s, new ByteArrayInputStream(bao.toByteArray()))); assertEquals(r, o); }
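The test exercises write() through the JSON encoder; the same write(datum, encoder) path also drives Avro's binary encoder. A small round-trip sketch under that assumption, reusing the test's schema and field name:

```java
import java.io.ByteArrayOutputStream;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.EncoderFactory;

public class BinaryRoundTrip {
    public static void main(String[] args) throws Exception {
        Schema s = new Schema.Parser().parse(
            "{\"type\":\"record\",\"name\":\"r\",\"fields\":[{\"name\":\"f1\",\"type\":\"long\"}]}");
        GenericRecord r = new GenericData.Record(s);
        r.put("f1", 100L);

        // Write with the binary encoder instead of the JSON encoder.
        ByteArrayOutputStream bao = new ByteArrayOutputStream();
        BinaryEncoder enc = EncoderFactory.get().binaryEncoder(bao, null);
        new GenericDatumWriter<GenericRecord>(s).write(r, enc);
        enc.flush();

        // Read it back and verify the round trip.
        BinaryDecoder dec = DecoderFactory.get().binaryDecoder(bao.toByteArray(), null);
        GenericRecord back = new GenericDatumReader<GenericRecord>(s).read(null, dec);
        System.out.println(r.equals(back)); // true
    }
}
```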
@Override public synchronized void editSchedule() { updateConfigIfNeeded(); long startTs = clock.getTime(); CSQueue root = scheduler.getRootQueue(); Resource clusterResources = Resources.clone(scheduler.getClusterResource()); containerBasedPreemptOrKill(root, clusterResources); if (LOG.isDebugEnabled()) { LOG.debug("Total time used=" + (clock.getTime() - startTs) + " ms."); } }
@Test public void testZeroGuarOverCap() { int[][] qData = new int[][] { // / A B C D E F { 200, 100, 0, 100, 0, 100, 100 }, // abs { 200, 200, 200, 200, 200, 200, 200 }, // maxCap { 170, 170, 60, 20, 90, 0, 0 }, // used { 85, 50, 30, 10, 10, 20, 20 }, // pending { 0, 0, 0, 0, 0, 0, 0 }, // reserved { 4, 3, 1, 1, 1, 1, 1 }, // apps { -1, -1, 1, 1, 1, -1, 1 }, // req granularity { 2, 3, 0, 0, 0, 1, 0 }, // subqueues }; ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData); policy.editSchedule(); // No preemption should happen because zero guaranteed queues should be // treated as always satisfied, they should not preempt from each other. verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appA))); verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appB))); verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appC))); verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appD))); }
public void buyProduct(Product product) { LOGGER.info( String.format( "%s want to buy %s($%.2f)...", name, product.getName(), product.getSalePrice().getAmount())); try { withdraw(product.getSalePrice()); } catch (IllegalArgumentException ex) { LOGGER.error(ex.getMessage()); return; } try { customerDao.addProduct(product, this); purchases.add(product); LOGGER.info(String.format("%s bought %s!", name, product.getName())); } catch (SQLException exception) { receiveMoney(product.getSalePrice()); LOGGER.error(exception.getMessage()); } }
@Test
void shouldAddProductToPurchases() {
    product.setPrice(Money.of(USD, 200.0));
    customer.buyProduct(product);
    // assertEquals takes (expected, actual); keep that order so failure messages read correctly.
    assertEquals(new ArrayList<>(), customer.getPurchases());
    assertEquals(Money.of(USD, 100), customer.getMoney());
    product.setPrice(Money.of(USD, 100.0));
    customer.buyProduct(product);
    assertEquals(new ArrayList<>(Arrays.asList(product)), customer.getPurchases());
    assertEquals(Money.zero(USD), customer.getMoney());
}
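buyProduct is a hand-rolled compensating transaction: apply the local change (withdraw) first, then persist, and undo the local change (refund) if persistence fails. A generic sketch of that shape; Wallet and Store are hypothetical stand-ins, not the classes under test:

```java
import java.sql.SQLException;

class Wallet {
    private long cents;
    Wallet(long cents) { this.cents = cents; }
    void withdraw(long amount) {
        if (amount > cents) throw new IllegalArgumentException("insufficient funds");
        cents -= amount;
    }
    void deposit(long amount) { cents += amount; }
    long balance() { return cents; }
}

class Store {
    void buy(Wallet wallet, long priceCents) {
        wallet.withdraw(priceCents);     // step 1: local state change
        try {
            persistPurchase(priceCents); // step 2: may fail
        } catch (SQLException e) {
            wallet.deposit(priceCents);  // compensation: refund the withdrawal
        }
    }
    void persistPurchase(long priceCents) throws SQLException {
        /* pretend to write to a database */
    }
}
```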
public boolean isSynchronous() { return synchronous; }
@Test
public void testSynchronousProducer() {
    Endpoint endpoint = context.getEndpoint("sjms:queue:test.SjmsEndpointTest?synchronous=true");
    assertNotNull(endpoint);
    assertTrue(endpoint instanceof SjmsEndpoint);
    SjmsEndpoint qe = (SjmsEndpoint) endpoint;
    assertTrue(qe.isSynchronous());
}
public Optional<Measure> toMeasure(@Nullable MeasureDto measureDto, Metric metric) { requireNonNull(metric); if (measureDto == null) { return Optional.empty(); } Double value = measureDto.getValue(); String data = measureDto.getData(); switch (metric.getType().getValueType()) { case INT: return toIntegerMeasure(measureDto, value, data); case LONG: return toLongMeasure(measureDto, value, data); case DOUBLE: return toDoubleMeasure(measureDto, value, data); case BOOLEAN: return toBooleanMeasure(measureDto, value, data); case STRING: return toStringMeasure(measureDto, data); case LEVEL: return toLevelMeasure(measureDto, data); case NO_VALUE: return toNoValueMeasure(measureDto); default: throw new IllegalArgumentException("Unsupported Measure.ValueType " + metric.getType().getValueType()); } }
@Test public void toMeasure_returns_false_value_if_dto_has_invalid_value_for_Boolean_metric() { Optional<Measure> measure = underTest.toMeasure(new MeasureDto().setValue(1.987d), SOME_BOOLEAN_METRIC); assertThat(measure).isPresent(); assertThat(measure.get().getValueType()).isEqualTo(Measure.ValueType.BOOLEAN); assertThat(measure.get().getBooleanValue()).isFalse(); }
@Override protected String doGetSubscribedURLs(SubscriberMetadataIdentifier subscriberMetadataIdentifier) { return zkClient.getContent(getNodePath(subscriberMetadataIdentifier)); }
@Test void testDoGetSubscribedURLs() throws ExecutionException, InterruptedException { String interfaceName = "org.apache.dubbo.metadata.store.zookeeper.ZookeeperMetadataReport4TstService"; String version = "1.0.0"; String group = null; String application = "etc-metadata-report-consumer-test"; String revision = "90980"; String protocol = "xxx"; URL url = generateURL(interfaceName, version, group, application); SubscriberMetadataIdentifier subscriberMetadataIdentifier = new SubscriberMetadataIdentifier(application, revision); String r = JsonUtils.toJson(Arrays.asList(url.toString())); zookeeperMetadataReport.doSaveSubscriberData(subscriberMetadataIdentifier, r); String fileContent = zookeeperMetadataReport.zkClient.getContent( zookeeperMetadataReport.getNodePath(subscriberMetadataIdentifier)); Assertions.assertNotNull(fileContent); Assertions.assertEquals(fileContent, r); }
@Override public Mono<GetUnversionedProfileResponse> getUnversionedProfile(final GetUnversionedProfileAnonymousRequest request) { final ServiceIdentifier targetIdentifier = ServiceIdentifierUtil.fromGrpcServiceIdentifier(request.getRequest().getServiceIdentifier()); // Callers must be authenticated to request unversioned profiles by PNI if (targetIdentifier.identityType() == IdentityType.PNI) { throw Status.UNAUTHENTICATED.asRuntimeException(); } final Mono<Account> account = switch (request.getAuthenticationCase()) { case GROUP_SEND_TOKEN -> groupSendTokenUtil.checkGroupSendToken(request.getGroupSendToken(), List.of(targetIdentifier)) .then(Mono.fromFuture(() -> accountsManager.getByServiceIdentifierAsync(targetIdentifier))) .flatMap(Mono::justOrEmpty) .switchIfEmpty(Mono.error(Status.NOT_FOUND.asException())); case UNIDENTIFIED_ACCESS_KEY -> getTargetAccountAndValidateUnidentifiedAccess(targetIdentifier, request.getUnidentifiedAccessKey().toByteArray()); default -> Mono.error(Status.INVALID_ARGUMENT.asException()); }; return account.map(targetAccount -> ProfileGrpcHelper.buildUnversionedProfileResponse(targetIdentifier, null, targetAccount, profileBadgeConverter)); }
@Test void getUnversionedProfileUnidentifiedAccessKey() { final UUID targetUuid = UUID.randomUUID(); final org.whispersystems.textsecuregcm.identity.ServiceIdentifier serviceIdentifier = new AciServiceIdentifier(targetUuid); final byte[] unidentifiedAccessKey = TestRandomUtil.nextBytes(UnidentifiedAccessUtil.UNIDENTIFIED_ACCESS_KEY_LENGTH); final ECKeyPair identityKeyPair = Curve.generateKeyPair(); final IdentityKey identityKey = new IdentityKey(identityKeyPair.getPublicKey()); final List<Badge> badges = List.of(new Badge( "TEST", "other", "Test Badge", "This badge is in unit tests.", List.of("l", "m", "h", "x", "xx", "xxx"), "SVG", List.of( new BadgeSvg("sl", "sd"), new BadgeSvg("ml", "md"), new BadgeSvg("ll", "ld"))) ); when(account.getBadges()).thenReturn(Collections.emptyList()); when(profileBadgeConverter.convert(any(), any(), anyBoolean())).thenReturn(badges); when(account.isUnrestrictedUnidentifiedAccess()).thenReturn(false); when(account.getUnidentifiedAccessKey()).thenReturn(Optional.of(unidentifiedAccessKey)); when(account.getIdentityKey(org.whispersystems.textsecuregcm.identity.IdentityType.ACI)).thenReturn(identityKey); when(accountsManager.getByServiceIdentifierAsync(serviceIdentifier)).thenReturn(CompletableFuture.completedFuture(Optional.of(account))); final GetUnversionedProfileAnonymousRequest request = GetUnversionedProfileAnonymousRequest.newBuilder() .setUnidentifiedAccessKey(ByteString.copyFrom(unidentifiedAccessKey)) .setRequest(GetUnversionedProfileRequest.newBuilder() .setServiceIdentifier(ServiceIdentifier.newBuilder() .setIdentityType(IdentityType.IDENTITY_TYPE_ACI) .setUuid(ByteString.copyFrom(UUIDUtil.toBytes(targetUuid))) .build()) .build()) .build(); final GetUnversionedProfileResponse response = unauthenticatedServiceStub().getUnversionedProfile(request); final byte[] unidentifiedAccessChecksum = UnidentifiedAccessChecksum.generateFor(unidentifiedAccessKey); final GetUnversionedProfileResponse expectedResponse = GetUnversionedProfileResponse.newBuilder() .setIdentityKey(ByteString.copyFrom(identityKey.serialize())) .setUnidentifiedAccess(ByteString.copyFrom(unidentifiedAccessChecksum)) .setUnrestrictedUnidentifiedAccess(false) .setCapabilities(ProfileGrpcHelper.buildUserCapabilities(UserCapabilities.createForAccount(account))) .addAllBadges(ProfileGrpcHelper.buildBadges(badges)) .build(); verify(accountsManager).getByServiceIdentifierAsync(serviceIdentifier); assertEquals(expectedResponse, response); }
@Override public void writeAttribute(String prefix, String namespaceURI, String localName, String value) throws XMLStreamException { String filteredValue = nonXmlCharFilterer.filter(value); writer.writeAttribute(prefix, namespaceURI, localName, filteredValue); }
@Test public void testWriteAttribute3Args() throws XMLStreamException { filteringXmlStreamWriter.writeAttribute("namespaceURI", "localName", "value"); verify(xmlStreamWriterMock).writeAttribute("namespaceURI", "localName", "filteredValue"); }
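The writer above delegates after running attribute values through nonXmlCharFilterer, whose implementation is not shown. Here is a sketch of the usual approach, assuming characters outside the XML 1.0 valid ranges are replaced with a space (the real replacement character may differ):

```java
public class NonXmlCharFilter {
    // Valid XML 1.0 chars: #x9, #xA, #xD, #x20-#xD7FF, #xE000-#xFFFD.
    // Note: this simple BMP range check also replaces surrogate halves,
    // i.e. supplementary characters; a production filter would keep
    // well-formed surrogate pairs.
    private static final String INVALID =
        "[^\\x09\\x0A\\x0D\\x20-\\uD7FF\\uE000-\\uFFFD]";

    public static String filter(String value) {
        return value == null ? null : value.replaceAll(INVALID, " ");
    }

    public static void main(String[] args) {
        System.out.println(filter("ok\u0000bad\u0008chars")); // "ok bad chars"
    }
}
```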
public TableMetadata maybeAppendSnapshots( TableMetadata metadata, List<Snapshot> snapshotsToAppend, Map<String, SnapshotRef> snapshotRefs, boolean recordAction) { TableMetadata.Builder metadataBuilder = TableMetadata.buildFrom(metadata); List<String> appendedSnapshots = new ArrayList<>(); List<String> stagedSnapshots = new ArrayList<>(); List<String> cherryPickedSnapshots = new ArrayList<>(); // Throw an exception if client sent request that included non-main branches in the // snapshotRefs. for (Map.Entry<String, SnapshotRef> entry : snapshotRefs.entrySet()) { if (!entry.getKey().equals(SnapshotRef.MAIN_BRANCH)) { throw new UnsupportedOperationException("OpenHouse supports only MAIN branch"); } } /** * First check if there are new snapshots to be appended to current TableMetadata. If yes, * following are the cases to be handled: * * <p>[1] A regular (non-wap) snapshot is being added to the MAIN branch. * * <p>[2] A staged (wap) snapshot is being created on top of current snapshot as its base. * Recognized by STAGED_WAP_ID_PROP. * * <p>[3] A staged (wap) snapshot is being cherry picked to the MAIN branch wherein current * snapshot in the MAIN branch is not the same as the base snapshot the staged (wap) snapshot * was created on. Recognized by SOURCE_SNAPSHOT_ID_PROP. This case is called non-fast forward * cherry pick. * * <p>In case no new snapshots are to be appended to current TableMetadata, there could be a * cherrypick of a staged (wap) snapshot on top of the current snapshot in the MAIN branch which * is the same as the base snapshot the staged (wap) snapshot was created on. This case is * called fast forward cherry pick. */ if (CollectionUtils.isNotEmpty(snapshotsToAppend)) { for (Snapshot snapshot : snapshotsToAppend) { snapshotInspector.validateSnapshot(snapshot); if (snapshot.summary().containsKey(SnapshotSummary.STAGED_WAP_ID_PROP)) { // a stage only snapshot using wap.id metadataBuilder.addSnapshot(snapshot); stagedSnapshots.add(String.valueOf(snapshot.snapshotId())); } else if (snapshot.summary().containsKey(SnapshotSummary.SOURCE_SNAPSHOT_ID_PROP)) { // a snapshot created on a non fast-forward cherry-pick snapshot metadataBuilder.setBranchSnapshot(snapshot, SnapshotRef.MAIN_BRANCH); appendedSnapshots.add(String.valueOf(snapshot.snapshotId())); cherryPickedSnapshots.add( String.valueOf(snapshot.summary().get(SnapshotSummary.SOURCE_SNAPSHOT_ID_PROP))); } else { // a regular snapshot metadataBuilder.setBranchSnapshot(snapshot, SnapshotRef.MAIN_BRANCH); appendedSnapshots.add(String.valueOf(snapshot.snapshotId())); } } } else if (MapUtils.isNotEmpty(snapshotRefs)) { // Updated ref in the main branch with no new snapshot means this is a // fast-forward cherry-pick or rollback operation. long newSnapshotId = snapshotRefs.get(SnapshotRef.MAIN_BRANCH).snapshotId(); // Either the current snapshot is null or the current snapshot is not equal // to the new snapshot indicates an update. The first case happens when the // stage/wap snapshot being cherry-picked is the first snapshot. 
if (MapUtils.isEmpty(metadata.refs()) || metadata.refs().get(SnapshotRef.MAIN_BRANCH).snapshotId() != newSnapshotId) { metadataBuilder.setBranchSnapshot(newSnapshotId, SnapshotRef.MAIN_BRANCH); cherryPickedSnapshots.add(String.valueOf(newSnapshotId)); } } if (recordAction) { Map<String, String> updatedProperties = new HashMap<>(metadata.properties()); if (CollectionUtils.isNotEmpty(appendedSnapshots)) { updatedProperties.put( getCanonicalFieldName(CatalogConstants.APPENDED_SNAPSHOTS), appendedSnapshots.stream().collect(Collectors.joining(","))); metricsReporter.count( InternalCatalogMetricsConstant.SNAPSHOTS_ADDED_CTR, appendedSnapshots.size()); } if (CollectionUtils.isNotEmpty(stagedSnapshots)) { updatedProperties.put( getCanonicalFieldName(CatalogConstants.STAGED_SNAPSHOTS), stagedSnapshots.stream().collect(Collectors.joining(","))); metricsReporter.count( InternalCatalogMetricsConstant.SNAPSHOTS_STAGED_CTR, stagedSnapshots.size()); } if (CollectionUtils.isNotEmpty(cherryPickedSnapshots)) { updatedProperties.put( getCanonicalFieldName(CatalogConstants.CHERRY_PICKED_SNAPSHOTS), cherryPickedSnapshots.stream().collect(Collectors.joining(","))); metricsReporter.count( InternalCatalogMetricsConstant.SNAPSHOTS_CHERRY_PICKED_CTR, cherryPickedSnapshots.size()); } metadataBuilder.setProperties(updatedProperties); } return metadataBuilder.build(); }
@Test
void testAppendSnapshotsWithOldSnapshots() throws IOException {
    TableMetadata metadata =
        TableMetadata.buildFrom(BASE_TABLE_METADATA)
            .setPreviousFileLocation("tmp_location")
            .setLocation(BASE_TABLE_METADATA.metadataFileLocation())
            .build();
    // all snapshots are from the past, so appending them should fail validation
    List<Snapshot> snapshots = IcebergTestUtil.getSnapshots();
    Assertions.assertThrows(
        IllegalArgumentException.class,
        () ->
            openHouseInternalTableOperations.maybeAppendSnapshots(
                metadata, snapshots, ImmutableMap.of(), false));
    // the latest snapshots have a larger timestamp than the previous metadata timestamp,
    // so they should pass validation
    snapshots.addAll(IcebergTestUtil.getFutureSnapshots());
    openHouseInternalTableOperations.maybeAppendSnapshots(
        metadata, snapshots, ImmutableMap.of(), false);
}
public static List<String> finalDestination(List<String> elements) { if (isMagicPath(elements)) { List<String> destDir = magicPathParents(elements); List<String> children = magicPathChildren(elements); checkArgument(!children.isEmpty(), "No path found under the prefix " + MAGIC_PATH_PREFIX); ArrayList<String> dest = new ArrayList<>(destDir); if (containsBasePath(children)) { // there's a base marker in the path List<String> baseChildren = basePathChildren(children); checkArgument(!baseChildren.isEmpty(), "No path found under " + BASE); dest.addAll(baseChildren); } else { dest.add(filename(children)); } return dest; } else { return elements; } }
@Test(expected = IllegalArgumentException.class) public void testFinalDestinationBaseNoChild() { assertEquals(l(), finalDestination(l(MAGIC_PATH_PREFIX, BASE))); }
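finalDestination rewrites a path containing a magic marker directory: everything before the marker is the destination directory, a base marker keeps its whole relative subtree, otherwise only the final filename survives. An illustrative sketch that assumes the conventional S3A literals "__magic" and "__base" for MAGIC_PATH_PREFIX and BASE, and omits the emptiness checks the test above exercises:

```java
import java.util.ArrayList;
import java.util.List;

public class MagicPathDemo {
    static List<String> finalDestination(List<String> elements) {
        int magic = elements.indexOf("__magic");
        if (magic < 0) {
            return elements; // not a magic path: unchanged
        }
        List<String> dest = new ArrayList<>(elements.subList(0, magic));
        List<String> children = elements.subList(magic + 1, elements.size());
        int base = children.indexOf("__base");
        if (base >= 0) {
            // keep the whole subtree under __base
            dest.addAll(children.subList(base + 1, children.size()));
        } else {
            // keep only the final filename
            dest.add(children.get(children.size() - 1));
        }
        return dest;
    }

    public static void main(String[] args) {
        System.out.println(finalDestination(
            List.of("bucket", "out", "__magic", "job1", "__base", "part-0000")));
        // -> [bucket, out, part-0000]
    }
}
```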
public static Node build(final List<JoinInfo> joins) { Node root = null; for (final JoinInfo join : joins) { if (root == null) { root = new Leaf(join.getLeftSource()); } if (root.containsSource(join.getRightSource()) && root.containsSource(join.getLeftSource())) { throw new KsqlException("Cannot perform circular join - both " + join.getRightSource() + " and " + join.getLeftJoinExpression() + " are already included in the current join tree: " + root.debugString(0)); } else if (root.containsSource(join.getLeftSource())) { root = new Join(root, new Leaf(join.getRightSource()), join); } else if (root.containsSource(join.getRightSource())) { root = new Join(root, new Leaf(join.getLeftSource()), join.flip()); } else { throw new KsqlException( "Cannot build JOIN tree; neither source in the join is the FROM source or included " + "in a previous JOIN: " + join + ". The current join tree is " + root.debugString(0) ); } } return root; }
@Test public void shouldIncludeOnlyColFromFirstInViableKeyIfOverlap() { // Given: when(j1.getLeftSource()).thenReturn(a); when(j1.getRightSource()).thenReturn(b); when(j2.getLeftSource()).thenReturn(a); when(j2.getRightSource()).thenReturn(c); when(j1.getLeftJoinExpression()).thenReturn(e1); when(j1.getRightJoinExpression()).thenReturn(col2); when(j2.getLeftJoinExpression()).thenReturn(e1); when(j2.getRightJoinExpression()).thenReturn(e2); final List<JoinInfo> joins = ImmutableList.of(j1, j2); final Node root = JoinTree.build(joins); // When: final List<?> keys = root.viableKeyColumns(); // Then: assertThat(keys, contains(col2)); }
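build folds the join list into a left-deep tree, flipping a join whose right source is already present and rejecting circular or disconnected joins. A generic sketch of the same fold with hypothetical stand-in types, not the KSQL classes:

```java
import java.util.List;

public class LeftDeepJoin {
    record Join(String left, String right) {
        Join flip() { return new Join(right, left); }
    }
    sealed interface Node permits Leaf, Branch {}
    record Leaf(String source) implements Node {}
    record Branch(Node left, Leaf right, Join info) implements Node {}

    static boolean contains(Node n, String src) {
        return n instanceof Leaf l ? l.source().equals(src)
             : contains(((Branch) n).left(), src)
               || ((Branch) n).right().source().equals(src);
    }

    static Node build(List<Join> joins) {
        Node root = null;
        for (Join j : joins) {
            if (root == null) root = new Leaf(j.left());
            if (contains(root, j.left()) && contains(root, j.right()))
                throw new IllegalStateException("circular join");          // both sides already joined
            else if (contains(root, j.left()))
                root = new Branch(root, new Leaf(j.right()), j);           // attach right source
            else if (contains(root, j.right()))
                root = new Branch(root, new Leaf(j.left()), j.flip());     // flip, attach left source
            else
                throw new IllegalStateException("disconnected join");      // neither side in tree
        }
        return root;
    }
}
```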
@Override public KTable<Windowed<K>, V> aggregate(final Initializer<V> initializer, final Merger<? super K, V> sessionMerger) { return aggregate(initializer, sessionMerger, Materialized.with(null, null)); }
@Test public void shouldNotHaveNullSessionMerger3OnAggregate() { assertThrows(NullPointerException.class, () -> windowedCogroupedStream.aggregate(MockInitializer.STRING_INIT, null, Named.as("name"), Materialized.as("test"))); }
@Override public final boolean wasNull() throws SQLException { return mergedResult.wasNull(); }
@Test void assertWasNull() throws SQLException { when(mergedResult.wasNull()).thenReturn(true); assertTrue(decoratorMergedResult.wasNull()); }
public static <T> T requireNonNull(T obj, String msg) { if (obj == null) { throw new PowerJobException(msg); } if (obj instanceof String) { if (StringUtils.isEmpty((String) obj)) { throw new PowerJobException(msg); } } if (obj instanceof Collection) { if (CollectionUtils.isEmpty((Collection<?>) obj)) { throw new PowerJobException(msg); } } if (obj instanceof Map) { if (MapUtils.isEmpty((Map<?, ?>) obj)) { throw new PowerJobException(msg); } } return obj; }
@Test void testRequireNonNull() { assertThrowsExactly(PowerJobException.class, () -> CommonUtils.requireNonNull(null, "NULL_OBJ")); assertThrowsExactly(PowerJobException.class, () -> CommonUtils.requireNonNull("", "EMPTY_STR")); assertThrowsExactly(PowerJobException.class, () -> CommonUtils.requireNonNull(Lists.newArrayList(), "EMPTY_COLLECTION")); assertThrowsExactly(PowerJobException.class, () -> CommonUtils.requireNonNull(Collections.emptyMap(), "EMPTY_MAP")); Map<String, Object> map = Maps.newHashMap(); map.put("1", 1); CommonUtils.requireNonNull(1, "NORMAL"); CommonUtils.requireNonNull("1", "NORMAL"); CommonUtils.requireNonNull(Lists.newArrayList("1"), "NORMAL"); CommonUtils.requireNonNull(map, "NORMAL"); }
public static <T extends PulsarConfiguration> T create(String configFile, Class<? extends PulsarConfiguration> clazz) throws IOException, IllegalArgumentException { requireNonNull(configFile); try (InputStream inputStream = new FileInputStream(configFile)) { return create(inputStream, clazz); } }
@Test public void testBackwardCompatibility() throws IOException { File testConfigFile = new File("tmp." + System.currentTimeMillis() + ".properties"); if (testConfigFile.exists()) { testConfigFile.delete(); } try (PrintWriter printWriter = new PrintWriter(new OutputStreamWriter(new FileOutputStream(testConfigFile)))) { printWriter.println("zooKeeperSessionTimeoutMillis=60"); printWriter.println("zooKeeperOperationTimeoutSeconds=600"); printWriter.println("zooKeeperCacheExpirySeconds=500"); } testConfigFile.deleteOnExit(); InputStream stream = new FileInputStream(testConfigFile); ServiceConfiguration serviceConfig = PulsarConfigurationLoader.create(stream, ServiceConfiguration.class); stream.close(); assertEquals(serviceConfig.getMetadataStoreSessionTimeoutMillis(), 60); assertEquals(serviceConfig.getMetadataStoreOperationTimeoutSeconds(), 600); assertEquals(serviceConfig.getMetadataStoreCacheExpirySeconds(), 500); testConfigFile = new File("tmp." + System.currentTimeMillis() + ".properties"); if (testConfigFile.exists()) { testConfigFile.delete(); } try (PrintWriter printWriter = new PrintWriter(new OutputStreamWriter(new FileOutputStream(testConfigFile)))) { printWriter.println("metadataStoreSessionTimeoutMillis=60"); printWriter.println("metadataStoreOperationTimeoutSeconds=600"); printWriter.println("metadataStoreCacheExpirySeconds=500"); printWriter.println("zooKeeperSessionTimeoutMillis=-1"); printWriter.println("zooKeeperOperationTimeoutSeconds=-1"); printWriter.println("zooKeeperCacheExpirySeconds=-1"); } testConfigFile.deleteOnExit(); stream = new FileInputStream(testConfigFile); serviceConfig = PulsarConfigurationLoader.create(stream, ServiceConfiguration.class); stream.close(); assertEquals(serviceConfig.getMetadataStoreSessionTimeoutMillis(), 60); assertEquals(serviceConfig.getMetadataStoreOperationTimeoutSeconds(), 600); assertEquals(serviceConfig.getMetadataStoreCacheExpirySeconds(), 500); testConfigFile = new File("tmp." + System.currentTimeMillis() + ".properties"); if (testConfigFile.exists()) { testConfigFile.delete(); } try (PrintWriter printWriter = new PrintWriter(new OutputStreamWriter(new FileOutputStream(testConfigFile)))) { printWriter.println("metadataStoreSessionTimeoutMillis=10"); printWriter.println("metadataStoreOperationTimeoutSeconds=20"); printWriter.println("metadataStoreCacheExpirySeconds=30"); printWriter.println("zooKeeperSessionTimeoutMillis=100"); printWriter.println("zooKeeperOperationTimeoutSeconds=200"); printWriter.println("zooKeeperCacheExpirySeconds=300"); } testConfigFile.deleteOnExit(); stream = new FileInputStream(testConfigFile); serviceConfig = PulsarConfigurationLoader.create(stream, ServiceConfiguration.class); stream.close(); assertEquals(serviceConfig.getMetadataStoreSessionTimeoutMillis(), 100); assertEquals(serviceConfig.getMetadataStoreOperationTimeoutSeconds(), 200); assertEquals(serviceConfig.getMetadataStoreCacheExpirySeconds(), 300); }
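The loader's core pattern is a properties file parsed from an InputStream that is closed via try-with-resources. A minimal sketch of just that load step; the reflection-based mapping of properties onto typed config fields is Pulsar-specific and omitted here:

```java
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

public class PropertiesLoad {
    static Properties load(String configFile) throws IOException {
        Properties props = new Properties();
        // try-with-resources guarantees the stream is closed even on parse failure
        try (InputStream in = new FileInputStream(configFile)) {
            props.load(in);
        }
        return props;
    }

    public static void main(String[] args) throws IOException {
        Properties p = load(args[0]);
        System.out.println(p.getProperty("metadataStoreSessionTimeoutMillis", "<unset>"));
    }
}
```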
public static ResourceModel processResource(final Class<?> resourceClass) { return processResource(resourceClass, null); }
@Test(expectedExceptions = ResourceConfigException.class) public void failsOnSimpleResourceWithInvalidMethod() { @RestLiSimpleResource(name = "simpleResourceWithUnsupportedMethod") class LocalClass extends SimpleResourceTemplate<EmptyRecord> { @RestMethod.GetAll public List<EmptyRecord> getAll() { return Collections.emptyList(); } } RestLiAnnotationReader.processResource(LocalClass.class); Assert.fail("#validateSimpleResource should fail throwing a ResourceConfigException"); }
@Override public Serializer<AvroWrapper<T>> getSerializer(Class<AvroWrapper<T>> c) { Configuration conf = getConf(); Schema schema; if (AvroKey.class.isAssignableFrom(c)) { schema = getKeyWriterSchema(conf); } else if (AvroValue.class.isAssignableFrom(c)) { schema = getValueWriterSchema(conf); } else { throw new IllegalStateException("Only AvroKey and AvroValue are supported."); } GenericData dataModel = createDataModel(conf); DatumWriter<T> datumWriter = dataModel.createDatumWriter(schema); return new AvroSerializer<>(schema, datumWriter); }
@Test void getSerializerForValue() throws IOException { // Set the writer schema in the job configuration. Schema writerSchema = Schema.create(Schema.Type.STRING); Job job = Job.getInstance(); AvroJob.setMapOutputValueSchema(job, writerSchema); // Get a serializer from the configuration. AvroSerialization serialization = ReflectionUtils.newInstance(AvroSerialization.class, job.getConfiguration()); @SuppressWarnings("unchecked") Serializer<AvroWrapper> serializer = serialization.getSerializer(AvroValue.class); assertTrue(serializer instanceof AvroSerializer); AvroSerializer avroSerializer = (AvroSerializer) serializer; // Check that the writer schema is set correctly on the serializer. assertEquals(writerSchema, avroSerializer.getWriterSchema()); }
public static RowCoder of(Schema schema) { return new RowCoder(schema); }
@Test public void testEncodingPositionReorderFields() throws Exception { Schema schema1 = Schema.builder() .addNullableField("f_int32", FieldType.INT32) .addNullableField("f_string", FieldType.STRING) .build(); Schema schema2 = Schema.builder() .addNullableField("f_string", FieldType.STRING) .addNullableField("f_int32", FieldType.INT32) .build(); schema2.setEncodingPositions(ImmutableMap.of("f_int32", 0, "f_string", 1)); Row row = Row.withSchema(schema1) .withFieldValue("f_int32", 42) .withFieldValue("f_string", "hello world!") .build(); Row expected = Row.withSchema(schema2) .withFieldValue("f_int32", 42) .withFieldValue("f_string", "hello world!") .build(); ByteArrayOutputStream os = new ByteArrayOutputStream(); RowCoder.of(schema1).encode(row, os); Row decoded = RowCoder.of(schema2).decode(new ByteArrayInputStream(os.toByteArray())); assertEquals(expected, decoded); }
void removeQueuedBlock(DatanodeStorageInfo storageInfo, Block block) { if (storageInfo == null || block == null) { return; } Block blk = new Block(block); if (BlockIdManager.isStripedBlockID(block.getBlockId())) { blk = new Block(BlockIdManager.convertToStripedID(block .getBlockId())); } Queue<ReportedBlockInfo> queue = queueByBlockId.get(blk); if (queue == null) { return; } // We only want the latest non-future reported block to be queued for each // DataNode. Otherwise, there can be a race condition that causes an old // reported block to be kept in the queue until the SNN switches to ANN and // the old reported block will be processed and marked as corrupt by the ANN. // See HDFS-17453 int size = queue.size(); if (queue.removeIf(rbi -> storageInfo.equals(rbi.storageInfo))) { count -= (size - queue.size()); } // If the block message queue is now empty, we should remove the block // from the queue. if (queue.isEmpty()) { queueByBlockId.remove(blk); } }
@Test public void testRemoveQueuedBlock() { DatanodeDescriptor fakeDN1 = DFSTestUtil.getDatanodeDescriptor( "localhost", 8898, "/default-rack"); DatanodeDescriptor fakeDN2 = DFSTestUtil.getDatanodeDescriptor( "localhost", 8899, "/default-rack"); DatanodeStorage storage1 = new DatanodeStorage("STORAGE_ID_1"); DatanodeStorage storage2 = new DatanodeStorage("STORAGE_ID_2"); DatanodeStorageInfo storageInfo1 = new DatanodeStorageInfo(fakeDN1, storage1); DatanodeStorageInfo storageInfo2 = new DatanodeStorageInfo(fakeDN2, storage2); msgs.enqueueReportedBlock(storageInfo1, block1Gs1, ReplicaState.FINALIZED); msgs.enqueueReportedBlock(storageInfo2, block1Gs1, ReplicaState.FINALIZED); msgs.enqueueReportedBlock(storageInfo1, block1Gs2, ReplicaState.FINALIZED); msgs.enqueueReportedBlock(storageInfo2, block1Gs2, ReplicaState.FINALIZED); List<ReportedBlockInfo> rbis = Arrays.asList( new ReportedBlockInfo(storageInfo2, block1Gs1, ReplicaState.FINALIZED), new ReportedBlockInfo(storageInfo2, block1Gs2, ReplicaState.FINALIZED)); assertEquals(4, msgs.count()); // Nothing queued yet for block 2 assertNull(msgs.takeBlockQueue(block2Gs1)); assertEquals(4, msgs.count()); msgs.removeQueuedBlock(storageInfo1, block1Gs1); Queue<ReportedBlockInfo> q = msgs.takeBlockQueue(block1Gs2DifferentInstance); assertEquals(Joiner.on(",").join(rbis), Joiner.on(",").join(q)); assertEquals(0, msgs.count()); // Should be null if we pull again; assertNull(msgs.takeBlockQueue(block1Gs2)); assertEquals(0, msgs.count()); }
public static void stopProxy(Object proxy) { if (proxy == null) { throw new HadoopIllegalArgumentException( "Cannot close proxy since it is null"); } try { if (proxy instanceof Closeable) { ((Closeable) proxy).close(); return; } else { InvocationHandler handler = Proxy.getInvocationHandler(proxy); if (handler instanceof Closeable) { ((Closeable) handler).close(); return; } } } catch (IOException e) { LOG.error("Closing proxy or invocation handler caused exception", e); } catch (IllegalArgumentException e) { LOG.error("RPC.stopProxy called on non proxy: class=" + proxy.getClass().getName(), e); } // If you see this error on a mock object in a unit test you're // developing, make sure to use MockitoUtil.mockProtocol() to // create your mock. throw new HadoopIllegalArgumentException( "Cannot close proxy - is not Closeable or " + "does not provide closeable invocation handler " + proxy.getClass()); }
@Test public void testStopMockObject() throws IOException { RPC.stopProxy(MockitoUtil.mockProtocol(TestProtocol.class)); }
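stopProxy closes either the proxy itself or, failing that, its InvocationHandler. A JDK-only sketch of the handler-is-Closeable arrangement the mock-protocol test relies on; the Service interface and Handler class here are illustrative:

```java
import java.io.Closeable;
import java.io.IOException;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;

public class CloseableHandlerDemo {
    interface Service { String ping(); }

    static class Handler implements InvocationHandler, Closeable {
        boolean closed;
        @Override public Object invoke(Object proxy, Method m, Object[] a) {
            return "pong";
        }
        @Override public void close() { closed = true; }
    }

    public static void main(String[] args) throws IOException {
        Handler handler = new Handler();
        Service svc = (Service) Proxy.newProxyInstance(
            Service.class.getClassLoader(), new Class<?>[] {Service.class}, handler);

        // The proxy itself does not implement Closeable, so close via its handler:
        InvocationHandler h = Proxy.getInvocationHandler(svc);
        if (h instanceof Closeable c) {
            c.close();
        }
        System.out.println(handler.closed); // true
    }
}
```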
static void verifyAddMissingValues(final List<KiePMMLMiningField> notTargetMiningFields, final PMMLRequestData requestData) { logger.debug("verifyMissingValues {} {}", notTargetMiningFields, requestData); Collection<ParameterInfo> requestParams = requestData.getRequestParams(); notTargetMiningFields .forEach(miningField -> { ParameterInfo parameterInfo = requestParams.stream() .filter(paramInfo -> miningField.getName().equals(paramInfo.getName())) .findFirst() .orElse(null); if (parameterInfo == null) { manageMissingValues(miningField, requestData); } }); }
@Test void verifyAddMissingValuesNotMissingNotReturnInvalidReplacement() { KiePMMLMiningField miningField0 = KiePMMLMiningField.builder("FIELD-0", null) .withDataType(DATA_TYPE.STRING) .withMissingValueTreatmentMethod(MISSING_VALUE_TREATMENT_METHOD.AS_IS) .withMissingValueReplacement("123") .withAllowedValues(Arrays.asList("123", "124", "125")) .build(); KiePMMLMiningField miningField1 = KiePMMLMiningField.builder("FIELD-1", null) .withDataType(DATA_TYPE.DOUBLE) .withMissingValueTreatmentMethod(MISSING_VALUE_TREATMENT_METHOD.AS_IS) .withMissingValueReplacement("1.23") .withAllowedValues(Arrays.asList("1.23", "12.4", "1.25")) .build(); List<KiePMMLInterval> intervals = Arrays.asList(new KiePMMLInterval(0.0, 12.4, CLOSURE.CLOSED_CLOSED), new KiePMMLInterval(12.6, 14.5, CLOSURE.OPEN_CLOSED)); KiePMMLMiningField miningField2 = KiePMMLMiningField.builder("FIELD-2", null) .withDataType(DATA_TYPE.FLOAT) .withMissingValueTreatmentMethod(MISSING_VALUE_TREATMENT_METHOD.AS_IS) .withMissingValueReplacement("12.9") .withIntervals(intervals) .build(); List<KiePMMLMiningField> miningFields = Arrays.asList(miningField0, miningField1, miningField2); PMMLRequestData pmmlRequestData = new PMMLRequestData("123", "modelName"); assertThat(pmmlRequestData.getRequestParams()).isEmpty(); PreProcess.verifyAddMissingValues(miningFields, pmmlRequestData); Map<String, ParameterInfo> mappedRequestParams = pmmlRequestData.getMappedRequestParams(); assertThat(mappedRequestParams).hasSameSizeAs(miningFields); assertThat(mappedRequestParams.get("FIELD-0").getValue()).isEqualTo("123"); assertThat(mappedRequestParams.get("FIELD-1").getValue()).isEqualTo(1.23); assertThat(mappedRequestParams.get("FIELD-2").getValue()).isEqualTo(12.9f); }
public B addProtocol(ProtocolConfig protocol) { if (this.protocols == null) { this.protocols = new ArrayList<>(); } this.protocols.add(protocol); return getThis(); }
@Test void addProtocol() { ProtocolConfig protocol = new ProtocolConfig(); ServiceBuilder builder = new ServiceBuilder(); Assertions.assertNull(builder.build().getProtocols()); builder.addProtocol(protocol); Assertions.assertNotNull(builder.build().getProtocols()); Assertions.assertEquals(1, builder.build().getProtocols().size()); Assertions.assertEquals(protocol, builder.build().getProtocol()); }
@Override public void start() { if (taskExecutorThread == null) { taskExecutorThread = new TaskExecutorThread(name); taskExecutorThread.start(); shutdownGate = new CountDownLatch(1); } }
@Test public void shouldProcessTasks() { when(taskExecutionMetadata.canProcessTask(any(), anyLong())).thenReturn(true); when(task.isProcessable(anyLong())).thenReturn(true); taskExecutor.start(); verify(task, timeout(VERIFICATION_TIMEOUT).atLeast(2)).process(anyLong()); verify(task, timeout(VERIFICATION_TIMEOUT).atLeastOnce()).recordProcessBatchTime(anyLong()); }
@Override public List<TaskProperty> getPropertiesForDisplay() { ArrayList<TaskProperty> taskProperties = new ArrayList<>(); if (PluggableTaskConfigStore.store().hasPreferenceFor(pluginConfiguration.getId())) { TaskPreference preference = taskPreference(); List<? extends Property> propertyDefinitions = preference.getConfig().list(); for (Property propertyDefinition : propertyDefinitions) { ConfigurationProperty configuredProperty = configuration.getProperty(propertyDefinition.getKey()); if (configuredProperty == null) continue; taskProperties.add(new TaskProperty(propertyDefinition.getOption(Property.DISPLAY_NAME), configuredProperty.getDisplayValue(), configuredProperty.getConfigKeyName())); } return taskProperties; } for (ConfigurationProperty property : configuration) { taskProperties.add(new TaskProperty(property.getConfigKeyName(), property.getDisplayValue())); } return taskProperties; }
@Test public void shouldPopulatePropertiesForDisplayRetainingOrderAndDisplayNameIfConfigured() throws Exception { Task taskDetails = mock(Task.class); TaskConfig taskConfig = new TaskConfig(); addProperty(taskConfig, "KEY2", "Key 2", 1); addProperty(taskConfig, "KEY1", "Key 1", 0); addProperty(taskConfig, "KEY3", "Key 3", 2); when(taskDetails.config()).thenReturn(taskConfig); when(taskDetails.view()).thenReturn(mock(TaskView.class)); String pluginId = "plugin_with_all_details"; PluggableTaskConfigStore.store().setPreferenceFor(pluginId, new TaskPreference(taskDetails)); Configuration configuration = new Configuration( ConfigurationPropertyMother.create("KEY3", true, "encryptedValue1"), ConfigurationPropertyMother.create("KEY1", false, "value1"), ConfigurationPropertyMother.create("KEY2", false, "value2") ); PluggableTask task = new PluggableTask(new PluginConfiguration(pluginId, "1"), configuration); List<TaskProperty> propertiesForDisplay = task.getPropertiesForDisplay(); assertThat(propertiesForDisplay.size(), is(3)); assertProperty(propertiesForDisplay.get(0), "Key 1", "value1", "key1"); assertProperty(propertiesForDisplay.get(1), "Key 2", "value2", "key2"); assertProperty(propertiesForDisplay.get(2), "Key 3", "****", "key3"); }
@UdafFactory(description = "Compute sample standard deviation of column with type Integer.", aggregateSchema = "STRUCT<SUM integer, COUNT bigint, M2 double>") public static TableUdaf<Integer, Struct, Double> stdDevInt() { return getStdDevImplementation( 0, STRUCT_INT, (agg, newValue) -> newValue + agg.getInt32(SUM), (agg, newValue) -> Double.valueOf(newValue * (agg.getInt64(COUNT) + 1) - (agg.getInt32(SUM) + newValue)), (agg1, agg2) -> agg1.getInt32(SUM).doubleValue() / agg1.getInt64(COUNT).doubleValue() - agg2.getInt32(SUM).doubleValue() / agg2.getInt64(COUNT).doubleValue(), (agg1, agg2) -> agg1.getInt32(SUM) + agg2.getInt32(SUM), (agg, valueToRemove) -> agg.getInt32(SUM) - valueToRemove); }
@Test
public void shouldReturnZeroStdDevForEmptyAggregate() {
    final TableUdaf<Integer, Struct, Double> udaf = stdDevInt();
    final Struct agg = udaf.initialize();
    final double standardDev = udaf.map(agg);
    assertThat(standardDev, equalTo(0.0));
}
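The aggregate struct (SUM, COUNT, M2) suggests a sum-of-squared-deviations accumulator. Welford's online algorithm is the standard way to maintain M2 incrementally; this is a generic sketch of that technique, not the ksqlDB implementation:

```java
public class Welford {
    private long count;
    private double mean;
    private double m2; // running sum of squared deviations from the mean

    public void add(double x) {
        count++;
        double delta = x - mean;
        mean += delta / count;
        m2 += delta * (x - mean); // note: uses the *updated* mean
    }

    // Sample standard deviation; 0.0 for fewer than two values, matching
    // the empty-aggregate behavior asserted in the test above.
    public double sampleStdDev() {
        return count < 2 ? 0.0 : Math.sqrt(m2 / (count - 1));
    }

    public static void main(String[] args) {
        Welford w = new Welford();
        for (double x : new double[] {2, 4, 4, 4, 5, 5, 7, 9}) w.add(x);
        System.out.println(w.sampleStdDev()); // ~2.138
    }
}
```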
@Override public void close() { stopAsync(); awaitTerminated(); }
@Test public void constructAndClose() throws IOException { WalletAppKit kit = new WalletAppKit(REGTEST, P2WPKH, BIP43, tmpFolder.newFolder(), filePrefix); kit.close(); }
public Date getGregorianDate() { return DateUtil.date(getGregorianCalendar()); }
@Test public void getGregorianDateTest(){ // https://gitee.com/dromara/hutool/issues/I4ZSGJ ChineseDate chineseDate = new ChineseDate(1998, 5, 1); assertEquals("1998-06-24 00:00:00", chineseDate.getGregorianDate().toString()); chineseDate = new ChineseDate(1998, 5, 1, false); assertEquals("1998-05-26 00:00:00", chineseDate.getGregorianDate().toString()); }
@Override public ListView<String> getServiceList(int pageNo, int pageSize, String groupName, AbstractSelector selector) throws NacosException { Map<String, String> params = new HashMap<>(16); params.put("pageNo", String.valueOf(pageNo)); params.put("pageSize", String.valueOf(pageSize)); params.put(CommonParams.NAMESPACE_ID, namespaceId); params.put(CommonParams.GROUP_NAME, groupName); if (selector != null) { switch (SelectorType.valueOf(selector.getType())) { case none: break; case label: ExpressionSelector expressionSelector = (ExpressionSelector) selector; params.put(SELECTOR_PARAM, JacksonUtils.toJson(expressionSelector)); break; default: break; } } String result = reqApi(UtilAndComs.nacosUrlBase + "/service/list", params, HttpMethod.GET); JsonNode json = JacksonUtils.toObj(result); ListView<String> listView = new ListView<>(); listView.setCount(json.get("count").asInt()); listView.setData(JacksonUtils.toObj(json.get("doms").toString(), new TypeReference<List<String>>() { })); return listView; }
@Test void testGetServiceList() throws Exception { //given NacosRestTemplate nacosRestTemplate = mock(NacosRestTemplate.class); HttpRestResult<Object> a = new HttpRestResult<Object>(); a.setData("{\"count\":2,\"doms\":[\"aaa\",\"bbb\"]}"); a.setCode(200); when(nacosRestTemplate.exchangeForm(any(), any(), any(), any(), any(), any())).thenReturn(a); final Field nacosRestTemplateField = NamingHttpClientProxy.class.getDeclaredField("nacosRestTemplate"); nacosRestTemplateField.setAccessible(true); nacosRestTemplateField.set(clientProxy, nacosRestTemplate); String groupName = "group1"; //when ListView<String> serviceList = clientProxy.getServiceList(1, 10, groupName, new NoneSelector()); //then verify(nacosRestTemplate, times(1)).exchangeForm(endsWith("/service/list"), any(), any(), any(), eq(HttpMethod.GET), any()); assertEquals(2, serviceList.getCount()); assertEquals("aaa", serviceList.getData().get(0)); assertEquals("bbb", serviceList.getData().get(1)); }
public static UserOperatorConfig buildFromMap(Map<String, String> map) { Map<String, String> envMap = new HashMap<>(map); envMap.keySet().retainAll(UserOperatorConfig.keyNames()); Map<String, Object> generatedMap = ConfigParameter.define(envMap, CONFIG_VALUES); return new UserOperatorConfig(generatedMap); }
@Test public void testFromMapCaNameEnvVarMissingThrows() { Map<String, String> envVars = new HashMap<>(UserOperatorConfigTest.ENV_VARS); envVars.remove(UserOperatorConfig.CA_CERT_SECRET_NAME.key()); assertThrows(InvalidConfigurationException.class, () -> UserOperatorConfig.buildFromMap(envVars)); }
public Capabilities getCapabilities(String pluginId) { return pluginRequestHelper.submitRequest(pluginId, REQUEST_GET_CAPABILITIES, new DefaultPluginInteractionCallback<>() { @Override public Capabilities onSuccess(String responseBody, Map<String, String> responseHeaders, String resolvedExtensionVersion) { return getMessageConverter(resolvedExtensionVersion).getCapabilitiesFromResponseBody(responseBody); } }); }
@Test public void shouldTalkToPlugin_To_GetCapabilities() throws Exception { String responseBody = """ { "supported_analytics": [ {"type": "dashboard", "id": "abc", "title": "Title 1"}, {"type": "pipeline", "id": "abc", "title": "Title 1"} ]}"""; when(pluginManager.submitTo(eq(PLUGIN_ID), eq(ANALYTICS_EXTENSION), requestArgumentCaptor.capture())).thenReturn(new DefaultGoPluginApiResponse(SUCCESS_RESPONSE_CODE, responseBody)); com.thoughtworks.go.plugin.domain.analytics.Capabilities capabilities = analyticsExtension.getCapabilities(PLUGIN_ID); assertRequest(requestArgumentCaptor.getValue(), PluginConstants.ANALYTICS_EXTENSION, "1.0", REQUEST_GET_CAPABILITIES, null); assertThat(capabilities.supportedDashboardAnalytics(), containsInAnyOrder(new SupportedAnalytics("dashboard", "abc", "Title 1"))); assertThat(capabilities.supportedPipelineAnalytics(), containsInAnyOrder(new SupportedAnalytics("pipeline", "abc", "Title 1"))); }
public static boolean isViewSelfVisible(View view) { if (view == null || view.getWindowVisibility() == View.GONE) { return false; } if (WindowHelper.isDecorView(view.getClass())) { return true; } if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.HONEYCOMB) { boolean viewLocalVisiable = view.getLocalVisibleRect(new Rect()); if (view.getWidth() <= 0 || view.getHeight() <= 0 || view.getAlpha() <= 0.0f || !viewLocalVisiable) { return false; } } if ((view.getVisibility() == View.VISIBLE || view.getAnimation() == null || !view.getAnimation().getFillAfter()) && view.getVisibility() != View.VISIBLE) { return false; } return true; }
@Test
public void isViewSelfVisible() {
    CheckBox textView1 = new CheckBox(mApplication);
    textView1.setVisibility(View.VISIBLE);
    // A freshly constructed view is not attached to any window (getWindowVisibility() returns GONE)
    // and has zero width/height, so it is reported as not self-visible despite setVisibility(VISIBLE).
    Assert.assertFalse(SAViewUtils.isViewSelfVisible(textView1));
}
@Override public void close() throws IOException { boolean triedToClose = false, success = false; try { flush(); ((FileOutputStream)out).getChannel().force(true); triedToClose = true; super.close(); success = true; } finally { if (success) { boolean renamed = tmpFile.renameTo(origFile); if (!renamed) { // On windows, renameTo does not replace. if (origFile.exists()) { try { Files.delete(origFile.toPath()); } catch (IOException e) { throw new IOException("Could not delete original file " + origFile, e); } } try { NativeIO.renameTo(tmpFile, origFile); } catch (NativeIOException e) { throw new IOException("Could not rename temporary file " + tmpFile + " to " + origFile + " due to failure in native rename. " + e.toString()); } } } else { if (!triedToClose) { // If we failed when flushing, try to close it to not leak an FD IOUtils.closeStream(out); } // close wasn't successful, try to delete the tmp file if (!tmpFile.delete()) { LOG.warn("Unable to delete tmp file " + tmpFile); } } } }
@Test public void testOverwriteFile() throws IOException { assertTrue("Creating empty dst file", DST_FILE.createNewFile()); OutputStream fos = new AtomicFileOutputStream(DST_FILE); assertTrue("Empty file still exists", DST_FILE.exists()); fos.write(TEST_STRING.getBytes()); fos.flush(); // Original contents still in place assertEquals("", DFSTestUtil.readFile(DST_FILE)); fos.close(); // New contents replace original file String readBackData = DFSTestUtil.readFile(DST_FILE); assertEquals(TEST_STRING, readBackData); }
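The stream above implements write-to-temp-then-rename so readers never observe a partially written file. A java.nio sketch of the same pattern, not the Hadoop class:

```java
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import static java.nio.file.StandardCopyOption.ATOMIC_MOVE;
import static java.nio.file.StandardCopyOption.REPLACE_EXISTING;
import static java.nio.file.StandardOpenOption.CREATE;
import static java.nio.file.StandardOpenOption.TRUNCATE_EXISTING;
import static java.nio.file.StandardOpenOption.WRITE;

public class AtomicWrite {
    static void atomicWrite(Path target, byte[] data) throws IOException {
        Path tmp = target.resolveSibling(target.getFileName() + ".tmp");
        try (FileChannel ch = FileChannel.open(tmp, WRITE, CREATE, TRUNCATE_EXISTING)) {
            ch.write(ByteBuffer.wrap(data));
            ch.force(true); // flush data to disk before the rename, like the force(true) above
        }
        // Atomically replace the target: readers see old or new contents, never partial.
        Files.move(tmp, target, REPLACE_EXISTING, ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path p = Files.createTempFile("atomic", ".txt");
        atomicWrite(p, "hello world".getBytes());
        System.out.println(Files.readString(p)); // hello world
    }
}
```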
public static void addFileSliceCommonMetrics(List<FileSlice> fileSlices, Map<String, Double> metrics, long defaultBaseFileSize) { int numLogFiles = 0; long totalLogFileSize = 0; long totalIORead = 0; long totalIOWrite = 0; long totalIO = 0; for (FileSlice slice : fileSlices) { numLogFiles += slice.getLogFiles().count(); // Total size of all the log files totalLogFileSize += slice.getLogFiles().map(HoodieLogFile::getFileSize).filter(size -> size >= 0) .reduce(Long::sum).orElse(0L); long baseFileSize = slice.getBaseFile().isPresent() ? slice.getBaseFile().get().getFileSize() : 0L; totalIORead += baseFileSize; // Total write will be similar to the size of the base file totalIOWrite += baseFileSize > 0 ? baseFileSize : defaultBaseFileSize; } // Total read will be the base file + all the log files totalIORead = FSUtils.getSizeInMB(totalIORead + totalLogFileSize); totalIOWrite = FSUtils.getSizeInMB(totalIOWrite); // Total IO will be the IO for read + write totalIO = totalIORead + totalIOWrite; metrics.put(TOTAL_IO_READ_MB, (double) totalIORead); metrics.put(TOTAL_IO_WRITE_MB, (double) totalIOWrite); metrics.put(TOTAL_IO_MB, (double) totalIO); metrics.put(TOTAL_LOG_FILE_SIZE, (double) totalLogFileSize); metrics.put(TOTAL_LOG_FILES, (double) numLogFiles); }
@Test public void testFileSliceMetricUtilsWithoutLogFile() { Map<String, Double> metrics = new HashMap<>(); List<FileSlice> fileSlices = new ArrayList<>(); final long defaultBaseFileSize = 10 * 1024 * 1024; final double epsilon = 1e-5; fileSlices.add(buildFileSlice(15 * 1024 * 1024, new ArrayList<>())); fileSlices.add(buildFileSlice(20 * 1024 * 1024, new ArrayList<>())); fileSlices.add(buildFileSlice(0, new ArrayList<>())); FileSliceMetricUtils.addFileSliceCommonMetrics(fileSlices, metrics, defaultBaseFileSize); assertEquals(35.0, metrics.get(FileSliceMetricUtils.TOTAL_IO_READ_MB), epsilon); assertEquals(45.0, metrics.get(FileSliceMetricUtils.TOTAL_IO_WRITE_MB), epsilon); assertEquals(80.0, metrics.get(FileSliceMetricUtils.TOTAL_IO_MB), epsilon); assertEquals(0.0, metrics.get(FileSliceMetricUtils.TOTAL_LOG_FILE_SIZE), epsilon); assertEquals(0.0, metrics.get(FileSliceMetricUtils.TOTAL_LOG_FILES), epsilon); }
public void findIntersections(Rectangle query, Consumer<T> consumer) { IntArrayList todoNodes = new IntArrayList(levelOffsets.length * degree); IntArrayList todoLevels = new IntArrayList(levelOffsets.length * degree); int rootLevel = levelOffsets.length - 1; int rootIndex = levelOffsets[rootLevel]; if (doesIntersect(query, rootIndex)) { todoNodes.push(rootIndex); todoLevels.push(rootLevel); } while (!todoNodes.isEmpty()) { int nodeIndex = todoNodes.popInt(); int level = todoLevels.popInt(); if (level == 0) { // This is a leaf node consumer.accept(items[nodeIndex / ENVELOPE_SIZE]); } else { int childrenOffset = getChildrenOffset(nodeIndex, level); for (int i = 0; i < degree; i++) { int childIndex = childrenOffset + ENVELOPE_SIZE * i; if (doesIntersect(query, childIndex)) { todoNodes.push(childIndex); todoLevels.push(level - 1); } } } } }
@Test public void testDoubletonFlatbush() { // This is the smallest Rtree with height > 1 // Also test for some degeneracies Rectangle rect0 = new Rectangle(1, 1, 1, 1); Rectangle rect1 = new Rectangle(-1, -2, -1, -1); List<Rectangle> items = ImmutableList.of(rect0, rect1); Flatbush<Rectangle> rtree = new Flatbush<>(items.toArray(new Rectangle[] {})); List<Rectangle> allResults = findIntersections(rtree, EVERYTHING); assertEqualsSorted(allResults, items, RECTANGLE_COMPARATOR); assertEquals(findIntersections(rtree, new Rectangle(1, 1, 2, 2)), ImmutableList.of(rect0)); assertEquals(findIntersections(rtree, new Rectangle(-2, -2, -1, -2)), ImmutableList.of(rect1)); // This should test missing at the root level assertEquals(findIntersections(rtree, new Rectangle(10, 10, 12, 12)), ImmutableList.of()); // This should test missing at the leaf level assertEquals(findIntersections(rtree, new Rectangle(0, 0, 0, 0)), ImmutableList.of()); }
@Override public Properties info(RedisClusterNode node) { Map<String, String> info = execute(node, RedisCommands.INFO_ALL); Properties result = new Properties(); for (Entry<String, String> entry : info.entrySet()) { result.setProperty(entry.getKey(), entry.getValue()); } return result; }
@Test public void testInfo() { RedisClusterNode master = getFirstMaster(); Properties info = connection.info(master); assertThat(info.size()).isGreaterThan(10); }
public static Object getValueOrCachedValue(Record record, SerializationService serializationService) { Object cachedValue = record.getCachedValueUnsafe(); if (cachedValue == NOT_CACHED) { //record does not support caching at all return record.getValue(); } for (; ; ) { if (cachedValue == null) { Object valueBeforeCas = record.getValue(); if (!shouldCache(valueBeforeCas)) { //it's either a null or value which we do not want to cache. let's just return it. return valueBeforeCas; } Object fromCache = tryStoreIntoCache(record, valueBeforeCas, serializationService); if (fromCache != null) { return fromCache; } } else if (cachedValue instanceof Thread) { //the cachedValue is either locked by another thread or it contains a wrapped thread cachedValue = ThreadWrapper.unwrapOrNull(cachedValue); if (cachedValue != null) { //exceptional case: the cachedValue is not locked, it just contains an instance of Thread. //this can happen when user put an instance of Thread into a map //(=it should never happen, but never say never...) return cachedValue; } //it looks like some other thread actually locked the cachedValue. let's give it another try (iteration) } else { //it's not the 'in-progress' marker/lock && it's not a null -> it has to be the actual cachedValue return cachedValue; } Thread.yield(); cachedValue = record.getCachedValueUnsafe(); } }
@Test public void getValueOrCachedValue_whenRecordIsCachedDataRecordWithStats_thenCache() { String objectPayload = "foo"; Data dataPayload = serializationService.toData(objectPayload); Record record = new CachedDataRecordWithStats(dataPayload); Object firstDeserializedValue = Records.getValueOrCachedValue(record, serializationService); assertEquals(objectPayload, firstDeserializedValue); // we don't need serialization service for the 2nd call Object secondDeserializedValue = Records.getValueOrCachedValue(record, null); assertSame(firstDeserializedValue, secondDeserializedValue); }
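getValueOrCachedValue treats the cached slot as a tri-state: empty, locked by an in-progress marker, or holding the value. A generic AtomicReference sketch of that sentinel-locking idiom; Hazelcast's actual marker is the owning Thread, and this sketch skips the cacheability checks:

```java
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

public class SentinelCache<T> {
    private static final Object IN_PROGRESS = new Object();
    private final AtomicReference<Object> ref = new AtomicReference<>();

    @SuppressWarnings("unchecked")
    public T get(Supplier<T> loader) {
        for (;;) {
            Object v = ref.get();
            if (v == null) {
                // First thread to CAS in the marker computes the value.
                if (ref.compareAndSet(null, IN_PROGRESS)) {
                    T value = loader.get();
                    ref.set(value); // publish the computed value
                    return value;
                    // (a production version would also reset the marker if loader throws)
                }
            } else if (v != IN_PROGRESS) {
                return (T) v;       // already cached
            }
            Thread.yield();         // someone else is computing; retry
        }
    }
}
```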
@Override public CompletableFuture<Void> closeAsync() { synchronized (lock) { if (isShutdown) { return terminationFuture; } else { isShutdown = true; final Collection<CompletableFuture<Void>> terminationFutures = new ArrayList<>(3); final Time gracePeriod = Time.seconds(1L); if (metricQueryServiceRpcService != null) { final CompletableFuture<Void> metricQueryServiceRpcServiceTerminationFuture = metricQueryServiceRpcService.closeAsync(); terminationFutures.add(metricQueryServiceRpcServiceTerminationFuture); } Throwable throwable = null; for (ReporterAndSettings reporterAndSettings : reporters) { try { reporterAndSettings.getReporter().close(); } catch (Throwable t) { throwable = ExceptionUtils.firstOrSuppressed(t, throwable); } } reporters.clear(); if (throwable != null) { terminationFutures.add( FutureUtils.completedExceptionally( new FlinkException( "Could not shut down the metric reporters properly.", throwable))); } final CompletableFuture<Void> reporterExecutorShutdownFuture = ExecutorUtils.nonBlockingShutdown( gracePeriod.toMilliseconds(), TimeUnit.MILLISECONDS, reporterScheduledExecutor); terminationFutures.add(reporterExecutorShutdownFuture); final CompletableFuture<Void> viewUpdaterExecutorShutdownFuture = ExecutorUtils.nonBlockingShutdown( gracePeriod.toMilliseconds(), TimeUnit.MILLISECONDS, viewUpdaterScheduledExecutor); terminationFutures.add(viewUpdaterExecutorShutdownFuture); FutureUtils.completeAll(terminationFutures) .whenComplete( (Void ignored, Throwable error) -> { if (error != null) { terminationFuture.completeExceptionally(error); } else { terminationFuture.complete(null); } }); return terminationFuture; } } }
@Test void testConfigurableDelimiterForReportersInGroup() throws Exception { String name = "C"; MetricConfig config1 = new MetricConfig(); config1.setProperty(MetricOptions.REPORTER_SCOPE_DELIMITER.key(), "_"); MetricConfig config2 = new MetricConfig(); config2.setProperty(MetricOptions.REPORTER_SCOPE_DELIMITER.key(), "-"); MetricConfig config3 = new MetricConfig(); config3.setProperty(MetricOptions.REPORTER_SCOPE_DELIMITER.key(), "AA"); Configuration config = new Configuration(); config.set(MetricOptions.SCOPE_NAMING_TM, "A.B"); List<ReporterSetup> reporterConfigurations = Arrays.asList( ReporterSetup.forReporter( "test1", config1, new CollectingMetricsReporter()), ReporterSetup.forReporter( "test2", config2, new CollectingMetricsReporter()), ReporterSetup.forReporter( "test3", config3, new CollectingMetricsReporter()), ReporterSetup.forReporter("test4", new CollectingMetricsReporter())); MetricRegistryImpl registry = new MetricRegistryImpl( MetricRegistryTestUtils.fromConfiguration(config), reporterConfigurations); TaskManagerMetricGroup group = TaskManagerMetricGroup.createTaskManagerMetricGroup( registry, "host", new ResourceID("id")); group.counter(name); group.close(); registry.closeAsync().get(); for (ReporterSetup cfg : reporterConfigurations) { String delimiter = cfg.getConfiguration() .getProperty(MetricOptions.REPORTER_SCOPE_DELIMITER.key()); if (delimiter == null || delimiter.equals("AA")) { // test3 reporter: 'AA' - not correct // for test4 reporter use global delimiter delimiter = String.valueOf(GLOBAL_DEFAULT_DELIMITER); } String expected = (config.get(MetricOptions.SCOPE_NAMING_TM) + ".C").replaceAll("\\.", delimiter); CollectingMetricsReporter reporter = (CollectingMetricsReporter) cfg.getReporter(); for (MetricGroupAndName groupAndName : Arrays.asList(reporter.findAdded(name), reporter.findRemoved(name))) { assertThat(groupAndName.group.getMetricIdentifier(name)).isEqualTo(expected); assertThat(groupAndName.group.getMetricIdentifier(name, reporter)) .isEqualTo(expected); } } }
@Override public void checkBeforeUpdate(final DropMaskRuleStatement sqlStatement) { if (!sqlStatement.isIfExists()) { checkToBeDroppedMaskTableNames(sqlStatement); } }
@Test void assertCheckSQLStatementWithoutToBeDroppedRule() { MaskRule rule = mock(MaskRule.class); when(rule.getConfiguration()).thenReturn(new MaskRuleConfiguration(Collections.emptyList(), Collections.emptyMap())); executor.setRule(rule); assertThrows(MissingRequiredRuleException.class, () -> executor.checkBeforeUpdate(createSQLStatement(false, "t_mask"))); }
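// Hedged sketch (hypothetical names, not the ShardingSphere API): the executor above skips
// existence validation when the statement carries IF EXISTS, which is exactly what the test
// exercises with createSQLStatement(false, "t_mask"). The same guard in miniature:
import java.util.Set;

class DropRuleGuardSketch {
    static void checkBeforeUpdate(boolean ifExists, String table, Set<String> existingRules) {
        if (!ifExists && !existingRules.contains(table)) {
            // stands in for MissingRequiredRuleException
            throw new IllegalStateException("Missing required mask rule for table: " + table);
        }
    }

    public static void main(String[] args) {
        checkBeforeUpdate(true, "t_mask", Set.of()); // IF EXISTS: no check, no error
        try {
            checkBeforeUpdate(false, "t_mask", Set.of());
        } catch (IllegalStateException expected) {
            System.out.println(expected.getMessage()); // a missing rule is only an error without IF EXISTS
        }
    }
}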
@Override public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler) { if (GraceContext.INSTANCE.getStartWarmUpTime() == 0) { GraceContext.INSTANCE.setStartWarmUpTime(System.currentTimeMillis()); } addGraceAddress(request); final GraceShutDownManager graceShutDownManager = GraceContext.INSTANCE.getGraceShutDownManager(); graceShutDownManager.increaseRequestCount(); if (graceShutDownManager.isShutDown() && graceConfig.isEnableGraceShutdown()) { // It has been marked as closed, and the number of incoming requests has been counted final ClientInfo clientInfo = RegisterContext.INSTANCE.getClientInfo(); response.addHeader(GraceConstants.MARK_SHUTDOWN_SERVICE_ENDPOINT, buildEndpoint(clientInfo.getIp(), clientInfo.getPort())); response.addHeader(GraceConstants.MARK_SHUTDOWN_SERVICE_ENDPOINT, buildEndpoint(clientInfo.getHost(), clientInfo.getPort())); response.addHeader(GraceConstants.MARK_SHUTDOWN_SERVICE_NAME, clientInfo.getServiceName()); } return true; }
@Test public void preHandle() { GraceContext.INSTANCE.getGraceShutDownManager().setShutDown(true); interceptor.preHandle(request, response, new Object()); Assert.assertTrue(GraceContext.INSTANCE.getStartWarmUpTime() > 0); Assert.assertTrue(addresses.contains(testAddress)); Assert.assertTrue(GraceContext.INSTANCE.getGraceShutDownManager().getRequestCount() > 0); Mockito.verify(response, Mockito.atLeastOnce()).addHeader(Mockito.anyString(), Mockito.anyString()); GraceContext.INSTANCE.getGraceShutDownManager().setShutDown(false); GraceContext.INSTANCE.setStartWarmUpTime(0); GraceContext.INSTANCE.getGraceShutDownManager().decreaseRequestCount(); }
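// Hedged sketch (stand-in types, not the actual interceptor classes): preHandle above never
// rejects a request; it counts it for graceful-shutdown bookkeeping and, once the instance
// is flagged as shutting down, advertises the departing endpoint back to the caller.
import java.util.concurrent.atomic.AtomicLong;

class GraceShutDownSketch {
    private final AtomicLong requestCount = new AtomicLong();
    private volatile boolean shutDown;

    boolean preHandle(StringBuilder responseHeaders, String ip, int port) {
        requestCount.incrementAndGet(); // mirrors graceShutDownManager.increaseRequestCount()
        if (shutDown) {
            // mirrors the MARK_SHUTDOWN_SERVICE_ENDPOINT response header
            responseHeaders.append("mark-shutdown-endpoint: ").append(ip).append(':').append(port);
        }
        return true; // the request itself always proceeds
    }
}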
@Override public ConsumerConfig build() { ConsumerConfig consumer = new ConsumerConfig(); super.build(consumer); consumer.setDefault(isDefault); consumer.setClient(client); consumer.setThreadpool(threadpool); consumer.setCorethreads(corethreads); consumer.setThreads(threads); consumer.setQueues(queues); consumer.setShareconnections(shareconnections); consumer.setUrlMergeProcessor(urlMergeProcessor); return consumer; }
@Test void build() { ConsumerBuilder builder = ConsumerBuilder.newBuilder(); builder.isDefault(true) .client("client") .threadPool("threadPool") .coreThreads(10) .threads(100) .queues(200) .shareConnections(300) .id("id"); ConsumerConfig config = builder.build(); ConsumerConfig config2 = builder.build(); Assertions.assertTrue(config.isDefault()); Assertions.assertEquals("client", config.getClient()); Assertions.assertEquals("threadPool", config.getThreadpool()); Assertions.assertEquals("id", config.getId()); Assertions.assertEquals(10, config.getCorethreads()); Assertions.assertEquals(100, config.getThreads()); Assertions.assertEquals(200, config.getQueues()); Assertions.assertEquals(300, config.getShareconnections()); Assertions.assertNotSame(config, config2); }
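// Hedged sketch of the property the test pins down with assertNotSame(config, config2):
// build() constructs a fresh ConsumerConfig on every call, so the builder stays reusable.
// Generic builder with the same shape (hypothetical class names):
class ThreadsConfig {
    int threads;
}

class ThreadsConfigBuilder {
    private int threads;

    ThreadsConfigBuilder threads(int threads) {
        this.threads = threads;
        return this;
    }

    ThreadsConfig build() {
        ThreadsConfig config = new ThreadsConfig(); // new instance per call, never a cached one
        config.threads = threads;
        return config;
    }
}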
@Override public AttributedList<Path> read(final Path directory, final List<String> replies) throws FTPInvalidListException { final AttributedList<Path> children = new AttributedList<>(); if(replies.isEmpty()) { return children; } // At least one entry successfully parsed boolean success = false; for(String line : replies) { final Map<String, Map<String, String>> file = this.parseFacts(line); if(null == file) { log.error(String.format("Error parsing line %s", line)); continue; } for(Map.Entry<String, Map<String, String>> f : file.entrySet()) { final String name = f.getKey(); // size -- Size in octets // modify -- Last modification time // create -- Creation time // type -- Entry type // unique -- Unique id of file/directory // perm -- File permissions, whether read, write, execute is allowed for the login id. // lang -- Language of the file name per IANA [11] registry. // media-type -- MIME media-type of file contents per IANA registry. // charset -- Character set per IANA registry (if not UTF-8) final Map<String, String> facts = f.getValue(); if(!facts.containsKey("type")) { log.error(String.format("No type fact in line %s", line)); continue; } final Path parsed; if("dir".equals(facts.get("type").toLowerCase(Locale.ROOT))) { parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.directory)); } else if("file".equals(facts.get("type").toLowerCase(Locale.ROOT))) { parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.file)); } else if(facts.get("type").toLowerCase(Locale.ROOT).matches("os\\.unix=slink:.*")) { parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.file, Path.Type.symboliclink)); // Parse symbolic link target in Type=OS.unix=slink:/foobar;Perm=;Unique=keVO1+4G4; foobar final String[] type = facts.get("type").split(":"); if(type.length == 2) { final String target = type[1]; if(target.startsWith(String.valueOf(Path.DELIMITER))) { parsed.setSymlinkTarget(new Path(PathNormalizer.normalize(target), EnumSet.of(Path.Type.file))); } else { parsed.setSymlinkTarget(new Path(PathNormalizer.normalize(String.format("%s/%s", directory.getAbsolute(), target)), EnumSet.of(Path.Type.file))); } } else { log.warn(String.format("Missing symbolic link target for type %s in line %s", facts.get("type"), line)); continue; } } else { log.warn(String.format("Ignored type %s in line %s", facts.get("type"), line)); continue; } if(!success) { if(parsed.isDirectory() && directory.getName().equals(name)) { log.warn(String.format("Possibly bogus response line %s", line)); } else { success = true; } } if(name.equals(".") || name.equals("..")) { if(log.isDebugEnabled()) { log.debug(String.format("Skip %s", name)); } continue; } if(facts.containsKey("size")) { parsed.attributes().setSize(Long.parseLong(facts.get("size"))); } if(facts.containsKey("unix.uid")) { parsed.attributes().setOwner(facts.get("unix.uid")); } if(facts.containsKey("unix.owner")) { parsed.attributes().setOwner(facts.get("unix.owner")); } if(facts.containsKey("unix.gid")) { parsed.attributes().setGroup(facts.get("unix.gid")); } if(facts.containsKey("unix.group")) { parsed.attributes().setGroup(facts.get("unix.group")); } if(facts.containsKey("unix.mode")) { parsed.attributes().setPermission(new Permission(facts.get("unix.mode"))); } else if(facts.containsKey("perm")) { if(PreferencesFactory.get().getBoolean("ftp.parser.mlsd.perm.enable")) { Permission.Action user = Permission.Action.none; final String flags = facts.get("perm"); if(StringUtils.contains(flags, 'r') || StringUtils.contains(flags, 'l')) { // RETR command may be applied to that object // Listing commands, LIST, NLST, and MLSD may be applied user = user.or(Permission.Action.read); } if(StringUtils.contains(flags, 'w') || StringUtils.contains(flags, 'm') || StringUtils.contains(flags, 'c')) { user = user.or(Permission.Action.write); } if(StringUtils.contains(flags, 'e')) { // CWD command naming the object should succeed user = user.or(Permission.Action.execute); if(parsed.isDirectory()) { user = user.or(Permission.Action.read); } } final Permission permission = new Permission(user, Permission.Action.none, Permission.Action.none); parsed.attributes().setPermission(permission); } } if(facts.containsKey("modify")) { // Time values are always represented in UTC parsed.attributes().setModificationDate(this.parseTimestamp(facts.get("modify"))); } if(facts.containsKey("create")) { // Time values are always represented in UTC parsed.attributes().setCreationDate(this.parseTimestamp(facts.get("create"))); } children.add(parsed); } } if(!success) { throw new FTPInvalidListException(children); } return children; }
@Test public void testParseWhitespaceInResponseLine() throws Exception { Path path = new Path("/", EnumSet.of(Path.Type.directory)); String[] replies = new String[]{ " Type=file;Size=38955938;Modify=20230328150158.830; IMG_2625–JK.psd" }; final AttributedList<Path> children = new FTPMlsdListResponseReader().read(path, Arrays.asList(replies)); assertEquals(1, children.size()); assertEquals("IMG_2625–JK.psd", children.get(0).getName()); }
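// Hedged sketch (simplified, not the library's parser): an MLSD reply has the shape
// "fact=value;fact=value; name" - facts up to the separator space, file name after it.
// Trimming first is what lets a reply with leading whitespace, as in the test above, parse.
import java.util.LinkedHashMap;
import java.util.Locale;
import java.util.Map;

class MlsdFactSketch {
    static Map<String, String> parse(String reply) {
        String line = reply.trim(); // tolerate leading whitespace in the reply
        int separator = line.indexOf(' ');
        Map<String, String> facts = new LinkedHashMap<>();
        for (String fact : line.substring(0, separator).split(";")) {
            int eq = fact.indexOf('=');
            if (eq > 0) {
                facts.put(fact.substring(0, eq).toLowerCase(Locale.ROOT), fact.substring(eq + 1));
            }
        }
        facts.put("name", line.substring(separator + 1));
        return facts;
    }

    public static void main(String[] args) {
        System.out.println(parse(" Type=file;Size=38955938;Modify=20230328150158.830; IMG_2625.psd"));
        // -> {type=file, size=38955938, modify=20230328150158.830, name=IMG_2625.psd}
    }
}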
AclBinding targetAclBinding(AclBinding sourceAclBinding) { String targetTopic = formatRemoteTopic(sourceAclBinding.pattern().name()); final AccessControlEntry entry; if (sourceAclBinding.entry().permissionType() == AclPermissionType.ALLOW && sourceAclBinding.entry().operation() == AclOperation.ALL) { entry = downgradeAllowAllACL(sourceAclBinding.entry()); } else { entry = sourceAclBinding.entry(); } return new AclBinding(new ResourcePattern(ResourceType.TOPIC, targetTopic, PatternType.LITERAL), entry); }
@Test public void testAclTransformation() { MirrorSourceConnector connector = new MirrorSourceConnector(new SourceAndTarget("source", "target"), new DefaultReplicationPolicy(), x -> true, getConfigPropertyFilter()); AclBinding allowAllAclBinding = new AclBinding( new ResourcePattern(ResourceType.TOPIC, "test_topic", PatternType.LITERAL), new AccessControlEntry("kafka", "", AclOperation.ALL, AclPermissionType.ALLOW)); AclBinding processedAllowAllAclBinding = connector.targetAclBinding(allowAllAclBinding); String expectedRemoteTopicName = "source" + DefaultReplicationPolicy.SEPARATOR_DEFAULT + allowAllAclBinding.pattern().name(); assertEquals(expectedRemoteTopicName, processedAllowAllAclBinding.pattern().name(), "should change topic name"); assertEquals(processedAllowAllAclBinding.entry().operation(), AclOperation.READ, "should change ALL to READ"); assertEquals(processedAllowAllAclBinding.entry().permissionType(), AclPermissionType.ALLOW, "should not change ALLOW"); AclBinding denyAllAclBinding = new AclBinding( new ResourcePattern(ResourceType.TOPIC, "test_topic", PatternType.LITERAL), new AccessControlEntry("kafka", "", AclOperation.ALL, AclPermissionType.DENY)); AclBinding processedDenyAllAclBinding = connector.targetAclBinding(denyAllAclBinding); assertEquals(processedDenyAllAclBinding.entry().operation(), AclOperation.ALL, "should not change ALL"); assertEquals(processedDenyAllAclBinding.entry().permissionType(), AclPermissionType.DENY, "should not change DENY"); }
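// Hedged sketch of the downgrade rule the test verifies: ALLOW+ALL on the source topic is
// narrowed to ALLOW+READ on the mirror (consumers may read the replica, nothing may write
// it), while DENY bindings pass through untouched. Enum stand-ins, not the Kafka types:
enum AclOp { ALL, READ }

enum AclPerm { ALLOW, DENY }

class AclDowngradeSketch {
    static AclOp targetOperation(AclPerm permission, AclOp operation) {
        return (permission == AclPerm.ALLOW && operation == AclOp.ALL) ? AclOp.READ : operation;
    }

    public static void main(String[] args) {
        System.out.println(targetOperation(AclPerm.ALLOW, AclOp.ALL)); // READ
        System.out.println(targetOperation(AclPerm.DENY, AclOp.ALL));  // ALL
    }
}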
@SuppressWarnings("FutureReturnValueIgnored") public void start() { running.set(true); configFetcher.start(); memoryMonitor.start(); streamingWorkerHarness.start(); sampler.start(); workerStatusReporter.start(); activeWorkRefresher.start(); }
@Test public void testExceptionInvalidatesCache() throws Exception { // We'll need to force the system to limit bundles to one message at a time. // Sequence is as follows: // 01. GetWork[0] (token 0) // 02. Create counter reader // 03. Counter yields 0 // 04. GetData[0] (state as null) // 05. Read state as null // 06. Set state as 42 // 07. THROW on taking counter reader checkpoint // 08. Create counter reader // 09. Counter yields 0 // 10. GetData[1] (state as null) // 11. Read state as null (*** not 42 ***) // 12. Take counter reader checkpoint as 0 // 13. CommitWork[0] (message 0:0, state 42, checkpoint 0) // 14. GetWork[1] (token 1, checkpoint as 0) // 15. Counter yields 1 // 16. Read (cached) state as 42 // 17. Take counter reader checkpoint 1 // 18. CommitWork[1] (message 0:1, checkpoint 1) // 19. GetWork[2] (token 2, checkpoint as 1) // 20. Counter yields 2 // 21. THROW on processElement // 22. Recreate reader from checkpoint 1 // 23. Counter yields 2 (*** not eof ***) // 24. GetData[2] (state as 42) // 25. Read state as 42 // 26. Take counter reader checkpoint 2 // 27. CommitWork[2] (message 0:2, checkpoint 2) server.setExpectedExceptionCount(2); DataflowPipelineOptions options = createTestingPipelineOptions(); options.setNumWorkers(1); options.setUnboundedReaderMaxElements(1); CloudObject codec = CloudObjects.asCloudObject( WindowedValue.getFullCoder( ValueWithRecordId.ValueWithRecordIdCoder.of( KvCoder.of(VarIntCoder.of(), VarIntCoder.of())), GlobalWindow.Coder.INSTANCE), /* sdkComponents= */ null); TestCountingSource counter = new TestCountingSource(3).withThrowOnFirstSnapshot(true); List<ParallelInstruction> instructions = Arrays.asList( new ParallelInstruction() .setOriginalName("OriginalReadName") .setSystemName("Read") .setName(DEFAULT_PARDO_USER_NAME) .setRead( new ReadInstruction() .setSource( CustomSources.serializeToCloudSource(counter, options).setCodec(codec))) .setOutputs( Collections.singletonList( new InstructionOutput() .setName("read_output") .setOriginalName(DEFAULT_OUTPUT_ORIGINAL_NAME) .setSystemName(DEFAULT_OUTPUT_SYSTEM_NAME) .setCodec(codec))), makeDoFnInstruction( new TestExceptionInvalidatesCacheFn(), 0, StringUtf8Coder.of(), WindowingStrategy.globalDefault()), makeSinkInstruction(StringUtf8Coder.of(), 1, GlobalWindow.Coder.INSTANCE)); StreamingDataflowWorker worker = makeWorker( defaultWorkerParams() .setInstructions(instructions) .setOptions(options.as(DataflowWorkerHarnessOptions.class)) .setLocalRetryTimeoutMs(100) .publishCounters() .build()); worker.start(); // Three GetData requests for (int i = 0; i < 3; i++) { ByteString state; if (i == 0 || i == 1) { state = ByteString.EMPTY; } else { state = ByteString.copyFrom(new byte[] {42}); } Windmill.GetDataResponse.Builder dataResponse = Windmill.GetDataResponse.newBuilder(); dataResponse .addDataBuilder() .setComputationId(DEFAULT_COMPUTATION_ID) .addDataBuilder() .setKey(ByteString.copyFromUtf8("0000000000000001")) .setShardingKey(1) .addValuesBuilder() .setTag(ByteString.copyFromUtf8("//+uint")) .setStateFamily(DEFAULT_PARDO_STATE_FAMILY) .getValueBuilder() .setTimestamp(0) .setData(state); server.whenGetDataCalled().thenReturn(dataResponse.build()); } // Three GetWork requests and commits for (int i = 0; i < 3; i++) { StringBuilder sb = new StringBuilder(); sb.append("work {\n"); sb.append(" computation_id: \"computation\"\n"); sb.append(" input_data_watermark: 0\n"); sb.append(" work {\n"); sb.append(" key: \"0000000000000001\"\n"); sb.append(" sharding_key: 1\n"); sb.append(" work_token: "); sb.append(i); sb.append(" cache_token: 1"); sb.append("\n"); if (i > 0) { int previousCheckpoint = i - 1; sb.append(" source_state {\n"); sb.append(" state: \""); sb.append((char) previousCheckpoint); sb.append("\"\n"); // We'll elide the finalize ids since it's not necessary to trigger the finalizer // for this test. sb.append(" }\n"); } sb.append(" }\n"); sb.append("}\n"); server.whenGetWorkCalled().thenReturn(buildInput(sb.toString(), null)); Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1); Windmill.WorkItemCommitRequest commit = result.get((long) i); UnsignedLong finalizeId = UnsignedLong.fromLongBits(commit.getSourceStateUpdates().getFinalizeIds(0)); sb = new StringBuilder(); sb.append("key: \"0000000000000001\"\n"); sb.append("sharding_key: 1\n"); sb.append("work_token: "); sb.append(i); sb.append("\n"); sb.append("cache_token: 1\n"); sb.append("output_messages {\n"); sb.append(" destination_stream_id: \"out\"\n"); sb.append(" bundles {\n"); sb.append(" key: \"0000000000000001\"\n"); int messageNum = i; sb.append(" messages {\n"); sb.append(" timestamp: "); sb.append(messageNum * 1000); sb.append("\n"); sb.append(" data: \"0:"); sb.append(messageNum); sb.append("\"\n"); sb.append(" }\n"); sb.append(" messages_ids: \"\"\n"); sb.append(" }\n"); sb.append("}\n"); if (i == 0) { sb.append("value_updates {\n"); sb.append(" tag: \"//+uint\"\n"); sb.append(" value {\n"); sb.append(" timestamp: 0\n"); sb.append(" data: \""); sb.append((char) 42); sb.append("\"\n"); sb.append(" }\n"); sb.append(" state_family: \"parDoStateFamily\"\n"); sb.append("}\n"); } int sourceState = i; sb.append("source_state_updates {\n"); sb.append(" state: \""); sb.append((char) sourceState); sb.append("\"\n"); sb.append(" finalize_ids: "); sb.append(finalizeId); sb.append("}\n"); sb.append("source_watermark: "); sb.append((sourceState + 1) * 1000); sb.append("\n"); sb.append("source_backlog_bytes: 7\n"); assertThat( // The commit will include a timer to clean up state - this timer is irrelevant // for the current test. Also remove source_bytes_processed because it's dynamic. setValuesTimestamps( removeDynamicFields(commit) .toBuilder() .clearOutputTimers() .clearSourceBytesProcessed()) .build(), equalTo( setMessagesMetadata( PaneInfo.NO_FIRING, CoderUtils.encodeToByteArray( CollectionCoder.of(GlobalWindow.Coder.INSTANCE), ImmutableList.of(GlobalWindow.INSTANCE)), parseCommitRequest(sb.toString())) .build())); } }
static ClockImpl createClock() { String clockImplClassName = System.getProperty(ClockProperties.HAZELCAST_CLOCK_IMPL); if (clockImplClassName != null) { try { return ClassLoaderUtil.newInstance(null, clockImplClassName); } catch (Exception e) { throw rethrow(e); } } String clockOffset = System.getProperty(ClockProperties.HAZELCAST_CLOCK_OFFSET); long offset = 0L; if (clockOffset != null) { try { offset = Long.parseLong(clockOffset); } catch (NumberFormatException e) { throw rethrow(e); } } if (offset != 0L) { return new SystemOffsetClock(offset); } return new SystemClock(); }
@Test public void testCreateClock_withClockImpl() { setJumpingClock(30); Clock.ClockImpl clock = Clock.createClock(); assertInstanceOf(JumpingSystemClock.class, clock); }
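// Hedged sketch of the selection order in createClock() above: an explicit implementation
// class (instantiated reflectively in the real code) wins, then a non-zero offset selects
// the offset clock, otherwise the plain system clock is used. Strings stand in for classes;
// the class name in main is hypothetical:
class ClockSelectionSketch {
    static String select(String implClassName, long offsetMillis) {
        if (implClassName != null) {
            return implClassName; // HAZELCAST_CLOCK_IMPL takes precedence
        }
        if (offsetMillis != 0L) {
            return "SystemOffsetClock(" + offsetMillis + ")"; // HAZELCAST_CLOCK_OFFSET
        }
        return "SystemClock";
    }

    public static void main(String[] args) {
        System.out.println(select("com.example.JumpingSystemClock", 30)); // impl class wins over offset
        System.out.println(select(null, 30)); // SystemOffsetClock(30)
        System.out.println(select(null, 0));  // SystemClock
    }
}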
@Override public Optional<DatabaseAdminExecutor> create(final SQLStatementContext sqlStatementContext) { SQLStatement sqlStatement = sqlStatementContext.getSqlStatement(); if (sqlStatement instanceof ShowFunctionStatusStatement) { return Optional.of(new ShowFunctionStatusExecutor((ShowFunctionStatusStatement) sqlStatement)); } if (sqlStatement instanceof ShowProcedureStatusStatement) { return Optional.of(new ShowProcedureStatusExecutor((ShowProcedureStatusStatement) sqlStatement)); } if (sqlStatement instanceof ShowTablesStatement) { return Optional.of(new ShowTablesExecutor((ShowTablesStatement) sqlStatement, sqlStatementContext.getDatabaseType())); } return Optional.empty(); }
@Test void assertCreateWithMySQLShowDatabasesStatement() { when(sqlStatementContext.getSqlStatement()).thenReturn(new MySQLShowDatabasesStatement()); Optional<DatabaseAdminExecutor> actual = new MySQLAdminExecutorCreator().create(sqlStatementContext, "", "", Collections.emptyList()); assertTrue(actual.isPresent()); assertThat(actual.get(), instanceOf(ShowDatabasesExecutor.class)); }
public static String buildWebApplicationRootUrl(NetworkService networkService) { checkNotNull(networkService); if (!isWebService(networkService)) { return "http://" + NetworkEndpointUtils.toUriAuthority(networkService.getNetworkEndpoint()) + "/"; } String rootUrl = (isPlainHttp(networkService) ? "http://" : "https://") + buildWebUriAuthority(networkService) + buildWebAppRootPath(networkService); return rootUrl.endsWith("/") ? rootUrl : rootUrl + "/"; }
@Test public void buildWebApplicationRootUrl_whenHttpsServiceOnPort443_removesTrailingPortFromUrl() { assertThat( NetworkServiceUtils.buildWebApplicationRootUrl( NetworkService.newBuilder() .setNetworkEndpoint(forIpAndPort("127.0.0.1", 443)) .setServiceName("ssl/https") .setServiceContext( ServiceContext.newBuilder() .setWebServiceContext( WebServiceContext.newBuilder().setApplicationRoot("test_root"))) .build())) .isEqualTo("https://127.0.0.1/test_root/"); }
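// Hedged sketch of the default-port rule the test checks: a web URI authority omits the
// port when it equals the scheme default (80 for http, 443 for https), so host:443 over
// https renders as the bare host. Simplified helper, not the library's implementation:
class WebAuthoritySketch {
    static String authority(boolean https, String host, int port) {
        int defaultPort = https ? 443 : 80;
        return port == defaultPort ? host : host + ":" + port;
    }

    public static void main(String[] args) {
        System.out.println("https://" + authority(true, "127.0.0.1", 443) + "/test_root/");
        // -> https://127.0.0.1/test_root/
        System.out.println("https://" + authority(true, "127.0.0.1", 8443) + "/test_root/");
        // -> https://127.0.0.1:8443/test_root/
    }
}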
public T send() throws IOException { return web3jService.send(this, responseType); }
@Test public void testWeb3Sha3() throws Exception { web3j.web3Sha3("0x68656c6c6f20776f726c64").send(); verifyResult( "{\"jsonrpc\":\"2.0\",\"method\":\"web3_sha3\"," + "\"params\":[\"0x68656c6c6f20776f726c64\"],\"id\":1}"); }
public void stop() { try { checkNotNull(httpServer, "httpServer").stop(); checkNotNull(guiceFilter, "guiceFilter").destroy(); } catch (Exception e) { throw new WebAppException(e); } }
@Test void testCreate() { WebApp app = WebApps.$for(this).start(); app.stop(); }
void upload(String json) throws IOException { Request request = buildHttpRequest(serverUrl, json); execute(okHttpClient.newCall(request)); }
@Test void upload() throws IOException { ArgumentCaptor<Request> requestCaptor = ArgumentCaptor.forClass(Request.class); settings.setProperty(SONAR_TELEMETRY_COMPRESSION.getKey(), false); underTest.start(); underTest.upload(JSON); verify(okHttpClient).newCall(requestCaptor.capture()); Request request = requestCaptor.getValue(); assertThat(request.method()).isEqualTo("POST"); assertThat(request.body().contentType()).isEqualTo(MediaType.parse("application/json; charset=utf-8")); Buffer body = new Buffer(); request.body().writeTo(body); assertThat(body.readUtf8()).isEqualTo(JSON); assertThat(request.url()).hasToString(TELEMETRY_URL); }
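// Hedged sketch of the request the test inspects, assuming OkHttp 3.x and its
// RequestBody.create(MediaType, String) overload: an uncompressed POST of the JSON
// payload with the application/json content type. Not the project's actual builder:
import okhttp3.MediaType;
import okhttp3.Request;
import okhttp3.RequestBody;

class TelemetryRequestSketch {
    static Request buildHttpRequest(String serverUrl, String json) {
        RequestBody body = RequestBody.create(MediaType.parse("application/json; charset=utf-8"), json);
        return new Request.Builder().url(serverUrl).post(body).build(); // POST, as the test asserts
    }
}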
@Override public List<String> assignSegment(String segmentName, Map<String, Map<String, String>> currentAssignment, InstancePartitions instancePartitions, InstancePartitionsType instancePartitionsType) { String serverTag = _tenantConfig.getServer(); Set<String> instances = HelixHelper.getServerInstancesForTenant(_helixManager, serverTag); int numInstances = instances.size(); Preconditions.checkState(numInstances > 0, "No instance found with tag: %s or %s", TagNameUtils.getOfflineTagForTenant(serverTag), TagNameUtils.getRealtimeTagForTenant(serverTag)); return new ArrayList<>(instances); }
@Test public void testSegmentAssignmentAndRebalance() { List<HelixProperty> instanceConfigList = new ArrayList<>(); for (String instance : INSTANCES) { ZNRecord znRecord = new ZNRecord(instance); znRecord.setListField(TAG_LIST.name(), ImmutableList.of(OFFLINE_SERVER_TAG, REALTIME_SERVER_TAG)); instanceConfigList.add(new InstanceConfig(znRecord)); } HelixDataAccessor dataAccessor = mock(HelixDataAccessor.class); PropertyKey.Builder builder = new PropertyKey.Builder("cluster"); when(dataAccessor.keyBuilder()).thenReturn(builder); when(dataAccessor.getChildValues(builder.instanceConfigs(), true)).thenReturn(instanceConfigList); when(_helixManager.getHelixDataAccessor()).thenReturn(dataAccessor); List<String> instances = _segmentAssignment.assignSegment(SEGMENT_NAME, new TreeMap<>(), _instancePartitionsMap); assertEquals(instances.size(), NUM_INSTANCES); assertEqualsNoOrder(instances.toArray(), INSTANCES.toArray()); // Remove one instance and rebalance table Map<String, Map<String, String>> currentAssignment = new TreeMap<>(); Map<String, String> segment1Assignment = new TreeMap<>(); instances.forEach(instance -> segment1Assignment.put(instance, "ONLINE")); currentAssignment.put(SEGMENT_NAME, segment1Assignment); ZNRecord znRecord = new ZNRecord(instanceConfigList.get(0).getId()); znRecord.setListField(TAG_LIST.name(), ImmutableList.of(BROKER_TAG)); InstanceConfig newInstanceConfig = new InstanceConfig(znRecord); instanceConfigList.set(0, newInstanceConfig); when(dataAccessor.getChildValues(builder.instanceConfigs(), true)).thenReturn(instanceConfigList); Map<String, Map<String, String>> newAssignment = _segmentAssignment.rebalanceTable(currentAssignment, _instancePartitionsMap, null, null, new RebalanceConfig()); assertEquals(newAssignment.get(SEGMENT_NAME).size(), NUM_INSTANCES - 1); }
@Override public LongMaximum clone() { LongMaximum clone = new LongMaximum(); clone.max = this.max; return clone; }
@Test void testClone() { LongMaximum max = new LongMaximum(); long value = 4242424242424242L; max.add(value); LongMaximum clone = max.clone(); assertThat(clone.getLocalValue().longValue()).isEqualTo(value); }
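// Hedged sketch of why the cloned accumulator is safe to use independently: clone() copies
// the current maximum (a plain long) into a fresh object, so later add() calls on the
// original cannot leak into the clone. Stand-in class, same shape as the focal method:
class MaxAccumulatorSketch {
    private long max = Long.MIN_VALUE;

    void add(long value) {
        if (value > max) {
            max = value;
        }
    }

    long get() {
        return max;
    }

    @Override
    public MaxAccumulatorSketch clone() {
        MaxAccumulatorSketch copy = new MaxAccumulatorSketch();
        copy.max = this.max; // a long is a value, so nothing is shared afterwards
        return copy;
    }

    public static void main(String[] args) {
        MaxAccumulatorSketch original = new MaxAccumulatorSketch();
        original.add(4242424242424242L);
        MaxAccumulatorSketch copy = original.clone();
        original.add(Long.MAX_VALUE);
        System.out.println(copy.get()); // still 4242424242424242
    }
}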
static int mainNoExit(String... args) { try { execute(args); return 0; } catch (HelpScreenException e) { return 0; } catch (ArgumentParserException e) { e.getParser().handleError(e); return 1; } catch (TerseException e) { System.err.println(e.getMessage()); return 1; } catch (Throwable e) { System.err.println(e.getMessage()); System.err.println(Utils.stackTrace(e)); return 1; } }
@Test public void testCommandConfig() throws IOException { // specifying a --command-config containing properties that would prevent login must fail File tmpfile = TestUtils.tempFile(AdminClientConfig.SECURITY_PROTOCOL_CONFIG + "=SSL_PLAINTEXT"); assertEquals(1, MetadataQuorumCommand.mainNoExit("--bootstrap-server", "localhost:9092", "--command-config", tmpfile.getAbsolutePath(), "describe", "--status")); }
public void writeMethodHandle(MethodHandleReference methodHandleReference) throws IOException { writer.write(MethodHandleType.toString(methodHandleReference.getMethodHandleType())); writer.write('@'); Reference memberReference = methodHandleReference.getMemberReference(); if (memberReference instanceof MethodReference) { writeMethodDescriptor((MethodReference)memberReference); } else { writeFieldDescriptor((FieldReference)memberReference); } }
@Test public void testWriteMethodHandle_methodAccess() throws IOException { DexFormattedWriter writer = new DexFormattedWriter(output); writer.writeMethodHandle(getMethodHandleReferenceForMethod()); Assert.assertEquals("invoke-instance@Ldefining/class;->methodName(Lparam1;Lparam2;)Lreturn/type;", output.toString()); }
public static String formatSql(final AstNode root) { final StringBuilder builder = new StringBuilder(); new Formatter(builder).process(root, 0); return StringUtils.stripEnd(builder.toString(), "\n"); }
@Test public void shouldFormatCreateTableStatementWithExplicitKey() { // Given: final CreateTable createTable = new CreateTable( TEST, ELEMENTS_WITH_PRIMARY_KEY, false, false, SOME_WITH_PROPS, false); // When: final String sql = SqlFormatter.formatSql(createTable); // Then: assertThat(sql, is("CREATE TABLE TEST (`k3` STRING PRIMARY KEY, `Foo` STRING) " + "WITH (KAFKA_TOPIC='topic_test', VALUE_FORMAT='JSON');")); }
@Override public void alert(Anomaly anomaly, boolean autoFixTriggered, long selfHealingStartTime, AnomalyType anomalyType) { super.alert(anomaly, autoFixTriggered, selfHealingStartTime, anomalyType); if (_slackWebhook == null) { LOG.warn("Slack webhook is null, can't send Slack self healing notification"); return; } if (_slackChannel == null) { LOG.warn("Slack channel name is null, can't send Slack self healing notification"); return; } String text = String.format("%s%s detected %s. Self healing %s.%s", _slackPreamble, anomalyType, anomaly, _selfHealingEnabled.get(anomalyType) ? String.format("start time %s", utcDateFor(selfHealingStartTime)) : "is disabled", autoFixTriggered ? String.format("%nSelf-healing has been triggered.") : ""); try { sendSlackMessage(new SlackMessage(_slackUser, text, _slackIcon, _slackChannel), _slackWebhook); } catch (IOException e) { LOG.warn("ERROR sending alert to Slack", e); } }
@Test public void testSlackAlertWithDefaultOptions() { _notifier = new MockSlackSelfHealingNotifier(mockTime); _notifier._slackWebhook = "http://dummy.slack.webhook"; _notifier._slackChannel = "#dummy-channel"; _notifier.alert(failures, false, 1L, KafkaAnomalyType.BROKER_FAILURE); assertEquals(1, _notifier.getSlackMessageList().size()); SlackMessage message = _notifier.getSlackMessageList().get(0); assertEquals("#dummy-channel", message.getChannel()); }
public <T> void resolve(T resolvable) { ParamResolver resolver = this; if (ParamScope.class.isAssignableFrom(resolvable.getClass())) { ParamScope newScope = (ParamScope) resolvable; resolver = newScope.applyOver(resolver); } resolveStringLeaves(resolvable, resolver); resolveNonStringLeaves(resolvable, resolver); resolveNodes(resolvable, resolver); }
@Test public void shouldProvideContextWhenAnExceptionOccursBecauseOfHashAtEnd() { PipelineConfig pipelineConfig = PipelineConfigMother.createPipelineConfig("cruise", "dev", "ant"); pipelineConfig.setLabelTemplate("abc#"); new ParamResolver(new ParamSubstitutionHandlerFactory(params(param("foo", "pavan"), param("bar", "jj"))), fieldCache).resolve(pipelineConfig); assertThat(pipelineConfig.errors().on("labelTemplate"), is("Error when processing params for 'abc#' used in field 'labelTemplate', # must be followed by a parameter pattern or escaped by another #")); }
public void resetPositionsIfNeeded() { Map<TopicPartition, Long> offsetResetTimestamps = offsetFetcherUtils.getOffsetResetTimestamp(); if (offsetResetTimestamps.isEmpty()) return; resetPositionsAsync(offsetResetTimestamps); }
@Test public void testResetPositionsMetadataRefresh() { buildFetcher(); assignFromUser(singleton(tp0)); subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.LATEST); // First fetch fails with stale metadata client.prepareResponse(listOffsetRequestMatcher(ListOffsetsRequest.LATEST_TIMESTAMP, validLeaderEpoch), listOffsetResponse(Errors.NOT_LEADER_OR_FOLLOWER, 1L, 5L), false); offsetFetcher.resetPositionsIfNeeded(); consumerClient.pollNoWakeup(); assertFalse(subscriptions.hasValidPosition(tp0)); // Expect a metadata refresh client.prepareMetadataUpdate(initialUpdateResponse); consumerClient.pollNoWakeup(); assertFalse(client.hasPendingMetadataUpdates()); // Next fetch succeeds time.sleep(retryBackoffMs); client.prepareResponse(listOffsetRequestMatcher(ListOffsetsRequest.LATEST_TIMESTAMP), listOffsetResponse(Errors.NONE, 1L, 5L)); offsetFetcher.resetPositionsIfNeeded(); consumerClient.pollNoWakeup(); assertFalse(subscriptions.isOffsetResetNeeded(tp0)); assertTrue(subscriptions.isFetchable(tp0)); assertEquals(5, subscriptions.position(tp0).offset); }
@GetMapping @PreAuthorize("hasAnyAuthority('ADMIN', 'USER')") public CustomResponse<CustomPagingResponse<ProductResponse>> getProducts( @RequestBody @Valid final ProductPagingRequest productPagingRequest) { final CustomPage<Product> productPage = productReadService.getProducts(productPagingRequest); final CustomPagingResponse<ProductResponse> productPagingResponse = customPageToCustomPagingResponseMapper.toPagingResponse(productPage); return CustomResponse.successOf(productPagingResponse); }
@Test void givenProductPagingRequest_whenGetProductsFromUser_thenReturnCustomPageProduct() throws Exception { // Given ProductPagingRequest pagingRequest = ProductPagingRequest.builder() .pagination( CustomPaging.builder() .pageSize(1) .pageNumber(1) .build() ).build(); String productId = UUID.randomUUID().toString(); ProductEntity expected = ProductEntity.builder() .id(productId) .name("Test Product") .unitPrice(BigDecimal.valueOf(12)) .amount(BigDecimal.valueOf(5)) .build(); List<ProductEntity> productEntities = new ArrayList<>(); productEntities.addAll(Collections.singletonList(expected)); Page<ProductEntity> productEntityPage = new PageImpl<>(productEntities, PageRequest.of(1, 1), productEntities.size()); List<Product> productDomainModels = productEntities.stream() .map(entity -> new Product(entity.getId(), entity.getName(), entity.getAmount(),entity.getUnitPrice())) .collect(Collectors.toList()); CustomPage<Product> productPage = CustomPage.of(productDomainModels, productEntityPage); // When when(productReadService.getProducts(any(ProductPagingRequest.class))).thenReturn(productPage); // Then mockMvc.perform(MockMvcRequestBuilders.get("/api/v1/products") .contentType(MediaType.APPLICATION_JSON) .content(objectMapper.writeValueAsString(pagingRequest)) .header(HttpHeaders.AUTHORIZATION, "Bearer " + mockUserToken.getAccessToken())) .andDo(MockMvcResultHandlers.print()) .andExpect(MockMvcResultMatchers.status().isOk()) .andExpect(MockMvcResultMatchers.jsonPath("$.httpStatus").value("OK")) .andExpect(MockMvcResultMatchers.jsonPath("$.isSuccess").value(true)) .andExpect(MockMvcResultMatchers.jsonPath("$.response.content[0].id").value(expected.getId())) .andExpect(MockMvcResultMatchers.jsonPath("$.response.content[0].name").value(expected.getName())) .andExpect(MockMvcResultMatchers.jsonPath("$.response.content[0].amount").value(expected.getAmount())) .andExpect(MockMvcResultMatchers.jsonPath("$.response.content[0].unitPrice").value(expected.getUnitPrice())); // Verify verify(productReadService, times(1)).getProducts(any(ProductPagingRequest.class)); }