Columns: focal_method (string, length 13–60.9k) · test_case (string, length 25–109k)
@Override public <T> boolean isLiveObject(T instance) { return instance instanceof RLiveObject; }
@Test public void testIsLiveObject() { RLiveObjectService service = redisson.getLiveObjectService(); TestClass ts = new TestClass(new ObjectId(100)); assertFalse(service.isLiveObject(ts)); TestClass persisted = service.persist(ts); assertFalse(service.isLiveObject(ts)); assertTrue(service.isLiveObject(persisted)); }
@Override public HttpResponse handle(HttpRequest request) { final List<String> uris = circularArrayAccessLogKeeper.getUris(); return new HttpResponse(200) { @Override public void render(OutputStream outputStream) throws IOException { JsonGenerator generator = jsonFactory.createGenerator(outputStream); generator.writeStartObject(); generator.writeArrayFieldStart("entries"); for (String uri : uris) { generator.writeStartObject(); generator.writeStringField("url", uri); generator.writeEndObject(); } generator.writeEndArray(); generator.writeEndObject(); generator.close(); } }; }
@Test void testOneLogLine() throws IOException { keeper.addUri("foo"); HttpResponse response = handler.handle(null); response.render(out); assertEquals("{\"entries\":[{\"url\":\"foo\"}]}", out.toString()); }
@Override public Hotel getHotel(String id) { return hotelRepository.findById(id).orElseThrow( () -> new ResourceNotFoundException("Hotel no encontrado con el id: " + id) ); }
@Test void testGetHotel() { // Given String hotelId = "1"; Hotel hotel = new Hotel(hotelId, "Hotel Test", "Info Test", "Ubicacion Test"); // When hotelRepository.findById is called with hotelId, return Optional.of(hotel) when(hotelRepository.findById(hotelId)).thenReturn(Optional.of(hotel)); // When Hotel foundHotel = hotelService.getHotel(hotelId); // Then // Verify that the found hotel is not null and has the expected ID and name assertThat(foundHotel).isNotNull(); assertThat(foundHotel.getId()).isEqualTo(hotelId); assertThat(foundHotel.getNombre()).isEqualTo("Hotel Test"); }
@Override public Path mkdir(final Path folder, final TransferStatus status) throws BackgroundException { if(containerService.isContainer(folder)) { final S3BucketCreateService service = new S3BucketCreateService(session); service.create(folder, StringUtils.isBlank(status.getRegion()) ? new S3LocationFeature(session, session.getClient().getRegionEndpointCache()).getDefault().getIdentifier() : status.getRegion()); return folder; } else { final EnumSet<Path.Type> type = EnumSet.copyOf(folder.getType()); type.add(Path.Type.placeholder); return new S3TouchFeature(session, acl).withWriter(writer).touch(folder .withType(type), status // Add placeholder object .withMime(MIMETYPE) .withChecksum(writer.checksum(folder, status).compute(new NullInputStream(0L), status))); } }
@Test(expected = InteroperabilityException.class) public void testCreateBucketInvalidName() throws Exception { final S3AccessControlListFeature acl = new S3AccessControlListFeature(session); final Path test = new Path(new DefaultHomeFinderService(session).find(), "untitled folder", EnumSet.of(Path.Type.directory, Path.Type.volume)); assertFalse(new S3DirectoryFeature(session, new S3WriteFeature(session, acl), acl).isSupported(test.getParent(), test.getName())); assertTrue(new S3DirectoryFeature(virtualhost, new S3WriteFeature(session, acl), acl).isSupported(test.getParent(), test.getName())); final S3LocationFeature.S3Region region = new S3LocationFeature.S3Region("eu-west-2"); test.attributes().setRegion(region.getIdentifier()); new S3DirectoryFeature(session, new S3WriteFeature(session, acl), acl).mkdir(test, new TransferStatus().withRegion(region.getIdentifier())); assertTrue(new S3FindFeature(session, acl).find(test)); new S3DefaultDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
public static Range<Integer> integerRange(String range) { return ofString(range, Integer::parseInt, Integer.class); }
@Test public void testUnboundedRangeStringIsRejected() { PostgreSQLGuavaRangeType instance = PostgreSQLGuavaRangeType.INSTANCE; assertEquals(Range.all(), instance.integerRange("(,)")); }
public SearchOptions setPage(int page, int pageSize) { checkArgument(page >= 1, "Page must be greater or equal to 1 (got " + page + ")"); setLimit(pageSize); int lastResultIndex = page * pageSize; checkArgument(lastResultIndex <= MAX_RETURNABLE_RESULTS, "Can return only the first %s results. %sth result asked.", MAX_RETURNABLE_RESULTS, lastResultIndex); setOffset(lastResultIndex - pageSize); return this; }
@Test public void fail_if_ps_is_zero() { assertThatThrownBy(() -> new SearchOptions().setPage(1, 0)) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Page size must be between 1 and 500 (got 0)"); }
public void execute(){ logger.debug("[" + getOperationName() + "] Starting execution of paged operation. maximum time: " + maxTime + ", maximum pages: " + maxPages); long startTime = System.currentTimeMillis(); long executionTime = 0; int i = 0; int exceptionsSwallowedCount = 0; int operationsCompleted = 0; Set<String> exceptionsSwallowedClasses = new HashSet<String>(); while (i< maxPages && executionTime < maxTime){ Collection<T> page = fetchPage(); if(page == null || page.size() == 0){ break; } for (T item : page) { try { doOperation(item); operationsCompleted++; } catch (Exception e){ if(swallowExceptions){ exceptionsSwallowedCount++; exceptionsSwallowedClasses.add(e.getClass().getName()); logger.debug("Swallowing exception " + e.getMessage(), e); } else { logger.debug("Rethrowing exception " + e.getMessage()); throw e; } } } i++; executionTime = System.currentTimeMillis() - startTime; } finalReport(operationsCompleted, exceptionsSwallowedCount, exceptionsSwallowedClasses); }
@Test(timeout = 1000L) public void execute_negpage() { CountingPageOperation op = new CountingPageOperation(-1,Long.MAX_VALUE); op.execute(); assertEquals(0L, op.counter); }
@Override public Iterable<Duplication> getDuplications(Component file) { checkFileComponentArgument(file); Collection<Duplication> res = this.duplications.asMap().get(file.getKey()); if (res == null) { return Collections.emptyList(); } return res; }
@Test @UseDataProvider("allComponentTypesButFile") public void getDuplications_throws_IAE_if_Component_type_is_not_FILE(Component.Type type) { assertThatThrownBy(() -> { Component component = mockComponentGetType(type); underTest.getDuplications(component); }) .isInstanceOf(IllegalArgumentException.class) .hasMessage("type of file must be FILE"); }
boolean isWriteEnclosureForFieldName( ValueMetaInterface v, String fieldName ) { return ( isWriteEnclosed( v ) ) || isEnclosureFixDisabledAndContainsSeparatorOrEnclosure( fieldName.getBytes() ); }
@Test public void testWriteEnclosureForFieldNameWithEnclosureForced() { TextFileOutputData data = new TextFileOutputData(); data.binarySeparator = new byte[1]; data.binaryEnclosure = new byte[1]; data.writer = new ByteArrayOutputStream(); TextFileOutputMeta meta = getTextFileOutputMeta(); meta.setEnclosureForced(true); meta.setEnclosureFixDisabled(false); stepMockHelper.stepMeta.setStepMetaInterface( meta ); TextFileOutput textFileOutput = getTextFileOutput(data, meta); ValueMetaBase valueMetaInterface = getValueMetaInterface(); assertTrue(textFileOutput.isWriteEnclosureForFieldName(valueMetaInterface, "fieldName")); }
@Override public Set<GrokPattern> loadAll() { return Sets.newHashSet(store.values()); }
@Test public void loadAll() throws Exception { GrokPattern pattern1 = service.save(GrokPattern.create("NAME1", ".*")); GrokPattern pattern2 = service.save(GrokPattern.create("NAME2", ".*")); GrokPattern pattern3 = service.save(GrokPattern.create("NAME3", ".*")); assertThat(service.loadAll()).containsExactlyInAnyOrder(pattern1, pattern2, pattern3); }
public static Map<String, Object> flatten(Map<String, Object> originalMap, String parentKey, String separator) { final Map<String, Object> result = new HashMap<>(); for (Map.Entry<String, Object> entry : originalMap.entrySet()) { final String key = parentKey.isEmpty() ? entry.getKey() : parentKey + separator + entry.getKey(); final Object value = entry.getValue(); if (value instanceof Map) { @SuppressWarnings("unchecked") final Map<String, Object> valueMap = (Map<String, Object>) value; result.putAll(flatten(valueMap, key, separator)); } else { result.put(key, value); } } return result; }
@Test public void flattenAddsParentKey() throws Exception { final Map<String, Object> map = ImmutableMap.of( "map", ImmutableMap.of( "foo", "bar", "baz", "qux")); final Map<String, Object> expected = ImmutableMap.of( "map_foo", "bar", "map_baz", "qux"); assertThat(MapUtils.flatten(map, "", "_")).isEqualTo(expected); }
@Override public boolean equals(Object object) { if (object instanceof MapUpdate) { MapUpdate that = (MapUpdate) object; return this.type == that.type && Objects.equals(this.key, that.key) && Objects.equals(this.value, that.value) && Objects.equals(this.version, that.version); } return false; }
@Test public void testEquals() { new EqualsTester() .addEqualityGroup(stats1, stats1) .addEqualityGroup(stats2) .testEquals(); new EqualsTester() .addEqualityGroup(stats3, stats3) .addEqualityGroup(stats4) .testEquals(); }
public Ticket addTicket(long interval, TimerHandler handler, Object... args) { return ticket.add(interval, handler, args); }
@Test(expected = IllegalArgumentException.class) public void testInvalidTicketInsertion() { ticker.addTicket(-1, NOOP); }
public static boolean checkpw(String plaintext, String hashed) { byte hashed_bytes[]; byte try_bytes[]; try { String try_pw = hashpw(plaintext, hashed); hashed_bytes = hashed.getBytes("UTF-8"); try_bytes = try_pw.getBytes("UTF-8"); } catch (UnsupportedEncodingException uee) { return false; } if (hashed_bytes.length != try_bytes.length) return false; byte ret = 0; for (int i = 0; i < try_bytes.length; i++) ret |= hashed_bytes[i] ^ try_bytes[i]; return ret == 0; }
@Test public void testCheckpw_failure() { System.out.print("BCrypt.checkpw w/ bad passwords: "); for (int i = 0; i < test_vectors.length; i++) { int broken_index = (i + 4) % test_vectors.length; String plain = test_vectors[i][0]; String expected = test_vectors[broken_index][2]; Assert.assertFalse(BCrypt.checkpw(plain, expected)); System.out.print("."); } System.out.println(""); }
public WsResponse call(WsRequest request) { checkState(!globalMode.isMediumTest(), "No WS call should be made in medium test mode"); WsResponse response = target.wsConnector().call(request); failIfUnauthorized(response); checkAuthenticationWarnings(response); return response; }
@Test public void call_whenTokenExpirationApproaches_shouldLogWarnings() { WsRequest request = newRequest(); var fiveDaysLatter = LocalDateTime.now().atZone(ZoneOffset.UTC).plusDays(5); String expirationDate = DateTimeFormatter .ofPattern(DATETIME_FORMAT) .format(fiveDaysLatter); server.stubFor(get(urlEqualTo(URL_ENDPOINT)) .willReturn(ok() .withHeader(SQ_TOKEN_EXPIRATION_HEADER, expirationDate))); logTester.setLevel(Level.DEBUG); DefaultScannerWsClient underTest = new DefaultScannerWsClient(wsClient, false, new GlobalAnalysisMode(new ScannerProperties(Collections.emptyMap())), analysisWarnings); underTest.call(request); // the second call should not add the same warning twice underTest.call(request); // check logs List<String> warningLogs = logTester.logs(Level.WARN); assertThat(warningLogs).hasSize(2); assertThat(warningLogs.get(0)).contains("The token used for this analysis will expire on: " + fiveDaysLatter.format(DateTimeFormatter.ofPattern("MMMM dd, yyyy"))); assertThat(warningLogs.get(1)).contains("Analysis executed with this token will fail after the expiration date."); }
public static synchronized EventTimerManager getInstance() { return SingletonHolder.mInstance; }
@Test public void getInstance() { mInstance = EventTimerManager.getInstance(); Assert.assertNotNull(mInstance); }
@Override public void updateReplace(Object key, Object oldValue, Object newValue) { int keyHash = key.hashCode(); int oldValueHash = oldValue.hashCode(); int newValueHash = newValue.hashCode(); int leafOrder = MerkleTreeUtil.getLeafOrderForHash(keyHash, leafLevel); int leafCurrentHash = getNodeHash(leafOrder); int leafNewHash = MerkleTreeUtil.removeHash(leafCurrentHash, oldValueHash); leafNewHash = MerkleTreeUtil.addHash(leafNewHash, newValueHash); setNodeHash(leafOrder, leafNewHash); updateBranch(leafOrder); }
@Test public void testUpdateReplace() { MerkleTree merkleTree = new ArrayMerkleTree(3); merkleTree.updateAdd(1, 1); merkleTree.updateAdd(2, 2); merkleTree.updateAdd(3, 3); merkleTree.updateReplace(2, 2, 4); int expectedHash = 0; expectedHash = MerkleTreeUtil.addHash(expectedHash, 1); expectedHash = MerkleTreeUtil.addHash(expectedHash, 3); expectedHash = MerkleTreeUtil.addHash(expectedHash, 4); int nodeHash = merkleTree.getNodeHash(5); assertEquals(expectedHash, nodeHash); }
protected static boolean isDoubleQuoted(String input) { if (input == null || input.isBlank()) { return false; } return input.matches("(^" + QUOTE_CHAR + "{2}([^" + QUOTE_CHAR + "]+)" + QUOTE_CHAR + "{2})"); }
@Test public void testEmptyDoubleQuotedNegative() { assertFalse(isDoubleQuoted("\"\"\"\"")); }
public FEELFnResult<BigDecimal> invoke(@ParameterName( "list" ) List list) { if ( list == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null")); } FEELFnResult<BigDecimal> s = sum.invoke( list ); Function<FEELEvent, FEELFnResult<BigDecimal>> ifLeft = event -> FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "unable to sum the elements which is required to calculate the mean")); Function<BigDecimal, FEELFnResult<BigDecimal>> ifRight = (sum) -> { try { return FEELFnResult.ofResult( sum.divide( BigDecimal.valueOf( list.size() ), MathContext.DECIMAL128 ) ); } catch (Exception e) { return FEELFnResult.ofError( new InvalidParametersEvent(Severity.ERROR, "unable to perform division to calculate the mean", e) ); } }; return s.cata(ifLeft, ifRight); }
@Test void invokeArrayWithDoubles() { FunctionTestUtil.assertResult(meanFunction.invoke(new Object[]{10.0d, 20.0d, 30.0d}), BigDecimal.valueOf(20)); FunctionTestUtil.assertResult(meanFunction.invoke(new Object[]{10.2d, 20.2d, 30.2d}), BigDecimal.valueOf(20.2)); }
@Override public Object convertToPropertyType(Class<?> entityType, String[] propertyPath, String value) { IndexValueFieldDescriptor fieldDescriptor = getValueFieldDescriptor(entityType, propertyPath); if (fieldDescriptor == null) { return super.convertToPropertyType(entityType, propertyPath, value); } Class<?> type = fieldDescriptor.type().dslArgumentClass(); if (Date.class != type) { return super.convertToPropertyType(entityType, propertyPath, value); } try { return DateTools.stringToDate(value); } catch (ParseException e) { throw new ParsingException(e); } }
@Test public void testConvertFloatProperty() { assertThat(convertToPropertyType(TestEntity.class, "f", "42.0")).isEqualTo(42.0F); }
public Future<OAuth2AccessToken> getAccessTokenPasswordGrantAsync(String username, String password) { return getAccessTokenPasswordGrantAsync(username, password, (OAuthAsyncRequestCallback<OAuth2AccessToken>) null); }
@Test public void shouldProduceCorrectRequestAsync() throws ExecutionException, InterruptedException, IOException { final OAuth20Service service = new ServiceBuilder("your_api_key") .apiSecret("your_api_secret") .build(new OAuth20ApiUnit()); final OAuth2AccessToken token = service.getAccessTokenPasswordGrantAsync("user1", "password1").get(); assertNotNull(token); final JsonNode response = OBJECT_MAPPER.readTree(token.getRawResponse()); assertEquals(OAuth20ServiceUnit.TOKEN, response.get(OAuthConstants.ACCESS_TOKEN).asText()); assertEquals(OAuth20ServiceUnit.EXPIRES, response.get("expires_in").asInt()); final String authorize = Base64.encode( String.format("%s:%s", service.getApiKey(), service.getApiSecret()).getBytes(Charset.forName("UTF-8"))); assertEquals(OAuthConstants.BASIC + ' ' + authorize, response.get(OAuthConstants.HEADER).asText()); assertEquals("user1", response.get("query-username").asText()); assertEquals("password1", response.get("query-password").asText()); assertEquals("password", response.get("query-grant_type").asText()); }
@Override public void report(SortedMap<MetricName, Gauge> gauges, SortedMap<MetricName, Counter> counters, SortedMap<MetricName, Histogram> histograms, SortedMap<MetricName, Meter> meters, SortedMap<MetricName, Timer> timers) { if (loggerProxy.isEnabled(marker)) { for (Entry<MetricName, Gauge> entry : gauges.entrySet()) { logGauge(entry.getKey(), entry.getValue()); } for (Entry<MetricName, Counter> entry : counters.entrySet()) { logCounter(entry.getKey(), entry.getValue()); } for (Entry<MetricName, Histogram> entry : histograms.entrySet()) { logHistogram(entry.getKey(), entry.getValue()); } for (Entry<MetricName, Meter> entry : meters.entrySet()) { logMeter(entry.getKey(), entry.getValue()); } for (Entry<MetricName, Timer> entry : timers.entrySet()) { logTimer(entry.getKey(), entry.getValue()); } } }
@Test public void reportsTimerValuesAtError() throws Exception { final Timer timer = mock(Timer.class); when(timer.getCount()).thenReturn(1L); when(timer.getMeanRate()).thenReturn(2.0); when(timer.getOneMinuteRate()).thenReturn(3.0); when(timer.getFiveMinuteRate()).thenReturn(4.0); when(timer.getFifteenMinuteRate()).thenReturn(5.0); final Snapshot snapshot = mock(Snapshot.class); when(snapshot.getMax()).thenReturn(TimeUnit.MILLISECONDS.toNanos(100)); when(snapshot.getMean()).thenReturn((double) TimeUnit.MILLISECONDS.toNanos(200)); when(snapshot.getMin()).thenReturn(TimeUnit.MILLISECONDS.toNanos(300)); when(snapshot.getStdDev()).thenReturn((double) TimeUnit.MILLISECONDS.toNanos(400)); when(snapshot.getMedian()).thenReturn((double) TimeUnit.MILLISECONDS.toNanos(500)); when(snapshot.get75thPercentile()).thenReturn((double) TimeUnit.MILLISECONDS.toNanos(600)); when(snapshot.get95thPercentile()).thenReturn((double) TimeUnit.MILLISECONDS.toNanos(700)); when(snapshot.get98thPercentile()).thenReturn((double) TimeUnit.MILLISECONDS.toNanos(800)); when(snapshot.get99thPercentile()).thenReturn((double) TimeUnit.MILLISECONDS.toNanos(900)); when(snapshot.get999thPercentile()).thenReturn((double) TimeUnit.MILLISECONDS .toNanos(1000)); when(timer.getSnapshot()).thenReturn(snapshot); when(logger.isErrorEnabled(marker)).thenReturn(true); errorReporter.report(this.map(), this.map(), this.map(), this.map(), map("test.another.timer", timer)); verify(logger).error(marker, "type={}, name={}, count={}, min={}, max={}, mean={}, stddev={}, median={}, p75={}, p95={}, p98={}, p99={}, p999={}, mean_rate={}, m1={}, m5={}, m15={}, rate_unit={}, duration_unit={}", "TIMER", "test.another.timer", 1L, 300.0, 100.0, 200.0, 400.0, 500.0, 600.0, 700.0, 800.0, 900.0, 1000.0, 2.0, 3.0, 4.0, 5.0, "events/second", "milliseconds"); }
@Override // TODO(yimin) integrate this method with load() method public void cacheData(String ufsPath, long length, long pos, boolean isAsync) throws IOException { List<CompletableFuture<Void>> futures = new ArrayList<>(); // TODO(yimin) To implement the sync data caching. alluxio.grpc.FileInfo fi = getGrpcFileInfo(ufsPath, -1); String fileId = new AlluxioURI(ufsPath).hash(); for (long i = pos / mPageSize; i <= Math.min(pos + length, fi.getLength()) / mPageSize; ++i) { PageId pageId = new PageId(fileId, i); // TODO(yimin) As an optimization, data does not need to load on a page basis. // Can implement a bulk load mechanism and load a couple of pages at the same time, // to improve the performance. if (mCacheManager.hasPageUnsafe(pageId)) { continue; } long loadPos = i * mPageSize; long loadLength = Math.min(mPageSize, fi.getLength() - loadPos); if (loadLength == 0) { continue; } if (!mLoadingPages.addIfAbsent(pageId)) { continue; } futures.add(CompletableFuture.runAsync(() -> { try { if (mCacheManager.hasPageUnsafe(pageId)) { return; } LOG.debug("Preloading {} pos: {} length: {} started", ufsPath, loadPos, loadLength); loadPages(ufsPath, Collections.singletonList(pageId), fi.getLength()); LOG.debug("Preloading {} pos: {} length: {} finished", ufsPath, loadPos, loadLength); } catch (Exception e) { LOG.info("Preloading failed for {} page: {}", ufsPath, pageId, e); } finally { mLoadingPages.remove(pageId); } }, mCacheDataExecutor)); if (!isAsync) { try { CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).get(); } catch (Exception e) { throw new RuntimeException(e); } } } }
@Test public void testCacheDataPartial() throws Exception { int numPages = 10; long length = mPageSize * numPages; String ufsPath = mTestFolder.newFile("test").getAbsolutePath(); byte[] buffer = BufferUtils.getIncreasingByteArray((int) length); BufferUtils.writeBufferToFile(ufsPath, buffer); int startPage = 2; // Loading bytes [19, 40] -> page 1,2,3,4 will be loaded mWorker.cacheData(ufsPath, 2 * mPageSize + 2, startPage * mPageSize - 1, false); List<PageId> cachedPages = mCacheManager.getCachedPageIdsByFileId(new AlluxioURI(ufsPath).hash(), length); assertEquals(4, cachedPages.size()); int start = (int) mPageSize; for (PageId pageId : cachedPages) { byte[] buff = new byte[(int) mPageSize]; mCacheManager.get(pageId, (int) mPageSize, buff, 0); assertTrue(BufferUtils.equalIncreasingByteArray(start, (int) mPageSize, buff)); start += mPageSize; } }
@Override public void open() { super.open(); for (String propertyKey : properties.stringPropertyNames()) { LOGGER.debug("propertyKey: {}", propertyKey); String[] keyValue = propertyKey.split("\\.", 2); if (2 == keyValue.length) { LOGGER.debug("key: {}, value: {}", keyValue[0], keyValue[1]); Properties prefixProperties; if (basePropertiesMap.containsKey(keyValue[0])) { prefixProperties = basePropertiesMap.get(keyValue[0]); } else { prefixProperties = new Properties(); basePropertiesMap.put(keyValue[0].trim(), prefixProperties); } prefixProperties.put(keyValue[1].trim(), getProperty(propertyKey)); } } Set<String> removeKeySet = new HashSet<>(); for (String key : basePropertiesMap.keySet()) { if (!COMMON_KEY.equals(key)) { Properties properties = basePropertiesMap.get(key); if (!properties.containsKey(DRIVER_KEY) || !properties.containsKey(URL_KEY)) { LOGGER.error("{} will be ignored. {}.{} and {}.{} is mandatory.", key, DRIVER_KEY, key, key, URL_KEY); removeKeySet.add(key); } } } for (String key : removeKeySet) { basePropertiesMap.remove(key); } LOGGER.debug("JDBC PropertiesMap: {}", basePropertiesMap); setMaxLineResults(); setMaxRows(); //TODO(zjffdu) Set different sql splitter for different sql dialects. this.sqlSplitter = new SqlSplitter(); }
@Test void testDBPrefixProhibited() throws IOException, InterpreterException { Properties properties = new Properties(); properties.setProperty("common.max_count", "1000"); properties.setProperty("common.max_retry", "3"); properties.setProperty("default.driver", "org.h2.Driver"); properties.setProperty("default.url", getJdbcConnection()); properties.setProperty("default.user", ""); properties.setProperty("default.password", ""); JDBCInterpreter t = new JDBCInterpreter(properties); t.open(); String sqlQuery = "select * from test_table"; Map<String, String> localProperties = new HashMap<>(); localProperties.put("db", "fake"); InterpreterContext context = InterpreterContext.builder() .setAuthenticationInfo(new AuthenticationInfo("testUser")) .setLocalProperties(localProperties) .setParagraphId("paragraphId") .setInterpreterOut(new InterpreterOutput()) .build(); InterpreterResult interpreterResult = t.interpret(sqlQuery, context); // The result should be the same as that run with default config assertEquals(InterpreterResult.Code.SUCCESS, interpreterResult.code()); List<InterpreterResultMessage> resultMessages = context.out.toInterpreterResultMessage(); assertEquals("ID\tNAME\na\ta_name\nb\tb_name\nc\tnull\n", resultMessages.get(0).getData()); }
public NioAsyncSocketBuilder setWriteQueueCapacity(int writeQueueCapacity) { verifyNotBuilt(); this.writeQueueCapacity = checkPositive(writeQueueCapacity, "writeQueueCapacity"); return this; }
@Test public void test_setWriteQueueCapacity() { Reactor reactor = newReactor(); NioAsyncSocketBuilder builder = (NioAsyncSocketBuilder) reactor.newAsyncSocketBuilder(); builder.setWriteQueueCapacity(16384); assertEquals(16384, builder.writeQueueCapacity); }
Mono<ImmutableMap<String, String>> resolve(List<SchemaReference> refs) { return resolveReferences(refs, new Resolving(ImmutableMap.of(), ImmutableSet.of())) .map(Resolving::resolved); }
@Test void resolvesRefsUsingSrClient() { mockSrCall("sub1", 1, new SchemaSubject() .schema("schema1")); mockSrCall("sub2", 1, new SchemaSubject() .schema("schema2") .references( List.of( new SchemaReference().name("ref2_1").subject("sub2_1").version(2), new SchemaReference().name("ref2_2").subject("sub1").version(1)))); mockSrCall("sub2_1", 2, new SchemaSubject() .schema("schema2_1") .references( List.of( new SchemaReference().name("ref2_1_1").subject("sub2_1_1").version(3), new SchemaReference().name("ref1").subject("should_not_be_called").version(1) )) ); mockSrCall("sub2_1_1", 3, new SchemaSubject() .schema("schema2_1_1")); var resolvedRefsMono = schemaReferencesResolver.resolve( List.of( new SchemaReference().name("ref1").subject("sub1").version(1), new SchemaReference().name("ref2").subject("sub2").version(1))); StepVerifier.create(resolvedRefsMono) .assertNext(refs -> Assertions.assertThat(refs) .containsExactlyEntriesOf( // checking map should be ordered ImmutableMap.<String, String>builder() .put("ref1", "schema1") .put("ref2_1_1", "schema2_1_1") .put("ref2_1", "schema2_1") .put("ref2_2", "schema1") .put("ref2", "schema2") .build())) .verifyComplete(); }
public static HttpAsyncClientBuilder custom(MetricRegistry metricRegistry) { return custom(metricRegistry, METHOD_ONLY); }
@Test public void registersExpectedExceptionMetrics() throws Exception { client = InstrumentedHttpAsyncClients.custom(metricRegistry, metricNameStrategy).disableAutomaticRetries().build(); client.start(); final CountDownLatch countDownLatch = new CountDownLatch(1); final SimpleHttpRequest request = SimpleRequestBuilder .get("http://localhost:" + httpServer.getAddress().getPort() + "/") .build(); final String requestMetricName = "request"; final String exceptionMetricName = "exception"; httpServer.createContext("/", HttpExchange::close); httpServer.start(); when(metricNameStrategy.getNameFor(any(), any(HttpRequest.class))) .thenReturn(requestMetricName); when(metricNameStrategy.getNameFor(any(), any(Exception.class))) .thenReturn(exceptionMetricName); try { final Future<SimpleHttpResponse> responseFuture = client.execute(request, new FutureCallback<SimpleHttpResponse>() { @Override public void completed(SimpleHttpResponse result) { fail(); } @Override public void failed(Exception ex) { countDownLatch.countDown(); } @Override public void cancelled() { fail(); } }); countDownLatch.await(5, TimeUnit.SECONDS); responseFuture.get(5, TimeUnit.SECONDS); fail(); } catch (ExecutionException e) { assertThat(e).hasCauseInstanceOf(ConnectionClosedException.class); await().atMost(5, TimeUnit.SECONDS) .untilAsserted(() -> assertThat(metricRegistry.getMeters()).containsKey("exception")); } }
@Override public Double dist(V firstMember, V secondMember, GeoUnit geoUnit) { return get(distAsync(firstMember, secondMember, geoUnit)); }
@Test public void testDistEmpty() { RGeo<String> geo = redisson.getGeo("test"); assertThat(geo.dist("Palermo", "Catania", GeoUnit.METERS)).isNull(); }
public <T> void addClientLevelMutableMetric(final String name, final String description, final RecordingLevel recordingLevel, final Gauge<T> valueProvider) { final MetricName metricName = metrics.metricName(name, CLIENT_LEVEL_GROUP, description, clientLevelTagMap()); final MetricConfig metricConfig = new MetricConfig().recordLevel(recordingLevel); synchronized (clientLevelMetrics) { metrics.addMetric(metricName, metricConfig, valueProvider); clientLevelMetrics.push(metricName); } }
@Test public void shouldAddClientLevelMutableMetric() { final Metrics metrics = mock(Metrics.class); final RecordingLevel recordingLevel = RecordingLevel.INFO; final MetricConfig metricConfig = new MetricConfig().recordLevel(recordingLevel); final Gauge<String> valueProvider = (config, now) -> "mutable-value"; when(metrics.metricName(METRIC_NAME1, CLIENT_LEVEL_GROUP, DESCRIPTION1, clientLevelTags)) .thenReturn(metricName1); doNothing().when(metrics).addMetric(eq(metricName1), eqMetricConfig(metricConfig), eq(valueProvider)); final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, CLIENT_ID, VERSION, time); streamsMetrics.addClientLevelMutableMetric(METRIC_NAME1, DESCRIPTION1, recordingLevel, valueProvider); }
@Override public V move(Duration timeout, DequeMoveArgs args) { return get(moveAsync(timeout, args)); }
@Test public void testMove() { RBlockingDeque<Integer> deque1 = redisson.getBlockingDeque("deque1"); RBlockingDeque<Integer> deque2 = redisson.getBlockingDeque("deque2"); deque2.add(4); deque2.add(5); deque2.add(6); Executors.newSingleThreadScheduledExecutor().schedule(() -> { deque1.add(1); deque1.add(2); deque1.add(3); }, 3, TimeUnit.SECONDS); Awaitility.await().atLeast(Duration.ofSeconds(1)).untilAsserted(() -> { Integer r = deque1.move(Duration.ofSeconds(1), DequeMoveArgs.pollFirst().addLastTo(deque2.getName())); assertThat(r).isNull(); }); Awaitility.await().between(Duration.ofMillis(1700), Duration.ofSeconds(2)).untilAsserted(() -> { Integer r = deque1.move(Duration.ofSeconds(2), DequeMoveArgs.pollFirst().addLastTo(deque2.getName())); assertThat(r).isEqualTo(1); }); assertThat(deque1).containsExactly(2, 3); assertThat(deque2).containsExactly(4, 5, 6, 1); deque2.clear(); Executors.newSingleThreadScheduledExecutor().schedule(() -> { deque2.addAll(Arrays.asList(4, 5, 6, 1)); }, 3, TimeUnit.SECONDS); Awaitility.await().atLeast(Duration.ofSeconds(1)).untilAsserted(() -> { Integer r = deque2.move(Duration.ofSeconds(1), DequeMoveArgs.pollFirst().addLastTo(deque1.getName())); assertThat(r).isNull(); }); Awaitility.await().between(Duration.ofMillis(1700), Duration.ofSeconds(2)).untilAsserted(() -> { Integer r = deque2.move(Duration.ofSeconds(2), DequeMoveArgs.pollLast().addFirstTo(deque1.getName())); assertThat(r).isEqualTo(1); }); assertThat(deque1).containsExactly(1, 2, 3); assertThat(deque2).containsExactly(4, 5, 6); }
@VisibleForTesting public RequestInterceptorChainWrapper getInterceptorChain() throws IOException { String user = UserGroupInformation.getCurrentUser().getUserName(); RequestInterceptorChainWrapper chain = userPipelineMap.get(user); if (chain != null && chain.getRootInterceptor() != null) { return chain; } return initializePipeline(user); }
@Test public void testClientPipelineConcurrent() throws InterruptedException { final String user = "test1"; /* * ClientTestThread is a thread to simulate a client request to get a * ClientRequestInterceptor for the user. */ class ClientTestThread extends Thread { private ClientRequestInterceptor interceptor; @Override public void run() { try { interceptor = pipeline(); } catch (IOException | InterruptedException e) { e.printStackTrace(); } } private ClientRequestInterceptor pipeline() throws IOException, InterruptedException { return UserGroupInformation.createRemoteUser(user).doAs( new PrivilegedExceptionAction<ClientRequestInterceptor>() { @Override public ClientRequestInterceptor run() throws Exception { RequestInterceptorChainWrapper wrapper = getRouterClientRMService().getInterceptorChain(); ClientRequestInterceptor interceptor = wrapper.getRootInterceptor(); Assert.assertNotNull(interceptor); LOG.info("init client interceptor success for user " + user); return interceptor; } }); } } /* * We start the first thread. It should not finish initializing a chainWrapper * before the other thread starts. In this way, the second thread can * initialize at the same time as the first one. In the end, we validate that * the two threads get the same chainWrapper without error. */ ClientTestThread client1 = new ClientTestThread(); ClientTestThread client2 = new ClientTestThread(); client1.start(); client2.start(); client1.join(); client2.join(); Assert.assertNotNull(client1.interceptor); Assert.assertNotNull(client2.interceptor); Assert.assertTrue(client1.interceptor == client2.interceptor); }
@Override public void doFilter(ServletRequest req, ServletResponse res, FilterChain chain) throws IOException, ServletException { HttpServletRequest request = (HttpServletRequest)req; HttpServletResponse response = (HttpServletResponse)res; // Do not allow framing; OF-997 response.setHeader("X-Frame-Options", JiveGlobals.getProperty("adminConsole.frame-options", "SAMEORIGIN")); // Reset the defaultLoginPage variable String loginPage = defaultLoginPage; if (loginPage == null) { loginPage = request.getContextPath() + (AuthFactory.isOneTimeAccessTokenEnabled() ? "/loginToken.jsp" : "/login.jsp" ); } // Get the page we're on: String url = request.getRequestURI().substring(1); if (url.startsWith("plugins/")) { url = url.substring("plugins/".length()); } // See if it's contained in the exclude list. If so, skip filter execution boolean doExclude = false; for (String exclude : excludes) { if (testURLPassesExclude(url, exclude)) { doExclude = true; break; } } if (!doExclude || IP_ACCESS_IGNORE_EXCLUDES.getValue()) { if (!passesBlocklist(req) || !passesAllowList(req)) { response.sendError(HttpServletResponse.SC_FORBIDDEN); return; } } if (!doExclude) { WebManager manager = new WebManager(); manager.init(request, response, request.getSession(), context); boolean haveOneTimeToken = manager.getAuthToken() instanceof AuthToken.OneTimeAuthToken; User loggedUser = manager.getUser(); boolean loggedAdmin = loggedUser == null ? false : adminManager.isUserAdmin(loggedUser.getUsername(), true); if (!haveOneTimeToken && !loggedAdmin && !authUserFromRequest(request)) { response.sendRedirect(getRedirectURL(request, loginPage, null)); return; } } chain.doFilter(req, res); }
@Test public void nonExcludedUrlWillNotErrorWhenListsEmpty() throws Exception { // Setup test fixture. AuthCheckFilter.SERVLET_REQUEST_AUTHENTICATOR.setValue(AdminUserServletAuthenticatorClass.class); final AuthCheckFilter filter = new AuthCheckFilter(adminManager, loginLimitManager); // Execute system under test. filter.doFilter(request, response, filterChain); // Verify result verify(response, never()).sendError(anyInt()); verify(filterChain, atLeastOnce()).doFilter(any(), any()); }
@Override public void authenticate( final JsonObject authInfo, final Handler<AsyncResult<User>> resultHandler ) { final String username = authInfo.getString("username"); if (username == null) { resultHandler.handle(Future.failedFuture("authInfo missing 'username' field")); return; } final String password = authInfo.getString("password"); if (password == null) { resultHandler.handle(Future.failedFuture("authInfo missing 'password' field")); return; } server.getWorkerExecutor().executeBlocking( promisedUser -> getUser(contextName, username, password, promisedUser), false, resultHandler ); }
@Test public void shouldAuthenticateWithWildcardAllowedRole() throws Exception { // Given: givenAllowedRoles("**"); givenUserRoles(); // When: authProvider.authenticate(authInfo, userHandler); // Then: verifyAuthorizedSuccessfulLogin(); }
@Override public void getConfig(ZookeeperServerConfig.Builder builder) { ConfigServer[] configServers = getConfigServers(); int[] zookeeperIds = getConfigServerZookeeperIds(); if (configServers.length != zookeeperIds.length) { throw new IllegalArgumentException(String.format("Number of provided config server hosts (%d) must be the " + "same as number of provided config server zookeeper ids (%d)", configServers.length, zookeeperIds.length)); } String myhostname = HostName.getLocalhost(); // TODO: Server index should be in interval [1, 254] according to doc, // however, we cannot change this id for an existing server for (int i = 0; i < configServers.length; i++) { if (zookeeperIds[i] < 0) { throw new IllegalArgumentException(String.format("Zookeeper ids cannot be negative, was %d for %s", zookeeperIds[i], configServers[i].hostName)); } if (configServers[i].hostName.equals(myhostname)) { builder.myid(zookeeperIds[i]); } builder.server(getZkServer(configServers[i], zookeeperIds[i])); } if (options.zookeeperClientPort().isPresent()) { builder.clientPort(options.zookeeperClientPort().get()); } if (options.hostedVespa().orElse(false)) { builder.vespaTlsConfigFile(Defaults.getDefaults().underVespaHome("var/zookeeper/conf/tls.conf.json")); } boolean isHostedVespa = options.hostedVespa().orElse(false); builder.dynamicReconfiguration(isHostedVespa); builder.reconfigureEnsemble(!isHostedVespa); builder.snapshotMethod(options.zooKeeperSnapshotMethod()); builder.juteMaxBuffer(options.zookeeperJuteMaxBuffer()); }
@Test void zookeeperConfig_uneven_number_of_config_servers_and_zk_ids() { assertThrows(IllegalArgumentException.class, () -> { TestOptions testOptions = createTestOptions(List.of("cfg1", "localhost", "cfg3"), List.of(1)); getConfig(ZookeeperServerConfig.class, testOptions); }); }
@Subscribe public void onChatMessage(ChatMessage chatMessage) { if (chatMessage.getType() != ChatMessageType.TRADE && chatMessage.getType() != ChatMessageType.GAMEMESSAGE && chatMessage.getType() != ChatMessageType.SPAM && chatMessage.getType() != ChatMessageType.FRIENDSCHATNOTIFICATION) { return; } String message = chatMessage.getMessage(); Matcher matcher = KILLCOUNT_PATTERN.matcher(message); if (matcher.find()) { final String boss = matcher.group("boss"); final int kc = Integer.parseInt(matcher.group("kc")); final String pre = matcher.group("pre"); final String post = matcher.group("post"); if (Strings.isNullOrEmpty(pre) && Strings.isNullOrEmpty(post)) { unsetKc(boss); return; } String renamedBoss = KILLCOUNT_RENAMES .getOrDefault(boss, boss) // The config service doesn't support keys with colons in them .replace(":", ""); if (boss != renamedBoss) { // Unset old TOB kc unsetKc(boss); unsetPb(boss); unsetKc(boss.replace(":", ".")); unsetPb(boss.replace(":", ".")); // Unset old story mode unsetKc("Theatre of Blood Story Mode"); unsetPb("Theatre of Blood Story Mode"); } setKc(renamedBoss, kc); // We either already have the pb, or need to remember the boss for the upcoming pb if (lastPb > -1) { log.debug("Got out-of-order personal best for {}: {}", renamedBoss, lastPb); if (renamedBoss.contains("Theatre of Blood")) { // TOB team size isn't sent in the kill message, but can be computed from varbits int tobTeamSize = tobTeamSize(); lastTeamSize = tobTeamSize == 1 ? "Solo" : (tobTeamSize + " players"); } else if (renamedBoss.contains("Tombs of Amascut")) { // TOA team size isn't sent in the kill message, but can be computed from varbits int toaTeamSize = toaTeamSize(); lastTeamSize = toaTeamSize == 1 ? "Solo" : (toaTeamSize + " players"); } final double pb = getPb(renamedBoss); // If a raid with a team size, only update the pb if it is lower than the existing pb // so that the pb is the overall lowest of any team size if (lastTeamSize == null || pb == 0 || lastPb < pb) { log.debug("Setting overall pb (old: {})", pb); setPb(renamedBoss, lastPb); } if (lastTeamSize != null) { log.debug("Setting team size pb: {}", lastTeamSize); setPb(renamedBoss + " " + lastTeamSize, lastPb); } lastPb = -1; lastTeamSize = null; } else { lastBossKill = renamedBoss; lastBossTime = client.getTickCount(); } return; } matcher = DUEL_ARENA_WINS_PATTERN.matcher(message); if (matcher.find()) { final int oldWins = getKc("Duel Arena Wins"); final int wins = matcher.group(2).equals("one") ? 1 : Integer.parseInt(matcher.group(2).replace(",", "")); final String result = matcher.group(1); int winningStreak = getKc("Duel Arena Win Streak"); int losingStreak = getKc("Duel Arena Lose Streak"); if (result.equals("won") && wins > oldWins) { losingStreak = 0; winningStreak += 1; } else if (result.equals("were defeated")) { losingStreak += 1; winningStreak = 0; } else { log.warn("unrecognized duel streak chat message: {}", message); } setKc("Duel Arena Wins", wins); setKc("Duel Arena Win Streak", winningStreak); setKc("Duel Arena Lose Streak", losingStreak); } matcher = DUEL_ARENA_LOSSES_PATTERN.matcher(message); if (matcher.find()) { int losses = matcher.group(1).equals("one") ? 
1 : Integer.parseInt(matcher.group(1).replace(",", "")); setKc("Duel Arena Losses", losses); } matcher = KILL_DURATION_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = NEW_PB_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = RAIDS_PB_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = RAIDS_DURATION_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = HS_PB_PATTERN.matcher(message); if (matcher.find()) { int floor = Integer.parseInt(matcher.group("floor")); String floortime = matcher.group("floortime"); String floorpb = matcher.group("floorpb"); String otime = matcher.group("otime"); String opb = matcher.group("opb"); String pb = MoreObjects.firstNonNull(floorpb, floortime); setPb("Hallowed Sepulchre Floor " + floor, timeStringToSeconds(pb)); if (otime != null) { pb = MoreObjects.firstNonNull(opb, otime); setPb("Hallowed Sepulchre", timeStringToSeconds(pb)); } } matcher = HS_KC_FLOOR_PATTERN.matcher(message); if (matcher.find()) { int floor = Integer.parseInt(matcher.group(1)); int kc = Integer.parseInt(matcher.group(2).replaceAll(",", "")); setKc("Hallowed Sepulchre Floor " + floor, kc); } matcher = HS_KC_GHC_PATTERN.matcher(message); if (matcher.find()) { int kc = Integer.parseInt(matcher.group(1).replaceAll(",", "")); setKc("Hallowed Sepulchre", kc); } matcher = HUNTER_RUMOUR_KC_PATTERN.matcher(message); if (matcher.find()) { int kc = Integer.parseInt(matcher.group(1).replaceAll(",", "")); setKc("Hunter Rumours", kc); } if (lastBossKill != null && lastBossTime != client.getTickCount()) { lastBossKill = null; lastBossTime = -1; } matcher = COLLECTION_LOG_ITEM_PATTERN.matcher(message); if (matcher.find()) { String item = matcher.group(1); int petId = findPet(item); if (petId != -1) { final List<Integer> petList = new ArrayList<>(getPetList()); if (!petList.contains(petId)) { log.debug("New pet added: {}/{}", item, petId); petList.add(petId); setPetList(petList); } } } matcher = GUARDIANS_OF_THE_RIFT_PATTERN.matcher(message); if (matcher.find()) { int kc = Integer.parseInt(matcher.group(1)); setKc("Guardians of the Rift", kc); } }
@Test public void testTheatreOfBloodNoPb() { when(client.getVarbitValue(Varbits.THEATRE_OF_BLOOD_ORB1)).thenReturn(1); ChatMessage chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Wave 'The Final Challenge' (Normal Mode) complete!<br>" + "Duration: <col=ff0000>2:42</col><br>" + "Theatre of Blood completion time: <col=ff0000>17:00</col>. Personal best: 13:52.80", null, 0); chatCommandsPlugin.onChatMessage(chatMessage); chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Theatre of Blood total completion time: <col=ff0000>24:40.20</col>. Personal best: 20:45.00", null, 0); chatCommandsPlugin.onChatMessage(chatMessage); chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Your completed Theatre of Blood count is: <col=ff0000>73</col>.", null, 0); chatCommandsPlugin.onChatMessage(chatMessage); verify(configManager).setRSProfileConfiguration("killcount", "theatre of blood", 73); verify(configManager).setRSProfileConfiguration("personalbest", "theatre of blood", 13 * 60 + 52.8); verify(configManager).setRSProfileConfiguration("personalbest", "theatre of blood solo", 13 * 60 + 52.8); }
@Override public UltraLogLog applyAggregatedValue(UltraLogLog value, UltraLogLog aggregatedValue) { value.add(aggregatedValue); return value; }
@Test public void applyAggregatedValueShouldUnion() { UltraLogLog input1 = UltraLogLog.create(12); IntStream.range(0, 1000).mapToObj(UltraLogLogUtils::hashObject).forEach(h -> h.ifPresent(input1::add)); UltraLogLog input2 = UltraLogLog.create(12); IntStream.range(0, 1000).mapToObj(UltraLogLogUtils::hashObject).forEach(h -> h.ifPresent(input2::add)); DistinctCountULLValueAggregator agg = new DistinctCountULLValueAggregator(Collections.emptyList()); UltraLogLog result = agg.applyAggregatedValue(input1, input2); UltraLogLog union = UltraLogLog.create(12).add(input1).add(input2); assertEquals(result.getDistinctCountEstimate(), union.getDistinctCountEstimate()); }
@PUT @Path("/{connector}/topics/reset") @Operation(summary = "Reset the list of topics actively used by the specified connector") public Response resetConnectorActiveTopics(final @PathParam("connector") String connector, final @Context HttpHeaders headers) { if (isTopicTrackingDisabled) { throw new ConnectRestException(Response.Status.FORBIDDEN.getStatusCode(), "Topic tracking is disabled."); } if (isTopicTrackingResetDisabled) { throw new ConnectRestException(Response.Status.FORBIDDEN.getStatusCode(), "Topic tracking reset is disabled."); } herder.resetConnectorActiveTopics(connector); return Response.accepted().build(); }
@Test public void testResetConnectorActiveTopicsWithTopicTrackingEnabled() { when(serverConfig.topicTrackingEnabled()).thenReturn(true); when(serverConfig.topicTrackingResetEnabled()).thenReturn(false); HttpHeaders headers = mock(HttpHeaders.class); connectorsResource = new ConnectorsResource(herder, serverConfig, restClient, REQUEST_TIMEOUT); Exception e = assertThrows(ConnectRestException.class, () -> connectorsResource.resetConnectorActiveTopics(CONNECTOR_NAME, headers)); assertEquals("Topic tracking reset is disabled.", e.getMessage()); }
public void validateReadPermission(String serverUrl, String personalAccessToken) { HttpUrl url = buildUrl(serverUrl, "/rest/api/1.0/repos"); doGet(personalAccessToken, url, body -> buildGson().fromJson(body, RepositoryList.class)); }
@Test public void fail_validate_url_on_text_result_log_the_returned_payload() { server.enqueue(new MockResponse() .setResponseCode(500) .setBody("this is a text payload")); String serverUrl = server.url("/").toString(); assertThatThrownBy(() -> underTest.validateReadPermission(serverUrl, "token")) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Unable to contact Bitbucket server"); assertThat(String.join(", ", logTester.logs())).contains("Unable to contact Bitbucket server: 500 this is a text payload"); }
public Map<Integer, Schema> schemasById() { return schemasById; }
@Test public void testParseSchemaIdentifierFields() throws Exception { String data = readTableMetadataInputFile("TableMetadataV2Valid.json"); TableMetadata parsed = TableMetadataParser.fromJson(data); assertThat(parsed.schemasById().get(0).identifierFieldIds()).isEmpty(); assertThat(parsed.schemasById().get(1).identifierFieldIds()).containsExactly(1, 2); }
public DropSourceCommand create(final DropStream statement) { return create( statement.getName(), statement.getIfExists(), statement.isDeleteTopic(), DataSourceType.KSTREAM ); }
@Test public void shouldCreateCommandForDropStream() { // Given: final DropStream ddlStatement = new DropStream(SOME_NAME, true, true); // When: final DdlCommand result = dropSourceFactory.create(ddlStatement); // Then: assertThat(result, instanceOf(DropSourceCommand.class)); }
@Override public boolean supports( ConnectionType type ) { return type == WEBSPHERE; }
@Test public void onlySupportsWebsphere() { assertTrue( jmsProvider.supports( WEBSPHERE ) ); assertFalse( jmsProvider.supports( ACTIVEMQ ) ); }
@Bean public CircuitBreakerRegistry circuitBreakerRegistry( EventConsumerRegistry<CircuitBreakerEvent> eventConsumerRegistry, RegistryEventConsumer<CircuitBreaker> circuitBreakerRegistryEventConsumer, @Qualifier("compositeCircuitBreakerCustomizer") CompositeCustomizer<CircuitBreakerConfigCustomizer> compositeCircuitBreakerCustomizer) { CircuitBreakerRegistry circuitBreakerRegistry = createCircuitBreakerRegistry( circuitBreakerProperties, circuitBreakerRegistryEventConsumer, compositeCircuitBreakerCustomizer); registerEventConsumer(circuitBreakerRegistry, eventConsumerRegistry); // then pass the map here initCircuitBreakerRegistry(circuitBreakerRegistry, compositeCircuitBreakerCustomizer); return circuitBreakerRegistry; }
@Test public void testCreateCircuitBreakerRegistryWithSharedConfigs() { InstanceProperties defaultProperties = new InstanceProperties(); defaultProperties.setSlidingWindowSize(1000); defaultProperties.setPermittedNumberOfCallsInHalfOpenState(100); InstanceProperties sharedProperties = new InstanceProperties(); sharedProperties.setSlidingWindowSize(1337); sharedProperties.setPermittedNumberOfCallsInHalfOpenState(1000); InstanceProperties backendWithDefaultConfig = new InstanceProperties(); backendWithDefaultConfig.setBaseConfig("default"); backendWithDefaultConfig.setPermittedNumberOfCallsInHalfOpenState(99); InstanceProperties backendWithSharedConfig = new InstanceProperties(); backendWithSharedConfig.setBaseConfig("sharedConfig"); backendWithSharedConfig.setPermittedNumberOfCallsInHalfOpenState(999); CircuitBreakerConfigurationProperties circuitBreakerConfigurationProperties = new CircuitBreakerConfigurationProperties(); circuitBreakerConfigurationProperties.getConfigs().put("default", defaultProperties); circuitBreakerConfigurationProperties.getConfigs().put("sharedConfig", sharedProperties); circuitBreakerConfigurationProperties.getInstances() .put("backendWithDefaultConfig", backendWithDefaultConfig); circuitBreakerConfigurationProperties.getInstances() .put("backendWithSharedConfig", backendWithSharedConfig); CircuitBreakerConfiguration circuitBreakerConfiguration = new CircuitBreakerConfiguration( circuitBreakerConfigurationProperties); DefaultEventConsumerRegistry<CircuitBreakerEvent> eventConsumerRegistry = new DefaultEventConsumerRegistry<>(); CircuitBreakerRegistry circuitBreakerRegistry = circuitBreakerConfiguration .circuitBreakerRegistry(eventConsumerRegistry, new CompositeRegistryEventConsumer<>(emptyList()), compositeCircuitBreakerCustomizerTestInstance()); assertThat(circuitBreakerRegistry.getAllCircuitBreakers().size()).isEqualTo(2); // Should get default config and overwrite setPermittedNumberOfCallsInHalfOpenState CircuitBreaker circuitBreaker1 = circuitBreakerRegistry .circuitBreaker("backendWithDefaultConfig"); assertThat(circuitBreaker1).isNotNull(); assertThat(circuitBreaker1.getCircuitBreakerConfig().getSlidingWindowSize()) .isEqualTo(1000); assertThat( circuitBreaker1.getCircuitBreakerConfig().getPermittedNumberOfCallsInHalfOpenState()) .isEqualTo(99); // Should get shared config and overwrite setPermittedNumberOfCallsInHalfOpenState CircuitBreaker circuitBreaker2 = circuitBreakerRegistry .circuitBreaker("backendWithSharedConfig"); assertThat(circuitBreaker2).isNotNull(); assertThat(circuitBreaker2.getCircuitBreakerConfig().getSlidingWindowSize()) .isEqualTo(1337); assertThat( circuitBreaker2.getCircuitBreakerConfig().getPermittedNumberOfCallsInHalfOpenState()) .isEqualTo(999); // Unknown backend should get default config of Registry CircuitBreaker circuitBreaker3 = circuitBreakerRegistry.circuitBreaker("unknownBackend"); assertThat(circuitBreaker3).isNotNull(); assertThat(circuitBreaker3.getCircuitBreakerConfig().getSlidingWindowSize()) .isEqualTo(1000); assertThat(eventConsumerRegistry.getAllEventConsumer()).hasSize(3); }
public List<ContextPropagator> getContextPropagators() { return Collections.unmodifiableList(this.contextPropagators); }
@Test public void testConfigs() { assertThat(schedulerService.getCorePoolSize()).isEqualTo(20); assertThat(schedulerService.getThreadFactory()).isInstanceOf(NamingThreadFactory.class); assertThat(schedulerService.getContextPropagators()).hasSize(1) .hasOnlyElementsOfTypes(TestContextPropagators.TestThreadLocalContextPropagatorWithHolder.class); }
public Map<Periodical, ScheduledFuture> getFutures() { return Maps.newHashMap(futures); }
@Test public void testGetFutures() throws Exception { periodicals.registerAndStart(periodical); assertTrue("missing periodical in future Map", periodicals.getFutures().containsKey(periodical)); assertEquals(1, periodicals.getFutures().size()); }
private CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> consumerGroupHeartbeat( String groupId, String memberId, int memberEpoch, String instanceId, String rackId, int rebalanceTimeoutMs, String clientId, String clientHost, List<String> subscribedTopicNames, String assignorName, List<ConsumerGroupHeartbeatRequestData.TopicPartitions> ownedTopicPartitions ) throws ApiException { final long currentTimeMs = time.milliseconds(); final List<CoordinatorRecord> records = new ArrayList<>(); // Get or create the consumer group. boolean createIfNotExists = memberEpoch == 0; final ConsumerGroup group = getOrMaybeCreateConsumerGroup(groupId, createIfNotExists, records); throwIfConsumerGroupIsFull(group, memberId); // Get or create the member. if (memberId.isEmpty()) memberId = Uuid.randomUuid().toString(); final ConsumerGroupMember member; if (instanceId == null) { member = getOrMaybeSubscribeDynamicConsumerGroupMember( group, memberId, memberEpoch, ownedTopicPartitions, createIfNotExists, false ); } else { member = getOrMaybeSubscribeStaticConsumerGroupMember( group, memberId, memberEpoch, instanceId, ownedTopicPartitions, createIfNotExists, false, records ); } // 1. Create or update the member. If the member is new or has changed, a ConsumerGroupMemberMetadataValue // record is written to the __consumer_offsets partition to persist the change. If the subscriptions have // changed, the subscription metadata is updated and persisted by writing a ConsumerGroupPartitionMetadataValue // record to the __consumer_offsets partition. Finally, the group epoch is bumped if the subscriptions have // changed, and persisted by writing a ConsumerGroupMetadataValue record to the partition. ConsumerGroupMember updatedMember = new ConsumerGroupMember.Builder(member) .maybeUpdateInstanceId(Optional.ofNullable(instanceId)) .maybeUpdateRackId(Optional.ofNullable(rackId)) .maybeUpdateRebalanceTimeoutMs(ofSentinel(rebalanceTimeoutMs)) .maybeUpdateServerAssignorName(Optional.ofNullable(assignorName)) .maybeUpdateSubscribedTopicNames(Optional.ofNullable(subscribedTopicNames)) .setClientId(clientId) .setClientHost(clientHost) .setClassicMemberMetadata(null) .build(); boolean bumpGroupEpoch = hasMemberSubscriptionChanged( groupId, member, updatedMember, records ); int groupEpoch = group.groupEpoch(); Map<String, TopicMetadata> subscriptionMetadata = group.subscriptionMetadata(); Map<String, Integer> subscribedTopicNamesMap = group.subscribedTopicNames(); SubscriptionType subscriptionType = group.subscriptionType(); if (bumpGroupEpoch || group.hasMetadataExpired(currentTimeMs)) { // The subscription metadata is updated in two cases: // 1) The member has updated its subscriptions; // 2) The refresh deadline has been reached. 
subscribedTopicNamesMap = group.computeSubscribedTopicNames(member, updatedMember); subscriptionMetadata = group.computeSubscriptionMetadata( subscribedTopicNamesMap, metadataImage.topics(), metadataImage.cluster() ); int numMembers = group.numMembers(); if (!group.hasMember(updatedMember.memberId()) && !group.hasStaticMember(updatedMember.instanceId())) { numMembers++; } subscriptionType = ModernGroup.subscriptionType( subscribedTopicNamesMap, numMembers ); if (!subscriptionMetadata.equals(group.subscriptionMetadata())) { log.info("[GroupId {}] Computed new subscription metadata: {}.", groupId, subscriptionMetadata); bumpGroupEpoch = true; records.add(newConsumerGroupSubscriptionMetadataRecord(groupId, subscriptionMetadata)); } if (bumpGroupEpoch) { groupEpoch += 1; records.add(newConsumerGroupEpochRecord(groupId, groupEpoch)); log.info("[GroupId {}] Bumped group epoch to {}.", groupId, groupEpoch); metrics.record(CONSUMER_GROUP_REBALANCES_SENSOR_NAME); } group.setMetadataRefreshDeadline(currentTimeMs + consumerGroupMetadataRefreshIntervalMs, groupEpoch); } // 2. Update the target assignment if the group epoch is larger than the target assignment epoch. The delta between // the existing and the new target assignment is persisted to the partition. final int targetAssignmentEpoch; final Assignment targetAssignment; if (groupEpoch > group.assignmentEpoch()) { targetAssignment = updateTargetAssignment( group, groupEpoch, member, updatedMember, subscriptionMetadata, subscriptionType, records ); targetAssignmentEpoch = groupEpoch; } else { targetAssignmentEpoch = group.assignmentEpoch(); targetAssignment = group.targetAssignment(updatedMember.memberId(), updatedMember.instanceId()); } // 3. Reconcile the member's assignment with the target assignment if the member is not // fully reconciled yet. updatedMember = maybeReconcile( groupId, updatedMember, group::currentPartitionEpoch, targetAssignmentEpoch, targetAssignment, ownedTopicPartitions, records ); scheduleConsumerGroupSessionTimeout(groupId, memberId); // Prepare the response. ConsumerGroupHeartbeatResponseData response = new ConsumerGroupHeartbeatResponseData() .setMemberId(updatedMember.memberId()) .setMemberEpoch(updatedMember.memberEpoch()) .setHeartbeatIntervalMs(consumerGroupHeartbeatIntervalMs(groupId)); // The assignment is only provided in the following cases: // 1. The member sent a full request. It does so when joining or rejoining the group with zero // as the member epoch; or on any errors (e.g. timeout). We use all the non-optional fields // (rebalanceTimeoutMs, subscribedTopicNames and ownedTopicPartitions) to detect a full request // as those must be set in a full request. // 2. The member's assignment has been updated. boolean isFullRequest = memberEpoch == 0 || (rebalanceTimeoutMs != -1 && subscribedTopicNames != null && ownedTopicPartitions != null); if (isFullRequest || hasAssignedPartitionsChanged(member, updatedMember)) { response.setAssignment(createConsumerGroupResponseAssignment(updatedMember)); } return new CoordinatorResult<>(records, response); }
@Test
public void testStaticMemberRejoinsWithNewSubscribedTopics() {
    String groupId = "fooup";
    // Use a static member id as it makes the test easier.
    String memberId1 = Uuid.randomUuid().toString();
    String memberId2 = Uuid.randomUuid().toString();
    String member2RejoinId = Uuid.randomUuid().toString();

    Uuid fooTopicId = Uuid.randomUuid();
    String fooTopicName = "foo";
    Uuid barTopicId = Uuid.randomUuid();
    String barTopicName = "bar";

    MockPartitionAssignor assignor = new MockPartitionAssignor("range");

    ConsumerGroupMember member1 = new ConsumerGroupMember.Builder(memberId1)
        .setState(MemberState.STABLE)
        .setInstanceId("instance-id-1")
        .setMemberEpoch(10)
        .setPreviousMemberEpoch(9)
        .setRebalanceTimeoutMs(5000)
        .setClientId(DEFAULT_CLIENT_ID)
        .setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
        .setSubscribedTopicNames(Collections.singletonList("foo"))
        .setServerAssignorName("range")
        .setAssignedPartitions(mkAssignment(
            mkTopicAssignment(fooTopicId, 0, 1, 2)))
        .build();

    ConsumerGroupMember member2 = new ConsumerGroupMember.Builder(memberId2)
        .setState(MemberState.STABLE)
        .setInstanceId("instance-id-2")
        .setMemberEpoch(10)
        .setPreviousMemberEpoch(9)
        .setRebalanceTimeoutMs(5000)
        .setClientId(DEFAULT_CLIENT_ID)
        .setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
        .setSubscribedTopicNames(Collections.singletonList("foo"))
        .setServerAssignorName("range")
        .setAssignedPartitions(mkAssignment(
            mkTopicAssignment(fooTopicId, 3, 4, 5)))
        .build();

    // Consumer group with two static members.
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
        .withConsumerGroupAssignors(Collections.singletonList(assignor))
        .withMetadataImage(new MetadataImageBuilder()
            .addTopic(fooTopicId, fooTopicName, 6)
            .addTopic(barTopicId, barTopicName, 3)
            .addRacks()
            .build())
        .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10)
            .withMember(member1)
            .withMember(member2)
            .withAssignment(memberId1, mkAssignment(
                mkTopicAssignment(fooTopicId, 0, 1, 2)))
            .withAssignment(memberId2, mkAssignment(
                mkTopicAssignment(fooTopicId, 3, 4, 5)))
            .withAssignmentEpoch(10)
            .withSubscriptionMetadata(new HashMap<String, TopicMetadata>() {
                {
                    put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6, mkMapOfPartitionRacks(6)));
                }
            }))
        .build();

    assignor.prepareGroupAssignment(new GroupAssignment(
        new HashMap<String, MemberAssignment>() {
            {
                put(memberId1, new MemberAssignmentImpl(mkAssignment(
                    mkTopicAssignment(fooTopicId, 0, 1, 2)
                )));
                put(member2RejoinId, new MemberAssignmentImpl(mkAssignment(
                    mkTopicAssignment(fooTopicId, 3, 4, 5),
                    mkTopicAssignment(barTopicId, 0, 1, 2)
                )));
            }
        }
    ));

    // Member 2 leaves the consumer group.
    CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> result = context.consumerGroupHeartbeat(
        new ConsumerGroupHeartbeatRequestData()
            .setGroupId(groupId)
            .setMemberId(memberId2)
            .setInstanceId("instance-id-2")
            .setMemberEpoch(-2));

    // Member epoch of the response would be set to -2.
    assertResponseEquals(
        new ConsumerGroupHeartbeatResponseData()
            .setMemberId(memberId2)
            .setMemberEpoch(-2),
        result.response()
    );

    // The departing static member will have its epoch set to -2.
    ConsumerGroupMember member2UpdatedEpoch = new ConsumerGroupMember.Builder(member2)
        .setMemberEpoch(-2)
        .build();

    assertEquals(1, result.records().size());
    assertRecordEquals(result.records().get(0), GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, member2UpdatedEpoch));

    // Member 2 rejoins the group with the same instance id.
CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> rejoinResult = context.consumerGroupHeartbeat(
        new ConsumerGroupHeartbeatRequestData()
            .setMemberId(member2RejoinId)
            .setGroupId(groupId)
            .setInstanceId("instance-id-2")
            .setMemberEpoch(0)
            .setRebalanceTimeoutMs(5000)
            .setServerAssignor("range")
            .setSubscribedTopicNames(Arrays.asList("foo", "bar")) // bar is new.
            .setTopicPartitions(Collections.emptyList()));

    assertResponseEquals(
        new ConsumerGroupHeartbeatResponseData()
            .setMemberId(member2RejoinId)
            .setMemberEpoch(11)
            .setHeartbeatIntervalMs(5000)
            .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment()
                .setTopicPartitions(Arrays.asList(
                    new ConsumerGroupHeartbeatResponseData.TopicPartitions()
                        .setTopicId(fooTopicId)
                        .setPartitions(Arrays.asList(3, 4, 5)),
                    new ConsumerGroupHeartbeatResponseData.TopicPartitions()
                        .setTopicId(barTopicId)
                        .setPartitions(Arrays.asList(0, 1, 2))
                ))),
        rejoinResult.response()
    );

    ConsumerGroupMember expectedCopiedMember = new ConsumerGroupMember.Builder(member2RejoinId)
        .setState(MemberState.STABLE)
        .setMemberEpoch(0)
        .setPreviousMemberEpoch(0)
        .setInstanceId("instance-id-2")
        .setClientId(DEFAULT_CLIENT_ID)
        .setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
        .setRebalanceTimeoutMs(5000)
        .setSubscribedTopicNames(Collections.singletonList("foo"))
        .setServerAssignorName("range")
        .setAssignedPartitions(mkAssignment(
            mkTopicAssignment(fooTopicId, 3, 4, 5)))
        .build();

    ConsumerGroupMember expectedRejoinedMember = new ConsumerGroupMember.Builder(member2RejoinId)
        .setState(MemberState.STABLE)
        .setMemberEpoch(11)
        .setPreviousMemberEpoch(0)
        .setInstanceId("instance-id-2")
        .setClientId(DEFAULT_CLIENT_ID)
        .setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
        .setRebalanceTimeoutMs(5000)
        .setSubscribedTopicNames(Arrays.asList("foo", "bar"))
        .setServerAssignorName("range")
        .setAssignedPartitions(mkAssignment(
            mkTopicAssignment(fooTopicId, 3, 4, 5),
            mkTopicAssignment(barTopicId, 0, 1, 2)))
        .build();

    List<CoordinatorRecord> expectedRecordsAfterRejoin = Arrays.asList(
        // The previous member is deleted.
        GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentTombstoneRecord(groupId, memberId2),
        GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentTombstoneRecord(groupId, memberId2),
        GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionTombstoneRecord(groupId, memberId2),

        // The new member is created as a copy of the previous one but
        // with its new member id and new epochs.
        GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedCopiedMember),
        GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, member2RejoinId, mkAssignment(
            mkTopicAssignment(fooTopicId, 3, 4, 5))),
        GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedCopiedMember),

        // As the new member has a different subscribed topic set, a rebalance is triggered.
GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedRejoinedMember), GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, new HashMap<String, TopicMetadata>() { { put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6, mkMapOfPartitionRacks(6))); put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 3, mkMapOfPartitionRacks(3))); } }), GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11), GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, member2RejoinId, mkAssignment( mkTopicAssignment(fooTopicId, 3, 4, 5), mkTopicAssignment(barTopicId, 0, 1, 2) )), GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochRecord(groupId, 11), GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedRejoinedMember) ); assertRecordsEquals(expectedRecordsAfterRejoin, rejoinResult.records()); // Verify that there are no timers. context.assertNoSessionTimeout(groupId, memberId2); context.assertNoRebalanceTimeout(groupId, memberId2); }
@VisibleForTesting
SmsTemplateDO validateSmsTemplate(String templateCode) {
    // Get the SMS template. For efficiency, it is fetched from the cache
    SmsTemplateDO template = smsTemplateService.getSmsTemplateByCodeFromCache(templateCode);
    // The SMS template does not exist
    if (template == null) {
        throw exception(SMS_SEND_TEMPLATE_NOT_EXISTS);
    }
    return template;
}
@Test
public void testCheckSmsTemplateValid_notExists() {
    // Prepare parameters
    String templateCode = randomString();
    // Mock methods
    // Call, and assert the exception
    assertServiceException(() -> smsSendService.validateSmsTemplate(templateCode),
            SMS_SEND_TEMPLATE_NOT_EXISTS);
}
@Override
public Collection<CompressionProvider> getCompressionProviders() {
  Collection<CompressionProvider> providerClasses = new ArrayList<CompressionProvider>();
  List<PluginInterface> providers = getPlugins();
  if ( providers != null ) {
    for ( PluginInterface plugin : providers ) {
      try {
        providerClasses.add( PluginRegistry.getInstance().loadClass( plugin, CompressionProvider.class ) );
      } catch ( Exception e ) {
        // Do nothing here; if we can't load the provider, don't add it to the list
      }
    }
  }
  return providerClasses;
}
@Test public void getCoreProviders() { @SuppressWarnings( "serial" ) final HashMap<String, Boolean> foundProvider = new HashMap<String, Boolean>() { { put( "None", false ); put( "Zip", false ); put( "GZip", false ); put( "Snappy", false ); put( "Hadoop-snappy", false ); } }; Collection<CompressionProvider> providers = factory.getCompressionProviders(); assertNotNull( providers ); for ( CompressionProvider provider : providers ) { assertNotNull( foundProvider.get( provider.getName() ) ); foundProvider.put( provider.getName(), true ); } boolean foundAllProviders = true; for ( Boolean b : foundProvider.values() ) { foundAllProviders = foundAllProviders && b; } assertTrue( foundAllProviders ); }
public void checkOin(String entityId, String oin) { Pattern pattern = Pattern.compile("urn:nl-eid-gdi:1.0:\\w+:" + oin + ":entities:\\d+"); Matcher matcher = pattern.matcher(entityId); if (!matcher.matches()) { throw new MetadataParseException("OIN certificate does not match entityID"); } }
@Test public void checkOinTest() { metadataProcessorServiceMock.checkOin("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS", "SSSSSSSSSSSSSSSSSSSS"); metadataProcessorServiceMock.checkOin("urn:nl-eid-gdi:1:0:DV:00000008888888888001:entities:0001", "SSSSSSSSSSSSSSSSSSSS"); }
@PUT @Path("/{logger}") @Operation(summary = "Set the log level for the specified logger") @SuppressWarnings("fallthrough") public Response setLevel(final @PathParam("logger") String namespace, final Map<String, String> levelMap, @DefaultValue("worker") @QueryParam("scope") @Parameter(description = "The scope for the logging modification (single-worker, cluster-wide, etc.)") String scope) { if (scope == null) { log.warn("Received null scope in request to adjust logging level; will default to {}", WORKER_SCOPE); scope = WORKER_SCOPE; } String levelString = levelMap.get("level"); if (levelString == null) { throw new BadRequestException("Desired 'level' parameter was not specified in request."); } // Make sure that this is a valid level Level level = Level.toLevel(levelString.toUpperCase(Locale.ROOT), null); if (level == null) { throw new NotFoundException("invalid log level '" + levelString + "'."); } switch (scope.toLowerCase(Locale.ROOT)) { default: log.warn("Received invalid scope '{}' in request to adjust logging level; will default to {}", scope, WORKER_SCOPE); case WORKER_SCOPE: List<String> affectedLoggers = herder.setWorkerLoggerLevel(namespace, levelString); return Response.ok(affectedLoggers).build(); case CLUSTER_SCOPE: herder.setClusterLoggerLevel(namespace, levelString); return Response.noContent().build(); } }
@Test public void setLevelWithInvalidArgTest() { for (String scope : Arrays.asList("worker", "cluster", "N/A", null)) { assertThrows( NotFoundException.class, () -> loggingResource.setLevel( "@root", Collections.singletonMap("level", "HIGH"), scope ) ); } }
@VisibleForTesting static Document buildQuery(TupleDomain<ColumnHandle> tupleDomain) { Document query = new Document(); if (tupleDomain.getDomains().isPresent()) { for (Map.Entry<ColumnHandle, Domain> entry : tupleDomain.getDomains().get().entrySet()) { MongoColumnHandle column = (MongoColumnHandle) entry.getKey(); query.putAll(buildPredicate(column, entry.getValue())); } } return query; }
@Test public void testBuildQueryIn() { TupleDomain<ColumnHandle> tupleDomain = TupleDomain.withColumnDomains(ImmutableMap.of( COL2, Domain.create(ValueSet.ofRanges(equal(createUnboundedVarcharType(), utf8Slice("hello")), equal(createUnboundedVarcharType(), utf8Slice("world"))), false))); Document query = MongoSession.buildQuery(tupleDomain); Document expected = new Document(COL2.getName(), new Document("$in", ImmutableList.of("hello", "world"))); assertEquals(query, expected); }
public GithubAppConfiguration validate(AlmSettingDto almSettingDto) { return validate(almSettingDto.getAppId(), almSettingDto.getClientId(), almSettingDto.getClientSecret(), almSettingDto.getPrivateKey(), almSettingDto.getUrl()); }
@Test public void github_global_settings_validation() { AlmSettingDto almSettingDto = createNewGithubDto("clientId", "clientSecret", EXAMPLE_APP_ID, EXAMPLE_PRIVATE_KEY); when(encryption.isEncrypted(any())).thenReturn(false); GithubAppConfiguration configuration = underTest.validate(almSettingDto); ArgumentCaptor<GithubAppConfiguration> configurationArgumentCaptor = ArgumentCaptor.forClass(GithubAppConfiguration.class); verify(appClient).checkApiEndpoint(configurationArgumentCaptor.capture()); verify(appClient).checkAppPermissions(configurationArgumentCaptor.capture()); assertThat(configuration.getId()).isEqualTo(configurationArgumentCaptor.getAllValues().get(0).getId()); assertThat(configuration.getId()).isEqualTo(configurationArgumentCaptor.getAllValues().get(1).getId()); }
public SmppMessage createSmppMessage(CamelContext camelContext, AlertNotification alertNotification) { SmppMessage smppMessage = new SmppMessage(camelContext, alertNotification, configuration); smppMessage.setHeader(SmppConstants.MESSAGE_TYPE, SmppMessageType.AlertNotification.toString()); smppMessage.setHeader(SmppConstants.SEQUENCE_NUMBER, alertNotification.getSequenceNumber()); smppMessage.setHeader(SmppConstants.COMMAND_ID, alertNotification.getCommandId()); smppMessage.setHeader(SmppConstants.COMMAND_STATUS, alertNotification.getCommandStatus()); smppMessage.setHeader(SmppConstants.SOURCE_ADDR, alertNotification.getSourceAddr()); smppMessage.setHeader(SmppConstants.SOURCE_ADDR_NPI, alertNotification.getSourceAddrNpi()); smppMessage.setHeader(SmppConstants.SOURCE_ADDR_TON, alertNotification.getSourceAddrTon()); smppMessage.setHeader(SmppConstants.ESME_ADDR, alertNotification.getEsmeAddr()); smppMessage.setHeader(SmppConstants.ESME_ADDR_NPI, alertNotification.getEsmeAddrNpi()); smppMessage.setHeader(SmppConstants.ESME_ADDR_TON, alertNotification.getEsmeAddrTon()); return smppMessage; }
@Test void deliverSmWithEmptyBodyAndPayloadInOptionalParameter() throws Exception { DeliverSm deliverSm = new DeliverSm(); String payload = "Hellö SMPP wörld!"; deliverSm.setShortMessage(new byte[] {}); deliverSm.setDataCoding(new GeneralDataCoding(Alphabet.ALPHA_DEFAULT).toByte()); deliverSm.setOptionalParameters(new Message_payload(payload.getBytes(defaultEncoding))); SmppMessage smppMessage = binding.createSmppMessage(camelContext, deliverSm); assertEquals(payload, smppMessage.getBody()); assertEquals(SmppMessageType.DeliverSm.toString(), smppMessage.getHeader(SmppConstants.MESSAGE_TYPE)); }
static MultiLineString buildMultiLineString(TDWay outerWay, List<TDWay> innerWays) { List<LineString> lineStrings = new ArrayList<>(); // outer way geometry lineStrings.add(buildLineString(outerWay)); // inner strings if (innerWays != null) { for (TDWay innerWay : innerWays) { LineString innerRing = buildLineString(innerWay); lineStrings.add(innerRing); } } return GEOMETRY_FACTORY.createMultiLineString(lineStrings.toArray(new LineString[lineStrings.size()])); }
@Test public void testBuildNonSimpleMultiLineString() { String testfile = "non-simple-multilinestring.wkt"; List<TDWay> ways = MockingUtils.wktMultiLineStringToWays(testfile); MultiLineString mls = JTSUtils.buildMultiLineString(ways.get(0), ways.subList(1, ways.size())); Geometry expected = MockingUtils.readWKTFile(testfile); Assert.isTrue(!mls.isSimple()); Assert.equals(expected, mls); }
public ConfigTransformerResult transform(Map<String, String> configs) { Map<String, Map<String, Set<String>>> keysByProvider = new HashMap<>(); Map<String, Map<String, Map<String, String>>> lookupsByProvider = new HashMap<>(); // Collect the variables from the given configs that need transformation for (Map.Entry<String, String> config : configs.entrySet()) { if (config.getValue() != null) { List<ConfigVariable> configVars = getVars(config.getValue(), DEFAULT_PATTERN); for (ConfigVariable configVar : configVars) { Map<String, Set<String>> keysByPath = keysByProvider.computeIfAbsent(configVar.providerName, k -> new HashMap<>()); Set<String> keys = keysByPath.computeIfAbsent(configVar.path, k -> new HashSet<>()); keys.add(configVar.variable); } } } // Retrieve requested variables from the ConfigProviders Map<String, Long> ttls = new HashMap<>(); for (Map.Entry<String, Map<String, Set<String>>> entry : keysByProvider.entrySet()) { String providerName = entry.getKey(); ConfigProvider provider = configProviders.get(providerName); Map<String, Set<String>> keysByPath = entry.getValue(); if (provider != null && keysByPath != null) { for (Map.Entry<String, Set<String>> pathWithKeys : keysByPath.entrySet()) { String path = pathWithKeys.getKey(); Set<String> keys = new HashSet<>(pathWithKeys.getValue()); ConfigData configData = provider.get(path, keys); Map<String, String> data = configData.data(); Long ttl = configData.ttl(); if (ttl != null && ttl >= 0) { ttls.put(path, ttl); } Map<String, Map<String, String>> keyValuesByPath = lookupsByProvider.computeIfAbsent(providerName, k -> new HashMap<>()); keyValuesByPath.put(path, data); } } } // Perform the transformations by performing variable replacements Map<String, String> data = new HashMap<>(configs); for (Map.Entry<String, String> config : configs.entrySet()) { data.put(config.getKey(), replace(lookupsByProvider, config.getValue(), DEFAULT_PATTERN)); } return new ConfigTransformerResult(data, ttls); }
@Test public void testSingleLevelOfIndirection() { ConfigTransformerResult result = configTransformer.transform(Collections.singletonMap(MY_KEY, "${test:testPath:testIndirection}")); Map<String, String> data = result.data(); assertEquals("${test:testPath:testResult}", data.get(MY_KEY)); }
public static boolean isJCacheAvailable(ClassLoader classLoader) { return isJCacheAvailable((className) -> ClassLoaderUtil.isClassAvailable(classLoader, className)); }
@Test public void testIsJCacheAvailable_withCorrectVersion_withLogger() { JCacheDetector.ClassAvailabilityChecker classAvailabilityChecker = className -> true; assertTrue(isJCacheAvailable(logger, classAvailabilityChecker)); }
public CsvData read() throws IORuntimeException { return read(this.reader, false); }
@Test @Disabled public void readTest3() { final CsvReadConfig csvReadConfig = CsvReadConfig.defaultConfig(); csvReadConfig.setContainsHeader(true); final CsvReader reader = CsvUtil.getReader(csvReadConfig); final CsvData read = reader.read(FileUtil.file("d:/test/ceshi.csv")); for (CsvRow row : read) { Console.log(row.getByName("案件ID")); } }
@Override @SneakyThrows public WxJsapiSignature createWxMpJsapiSignature(Integer userType, String url) { WxMpService service = getWxMpService(userType); return service.createJsapiSignature(url); }
@Test
public void testCreateWxMpJsapiSignature() throws WxErrorException {
    // Prepare parameters
    Integer userType = randomPojo(UserTypeEnum.class).getValue();
    String url = randomString();
    // Mock methods
    WxJsapiSignature signature = randomPojo(WxJsapiSignature.class);
    when(wxMpService.createJsapiSignature(eq(url))).thenReturn(signature);
    // Call
    WxJsapiSignature result = socialClientService.createWxMpJsapiSignature(userType, url);
    // Assert
    assertSame(signature, result);
}
@Override public ConnectionType getConnectionType() { return ConnectionType.GRPC; }
@Test void testGetConnectionType() { assertEquals(ConnectionType.GRPC, grpcClient.getConnectionType()); }
public ConnectionDetails getConnectionDetails( IMetaStore metaStore, String key, String name ) { ConnectionProvider<? extends ConnectionDetails> connectionProvider = getConnectionProvider( key ); if ( connectionProvider != null ) { Class<? extends ConnectionDetails> clazz = connectionProvider.getClassType(); return loadElement( getMetaStoreFactory( metaStore, clazz ), name ); } return null; }
@Test public void testBaRolesNotNull() { addOne(); TestConnectionWithBucketsDetails connectionDetails = (TestConnectionWithBucketsDetails) connectionManager.getConnectionDetails( CONNECTION_NAME ); assertNotNull( connectionDetails ); assertNotNull( connectionDetails.getBaRoles() ); }
public static NotificationDispatcherMetadata newMetadata() { return METADATA; }
@Test public void qgChange_notification_is_enable_at_global_level() { NotificationDispatcherMetadata metadata = QGChangeNotificationHandler.newMetadata(); assertThat(metadata.getProperty(GLOBAL_NOTIFICATION)).isEqualTo("true"); }
@Override public void fail(Throwable t) { currentExecutions.values().forEach(e -> e.fail(t)); }
@Test void testFail() throws Exception { final SpeculativeExecutionVertex ev = createSpeculativeExecutionVertex(); final Execution e1 = ev.getCurrentExecutionAttempt(); final Execution e2 = ev.createNewSpeculativeExecution(System.currentTimeMillis()); ev.fail(new Exception("Forced test failure.")); assertThat(internalFailuresListener.getFailedTasks()) .containsExactly(e1.getAttemptId(), e2.getAttemptId()); }
@Override public Collection<PiPacketOperation> mapOutboundPacket(OutboundPacket packet) throws PiInterpreterException { DeviceId deviceId = packet.sendThrough(); TrafficTreatment treatment = packet.treatment(); // fabric.p4 supports only OUTPUT instructions. List<Instructions.OutputInstruction> outInstructions = treatment .allInstructions() .stream() .filter(i -> i.type().equals(OUTPUT)) .map(i -> (Instructions.OutputInstruction) i) .collect(toList()); if (treatment.allInstructions().size() != outInstructions.size()) { // There are other instructions that are not of type OUTPUT. throw new PiInterpreterException("Treatment not supported: " + treatment); } ImmutableList.Builder<PiPacketOperation> builder = ImmutableList.builder(); for (Instructions.OutputInstruction outInst : outInstructions) { if (outInst.port().equals(TABLE)) { // Logical port. Forward using the switch tables like a regular packet. builder.add(createPiPacketOperation(packet.data(), -1, true)); } else if (outInst.port().equals(FLOOD)) { // Logical port. Create a packet operation for each switch port. final DeviceService deviceService = handler().get(DeviceService.class); for (Port port : deviceService.getPorts(packet.sendThrough())) { builder.add(createPiPacketOperation(packet.data(), port.number().toLong(), false)); } } else if (outInst.port().isLogical()) { throw new PiInterpreterException(format( "Output on logical port '%s' not supported", outInst.port())); } else { // Send as-is to given port bypassing all switch tables. builder.add(createPiPacketOperation(packet.data(), outInst.port().toLong(), false)); } } return builder.build(); }
@Test public void testMapOutboundPacketWithoutForwarding() throws Exception { PortNumber outputPort = PortNumber.portNumber(1); TrafficTreatment outputTreatment = DefaultTrafficTreatment.builder() .setOutput(outputPort) .build(); ByteBuffer data = ByteBuffer.allocate(64); OutboundPacket outPkt = new DefaultOutboundPacket(DEVICE_ID, outputTreatment, data); Collection<PiPacketOperation> result = interpreter.mapOutboundPacket(outPkt); assertEquals(result.size(), 1); ImmutableList.Builder<PiPacketMetadata> builder = ImmutableList.builder(); builder.add(PiPacketMetadata.builder() .withId(FabricConstants.EGRESS_PORT) .withValue(ImmutableByteSequence.copyFrom(outputPort.toLong()) .fit(PORT_BITWIDTH)) .build()); PiPacketOperation expectedPktOp = PiPacketOperation.builder() .withType(PiPacketOperationType.PACKET_OUT) .withData(ImmutableByteSequence.copyFrom(data)) .withMetadatas(builder.build()) .build(); assertEquals(expectedPktOp, result.iterator().next()); }
@Override public synchronized Set<InternalNode> getCatalogServers() { return catalogServers; }
@Test public void testGetCatalogServers() { DiscoveryNodeManager manager = new DiscoveryNodeManager(selector, workerNodeInfo, new NoOpFailureDetector(), Optional.of(host -> false), expectedVersion, testHttpClient, new TestingDriftClient<>(), internalCommunicationConfig); try { assertEquals(manager.getCatalogServers(), ImmutableSet.of(catalogServer)); } finally { manager.stop(); } }
public static List<Integer> buildQueryWithINClauseStrings(Configuration conf, List<String> queries, StringBuilder prefix,
    StringBuilder suffix, List<String> inList, String inColumn, boolean addParens, boolean notIn) {
  // Get configuration parameters
  int maxQueryLength = MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH);
  int batchSize = MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE);
  int maxParameters = MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_MAX_PARAMETERS);

  // Check parameter set validity, since this is a public method.
  if (inList == null || inList.size() == 0 || maxQueryLength <= 0 || batchSize <= 0) {
    throw new IllegalArgumentException("The IN list is empty!");
  }

  // Define constants and local variables.
  int inListSize = inList.size();
  StringBuilder buf = new StringBuilder();

  int cursor4InListArray = 0,  // cursor for the "inList" array.
      cursor4InClauseElements = 0,  // cursor for an element list per an 'IN'/'NOT IN'-clause.
      cursor4queryOfInClauses = 0;  // cursor for in-clause lists per a query.
  boolean nextItemNeeded = true;
  boolean newInclausePrefixJustAppended = false;
  StringBuilder nextValue = new StringBuilder("");
  StringBuilder newInclausePrefix = new StringBuilder(notIn ? " and " + inColumn + " not in (" : " or " + inColumn + " in (");
  List<Integer> ret = new ArrayList<>();
  int currentCount = 0;

  // Loop over the given inList elements.
  while (cursor4InListArray < inListSize || !nextItemNeeded) {
    if (cursor4queryOfInClauses == 0) {
      // Append prefix
      buf.append(prefix);
      if (addParens) {
        buf.append("(");
      }
      buf.append(inColumn);
      if (notIn) {
        buf.append(" not in (");
      } else {
        buf.append(" in (");
      }
      cursor4queryOfInClauses++;
      newInclausePrefixJustAppended = false;
    }

    // Get the next "inList" value element if needed.
    if (nextItemNeeded) {
      nextValue.setLength(0);
      nextValue.append(String.valueOf(inList.get(cursor4InListArray++)));
      nextItemNeeded = false;
    }

    // Compute the size of a query when the 'nextValue' is added to the current query.
    int querySize = querySizeExpected(buf.length(), nextValue.length(), suffix.length(), addParens);

    if ((querySize > maxQueryLength * 1024) || (currentCount >= maxParameters)) {
      // Check an edge case where the DIRECT_SQL_MAX_QUERY_LENGTH does not allow one 'IN' clause with a single value.
      if (cursor4queryOfInClauses == 1 && cursor4InClauseElements == 0) {
        throw new IllegalArgumentException("The current " + ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH.getVarname() + " is set too small to have one IN clause with single value!");
      }

      // Check an edge case to throw an Exception if we cannot build a single query for 'NOT IN' clause cases, as mentioned in the method comments.
      if (notIn) {
        throw new IllegalArgumentException("The NOT IN list has too many elements for the current " + ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH.getVarname() + "!");
      }

      // Wrap up the current query string since we cannot add another "inList" element value.
      if (newInclausePrefixJustAppended) {
        buf.delete(buf.length()-newInclausePrefix.length(), buf.length());
      }

      buf.setCharAt(buf.length() - 1, ')'); // replace the comma to finish an 'IN' clause string.
      if (addParens) {
        buf.append(")");
      }

      buf.append(suffix);
      queries.add(buf.toString());
      ret.add(currentCount);

      // Prepare a new query string.
buf.setLength(0);
      currentCount = 0;
      cursor4queryOfInClauses = cursor4InClauseElements = 0;
      querySize = 0;
      newInclausePrefixJustAppended = false;
      continue;
    } else if (cursor4InClauseElements >= batchSize-1 && cursor4InClauseElements != 0) {
      // Finish the current 'IN'/'NOT IN' clause and start a new clause.
      buf.setCharAt(buf.length() - 1, ')'); // replace the "comma".
      buf.append(newInclausePrefix.toString());

      newInclausePrefixJustAppended = true;

      // increment cursor for per-query IN-clause list
      cursor4queryOfInClauses++;
      cursor4InClauseElements = 0;
    } else {
      buf.append(nextValue.toString()).append(",");
      currentCount++;
      nextItemNeeded = true;
      newInclausePrefixJustAppended = false;

      // increment cursor for elements per 'IN'/'NOT IN' clause.
      cursor4InClauseElements++;
    }
  }

  // Finish the last query.
  if (newInclausePrefixJustAppended) {
    buf.delete(buf.length()-newInclausePrefix.length(), buf.length());
  }
  buf.setCharAt(buf.length() - 1, ')'); // replace the comma.
  if (addParens) {
    buf.append(")");
  }
  buf.append(suffix);
  queries.add(buf.toString());
  ret.add(currentCount);
  return ret;
}
@Test(expected = IllegalArgumentException.class) public void testBuildQueryWithNOTINClauseFailure() throws Exception { MetastoreConf.setLongVar(conf, ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH, 10); MetastoreConf.setLongVar(conf, ConfVars.DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE, 100); MetastoreConf.setLongVar(conf, ConfVars.DIRECT_SQL_MAX_PARAMETERS, 1000); List<String> queries = new ArrayList<>(); List<Long> deleteSet = new ArrayList<>(); for (long i=0; i < 2000; i++) { deleteSet.add(i+1); } StringBuilder prefix = new StringBuilder(); StringBuilder suffix = new StringBuilder(); prefix.append("select count(*) from TXNS where "); List<String> questions = new ArrayList<>(deleteSet.size()); for (int i = 0; i < deleteSet.size(); i++) { questions.add("?"); } TxnUtils.buildQueryWithINClauseStrings(conf, queries, prefix, suffix, questions, "cc_id", false, true); }
public LocationIndex prepareIndex() { return prepareIndex(EdgeFilter.ALL_EDGES); }
@Test public void testFindingWayGeometry() { BaseGraph g = new BaseGraph.Builder(encodingManager).create(); NodeAccess na = g.getNodeAccess(); na.setNode(10, 51.2492152, 9.4317166); na.setNode(20, 52, 9); na.setNode(30, 51.2, 9.4); na.setNode(50, 49, 10); g.edge(20, 50).set(speedEnc, 60, 60).setWayGeometry(Helper.createPointList(51.25, 9.43)); g.edge(10, 20).set(speedEnc, 60, 60); g.edge(20, 30).set(speedEnc, 60, 60); LocationIndex index = createIndexNoPrepare(g, 2000).prepareIndex(); assertEquals(0, findClosestEdge(index, 51.25, 9.43)); }
public FEELFnResult<BigDecimal> invoke(@ParameterName( "n" ) BigDecimal n) { return invoke(n, BigDecimal.ZERO); }
@Test void invokeNull() { FunctionTestUtil.assertResultError(roundUpFunction.invoke(null), InvalidParametersEvent.class); FunctionTestUtil.assertResultError(roundUpFunction.invoke((BigDecimal) null, null), InvalidParametersEvent.class); FunctionTestUtil.assertResultError(roundUpFunction.invoke(BigDecimal.ONE, null), InvalidParametersEvent.class); FunctionTestUtil.assertResultError(roundUpFunction.invoke(null, BigDecimal.ONE), InvalidParametersEvent.class); }
@Override public void doLimitForModifyRequest(ModifyRequest modifyRequest) throws SQLException { if (null == modifyRequest || !enabledLimit) { return; } doLimit(modifyRequest.getSql()); }
@Test void testDoLimitForModifyRequestForDmlInvalid() throws SQLException { ModifyRequest insert = new ModifyRequest("insert into test(id,name) values(1,'test')"); ModifyRequest invalid = new ModifyRequest("CALL SALES.TOTAL_REVENUES()"); List<ModifyRequest> modifyRequests = new LinkedList<>(); modifyRequests.add(insert); modifyRequests.add(invalid); assertThrows(SQLException.class, () -> sqlLimiter.doLimitForModifyRequest(modifyRequests)); }
long findLogStartOffset(TopicIdPartition topicIdPartition, UnifiedLog log) throws RemoteStorageException { Optional<Long> logStartOffset = Optional.empty(); Option<LeaderEpochFileCache> maybeLeaderEpochFileCache = log.leaderEpochCache(); if (maybeLeaderEpochFileCache.isDefined()) { LeaderEpochFileCache cache = maybeLeaderEpochFileCache.get(); OptionalInt earliestEpochOpt = cache.earliestEntry() .map(epochEntry -> OptionalInt.of(epochEntry.epoch)) .orElseGet(OptionalInt::empty); while (!logStartOffset.isPresent() && earliestEpochOpt.isPresent()) { Iterator<RemoteLogSegmentMetadata> iterator = remoteLogMetadataManager.listRemoteLogSegments(topicIdPartition, earliestEpochOpt.getAsInt()); if (iterator.hasNext()) { logStartOffset = Optional.of(iterator.next().startOffset()); } earliestEpochOpt = cache.nextEpoch(earliestEpochOpt.getAsInt()); } } return logStartOffset.orElseGet(log::localLogStartOffset); }
@Test public void testFindLogStartOffsetFallbackToLocalLogStartOffsetWhenRemoteIsEmpty() throws RemoteStorageException, IOException { List<EpochEntry> epochEntries = new ArrayList<>(); epochEntries.add(new EpochEntry(1, 250L)); epochEntries.add(new EpochEntry(2, 550L)); checkpoint.write(epochEntries); LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); when(mockLog.localLogStartOffset()).thenReturn(250L); when(remoteLogMetadataManager.listRemoteLogSegments(eq(leaderTopicIdPartition), anyInt())) .thenReturn(Collections.emptyIterator()); try (RemoteLogManager remoteLogManager = new RemoteLogManager(config.remoteLogManagerConfig(), brokerId, logDir, clusterId, time, tp -> Optional.of(mockLog), (topicPartition, offset) -> { }, brokerTopicStats, metrics) { public RemoteLogMetadataManager createRemoteLogMetadataManager() { return remoteLogMetadataManager; } }) { assertEquals(250L, remoteLogManager.findLogStartOffset(leaderTopicIdPartition, mockLog)); } }
public static String toCamelCase(CharSequence name) { return toCamelCase(name, CharUtil.UNDERLINE); }
@Test public void toCamelCaseFromDashedTest() { Dict.create() .set("Table-Test-Of-day","tableTestOfDay") .forEach((key, value) -> assertEquals(value, NamingCase.toCamelCase(key, CharUtil.DASHED))); }
@Around(SYNC_UPDATE_CONFIG_ALL) public Object aroundSyncUpdateConfigAll(ProceedingJoinPoint pjp, HttpServletRequest request, HttpServletResponse response, String dataId, String group, String content, String appName, String srcUser, String tenant, String tag) throws Throwable { if (!PropertyUtil.isManageCapacity()) { return pjp.proceed(); } LOGGER.info("[capacityManagement] aroundSyncUpdateConfigAll"); String betaIps = request.getHeader("betaIps"); if (StringUtils.isBlank(betaIps)) { if (StringUtils.isBlank(tag)) { // do capacity management limitation check for writing or updating config_info table. if (configInfoPersistService.findConfigInfo(dataId, group, tenant) == null) { // Write operation. return do4Insert(pjp, request, response, group, tenant, content); } // Update operation. return do4Update(pjp, request, response, dataId, group, tenant, content); } } return pjp.proceed(); }
@Test void testAroundSyncUpdateConfigAllForInsertAspect1() throws Throwable { //test with insert //condition: // 1. has tenant: true // 2. capacity limit check: true // 3. over cluster quota: true when(PropertyUtil.isManageCapacity()).thenReturn(true); when(PropertyUtil.isCapacityLimitCheck()).thenReturn(true); when(configInfoPersistService.findConfigInfo(any(), any(), any())).thenReturn(null); when(capacityService.insertAndUpdateClusterUsage(any(), anyBoolean())).thenReturn(false); MockHttpServletRequest mockHttpServletRequest = new MockHttpServletRequest(); MockHttpServletResponse mockHttpServletResponse = new MockHttpServletResponse(); String localMockResult = (String) capacityManagementAspect.aroundSyncUpdateConfigAll(proceedingJoinPoint, mockHttpServletRequest, mockHttpServletResponse, mockDataId, mockGroup, mockContent, null, null, mockTenant, null); assertEquals(localMockResult, String.valueOf(OVER_CLUSTER_QUOTA.status)); Mockito.verify(proceedingJoinPoint, Mockito.times(0)).proceed(); }
@SuppressWarnings("unchecked") @Override public <T extends Statement> ConfiguredStatement<T> inject( final ConfiguredStatement<T> statement ) { if (!(statement.getStatement() instanceof CreateSource) && !(statement.getStatement() instanceof CreateAsSelect)) { return statement; } try { if (statement.getStatement() instanceof CreateSource) { final ConfiguredStatement<CreateSource> createStatement = (ConfiguredStatement<CreateSource>) statement; return (ConfiguredStatement<T>) forCreateStatement(createStatement).orElse(createStatement); } else { final ConfiguredStatement<CreateAsSelect> createStatement = (ConfiguredStatement<CreateAsSelect>) statement; return (ConfiguredStatement<T>) forCreateAsStatement(createStatement).orElse( createStatement); } } catch (final KsqlStatementException e) { throw e; } catch (final KsqlException e) { throw new KsqlStatementException( ErrorMessageUtil.buildErrorMessage(e), statement.getMaskedStatementText(), e.getCause()); } }
@Test public void shouldReturnStatementUnchangedIfCsasDoesnotHaveSchemaId() { // Given: givenKeyAndValueInferenceSupported(); // When: final ConfiguredStatement<?> result = injector.inject(csasStatement); // Then: assertThat(result, is(sameInstance(csasStatement))); }
@Override public int incrementAndGet(int key) { return addAndGet(key, 1); }
@Test public void testSingleThreadV2() { final IntHashCounter intMap = new AtomicIntHashCounter(16); for (int i = 1; i < 1024; i++) { intMap.incrementAndGet(i); } System.out.println(intMap); }
public long periodBarriersCrossed(long start, long end) { if (start > end) throw new IllegalArgumentException("Start cannot come before end"); long startFloored = getStartOfCurrentPeriodWithGMTOffsetCorrection(start, getTimeZone()); long endFloored = getStartOfCurrentPeriodWithGMTOffsetCorrection(end, getTimeZone()); long diff = endFloored - startFloored; switch (periodicityType) { case TOP_OF_MILLISECOND: return diff; case TOP_OF_SECOND: return diff / MILLIS_IN_ONE_SECOND; case TOP_OF_MINUTE: return diff / MILLIS_IN_ONE_MINUTE; case TOP_OF_HOUR: return diff / MILLIS_IN_ONE_HOUR; case TOP_OF_DAY: return diff / MILLIS_IN_ONE_DAY; case TOP_OF_WEEK: return diff / MILLIS_IN_ONE_WEEK; case TOP_OF_MONTH: return diffInMonths(start, end); default: throw new IllegalStateException("Unknown periodicity type."); } }
@Test public void testPeriodBarriersCrossedWhenLeavingDaylightSaving() { RollingCalendar rc = new RollingCalendar(dailyPattern, TimeZone.getTimeZone("CET"), Locale.US); // Sun Oct 29 00:02:03 CEST 2017, GMT offset = -2h long start = 1509228123333L;// 1490482923333L+217*CoreConstants.MILLIS_IN_ONE_DAY-CoreConstants.MILLIS_IN_ONE_HOUR; // Mon Oct 30 00:02:03 CET 2017, GMT offset = -1h long end = 1509228123333L + 25 * CoreConstants.MILLIS_IN_ONE_HOUR; assertEquals(1, rc.periodBarriersCrossed(start, end)); }
public static byte[] readFileBytes(File file) { if (file.exists()) { String result = readFile(file); if (result != null) { return ByteUtils.toBytes(result); } } return null; }
@Test void testReadFileBytesWithPath() { assertNotNull(DiskUtils.readFileBytes(testFile.getParent(), testFile.getName())); }
public int getCurrentCount(String ip) { int index = 0; if (ip != null) { index = ip.hashCode() % slotCount; } if (index < 0) { index = -index; } return data[index].get(); }
@Test void testGetCurrentCount() { SimpleIpFlowData simpleIpFlowData = new SimpleIpFlowData(3, 10000); simpleIpFlowData.incrementAndGet("127.0.0.1"); simpleIpFlowData.incrementAndGet("127.0.0.1"); simpleIpFlowData.incrementAndGet("127.0.0.1"); assertEquals(3, simpleIpFlowData.getCurrentCount("127.0.0.1")); simpleIpFlowData.rotateSlot(); assertEquals(0, simpleIpFlowData.getCurrentCount("127.0.0.1")); assertEquals(1, simpleIpFlowData.getAverageCount()); }
@Override public void initialize(String inputName, Map<String, String> properties) { this.catalogProperties = ImmutableMap.copyOf(properties); this.name = inputName; if (conf == null) { LOG.warn("No Hadoop Configuration was set, using the default environment Configuration"); this.conf = new Configuration(); } if (properties.containsKey(CatalogProperties.URI)) { this.conf.set(HiveConf.ConfVars.METASTORE_URIS.varname, properties.get(CatalogProperties.URI)); } if (properties.containsKey(CatalogProperties.WAREHOUSE_LOCATION)) { this.conf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, LocationUtil.stripTrailingSlash(properties.get(CatalogProperties.WAREHOUSE_LOCATION))); } this.listAllTables = Boolean.parseBoolean(properties.getOrDefault(LIST_ALL_TABLES, LIST_ALL_TABLES_DEFAULT)); String fileIOImpl = properties.get(CatalogProperties.FILE_IO_IMPL); if (fileIOImpl == null) { this.fileIO = new HadoopFileIO(conf); this.fileIO.initialize(properties); } else { this.fileIO = CatalogUtil.loadFileIO(fileIOImpl, properties, conf); } this.clients = new CachedClientPool(conf, properties); }
@Test public void testInitialize() { assertThatNoException() .isThrownBy( () -> { HiveCatalog hiveCatalog = new HiveCatalog(); hiveCatalog.initialize("hive", Maps.newHashMap()); }); }
public void incGroupGetNums(final String group, final String topic, final int incValue) { final String statsKey = buildStatsKey(topic, group); this.statsTable.get(Stats.GROUP_GET_NUMS).addValue(statsKey, incValue, 1); }
@Test public void testIncGroupGetNums() { brokerStatsManager.incGroupGetNums(GROUP_NAME, TOPIC, 1); String statsKey = brokerStatsManager.buildStatsKey(TOPIC, GROUP_NAME); assertThat(brokerStatsManager.getStatsItem(GROUP_GET_NUMS, statsKey).getValue().doubleValue()).isEqualTo(1L); }
public String createRetrievalToken() throws IOException { File retrievalToken = new File( resource.baseDirectory, "retrieval_token_" + UUID.randomUUID().toString() + ".json"); if (retrievalToken.createNewFile()) { final DataOutputStream dos = new DataOutputStream(new FileOutputStream(retrievalToken)); dos.writeBytes("{\"manifest\": {}}"); dos.flush(); dos.close(); return retrievalToken.getAbsolutePath(); } else { throw new IOException( "Could not create the RetrievalToken file: " + retrievalToken.getAbsolutePath()); } }
@Test void testCreateRetrievalToken() throws Exception { PythonDependencyInfo dependencyInfo = new PythonDependencyInfo(new HashMap<>(), null, null, new HashMap<>(), "python"); Map<String, String> sysEnv = new HashMap<>(); sysEnv.put("FLINK_HOME", "/flink"); try (ProcessPythonEnvironmentManager environmentManager = new ProcessPythonEnvironmentManager( dependencyInfo, new String[] {tmpDir}, sysEnv, new JobID())) { environmentManager.open(); String retrievalToken = environmentManager.createRetrievalToken(); File retrievalTokenFile = new File(retrievalToken); byte[] content = new byte[(int) retrievalTokenFile.length()]; try (DataInputStream input = new DataInputStream(new FileInputStream(retrievalToken))) { input.readFully(content); } assertThat(new String(content)).isEqualTo("{\"manifest\": {}}"); } }
@Override public void onRestRequest(RestRequest req, RequestContext requestContext, Map<String, String> wireAttrs, NextFilter<RestRequest, RestResponse> nextFilter) { try { if (_requestContentEncoding.hasCompressor()) { if (_helper.shouldCompressRequest(req.getEntity().length(), (CompressionOption) requestContext.getLocalAttr(R2Constants.REQUEST_COMPRESSION_OVERRIDE) )) { Compressor compressor = _requestContentEncoding.getCompressor(); ByteString compressed = compressor.deflate(req.getEntity()); if (compressed.length() < req.getEntity().length()) { req = req.builder().setEntity(compressed).setHeader(HttpConstants.CONTENT_ENCODING, compressor.getContentEncodingName()).build(); } } } String operation = (String) requestContext.getLocalAttr(R2Constants.OPERATION); if (!_acceptEncodingHeader.isEmpty() && _helper.shouldCompressResponseForOperation(operation)) { CompressionOption responseCompressionOverride = (CompressionOption) requestContext.getLocalAttr(R2Constants.RESPONSE_COMPRESSION_OVERRIDE); req = addResponseCompressionHeaders(responseCompressionOverride, req); } } catch (CompressionException e) { LOG.error(e.getMessage(), e.getCause()); } //Specify the actual compression algorithm used nextFilter.onRequest(req, requestContext, wireAttrs); }
@Test(dataProvider = "requestCompressionData") public void testRequestCompressionRules(CompressionConfig requestCompressionConfig, CompressionOption requestCompressionOverride, boolean headerShouldBePresent) throws CompressionException, URISyntaxException { ClientCompressionFilter clientCompressionFilter = new ClientCompressionFilter(EncodingType.SNAPPY.getHttpName(), requestCompressionConfig, ACCEPT_COMPRESSIONS, new CompressionConfig(Integer.MAX_VALUE), Collections.<String>emptyList()); // The entity should be compressible for this test. int original = 100; byte[] entity = new byte[original]; Arrays.fill(entity, (byte)'A'); RestRequest restRequest = new RestRequestBuilder(new URI(URI)).setMethod(RestMethod.POST).setEntity(entity).build(); int compressed = EncodingType.SNAPPY.getCompressor().deflate(new ByteArrayInputStream(entity)).length; RequestContext context = new RequestContext(); context.putLocalAttr(R2Constants.REQUEST_COMPRESSION_OVERRIDE, requestCompressionOverride); int entityLength = headerShouldBePresent ? compressed : original; String expectedContentEncoding = headerShouldBePresent ? EncodingType.SNAPPY.getHttpName() : null; clientCompressionFilter.onRestRequest(restRequest, context, Collections.<String, String>emptyMap(), new HeaderCaptureFilter(HttpConstants.CONTENT_ENCODING, expectedContentEncoding, entityLength)); }
@Override public void apply(IntentOperationContext<FlowObjectiveIntent> intentOperationContext) { Objects.requireNonNull(intentOperationContext); Optional<IntentData> toUninstall = intentOperationContext.toUninstall(); Optional<IntentData> toInstall = intentOperationContext.toInstall(); List<FlowObjectiveIntent> uninstallIntents = intentOperationContext.intentsToUninstall(); List<FlowObjectiveIntent> installIntents = intentOperationContext.intentsToInstall(); if (!toInstall.isPresent() && !toUninstall.isPresent()) { intentInstallCoordinator.intentInstallSuccess(intentOperationContext); return; } if (toUninstall.isPresent()) { IntentData intentData = toUninstall.get(); trackerService.removeTrackedResources(intentData.key(), intentData.intent().resources()); uninstallIntents.forEach(installable -> trackerService.removeTrackedResources(intentData.intent().key(), installable.resources())); } if (toInstall.isPresent()) { IntentData intentData = toInstall.get(); trackerService.addTrackedResources(intentData.key(), intentData.intent().resources()); installIntents.forEach(installable -> trackerService.addTrackedResources(intentData.key(), installable.resources())); } FlowObjectiveIntentInstallationContext intentInstallationContext = new FlowObjectiveIntentInstallationContext(intentOperationContext); uninstallIntents.stream() .map(intent -> buildObjectiveContexts(intent, REMOVE)) .flatMap(Collection::stream) .forEach(context -> { context.intentInstallationContext(intentInstallationContext); intentInstallationContext.addContext(context); intentInstallationContext.addPendingContext(context); }); installIntents.stream() .map(intent -> buildObjectiveContexts(intent, ADD)) .flatMap(Collection::stream) .forEach(context -> { context.intentInstallationContext(intentInstallationContext); intentInstallationContext.addContext(context); intentInstallationContext.addNextPendingContext(context); }); intentInstallationContext.apply(); }
@Test public void testGroupInstallationFailedErrorUnderThreshold() { // group install failed, and retry two times. intentInstallCoordinator = new TestIntentInstallCoordinator(); installer.intentInstallCoordinator = intentInstallCoordinator; errors = ImmutableList.of(GROUPINSTALLATIONFAILED, GROUPINSTALLATIONFAILED); installer.flowObjectiveService = new TestFailedFlowObjectiveService(errors); context = createInstallContext(); installer.apply(context); successContext = intentInstallCoordinator.successContext; assertEquals(successContext, context); }
@Override public PageResult<MailAccountDO> getMailAccountPage(MailAccountPageReqVO pageReqVO) { return mailAccountMapper.selectPage(pageReqVO); }
@Test
public void testGetMailAccountPage() {
    // Mock data
    MailAccountDO dbMailAccount = randomPojo(MailAccountDO.class, o -> { // to be matched by the query below
        o.setMail("768@qq.com");
        o.setUsername("yunai");
    });
    mailAccountMapper.insert(dbMailAccount);
    // Test: mail does not match
    mailAccountMapper.insert(cloneIgnoreId(dbMailAccount, o -> o.setMail("788@qq.com")));
    // Test: username does not match
    mailAccountMapper.insert(cloneIgnoreId(dbMailAccount, o -> o.setUsername("tudou")));
    // Prepare parameters
    MailAccountPageReqVO reqVO = new MailAccountPageReqVO();
    reqVO.setMail("768");
    reqVO.setUsername("yu");

    // Call
    PageResult<MailAccountDO> pageResult = mailAccountService.getMailAccountPage(reqVO);
    // Assert
    assertEquals(1, pageResult.getTotal());
    assertEquals(1, pageResult.getList().size());
    assertPojoEquals(dbMailAccount, pageResult.getList().get(0));
}
@Override public GatewayFilter apply(RequestSizeGatewayFilterFactory.RequestSizeConfig requestSizeConfig) { requestSizeConfig.validate(); return new GatewayFilter() { @Override public Mono<Void> filter(ServerWebExchange exchange, GatewayFilterChain chain) { ServerHttpRequest request = exchange.getRequest(); String contentLength = request.getHeaders().getFirst("content-length"); if (!ObjectUtils.isEmpty(contentLength)) { Long currentRequestSize = Long.valueOf(contentLength); if (currentRequestSize > requestSizeConfig.getMaxSize().toBytes()) { exchange.getResponse().setStatusCode(HttpStatus.PAYLOAD_TOO_LARGE); if (!exchange.getResponse().isCommitted()) { exchange.getResponse() .getHeaders() .add("errorMessage", getErrorMessage(currentRequestSize, requestSizeConfig.getMaxSize().toBytes())); } return exchange.getResponse().setComplete(); } } return chain.filter(exchange); } @Override public String toString() { return filterToStringCreator(RequestSizeGatewayFilterFactory.this) .append("max", requestSizeConfig.getMaxSize()) .toString(); } }; }
@Test public void toStringFormat() { RequestSizeConfig config = new RequestSizeConfig(); config.setMaxSize(DataSize.ofBytes(1000L)); GatewayFilter filter = new RequestSizeGatewayFilterFactory().apply(config); assertThat(filter.toString()).contains("max").contains("1000"); }
@Override public Code issueCode(Session session, IdTokenJWS idTokenJWS) { var code = IdGenerator.generateID(); var value = new Code( code, clock.instant(), clock.instant().plus(TTL), session.redirectUri(), session.nonce(), session.clientId(), idTokenJWS); codeRepo.save(value); return value; }
@Test void issueCode_propagatesValues() { var issuer = URI.create("https://idp.example.com"); var keyStore = mock(KeyStore.class); var codeRepo = mock(CodeRepo.class); var sut = new TokenIssuerImpl(issuer, keyStore, codeRepo); var nonce = UUID.randomUUID().toString(); var redirectUri = URI.create("https://myapp.example.com/callback"); var clientId = "myapp"; var session = Session.create().nonce(nonce).redirectUri(redirectUri).clientId(clientId).build(); // when var code = sut.issueCode(session, null); // then assertEquals(nonce, code.nonce()); assertEquals(redirectUri, code.redirectUri()); assertEquals(clientId, code.clientId()); }
@Override
public void updateProject(GoViewProjectUpdateReqVO updateReqVO) {
    // Validate that it exists
    validateProjectExists(updateReqVO.getId());
    // Update
    GoViewProjectDO updateObj = GoViewProjectConvert.INSTANCE.convert(updateReqVO);
    goViewProjectMapper.updateById(updateObj);
}
@Test
public void testUpdateProject_success() {
    // Mock data
    GoViewProjectDO dbGoViewProject = randomPojo(GoViewProjectDO.class);
    goViewProjectMapper.insert(dbGoViewProject); // @Sql: first insert an existing record
    // Prepare parameters
    GoViewProjectUpdateReqVO reqVO = randomPojo(GoViewProjectUpdateReqVO.class, o -> {
        o.setId(dbGoViewProject.getId()); // set the ID to update
        o.setStatus(randomCommonStatus());
    });

    // Call
    goViewProjectService.updateProject(reqVO);
    // Verify that the update is correct
    GoViewProjectDO goViewProject = goViewProjectMapper.selectById(reqVO.getId()); // fetch the latest record
    assertPojoEquals(reqVO, goViewProject);
}
@Override public final String path() { return delegate.getRequestURI(); }
@Test void path_doesntCrashOnNullUrl() { assertThat(wrapper.path()) .isNull(); }
@Override public void validate(CruiseConfig cruiseConfig) { String newServerId = cruiseConfig.server().getServerId(); if (initialIdHolder.compareAndSet(null, newServerId)) { return; } String initialId = initialIdHolder.get(); if (!Objects.equals(initialId, newServerId) && env.enforceServerImmutability()) { throw new RuntimeException(String.format("The value of 'serverId' uniquely identifies a Go server instance. This field cannot be modified (attempting to change from [%s] to [%s]).", initialId, newServerId)); } }
@Test public void shouldBeValidWhenServerIdIsUpdatedFromNull() { validator.validate(cruiseConfig); cruiseConfig.server().ensureServerIdExists(); validator.validate(cruiseConfig); validator.validate(cruiseConfig); }
@Override public Registry getRegistry(URL url) { if (registryManager == null) { throw new IllegalStateException("Unable to fetch RegistryManager from ApplicationModel BeanFactory. " + "Please check if `setApplicationModel` has been override."); } Registry defaultNopRegistry = registryManager.getDefaultNopRegistryIfDestroyed(); if (null != defaultNopRegistry) { return defaultNopRegistry; } url = URLBuilder.from(url) .setPath(RegistryService.class.getName()) .addParameter(INTERFACE_KEY, RegistryService.class.getName()) .removeParameter(TIMESTAMP_KEY) .removeAttribute(EXPORT_KEY) .removeAttribute(REFER_KEY) .build(); String key = createRegistryCacheKey(url); Registry registry = null; boolean check = url.getParameter(CHECK_KEY, true) && url.getPort() != 0; // Lock the registry access process to ensure a single instance of the registry registryManager.getRegistryLock().lock(); try { // double check // fix https://github.com/apache/dubbo/issues/7265. defaultNopRegistry = registryManager.getDefaultNopRegistryIfDestroyed(); if (null != defaultNopRegistry) { return defaultNopRegistry; } registry = registryManager.getRegistry(key); if (registry != null) { return registry; } // create registry by spi/ioc registry = createRegistry(url); if (check && registry == null) { throw new IllegalStateException("Can not create registry " + url); } if (registry != null) { registryManager.putRegistry(key, registry); } } catch (Exception e) { if (check) { throw new RuntimeException("Can not create registry " + url, e); } else { // 1-11 Failed to obtain or create registry (service) object. LOGGER.warn(REGISTRY_FAILED_CREATE_INSTANCE, "", "", "Failed to obtain or create registry ", e); } } finally { // Release the lock registryManager.getRegistryLock().unlock(); } return registry; }
@Test void testRegistryFactoryCache() { URL url = URL.valueOf("dubbo://" + NetUtils.getLocalAddress().getHostAddress() + ":2233"); Registry registry1 = registryFactory.getRegistry(url); Registry registry2 = registryFactory.getRegistry(url); Assertions.assertEquals(registry1, registry2); }
public Object getProperty( Object root, String propName ) throws Exception { List<Integer> extractedIndexes = new ArrayList<>(); BeanInjectionInfo.Property prop = info.getProperties().get( propName ); if ( prop == null ) { throw new RuntimeException( "Property not found" ); } Object obj = root; for ( int i = 1, arrIndex = 0; i < prop.path.size(); i++ ) { BeanLevelInfo s = prop.path.get( i ); obj = s.field.get( obj ); if ( obj == null ) { return null; // some value in path is null - return empty } switch ( s.dim ) { case ARRAY: int indexArray = extractedIndexes.get( arrIndex++ ); if ( Array.getLength( obj ) <= indexArray ) { return null; } obj = Array.get( obj, indexArray ); if ( obj == null ) { return null; // element is empty } break; case LIST: int indexList = extractedIndexes.get( arrIndex++ ); List<?> list = (List<?>) obj; if ( list.size() <= indexList ) { return null; } obj = list.get( indexList ); if ( obj == null ) { return null; // element is empty } break; case NONE: break; } } return obj; }
@Test public void getProperty_Found() { BeanInjector bi = new BeanInjector(null ); BeanInjectionInfo bii = new BeanInjectionInfo( MetaBeanLevel1.class ); BeanInjectionInfo.Property actualProperty = bi.getProperty( bii, "SEPARATOR" ); assertNotNull(actualProperty); assertEquals("SEPARATOR", actualProperty.getName() ); }
@Udf public String rpad( @UdfParameter(description = "String to be padded") final String input, @UdfParameter(description = "Target length") final Integer targetLen, @UdfParameter(description = "Padding string") final String padding) { if (input == null) { return null; } if (padding == null || padding.isEmpty() || targetLen == null || targetLen < 0) { return null; } final StringBuilder sb = new StringBuilder(targetLen + padding.length()); sb.append(input); final int padChars = Math.max(targetLen - input.length(), 0); for (int i = 0; i < padChars; i += padding.length()) { sb.append(padding); } sb.setLength(targetLen); return sb.toString(); }
@Test public void shouldReturnEmptyByteBufferForZeroLength() { final ByteBuffer result = udf.rpad(BYTES_123, 0, BYTES_45); assertThat(result, is(EMPTY_BYTES)); }
public static Boolean isMultiInstance() { return isMultiInstance; }
@Test
void testIsMultiInstance2() throws InvocationTargetException, IllegalAccessException {
    System.setProperty("isMultiInstance", "true");
    initMethod.invoke(JvmUtil.class);
    Boolean multiInstance = JvmUtil.isMultiInstance();
    assertTrue(multiInstance);
}
public static Activity getActivityOfView(Context context, View view) {
    Activity activity = null;
    try {
        if (context != null) {
            if (context instanceof Activity) {
                activity = (Activity) context;
            } else if (context instanceof ContextWrapper) {
                while (!(context instanceof Activity) && context instanceof ContextWrapper) {
                    context = ((ContextWrapper) context).getBaseContext();
                }
                if (context instanceof Activity) {
                    activity = (Activity) context;
                }
            }
            if (activity == null && view != null) {
                Object object = view.getTag(R.id.sensors_analytics_tag_view_activity);
                if (object != null) {
                    if (object instanceof Activity) {
                        activity = (Activity) object;
                    }
                }
            }
        }
    } catch (Exception e) {
        SALog.printStackTrace(e);
    }
    return activity;
}
@Test
public void getActivityOfView() {
    TextView textView1 = new TextView(mApplication);
    textView1.setText("child1");
    Assert.assertNull(SAViewUtils.getActivityOfView(mApplication, textView1));
}
@Override public void reportFailedMsgs(FailedMsgs failedMsgs) { logger.info(String.format(LOG_PREFIX + "reportFailedMsgs: %s", failedMsgs)); }
@Test
public void testReportFailedMsgs() {
    FailedMsgs failedMsgs = new FailedMsgs();
    failedMsgs.setTopic("unit-test");
    failedMsgs.setConsumerGroup("default-consumer");
    failedMsgs.setFailedMsgsTotalRecently(2);
    defaultMonitorListener.reportFailedMsgs(failedMsgs);
}
@Override
public final boolean offer(int ordinal, @Nonnull Object item) {
    if (ordinal == -1) {
        return offerInternal(allEdges, item);
    } else {
        if (ordinal == bucketCount()) {
            // ordinal beyond bucketCount will add to snapshot queue, which we don't allow through this method
            throw new IllegalArgumentException("Illegal edge ordinal: " + ordinal);
        }
        singleEdge[0] = ordinal;
        return offerInternal(singleEdge, item);
    }
}
@Test
public void when_sameItemOfferedTwice_then_success() {
    String item = "foo";
    assertTrue(outbox.offer(item));
    assertTrue(outbox.offer(item));
}
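offer above routes ordinal -1 through a pre-built all-edges array and any single ordinal through a reused one-slot array, so the hot path allocates nothing; ordinal == bucketCount() is reserved for the snapshot queue and rejected. A standalone sketch of that routing idea (EdgeRouter is illustrative, not the Jet Outbox API, and like the outbox it assumes single-threaded use, since the one-slot array is shared across calls):

final class EdgeRouter {
    private final int[] allEdges;
    private final int[] singleEdge = new int[1]; // reused to avoid per-call allocation

    EdgeRouter(int bucketCount) {
        allEdges = new int[bucketCount];
        for (int i = 0; i < bucketCount; i++) {
            allEdges[i] = i;
        }
    }

    int[] targets(int ordinal) {
        if (ordinal == -1) {
            return allEdges;              // broadcast to every edge
        }
        if (ordinal >= allEdges.length) { // reserved/out-of-range ordinals rejected
            throw new IllegalArgumentException("Illegal edge ordinal: " + ordinal);
        }
        singleEdge[0] = ordinal;
        return singleEdge;
    }
}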
@Override public Bytes key(final Bytes cacheKey) { return Bytes.wrap(bytesFromCacheKey(cacheKey)); }
@Test
public void key() {
    assertThat(
        cacheFunction.key(THE_CACHE_KEY),
        equalTo(THE_KEY)
    );
}
public XAttrFeature(List<XAttr> xAttrs) {
    if (xAttrs != null && !xAttrs.isEmpty()) {
        List<XAttr> toPack = new ArrayList<XAttr>();
        ImmutableList.Builder<XAttr> b = null;
        for (XAttr attr : xAttrs) {
            if (attr.getValue() == null || attr.getValue().length <= PACK_THRESHOLD) {
                toPack.add(attr);
            } else {
                if (b == null) {
                    b = ImmutableList.builder();
                }
                b.add(attr);
            }
        }
        this.attrs = XAttrFormat.toBytes(toPack);
        if (b != null) {
            this.xAttrs = b.build();
        }
    }
}
@Test
public void testXAttrFeature() throws Exception {
    List<XAttr> xAttrs = new ArrayList<>();
    XAttrFeature feature = new XAttrFeature(xAttrs);

    // no XAttrs in the feature
    assertTrue(feature.getXAttrs().isEmpty());

    // one XAttr in the feature
    XAttr a1 = XAttrHelper.buildXAttr(name1, value1);
    xAttrs.add(a1);
    feature = new XAttrFeature(xAttrs);

    XAttr r1 = feature.getXAttr(name1);
    assertTrue(a1.equals(r1));
    assertEquals(feature.getXAttrs().size(), 1);

    // more XAttrs in the feature
    XAttr a2 = XAttrHelper.buildXAttr(name2, value2);
    XAttr a3 = XAttrHelper.buildXAttr(name3);
    XAttr a4 = XAttrHelper.buildXAttr(name4, value4);
    XAttr a5 = XAttrHelper.buildXAttr(name5, value5);
    XAttr a6 = XAttrHelper.buildXAttr(name6, value6);
    XAttr a7 = XAttrHelper.buildXAttr(name7, value7);
    XAttr bigXattr = XAttrHelper.buildXAttr(bigXattrKey, bigXattrValue);
    xAttrs.add(a2);
    xAttrs.add(a3);
    xAttrs.add(a4);
    xAttrs.add(a5);
    xAttrs.add(a6);
    xAttrs.add(a7);
    xAttrs.add(bigXattr);
    feature = new XAttrFeature(xAttrs);

    XAttr r2 = feature.getXAttr(name2);
    assertTrue(a2.equals(r2));
    XAttr r3 = feature.getXAttr(name3);
    assertTrue(a3.equals(r3));
    XAttr r4 = feature.getXAttr(name4);
    assertTrue(a4.equals(r4));
    XAttr r5 = feature.getXAttr(name5);
    assertTrue(a5.equals(r5));
    XAttr r6 = feature.getXAttr(name6);
    assertTrue(a6.equals(r6));
    XAttr r7 = feature.getXAttr(name7);
    assertTrue(a7.equals(r7));
    XAttr rBigXattr = feature.getXAttr(bigXattrKey);
    assertTrue(bigXattr.equals(rBigXattr));

    List<XAttr> rs = feature.getXAttrs();
    assertEquals(rs.size(), xAttrs.size());
    for (int i = 0; i < rs.size(); i++) {
        assertTrue(xAttrs.contains(rs.get(i)));
    }

    // get non-exist XAttr in the feature
    XAttr r8 = feature.getXAttr(name8);
    assertTrue(r8 == null);
}
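The constructor above splits attributes by value size: values at or below PACK_THRESHOLD are serialized together into one compact byte array, while oversized ones are kept as individual objects in an immutable list. A minimal sketch of that size-based partition step (ThresholdPartition is illustrative; HDFS's actual packing format lives in XAttrFormat):

import java.util.ArrayList;
import java.util.List;

final class ThresholdPartition {
    // Splits values into [packable, overflow] by a byte-length threshold,
    // mirroring the toPack / builder split in the constructor above.
    static List<List<byte[]>> split(List<byte[]> values, int threshold) {
        List<byte[]> toPack = new ArrayList<>();
        List<byte[]> overflow = new ArrayList<>();
        for (byte[] v : values) {
            if (v == null || v.length <= threshold) {
                toPack.add(v);      // small enough to pack into one blob
            } else {
                overflow.add(v);    // stored individually
            }
        }
        return List.of(toPack, overflow);
    }
}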
public RowExpression extract(PlanNode node) { return node.accept(new Visitor(domainTranslator, functionAndTypeManager), null); }
@Test
public void testLeftJoin() {
    ImmutableList.Builder<EquiJoinClause> criteriaBuilder = ImmutableList.builder();
    criteriaBuilder.add(new EquiJoinClause(AV, DV));
    criteriaBuilder.add(new EquiJoinClause(BV, EV));
    List<EquiJoinClause> criteria = criteriaBuilder.build();

    Map<VariableReferenceExpression, ColumnHandle> leftAssignments =
            Maps.filterKeys(scanAssignments, Predicates.in(ImmutableList.of(AV, BV, CV)));
    TableScanNode leftScan = tableScanNode(leftAssignments);

    Map<VariableReferenceExpression, ColumnHandle> rightAssignments =
            Maps.filterKeys(scanAssignments, Predicates.in(ImmutableList.of(DV, EV, FV)));
    TableScanNode rightScan = tableScanNode(rightAssignments);

    FilterNode left = filter(leftScan,
            and(
                    lessThan(BV, AV),
                    lessThan(CV, bigintLiteral(10)),
                    equals(GV, bigintLiteral(10))));
    FilterNode right = filter(rightScan,
            and(
                    equals(DV, EV),
                    lessThan(FV, bigintLiteral(100))));

    PlanNode node = new JoinNode(
            Optional.empty(),
            newId(),
            JoinType.LEFT,
            left,
            right,
            criteria,
            ImmutableList.<VariableReferenceExpression>builder()
                    .addAll(left.getOutputVariables())
                    .addAll(right.getOutputVariables())
                    .build(),
            Optional.empty(),
            Optional.empty(),
            Optional.empty(),
            Optional.empty(),
            ImmutableMap.of());

    RowExpression effectivePredicate = effectivePredicateExtractor.extract(node);

    // All right side symbols having output symbols should be checked against NULL
    assertEquals(normalizeConjuncts(effectivePredicate),
            normalizeConjuncts(
                    lessThan(BV, AV),
                    lessThan(CV, bigintLiteral(10)),
                    or(equals(DV, EV), and(isNull(DV), isNull(EV))),
                    or(lessThan(FV, bigintLiteral(100)), isNull(FV)),
                    or(equals(AV, DV), isNull(DV)),
                    or(equals(BV, EV), isNull(EV))));
}
public Optional<DateTime> nextTime(JobTriggerDto trigger) { return nextTime(trigger, trigger.nextTime()); }
@Test
public void emptyNextTimeCron() {
    final JobTriggerDto trigger = JobTriggerDto.builderWithClock(clock)
            .jobDefinitionId("abc-123")
            .jobDefinitionType("event-processor-execution-v1")
            .schedule(CronJobSchedule.builder()
                    // At every hour in 2024
                    .cronExpression("0 0 * ? * * 2024")
                    .build())
            .build();

    // Last execution for the expression
    final DateTime date = DateTime.parse("2024-12-31T23:00:00.000Z");
    final Optional<DateTime> nextTime = strategies.nextTime(trigger, date);
    assertThat(nextTime).isEmpty();
}
@Override public Object executeOnKey(K key, com.hazelcast.map.EntryProcessor entryProcessor) { return map.executeOnKey(key, entryProcessor); }
@Test
public void testExecuteOnKey() {
    map.put(23, "value-23");
    map.put(42, "value-42");

    String result = (String) adapter.executeOnKey(23, new IMapReplaceEntryProcessor("value", "newValue"));
    assertEquals("newValue-23", result);

    assertEquals("newValue-23", map.get(23));
    assertEquals("value-42", map.get(42));
}
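executeOnKey above just delegates, so the interesting semantics live in the entry processor: it is applied to a single entry, its result is stored back, and the same result is returned to the caller, which is what the assertions observe. A Hazelcast-free sketch of those semantics over a plain Map (MapProcessor is illustrative, and the prefix-replacing lambda mirrors what the test's IMapReplaceEntryProcessor helper appears to do):

import java.util.HashMap;
import java.util.Map;
import java.util.function.UnaryOperator;

final class MapProcessor {
    // Apply a processor to one entry, store the result, return it.
    static <K> String executeOnKey(Map<K, String> map, K key, UnaryOperator<String> processor) {
        String updated = processor.apply(map.get(key));
        map.put(key, updated);
        return updated;
    }

    public static void main(String[] args) {
        Map<Integer, String> map = new HashMap<>();
        map.put(23, "value-23");
        map.put(42, "value-42");
        String result = executeOnKey(map, 23, v -> v.replace("value", "newValue"));
        System.out.println(result);       // newValue-23
        System.out.println(map.get(23));  // newValue-23
        System.out.println(map.get(42));  // value-42 (untouched)
    }
}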
public void setRequestTarget(String requestTarget) { this.requestTarget = requestTarget; }
@Test
void testSetRequestTarget() {
    assertNull(basicContext.getRequestTarget());
    basicContext.setRequestTarget("POST /v2/ns/instance");
    assertEquals("POST /v2/ns/instance", basicContext.getRequestTarget());
    basicContext.setRequestTarget(InstanceRequest.class.getSimpleName());
    assertEquals(InstanceRequest.class.getSimpleName(), basicContext.getRequestTarget());
}
@Override public ByteBuf asReadOnly() { return this; }
@Test
public void asReadOnly() {
    ByteBuf buf = buffer(1);
    ByteBuf readOnly = buf.asReadOnly();
    assertTrue(readOnly.isReadOnly());
    assertSame(readOnly, readOnly.asReadOnly());
    readOnly.release();
}