Columns: focal_method (string, lengths 13–60.9k), test_case (string, lengths 25–109k)
public static List<ComponentDto> sortComponents(List<ComponentDto> components, ComponentTreeRequest wsRequest, List<MetricDto> metrics, Table<String, MetricDto, ComponentTreeData.Measure> measuresByComponentUuidAndMetric) { List<String> sortParameters = wsRequest.getSort(); if (sortParameters == null || sortParameters.isEmpty()) { return components; } boolean isAscending = wsRequest.getAsc(); Map<String, Ordering<ComponentDto>> orderingsBySortField = ImmutableMap.<String, Ordering<ComponentDto>>builder() .put(NAME_SORT, componentNameOrdering(isAscending)) .put(QUALIFIER_SORT, componentQualifierOrdering(isAscending)) .put(PATH_SORT, componentPathOrdering(isAscending)) .put(METRIC_SORT, metricValueOrdering(wsRequest, metrics, measuresByComponentUuidAndMetric)) .put(METRIC_PERIOD_SORT, metricPeriodOrdering(wsRequest, metrics, measuresByComponentUuidAndMetric)) .build(); String firstSortParameter = sortParameters.get(0); Ordering<ComponentDto> primaryOrdering = orderingsBySortField.get(firstSortParameter); if (sortParameters.size() > 1) { for (int i = 1; i < sortParameters.size(); i++) { String secondarySortParameter = sortParameters.get(i); Ordering<ComponentDto> secondaryOrdering = orderingsBySortField.get(secondarySortParameter); primaryOrdering = primaryOrdering.compound(secondaryOrdering); } } primaryOrdering = primaryOrdering.compound(componentNameOrdering(true)); return primaryOrdering.immutableSortedCopy(components); }
@Test void sort_by_path() { ComponentTreeRequest wsRequest = newRequest(singletonList(PATH_SORT), true, null); List<ComponentDto> result = sortComponents(wsRequest); assertThat(result).extracting("path") .containsExactly("path-1", "path-2", "path-3", "path-4", "path-5", "path-6", "path-7", "path-8", "path-9"); }
public static String getClientHostName(AlluxioConfiguration conf) { if (conf.isSet(PropertyKey.USER_HOSTNAME)) { return conf.getString(PropertyKey.USER_HOSTNAME); } return getLocalHostName((int) conf.getMs(PropertyKey.NETWORK_HOST_RESOLUTION_TIMEOUT_MS)); }
@Test public void getConfiguredClientHostname() { mConfiguration.set(PropertyKey.USER_HOSTNAME, "clienthost"); assertEquals("clienthost", NetworkAddressUtils.getClientHostName(mConfiguration)); }
public static String toJsonStr(JSON json, int indentFactor) { if (null == json) { return null; } return json.toJSONString(indentFactor); }
@Test public void testArrayEntity() { final String jsonStr = JSONUtil.toJsonStr(new ArrayEntity()); assertEquals("{\"a\":[],\"b\":[0],\"c\":[],\"d\":[],\"e\":[]}", jsonStr); }
@Override public boolean isSigned(final int columnIndex) throws SQLException { try { return resultSetMetaData.isSigned(columnIndex); } catch (final SQLFeatureNotSupportedException ignored) { return false; } }
@Test void assertIsSigned() throws SQLException { assertTrue(queryResultMetaData.isSigned(1)); }
@SuppressWarnings("WeakerAccess") public TimestampExtractor defaultTimestampExtractor() { return getConfiguredInstance(DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, TimestampExtractor.class); }
@Test public void shouldUseCorrectDefaultsWhenNoneSpecified() { final StreamsConfig config = new StreamsConfig(getStreamsConfig()); assertInstanceOf(FailOnInvalidTimestamp.class, config.defaultTimestampExtractor()); assertThrows(ConfigException.class, config::defaultKeySerde); assertThrows(ConfigException.class, config::defaultValueSerde); }
@CanIgnoreReturnValue
public final Ordered containsExactlyEntriesIn(Multimap<?, ?> expectedMultimap) {
  checkNotNull(expectedMultimap, "expectedMultimap");
  checkNotNull(actual);
  ListMultimap<?, ?> missing = difference(expectedMultimap, actual);
  ListMultimap<?, ?> extra = difference(actual, expectedMultimap);

  // TODO(kak): Possible enhancement: Include "[1 copy]" if the element does appear in
  // the subject but not enough times. Similarly for unexpected extra items.
  if (!missing.isEmpty()) {
    if (!extra.isEmpty()) {
      boolean addTypeInfo = hasMatchingToStringPair(missing.entries(), extra.entries());
      // Note: The usage of countDuplicatesAndAddTypeInfo() below causes entries no longer to be
      // grouped by key in the 'missing' and 'unexpected items' parts of the message (we still
      // show the actual and expected multimaps in the standard format).
      String missingDisplay =
          addTypeInfo
              ? countDuplicatesAndAddTypeInfo(annotateEmptyStringsMultimap(missing).entries())
              : countDuplicatesMultimap(annotateEmptyStringsMultimap(missing));
      String extraDisplay =
          addTypeInfo
              ? countDuplicatesAndAddTypeInfo(annotateEmptyStringsMultimap(extra).entries())
              : countDuplicatesMultimap(annotateEmptyStringsMultimap(extra));
      failWithActual(
          fact("missing", missingDisplay),
          fact("unexpected", extraDisplay),
          simpleFact("---"),
          fact("expected", annotateEmptyStringsMultimap(expectedMultimap)));
      return ALREADY_FAILED;
    } else {
      failWithActual(
          fact("missing", countDuplicatesMultimap(annotateEmptyStringsMultimap(missing))),
          simpleFact("---"),
          fact("expected", annotateEmptyStringsMultimap(expectedMultimap)));
      return ALREADY_FAILED;
    }
  } else if (!extra.isEmpty()) {
    failWithActual(
        fact("unexpected", countDuplicatesMultimap(annotateEmptyStringsMultimap(extra))),
        simpleFact("---"),
        fact("expected", annotateEmptyStringsMultimap(expectedMultimap)));
    return ALREADY_FAILED;
  }

  return new MultimapInOrder(/* allowUnexpected = */ false, expectedMultimap);
}
@Test public void containsExactlyEntriesIn() { ImmutableListMultimap<Integer, String> listMultimap = ImmutableListMultimap.of(3, "one", 3, "six", 3, "two", 4, "five", 4, "four"); ImmutableSetMultimap<Integer, String> setMultimap = ImmutableSetMultimap.copyOf(listMultimap); assertThat(listMultimap).containsExactlyEntriesIn(setMultimap); }
public Lock lock(ApplicationId application) { return lock(application, defaultLockTimeout); }
@Test public void locks_can_be_acquired_and_released() { ApplicationId app = ApplicationId.from(TenantName.from("testTenant"), ApplicationName.from("testApp"), InstanceName.from("testInstance")); try (var ignored = zkClient.lock(app)) { throw new RuntimeException(); } catch (RuntimeException expected) { } try (var ignored = zkClient.lock(app)) { } try (var ignored = zkClient.lock(app)) { } }
@Override public List<String> batchUpdateMetadata(String namespaceId, InstanceOperationInfo instanceOperationInfo, Map<String, String> metadata) throws NacosException { boolean isEphemeral = !UtilsAndCommons.PERSIST.equals(instanceOperationInfo.getConsistencyType()); String serviceName = instanceOperationInfo.getServiceName(); Service service = getService(namespaceId, serviceName, isEphemeral); List<String> result = new LinkedList<>(); List<Instance> needUpdateInstance = findBatchUpdateInstance(instanceOperationInfo, service); for (Instance each : needUpdateInstance) { String metadataId = InstancePublishInfo.genMetadataId(each.getIp(), each.getPort(), each.getClusterName()); Optional<InstanceMetadata> instanceMetadata = metadataManager.getInstanceMetadata(service, metadataId); InstanceMetadata newMetadata = instanceMetadata.map(this::cloneMetadata).orElseGet(InstanceMetadata::new); newMetadata.getExtendData().putAll(metadata); metadataOperateService.updateInstanceMetadata(service, metadataId, newMetadata); result.add(each.toInetAddr() + ":" + UtilsAndCommons.LOCALHOST_SITE + ":" + each.getClusterName() + ":" + ( each.isEphemeral() ? UtilsAndCommons.EPHEMERAL : UtilsAndCommons.PERSIST)); } return result; }
@Test void testBatchUpdateMetadata() throws NacosException { Instance instance = new Instance(); instance.setServiceName("C"); instance.setIp("1.1.1.1"); instance.setPort(8848); ServiceInfo serviceInfo = new ServiceInfo(); serviceInfo.setHosts(Collections.singletonList(instance)); when(serviceStorage.getData(Mockito.any())).thenReturn(serviceInfo); InstanceOperationInfo instanceOperationInfo = new InstanceOperationInfo(); List<String> res = instanceOperatorClient.batchUpdateMetadata("A", instanceOperationInfo, new HashMap<>()); assertEquals(1, res.size()); }
private CommunityName(String name) { this.name = name; }
@Test public void testCommunityName() { CommunityName communityName = CommunityName.communityName(cName); assertNotNull("The CommunityName should not be null.", communityName); assertEquals("The name should match the expected value.", cName, communityName.name()); }
@Bean @ConditionalOnMissingBean(WebsocketCollector.class) public WebsocketCollector websocketCollector() { return new WebsocketCollector(); }
@Test public void testWebsocketCollector() { WebSocketSyncConfiguration websocketListener = new WebSocketSyncConfiguration(); assertNotNull(websocketListener.websocketCollector()); }
@Override public Iterable<GenericRow> transform( final K readOnlyKey, final GenericRow value, final KsqlProcessingContext ctx ) { if (value == null) { return null; } final List<Iterator<?>> iters = new ArrayList<>(tableFunctionAppliers.size()); int maxLength = 0; for (final TableFunctionApplier applier : tableFunctionAppliers) { final List<?> exploded = applier.apply(value, processingLogger); iters.add(exploded.iterator()); maxLength = Math.max(maxLength, exploded.size()); } final List<GenericRow> rows = new ArrayList<>(maxLength); for (int i = 0; i < maxLength; i++) { final GenericRow newRow = new GenericRow(value.values().size() + iters.size()); newRow.appendAll(value.values()); for (final Iterator<?> iter : iters) { if (iter.hasNext()) { newRow.append(iter.next()); } else { newRow.append(null); } } rows.add(newRow); } return rows; }
@Test
public void shouldZipTwoFunctions() {
  // Given:
  final TableFunctionApplier applier1 = createApplier(Arrays.asList(10, 10, 10));
  final TableFunctionApplier applier2 = createApplier(Arrays.asList(20, 20));
  final KudtfFlatMapper<String> flatMapper =
      new KudtfFlatMapper<>(ImmutableList.of(applier1, applier2), processingLogger);

  // When:
  final Iterable<GenericRow> iterable = flatMapper.transform(KEY, VALUE, ctx);

  // Then:
  final Iterator<GenericRow> iter = iterable.iterator();
  assertThat(iter.next().values(), is(Arrays.asList(1, 2, 3, 10, 20)));
  assertThat(iter.next().values(), is(Arrays.asList(1, 2, 3, 10, 20)));
  assertThat(iter.next().values(), is(Arrays.asList(1, 2, 3, 10, null)));
  assertThat(iter.hasNext(), is(false));
}
public static void rethrowIOException(Throwable cause) throws IOException { if (cause instanceof IOException) { throw (IOException) cause; } else if (cause instanceof RuntimeException) { throw (RuntimeException) cause; } else if (cause instanceof Error) { throw (Error) cause; } else { throw new IOException(cause.getMessage(), cause); } }
@Test public void testRethrowIOException() { IOException ioe = new IOException("test"); try { rethrowIOException(ioe); fail("Should rethrow IOException"); } catch (IOException e) { assertSame(ioe, e); } }
@SuppressWarnings({ "nullness" // TODO(https://github.com/apache/beam/issues/20497) }) public static TableReference parseTableSpec(String tableSpec) { Matcher match = BigQueryIO.TABLE_SPEC.matcher(tableSpec); if (!match.matches()) { throw new IllegalArgumentException( String.format( "Table specification [%s] is not in one of the expected formats (" + " [project_id]:[dataset_id].[table_id]," + " [project_id].[dataset_id].[table_id]," + " [dataset_id].[table_id])", tableSpec)); } TableReference ref = new TableReference(); ref.setProjectId(match.group("PROJECT")); return ref.setDatasetId(match.group("DATASET")).setTableId(match.group("TABLE")); }
@Test public void testTableParsingError() { thrown.expect(IllegalArgumentException.class); BigQueryHelpers.parseTableSpec("0123456:foo.bar"); }
public static InetAddress getLocalAddress() { if (LOCAL_ADDRESS != null) { return LOCAL_ADDRESS; } InetAddress localAddress = getLocalAddress0(); LOCAL_ADDRESS = localAddress; return localAddress; }
@Test void testGetLocalAddress() { InetAddress address = NetUtils.getLocalAddress(); assertNotNull(address); assertTrue(NetUtils.isValidLocalHost(address.getHostAddress())); }
public TemplateResponse mapToTemplateResponse(ReviewGroup reviewGroup, Template template) { List<SectionResponse> sectionResponses = template.getSectionIds() .stream() .map(templateSection -> mapToSectionResponse(templateSection, reviewGroup)) .toList(); return new TemplateResponse( template.getId(), reviewGroup.getReviewee(), reviewGroup.getProjectName(), sectionResponses ); }
@Test
void ์„น์…˜์˜_์„ ํƒ๋œ_์˜ต์…˜์ด_ํ•„์š”์—†๋Š”_๊ฒฝ์šฐ_์ œ๊ณตํ•˜์ง€_์•Š๋Š”๋‹ค() {
    // given
    Question question = new Question(true, QuestionType.TEXT, "์งˆ๋ฌธ", "๊ฐ€์ด๋“œ๋ผ์ธ", 1);
    questionRepository.save(question);
    Section section = new Section(VisibleType.ALWAYS, List.of(question.getId()), null, "์„น์…˜๋ช…", "๋ง๋จธ๋ฆฌ", 1);
    sectionRepository.save(section);
    Template template = new Template(List.of(section.getId()));
    templateRepository.save(template);
    ReviewGroup reviewGroup = new ReviewGroup("๋ฆฌ๋ทฐ์ด๋ช…", "ํ”„๋กœ์ ํŠธ๋ช…", "reviewRequestCode", "groupAccessCode");
    reviewGroupRepository.save(reviewGroup);

    // when
    TemplateResponse templateResponse = templateMapper.mapToTemplateResponse(reviewGroup, template);

    // then
    SectionResponse sectionResponse = templateResponse.sections().get(0);
    assertThat(sectionResponse.onSelectedOptionId()).isNull();
}
@VisibleForTesting void forceFreeMemory() { memoryManager.close(); }
@Test
public void testForceFreeMemory() throws Throwable {
    ArbitraryOutputBuffer buffer = createArbitraryBuffer(createInitialEmptyOutputBuffers(ARBITRARY), sizeOfPages(10));
    for (int i = 0; i < 3; i++) {
        addPage(buffer, createPage(i));
    }
    OutputBufferMemoryManager memoryManager = buffer.getMemoryManager();
    assertTrue(memoryManager.getBufferedBytes() > 0);
    buffer.forceFreeMemory();
    assertEquals(memoryManager.getBufferedBytes(), 0);
    // adding a page after forceFreeMemory() should be NOOP
    addPage(buffer, createPage(1));
    assertEquals(memoryManager.getBufferedBytes(), 0);
}
public static SqlDecimal of(final int precision, final int scale) { return new SqlDecimal(precision, scale); }
@Test(expected = SchemaException.class) public void shouldThrowIfScaleGreaterThanPrecision() { SqlDecimal.of(2, 3); }
@Override public void authorize(Permission permission, NacosUser nacosUser) throws AccessException { if (Loggers.AUTH.isDebugEnabled()) { Loggers.AUTH.debug("auth permission: {}, nacosUser: {}", permission, nacosUser); } if (nacosUser.isGlobalAdmin()) { return; } if (hasGlobalAdminRole(nacosUser)) { return; } if (!roleService.hasPermission(nacosUser, permission)) { throw new AccessException("authorization failed!"); } }
@Test void testAuthorize() { Permission permission = new Permission(); NacosUser nacosUser = new NacosUser(); when(roleService.hasPermission(nacosUser, permission)).thenReturn(false); assertThrows(AccessException.class, () -> { abstractAuthenticationManager.authorize(permission, nacosUser); }); }
public static BadRequestException invalidRoleTypeFormat(String format) { return new BadRequestException("invalid roleType format:%s", format); }
@Test public void testInvalidRoleTypeFormat() { BadRequestException invalidRoleTypeFormat = BadRequestException.invalidRoleTypeFormat("format"); assertEquals("invalid roleType format:format", invalidRoleTypeFormat.getMessage()); }
public HikariDataSource getDataSource() { return ds; }
@Test @Ignore public void testCustomMysqlDataSource() { DataSource ds = SingletonServiceFactory.getBean(CustomMysqlDataSource.class).getDataSource(); assertNotNull(ds); try(Connection connection = ds.getConnection()){ assertNotNull(connection); } catch (SQLException e) { e.printStackTrace(); } }
@Override public void close() throws Exception { if (running.compareAndSet(true, false)) { LOG.info("Closing {}.", this); curatorFramework.getConnectionStateListenable().removeListener(listener); Exception exception = null; try { treeCache.close(); } catch (Exception e) { exception = e; } try { leaderLatch.close(); } catch (Exception e) { exception = ExceptionUtils.firstOrSuppressed(e, exception); } ExceptionUtils.tryRethrowException(exception); } }
@Test
void testLeaderElectionWithMultipleDrivers() throws Exception {
    final CuratorFrameworkWithUnhandledErrorListener curatorFramework = startCuratorFramework();
    try {
        Set<ElectionDriver> electionDrivers =
                Stream.generate(
                                () -> createLeaderElectionDriver(
                                        curatorFramework.asCuratorFramework(),
                                        testingFatalErrorHandlerResource.getTestingFatalErrorHandler()))
                        .limit(3)
                        .collect(Collectors.toSet());

        while (!electionDrivers.isEmpty()) {
            final CompletableFuture<Object> anyLeader =
                    CompletableFuture.anyOf(
                            electionDrivers.stream()
                                    .map(ElectionDriver::getLeadershipFuture)
                                    .toArray(CompletableFuture[]::new));

            // wait for any leader
            anyLeader.join();

            final Map<Boolean, Set<ElectionDriver>> leaderAndRest =
                    electionDrivers.stream()
                            .collect(Collectors.partitioningBy(
                                    ElectionDriver::hasLeadership, Collectors.toSet()));
            assertThat(leaderAndRest.get(true)).hasSize(1);

            // TODO: remove this line after CURATOR-645 is resolved
            // CURATOR-645 covers a bug in the LeaderLatch implementation that causes a race
            // condition if a child node, participating in the leader election, is removed too
            // fast. This results in a different code branch being executed which triggers a
            // reset of the LeaderLatch instead of re-collecting the children to determine the
            // next leader.
            // The issue occurs because LeaderLatch#checkLeadership is not executed
            // transactionally, i.e. retrieving the children and setting up the watcher for the
            // predecessor is not done atomically. This leads to the race condition where a
            // child (the previous leader's node) is removed before setting up the watcher,
            // which results in an invalid handling of the situation using reset.
            // Adding some sleep here (simulating the leader actually doing something) will
            // reduce the risk of falling into the race condition because it will give the
            // concurrently running LeaderLatch instances more time to set up the watchers
            // properly.
            Thread.sleep(100);

            Iterables.getOnlyElement(leaderAndRest.get(true)).close();
            electionDrivers = leaderAndRest.get(false);
        }
    } finally {
        curatorFramework.close();
    }
}
@Override public void close() { for (ContextManagerLifecycleListener each : ShardingSphereServiceLoader.getServiceInstances(ContextManagerLifecycleListener.class)) { each.onDestroyed(this); } executorEngine.close(); metaDataContexts.get().close(); persistServiceFacade.getComputeNodePersistService().offline(computeNodeInstanceContext.getInstance()); persistServiceFacade.getRepository().close(); }
@Test void assertClose() { contextManager.close(); verify(metaDataContexts).close(); }
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
    try {
        URL indexResource = getServletContext().getResource("/index.html");
        String content = IOUtils.toString(indexResource, StandardCharsets.UTF_8); // read original content from resource
        if (bodyAddon != null) {
            if (content.contains(TAG_BODY_CLOSING)) {
                content = content.replace(TAG_BODY_CLOSING, bodyAddon + TAG_BODY_CLOSING);
            } else if (content.contains(TAG_HTML_CLOSING)) {
                content = content.replace(TAG_HTML_CLOSING, bodyAddon + TAG_HTML_CLOSING);
            } else {
                content = content + bodyAddon;
            }
        }
        // process head addon
        if (headAddon != null) {
            if (content.contains(TAG_HEAD_CLOSING)) {
                content = content.replace(TAG_HEAD_CLOSING, headAddon + TAG_HEAD_CLOSING);
            } else if (content.contains(TAG_BODY_OPENING)) {
                content = content.replace(TAG_BODY_OPENING, headAddon + TAG_BODY_OPENING);
            } else {
                LOGGER.error("Unable to process Head html addon. Could not find proper anchor in index.html.");
            }
        }
        resp.setContentType("text/html");
        resp.setStatus(HttpServletResponse.SC_OK);
        resp.getWriter().append(content);
    } catch (IOException e) {
        LOGGER.error("Error rendering index.html.", e);
    }
}
@Test @Disabled("ignored due to zeppelin-web-angular not build for core tests") void testZeppelinWebAngularHtmlAddon() throws IOException, ServletException { ZeppelinConfiguration zConf = mock(ZeppelinConfiguration.class); when(zConf.getHtmlBodyAddon()).thenReturn(TEST_BODY_ADDON); when(zConf.getHtmlHeadAddon()).thenReturn(TEST_HEAD_ADDON); ServletConfig sc = mock(ServletConfig.class); ServletContext ctx = mock(ServletContext.class); when(ctx.getResource("/index.html")) .thenReturn(new URL("file:" + FILE_PATH_INDEX_HTML_ZEPPELIN_WEB_ANGULAR)); when(sc.getServletContext()).thenReturn(ctx); IndexHtmlServlet servlet = new IndexHtmlServlet(zConf); servlet.init(sc); HttpServletResponse mockResponse = mock(HttpServletResponse.class); HttpServletRequest mockRequest = mock(HttpServletRequest.class); ByteArrayOutputStream out = new ByteArrayOutputStream(); PrintWriter writer = new PrintWriter(out); when(mockResponse.getWriter()).thenReturn(writer); servlet.doGet(mockRequest, mockResponse); writer.flush(); // Get Content String content = new String(out.toString()); assertThat(content, containsString(TEST_BODY_ADDON)); assertThat(content, containsString(TEST_HEAD_ADDON)); }
@Override public Optional<SoamId> createLm(MdId mdName, MaIdShort maName, MepId mepId, LossMeasurementCreate lm) throws CfmConfigException { throw new UnsupportedOperationException("Not yet implemented"); }
@Test
public void testCreateLm() throws CfmConfigException {
    //TODO: Implement underlying method
    try {
        soamManager.createLm(MDNAME1, MANAME1, MEPID1, null);
        fail("Expecting UnsupportedOperationException");
    } catch (UnsupportedOperationException e) {
    }
}
public List<StageConfig> validStagesForFetchArtifact(PipelineConfig downstreamPipeline, CaseInsensitiveString currentDownstreamStage) {
    for (DependencyMaterialConfig dependencyMaterial : downstreamPipeline.dependencyMaterialConfigs()) {
        if (dependencyMaterial.getPipelineName().equals(name)) {
            List<StageConfig> stageConfigs = allStagesBefore(dependencyMaterial.getStageName());
            stageConfigs.add(getStage(dependencyMaterial.getStageName())); // add this stage itself
            return stageConfigs;
        }
    }
    if (this.equals(downstreamPipeline)) {
        return allStagesBefore(currentDownstreamStage);
    }
    return null;
}
@Test public void shouldReturnStagesBeforeCurrentForSelectedPipeline() { PipelineConfig downstream = PipelineConfigMother.createPipelineConfigWithStages("downstream", "s1", "s2"); List<StageConfig> fetchableStages = downstream.validStagesForFetchArtifact(downstream, new CaseInsensitiveString("s2")); assertThat(fetchableStages.size(), is(1)); assertThat(fetchableStages, hasItem(downstream.get(0))); }
private void checkVersion() throws IOException { Version loadedVersion = loadVersion(); LOG.info("Loaded timeline store version info " + loadedVersion); if (loadedVersion.equals(getCurrentVersion())) { return; } if (loadedVersion.isCompatibleTo(getCurrentVersion())) { LOG.info("Storing timeline store version info " + getCurrentVersion()); dbStoreVersion(CURRENT_VERSION_INFO); } else { String incompatibleMessage = "Incompatible version for timeline store: " + "expecting version " + getCurrentVersion() + ", but loading version " + loadedVersion; LOG.error(incompatibleMessage); throw new IOException(incompatibleMessage); } }
@Test
void testCheckVersion() throws IOException {
    RollingLevelDBTimelineStore dbStore = (RollingLevelDBTimelineStore) store;
    // default version
    Version defaultVersion = dbStore.getCurrentVersion();
    assertEquals(defaultVersion, dbStore.loadVersion());
    // compatible version
    Version compatibleVersion =
        Version.newInstance(defaultVersion.getMajorVersion(),
            defaultVersion.getMinorVersion() + 2);
    dbStore.storeVersion(compatibleVersion);
    assertEquals(compatibleVersion, dbStore.loadVersion());
    restartTimelineStore();
    dbStore = (RollingLevelDBTimelineStore) store;
    // overwrite the compatible version
    assertEquals(defaultVersion, dbStore.loadVersion());
    // incompatible version
    Version incompatibleVersion =
        Version.newInstance(defaultVersion.getMajorVersion() + 1,
            defaultVersion.getMinorVersion());
    dbStore.storeVersion(incompatibleVersion);
    try {
        restartTimelineStore();
        fail("Incompatible version, should expect fail here.");
    } catch (ServiceStateException e) {
        assertTrue(e.getMessage().contains("Incompatible version for timeline store"),
            "Exception message mismatch");
    }
}
public static String findFirstUniqueAndStableStanzaID( final Packet packet, final String by ) { if ( packet == null ) { throw new IllegalArgumentException( "Argument 'packet' cannot be null." ); } if ( by == null || by.isEmpty() ) { throw new IllegalArgumentException( "Argument 'by' cannot be null or an empty string." ); } final List<Element> sids = packet.getElement().elements( QName.get( "stanza-id", "urn:xmpp:sid:0" ) ); if ( sids == null ) { return null; } for ( final Element sid : sids ) { if ( by.equals( sid.attributeValue( "by" ) ) ) { final String result = sid.attributeValue( "id" ); if ( result != null && !result.isEmpty() ) { return result; } } } return null; }
@Test
public void testParseNonUUIDValue() throws Exception {
    // Setup fixture.
    final Packet input = new Message();
    final JID self = new JID( "foobar" );
    final String expected = "not-a-uuid";
    final Element toOverwrite = input.getElement().addElement( "stanza-id", "urn:xmpp:sid:0" );
    toOverwrite.addAttribute( "id", expected );
    toOverwrite.addAttribute( "by", self.toString() );

    // Execute system under test.
    final String result = StanzaIDUtil.findFirstUniqueAndStableStanzaID( input, self.toString() );

    // Verify results.
    assertEquals( expected, result );
}
@Override public boolean replace(K key, long expectedOldValue, long newValue) { return complete(asyncCounterMap.replace(key, expectedOldValue, newValue)); }
@Test public void testReplace() { atomicCounterMap.putIfAbsent(KEY1, VALUE1); boolean replaced = atomicCounterMap.replace(KEY1, VALUE1, VALUE1 * 2); assertThat(replaced, is(true)); Long afterReplace = atomicCounterMap.get(KEY1); assertThat(afterReplace, is(VALUE1 * 2)); boolean notReplaced = atomicCounterMap.replace(KEY1, VALUE1, VALUE1 * 2); assertThat(notReplaced, is(false)); Long afterNotReplaced = atomicCounterMap.get(KEY1); assertThat(afterNotReplaced, is(VALUE1 * 2)); }
static String getRelativeFileInternal(File canonicalBaseFile, File canonicalFileToRelativize) {
    List<String> basePath = getPathComponents(canonicalBaseFile);
    List<String> pathToRelativize = getPathComponents(canonicalFileToRelativize);

    //if the roots aren't the same (i.e. different drives on a windows machine), we can't construct a relative
    //path from one to the other, so just return the canonical file
    if (!basePath.get(0).equals(pathToRelativize.get(0))) {
        return canonicalFileToRelativize.getPath();
    }

    int commonDirs;
    StringBuilder sb = new StringBuilder();
    for (commonDirs = 1; commonDirs < basePath.size() && commonDirs < pathToRelativize.size(); commonDirs++) {
        if (!basePath.get(commonDirs).equals(pathToRelativize.get(commonDirs))) {
            break;
        }
    }

    boolean first = true;
    for (int i = commonDirs; i < basePath.size(); i++) {
        if (!first) {
            sb.append(File.separatorChar);
        } else {
            first = false;
        }
        sb.append("..");
    }

    first = true;
    for (int i = commonDirs; i < pathToRelativize.size(); i++) {
        if (first) {
            if (sb.length() != 0) {
                sb.append(File.separatorChar);
            }
            first = false;
        } else {
            sb.append(File.separatorChar);
        }
        sb.append(pathToRelativize.get(i));
    }

    if (sb.length() == 0) {
        return ".";
    }
    return sb.toString();
}
@Test public void pathUtilTest7() { File[] roots = File.listRoots(); File basePath = new File(roots[0] + "some"); File relativePath = new File(roots[0] + "some" + File.separatorChar + "dir"); String path = PathUtil.getRelativeFileInternal(basePath, relativePath); Assert.assertEquals(path, "dir"); }
public static int read(final AtomicBuffer buffer, final ErrorConsumer consumer) { return read(buffer, consumer, 0); }
@Test void shouldReadFirstObservation() { final ErrorConsumer consumer = mock(ErrorConsumer.class); final long timestamp = 7; final RuntimeException error = new RuntimeException("Test Error"); when(clock.time()).thenReturn(timestamp); log.record(error); assertThat(ErrorLogReader.read(buffer, consumer), is(1)); verify(consumer).accept(eq(1), eq(timestamp), eq(timestamp), any(String.class)); }
@Override
public Optional<RegistryAuthenticator> handleHttpResponseException(ResponseException responseException)
    throws ResponseException, RegistryErrorException {
  // Only valid for status code of '401 Unauthorized'.
  if (responseException.getStatusCode() != HttpStatusCodes.STATUS_CODE_UNAUTHORIZED) {
    throw responseException;
  }

  // Checks if the 'WWW-Authenticate' header is present.
  String authenticationMethod = responseException.getHeaders().getAuthenticate();
  if (authenticationMethod == null) {
    throw new RegistryErrorExceptionBuilder(getActionDescription(), responseException)
        .addReason("'WWW-Authenticate' header not found")
        .build();
  }

  // Parses the header to retrieve the components.
  try {
    return RegistryAuthenticator.fromAuthenticationMethod(
        authenticationMethod, registryEndpointRequestProperties, userAgent, httpClient);
  } catch (RegistryAuthenticationFailedException ex) {
    throw new RegistryErrorExceptionBuilder(getActionDescription(), ex)
        .addReason("Failed get authentication method from 'WWW-Authenticate' header")
        .build();
  }
}
@Test public void testHandleHttpResponseException_badAuthenticationMethod() throws ResponseException { String authenticationMethod = "bad authentication method"; Mockito.when(mockResponseException.getStatusCode()) .thenReturn(HttpStatusCodes.STATUS_CODE_UNAUTHORIZED); Mockito.when(mockResponseException.getHeaders()).thenReturn(mockHeaders); Mockito.when(mockHeaders.getAuthenticate()).thenReturn(authenticationMethod); try { testAuthenticationMethodRetriever.handleHttpResponseException(mockResponseException); Assert.fail( "Authentication method retriever should fail if 'WWW-Authenticate' header failed to parse"); } catch (RegistryErrorException ex) { MatcherAssert.assertThat( ex.getMessage(), CoreMatchers.containsString( "Failed get authentication method from 'WWW-Authenticate' header")); } }
@Override public Set<OAuth2AccessTokenEntity> getAllAccessTokensForUser(String userName) { return tokenRepository.getAccessTokensByUserName(userName); }
@Test public void getAllAccessTokensForUser(){ when(tokenRepository.getAccessTokensByUserName(userName)).thenReturn(newHashSet(accessToken)); Set<OAuth2AccessTokenEntity> tokens = service.getAllAccessTokensForUser(userName); assertEquals(1, tokens.size()); assertTrue(tokens.contains(accessToken)); }
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
    if(file.isRoot()) {
        return PathAttributes.EMPTY;
    }
    try {
        if(containerService.isContainer(file)) {
            final PathAttributes attributes = new PathAttributes();
            final CloudBlobContainer container = session.getClient().getContainerReference(containerService.getContainer(file).getName());
            container.downloadAttributes(null, null, context);
            final BlobContainerProperties properties = container.getProperties();
            attributes.setETag(properties.getEtag());
            attributes.setModificationDate(properties.getLastModified().getTime());
            return attributes;
        }
        if(file.isFile() || file.isPlaceholder()) {
            try {
                final CloudBlob blob = session.getClient().getContainerReference(containerService.getContainer(file).getName())
                        .getBlobReferenceFromServer(containerService.getKey(file));
                final BlobRequestOptions options = new BlobRequestOptions();
                blob.downloadAttributes(AccessCondition.generateEmptyCondition(), options, context);
                return this.toAttributes(blob);
            }
            catch(StorageException e) {
                switch(e.getHttpStatusCode()) {
                    case HttpStatus.SC_NOT_FOUND:
                        if(file.isPlaceholder()) {
                            // Ignore failure and look for common prefix
                            break;
                        }
                    default:
                        throw e;
                }
            }
        }
        // Check for common prefix
        try {
            new AzureObjectListService(session, context).list(file, new CancellingListProgressListener());
            return PathAttributes.EMPTY;
        }
        catch(ListCanceledException l) {
            // Found common prefix
            return PathAttributes.EMPTY;
        }
    }
    catch(StorageException e) {
        throw new AzureExceptionMappingService().map("Failure to read attributes of {0}", e, file);
    }
    catch(URISyntaxException e) {
        throw new NotfoundException(e.getMessage(), e);
    }
}
@Test public void testFindRoot() throws Exception { final AzureAttributesFinderFeature f = new AzureAttributesFinderFeature(session, null); assertEquals(PathAttributes.EMPTY, f.find(new Path("/", EnumSet.of(Path.Type.directory)))); }
public double getX() { return position.x(); }
@Test public void testGetX() throws Exception { World world = mock(World.class); Location location = new Location(world, Vector3.at(TEST_VALUE, 0, 0)); assertEquals(TEST_VALUE, location.getX(), EPSILON); }
public Map<String, Map<InetSocketAddress, ChannelInitializer<SocketChannel>>> newChannelInitializers() { Map<String, Map<InetSocketAddress, ChannelInitializer<SocketChannel>>> channelInitializers = new HashMap<>(); Set<InetSocketAddress> addresses = new HashSet<>(); for (Map.Entry<String, ProtocolHandlerWithClassLoader> handler : handlers.entrySet()) { Map<InetSocketAddress, ChannelInitializer<SocketChannel>> initializers = handler.getValue().newChannelInitializers(); initializers.forEach((address, initializer) -> { if (!addresses.add(address)) { log.error("Protocol handler for `{}` attempts to use {} for its listening port." + " But it is already occupied by other message protocols.", handler.getKey(), address); throw new RuntimeException("Protocol handler for `" + handler.getKey() + "` attempts to use " + address + " for its listening port. But it is" + " already occupied by other messaging protocols"); } channelInitializers.put(handler.getKey(), initializers); endpoints.put(address, handler.getKey()); }); } return channelInitializers; }
@Test(expectedExceptions = RuntimeException.class) public void testNewChannelInitializersOverlapped() { ChannelInitializer<SocketChannel> i1 = mock(ChannelInitializer.class); ChannelInitializer<SocketChannel> i2 = mock(ChannelInitializer.class); Map<InetSocketAddress, ChannelInitializer<SocketChannel>> p1Initializers = new HashMap<>(); p1Initializers.put(new InetSocketAddress("127.0.0.1", 6650), i1); p1Initializers.put(new InetSocketAddress("127.0.0.2", 6651), i2); ChannelInitializer<SocketChannel> i3 = mock(ChannelInitializer.class); ChannelInitializer<SocketChannel> i4 = mock(ChannelInitializer.class); Map<InetSocketAddress, ChannelInitializer<SocketChannel>> p2Initializers = new HashMap<>(); p2Initializers.put(new InetSocketAddress("127.0.0.1", 6650), i3); p2Initializers.put(new InetSocketAddress("127.0.0.4", 6651), i4); when(handler1.newChannelInitializers()).thenReturn(p1Initializers); when(handler2.newChannelInitializers()).thenReturn(p2Initializers); handlers.newChannelInitializers(); }
public static Config resolve(Config config) { var resolveSystemProperty = System.getenv("KORA_SYSTEM_PROPERTIES_RESOLVE_ENABLED"); if (resolveSystemProperty == null) { resolveSystemProperty = System.getProperty("kora.system.properties.resolve.enabled", "true"); } var ctx = new ResolveContext(config, new ArrayDeque<>(), Boolean.parseBoolean(resolveSystemProperty)); var newRoot = resolve(ctx, config.root()); if (newRoot == config.root()) { return config; } return new SimpleConfig(config.origin(), newRoot); }
@Test void testResolveReference() { var config = fromMap(Map.of( "object", Map.of( "field", "test-value" ), "reference", "${object.field}" )).resolve(); assertThat(config.get("reference").asString()).isEqualTo("test-value"); }
public CqlSessionSelectResult tableDetail(String clusterId, TableDTO.ClusterTableGetArgs args) { CqlSession session = cqlSessionFactory.get(clusterId); int limit = 1; SimpleStatement statement = ClusterUtils.getSchemaTables(session, args.getKeyspace()) .all() .whereColumn(CassandraSystemTablesColumn.TABLES_KEYSPACE_NAME.getColumnName()).isEqualTo(bindMarker()) .whereColumn(CassandraSystemTablesColumn.TABLES_TABLE_NAME.getColumnName()).isEqualTo(bindMarker()) .limit(limit) .build(args.getKeyspace(), args.getTable()) .setPageSize(limit) .setTimeout(Duration.ofSeconds(3)); ResultSet resultSet = session.execute(statement); ColumnDefinitions definitions = resultSet.getColumnDefinitions(); Row row = resultSet.one(); if (row == null) { throw new ClusterTableException.ClusterTableNotFoundException(String.format("not found table(%s)", args.getTable())); } return CqlSessionSelectResult.builder() .row(convertRow(session.getContext().getCodecRegistry(), definitions, row)) .rowHeader(CassdioColumnDefinition.makes(definitions)) .build(); }
@Test
@Disabled //TODO: needs revision
void get_table_in_keyspace() {
    ClusterTableGetArgs args = ClusterTableGetArgs.builder()
        .keyspace(keyspaceName)
        .table("test_table_1")
        .build();

    // when
    CqlSessionSelectResult sut = clusterTableGetCommander.tableDetail(CLUSTER_ID, args);

    // then
//        assertThat(sut.getTableDescribe()).isNotBlank();
//
//        assertThat(sut.getTable().getTableName()).isEqualTo("test_table_1");
//        assertThat(sut.getTable().getComment()).isEqualTo("test_table_one");
//        assertThat(sut.getTable().getOptions()).containsEntry("bloom_filter_fp_chance", 0.01);
//
//        assertThat(sut.getColumns()).hasSize(5);
//        assertThat(sut.getColumns().getFirst().getName()).isEqualTo("partition_key_1");
//        assertThat(sut.getColumns().getFirst().getDataType()).isEqualTo("text");
//        assertThat(sut.getColumns().getFirst().getClusteringOrder()).isEqualTo(ColumnClusteringOrder.NONE);
//        assertThat(sut.getColumns().getFirst().getKind()).isEqualTo(ColumnKind.PARTITION_KEY);
//
//        assertThat(sut.getColumns().get(1).getName()).isEqualTo("partition_key_2");
//        assertThat(sut.getColumns().get(1).getDataType()).isEqualTo("bigint");
//        assertThat(sut.getColumns().get(1).getClusteringOrder()).isEqualTo(ColumnClusteringOrder.NONE);
//        assertThat(sut.getColumns().get(1).getKind()).isEqualTo(ColumnKind.PARTITION_KEY);
//
//        assertThat(sut.getColumns().get(2).getName()).isEqualTo("clustering_key_1");
//        assertThat(sut.getColumns().get(2).getDataType()).isEqualTo("bigint");
//        assertThat(sut.getColumns().get(2).getClusteringOrder()).isEqualTo(ColumnClusteringOrder.DESC);
//        assertThat(sut.getColumns().get(2).getKind()).isEqualTo(ColumnKind.CLUSTERING);
//
//        assertThat(sut.getColumns().get(3).getName()).isEqualTo("clustering_key_2");
//        assertThat(sut.getColumns().get(3).getDataType()).isEqualTo("text");
//        assertThat(sut.getColumns().get(3).getClusteringOrder()).isEqualTo(ColumnClusteringOrder.ASC);
//        assertThat(sut.getColumns().get(3).getKind()).isEqualTo(ColumnKind.CLUSTERING);
//
//        assertThat(sut.getColumns().get(4).getName()).isEqualTo("column_1");
//        assertThat(sut.getColumns().get(4).getDataType()).isEqualTo("text");
//        assertThat(sut.getColumns().get(4).getClusteringOrder()).isEqualTo(ColumnClusteringOrder.NONE);
//        assertThat(sut.getColumns().get(4).getKind()).isEqualTo(ColumnKind.REGULAR);
}
@Path("batch") @POST public Response batchReplication(ReplicationList replicationList) { try { ReplicationListResponse batchResponse = new ReplicationListResponse(); for (ReplicationInstance instanceInfo : replicationList.getReplicationList()) { try { batchResponse.addResponse(dispatch(instanceInfo)); } catch (Exception e) { batchResponse.addResponse(new ReplicationInstanceResponse(Status.INTERNAL_SERVER_ERROR.getStatusCode(), null)); logger.error("{} request processing failed for batch item {}/{}", instanceInfo.getAction(), instanceInfo.getAppName(), instanceInfo.getId(), e); } } return Response.ok(batchResponse).build(); } catch (Throwable e) { logger.error("Cannot execute batch Request", e); return Response.status(Status.INTERNAL_SERVER_ERROR).build(); } }
@Test public void testCancelBatching() throws Exception { when(instanceResource.cancelLease(anyString())).thenReturn(Response.ok().build()); ReplicationList replicationList = new ReplicationList(newReplicationInstanceOf(Action.Cancel, instanceInfo)); Response response = peerReplicationResource.batchReplication(replicationList); assertStatusOkReply(response); verify(instanceResource, times(1)).cancelLease("true"); }
@CanIgnoreReturnValue public final Ordered containsAtLeast( @Nullable Object k0, @Nullable Object v0, @Nullable Object... rest) { return containsAtLeastEntriesIn(accumulateMultimap(k0, v0, rest)); }
@Test public void containsAtLeastVarargInOrderFailure() { ImmutableMultimap<Integer, String> actual = ImmutableMultimap.of(3, "one", 3, "six", 3, "two", 4, "five", 4, "four"); assertThat(actual).containsAtLeast(4, "four", 3, "six", 3, "two", 3, "one"); expectFailureWhenTestingThat(actual) .containsAtLeast(4, "four", 3, "six", 3, "two", 3, "one") .inOrder(); assertFailureKeys( "contents match, but order was wrong", "keys are not in order", "keys with out-of-order values", "---", "expected to contain at least", "but was"); assertFailureValue("keys with out-of-order values", "[3]"); assertFailureValue("expected to contain at least", "{4=[four], 3=[six, two, one]}"); assertFailureValue("but was", "{3=[one, six, two], 4=[five, four]}"); }
@Override public void v(String tag, String message, Object... args) { Log.v(tag, formatString(message, args)); }
@Test public void verboseLoggedCorrectly() { String expectedMessage = "Hello World"; logger.v(tag, "Hello %s", "World"); assertLogged(VERBOSE, tag, expectedMessage, null); }
public static List<ArtifactInformation> getArtifacts(List<String> stagingFiles) {
  ImmutableList.Builder<ArtifactInformation> artifactsBuilder = ImmutableList.builder();
  Set<String> deduplicatedStagingFiles = new LinkedHashSet<>(stagingFiles);
  for (String path : deduplicatedStagingFiles) {
    File file;
    String stagedName = null;
    if (path.contains("=")) {
      String[] components = path.split("=", 2);
      file = new File(components[1]);
      stagedName = components[0];
    } else {
      file = new File(path);
    }

    // Spurious items get added to the classpath, but ignoring silently can cause confusion.
    // Therefore, issue logs if a file does not exist before ignoring. The level will be warning
    // if they have a staged name, as those are likely to cause problems or unintended behavior
    // (e.g., dataflow-worker.jar, windmill_main).
    if (!file.exists()) {
      if (stagedName != null) {
        LOG.warn(
            "Stage Artifact '{}' with the name '{}' was not found, staging will be ignored.",
            file,
            stagedName);
      } else {
        LOG.info("Stage Artifact '{}' was not found, staging will be ignored.", file);
      }
      continue;
    }

    ArtifactInformation.Builder artifactBuilder = ArtifactInformation.newBuilder();
    artifactBuilder.setTypeUrn(BeamUrns.getUrn(StandardArtifacts.Types.FILE));
    artifactBuilder.setRoleUrn(BeamUrns.getUrn(StandardArtifacts.Roles.STAGING_TO));
    HashCode hashCode;
    if (file.isDirectory()) {
      File zippedFile;
      try {
        zippedFile = zipDirectory(file);
        hashCode = Files.asByteSource(zippedFile).hash(Hashing.sha256());
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
      artifactBuilder.setTypePayload(
          RunnerApi.ArtifactFilePayload.newBuilder()
              .setPath(zippedFile.getPath())
              .setSha256(hashCode.toString())
              .build()
              .toByteString());
    } else {
      try {
        hashCode = Files.asByteSource(file).hash(Hashing.sha256());
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
      artifactBuilder.setTypePayload(
          RunnerApi.ArtifactFilePayload.newBuilder()
              .setPath(file.getPath())
              .setSha256(hashCode.toString())
              .build()
              .toByteString());
    }
    if (stagedName == null) {
      stagedName = createStagingFileName(file, hashCode);
    }
    artifactBuilder.setRolePayload(
        RunnerApi.ArtifactStagingToRolePayload.newBuilder()
            .setStagedName(stagedName)
            .build()
            .toByteString());
    artifactsBuilder.add(artifactBuilder.build());
  }
  return artifactsBuilder.build();
}
@Test public void testGetArtifactsBadFileLogsInfo() throws Exception { File file1 = File.createTempFile("file1-", ".txt"); file1.deleteOnExit(); List<ArtifactInformation> artifacts = Environments.getArtifacts(ImmutableList.of(file1.getAbsolutePath(), "spurious_file")); assertThat(artifacts, hasSize(1)); expectedLogs.verifyInfo("'spurious_file' was not found"); }
public Map<ReservationInterval, Resource> toIntervalMap() {
  readLock.lock();
  try {
    Map<ReservationInterval, Resource> allocations =
        new TreeMap<ReservationInterval, Resource>();
    // Empty
    if (isEmpty()) {
      return allocations;
    }
    Map.Entry<Long, Resource> lastEntry = null;
    for (Map.Entry<Long, Resource> entry : cumulativeCapacity.entrySet()) {
      if (lastEntry != null && entry.getValue() != null) {
        ReservationInterval interval =
            new ReservationInterval(lastEntry.getKey(), entry.getKey());
        Resource resource = lastEntry.getValue();
        allocations.put(interval, resource);
      }
      lastEntry = entry;
    }
    return allocations;
  } finally {
    readLock.unlock();
  }
}
@Test
public void testToIntervalMap() {
  ResourceCalculator resCalc = new DefaultResourceCalculator();
  RLESparseResourceAllocation rleSparseVector =
      new RLESparseResourceAllocation(resCalc);
  Map<ReservationInterval, Resource> mapAllocations;
  // Check empty
  mapAllocations = rleSparseVector.toIntervalMap();
  Assert.assertTrue(mapAllocations.isEmpty());
  // Check full
  int[] alloc = { 0, 5, 10, 10, 5, 0, 5, 0 };
  int start = 100;
  Set<Entry<ReservationInterval, Resource>> inputs =
      generateAllocation(start, alloc, false).entrySet();
  for (Entry<ReservationInterval, Resource> ip : inputs) {
    rleSparseVector.addInterval(ip.getKey(), ip.getValue());
  }
  mapAllocations = rleSparseVector.toIntervalMap();
  Assert.assertTrue(mapAllocations.size() == 5);
  for (Entry<ReservationInterval, Resource> entry : mapAllocations.entrySet()) {
    ReservationInterval interval = entry.getKey();
    Resource resource = entry.getValue();
    if (interval.getStartTime() == 101L) {
      Assert.assertTrue(interval.getEndTime() == 102L);
      Assert.assertEquals(resource, Resource.newInstance(5 * 1024, 5));
    } else if (interval.getStartTime() == 102L) {
      Assert.assertTrue(interval.getEndTime() == 104L);
      Assert.assertEquals(resource, Resource.newInstance(10 * 1024, 10));
    } else if (interval.getStartTime() == 104L) {
      Assert.assertTrue(interval.getEndTime() == 105L);
      Assert.assertEquals(resource, Resource.newInstance(5 * 1024, 5));
    } else if (interval.getStartTime() == 105L) {
      Assert.assertTrue(interval.getEndTime() == 106L);
      Assert.assertEquals(resource, Resource.newInstance(0 * 1024, 0));
    } else if (interval.getStartTime() == 106L) {
      Assert.assertTrue(interval.getEndTime() == 107L);
      Assert.assertEquals(resource, Resource.newInstance(5 * 1024, 5));
    } else {
      Assert.fail();
    }
  }
}
public static MapperReference getMapper() { return MAPPER_REFERENCE.get(); }
@Test public void testMetricsMixIn() { ObjectMapper objectMapper = ObjectMapperFactory.getMapper().getObjectMapper(); try { Metrics metrics = new Metrics(); String json = objectMapper.writeValueAsString(metrics); Assert.assertTrue(json.contains("dimensions")); } catch (Exception ex) { Assert.fail("shouldn't have thrown exception", ex); } }
@Override
protected Pair<Option<Dataset<Row>>, String> fetchNextBatch(Option<String> lastCkptStr, long sourceLimit) {
  Dataset<Row> rows = null;
  final FileSystem fs = HadoopFSUtils.getFs(sourceSqlFile, sparkContext.hadoopConfiguration(), true);
  try {
    final Scanner scanner = new Scanner(fs.open(new Path(sourceSqlFile)));
    scanner.useDelimiter(";");
    while (scanner.hasNext()) {
      String sqlStr = scanner.next().trim();
      if (!sqlStr.isEmpty()) {
        LOG.info(sqlStr);
        // overwrite the same dataset object until the last statement then return.
        rows = sparkSession.sql(sqlStr);
      }
    }
    return Pair.of(Option.of(rows), shouldEmitCheckPoint ? String.valueOf(System.currentTimeMillis()) : null);
  } catch (IOException ioe) {
    throw new HoodieIOException("Error reading source SQL file.", ioe);
  }
}
@Test public void shouldSetCheckpointForSqlFileBasedSourceWithEpochCheckpoint() throws IOException { UtilitiesTestBase.Helpers.copyToDFS( "streamer-config/sql-file-based-source.sql", storage, UtilitiesTestBase.basePath + "/sql-file-based-source.sql"); props.setProperty(sqlFileSourceConfig, UtilitiesTestBase.basePath + "/sql-file-based-source.sql"); props.setProperty(sqlFileSourceConfigEmitChkPointConf, "true"); sqlFileSource = new SqlFileBasedSource(props, jsc, sparkSession, schemaProvider); Pair<Option<Dataset<Row>>, String> nextBatch = sqlFileSource.fetchNextBatch(Option.empty(), Long.MAX_VALUE); assertEquals(10000, nextBatch.getLeft().get().count()); long currentTimeInMillis = System.currentTimeMillis(); long checkpointToBeUsed = Long.parseLong(nextBatch.getRight()); assertTrue((currentTimeInMillis - checkpointToBeUsed) / 1000 < 60); assertTrue(currentTimeInMillis > checkpointToBeUsed); }
public static List<AclEntry> mergeAclEntries(List<AclEntry> existingAcl,
    List<AclEntry> inAclSpec) throws AclException {
  ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec);
  ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES);
  List<AclEntry> foundAclSpecEntries =
      Lists.newArrayListWithCapacity(MAX_ENTRIES);
  EnumMap<AclEntryScope, AclEntry> providedMask =
      Maps.newEnumMap(AclEntryScope.class);
  EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class);
  EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class);
  for (AclEntry existingEntry: existingAcl) {
    AclEntry aclSpecEntry = aclSpec.findByKey(existingEntry);
    if (aclSpecEntry != null) {
      foundAclSpecEntries.add(aclSpecEntry);
      scopeDirty.add(aclSpecEntry.getScope());
      if (aclSpecEntry.getType() == MASK) {
        providedMask.put(aclSpecEntry.getScope(), aclSpecEntry);
        maskDirty.add(aclSpecEntry.getScope());
      } else {
        aclBuilder.add(aclSpecEntry);
      }
    } else {
      if (existingEntry.getType() == MASK) {
        providedMask.put(existingEntry.getScope(), existingEntry);
      } else {
        aclBuilder.add(existingEntry);
      }
    }
  }
  // ACL spec entries that were not replacements are new additions.
  for (AclEntry newEntry: aclSpec) {
    if (Collections.binarySearch(foundAclSpecEntries, newEntry,
        ACL_ENTRY_COMPARATOR) < 0) {
      scopeDirty.add(newEntry.getScope());
      if (newEntry.getType() == MASK) {
        providedMask.put(newEntry.getScope(), newEntry);
        maskDirty.add(newEntry.getScope());
      } else {
        aclBuilder.add(newEntry);
      }
    }
  }
  copyDefaultsIfNeeded(aclBuilder);
  calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty);
  return buildAndValidateAcl(aclBuilder);
}
@Test public void testMergeAclEntries() throws AclException { List<AclEntry> existing = new ImmutableList.Builder<AclEntry>() .add(aclEntry(ACCESS, USER, ALL)) .add(aclEntry(ACCESS, GROUP, READ_EXECUTE)) .add(aclEntry(ACCESS, OTHER, NONE)) .build(); List<AclEntry> aclSpec = Lists.newArrayList( aclEntry(ACCESS, USER, "bruce", ALL)); List<AclEntry> expected = new ImmutableList.Builder<AclEntry>() .add(aclEntry(ACCESS, USER, ALL)) .add(aclEntry(ACCESS, USER, "bruce", ALL)) .add(aclEntry(ACCESS, GROUP, READ_EXECUTE)) .add(aclEntry(ACCESS, MASK, ALL)) .add(aclEntry(ACCESS, OTHER, NONE)) .build(); assertEquals(expected, mergeAclEntries(existing, aclSpec)); }
@Override public ResultSet getSchemas() throws SQLException { return createDatabaseMetaDataResultSet(getDatabaseMetaData().getSchemas()); }
@Test void assertGetSchemasForCatalogAndSchemaPattern() throws SQLException { when(databaseMetaData.getSchemas("test", null)).thenReturn(resultSet); assertThat(shardingSphereDatabaseMetaData.getSchemas("test", null), instanceOf(DatabaseMetaDataResultSet.class)); }
public void sendMessage(final Account account, final Device device, final Envelope message, final boolean online) {
  final String channel;
  if (device.getGcmId() != null) {
    channel = "gcm";
  } else if (device.getApnId() != null) {
    channel = "apn";
  } else if (device.getFetchesMessages()) {
    channel = "websocket";
  } else {
    channel = "none";
  }

  final boolean clientPresent;

  if (online) {
    clientPresent = clientPresenceManager.isPresent(account.getUuid(), device.getId());

    if (clientPresent) {
      messagesManager.insert(account.getUuid(), device.getId(), message.toBuilder().setEphemeral(true).build());
    }
  } else {
    messagesManager.insert(account.getUuid(), device.getId(), message);

    // We check for client presence after inserting the message to take a conservative view of notifications. If the
    // client wasn't present at the time of insertion but is now, they'll retrieve the message. If they were present
    // but disconnected before the message was delivered, we should send a notification.
    clientPresent = clientPresenceManager.isPresent(account.getUuid(), device.getId());

    if (!clientPresent) {
      try {
        pushNotificationManager.sendNewMessageNotification(account, device.getId(), message.getUrgent());
      } catch (final NotPushRegisteredException ignored) {
      }
    }
  }

  Metrics.counter(SEND_COUNTER_NAME,
      CHANNEL_TAG_NAME, channel,
      EPHEMERAL_TAG_NAME, String.valueOf(online),
      CLIENT_ONLINE_TAG_NAME, String.valueOf(clientPresent),
      URGENT_TAG_NAME, String.valueOf(message.getUrgent()),
      STORY_TAG_NAME, String.valueOf(message.getStory()),
      SEALED_SENDER_TAG_NAME, String.valueOf(!message.hasSourceUuid()))
      .increment();
}
@Test void testSendMessageApnClientNotPresent() throws Exception { when(clientPresenceManager.isPresent(ACCOUNT_UUID, DEVICE_ID)).thenReturn(false); when(device.getApnId()).thenReturn("apn-id"); messageSender.sendMessage(account, device, message, false); verify(messagesManager).insert(ACCOUNT_UUID, DEVICE_ID, message); verify(pushNotificationManager).sendNewMessageNotification(account, device.getId(), message.getUrgent()); }
static KafkaNodePoolTemplate convertTemplate(KafkaClusterTemplate template) { if (template != null) { return new KafkaNodePoolTemplateBuilder() .withPodSet(template.getPodSet()) .withPod(template.getPod()) .withPerPodService(template.getPerPodService()) .withPerPodRoute(template.getPerPodRoute()) .withPerPodIngress(template.getPerPodIngress()) .withPersistentVolumeClaim(template.getPersistentVolumeClaim()) .withKafkaContainer(template.getKafkaContainer()) .withInitContainer(template.getInitContainer()) .build(); } else { return null; } }
@Test public void testConvertNullTemplate() { assertThat(VirtualNodePoolConverter.convertTemplate(null), is(nullValue())); }
@Override public void open(Map<String, Object> map, SinkContext sinkContext) throws Exception { try { val configV2 = InfluxDBSinkConfig.load(map, sinkContext); configV2.validate(); sink = new InfluxDBSink(); } catch (Exception e) { try { val configV1 = org.apache.pulsar.io.influxdb.v1.InfluxDBSinkConfig.load(map, sinkContext); configV1.validate(); sink = new org.apache.pulsar.io.influxdb.v1.InfluxDBGenericRecordSink(); } catch (Exception e1) { throw new Exception("For InfluxDB V2: \n" + e.toString() + "\n" + "For InfluxDB V1: \n" + e1.toString()); } } sink.open(map, sinkContext); }
@Test
public void openInfluxV1() throws Exception {
    Map<String, Object> map = new HashMap<>();
    map.put("influxdbUrl", "http://localhost:8086");
    map.put("database", "test_db");
    InfluxDBGenericRecordSink sink = new InfluxDBGenericRecordSink();
    try {
        sink.open(map, mock(SinkContext.class));
    } catch (InfluxDBIOException e) {
        // Do nothing
    }
    assertTrue(sink.sink instanceof org.apache.pulsar.io.influxdb.v1.InfluxDBGenericRecordSink);
}
public static Type fromHudiType(Schema avroSchema) {
    Schema.Type columnType = avroSchema.getType();
    LogicalType logicalType = avroSchema.getLogicalType();
    PrimitiveType primitiveType = null;
    boolean isConvertedFailed = false;

    switch (columnType) {
        case BOOLEAN:
            primitiveType = PrimitiveType.BOOLEAN;
            break;
        case INT:
            if (logicalType instanceof LogicalTypes.Date) {
                primitiveType = PrimitiveType.DATE;
            } else if (logicalType instanceof LogicalTypes.TimeMillis) {
                primitiveType = PrimitiveType.TIME;
            } else {
                primitiveType = PrimitiveType.INT;
            }
            break;
        case LONG:
            if (logicalType instanceof LogicalTypes.TimeMicros) {
                primitiveType = PrimitiveType.TIME;
            } else if (logicalType instanceof LogicalTypes.TimestampMillis
                    || logicalType instanceof LogicalTypes.TimestampMicros) {
                primitiveType = PrimitiveType.DATETIME;
            } else {
                primitiveType = PrimitiveType.BIGINT;
            }
            break;
        case FLOAT:
            primitiveType = PrimitiveType.FLOAT;
            break;
        case DOUBLE:
            primitiveType = PrimitiveType.DOUBLE;
            break;
        case STRING:
            return ScalarType.createDefaultCatalogString();
        case ARRAY:
            Type type = new ArrayType(fromHudiType(avroSchema.getElementType()));
            if (type.isArrayType()) {
                return type;
            } else {
                isConvertedFailed = true;
                break;
            }
        case FIXED:
        case BYTES:
            if (logicalType instanceof LogicalTypes.Decimal) {
                int precision = ((LogicalTypes.Decimal) logicalType).getPrecision();
                int scale = ((LogicalTypes.Decimal) logicalType).getScale();
                return ScalarType.createUnifiedDecimalType(precision, scale);
            } else {
                primitiveType = PrimitiveType.VARCHAR;
                break;
            }
        case RECORD:
            // Struct type
            List<Schema.Field> fields = avroSchema.getFields();
            Preconditions.checkArgument(fields.size() > 0);
            ArrayList<StructField> structFields = new ArrayList<>(fields.size());
            for (Schema.Field field : fields) {
                String fieldName = field.name();
                Type fieldType = fromHudiType(field.schema());
                if (fieldType.isUnknown()) {
                    isConvertedFailed = true;
                    break;
                }
                structFields.add(new StructField(fieldName, fieldType));
            }
            if (!isConvertedFailed) {
                return new StructType(structFields);
            }
            break;
        case MAP:
            Schema value = avroSchema.getValueType();
            Type valueType = fromHudiType(value);
            if (valueType.isUnknown()) {
                isConvertedFailed = true;
                break;
            }
            if (!isConvertedFailed) {
                // Hudi map's key must be string
                return new MapType(ScalarType.createDefaultCatalogString(), valueType);
            }
            break;
        case UNION:
            List<Schema> nonNullMembers = avroSchema.getTypes().stream()
                    .filter(schema -> !Schema.Type.NULL.equals(schema.getType()))
                    .collect(Collectors.toList());
            if (nonNullMembers.size() == 1) {
                return fromHudiType(nonNullMembers.get(0));
            } else {
                isConvertedFailed = true;
                break;
            }
        case ENUM:
        default:
            isConvertedFailed = true;
            break;
    }

    if (isConvertedFailed) {
        primitiveType = PrimitiveType.UNKNOWN_TYPE;
    }
    return ScalarType.createType(primitiveType);
}
@Test public void testStructHudiSchema() { Schema.Field field1 = new Schema.Field("field1", Schema.create(Schema.Type.INT), null, null); Schema.Field field2 = new Schema.Field("field2", Schema.create(Schema.Type.STRING), null, null); List<Schema.Field> fields = new LinkedList<>(); fields.add(field1); fields.add(field2); Schema structSchema = Schema.createRecord(fields); StructField structField1 = new StructField("field1", ScalarType.createType(PrimitiveType.INT)); StructField structField2 = new StructField("field2", ScalarType.createDefaultCatalogString()); ArrayList<StructField> structFields = new ArrayList<>(); structFields.add(structField1); structFields.add(structField2); StructType structType = new StructType(structFields); Assert.assertEquals(structType, fromHudiType(structSchema)); structSchema = Schema.createRecord( ImmutableList.of(new Schema.Field("enum", Schema.create(Schema.Type.NULL)))); Assert.assertEquals(UNKNOWN_TYPE, fromHudiType(structSchema)); }
public void check(AccessResource checkedAccess, AccessResource ownedAccess) { PlainAccessResource checkedPlainAccess = (PlainAccessResource) checkedAccess; PlainAccessResource ownedPlainAccess = (PlainAccessResource) ownedAccess; if (ownedPlainAccess.isAdmin()) { // admin users don't need verification return; } if (Permission.needAdminPerm(checkedPlainAccess.getRequestCode())) { throw new AclException(String.format("Need admin permission for request code=%d, but accessKey=%s is not", checkedPlainAccess.getRequestCode(), ownedPlainAccess.getAccessKey())); } Map<String, Byte> needCheckedPermMap = checkedPlainAccess.getResourcePermMap(); Map<String, Byte> ownedPermMap = ownedPlainAccess.getResourcePermMap(); if (needCheckedPermMap == null) { // If the needCheckedPermMap is null, then return return; } for (Map.Entry<String, Byte> needCheckedEntry : needCheckedPermMap.entrySet()) { String resource = needCheckedEntry.getKey(); Byte neededPerm = needCheckedEntry.getValue(); boolean isGroup = PlainAccessResource.isRetryTopic(resource); if (ownedPermMap == null || !ownedPermMap.containsKey(resource)) { // Check the default perm byte ownedPerm = isGroup ? ownedPlainAccess.getDefaultGroupPerm() : ownedPlainAccess.getDefaultTopicPerm(); if (!Permission.checkPermission(neededPerm, ownedPerm)) { throw new AclException(String.format("No default permission for %s", PlainAccessResource.printStr(resource, isGroup))); } continue; } if (!Permission.checkPermission(neededPerm, ownedPermMap.get(resource))) { throw new AclException(String.format("No permission for %s", PlainAccessResource.printStr(resource, isGroup))); } } }
@Test public void testCheck_withAdminPermission_shouldPass() { PlainAccessResource checkedAccess = new PlainAccessResource(); checkedAccess.setRequestCode(Permission.SUB); checkedAccess.addResourceAndPerm("topic1", Permission.PUB); PlainAccessResource ownedAccess = new PlainAccessResource(); ownedAccess.setAccessKey("adminUser"); ownedAccess.setAdmin(true); try { permissionChecker.check(checkedAccess, ownedAccess); } catch (AclException e) { Assert.fail("Should not throw any exception for admin user"); } }
public Optional<ApplicationRoles> readApplicationRoles(ApplicationId application) { try { Optional<byte[]> data = curator.getData(applicationRolesPath(application)); if (data.isEmpty() || data.get().length == 0) return Optional.empty(); Slime slime = SlimeUtils.jsonToSlime(data.get()); ApplicationRoles applicationRoles = ApplicationRolesSerializer.fromSlime(slime.get()); return Optional.of(applicationRoles); } catch (Exception e) { throw new RuntimeException("Error reading application roles of " + application, e); } }
@Test public void read_non_existent() { var applicationRolesStore = new ApplicationRolesStore(new MockCurator(), Path.createRoot()); Optional<ApplicationRoles> applicationRoles = applicationRolesStore.readApplicationRoles(ApplicationId.defaultId()); assertTrue(applicationRoles.isEmpty()); }
@Override String getProperty(String key) { String checkedKey = checkPropertyName(key); if (checkedKey == null) { final String upperCaseKey = key.toUpperCase(); if (!upperCaseKey.equals(key)) { checkedKey = checkPropertyName(upperCaseKey); } } if (checkedKey == null) { return null; } return env.get(checkedKey); }
@Test void testGetEnvForLowerCaseKeyWithHyphenAndDot() { assertEquals("value2", systemEnvPropertySource.getProperty("test.case-2")); }
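A minimal sketch of the lookup order implemented above: the exact key is tried first, then the upper-cased key. The canonical helper here is an assumption standing in for the unshown checkPropertyName, which the test implies maps '.' and '-' to '_'.

import java.util.Map;

class EnvLookupSketch {
    // Assumed canonicalization, mirroring what checkPropertyName appears to do: '.' and '-' become '_'.
    private static String canonical(String key) {
        return key.replace('.', '_').replace('-', '_');
    }

    static String lookup(Map<String, String> env, String key) {
        // Try the key as given first.
        String checked = env.containsKey(canonical(key)) ? canonical(key) : null;
        if (checked == null) {
            // Fall back to the upper-cased key, but only if upper-casing actually changed it.
            String upper = key.toUpperCase();
            if (!upper.equals(key) && env.containsKey(canonical(upper))) {
                checked = canonical(upper);
            }
        }
        return checked == null ? null : env.get(checked);
    }

    public static void main(String[] args) {
        // "test.case-2" only matches after upper-casing and canonicalizing: TEST_CASE_2
        System.out.println(lookup(Map.of("TEST_CASE_2", "value2"), "test.case-2")); // prints value2
    }
}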
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) { return api.send(request); }
@Test public void restrictChatMember() { ChatPermissions permissions = new ChatPermissions() .canChangeInfo(true) .canInviteUsers(true) .canPinMessages(true) .canSendAudios(true) .canSendDocuments(true) .canSendPhotos(true) .canSendVideos(true) .canSendVideoNotes(true) .canSendVoiceNotes(true) .canSendPolls(true); // implies can_send_messages BaseResponse response = bot.execute( new RestrictChatMember(groupId, memberBot, permissions) .untilDate((int) (System.currentTimeMillis() / 1000) + 5) .useIndependentChatPermissions(false) ); assertTrue(response.isOk()); }
public static RuntimeException peel(final Throwable t) { return (RuntimeException) peel(t, null, null, HAZELCAST_EXCEPTION_WRAPPER); }
@Test public void testPeel_whenThrowableIsExecutionException_thenReturnCause() { RuntimeException result = ExceptionUtil.peel(new ExecutionException(throwable)); assertEquals(throwable, result); }
public static void serializeRecordBatch(ArrowRecordBatch recordBatch, MemoryBuffer buffer) { // TODO(chaokunyang) add custom WritableByteChannel to avoid copy in `WritableByteChannelImpl` try (WriteChannel channel = new WriteChannel(Channels.newChannel(new MemoryBufferOutputStream(buffer)))) { MessageSerializer.serialize(channel, recordBatch); } catch (IOException e) { throw new RuntimeException(String.format("Serialize record batch %s failed", recordBatch), e); } }
@Test public void testSerializeRecordBatch() { VectorSchemaRoot vectorSchemaRoot = createVectorSchemaRoot(2); VectorUnloader unloader = new VectorUnloader(vectorSchemaRoot); ArrowRecordBatch recordBatch = unloader.getRecordBatch(); MemoryBuffer buffer = MemoryUtils.buffer(32); ArrowUtils.serializeRecordBatch(recordBatch, buffer); try (ArrowRecordBatch batch = ArrowUtils.deserializeRecordBatch(buffer)) { System.out.println("newRecordBatch " + batch); assertEquals(batch.getLength(), recordBatch.getLength()); assertEquals(batch.computeBodyLength(), recordBatch.computeBodyLength()); assertEquals(batch.getMessageType(), recordBatch.getMessageType()); } }
@Override public void writeQueryData(final ChannelHandlerContext context, final ProxyDatabaseConnectionManager databaseConnectionManager, final QueryCommandExecutor queryCommandExecutor, final int headerPackagesCount) throws SQLException { if (ResponseType.QUERY == queryCommandExecutor.getResponseType() && !context.channel().isActive()) { context.write(new PostgreSQLCommandCompletePacket(PostgreSQLCommand.SELECT.name(), 0L)); return; } processSimpleQuery(context, databaseConnectionManager, queryCommandExecutor); }
@Test void assertWriteQueryDataWithInactiveChannel() throws SQLException { PostgreSQLCommandExecuteEngine commandExecuteEngine = new PostgreSQLCommandExecuteEngine(); when(queryCommandExecutor.getResponseType()).thenReturn(ResponseType.QUERY); when(channel.isActive()).thenReturn(false); commandExecuteEngine.writeQueryData(channelHandlerContext, mock(ProxyDatabaseConnectionManager.class), queryCommandExecutor, 0); verify(channelHandlerContext).write(isA(PostgreSQLCommandCompletePacket.class)); }
@Override public void receiveConfigInfo(String configInfo) { if (StringUtils.isEmpty(configInfo)) { return; } Properties properties = new Properties(); try { properties.load(new StringReader(configInfo)); innerReceive(properties); } catch (IOException e) { LOGGER.error("load properties error：" + configInfo, e); } }
@Test void testReceiveConfigInfoEmpty() { final Deque<Properties> q2 = new ArrayDeque<Properties>(); PropertiesListener a = new PropertiesListener() { @Override public void innerReceive(Properties properties) { q2.offer(properties); } }; a.receiveConfigInfo(""); final Properties actual = q2.poll(); assertNull(actual); }
@Override public boolean match(final String rule) { return rule.matches("^int\\|\\d+-\\d+$"); }
@Test public void match() { assertTrue(generator.match("int|10-15")); assertFalse(generator.match("int|10.0-15")); assertFalse(generator.match("int")); assertFalse(generator.match("int|")); }
public ReviewGroupResponse getReviewGroupSummary(String reviewRequestCode) { ReviewGroup reviewGroup = reviewGroupRepository.findByReviewRequestCode(reviewRequestCode) .orElseThrow(() -> new ReviewGroupNotFoundByReviewRequestCodeException(reviewRequestCode)); return new ReviewGroupResponse(reviewGroup.getReviewee(), reviewGroup.getProjectName()); }
@Test void 리뷰_요청_코드로_리뷰_그룹을_조회한다() { // given ReviewGroup reviewGroup = reviewGroupRepository.save(new ReviewGroup( "ted", "review-me", "reviewRequestCode", "groupAccessCode" )); // when ReviewGroupResponse response = reviewGroupLookupService.getReviewGroupSummary( reviewGroup.getReviewRequestCode() ); // then assertAll( () -> assertThat(response.revieweeName()).isEqualTo(reviewGroup.getReviewee()), () -> assertThat(response.projectName()).isEqualTo(reviewGroup.getProjectName()) ); }
public boolean after(DateTimeStamp other) { return compareTo(other) > 0; }
@Test void testAfter() { DateTimeStamp a; DateTimeStamp b; a = new DateTimeStamp(.586); b = new DateTimeStamp(.586); assertFalse(b.after(a)); assertFalse(a.after(b)); b = new DateTimeStamp(.587); assertTrue(b.after(a)); assertFalse(a.after(b)); a = new DateTimeStamp("2018-04-04T09:10:00.586-0100"); b = new DateTimeStamp("2018-04-04T09:10:00.586-0100"); assertFalse(b.after(a)); assertFalse(a.after(b)); b = new DateTimeStamp("2018-04-04T09:10:00.587-0100"); assertTrue(b.after(a)); assertFalse(a.after(b)); a = new DateTimeStamp("2018-04-04T09:10:00.586-0100", 0.18); b = new DateTimeStamp("2018-04-04T09:10:00.586-0100", 0.18); assertFalse(b.after(a)); assertFalse(a.after(b)); b = new DateTimeStamp("2018-04-04T09:10:00.586-0100", 0.19); assertTrue(b.after(a)); assertFalse(a.after(b)); a = new DateTimeStamp("2018-04-04T09:10:00.586-0100", 0.18); b = new DateTimeStamp("2018-04-04T09:10:00.586-0100", 0.18); assertFalse(b.after(a)); assertFalse(a.after(b)); a = new DateTimeStamp("2018-04-04T09:10:00.586-0100", 0.18); b = new DateTimeStamp("2018-04-04T09:10:00.587-0100", 0.19); assertTrue(b.after(a)); assertFalse(a.after(b)); }
public static <R> R callInstanceMethod( final Object instance, final String methodName, ClassParameter<?>... classParameters) { perfStatsCollector.incrementCount( String.format( "ReflectionHelpers.callInstanceMethod-%s_%s", instance.getClass().getName(), methodName)); try { final Class<?>[] classes = ClassParameter.getClasses(classParameters); final Object[] values = ClassParameter.getValues(classParameters); return traverseClassHierarchy( instance.getClass(), NoSuchMethodException.class, traversalClass -> { Method declaredMethod = traversalClass.getDeclaredMethod(methodName, classes); declaredMethod.setAccessible(true); return (R) declaredMethod.invoke(instance, values); }); } catch (InvocationTargetException e) { if (e.getTargetException() instanceof RuntimeException) { throw (RuntimeException) e.getTargetException(); } if (e.getTargetException() instanceof Error) { throw (Error) e.getTargetException(); } throw new RuntimeException(e.getTargetException()); } catch (Exception e) { throw new RuntimeException(e); } }
@Test public void callInstanceMethodReflectively_rethrowsUncheckedException() { ExampleDescendant example = new ExampleDescendant(); try { ReflectionHelpers.callInstanceMethod(example, "throwUncheckedException"); fail("Expected exception not thrown"); } catch (TestRuntimeException e) { } catch (RuntimeException e) { throw new RuntimeException("Incorrect exception thrown", e); } }
static AnnotatedClusterState generatedStateFrom(final Params params) { final ContentCluster cluster = params.cluster; final ClusterState workingState = ClusterState.emptyState(); final Map<Node, NodeStateReason> nodeStateReasons = new HashMap<>(); for (final NodeInfo nodeInfo : cluster.getNodeInfos()) { final NodeState nodeState = computeEffectiveNodeState(nodeInfo, params, nodeStateReasons); workingState.setNodeState(nodeInfo.getNode(), nodeState); } takeDownGroupsWithTooLowAvailability(workingState, nodeStateReasons, params); final Optional<ClusterStateReason> reasonToBeDown = clusterDownReason(workingState, params); if (reasonToBeDown.isPresent()) { workingState.setClusterState(State.DOWN); } workingState.setDistributionBits(inferDistributionBitCount(cluster, workingState, params)); return new AnnotatedClusterState(workingState, reasonToBeDown, nodeStateReasons); }
@Test void init_and_retired_counted_as_up_for_cluster_availability() { final ClusterFixture fixture = ClusterFixture.forFlatCluster(3) .bringEntireClusterUp() .reportStorageNodeState(0, State.INITIALIZING) .proposeStorageNodeWantedState(1, State.RETIRED); // Any node being treated as down should take down the cluster here final ClusterStateGenerator.Params params = fixture.generatorParams().minStorageNodesUp(3); final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params); assertThat(state.toString(), equalTo("distributor:3 storage:3 .0.s:i .0.i:1.0 .1.s:r")); }
public static DateTime parse(CharSequence dateStr, DateFormat dateFormat) { return new DateTime(dateStr, dateFormat); }
@Test public void parseUTCOffsetTest() { // issue#I437AP@Gitee String str = "2019-06-01T19:45:43+08:00"; DateTime dateTime = DateUtil.parse(str); assert dateTime != null; assertEquals("2019-06-01 19:45:43", dateTime.toString()); str = "2019-06-01T19:45:43 +08:00"; dateTime = DateUtil.parse(str); assert dateTime != null; assertEquals("2019-06-01 19:45:43", dateTime.toString()); }
public static <T> CommonPager<T> result(final PageParameter pageParameter, final Supplier<Integer> countSupplier, final Supplier<List<T>> listSupplier) { Integer count = countSupplier.get(); if (Objects.nonNull(count) && count > 0) { return new CommonPager<>(new PageParameter(pageParameter.getCurrentPage(), pageParameter.getPageSize(), count), listSupplier.get()); } return new CommonPager<>(new PageParameter(pageParameter.getCurrentPage(), pageParameter.getPageSize(), 0), Collections.emptyList()); }
@Test public void testResult() { final PageParameter pageParameter = new PageParameter(1, 10, 1); final CommonPager<String> result = PageResultUtils.result(pageParameter, () -> 1, () -> Collections.singletonList("result1")); assertEquals(result.getDataList().size(), 1); }
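The zero-count branch above never invokes the list supplier; a small sketch of that short-circuit, reusing the classes from the pair above (the throwing supplier is only there to prove the point):

PageParameter page = new PageParameter(1, 10, 0);
// The count supplier returns 0, so the list supplier is never called and the data list is empty.
CommonPager<String> empty = PageResultUtils.result(page, () -> 0, () -> { throw new AssertionError("list supplier must not be called"); });
assert empty.getDataList().isEmpty();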
public static AwsCredentialsProvider create(boolean isCloud, @Nullable String stsRegion, @Nullable String accessKey, @Nullable String secretKey, @Nullable String assumeRoleArn) { AwsCredentialsProvider awsCredentials = isCloud ? getCloudAwsCredentialsProvider(accessKey, secretKey) : getAwsCredentialsProvider(accessKey, secretKey); // Apply the Assume Role ARN Authorization if specified. All AWSCredentialsProviders support this. if (!isNullOrEmpty(assumeRoleArn) && !isNullOrEmpty(stsRegion)) { LOG.debug("Creating cross account assume role credentials"); return buildStsCredentialsProvider(awsCredentials, stsRegion, assumeRoleArn, accessKey); } return awsCredentials; }
@Test public void testKeySecret() { final AwsCredentialsProvider awsCredentialsProvider = AWSAuthFactory.create(false, null, "key", "secret", null); assertThat(awsCredentialsProvider).isExactlyInstanceOf(StaticCredentialsProvider.class); assertThat("key").isEqualTo(awsCredentialsProvider.resolveCredentials().accessKeyId()); assertThat("secret").isEqualTo(awsCredentialsProvider.resolveCredentials().secretAccessKey()); }
public KinesisPermissionsResponse getPermissions() { final String setupPolicyString = policyAsJsonString(buildAwsSetupPolicy()); final String autoSetupPolicyString = policyAsJsonString(buildAwsAutoSetupPolicy()); return KinesisPermissionsResponse.create(setupPolicyString, autoSetupPolicyString); }
@Test public void testPermissions() { final KinesisPermissionsResponse permissions = awsService.getPermissions(); // Verify that the setup policy contains some needed permissions. assertTrue(permissions.setupPolicy().contains("cloudwatch")); assertTrue(permissions.setupPolicy().contains("dynamodb")); assertTrue(permissions.setupPolicy().contains("ec2")); assertTrue(permissions.setupPolicy().contains("elasticloadbalancing")); assertTrue(permissions.setupPolicy().contains("kinesis")); // Verify that the auto-setup policy contains some needed permissions. assertTrue(permissions.autoSetupPolicy().contains("CreateStream")); assertTrue(permissions.autoSetupPolicy().contains("DescribeSubscriptionFilters")); assertTrue(permissions.autoSetupPolicy().contains("PutRecord")); assertTrue(permissions.autoSetupPolicy().contains("RegisterStreamConsumer")); }
public T subtract(T other) { checkNotNull(other, "Cannot subtract null resources"); checkArgument(getClass() == other.getClass(), "Minus with different resource type"); checkArgument(name.equals(other.getName()), "Minus with different resource name"); checkArgument( value.compareTo(other.getValue()) >= 0, "Try to subtract a larger resource from this one."); return create(value.subtract(other.getValue())); }
@Test void testSubtract() { final Resource v1 = new TestResource(0.2); final Resource v2 = new TestResource(0.1); assertTestResourceValueEquals(0.1, v1.subtract(v2)); }
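The guards in subtract also reject a negative result; a sketch of that failure path, assuming the TestResource helper from the test above:

final Resource small = new TestResource(0.1);
final Resource large = new TestResource(0.2);
try {
    small.subtract(large); // violates "Try to subtract a larger resource from this one."
} catch (IllegalArgumentException expected) {
    // checkArgument failures surface as IllegalArgumentException
}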
@Override public final Scoped onStateChange(Consumer<NodeState> listener) { // Wrap listeners in a reference of our own to guarantee uniqueness for listener references. AtomicReference<Consumer<NodeState>> listenerRef = new AtomicReference<>(listener); synchronized (mListeners) { Preconditions.checkState(mListeners.add(listenerRef), "listener already exists"); } return () -> { synchronized (mListeners) { mListeners.remove(listenerRef); } }; }
@Test(timeout = TIMEOUT) public void onStateChange() { AtomicInteger primaryCounter = new AtomicInteger(0); AtomicInteger standbyCounter = new AtomicInteger(0); Scoped listener = mSelector.onStateChange(state -> { if (state.equals(NodeState.PRIMARY)) { primaryCounter.incrementAndGet(); } else { standbyCounter.incrementAndGet(); } }); for (int i = 0; i < 10; i++) { mSelector.setState(NodeState.PRIMARY); mSelector.setState(NodeState.STANDBY); } assertEquals(10, primaryCounter.get()); assertEquals(10, standbyCounter.get()); listener.close(); mSelector.setState(NodeState.PRIMARY); mSelector.setState(NodeState.STANDBY); assertEquals(10, primaryCounter.get()); assertEquals(10, standbyCounter.get()); }
public static Pagination pageStartingAt(Integer offset, Integer total, Integer pageSize) { return new Pagination(offset, total, pageSize); }
@Test public void shouldNotCreatePaginationWithMoreThan300Records() { try { Pagination.pageStartingAt(0, 1000, 301); } catch (Exception e) { assertThat(e.getMessage(), is("The max number of perPage is [300].")); } }
public static byte[] bigIntegerToBytes(BigInteger b, int numBytes) { checkArgument(b.signum() >= 0, () -> "b must be positive or zero: " + b); checkArgument(numBytes > 0, () -> "numBytes must be positive: " + numBytes); byte[] src = b.toByteArray(); byte[] dest = new byte[numBytes]; boolean isFirstByteOnlyForSign = src[0] == 0; int length = isFirstByteOnlyForSign ? src.length - 1 : src.length; checkArgument(length <= numBytes, () -> "The given number does not fit in " + numBytes); int srcPos = isFirstByteOnlyForSign ? 1 : 0; int destPos = numBytes - length; System.arraycopy(src, srcPos, dest, destPos, length); return dest; }
@Test(expected = IllegalArgumentException.class) public void bigIntegerToBytes_convertWithNegativeLength() { BigInteger b = BigInteger.valueOf(10); ByteUtils.bigIntegerToBytes(b, -1); }
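Two worked cases of the conversion above, illustrating the left zero-padding and the stripping of a sign-only leading byte (values chosen for illustration):

byte[] padded = ByteUtils.bigIntegerToBytes(BigInteger.valueOf(10), 4);
// -> {0x00, 0x00, 0x00, 0x0A}: 10 fits in one byte, so three zero bytes pad the left
byte[] stripped = ByteUtils.bigIntegerToBytes(BigInteger.valueOf(255), 1);
// -> {(byte) 0xFF}: toByteArray() yields {0x00, 0xFF}, and the sign-only first byte is dropped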
@Override public InputStream getInputStream() throws FileSystemException { return requireResolvedFileObject().getInputStream(); }
@Test public void testDelegatesGetInputStreamWithBufferSize() throws FileSystemException { InputStream inputStream = mock( InputStream.class ); when( resolvedFileObject.getInputStream( anyInt() ) ).thenReturn( inputStream ); assertSame( inputStream, fileObject.getInputStream( 10 ) ); }
@Override public long skip(long n) throws IOException { if (n <= 0) { return 0; } long toBeSkipped = Math.min(n, mLength - mPosition); if (!mUfsInStream.isPresent()) { mPosition += toBeSkipped; return toBeSkipped; } long skipped = mUfsInStream.get().skip(toBeSkipped); if (skipped > 0) { mPosition += skipped; } return skipped; }
@Test public void skip() throws IOException, AlluxioException { AlluxioURI ufsPath = getUfsPath(); createFile(ufsPath, CHUNK_SIZE); Random random = new Random(); try (FileInStream inStream = getStream(ufsPath)) { for (int i = 0; i < 10; i++) { if (inStream.remaining() <= 0) { break; } int skip = random.nextInt((int) inStream.remaining()); assertEquals(skip, inStream.skip(skip)); assertEquals(skip, inStream.getPos()); int len = CHUNK_SIZE - skip; byte[] res = new byte[len]; assertEquals(len, inStream.read(res, 0, len)); assertTrue(BufferUtils.equalIncreasingByteArray(skip, len, res)); } } }
@Override protected Release findActiveOne(long id, ApolloNotificationMessages clientMessages) { Tracer.logEvent(TRACER_EVENT_CACHE_GET_ID, String.valueOf(id)); return configIdCache.getUnchecked(id).orElse(null); }
@Test public void testFindActiveOneWithSameIdMultipleTimes() throws Exception { long someId = 1; when(releaseService.findActiveOne(someId)).thenReturn(someRelease); assertEquals(someRelease, configServiceWithCache.findActiveOne(someId, someNotificationMessages)); assertEquals(someRelease, configServiceWithCache.findActiveOne(someId, someNotificationMessages)); assertEquals(someRelease, configServiceWithCache.findActiveOne(someId, someNotificationMessages)); verify(releaseService, times(1)).findActiveOne(someId); }
public long getTimeout() { return timeout; }
@Test @DirtiesContext public void testCreateEndpointWithTimeout() throws Exception { long timeout = 1999999L; ExecEndpoint e = createExecEndpoint("exec:test?timeout=" + timeout); assertEquals(timeout, e.getTimeout()); }
public static Pair<String, String> labelKeyValuePair(String indexedLabelKey) { var idx = indexedLabelKey.indexOf('='); if (idx != -1) { return Pair.of(indexedLabelKey.substring(0, idx), indexedLabelKey.substring(idx + 1)); } throw new IllegalArgumentException("Invalid label key-value pair: " + indexedLabelKey); }
@Test void labelKeyValuePair() { var pair = LabelIndexSpecUtils.labelKeyValuePair("key=value"); assertThat(pair.getFirst()).isEqualTo("key"); assertThat(pair.getSecond()).isEqualTo("value"); pair = LabelIndexSpecUtils.labelKeyValuePair("key=value=1"); assertThat(pair.getFirst()).isEqualTo("key"); assertThat(pair.getSecond()).isEqualTo("value=1"); assertThatThrownBy(() -> LabelIndexSpecUtils.labelKeyValuePair("key")) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Invalid label key-value pair: key"); }
@Override public AttributedList<Path> search(final Path workdir, final Filter<Path> regex, final ListProgressListener listener) throws BackgroundException { final AttributedList<Path> list = new AttributedList<>(); // avoid searching the "special" folders if users search from the account root if(workdir.getParent().isRoot()) { final Predicate<MantaObject> fastSearchPredicate = o -> session.isWorldReadable(o) || session.isUserWritable(o); final List<Path> homeFolderPaths = findObjectsAsPaths(workdir, fastSearchPredicate); cleanResults(homeFolderPaths, regex); addPaths(list, workdir, listener, homeFolderPaths); /* // disable search of system directories until we can provide incremental results // slowSearchPredicate will prevent us from looking at ~~/public and ~~/stor twice final Predicate<MantaObject> slowSearchPredicate = fastSearchPredicate.negate(); final List<Path> systemFolderObjects = findObjectsAsPaths(workdir, slowSearchPredicate.and(regexPredicate)); cleanResults(systemFolderObjects, regex); addPaths(list, workdir, listener, systemFolderObjects); */ } else { final List<Path> foundPaths = findObjectsAsPaths(workdir, null); cleanResults(foundPaths, regex); addPaths(list, workdir, listener, foundPaths); } return list; }
@Test public void testSearchNestedDirectory() throws Exception { Assume.assumeTrue(session.getClient().existsAndIsAccessible(testPathPrefix.getAbsolute())); final String newDirectoryName = new AlphanumericRandomStringService().random(); final String intermediateDirectoryName = new AlphanumericRandomStringService().random(); final String intermediateFileName = new AlphanumericRandomStringService().random(); final String nestedFileName = new AlphanumericRandomStringService().random(); final Path newDirectory = new Path(testPathPrefix, newDirectoryName, TYPE_DIRECTORY); final Path intermediateDirectory = new Path(newDirectory, intermediateDirectoryName, TYPE_DIRECTORY); new MantaDirectoryFeature(session).mkdir(newDirectory, null); new MantaDirectoryFeature(session).mkdir(intermediateDirectory, null); new MantaTouchFeature(session).touch(new Path(newDirectory, intermediateFileName, TYPE_FILE), null); new MantaTouchFeature(session).touch(new Path(intermediateDirectory, nestedFileName, TYPE_FILE), null); final MantaSearchFeature s = new MantaSearchFeature(session); final AttributedList<Path> search = s.search(newDirectory, new NullFilter<>(), new DisabledListProgressListener()); final Path foundIntermediateFile = search.find(f -> f.getName().equals(intermediateFileName) && f.getParent().getName().equals(newDirectoryName)); final Path foundNestedFile = search.find(f -> f.getName().equals(nestedFileName) && f.getParent().getName().equals(intermediateDirectoryName)); assertEquals(3, search.size()); assertNotNull(foundIntermediateFile); assertNotNull(foundNestedFile); final AttributedList<Path> filteredIntermediateFileSearch = s.search(newDirectory, new SearchFilter(intermediateFileName), new DisabledListProgressListener()); final Path foundFilteredIntermediateFile = search.find(f -> f.getName().equals(intermediateFileName) && f.getParent().getName().equals(newDirectoryName)); assertEquals(1, filteredIntermediateFileSearch.size()); assertNotNull(foundFilteredIntermediateFile); final AttributedList<Path> filteredNestedFileSearch = s.search(newDirectory, new Filter<Path>() { @Override public boolean accept(final Path file) { return file.isDirectory() || file.getName().matches(nestedFileName); } @Override public Pattern toPattern() { return Pattern.compile(nestedFileName); } }, new DisabledListProgressListener()); final Path foundFilteredNestedFile = search.find(f -> f.getName().equals(nestedFileName) && f.getParent().getName().equals(intermediateDirectoryName)); assertEquals(1, filteredNestedFileSearch.size()); assertNotNull(foundFilteredNestedFile); }
public FileSystem get(Key key) { synchronized (mLock) { Value value = mCacheMap.get(key); FileSystem fs; if (value == null) { // On cache miss, create and insert a new FileSystem instance, fs = FileSystem.Factory.create(FileSystemContext.create(key.mSubject, key.mConf)); mCacheMap.put(key, new Value(fs, 1)); } else { fs = value.mFileSystem; value.mRefCount.getAndIncrement(); } return new InstanceCachingFileSystem(fs, key); } }
@Test public void getTwiceThenClose2() throws IOException { Key key1 = createTestFSKey("user1"); FileSystem fs1 = mFileSystemCache.get(key1); FileSystem fs2 = mFileSystemCache.get(key1); assertSame(getDelegatedFileSystem(fs1), getDelegatedFileSystem(fs2)); fs1.close(); assertTrue(fs1.isClosed()); assertFalse(fs2.isClosed()); fs2.close(); assertTrue(fs1.isClosed()); assertTrue(fs2.isClosed()); }
@Override public byte[] serialize(final String topic, final List<?> data) { if (data == null) { return null; } try { final StringWriter stringWriter = new StringWriter(); final CSVPrinter csvPrinter = new CSVPrinter(stringWriter, csvFormat); csvPrinter.printRecord(() -> new FieldIterator(data, schema)); final String result = stringWriter.toString(); return result.substring(0, result.length() - 2).getBytes(StandardCharsets.UTF_8); } catch (final Exception e) { throw new SerializationException("Error serializing CSV message", e); } }
@Test public void shouldSerializeZeroDecimalWithPaddedZeros() { // Given: givenSingleColumnSerializer(SqlTypes.decimal(4, 2)); final List<?> values = Collections .singletonList(BigDecimal.ZERO.setScale(2, RoundingMode.UNNECESSARY)); // When: final byte[] bytes = serializer.serialize("", values); // Then: assertThat(new String(bytes, StandardCharsets.UTF_8), is("0.00")); }
@Udf public boolean check(@UdfParameter(description = "The input JSON string") final String input) { if (input == null) { return false; } try { return !UdfJsonMapper.parseJson(input).isMissingNode(); } catch (KsqlFunctionException e) { return false; } }
@Test public void shouldNotInterpretEmptyString() { assertFalse(udf.check("")); }
@Override public String evaluateWithArgs(final Map<String, Comparable<?>> map) { Closure<?> result = ((Closure<?>) evaluate("{it -> \"" + handlePlaceHolder(inlineExpression) + "\"}")).rehydrate(new Expando(), null, null); result.setResolveStrategy(Closure.DELEGATE_ONLY); map.forEach(result::setProperty); return result.call().toString(); }
@Test void assertEvaluateWithArgs() { assertThat(TypedSPILoader.getService(InlineExpressionParser.class, "GROOVY", PropertiesBuilder.build( new PropertiesBuilder.Property(InlineExpressionParser.INLINE_EXPRESSION_KEY, "${1+2}"))).evaluateWithArgs(new LinkedHashMap<>()), is("3")); }
public static <V> Read<V> read() { return new AutoValue_SparkReceiverIO_Read.Builder<V>().build(); }
@Test public void testReadObjectCreationFailsIfTimestampFnIsNull() { assertThrows( IllegalArgumentException.class, () -> SparkReceiverIO.<String>read().withTimestampFn(null)); }
public Set<Map.Entry<PropertyKey, Object>> entrySet() { return keySet().stream().map(key -> Maps.immutableEntry(key, get(key))).collect(toSet()); }
@Test public void entrySet() { Set<Map.Entry<? extends PropertyKey, Object>> expected = PropertyKey.defaultKeys().stream() .map(key -> Maps.immutableEntry(key, key.getDefaultValue())).collect(toSet()); assertThat(mProperties.entrySet(), is(expected)); mProperties.put(mKeyWithValue, "value", Source.RUNTIME); expected.add(Maps.immutableEntry(mKeyWithValue, "value")); assertThat(mProperties.entrySet(), is(expected)); }
public static List<BindAddress> validateBindAddresses(ServiceConfiguration config, Collection<String> schemes) { // migrate the existing configuration properties List<BindAddress> addresses = migrateBindAddresses(config); // parse the list of additional bind addresses Arrays .stream(StringUtils.split(StringUtils.defaultString(config.getBindAddresses()), ",")) .map(s -> { Matcher m = BIND_ADDRESSES_PATTERN.matcher(s); if (!m.matches()) { throw new IllegalArgumentException("bindAddresses: malformed: " + s); } return m; }) .map(m -> new BindAddress(m.group("name"), URI.create(m.group("url")))) .forEach(addresses::add); // apply the filter if (schemes != null) { addresses.removeIf(a -> !schemes.contains(a.getAddress().getScheme())); } return addresses; }
@Test public void testMigrationWithDefaults() { ServiceConfiguration config = new ServiceConfiguration(); List<BindAddress> addresses = BindAddressValidator.validateBindAddresses(config, null); assertEquals(Arrays.asList( new BindAddress(null, URI.create("pulsar://0.0.0.0:6650")), new BindAddress(null, URI.create("http://0.0.0.0:8080"))), addresses); }
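A sketch of the additional-address syntax the pattern above appears to accept, each entry being name:url (the exact BIND_ADDRESSES_PATTERN is not shown here, so the format is inferred from the matcher groups, and the setter is assumed to be the usual generated one for the bindAddresses field):

ServiceConfiguration config = new ServiceConfiguration();
config.setBindAddresses("extra:pulsar://0.0.0.0:6652");
// -> the two migrated defaults plus BindAddress("extra", URI.create("pulsar://0.0.0.0:6652"))
List<BindAddress> all = BindAddressValidator.validateBindAddresses(config, null);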
@Override public void checkApiEndpoint(GithubAppConfiguration githubAppConfiguration) { if (StringUtils.isBlank(githubAppConfiguration.getApiEndpoint())) { throw new IllegalArgumentException("Missing URL"); } URI apiEndpoint; try { apiEndpoint = URI.create(githubAppConfiguration.getApiEndpoint()); } catch (IllegalArgumentException e) { throw new IllegalArgumentException("Invalid URL, " + e.getMessage()); } if (!"http".equalsIgnoreCase(apiEndpoint.getScheme()) && !"https".equalsIgnoreCase(apiEndpoint.getScheme())) { throw new IllegalArgumentException("Only http and https schemes are supported"); } else if (!"api.github.com".equalsIgnoreCase(apiEndpoint.getHost()) && !apiEndpoint.getPath().toLowerCase(Locale.ENGLISH).startsWith("/api/v3")) { throw new IllegalArgumentException("Invalid GitHub URL"); } }
@Test @UseDataProvider("validApiEndpoints") public void checkApiEndpoint(String url) { GithubAppConfiguration configuration = new GithubAppConfiguration(1L, "", url); assertThatCode(() -> underTest.checkApiEndpoint(configuration)).isNull(); }
public synchronized boolean deregister(String id) { assert !(Thread.currentThread() instanceof PartitionOperationThread); if (!id2InterceptorMap.containsKey(id)) { return false; } Map<String, MapInterceptor> tmpMap = new HashMap<>(id2InterceptorMap); MapInterceptor removedInterceptor = tmpMap.remove(id); id2InterceptorMap = unmodifiableMap(tmpMap); List<MapInterceptor> tmpInterceptors = new ArrayList<>(interceptors); tmpInterceptors.remove(removedInterceptor); interceptors = unmodifiableList(tmpInterceptors); return true; }
@Test public void testDeregister_whenInterceptorWasNotRegistered_thenDoNothing() { registry.deregister(interceptor.id); assertInterceptorRegistryContainsNotInterceptor(); }
@VisibleForTesting boolean prepareWorkingDir(FileSystem fs, Path workingDir) throws IOException { if (fs.exists(workingDir)) { if (force) { LOG.info("Existing Working Dir detected: -" + FORCE_OPTION + " specified -> recreating Working Dir"); fs.delete(workingDir, true); } else { LOG.info("Existing Working Dir detected: -" + FORCE_OPTION + " not specified -> exiting"); return false; } } fs.mkdirs(workingDir); fs.setPermission(workingDir, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, true)); return true; }
@Test(timeout = 5000) public void testPrepareWorkingDir() throws Exception { Configuration conf = new Configuration(); HadoopArchiveLogs hal = new HadoopArchiveLogs(conf); FileSystem fs = FileSystem.getLocal(conf); Path workingDir = new Path("target", "testPrepareWorkingDir"); fs.delete(workingDir, true); Assert.assertFalse(fs.exists(workingDir)); // -force is false and the dir doesn't exist so it will create one hal.force = false; boolean dirPrepared = hal.prepareWorkingDir(fs, workingDir); Assert.assertTrue(dirPrepared); Assert.assertTrue(fs.exists(workingDir)); Assert.assertEquals( new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, !Shell.WINDOWS), fs.getFileStatus(workingDir).getPermission()); // Throw a file in the dir Path dummyFile = new Path(workingDir, "dummy.txt"); fs.createNewFile(dummyFile); Assert.assertTrue(fs.exists(dummyFile)); // -force is false and the dir exists, so nothing will happen and the dummy // still exists dirPrepared = hal.prepareWorkingDir(fs, workingDir); Assert.assertFalse(dirPrepared); Assert.assertTrue(fs.exists(workingDir)); Assert.assertTrue(fs.exists(dummyFile)); Assert.assertEquals( new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, !Shell.WINDOWS), fs.getFileStatus(workingDir).getPermission()); // -force is true and the dir exists, so it will recreate it and the dummy // won't exist anymore hal.force = true; dirPrepared = hal.prepareWorkingDir(fs, workingDir); Assert.assertTrue(dirPrepared); Assert.assertTrue(fs.exists(workingDir)); Assert.assertEquals( new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, !Shell.WINDOWS), fs.getFileStatus(workingDir).getPermission()); Assert.assertFalse(fs.exists(dummyFile)); }
@Override public void handleWayTags(int edgeId, EdgeIntAccess edgeIntAccess, ReaderWay readerWay, IntsRef relationFlags) { String roadClassTag = readerWay.getTag("highway"); if (roadClassTag == null) return; RoadClass roadClass = RoadClass.find(roadClassTag); if (roadClass == OTHER && roadClassTag.endsWith("_link")) roadClass = RoadClass.find(roadClassTag.substring(0, roadClassTag.length() - 5)); if (roadClass != OTHER) roadClassEnc.setEnum(false, edgeId, edgeIntAccess, roadClass); }
@Test public void testIgnore() { ReaderWay readerWay = new ReaderWay(1); EdgeIntAccess edgeIntAccess = new ArrayEdgeIntAccess(1); int edgeId = 0; readerWay.setTag("route", "ferry"); parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags); assertEquals(RoadClass.OTHER, rcEnc.getEnum(false, edgeId, edgeIntAccess)); }
@Override @SuppressWarnings("unchecked") public void upgrade() { if (clusterConfigService.get(MigrationCompleted.class) != null) { LOG.debug("Migration already done."); return; } final ImmutableSet.Builder<String> modifiedStreams = ImmutableSet.builder(); final ImmutableSet.Builder<String> modifiedAlertConditions = ImmutableSet.builder(); for (Document document : collection.find().sort(ascending(FIELD_CREATED_AT))) { final String streamId = document.getObjectId(FIELD_ID).toHexString(); if (!document.containsKey(FIELD_ALERT_CONDITIONS)) { continue; } final List<Document> alertConditions = (List<Document>) document.get(FIELD_ALERT_CONDITIONS); // Need to check if the following fields are integers: // // FieldContentValue: grace, backlog // FieldValue: grace, backlog, time, threshold // MessageCount: grace, backlog, time, threshold final Set<String> intFields = ImmutableSet.of("grace", "backlog", "time", "threshold"); for (Document alertCondition : alertConditions) { final String alertConditionId = alertCondition.get("id", String.class); final String alertConditionTitle = alertCondition.get("title", String.class); final Document parameters = alertCondition.get("parameters", Document.class); for (String field : intFields) { final Object fieldValue = parameters.get(field); // No need to convert anything if the field does not exist or is already an integer if (fieldValue == null || fieldValue instanceof Integer) { continue; } if (!(fieldValue instanceof String)) { LOG.warn("Field <{}> in alert condition <{}> ({}) of stream <{}> is not a string but a <{}>, not trying to convert it!", field, alertConditionId, alertConditionTitle, streamId, fieldValue.getClass().getCanonicalName()); continue; } final String stringValue = parameters.get(field, String.class); final Integer intValue = Ints.tryParse(stringValue); LOG.info("Converting value for field <{}> from string to integer in alert condition <{}> ({}) of stream <{}>", field, alertConditionId, alertConditionTitle, streamId); if (intValue == null) { LOG.error("Unable to parse \"{}\" into integer!", fieldValue); } final UpdateResult result = collection.updateOne(eq(FIELD_ALERT_CONDITIONS_ID, alertConditionId), set(ALERT_CONDITIONS_PARAMETERS_PREFIX + field, intValue)); // Use UpdateResult#getMatchedCount() instead of #getModifiedCount() to make it work on MongoDB 2.4 if (result.getMatchedCount() > 0) { modifiedStreams.add(streamId); modifiedAlertConditions.add(alertConditionId); } else { LOG.warn("No document modified for alert condition <{}> ({})", alertConditionId, alertConditionTitle); } } } } clusterConfigService.write(MigrationCompleted.create(modifiedStreams.build(), modifiedAlertConditions.build())); }
@Test @MongoDBFixtures("V20170110150100_FixAlertConditionsMigration.json") public void upgrade() throws Exception { // First check all types of the existing documents AlertConditionAssertions.assertThat(getAlertCondition("2fa6a415-ce0c-4a36-accc-dd9519eb06d9")) .hasParameter("backlog", 2) .hasParameter("grace", 1) .hasParameter("threshold_type", "MORE") .hasParameter("threshold", "5") .hasParameter("time", "1"); AlertConditionAssertions.assertThat(getAlertCondition("393fd8b2-9b17-42d3-86b0-6e55d0f5343a")) .hasParameter("backlog", 0) .hasParameter("field", "bar") .hasParameter("grace", "0") .hasParameter("value", "baz"); AlertConditionAssertions.assertThat(getAlertCondition("0e75404f-c0ee-40b0-8872-b1aec441ba1c")) .hasParameter("backlog", "0") .hasParameter("field", "foo") .hasParameter("grace", "0") .hasParameter("threshold_type", "HIGHER") .hasParameter("threshold", "0") .hasParameter("time", "5") .hasParameter("type", "MAX"); // Run the migration that should convert all affected fields to integers migration.upgrade(); // Check all types again AlertConditionAssertions.assertThat(getAlertCondition("2fa6a415-ce0c-4a36-accc-dd9519eb06d9")) .hasParameter("backlog", 2) .hasParameter("grace", 1) .hasParameter("threshold_type", "MORE") .hasParameter("threshold", 5) .hasParameter("time", 1); AlertConditionAssertions.assertThat(getAlertCondition("393fd8b2-9b17-42d3-86b0-6e55d0f5343a")) .hasParameter("backlog", 0) .hasParameter("field", "bar") .hasParameter("grace", 0) .hasParameter("value", "baz"); AlertConditionAssertions.assertThat(getAlertCondition("0e75404f-c0ee-40b0-8872-b1aec441ba1c")) .hasParameter("backlog", 0) .hasParameter("field", "foo") .hasParameter("grace", 0) .hasParameter("threshold_type", "HIGHER") .hasParameter("threshold", 0) .hasParameter("time", 5) .hasParameter("type", "MAX"); final MigrationCompleted migrationCompleted = clusterConfigService.get(MigrationCompleted.class); assertThat(migrationCompleted).isNotNull(); assertThat(migrationCompleted.streamIds()).containsOnly("58458e442f857c314491344e", "58458e442f857c314491345e"); assertThat(migrationCompleted.alertConditionIds()).containsOnly("2fa6a415-ce0c-4a36-accc-dd9519eb06d9", "393fd8b2-9b17-42d3-86b0-6e55d0f5343a", "0e75404f-c0ee-40b0-8872-b1aec441ba1c"); }
public String getString(@NotNull final String key, @Nullable final String defaultValue) { return System.getProperty(key, props.getProperty(key, defaultValue)); }
@Test public void testGetString_String_String() { String key = "key That Doesn't Exist"; String defaultValue = "blue bunny"; String expResult = "blue bunny"; String result = getSettings().getString(key); Assert.assertTrue(result == null); result = getSettings().getString(key, defaultValue); Assert.assertEquals(expResult, result); }
@Override public TypeSerializerSchemaCompatibility<T> resolveSchemaCompatibility( TypeSerializerSnapshot<T> oldSerializerSnapshot) { if (!(oldSerializerSnapshot instanceof AvroSerializerSnapshot)) { return TypeSerializerSchemaCompatibility.incompatible(); } AvroSerializerSnapshot<?> oldAvroSerializerSnapshot = (AvroSerializerSnapshot<?>) oldSerializerSnapshot; return resolveSchemaCompatibility(oldAvroSerializerSnapshot.schema, schema); }
@Test void aPojoIsCompatibleAfterARoundTrip() throws IOException { AvroSerializer<Pojo> serializer = new AvroSerializer<>(Pojo.class); AvroSerializerSnapshot<Pojo> restored = roundTrip(serializer.snapshotConfiguration()); assertThat(serializer.snapshotConfiguration().resolveSchemaCompatibility(restored)) .is(isCompatibleAsIs()); }
public static AfterProcessingTimeStateMachine pastFirstElementInPane() { return new AfterProcessingTimeStateMachine(IDENTITY); }
@Test public void testClear() throws Exception { SimpleTriggerStateMachineTester<IntervalWindow> tester = TriggerStateMachineTester.forTrigger( AfterProcessingTimeStateMachine.pastFirstElementInPane() .plusDelayOf(Duration.millis(5)), FixedWindows.of(Duration.millis(10))); tester.injectElements(1, 2, 3); IntervalWindow window = new IntervalWindow(new Instant(0), new Instant(10)); tester.clearState(window); tester.assertCleared(window); }
@Override public boolean tryAdd(double longitude, double latitude, V member) { return get(tryAddAsync(longitude, latitude, member)); }
@Test public void testTryAdd() { RGeo<String> geo = redisson.getGeo("test"); assertThat(geo.add(2.51, 3.12, "city1")).isEqualTo(1); assertThat(geo.tryAdd(2.5, 3.1, "city1")).isFalse(); assertThat(geo.tryAdd(2.12, 3.5, "city2")).isTrue(); }
@Override public RelativeRange apply(final Period period) { if (period != null) { return RelativeRange.Builder.builder() .from(period.withYears(0).withMonths(0).plusDays(period.getYears() * 365).plusDays(period.getMonths() * 30).toStandardSeconds().getSeconds()) .build(); } else { return null; } }
@Test void testSecondConversion() { final RelativeRange result = converter.apply(Period.seconds(5)); verifyResult(result, 5); }
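The converter flattens years and months into days (365 and 30 per unit) before taking standard seconds; a worked example under that convention:

// Period.months(1) -> 30 days -> 30 * 24 * 3600 = 2_592_000 seconds
// Period.years(1)  -> 365 days -> 365 * 24 * 3600 = 31_536_000 seconds
final RelativeRange oneMonth = converter.apply(Period.months(1));
// oneMonth's "from" value is 2_592_000 under the 30-day month convention above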
public String getExample() { return ObjectUtils.isEmpty(example) ? xExample : example.toString(); }
@Test public void testGetExample() { assertEquals("shenyuExample", docParameter.getExample()); docParameter.setExample(""); final String example = docParameter.getExample(); assertEquals("shenyuXExample", docParameter.getExample()); }
public static String md5Hex(String string) { return compute(string, DigestObjectPools.MD5); }
@Test public void shouldComputeForAGivenStringUsingMD5() { String fingerprint = "Some String"; String digest = md5Hex(fingerprint); assertEquals(DigestUtils.md5Hex(fingerprint), digest); }
public SmppConfiguration getConfiguration() { return configuration; }
@Test public void emptyConstructorShouldSetTheSmppConfiguration() { assertNotNull(binding.getConfiguration()); }
public DMNContext populateContextWith(Map<String, Object> json) { for (Entry<String, Object> kv : json.entrySet()) { InputDataNode idn = model.getInputByName(kv.getKey()); if (idn != null) { processInputDataNode(kv, idn); } else { DecisionNode dn = model.getDecisionByName(kv.getKey()); if (dn != null) { processDecisionNode(kv, dn); } else { LOG.debug("The key {} was not a InputData nor a Decision to override, setting it as-is.", kv.getKey()); context.set(kv.getKey(), kv.getValue()); } } } return context; }
@Test void trafficViolationArbitraryFine() throws Exception { final DMNRuntime runtime = createRuntimeWithAdditionalResources("Traffic Violation.dmn", DMNRuntimeTypesTest.class); final DMNModel dmnModel = runtime.getModel("https://github.com/kiegroup/drools/kie-dmn/_A4BCA8B8-CF08-433F-93B2-A2598F19ECFF", "Traffic Violation"); assertThat(dmnModel).isNotNull(); assertThat(dmnModel.hasErrors()).as(DMNRuntimeUtil.formatMessages(dmnModel.getMessages())).isFalse(); DMNContext context = runtime.newContext(); final String JSON = "{\n" + " \"Driver\": {\n" + " \"Points\": 1\n" + " },\n" + " \"Violation\": {\n" + " \"Type\": \"speed\",\n" + " \"Actual Speed\": 100,\n" + " \"Speed Limit\": 100\n" + " },\n" + " \"Additional\": {\n" + // intentional additional object " \"Comment\": \"Totally arbitrarily object in context\"\n" + " },\n" + " \"Fine\": {\n" + // intentional overriding decision " \"Points\": 47,\n" + " \"Amount\": 9999\n" + " }\n" + "}"; new DynamicDMNContextBuilder(context, dmnModel).populateContextWith(readJSON(JSON)); assertTrafficViolationSuspendedCase(runtime, dmnModel, context); }
public void decode(ByteBuf buffer) { boolean last; int statusCode; while (true) { switch(state) { case READ_COMMON_HEADER: if (buffer.readableBytes() < SPDY_HEADER_SIZE) { return; } int frameOffset = buffer.readerIndex(); int flagsOffset = frameOffset + SPDY_HEADER_FLAGS_OFFSET; int lengthOffset = frameOffset + SPDY_HEADER_LENGTH_OFFSET; buffer.skipBytes(SPDY_HEADER_SIZE); boolean control = (buffer.getByte(frameOffset) & 0x80) != 0; int version; int type; if (control) { // Decode control frame common header version = getUnsignedShort(buffer, frameOffset) & 0x7FFF; type = getUnsignedShort(buffer, frameOffset + SPDY_HEADER_TYPE_OFFSET); streamId = 0; // Default to session Stream-ID } else { // Decode data frame common header version = spdyVersion; // Default to expected version type = SPDY_DATA_FRAME; streamId = getUnsignedInt(buffer, frameOffset); } flags = buffer.getByte(flagsOffset); length = getUnsignedMedium(buffer, lengthOffset); // Check version first then validity if (version != spdyVersion) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid SPDY Version"); } else if (!isValidFrameHeader(streamId, type, flags, length)) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid Frame Error"); } else { state = getNextState(type, length); } break; case READ_DATA_FRAME: if (length == 0) { state = State.READ_COMMON_HEADER; delegate.readDataFrame(streamId, hasFlag(flags, SPDY_DATA_FLAG_FIN), Unpooled.buffer(0)); break; } // Generate data frames that do not exceed maxChunkSize int dataLength = Math.min(maxChunkSize, length); // Wait until entire frame is readable if (buffer.readableBytes() < dataLength) { return; } ByteBuf data = buffer.alloc().buffer(dataLength); data.writeBytes(buffer, dataLength); length -= dataLength; if (length == 0) { state = State.READ_COMMON_HEADER; } last = length == 0 && hasFlag(flags, SPDY_DATA_FLAG_FIN); delegate.readDataFrame(streamId, last, data); break; case READ_SYN_STREAM_FRAME: if (buffer.readableBytes() < 10) { return; } int offset = buffer.readerIndex(); streamId = getUnsignedInt(buffer, offset); int associatedToStreamId = getUnsignedInt(buffer, offset + 4); byte priority = (byte) (buffer.getByte(offset + 8) >> 5 & 0x07); last = hasFlag(flags, SPDY_FLAG_FIN); boolean unidirectional = hasFlag(flags, SPDY_FLAG_UNIDIRECTIONAL); buffer.skipBytes(10); length -= 10; if (streamId == 0) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid SYN_STREAM Frame"); } else { state = State.READ_HEADER_BLOCK; delegate.readSynStreamFrame(streamId, associatedToStreamId, priority, last, unidirectional); } break; case READ_SYN_REPLY_FRAME: if (buffer.readableBytes() < 4) { return; } streamId = getUnsignedInt(buffer, buffer.readerIndex()); last = hasFlag(flags, SPDY_FLAG_FIN); buffer.skipBytes(4); length -= 4; if (streamId == 0) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid SYN_REPLY Frame"); } else { state = State.READ_HEADER_BLOCK; delegate.readSynReplyFrame(streamId, last); } break; case READ_RST_STREAM_FRAME: if (buffer.readableBytes() < 8) { return; } streamId = getUnsignedInt(buffer, buffer.readerIndex()); statusCode = getSignedInt(buffer, buffer.readerIndex() + 4); buffer.skipBytes(8); if (streamId == 0 || statusCode == 0) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid RST_STREAM Frame"); } else { state = State.READ_COMMON_HEADER; delegate.readRstStreamFrame(streamId, statusCode); } break; case READ_SETTINGS_FRAME: if (buffer.readableBytes() < 4) { return; } boolean clear = hasFlag(flags, SPDY_SETTINGS_CLEAR); numSettings = getUnsignedInt(buffer, buffer.readerIndex()); buffer.skipBytes(4); length -= 4; // Validate frame length against number of entries. Each ID/Value entry is 8 bytes. if ((length & 0x07) != 0 || length >> 3 != numSettings) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid SETTINGS Frame"); } else { state = State.READ_SETTING; delegate.readSettingsFrame(clear); } break; case READ_SETTING: if (numSettings == 0) { state = State.READ_COMMON_HEADER; delegate.readSettingsEnd(); break; } if (buffer.readableBytes() < 8) { return; } byte settingsFlags = buffer.getByte(buffer.readerIndex()); int id = getUnsignedMedium(buffer, buffer.readerIndex() + 1); int value = getSignedInt(buffer, buffer.readerIndex() + 4); boolean persistValue = hasFlag(settingsFlags, SPDY_SETTINGS_PERSIST_VALUE); boolean persisted = hasFlag(settingsFlags, SPDY_SETTINGS_PERSISTED); buffer.skipBytes(8); --numSettings; delegate.readSetting(id, value, persistValue, persisted); break; case READ_PING_FRAME: if (buffer.readableBytes() < 4) { return; } int pingId = getSignedInt(buffer, buffer.readerIndex()); buffer.skipBytes(4); state = State.READ_COMMON_HEADER; delegate.readPingFrame(pingId); break; case READ_GOAWAY_FRAME: if (buffer.readableBytes() < 8) { return; } int lastGoodStreamId = getUnsignedInt(buffer, buffer.readerIndex()); statusCode = getSignedInt(buffer, buffer.readerIndex() + 4); buffer.skipBytes(8); state = State.READ_COMMON_HEADER; delegate.readGoAwayFrame(lastGoodStreamId, statusCode); break; case READ_HEADERS_FRAME: if (buffer.readableBytes() < 4) { return; } streamId = getUnsignedInt(buffer, buffer.readerIndex()); last = hasFlag(flags, SPDY_FLAG_FIN); buffer.skipBytes(4); length -= 4; if (streamId == 0) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid HEADERS Frame"); } else { state = State.READ_HEADER_BLOCK; delegate.readHeadersFrame(streamId, last); } break; case READ_WINDOW_UPDATE_FRAME: if (buffer.readableBytes() < 8) { return; } streamId = getUnsignedInt(buffer, buffer.readerIndex()); int deltaWindowSize = getUnsignedInt(buffer, buffer.readerIndex() + 4); buffer.skipBytes(8); if (deltaWindowSize == 0) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid WINDOW_UPDATE Frame"); } else { state = State.READ_COMMON_HEADER; delegate.readWindowUpdateFrame(streamId, deltaWindowSize); } break; case READ_HEADER_BLOCK: if (length == 0) { state = State.READ_COMMON_HEADER; delegate.readHeaderBlockEnd(); break; } if (!buffer.isReadable()) { return; } int compressedBytes = Math.min(buffer.readableBytes(), length); ByteBuf headerBlock = buffer.alloc().buffer(compressedBytes); headerBlock.writeBytes(buffer, compressedBytes); length -= compressedBytes; delegate.readHeaderBlock(headerBlock); break; case DISCARD_FRAME: int numBytes = Math.min(buffer.readableBytes(), length); buffer.skipBytes(numBytes); length -= numBytes; if (length == 0) { state = State.READ_COMMON_HEADER; break; } return; case FRAME_ERROR: buffer.skipBytes(buffer.readableBytes()); return; default: throw new Error("Shouldn't reach here."); } } }
@Test public void testSpdyDataFrame() throws Exception { int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; byte flags = 0; int length = 1024; ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); encodeDataFrameHeader(buf, streamId, flags, length); for (int i = 0; i < 256; i ++) { buf.writeInt(RANDOM.nextInt()); } decoder.decode(buf); verify(delegate).readDataFrame(streamId, false, buf.slice(SPDY_HEADER_SIZE, length)); assertFalse(buf.isReadable()); buf.release(); }
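The decoder's READ_COMMON_HEADER state consumes an 8-byte header; for a data frame the control bit is clear, bytes 0-3 carry the stream id, byte 4 the flags, and bytes 5-7 the 24-bit length. The test helper encodeDataFrameHeader is not shown, but a layout consistent with the decoder would be:

static void encodeDataFrameHeader(ByteBuf buf, int streamId, byte flags, int length) {
    buf.writeInt(streamId & 0x7FFFFFFF); // top bit clear -> data frame, remaining 31 bits are the stream id
    buf.writeByte(flags);                // read from SPDY_HEADER_FLAGS_OFFSET (byte 4)
    buf.writeMedium(length);             // read as unsigned 24-bit length from SPDY_HEADER_LENGTH_OFFSET (byte 5)
}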
@Override public void replay( long offset, long producerId, short producerEpoch, CoordinatorRecord record ) throws RuntimeException { ApiMessageAndVersion key = record.key(); ApiMessageAndVersion value = record.value(); switch (key.version()) { case 0: case 1: offsetMetadataManager.replay( offset, producerId, (OffsetCommitKey) key.message(), (OffsetCommitValue) Utils.messageOrNull(value) ); break; case 2: groupMetadataManager.replay( (GroupMetadataKey) key.message(), (GroupMetadataValue) Utils.messageOrNull(value) ); break; case 3: groupMetadataManager.replay( (ConsumerGroupMetadataKey) key.message(), (ConsumerGroupMetadataValue) Utils.messageOrNull(value) ); break; case 4: groupMetadataManager.replay( (ConsumerGroupPartitionMetadataKey) key.message(), (ConsumerGroupPartitionMetadataValue) Utils.messageOrNull(value) ); break; case 5: groupMetadataManager.replay( (ConsumerGroupMemberMetadataKey) key.message(), (ConsumerGroupMemberMetadataValue) Utils.messageOrNull(value) ); break; case 6: groupMetadataManager.replay( (ConsumerGroupTargetAssignmentMetadataKey) key.message(), (ConsumerGroupTargetAssignmentMetadataValue) Utils.messageOrNull(value) ); break; case 7: groupMetadataManager.replay( (ConsumerGroupTargetAssignmentMemberKey) key.message(), (ConsumerGroupTargetAssignmentMemberValue) Utils.messageOrNull(value) ); break; case 8: groupMetadataManager.replay( (ConsumerGroupCurrentMemberAssignmentKey) key.message(), (ConsumerGroupCurrentMemberAssignmentValue) Utils.messageOrNull(value) ); break; case 9: groupMetadataManager.replay( (ShareGroupPartitionMetadataKey) key.message(), (ShareGroupPartitionMetadataValue) Utils.messageOrNull(value) ); break; case 10: groupMetadataManager.replay( (ShareGroupMemberMetadataKey) key.message(), (ShareGroupMemberMetadataValue) Utils.messageOrNull(value) ); break; case 11: groupMetadataManager.replay( (ShareGroupMetadataKey) key.message(), (ShareGroupMetadataValue) Utils.messageOrNull(value) ); break; case 12: groupMetadataManager.replay( (ShareGroupTargetAssignmentMetadataKey) key.message(), (ShareGroupTargetAssignmentMetadataValue) Utils.messageOrNull(value) ); break; case 13: groupMetadataManager.replay( (ShareGroupTargetAssignmentMemberKey) key.message(), (ShareGroupTargetAssignmentMemberValue) Utils.messageOrNull(value) ); break; case 14: groupMetadataManager.replay( (ShareGroupCurrentMemberAssignmentKey) key.message(), (ShareGroupCurrentMemberAssignmentValue) Utils.messageOrNull(value) ); break; default: throw new IllegalStateException("Received an unknown record type " + key.version() + " in " + record); } }
@Test public void testReplayConsumerGroupCurrentMemberAssignment() { GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class); OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class); CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class); CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class); GroupCoordinatorShard coordinator = new GroupCoordinatorShard( new LogContext(), groupMetadataManager, offsetMetadataManager, Time.SYSTEM, new MockCoordinatorTimer<>(Time.SYSTEM), mock(GroupCoordinatorConfig.class), coordinatorMetrics, metricsShard ); ConsumerGroupCurrentMemberAssignmentKey key = new ConsumerGroupCurrentMemberAssignmentKey(); ConsumerGroupCurrentMemberAssignmentValue value = new ConsumerGroupCurrentMemberAssignmentValue(); coordinator.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, new CoordinatorRecord( new ApiMessageAndVersion(key, (short) 8), new ApiMessageAndVersion(value, (short) 0) )); verify(groupMetadataManager, times(1)).replay(key, value); }