focal_method | test_case |
---|---|
public synchronized int getEndOfBlockIndex() {
return endOfBlockIndex;
} | @Test
public void testGetEndOfBlockIndex() {
int expected = -1;
assertEquals(expected, instance.getEndOfBlockIndex(), "Unexpected initial value");
expected = 0;
instance.endOfBlockIndex = expected;
assertEquals(expected, instance.getEndOfBlockIndex());
expected = 5;
instance.endOfBlockIndex = expected;
assertEquals(expected, instance.getEndOfBlockIndex());
} |
public void createNewCodeDefinition(DbSession dbSession, String projectUuid, String mainBranchUuid,
String defaultBranchName, String newCodeDefinitionType, @Nullable String newCodeDefinitionValue) {
boolean isCommunityEdition = editionProvider.get().filter(EditionProvider.Edition.COMMUNITY::equals).isPresent();
NewCodePeriodType newCodePeriodType = parseNewCodeDefinitionType(newCodeDefinitionType);
NewCodePeriodDto dto = new NewCodePeriodDto();
dto.setType(newCodePeriodType);
dto.setProjectUuid(projectUuid);
if (isCommunityEdition) {
dto.setBranchUuid(mainBranchUuid);
}
getNewCodeDefinitionValueProjectCreation(newCodePeriodType, newCodeDefinitionValue, defaultBranchName).ifPresent(dto::setValue);
if (!CaycUtils.isNewCodePeriodCompliant(dto.getType(), dto.getValue())) {
throw new IllegalArgumentException("Failed to set the New Code Definition. The given value is not compatible with the Clean as You Code methodology. "
+ "Please refer to the documentation for compliant options.");
}
dbClient.newCodePeriodDao().insert(dbSession, dto);
} | @Test
public void createNewCodeDefinition_throw_IAE_if_days_is_invalid() {
assertThatThrownBy(() -> newCodeDefinitionResolver.createNewCodeDefinition(dbSession, DEFAULT_PROJECT_ID, MAIN_BRANCH_UUID, MAIN_BRANCH, NUMBER_OF_DAYS.name(), "unknown"))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("Failed to parse number of days: unknown");
} |
@Override
public List<OptExpression> transform(OptExpression input, OptimizerContext context) {
// will transform to topN
LogicalLimitOperator limit = (LogicalLimitOperator) input.getOp();
LogicalTopNOperator sort = (LogicalTopNOperator) input.getInputs().get(0).getOp();
long minLimit = limit.getLimit();
if (sort.hasLimit()) {
minLimit = Math.min(minLimit, sort.getLimit());
}
OptExpression result = new OptExpression(
new LogicalTopNOperator(sort.getOrderByElements(), limit.getLimit(), limit.getOffset()));
result.getInputs().addAll(input.getInputs().get(0).getInputs());
return Lists.newArrayList(result);
} | @Test
public void transform() {
OptExpression limit = new OptExpression(LogicalLimitOperator.init(10, 2));
OptExpression sort = new OptExpression(new LogicalTopNOperator(
Lists.newArrayList(new Ordering(new ColumnRefOperator(1, Type.INT, "name", true), false, false))));
limit.getInputs().add(sort);
MergeLimitWithSortRule rule = new MergeLimitWithSortRule();
List<OptExpression> list = rule.transform(limit, new OptimizerContext(new Memo(), new ColumnRefFactory()));
assertEquals(OperatorType.LOGICAL_TOPN, list.get(0).getOp().getOpType());
assertEquals(2, ((LogicalTopNOperator) list.get(0).getOp()).getOffset());
assertEquals(10, ((LogicalTopNOperator) list.get(0).getOp()).getLimit());
} |
@Override
public CompletableFuture<Map<String, BrokerLookupData>> filterAsync(Map<String, BrokerLookupData> brokers,
ServiceUnitId serviceUnit,
LoadManagerContext context) {
ServiceConfiguration conf = context.brokerConfiguration();
if (!conf.isPreferLaterVersions() || brokers.isEmpty()) {
return CompletableFuture.completedFuture(brokers);
}
Version latestVersion;
try {
latestVersion = getLatestVersionNumber(brokers);
if (log.isDebugEnabled()) {
log.debug("Latest broker version found was [{}]", latestVersion);
}
} catch (Exception ex) {
log.warn("Disabling PreferLaterVersions feature; reason: " + ex.getMessage());
return FutureUtil.failedFuture(
new BrokerFilterBadVersionException("Cannot determine newest broker version: " + ex.getMessage()));
}
int numBrokersLatestVersion = 0;
int numBrokersOlderVersion = 0;
Iterator<Map.Entry<String, BrokerLookupData>> brokerIterator = brokers.entrySet().iterator();
while (brokerIterator.hasNext()) {
Map.Entry<String, BrokerLookupData> next = brokerIterator.next();
String brokerId = next.getKey();
String version = next.getValue().brokerVersion();
Version brokerVersionVersion = Version.valueOf(version);
if (brokerVersionVersion.equals(latestVersion)) {
log.debug("Broker [{}] is running the latest version ([{}])", brokerId, version);
numBrokersLatestVersion++;
} else {
log.info("Broker [{}] is running an older version ([{}]); latest version is [{}]",
brokerId, version, latestVersion);
numBrokersOlderVersion++;
brokerIterator.remove();
}
}
if (numBrokersOlderVersion == 0) {
log.info("All {} brokers are running the latest version [{}]", numBrokersLatestVersion, latestVersion);
}
return CompletableFuture.completedFuture(brokers);
} | @Test
public void testDisabledFilter() throws BrokerFilterException, ExecutionException, InterruptedException {
LoadManagerContext context = getContext();
ServiceConfiguration configuration = new ServiceConfiguration();
configuration.setPreferLaterVersions(false);
doReturn(configuration).when(context).brokerConfiguration();
Map<String, BrokerLookupData> originalBrokers = Map.of(
"localhost:6650", getLookupData("2.10.0"),
"localhost:6651", getLookupData("2.10.1")
);
Map<String, BrokerLookupData> brokers = new HashMap<>(originalBrokers);
BrokerVersionFilter brokerVersionFilter = new BrokerVersionFilter();
Map<String, BrokerLookupData> result = brokerVersionFilter.filterAsync(brokers, null, context).get();
assertEquals(result, originalBrokers);
} |
@SuppressWarnings({"unchecked", "rawtypes"})
@Override
public GenericRow apply(
final GenericKey k,
final GenericRow rowValue,
final GenericRow aggRowValue
) {
final GenericRow result = GenericRow.fromList(aggRowValue.values());
for (int idx = 0; idx < nonAggColumnCount; idx++) {
result.set(idx, rowValue.get(idx));
}
for (int idx = nonAggColumnCount; idx < columnCount; idx++) {
final TableAggregationFunction function = aggregateFunctions.get(idx - nonAggColumnCount);
final Object argument = getCurrentValue(
rowValue,
function.getArgIndicesInValue(),
function::convertToInput
);
final Object previous = result.get(idx);
result.set(idx, function.undo(argument, previous));
}
return result;
} | @Test
public void shouldNotMutateParametersOnApply() {
// Given:
final GenericRow value = GenericRow.genericRow(1, 2L);
final GenericRow agg = GenericRow.genericRow(1, 2L, 3);
// When:
final GenericRow result = aggregator.apply(key, value, agg);
// Then:
assertThat(value, is(GenericRow.genericRow(1, 2L)));
assertThat(agg, is(GenericRow.genericRow(1, 2L, 3)));
assertThat("invalid test", result, is(not(GenericRow.genericRow(1, 2L, 3))));
} |
@Subscribe
public void onChatMessage(ChatMessage event)
{
if (event.getType() == ChatMessageType.GAMEMESSAGE || event.getType() == ChatMessageType.SPAM)
{
String message = Text.removeTags(event.getMessage());
Matcher dodgyCheckMatcher = DODGY_CHECK_PATTERN.matcher(message);
Matcher dodgyProtectMatcher = DODGY_PROTECT_PATTERN.matcher(message);
Matcher dodgyBreakMatcher = DODGY_BREAK_PATTERN.matcher(message);
Matcher bindingNecklaceCheckMatcher = BINDING_CHECK_PATTERN.matcher(message);
Matcher bindingNecklaceUsedMatcher = BINDING_USED_PATTERN.matcher(message);
Matcher ringOfForgingCheckMatcher = RING_OF_FORGING_CHECK_PATTERN.matcher(message);
Matcher amuletOfChemistryCheckMatcher = AMULET_OF_CHEMISTRY_CHECK_PATTERN.matcher(message);
Matcher amuletOfChemistryUsedMatcher = AMULET_OF_CHEMISTRY_USED_PATTERN.matcher(message);
Matcher amuletOfChemistryBreakMatcher = AMULET_OF_CHEMISTRY_BREAK_PATTERN.matcher(message);
Matcher amuletOfBountyCheckMatcher = AMULET_OF_BOUNTY_CHECK_PATTERN.matcher(message);
Matcher amuletOfBountyUsedMatcher = AMULET_OF_BOUNTY_USED_PATTERN.matcher(message);
Matcher chronicleAddMatcher = CHRONICLE_ADD_PATTERN.matcher(message);
Matcher chronicleUseAndCheckMatcher = CHRONICLE_USE_AND_CHECK_PATTERN.matcher(message);
Matcher slaughterActivateMatcher = BRACELET_OF_SLAUGHTER_ACTIVATE_PATTERN.matcher(message);
Matcher slaughterCheckMatcher = BRACELET_OF_SLAUGHTER_CHECK_PATTERN.matcher(message);
Matcher expeditiousActivateMatcher = EXPEDITIOUS_BRACELET_ACTIVATE_PATTERN.matcher(message);
Matcher expeditiousCheckMatcher = EXPEDITIOUS_BRACELET_CHECK_PATTERN.matcher(message);
Matcher bloodEssenceCheckMatcher = BLOOD_ESSENCE_CHECK_PATTERN.matcher(message);
Matcher bloodEssenceExtractMatcher = BLOOD_ESSENCE_EXTRACT_PATTERN.matcher(message);
Matcher braceletOfClayCheckMatcher = BRACELET_OF_CLAY_CHECK_PATTERN.matcher(message);
if (message.contains(RING_OF_RECOIL_BREAK_MESSAGE))
{
notifier.notify(config.recoilNotification(), "Your Ring of Recoil has shattered");
}
else if (dodgyBreakMatcher.find())
{
notifier.notify(config.dodgyNotification(), "Your dodgy necklace has crumbled to dust.");
updateDodgyNecklaceCharges(MAX_DODGY_CHARGES);
}
else if (dodgyCheckMatcher.find())
{
updateDodgyNecklaceCharges(Integer.parseInt(dodgyCheckMatcher.group(1)));
}
else if (dodgyProtectMatcher.find())
{
updateDodgyNecklaceCharges(Integer.parseInt(dodgyProtectMatcher.group(1)));
}
else if (amuletOfChemistryCheckMatcher.find())
{
updateAmuletOfChemistryCharges(Integer.parseInt(amuletOfChemistryCheckMatcher.group(1)));
}
else if (amuletOfChemistryUsedMatcher.find())
{
final String match = amuletOfChemistryUsedMatcher.group(1);
int charges = 1;
if (!match.equals("one"))
{
charges = Integer.parseInt(match);
}
updateAmuletOfChemistryCharges(charges);
}
else if (amuletOfChemistryBreakMatcher.find())
{
notifier.notify(config.amuletOfChemistryNotification(), "Your amulet of chemistry has crumbled to dust.");
updateAmuletOfChemistryCharges(MAX_AMULET_OF_CHEMISTRY_CHARGES);
}
else if (amuletOfBountyCheckMatcher.find())
{
updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyCheckMatcher.group(1)));
}
else if (amuletOfBountyUsedMatcher.find())
{
updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyUsedMatcher.group(1)));
}
else if (message.equals(AMULET_OF_BOUNTY_BREAK_TEXT))
{
updateAmuletOfBountyCharges(MAX_AMULET_OF_BOUNTY_CHARGES);
}
else if (message.contains(BINDING_BREAK_TEXT))
{
notifier.notify(config.bindingNotification(), BINDING_BREAK_TEXT);
// This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
updateBindingNecklaceCharges(MAX_BINDING_CHARGES + 1);
}
else if (bindingNecklaceUsedMatcher.find())
{
final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
if (equipment.contains(ItemID.BINDING_NECKLACE))
{
updateBindingNecklaceCharges(getItemCharges(ItemChargeConfig.KEY_BINDING_NECKLACE) - 1);
}
}
else if (bindingNecklaceCheckMatcher.find())
{
final String match = bindingNecklaceCheckMatcher.group(1);
int charges = 1;
if (!match.equals("one"))
{
charges = Integer.parseInt(match);
}
updateBindingNecklaceCharges(charges);
}
else if (ringOfForgingCheckMatcher.find())
{
final String match = ringOfForgingCheckMatcher.group(1);
int charges = 1;
if (!match.equals("one"))
{
charges = Integer.parseInt(match);
}
updateRingOfForgingCharges(charges);
}
else if (message.equals(RING_OF_FORGING_USED_TEXT) || message.equals(RING_OF_FORGING_VARROCK_PLATEBODY))
{
final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);
final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
// Determine if the player smelted with a Ring of Forging equipped.
if (equipment == null)
{
return;
}
if (equipment.contains(ItemID.RING_OF_FORGING) && (message.equals(RING_OF_FORGING_USED_TEXT) || inventory.count(ItemID.IRON_ORE) > 1))
{
int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_RING_OF_FORGING) - 1, 0, MAX_RING_OF_FORGING_CHARGES);
updateRingOfForgingCharges(charges);
}
}
else if (message.equals(RING_OF_FORGING_BREAK_TEXT))
{
notifier.notify(config.ringOfForgingNotification(), "Your ring of forging has melted.");
// This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
updateRingOfForgingCharges(MAX_RING_OF_FORGING_CHARGES + 1);
}
else if (chronicleAddMatcher.find())
{
final String match = chronicleAddMatcher.group(1);
if (match.equals("one"))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
}
else
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(match));
}
}
else if (chronicleUseAndCheckMatcher.find())
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(chronicleUseAndCheckMatcher.group(1)));
}
else if (message.equals(CHRONICLE_ONE_CHARGE_TEXT))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
}
else if (message.equals(CHRONICLE_EMPTY_TEXT) || message.equals(CHRONICLE_NO_CHARGES_TEXT))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 0);
}
else if (message.equals(CHRONICLE_FULL_TEXT))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1000);
}
else if (slaughterActivateMatcher.find())
{
final String found = slaughterActivateMatcher.group(1);
if (found == null)
{
updateBraceletOfSlaughterCharges(MAX_SLAYER_BRACELET_CHARGES);
notifier.notify(config.slaughterNotification(), BRACELET_OF_SLAUGHTER_BREAK_TEXT);
}
else
{
updateBraceletOfSlaughterCharges(Integer.parseInt(found));
}
}
else if (slaughterCheckMatcher.find())
{
updateBraceletOfSlaughterCharges(Integer.parseInt(slaughterCheckMatcher.group(1)));
}
else if (expeditiousActivateMatcher.find())
{
final String found = expeditiousActivateMatcher.group(1);
if (found == null)
{
updateExpeditiousBraceletCharges(MAX_SLAYER_BRACELET_CHARGES);
notifier.notify(config.expeditiousNotification(), EXPEDITIOUS_BRACELET_BREAK_TEXT);
}
else
{
updateExpeditiousBraceletCharges(Integer.parseInt(found));
}
}
else if (expeditiousCheckMatcher.find())
{
updateExpeditiousBraceletCharges(Integer.parseInt(expeditiousCheckMatcher.group(1)));
}
else if (bloodEssenceCheckMatcher.find())
{
updateBloodEssenceCharges(Integer.parseInt(bloodEssenceCheckMatcher.group(1)));
}
else if (bloodEssenceExtractMatcher.find())
{
updateBloodEssenceCharges(getItemCharges(ItemChargeConfig.KEY_BLOOD_ESSENCE) - Integer.parseInt(bloodEssenceExtractMatcher.group(1)));
}
else if (message.contains(BLOOD_ESSENCE_ACTIVATE_TEXT))
{
updateBloodEssenceCharges(MAX_BLOOD_ESSENCE_CHARGES);
}
else if (braceletOfClayCheckMatcher.find())
{
updateBraceletOfClayCharges(Integer.parseInt(braceletOfClayCheckMatcher.group(1)));
}
else if (message.equals(BRACELET_OF_CLAY_USE_TEXT) || message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN))
{
final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
// Determine if the player mined with a Bracelet of Clay equipped.
if (equipment != null && equipment.contains(ItemID.BRACELET_OF_CLAY))
{
final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);
// Charge is not used if only 1 inventory slot is available when mining in Prifddinas
boolean ignore = inventory != null
&& inventory.count() == 27
&& message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN);
if (!ignore)
{
int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_BRACELET_OF_CLAY) - 1, 0, MAX_BRACELET_OF_CLAY_CHARGES);
updateBraceletOfClayCharges(charges);
}
}
}
else if (message.equals(BRACELET_OF_CLAY_BREAK_TEXT))
{
notifier.notify(config.braceletOfClayNotification(), "Your bracelet of clay has crumbled to dust");
updateBraceletOfClayCharges(MAX_BRACELET_OF_CLAY_CHARGES);
}
}
} | @Test
public void testExpeditiousCheck1()
{
ChatMessage chatMessage = new ChatMessage(null, ChatMessageType.GAMEMESSAGE, "", CHECK_EXPEDITIOUS_BRACELET_1, "", 0);
itemChargePlugin.onChatMessage(chatMessage);
verify(configManager).setRSProfileConfiguration(ItemChargeConfig.GROUP, ItemChargeConfig.KEY_EXPEDITIOUS_BRACELET, 1);
} |
public static <T extends Message> ProtoCoder<T> of(Class<T> protoMessageClass) {
return new ProtoCoder<>(protoMessageClass, ImmutableSet.of());
} | @Test
public void encodeNullThrowsCoderException() throws Exception {
thrown.expect(CoderException.class);
thrown.expectMessage("cannot encode a null MessageA");
CoderUtils.encodeToBase64(ProtoCoder.of(MessageA.class), null);
} |
public boolean existsCreatedStep() {
return 0 != getCreatedStepCount();
} | @Test
public void testExistsCreatedStep() throws Exception {
WorkflowRuntimeOverview overview =
loadObject(
"fixtures/instances/sample-workflow-runtime-overview.json",
WorkflowRuntimeOverview.class);
assertTrue(overview.existsCreatedStep());
overview.setTotalStepCount(2);
assertTrue(overview.existsCreatedStep());
overview.setStepOverview(
singletonEnumMap(StepInstance.Status.NOT_CREATED, WorkflowStepStatusSummary.of(1)));
assertFalse(overview.existsCreatedStep());
overview.getStepOverview().put(StepInstance.Status.CREATED, WorkflowStepStatusSummary.of(0));
assertFalse(overview.existsCreatedStep());
} |
KafkaBasedLog<String, byte[]> setupAndCreateKafkaBasedLog(String topic, final WorkerConfig config) {
String clusterId = config.kafkaClusterId();
Map<String, Object> originals = config.originals();
Map<String, Object> producerProps = new HashMap<>(baseProducerProps);
producerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId);
Map<String, Object> consumerProps = new HashMap<>(originals);
consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId);
ConnectUtils.addMetricsContextProperties(consumerProps, config, clusterId);
if (config.exactlyOnceSourceEnabled()) {
ConnectUtils.ensureProperty(
consumerProps, ConsumerConfig.ISOLATION_LEVEL_CONFIG, IsolationLevel.READ_COMMITTED.toString(),
"for the worker's config topic consumer when exactly-once source support is enabled",
true
);
}
Map<String, Object> adminProps = new HashMap<>(originals);
ConnectUtils.addMetricsContextProperties(adminProps, config, clusterId);
adminProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId);
Map<String, Object> topicSettings = config instanceof DistributedConfig
? ((DistributedConfig) config).configStorageTopicSettings()
: Collections.emptyMap();
NewTopic topicDescription = TopicAdmin.defineTopic(topic)
.config(topicSettings) // first so that we override user-supplied settings as needed
.compacted()
.partitions(1)
.replicationFactor(config.getShort(DistributedConfig.CONFIG_STORAGE_REPLICATION_FACTOR_CONFIG))
.build();
return createKafkaBasedLog(topic, producerProps, consumerProps, new ConsumeCallback(), topicDescription, topicAdminSupplier, config, time);
} | @Test
public void testConsumerPropertiesOverrideUserSuppliedValuesWithExactlyOnceSourceEnabled() {
props.put(EXACTLY_ONCE_SOURCE_SUPPORT_CONFIG, "enabled");
props.put(ISOLATION_LEVEL_CONFIG, IsolationLevel.READ_UNCOMMITTED.toString());
createStore();
configStorage.setupAndCreateKafkaBasedLog(TOPIC, config);
verifyConfigure();
assertEquals(
IsolationLevel.READ_COMMITTED.toString(),
capturedConsumerProps.getValue().get(ISOLATION_LEVEL_CONFIG)
);
} |
@Override
@Transactional
public boolean checkForPreApproval(Long userId, Integer userType, String clientId, Collection<String> requestedScopes) {
// Step 1: evaluate the client's auto-approved scopes; if every requested scope is auto-approved, return true
OAuth2ClientDO clientDO = oauth2ClientService.validOAuthClientFromCache(clientId);
Assert.notNull(clientDO, "客户端不能为空"); // defensive programming
if (CollUtil.containsAll(clientDO.getAutoApproveScopes(), requestedScopes)) {
// gh-877 - if all scopes are auto approved, approvals still need to be added to the approval store.
LocalDateTime expireTime = LocalDateTime.now().plusSeconds(TIMEOUT);
for (String scope : requestedScopes) {
saveApprove(userId, userType, clientId, scope, true, expireTime);
}
return true;
}
// Step 2: also count approvals the user has already granted; if all requested scopes are covered, return true
List<OAuth2ApproveDO> approveDOs = getApproveList(userId, userType, clientId);
Set<String> scopes = convertSet(approveDOs, OAuth2ApproveDO::getScope,
OAuth2ApproveDO::getApproved); // keep only unexpired + approved entries
return CollUtil.containsAll(scopes, requestedScopes);
} | @Test
public void checkForPreApproval_clientAutoApprove() {
// prepare parameters
Long userId = randomLongId();
Integer userType = randomEle(UserTypeEnum.values()).getValue();
String clientId = randomString();
List<String> requestedScopes = Lists.newArrayList("read");
// mock methods
when(oauth2ClientService.validOAuthClientFromCache(eq(clientId)))
.thenReturn(randomPojo(OAuth2ClientDO.class).setAutoApproveScopes(requestedScopes));
// invoke
boolean success = oauth2ApproveService.checkForPreApproval(userId, userType,
clientId, requestedScopes);
// assert
assertTrue(success);
List<OAuth2ApproveDO> result = oauth2ApproveMapper.selectList();
assertEquals(1, result.size());
assertEquals(userId, result.get(0).getUserId());
assertEquals(userType, result.get(0).getUserType());
assertEquals(clientId, result.get(0).getClientId());
assertEquals("read", result.get(0).getScope());
assertTrue(result.get(0).getApproved());
assertFalse(DateUtils.isExpired(result.get(0).getExpiresTime()));
} |
@Override
public void run() {
final Instant now = time.get();
try {
final Collection<PersistentQueryMetadata> queries = engine.getPersistentQueries();
final Optional<Double> saturation = queries.stream()
.collect(Collectors.groupingBy(PersistentQueryMetadata::getQueryApplicationId))
.entrySet()
.stream()
.map(e -> measure(now, e.getKey(), e.getValue()))
.max(PersistentQuerySaturationMetrics::compareSaturation)
.orElse(Optional.of(0.0));
saturation.ifPresent(s -> report(now, s));
final Set<String> appIds = queries.stream()
.map(PersistentQueryMetadata::getQueryApplicationId)
.collect(Collectors.toSet());
for (final String appId
: Sets.difference(new HashSet<>(perKafkaStreamsStats.keySet()), appIds)) {
perKafkaStreamsStats.get(appId).cleanup(reporter);
perKafkaStreamsStats.remove(appId);
}
} catch (final RuntimeException e) {
LOGGER.error("Error collecting saturation", e);
throw e;
}
} | @Test
public void shouldCleanupThreadMetric() {
// Given:
final Instant start = Instant.now();
when(clock.get()).thenReturn(start);
givenMetrics(kafkaStreams1)
.withThreadStartTime("t1", start)
.withBlockedTime("t1", Duration.ofMinutes(0));
collector.run();
when(clock.get()).thenReturn(start.plus(Duration.ofMinutes(2)));
givenMetrics(kafkaStreams1)
.withThreadStartTime("t2", start)
.withBlockedTime("t2", Duration.ofMinutes(0));
// When:
collector.run();
// Then:
verify(reporter).cleanup("query-thread-saturation", ImmutableMap.of("thread-id", "t1"));
} |
public synchronized TopologyDescription describe() {
return internalTopologyBuilder.describe();
} | @Test
public void timeWindowAnonymousStoreTypeMaterializedCountShouldPreserveTopologyStructure() {
final StreamsBuilder builder = new StreamsBuilder();
builder.stream("input-topic")
.groupByKey()
.windowedBy(TimeWindows.of(ofMillis(1)))
.count(Materialized.as(Materialized.StoreType.IN_MEMORY));
final Topology topology = builder.build();
final TopologyDescription describe = topology.describe();
assertEquals(
"Topologies:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
" --> KSTREAM-AGGREGATE-0000000003\n" +
" Processor: KSTREAM-AGGREGATE-0000000003 (stores: [KSTREAM-AGGREGATE-STATE-STORE-0000000002])\n" +
" --> none\n" +
" <-- KSTREAM-SOURCE-0000000000\n\n",
describe.toString()
);
topology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
assertThat(topology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(false));
} |
@Override
public PollResult poll(long currentTimeMs) {
return pollInternal(
prepareFetchRequests(),
this::handleFetchSuccess,
this::handleFetchFailure
);
} | @Test
public void testParseCorruptedRecord() throws Exception {
buildFetcher();
assignFromUser(singleton(tp0));
ByteBuffer buffer = ByteBuffer.allocate(1024);
DataOutputStream out = new DataOutputStream(new ByteBufferOutputStream(buffer));
byte magic = RecordBatch.MAGIC_VALUE_V1;
byte[] key = "foo".getBytes();
byte[] value = "baz".getBytes();
long offset = 0;
long timestamp = 500L;
int size = LegacyRecord.recordSize(magic, key.length, value.length);
byte attributes = LegacyRecord.computeAttributes(magic, CompressionType.NONE, TimestampType.CREATE_TIME);
long crc = LegacyRecord.computeChecksum(magic, attributes, timestamp, key, value);
// write one valid record
out.writeLong(offset);
out.writeInt(size);
LegacyRecord.write(out, magic, crc, LegacyRecord.computeAttributes(magic, CompressionType.NONE, TimestampType.CREATE_TIME), timestamp, key, value);
// and one invalid record (note the crc)
out.writeLong(offset + 1);
out.writeInt(size);
LegacyRecord.write(out, magic, crc + 1, LegacyRecord.computeAttributes(magic, CompressionType.NONE, TimestampType.CREATE_TIME), timestamp, key, value);
// write one valid record
out.writeLong(offset + 2);
out.writeInt(size);
LegacyRecord.write(out, magic, crc, LegacyRecord.computeAttributes(magic, CompressionType.NONE, TimestampType.CREATE_TIME), timestamp, key, value);
// Write a record whose size field is invalid.
out.writeLong(offset + 3);
out.writeInt(1);
// write one valid record
out.writeLong(offset + 4);
out.writeInt(size);
LegacyRecord.write(out, magic, crc, LegacyRecord.computeAttributes(magic, CompressionType.NONE, TimestampType.CREATE_TIME), timestamp, key, value);
buffer.flip();
subscriptions.seekUnvalidated(tp0, new SubscriptionState.FetchPosition(0, Optional.empty(), metadata.currentLeader(tp0)));
// normal fetch
assertEquals(1, sendFetches());
client.prepareResponse(fullFetchResponse(tidp0, MemoryRecords.readableRecords(buffer), Errors.NONE, 100L, 0));
networkClientDelegate.poll(time.timer(0));
// the first fetchRecords() should return the first valid message
assertEquals(1, fetchRecords().get(tp0).size());
assertEquals(1, subscriptions.position(tp0).offset);
ensureBlockOnRecord(1L);
seekAndConsumeRecord(buffer, 2L);
ensureBlockOnRecord(3L);
try {
// For a record that cannot be retrieved from the iterator, we cannot seek over it within the batch.
seekAndConsumeRecord(buffer, 4L);
fail("Should have thrown exception when fail to retrieve a record from iterator.");
} catch (KafkaException ke) {
// let it go
}
ensureBlockOnRecord(4L);
} |
public void reset() {
this.next = this.initial;
this.mandatoryStopMade = false;
} | @Test
public void basicTest() {
Clock mockClock = Clock.fixed(Instant.EPOCH, ZoneId.systemDefault());
Backoff backoff = new Backoff(5, TimeUnit.MILLISECONDS, 60, TimeUnit.SECONDS, 60, TimeUnit.SECONDS, mockClock);
assertTrue(checkExactAndDecrementTimer(backoff, 5));
assertTrue(withinTenPercentAndDecrementTimer(backoff, 10));
backoff.reset();
assertTrue(checkExactAndDecrementTimer(backoff, 5));
} |
public static byte[] fromHexString(final String values) {
return fromHexString(values, ":");
} | @Test(expected = NumberFormatException.class)
public void testFromHexStringError() {
String invalidStr = "00:00:00:00:00:00:ffff";
HexString.fromHexString(invalidStr);
fail("HexString.fromHexString() should have thrown a NumberFormatException");
} |
public static String getCurrentTimeStr() {
Calendar c = Calendar.getInstance();
c.setTimeInMillis(System.currentTimeMillis());
return DateFormatUtils.format(c.getTime(), YYYYMMMDDHHMMSS);
} | @Test
void testGetCurrentTimeStr() throws ParseException {
Date date1 = new Date(TimeUtils.getCurrentTime().getTime());
assertNotNull(date1.toString());
Date date2 = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").parse(TimeUtils.getCurrentTimeStr());
assertNotNull(date2.toString());
} |
@Override
@SuppressWarnings("unchecked")
public void init() throws ServiceException {
timersSize = getServiceConfig().getInt(CONF_TIMERS_SIZE, 10);
counterLock = new ReentrantLock();
timerLock = new ReentrantLock();
variableLock = new ReentrantLock();
samplerLock = new ReentrantLock();
Map<String, VariableHolder> jvmVariables = new ConcurrentHashMap<String, VariableHolder>();
counters = new ConcurrentHashMap<String, Map<String, AtomicLong>>();
timers = new ConcurrentHashMap<String, Map<String, Timer>>();
variables = new ConcurrentHashMap<String, Map<String, VariableHolder>>();
samplers = new ConcurrentHashMap<String, Map<String, Sampler>>();
samplersList = new ArrayList<Sampler>();
all = new LinkedHashMap<String, Map<String, ?>>();
all.put("os-env", System.getenv());
all.put("sys-props", (Map<String, ?>) (Map) System.getProperties());
all.put("jvm", jvmVariables);
all.put("counters", (Map) counters);
all.put("timers", (Map) timers);
all.put("variables", (Map) variables);
all.put("samplers", (Map) samplers);
jvmVariables.put("free.memory", new VariableHolder<Long>(new Instrumentation.Variable<Long>() {
@Override
public Long getValue() {
return Runtime.getRuntime().freeMemory();
}
}));
jvmVariables.put("max.memory", new VariableHolder<Long>(new Instrumentation.Variable<Long>() {
@Override
public Long getValue() {
return Runtime.getRuntime().maxMemory();
}
}));
jvmVariables.put("total.memory", new VariableHolder<Long>(new Instrumentation.Variable<Long>() {
@Override
public Long getValue() {
return Runtime.getRuntime().totalMemory();
}
}));
} | @Test
public void sampler() throws Exception {
final long value[] = new long[1];
Instrumentation.Variable<Long> var = new Instrumentation.Variable<Long>() {
@Override
public Long getValue() {
return value[0];
}
};
InstrumentationService.Sampler sampler = new InstrumentationService.Sampler();
sampler.init(4, var);
assertEquals(sampler.getRate(), 0f, 0.0001);
sampler.sample();
assertEquals(sampler.getRate(), 0f, 0.0001);
value[0] = 1;
sampler.sample();
assertEquals(sampler.getRate(), (0d + 1) / 2, 0.0001);
value[0] = 2;
sampler.sample();
assertEquals(sampler.getRate(), (0d + 1 + 2) / 3, 0.0001);
value[0] = 3;
sampler.sample();
assertEquals(sampler.getRate(), (0d + 1 + 2 + 3) / 4, 0.0001);
value[0] = 4;
sampler.sample();
assertEquals(sampler.getRate(), (4d + 1 + 2 + 3) / 4, 0.0001);
JSONObject json = (JSONObject) new JSONParser().parse(sampler.toJSONString());
assertEquals(json.size(), 2);
assertEquals(json.get("sampler"), sampler.getRate());
assertEquals(json.get("size"), 4L);
StringWriter writer = new StringWriter();
sampler.writeJSONString(writer);
writer.close();
json = (JSONObject) new JSONParser().parse(writer.toString());
assertEquals(json.size(), 2);
assertEquals(json.get("sampler"), sampler.getRate());
assertEquals(json.get("size"), 4L);
} |
@Override
public boolean containsKey(K key) {
begin();
boolean result = transactionalMap.containsKey(key);
commit();
return result;
} | @Test
public void testContainsKey() {
map.put(23, "value-23");
assertTrue(adapter.containsKey(23));
assertFalse(adapter.containsKey(42));
} |
public static boolean isSecond(long ts) {
return (ts & SECOND_MASK) == 0;
} | @Test
public void testIsSecond() {
Assert.assertFalse(TimeUtils.isSecond(System.currentTimeMillis()));
Assert.assertTrue(TimeUtils.isSecond(System.currentTimeMillis() / 1000));
} |
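The constant SECOND_MASK is not included in this row, so the check above only reads with a definition in mind. A minimal, hypothetical sketch under the assumption that the mask selects the bits above the 32-bit range, so a seconds-since-epoch value (which still fits in 32 bits) passes while a milliseconds value does not; the mask value here is an assumption, not taken from the source:

// Hypothetical sketch only; the real SECOND_MASK value is not shown in this row.
import java.util.concurrent.TimeUnit;

public class SecondMaskSketch {
    private static final long SECOND_MASK = 0xFFFFFFFF00000000L; // assumed value: upper 32 bits

    public static boolean isSecond(long ts) {
        // seconds since the epoch fit in 32 bits, milliseconds do not
        return (ts & SECOND_MASK) == 0;
    }

    public static void main(String[] args) {
        long millis = System.currentTimeMillis();
        System.out.println(isSecond(millis));                                   // false
        System.out.println(isSecond(TimeUnit.MILLISECONDS.toSeconds(millis)));  // true
    }
}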
@Override
public DescriptiveUrl toDownloadUrl(final Path file, final Sharee sharee, final Void options, final PasswordCallback callback) throws BackgroundException {
try {
if(log.isDebugEnabled()) {
log.debug(String.format("Create temporary link for %s", file));
}
// This link will expire in four hours and afterwards you will get 410 Gone.
final String link = new DbxUserFilesRequests(session.getClient(file)).getTemporaryLink(containerService.getKey(file)).getLink();
// Determine expiry time for URL
final Calendar expiry = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
expiry.add(Calendar.HOUR, 4);
return new DescriptiveUrl(URI.create(link), DescriptiveUrl.Type.signed,
MessageFormat.format(LocaleFactory.localizedString("{0} URL"), LocaleFactory.localizedString("Temporary", "S3"))
+ " (" + MessageFormat.format(LocaleFactory.localizedString("Expires {0}", "S3") + ")",
UserDateFormatterFactory.get().getMediumFormat(expiry.getTimeInMillis()))
);
}
catch(DbxException e) {
throw new DropboxExceptionMappingService().map(e);
}
} | @Test
public void testToUrl() throws Exception {
final DropboxTemporaryUrlProvider provider = new DropboxTemporaryUrlProvider(session);
final Path file = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new DropboxTouchFeature(session).touch(file, new TransferStatus());
assertNotNull(provider.toDownloadUrl(file, Share.Sharee.world, null, new DisabledPasswordCallback()).getUrl());
new DropboxDeleteFeature(session).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
public static <T> T readJsonSR(
@Nonnull final byte[] jsonWithMagic,
final ObjectMapper mapper,
final Class<? extends T> clazz
) throws IOException {
if (!hasMagicByte(jsonWithMagic)) {
// don't log contents of jsonWithMagic to avoid leaking data into the logs
throw new KsqlException(
"Got unexpected JSON serialization format that did not start with the magic byte. If "
+ "this stream was not serialized using the JsonSchemaConverter, then make sure "
+ "the stream is declared with JSON format (not JSON_SR).");
}
return mapper.readValue(
jsonWithMagic,
SIZE_OF_SR_PREFIX,
jsonWithMagic.length - SIZE_OF_SR_PREFIX,
clazz
);
} | @Test()
public void shouldThrowOnStandardJsonConversion() {
// Given:
byte[] json = new byte[]{/* data */ 0x01};
// When:
final Exception e = assertThrows(
Exception.class,
() -> JsonSerdeUtils.readJsonSR(json, mapper, Object.class)
);
// Then:
assertThat(e.getMessage(), containsString(
"Got unexpected JSON serialization format that did not start with the magic byte"));
} |
public SaslExtensions extensions() {
return saslExtensions;
} | @Test
public void testNoExtensionsFromTokenAndNullExtensions() throws Exception {
OAuthBearerClientInitialResponse response = new OAuthBearerClientInitialResponse("token", null);
assertTrue(response.extensions().map().isEmpty());
} |
@Override
public PageResult<MailAccountDO> getMailAccountPage(MailAccountPageReqVO pageReqVO) {
return mailAccountMapper.selectPage(pageReqVO);
} | @Test
public void testGetMailAccountPage() {
// mock data
MailAccountDO dbMailAccount = randomPojo(MailAccountDO.class, o -> { // will be matched by the query below
o.setMail("768@qq.com");
o.setUsername("yunai");
});
mailAccountMapper.insert(dbMailAccount);
// test: mail does not match
mailAccountMapper.insert(cloneIgnoreId(dbMailAccount, o -> o.setMail("788@qq.com")));
// test: username does not match
mailAccountMapper.insert(cloneIgnoreId(dbMailAccount, o -> o.setUsername("tudou")));
// prepare parameters
MailAccountPageReqVO reqVO = new MailAccountPageReqVO();
reqVO.setMail("768");
reqVO.setUsername("yu");
// invoke
PageResult<MailAccountDO> pageResult = mailAccountService.getMailAccountPage(reqVO);
// assert
assertEquals(1, pageResult.getTotal());
assertEquals(1, pageResult.getList().size());
assertPojoEquals(dbMailAccount, pageResult.getList().get(0));
} |
@Override
public int delete(String id) {
return this.coll.removeById(id).getN();
} | @Test
public void deleteThrowsIllegalArgumentExceptionForInvalidObjectId() {
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage("state should be: hexString has 24 characters");
decoratorService.delete("NOPE");
} |
@Override
public void connectToResourceManager(ResourceManagerGateway resourceManagerGateway) {
assertHasBeenStarted();
resourceRequirementServiceConnectionManager.connect(
resourceRequirements ->
resourceManagerGateway.declareRequiredResources(
jobMasterId, resourceRequirements, rpcTimeout));
declareResourceRequirements(declarativeSlotPool.getResourceRequirements());
} | @Test
void testConnectToResourceManagerDeclaresRequiredResources() throws Exception {
final Collection<ResourceRequirement> requiredResources =
Arrays.asList(
ResourceRequirement.create(ResourceProfile.UNKNOWN, 2),
ResourceRequirement.create(ResourceProfile.ZERO, 4));
try (DeclarativeSlotPoolService declarativeSlotPoolService =
createDeclarativeSlotPoolService(
new TestingDeclarativeSlotPoolFactory(
new TestingDeclarativeSlotPoolBuilder()
.setGetResourceRequirementsSupplier(
() -> requiredResources)))) {
final TestingResourceManagerGateway resourceManagerGateway =
new TestingResourceManagerGateway();
final CompletableFuture<ResourceRequirements> declaredResourceRequirements =
new CompletableFuture<>();
resourceManagerGateway.setDeclareRequiredResourcesFunction(
(jobMasterId, resourceRequirements) -> {
declaredResourceRequirements.complete(resourceRequirements);
return CompletableFuture.completedFuture(Acknowledge.get());
});
declarativeSlotPoolService.connectToResourceManager(resourceManagerGateway);
final ResourceRequirements resourceRequirements = declaredResourceRequirements.join();
assertThat(resourceRequirements.getResourceRequirements()).isEqualTo(requiredResources);
assertThat(resourceRequirements.getJobId()).isEqualTo(jobId);
assertThat(resourceRequirements.getTargetAddress()).isEqualTo(address);
}
} |
public static ShowResultSet execute(ShowStmt statement, ConnectContext context) {
return GlobalStateMgr.getCurrentState().getShowExecutor().showExecutorVisitor.visit(statement, context);
} | @Test
public void testShowDb() throws AnalysisException, DdlException {
ctx.setCurrentUserIdentity(UserIdentity.ROOT);
ctx.setCurrentRoleIds(Sets.newHashSet(PrivilegeBuiltinConstants.ROOT_ROLE_ID));
ShowDbStmt stmt = new ShowDbStmt(null);
ShowResultSet resultSet = ShowExecutor.execute(stmt, ctx);
Assert.assertTrue(resultSet.next());
Assert.assertEquals("Database", resultSet.getMetaData().getColumn(0).getName());
Assert.assertEquals(resultSet.getResultRows().get(0).get(0), "testDb");
} |
@Override
public Path move(final Path source, final Path renamed, final TransferStatus status, final Delete.Callback callback, final ConnectionCallback connectionCallback) throws BackgroundException {
Path target;
if(source.attributes().getCustom().containsKey(KEY_DELETE_MARKER)) {
// Delete marker, copy not supported but we have to retain the delete marker at the target
target = new Path(renamed);
target.attributes().setVersionId(null);
delete.delete(Collections.singletonMap(target, status), connectionCallback, callback);
try {
// Find version id of moved delete marker
final Path bucket = containerService.getContainer(renamed);
final VersionOrDeleteMarkersChunk marker = session.getClient().listVersionedObjectsChunked(
bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(renamed),
String.valueOf(Path.DELIMITER), 1, null, null, false);
if(marker.getItems().length == 1) {
final BaseVersionOrDeleteMarker markerObject = marker.getItems()[0];
target.attributes().withVersionId(markerObject.getVersionId()).setCustom(Collections.singletonMap(KEY_DELETE_MARKER, Boolean.TRUE.toString()));
delete.delete(Collections.singletonMap(source, status), connectionCallback, callback);
}
else {
throw new NotfoundException(String.format("Unable to find delete marker %s", renamed.getName()));
}
}
catch(ServiceException e) {
throw new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, renamed);
}
}
else {
try {
target = proxy.copy(source, renamed, status.withLength(source.attributes().getSize()), connectionCallback, new DisabledStreamListener());
// Copy source path and nullify version id to add a delete marker
delete.delete(Collections.singletonMap(new Path(source).withAttributes(new PathAttributes(source.attributes()).withVersionId(null)), status),
connectionCallback, callback);
}
catch(NotfoundException e) {
if(source.getType().contains(Path.Type.placeholder)) {
// No placeholder object to copy, create a new one at the target
target = session.getFeature(Directory.class).mkdir(renamed, new TransferStatus().withRegion(source.attributes().getRegion()));
}
else {
throw e;
}
}
}
return target;
} | @Test
public void testMoveWithServerSideEncryptionBucketPolicy() throws Exception {
final Path container = new Path("sse-test-us-east-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
final Path test = new Path(container, new AsciiRandomStringService().random(), EnumSet.of(Path.Type.file));
final S3AccessControlListFeature acl = new S3AccessControlListFeature(session);
final S3TouchFeature touch = new S3TouchFeature(session, acl);
final TransferStatus status = new TransferStatus();
status.setEncryption(S3EncryptionFeature.SSE_AES256);
touch.touch(test, status);
assertTrue(new S3FindFeature(session, acl).find(test));
final Path renamed = new Path(container, new AsciiRandomStringService().random(), EnumSet.of(Path.Type.file));
new S3MoveFeature(session, acl).move(test, renamed, new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback());
assertFalse(new S3FindFeature(session, acl).find(test));
assertTrue(new S3FindFeature(session, acl).find(renamed));
new S3DefaultDeleteFeature(session).delete(Collections.singletonList(renamed), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
@Override
public ParDoFn create(
PipelineOptions options,
CloudObject cloudUserFn,
@Nullable List<SideInputInfo> sideInputInfos,
TupleTag<?> mainOutputTag,
Map<TupleTag<?>, Integer> outputTupleTagsToReceiverIndices,
DataflowExecutionContext<?> executionContext,
DataflowOperationContext operationContext)
throws Exception {
DoFnInstanceManager instanceManager =
fnCache.get(
operationContext.nameContext().systemName(),
() ->
DoFnInstanceManagers.cloningPool(doFnExtractor.getDoFnInfo(cloudUserFn), options));
DoFnInfo<?, ?> doFnInfo = instanceManager.peek();
DataflowExecutionContext.DataflowStepContext stepContext =
executionContext.getStepContext(operationContext);
Iterable<PCollectionView<?>> sideInputViews = doFnInfo.getSideInputViews();
SideInputReader sideInputReader =
executionContext.getSideInputReader(sideInputInfos, sideInputViews, operationContext);
if (doFnInfo.getDoFn() instanceof BatchStatefulParDoOverrides.BatchStatefulDoFn) {
// HACK: BatchStatefulDoFn is a class from DataflowRunner's overrides
// that just instructs the worker to execute it differently. This will
// be replaced by metadata in the Runner API payload
BatchStatefulParDoOverrides.BatchStatefulDoFn fn =
(BatchStatefulParDoOverrides.BatchStatefulDoFn) doFnInfo.getDoFn();
DoFn underlyingFn = fn.getUnderlyingDoFn();
return new BatchModeUngroupingParDoFn(
(BatchModeExecutionContext.StepContext) stepContext,
new SimpleParDoFn(
options,
DoFnInstanceManagers.singleInstance(doFnInfo.withFn(underlyingFn)),
sideInputReader,
doFnInfo.getMainOutput(),
outputTupleTagsToReceiverIndices,
stepContext,
operationContext,
doFnInfo.getDoFnSchemaInformation(),
doFnInfo.getSideInputMapping(),
runnerFactory));
} else if (doFnInfo.getDoFn() instanceof StreamingPCollectionViewWriterFn) {
// HACK: StreamingPCollectionViewWriterFn is a class from
// DataflowPipelineTranslator. Using the class as an indicator is a migration path
// to simply having an indicator string.
checkArgument(
stepContext instanceof StreamingModeExecutionContext.StreamingModeStepContext,
"stepContext must be a StreamingModeStepContext to use StreamingPCollectionViewWriterFn");
DataflowRunner.StreamingPCollectionViewWriterFn<Object> writerFn =
(StreamingPCollectionViewWriterFn<Object>) doFnInfo.getDoFn();
return new StreamingPCollectionViewWriterParDoFn(
(StreamingModeExecutionContext.StreamingModeStepContext) stepContext,
writerFn.getView().getTagInternal(),
writerFn.getDataCoder(),
(Coder<BoundedWindow>) doFnInfo.getWindowingStrategy().getWindowFn().windowCoder());
} else {
return new SimpleParDoFn(
options,
instanceManager,
sideInputReader,
doFnInfo.getMainOutput(),
outputTupleTagsToReceiverIndices,
stepContext,
operationContext,
doFnInfo.getDoFnSchemaInformation(),
doFnInfo.getSideInputMapping(),
runnerFactory);
}
} | @Test
public void testCleanupRegistered() throws Exception {
PipelineOptions options = PipelineOptionsFactory.create();
CounterSet counters = new CounterSet();
DoFn<?, ?> initialFn = new TestStatefulDoFn();
CloudObject cloudObject =
getCloudObject(
initialFn,
WindowingStrategy.globalDefault().withWindowFn(FixedWindows.of(Duration.millis(10))));
TimerInternals timerInternals = mock(TimerInternals.class);
DataflowStepContext stepContext = mock(DataflowStepContext.class);
when(stepContext.timerInternals()).thenReturn(timerInternals);
DataflowExecutionContext<DataflowStepContext> executionContext =
mock(DataflowExecutionContext.class);
TestOperationContext operationContext = TestOperationContext.create(counters);
when(executionContext.getStepContext(operationContext)).thenReturn(stepContext);
when(executionContext.getSideInputReader(any(), any(), any()))
.thenReturn(NullSideInputReader.empty());
ParDoFn parDoFn =
factory.create(
options,
cloudObject,
Collections.emptyList(),
MAIN_OUTPUT,
ImmutableMap.of(MAIN_OUTPUT, 0),
executionContext,
operationContext);
Receiver rcvr = new OutputReceiver();
parDoFn.startBundle(rcvr);
IntervalWindow firstWindow = new IntervalWindow(new Instant(0), new Instant(10));
parDoFn.processElement(
WindowedValue.of("foo", new Instant(1), firstWindow, PaneInfo.NO_FIRING));
verify(stepContext)
.setStateCleanupTimer(
SimpleParDoFn.CLEANUP_TIMER_ID,
firstWindow,
IntervalWindow.getCoder(),
firstWindow.maxTimestamp().plus(Duration.millis(1L)),
firstWindow.maxTimestamp().plus(Duration.millis(1L)));
} |
@Operation(description = "Return openId configuration")
@GetMapping(value = "/jwks", produces = MediaType.APPLICATION_JSON_VALUE)
@ResponseBody
public Map<String, Object> jwks() {
return Map.of("keys", Arrays.asList(provider.generateJWK()));
} | @Test
void jwks() {
var response = controller.jwks();
List list = (List) response.get("keys");
Map<String, String> key = (Map<String,String>) list.get(0);
assertEquals(1, list.size());
assertEquals("RSA", key.get("kty"));
assertEquals("sig", key.get("use"));
assertEquals("AQAB", key.get("e"));
assertEquals("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS", key.get("n"));
} |
@Override
public List<String> extractPartitionValuesInPath(String partitionPath) {
// If the partitionPath is empty string( which means none-partition table), the partition values
// should be empty list.
if (partitionPath.isEmpty()) {
return Collections.emptyList();
}
String[] splits = partitionPath.split("/");
return Arrays.stream(splits).map(s -> {
if (s.contains("=")) {
String[] moreSplit = s.split("=");
ValidationUtils.checkArgument(moreSplit.length == 2, "Partition Field (" + s + ") not in expected format");
return moreSplit[1];
}
return s;
}).collect(Collectors.toList());
} | @Test
public void testMultiPartExtractor() {
MultiPartKeysValueExtractor valueExtractor = new MultiPartKeysValueExtractor();
// Test extract empty partitionPath
assertEquals(new ArrayList<>(), valueExtractor.extractPartitionValuesInPath(""));
List<String> expected = new ArrayList<>();
expected.add("2021-04-25");
expected.add("04");
// Test extract multi-partition path
assertEquals(expected, valueExtractor.extractPartitionValuesInPath("2021-04-25/04"));
// Test extract hive style partition path
assertEquals(expected, valueExtractor.extractPartitionValuesInPath("ds=2021-04-25/hh=04"));
} |
@Transient
public String getFlinkTableWith(String flinkConfig) {
if (Asserts.isNotNullString(flinkConfig)) {
Map<String, String> replacements = new HashMap<>();
replacements.put("schemaName", schema);
replacements.put("tableName", name);
return SqlUtil.replaceAllParam(flinkConfig, replacements);
}
return "";
} | @Test
void getFlinkTableWith() {
String result = table.getFlinkTableWith(flinkConfig);
assertThat(
result,
equalTo("SchemaOrigin=schemaName, TableNameOrigin=tableName, #{abc}=abc, #{}=null, " + "bcd=bcd"));
} |
@GetMapping("/addVGroup")
public Result<?> addVGroup(@RequestParam String vGroup, @RequestParam String unit) {
Result<?> result = new Result<>();
MappingDO mappingDO = new MappingDO();
mappingDO.setNamespace(Instance.getInstance().getNamespace());
mappingDO.setCluster(Instance.getInstance().getClusterName());
mappingDO.setUnit(unit);
mappingDO.setVGroup(vGroup);
boolean rst = vGroupMappingStoreManager.addVGroup(mappingDO);
Instance.getInstance().setTerm(System.currentTimeMillis());
if (!rst) {
result.setCode("500");
result.setMessage("add vGroup failed!");
}
// push the newest mapping relationship
vGroupMappingStoreManager.notifyMapping();
return result;
} | @Test
void addVGroup() {
namingController.addVGroup("group1","unit1");
} |
public static boolean safeCollectionEquals(final Collection<Comparable<?>> sources, final Collection<Comparable<?>> targets) {
List<Comparable<?>> all = new ArrayList<>(sources);
all.addAll(targets);
Optional<Class<?>> clazz = getTargetNumericType(all);
if (!clazz.isPresent()) {
return sources.equals(targets);
}
List<Comparable<?>> sourceClasses = sources.stream().map(each -> parseNumberByClazz(each.toString(), clazz.get())).collect(Collectors.toList());
List<Comparable<?>> targetClasses = targets.stream().map(each -> parseNumberByClazz(each.toString(), clazz.get())).collect(Collectors.toList());
return sourceClasses.equals(targetClasses);
} | @Test
void assertSafeCollectionEqualsForFloat() {
List<Comparable<?>> sources = Arrays.asList(10.01F, 12.01F);
List<Comparable<?>> targets = Arrays.asList(10.01F, 12.01F);
assertTrue(SafeNumberOperationUtils.safeCollectionEquals(sources, targets));
} |
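The type promotion inside safeCollectionEquals exists because numerically identical values boxed as different wrapper types never compare equal under List.equals. A standalone sketch of that failure mode and of the coercion idea, using only standard JDK calls (no project classes assumed):

// Illustration only: Integer(10) does not equal Long(10), so plain equals fails;
// re-parsing both sides to a common numeric type, as the utility does, succeeds.
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class BoxedEqualityDemo {
    public static void main(String[] args) {
        List<Comparable<?>> ints = Arrays.asList(10, 12);     // boxed as Integer
        List<Comparable<?>> longs = Arrays.asList(10L, 12L);  // boxed as Long
        System.out.println(ints.equals(longs));                // false: different wrapper types

        List<Long> a = ints.stream().map(v -> Long.parseLong(v.toString())).collect(Collectors.toList());
        List<Long> b = longs.stream().map(v -> Long.parseLong(v.toString())).collect(Collectors.toList());
        System.out.println(a.equals(b));                        // true after coercion to a common type
    }
}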
@Override
public Map<String, String> apply(ServerWebExchange exchange) {
StainingRule stainingRule = stainingRuleManager.getStainingRule();
if (stainingRule == null) {
return Collections.emptyMap();
}
return ruleStainingExecutor.execute(exchange, stainingRule);
} | @Test
public void testNoStainingRule() {
RuleStainingProperties ruleStainingProperties = new RuleStainingProperties();
ruleStainingProperties.setNamespace(testNamespace);
ruleStainingProperties.setGroup(testGroup);
ruleStainingProperties.setFileName(testFileName);
ConfigFile configFile = Mockito.mock(ConfigFile.class);
when(configFile.getContent()).thenReturn("");
when(configFileService.getConfigFile(testNamespace, testGroup, testFileName)).thenReturn(configFile);
StainingRuleManager stainingRuleManager = new StainingRuleManager(ruleStainingProperties, configFileService);
RuleStainingExecutor ruleStainingExecutor = new RuleStainingExecutor();
RuleTrafficStainer ruleTrafficStainer = new RuleTrafficStainer(stainingRuleManager, ruleStainingExecutor);
Map<String, String> map = ruleTrafficStainer.apply(null);
assertThat(map).isEmpty();
} |
static void createCompactedTopic(String topicName, short partitions, short replicationFactor, Admin admin) {
NewTopic topicDescription = TopicAdmin.defineTopic(topicName).
compacted().
partitions(partitions).
replicationFactor(replicationFactor).
build();
CreateTopicsOptions args = new CreateTopicsOptions().validateOnly(false);
try {
admin.createTopics(singleton(topicDescription), args).values().get(topicName).get();
log.info("Created topic '{}'", topicName);
} catch (InterruptedException e) {
Thread.interrupted();
throw new ConnectException("Interrupted while attempting to create/find topic '" + topicName + "'", e);
} catch (ExecutionException e) {
Throwable cause = e.getCause();
if (cause instanceof TopicExistsException) {
log.debug("Unable to create topic '{}' since it already exists.", topicName);
return;
}
if (cause instanceof UnsupportedVersionException) {
log.debug("Unable to create topic '{}' since the brokers do not support the CreateTopics API." +
" Falling back to assume topic exists or will be auto-created by the broker.",
topicName);
return;
}
if (cause instanceof TopicAuthorizationException) {
log.debug("Not authorized to create topic(s) '{}' upon the brokers." +
" Falling back to assume topic(s) exist or will be auto-created by the broker.",
topicName);
return;
}
if (cause instanceof ClusterAuthorizationException) {
log.debug("Not authorized to create topic '{}'." +
" Falling back to assume topic exists or will be auto-created by the broker.",
topicName);
return;
}
if (cause instanceof InvalidConfigurationException) {
throw new ConnectException("Unable to create topic '" + topicName + "': " + cause.getMessage(),
cause);
}
if (cause instanceof TimeoutException) {
// Timed out waiting for the operation to complete
throw new ConnectException("Timed out while checking for or creating topic '" + topicName + "'." +
" This could indicate a connectivity issue, unavailable topic partitions, or if" +
" this is your first use of the topic it may have taken too long to create.", cause);
}
throw new ConnectException("Error while attempting to create/find topic '" + topicName + "'", e);
}
} | @Test
public void testCreateCompactedTopicAssumeTopicAlreadyExistsWithClusterAuthorizationException() throws Exception {
Map<String, KafkaFuture<Void>> values = Collections.singletonMap(TOPIC, future);
when(future.get()).thenThrow(new ExecutionException(new ClusterAuthorizationException("not authorised")));
when(ctr.values()).thenReturn(values);
when(admin.createTopics(any(), any())).thenReturn(ctr);
MirrorUtils.createCompactedTopic(TOPIC, (short) 1, (short) 1, admin);
verify(future).get();
verify(ctr).values();
verify(admin).createTopics(any(), any());
} |
static String prettyPrintException(Throwable throwable) {
if (throwable == null)
return "Null exception.";
if (throwable.getMessage() != null) {
return throwable.getClass().getSimpleName() + ": " + throwable.getMessage();
}
return throwable.getClass().getSimpleName();
} | @Test
public void testPrettyPrintException() {
assertEquals("Null exception.", KafkaAdminClient.prettyPrintException(null));
assertEquals("TimeoutException", KafkaAdminClient.prettyPrintException(new TimeoutException()));
assertEquals("TimeoutException: The foobar timed out.",
KafkaAdminClient.prettyPrintException(new TimeoutException("The foobar timed out.")));
} |
ByteArrayOutputStream createConnectivityRequest(String uuid, FlowRule rule) {
/*
{
"tapi-connectivity:connectivity-service":[
{
"uuid":"ffb006d4-349e-4d2f-817e-0906c88458d0",
"service-layer":"PHOTONIC_MEDIA",
"service-type":"POINT_TO_POINT_CONNECTIVITY",
"end-point":[
{
"local-id":"1",
"layer-protocol-name":"PHOTONIC_MEDIA",
"layer-protocol-qualifier":"tapi-photonic-media:PHOTONIC_LAYER_QUALIFIER_NMC",
"service-interface-point":{
"service-interface-point-uuid":"0923962e-b83f-4702-9b16-a1a0db0dc1f9"
}
},
{
"local-id":"2",
"layer-protocol-name":"PHOTONIC_MEDIA",
"layer-protocol-qualifier":"tapi-photonic-media:PHOTONIC_LAYER_QUALIFIER_NMC",
"service-interface-point":{
"service-interface-point-uuid":"76be95de-5769-4e5d-b65e-62cb6c39cf6b "
}
}
]
}
]
}
*/
DeviceService deviceService = handler().get(DeviceService.class);
PortCriterion inputPortCriterion = (PortCriterion) checkNotNull(rule.selector()
.getCriterion(Criterion.Type.IN_PORT));
String inputPortUuid = deviceService.getPort(rule.deviceId(),
inputPortCriterion.port()).annotations().value(TapiDeviceHelper.UUID);
Instructions.OutputInstruction outInstruction = (Instructions.OutputInstruction) checkNotNull(rule.treatment()
.allInstructions().stream().filter(instr -> instr.type().equals(Instruction.Type.OUTPUT))
.findFirst().orElse(null));
String outputPortUuid = deviceService.getPort(rule.deviceId(),
outInstruction.port()).annotations().value(TapiDeviceHelper.UUID);
ByteArrayOutputStream stream = new ByteArrayOutputStream();
try {
JsonGenerator generator = getJsonGenerator(stream);
generator.writeStartObject();
generator.writeArrayFieldStart(TAPI_CONNECTIVITY_CONNECTIVITY_SERVICE);
generator.writeStartObject();
generator.writeStringField(TapiDeviceHelper.UUID, uuid);
generator.writeStringField(SERVICE_LAYER, PHOTONIC_MEDIA);
generator.writeStringField(SERVICE_TYPE, POINT_TO_POINT_CONNECTIVITY);
generator.writeArrayFieldStart(END_POINT);
//ADVA OLS requires these to be 1,2 for every connection
addEndPoint(generator, inputPortUuid, 1);
addEndPoint(generator, outputPortUuid, 2);
generator.writeEndArray();
generator.writeEndObject();
generator.writeEndArray();
generator.writeEndObject();
generator.close();
return stream;
} catch (IOException e) {
log.error("Cant' create json", e);
}
return stream;
} | @Test
public void createConnRequest() {
String output = tapiFrp.createConnectivityRequest(CONNECTION_UUID, FLOW_RULE).toString();
System.out.println(output);
assertEquals("Json to create network connectivity is wrong", CONNECTIVITY_REQUEST, output);
} |
@Override
public List<Integer> applyTransforms(List<Integer> originalGlyphIds)
{
List<Integer> intermediateGlyphsFromGsub = adjustRephPosition(originalGlyphIds);
intermediateGlyphsFromGsub = repositionGlyphs(intermediateGlyphsFromGsub);
for (String feature : FEATURES_IN_ORDER)
{
if (!gsubData.isFeatureSupported(feature))
{
if (feature.equals(RKRF_FEATURE) && gsubData.isFeatureSupported(VATU_FEATURE))
{
// Create your own rkrf feature from vatu feature
intermediateGlyphsFromGsub = applyRKRFFeature(
gsubData.getFeature(VATU_FEATURE),
intermediateGlyphsFromGsub);
}
LOG.debug("the feature {} was not found", feature);
continue;
}
LOG.debug("applying the feature {}", feature);
ScriptFeature scriptFeature = gsubData.getFeature(feature);
intermediateGlyphsFromGsub = applyGsubFeature(scriptFeature,
intermediateGlyphsFromGsub);
}
return Collections.unmodifiableList(intermediateGlyphsFromGsub);
} | @Test
void testApplyTransforms_vatu()
{
// given
List<Integer> glyphsAfterGsub = Arrays.asList(517,593,601,665);
// when
List<Integer> result = gsubWorkerForDevanagari.applyTransforms(getGlyphIds("श्रत्रस्रघ्र"));
// then
assertEquals(glyphsAfterGsub, result);
} |
void prioritizeCopiesAndShiftUps(List<MigrationInfo> migrations) {
for (int i = 0; i < migrations.size(); i++) {
prioritize(migrations, i);
}
if (logger.isFinestEnabled()) {
StringBuilder s = new StringBuilder("Migration order after prioritization: [");
int ix = 0;
for (MigrationInfo migration : migrations) {
s.append("\n\t").append(ix++).append("- ").append(migration).append(",");
}
s.deleteCharAt(s.length() - 1);
s.append("]");
logger.finest(s.toString());
}
} | @Test
public void testCopyPrioritizationAgainstMove()
throws UnknownHostException {
List<MigrationInfo> migrations = new ArrayList<>();
final MigrationInfo migration1 = new MigrationInfo(0, null, new PartitionReplica(new Address("localhost", 5701), uuids[0]), -1, -1, -1, 0);
final MigrationInfo migration2 = new MigrationInfo(0, null, new PartitionReplica(new Address("localhost", 5702), uuids[1]), -1, -1, -1, 1);
final MigrationInfo migration3 = new MigrationInfo(0, new PartitionReplica(new Address("localhost", 5703), uuids[2]),
new PartitionReplica(new Address("localhost", 5704), uuids[3]), 2, -1, -1, 2);
final MigrationInfo migration4 = new MigrationInfo(0, new PartitionReplica(new Address("localhost", 5705), uuids[4]),
new PartitionReplica(new Address("localhost", 5706), uuids[5]), 2, -1, -1, 3);
final MigrationInfo migration5 = new MigrationInfo(0, null, new PartitionReplica(new Address("localhost", 5707), uuids[6]), -1, -1, -1, 4);
migrations.add(migration1);
migrations.add(migration2);
migrations.add(migration3);
migrations.add(migration4);
migrations.add(migration5);
migrationPlanner.prioritizeCopiesAndShiftUps(migrations);
assertEquals(asList(migration1, migration2, migration5, migration3, migration4), migrations);
} |
@Override
public InvokerWrapper getInvokerWrapper() {
return invokerWrapper;
} | @Test(expected = UnsupportedOperationException.class)
public void testInvokerWrapper_invoke() {
context.getInvokerWrapper().invoke(null, false);
} |
@SuppressWarnings("unchecked")
@Udf
public <T> List<T> union(
@UdfParameter(description = "First array of values") final List<T> left,
@UdfParameter(description = "Second array of values") final List<T> right) {
if (left == null || right == null) {
return null;
}
final Set<T> combined = Sets.newLinkedHashSet(left);
combined.addAll(right);
return (List<T>) Arrays.asList(combined.toArray());
} | @Test
public void shouldReturnNullForAllNullInputs() {
final List<Long> result = udf.union((List<Long>) null, (List<Long>) null);
assertThat(result, is(nullValue()));
} |
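A minimal usage sketch, not part of the original pair, illustrating the order-preserving de-duplication implied by the LinkedHashSet in union; the udf field and Hamcrest matchers are assumed to follow the surrounding test conventions.
// Hypothetical illustration: duplicates are dropped, left-to-right encounter order is kept.
final List<Integer> merged = udf.union(Arrays.asList(1, 2, 2), Arrays.asList(2, 3));
assertThat(merged, contains(1, 2, 3)); // expected [1, 2, 3]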
public static int hash(Client client) {
if (!(client instanceof IpPortBasedClient)) {
return 0;
}
return Objects.hash(client.getClientId(),
client.getAllPublishedService().stream()
.map(s -> {
InstancePublishInfo ip = client.getInstancePublishInfo(s);
double weight = getWeight(ip);
Boolean enabled = getEnabled(ip);
String cluster = StringUtils.defaultIfBlank(ip.getCluster(), DEFAULT_CLUSTER_NAME);
return Objects.hash(
s.getNamespace(),
s.getGroup(),
s.getName(),
s.isEphemeral(),
ip.getIp(),
ip.getPort(),
weight,
ip.isHealthy(),
enabled,
cluster,
ip.getExtendDatum()
);
})
.collect(Collectors.toSet()));
} | @Test
void performanceTestOfHash() {
long start = System.nanoTime();
for (int i = 0; i < N; i++) {
DistroUtils.hash(client1);
}
System.out.printf("Distro Verify Hash Performance: %.2f ivk/ns\n", ((double) System.nanoTime() - start) / N);
} |
public <T> HttpRestResult<T> post(String url, Header header, Query query, Object body, Type responseType)
throws Exception {
return execute(url, HttpMethod.POST, new RequestHttpEntity(header, query, body), responseType);
} | @Test
void testPost() throws Exception {
when(requestClient.execute(any(), eq("POST"), any())).thenReturn(mockResponse);
when(mockResponse.getStatusCode()).thenReturn(200);
when(mockResponse.getBody()).thenReturn(new ByteArrayInputStream("test".getBytes()));
HttpRestResult<String> result = restTemplate.post("http://127.0.0.1:8848/nacos/test", Header.EMPTY, Query.EMPTY,
new Object(), String.class);
assertTrue(result.ok());
assertEquals(Header.EMPTY, result.getHeader());
assertEquals("test", result.getData());
} |
@Override
public Write.Append append(final Path file, final TransferStatus status) throws BackgroundException {
return new Write.Append(status.isExists()).withStatus(status);
} | @Test
public void testAppend() throws Exception {
final Path workdir = new SFTPHomeDirectoryService(session).find();
final Path test = new Path(workdir, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new SFTPTouchFeature(session).touch(test, new TransferStatus());
assertTrue(new SFTPUploadFeature(session).append(test, new TransferStatus().exists(true).withLength(1L).withRemote(new SFTPAttributesFinderFeature(session).find(test))).append);
new SFTPDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
} |
@Activate
protected void activate() {
this.loadConfigs();
log.info("Started");
} | @Test
public void basics() throws IOException {
stageTestResource("basic.json");
loader.activate();
assertEquals("incorrect component", FOO_COMPONENT, service.component);
} |
@Override
public Map<String, Object> processCsvFile(String encodedCsvData, boolean dryRun) throws JsonProcessingException {
services = new HashMap<>();
serviceParentChildren = new HashMap<>();
Map<String, Object> result = super.processCsvFile(encodedCsvData, dryRun);
if (!services.isEmpty()) {
retrieveLegacyServiceIds();
saveAll(dryRun);
processServiceParentChildren(serviceParentChildren, dryRun);
}
return result;
} | @Test
void processCsvFileSuccessCreationServiceTest() throws IOException {
String csvData = """SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS""";
mockAdmin();
mockconnection();
Map<String, Object> resultMap = csvService.processCsvFile(encodeCsv(csvData), false);
assertEquals("Bestand verwerkt", resultMap.get("result"));
assertTrue(((ArrayList) resultMap.get("failed")).isEmpty());
List<String> succeededArray = new ArrayList<>();
succeededArray.add("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS");
assertArrayEquals(((ArrayList) resultMap.get("succeeded")).toArray(), succeededArray.toArray());
} |
public void mergeWith(DynamicFilterStats other)
{
if (other == null) {
return;
}
producerNodeIds.addAll(other.getProducerNodeIds());
} | @Test
public void testMergeWith()
{
final PlanNodeId[] planNodes1 = new PlanNodeId[] {new PlanNodeId("1"), new PlanNodeId("2")};
Set<PlanNodeId> planNodeSet1 = new HashSet<>(Arrays.asList(planNodes1));
DynamicFilterStats stats1 = new DynamicFilterStats(planNodeSet1);
assertEquals(stats1.getProducerNodeIds(), planNodeSet1);
final PlanNodeId[] planNodes2 = new PlanNodeId[] {new PlanNodeId("2"), new PlanNodeId("3")};
Set<PlanNodeId> planNodeSet2 = new
HashSet<>(Arrays.asList(planNodes2));
DynamicFilterStats stats2 = new DynamicFilterStats(planNodeSet2);
assertEquals(stats2.getProducerNodeIds(), planNodeSet2);
stats2.mergeWith(stats1);
assertEquals(stats1.getProducerNodeIds(), planNodeSet1);
final Set<PlanNodeId> expectedMergeNodeSet = ImmutableSet.of(new PlanNodeId("1"), new PlanNodeId("2"), new PlanNodeId("3"));
assertEquals(stats2.getProducerNodeIds(), expectedMergeNodeSet);
assertFalse(stats2.empty());
final PlanNodeId[] emptyPlanNodes = new PlanNodeId[] {};
Set<PlanNodeId> emptyPlanNodeSet = new HashSet<>(Arrays.asList(emptyPlanNodes));
DynamicFilterStats emptyStats1 = new DynamicFilterStats(emptyPlanNodeSet);
DynamicFilterStats emptyStats2 = new DynamicFilterStats(emptyPlanNodeSet);
emptyStats2.mergeWith(emptyStats1);
assertTrue(emptyStats2.empty());
assertTrue(emptyStats1.empty());
assertTrue(emptyStats1.getProducerNodeIds().isEmpty());
assertTrue(emptyStats2.getProducerNodeIds().isEmpty());
} |
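A brief hedged sketch of the null-argument path of mergeWith, which the early return in the method makes a no-op; the constructor and accessors are the same ones exercised above.
// Merging with null leaves the producer node ids untouched.
final DynamicFilterStats stats = new DynamicFilterStats(new HashSet<>(Arrays.asList(new PlanNodeId("1"))));
stats.mergeWith(null);
assertEquals(stats.getProducerNodeIds(), ImmutableSet.of(new PlanNodeId("1")));
assertFalse(stats.empty());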
@Override
public Long clusterCountKeysInSlot(int slot) {
RedisClusterNode node = clusterGetNodeForSlot(slot);
MasterSlaveEntry entry = executorService.getConnectionManager().getEntry(new InetSocketAddress(node.getHost(), node.getPort()));
RFuture<Long> f = executorService.readAsync(entry, StringCodec.INSTANCE, RedisCommands.CLUSTER_COUNTKEYSINSLOT, slot);
return syncFuture(f);
} | @Test
public void testClusterCountKeysInSlot() {
Long t = connection.clusterCountKeysInSlot(1);
assertThat(t).isZero();
} |
@Override
public V put(K key, V value, Duration ttl) {
return get(putAsync(key, value, ttl));
} | @Test
public void testExpiredIterator() throws InterruptedException {
RMapCacheNative<String, String> cache = redisson.getMapCacheNative("simple");
cache.put("0", "8");
cache.put("1", "6", Duration.ofSeconds(1));
cache.put("2", "4", Duration.ofSeconds(3));
cache.put("3", "2", Duration.ofSeconds(4));
cache.put("4", "4", Duration.ofSeconds(1));
Thread.sleep(1000);
assertThat(cache.keySet()).containsOnly("0", "2", "3");
cache.destroy();
} |
@Override
public Port port(String portId) {
checkArgument(!Strings.isNullOrEmpty(portId), ERR_NULL_PORT_ID);
return osNetworkStore.port(portId);
} | @Test
public void testGetPortById() {
createBasicNetworks();
assertTrue("Port did not match", target.port(PORT_ID) != null);
assertTrue("Port did not match", target.port(UNKNOWN_ID) == null);
} |
@Override
public String getImage() {
if ( isLocked() ) {
return "ui/images/lock.svg"; //$NON-NLS-1$
}
return "ui/images/jobrepo.svg"; //$NON-NLS-1$
} | @Test
public void testGetImage() {
String image = uiJob.getImage();
assertNotNull( image );
File f = new File( image );
when( mockEERepositoryObject.getLock() ).thenReturn( mockRepositoryLock );
String image2 = uiJob.getImage();
assertNotNull( image2 );
f = new File( image2 );
assertNotEquals( image, image2 );
} |
public static void addSecurityProvider(Properties properties) {
properties.keySet().stream()
.filter(key -> key.toString().matches("security\\.provider(\\.\\d+)?"))
.sorted(Comparator.comparing(String::valueOf)).forEach(key -> addSecurityProvider(properties.get(key).toString()));
} | @Test
void addSecurityProviderTest() {
removeAllDummyProviders();
Provider[] providers = Security.getProviders();
int providersCountBefore = providers.length;
SecurityProviderLoader.addSecurityProvider(DummyProvider.class.getName());
Provider[] providersAfter = Security.getProviders();
Provider provider = Security.getProvider(DummyProvider.PROVIDER_NAME);
try {
assertEquals(providersCountBefore + 1, providersAfter.length);
assertNotNull(provider, "Provider not installed.");
assertEquals(DummyProvider.class, provider.getClass());
assertEquals(provider, providersAfter[providersAfter.length - 1]);
} catch (AssertionError e){
Arrays.stream(providers).forEach(pro -> System.err.println(pro.getName()));
throw e;
}
} |
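An illustrative Properties layout for the Properties overload of addSecurityProvider; the key pattern only admits security.provider or security.provider.<n>, and matching entries are added in lexicographic key order. The provider class names below are placeholders, not real providers.
final Properties props = new Properties();
props.setProperty("security.provider.1", "org.example.FirstDummyProvider");  // hypothetical class
props.setProperty("security.provider.2", "org.example.SecondDummyProvider"); // hypothetical class
props.setProperty("unrelated.key", "ignored");                               // filtered out by the key pattern
SecurityProviderLoader.addSecurityProvider(props); // installs the matching providers in key order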
public void setWisdom(int wizard, int amount) {
wizards[wizard].setWisdom(amount);
} | @Test
void testSetWisdom() {
var wizardNumber = 0;
var bytecode = new int[5];
bytecode[0] = LITERAL.getIntValue();
bytecode[1] = wizardNumber;
bytecode[2] = LITERAL.getIntValue();
bytecode[3] = 50; // wisdom amount
bytecode[4] = SET_WISDOM.getIntValue();
var vm = new VirtualMachine();
vm.execute(bytecode);
assertEquals(50, vm.getWizards()[wizardNumber].getWisdom());
} |
public void convert(FSConfigToCSConfigConverterParams params)
throws Exception {
validateParams(params);
this.clusterResource = getClusterResource(params);
this.convertPlacementRules = params.isConvertPlacementRules();
this.outputDirectory = params.getOutputDirectory();
this.rulesToFile = params.isPlacementRulesToFile();
this.usePercentages = params.isUsePercentages();
this.preemptionMode = params.getPreemptionMode();
prepareOutputFiles(params.isConsole());
loadConversionRules(params.getConversionRulesConfig());
Configuration inputYarnSiteConfig = getInputYarnSiteConfig(params);
handleFairSchedulerConfig(params, inputYarnSiteConfig);
convert(inputYarnSiteConfig);
} | @Test
public void testConvertFSConfigurationClusterResourceInvalid2()
throws Exception {
FSConfigToCSConfigConverterParams params = createDefaultParamsBuilder()
.withClusterResource("vcores=20, memmmm=240")
.build();
expectedException.expect(ConversionException.class);
expectedException.expectMessage("Error while parsing resource");
converter.convert(params);
} |
public static boolean isValidMetaKey(String key) {
return META_KEY_PATTERN.matcher(key).matches();
} | @Test
public void testMetaKey() {
Assert.assertTrue(ConsulUtils.isValidMetaKey("tags"));
Assert.assertTrue(ConsulUtils.isValidMetaKey("TAGS"));
Assert.assertTrue(ConsulUtils.isValidMetaKey("TAGS1"));
Assert.assertTrue(ConsulUtils.isValidMetaKey("TAGS-1"));
Assert.assertTrue(ConsulUtils.isValidMetaKey("consul-tags"));
Assert.assertTrue(ConsulUtils.isValidMetaKey("consul_tags"));
Assert.assertTrue(ConsulUtils.isValidMetaKey("consul_-_tags"));
Assert.assertFalse(ConsulUtils.isValidMetaKey("consul.tags"));
Assert.assertFalse(ConsulUtils.isValidMetaKey("consul:tags"));
} |
public static String toArgumentString(Object[] args) {
StringBuilder buf = new StringBuilder();
for (Object arg : args) {
if (buf.length() > 0) {
buf.append(COMMA_SEPARATOR);
}
if (arg == null || ReflectUtils.isPrimitives(arg.getClass())) {
buf.append(arg);
} else {
try {
buf.append(JsonUtils.toJson(arg));
} catch (Exception e) {
logger.warn(COMMON_JSON_CONVERT_EXCEPTION, "", "", e.getMessage(), e);
buf.append(arg);
}
}
}
return buf.toString();
} | @Test
void testToArgumentString() throws Exception {
String s = StringUtils.toArgumentString(new Object[] {"a", 0, Collections.singletonMap("enabled", true)});
assertThat(s, containsString("a,"));
assertThat(s, containsString("0,"));
assertThat(s, containsString("{\"enabled\":true}"));
} |
@Override
public void initialize(URI uri, Configuration conf)
throws IOException
{
requireNonNull(uri, "uri is null");
requireNonNull(conf, "conf is null");
super.initialize(uri, conf);
setConf(conf);
this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
this.workingDirectory = new Path(PATH_SEPARATOR).makeQualified(this.uri, new Path(PATH_SEPARATOR));
HiveS3Config defaults = new HiveS3Config();
this.stagingDirectory = new File(conf.get(S3_STAGING_DIRECTORY, defaults.getS3StagingDirectory().toString()));
this.maxAttempts = conf.getInt(S3_MAX_CLIENT_RETRIES, defaults.getS3MaxClientRetries()) + 1;
this.maxBackoffTime = Duration.valueOf(conf.get(S3_MAX_BACKOFF_TIME, defaults.getS3MaxBackoffTime().toString()));
this.maxRetryTime = Duration.valueOf(conf.get(S3_MAX_RETRY_TIME, defaults.getS3MaxRetryTime().toString()));
int maxErrorRetries = conf.getInt(S3_MAX_ERROR_RETRIES, defaults.getS3MaxErrorRetries());
boolean sslEnabled = conf.getBoolean(S3_SSL_ENABLED, defaults.isS3SslEnabled());
Duration connectTimeout = Duration.valueOf(conf.get(S3_CONNECT_TIMEOUT, defaults.getS3ConnectTimeout().toString()));
Duration socketTimeout = Duration.valueOf(conf.get(S3_SOCKET_TIMEOUT, defaults.getS3SocketTimeout().toString()));
int maxConnections = conf.getInt(S3_MAX_CONNECTIONS, defaults.getS3MaxConnections());
this.multiPartUploadMinFileSize = conf.getLong(S3_MULTIPART_MIN_FILE_SIZE, defaults.getS3MultipartMinFileSize().toBytes());
this.multiPartUploadMinPartSize = conf.getLong(S3_MULTIPART_MIN_PART_SIZE, defaults.getS3MultipartMinPartSize().toBytes());
this.isPathStyleAccess = conf.getBoolean(S3_PATH_STYLE_ACCESS, defaults.isS3PathStyleAccess());
this.useInstanceCredentials = conf.getBoolean(S3_USE_INSTANCE_CREDENTIALS, defaults.isS3UseInstanceCredentials());
this.pinS3ClientToCurrentRegion = conf.getBoolean(S3_PIN_CLIENT_TO_CURRENT_REGION, defaults.isPinS3ClientToCurrentRegion());
this.s3IamRole = conf.get(S3_IAM_ROLE, defaults.getS3IamRole());
this.s3IamRoleSessionName = conf.get(S3_IAM_ROLE_SESSION_NAME, defaults.getS3IamRoleSessionName());
verify(!(useInstanceCredentials && conf.get(S3_IAM_ROLE) != null),
"Invalid configuration: either use instance credentials or specify an iam role");
verify((pinS3ClientToCurrentRegion && conf.get(S3_ENDPOINT) == null) || !pinS3ClientToCurrentRegion,
"Invalid configuration: either endpoint can be set or S3 client can be pinned to the current region");
this.sseEnabled = conf.getBoolean(S3_SSE_ENABLED, defaults.isS3SseEnabled());
this.sseType = PrestoS3SseType.valueOf(conf.get(S3_SSE_TYPE, defaults.getS3SseType().name()));
this.sseKmsKeyId = conf.get(S3_SSE_KMS_KEY_ID, defaults.getS3SseKmsKeyId());
this.s3AclType = PrestoS3AclType.valueOf(conf.get(S3_ACL_TYPE, defaults.getS3AclType().name()));
String userAgentPrefix = conf.get(S3_USER_AGENT_PREFIX, defaults.getS3UserAgentPrefix());
this.skipGlacierObjects = conf.getBoolean(S3_SKIP_GLACIER_OBJECTS, defaults.isSkipGlacierObjects());
this.s3StorageClass = conf.getEnum(S3_STORAGE_CLASS, defaults.getS3StorageClass());
ClientConfiguration configuration = new ClientConfiguration()
.withMaxErrorRetry(maxErrorRetries)
.withProtocol(sslEnabled ? Protocol.HTTPS : Protocol.HTTP)
.withConnectionTimeout(toIntExact(connectTimeout.toMillis()))
.withSocketTimeout(toIntExact(socketTimeout.toMillis()))
.withMaxConnections(maxConnections)
.withUserAgentPrefix(userAgentPrefix)
.withUserAgentSuffix(S3_USER_AGENT_SUFFIX);
this.credentialsProvider = createAwsCredentialsProvider(uri, conf);
this.s3 = createAmazonS3Client(conf, configuration);
} | @Test
public void testCompatibleStaticCredentials()
throws Exception
{
Configuration config = new Configuration();
config.set(S3_ACCESS_KEY, "test_secret_access_key");
config.set(S3_SECRET_KEY, "test_access_key_id");
config.set(S3_ENDPOINT, "test.example.endpoint.com");
config.set(S3_SIGNER_TYPE, "S3SignerType");
// the static credentials should be preferred
try (PrestoS3FileSystem fs = new PrestoS3FileSystem()) {
fs.initialize(new URI("s3a://test-bucket/"), config);
assertInstanceOf(getAwsCredentialsProvider(fs), AWSStaticCredentialsProvider.class);
}
} |
public void changeMethod(StealingMethod method) {
this.method = method;
} | @Test
void testChangeMethod() {
final var initialMethod = spy(StealingMethod.class);
final var thief = new HalflingThief(initialMethod);
thief.steal();
verify(initialMethod).steal();
String target = verify(initialMethod).pickTarget();
verify(initialMethod).confuseTarget(target);
verify(initialMethod).stealTheItem(target);
final var newMethod = spy(StealingMethod.class);
thief.changeMethod(newMethod);
thief.steal();
verify(newMethod).steal();
String newTarget = verify(newMethod).pickTarget();
verify(newMethod).confuseTarget(newTarget);
verify(newMethod).stealTheItem(newTarget);
verifyNoMoreInteractions(initialMethod, newMethod);
} |
public DefaultProbe(Map<String, String> props) {
this("Default probe: IP presence", props);
} | @Test
public void testDefaultProbe() {
// component instance has a good hostname, so probe will eventually succeed
// whether or not DNS checking is enabled
ComponentInstance componentInstance =
createMockComponentInstance("example.com");
checkPingResults(probe, componentInstance, false);
// component instance has a bad hostname, so probe will fail when DNS
// checking is enabled
componentInstance = createMockComponentInstance("bad.dns.test");
checkPingResults(probe, componentInstance, probe.isDnsCheckEnabled());
} |
@Override
public String toString()
{
return "id: " + super.toString() + ", entity: " + (_entity == null ? "" : _entity);
} | @Test
public void testToString()
{
IdEntityResponse<Long, AnyRecord> longIdEntityResponse = new IdEntityResponse<>(6L, new AnyRecord());
Assert.assertEquals(longIdEntityResponse.toString(), "id: 6, entity: {}");
IdEntityResponse<Long, AnyRecord> nullIdEntityResponse = new IdEntityResponse<>(null, new AnyRecord());
Assert.assertEquals(nullIdEntityResponse.toString(), "id: , entity: {}");
} |
public static TunnelId valueOf(String value) {
return new TunnelId(value);
} | @Test
public void testConstruction() {
final String tunnelIdValue = "7777";
final TunnelId tunnelId = TunnelId.valueOf(tunnelIdValue);
assertThat(tunnelId, is(notNullValue()));
assertThat(tunnelId.id(), is(tunnelIdValue));
} |
@Override
protected void delete(Collection<HadoopResourceId> resourceIds) throws IOException {
for (HadoopResourceId resourceId : resourceIds) {
// ignore response as issues are surfaced with exception
final Path resourcePath = resourceId.toPath();
resourcePath.getFileSystem(configuration).delete(resourceId.toPath(), false);
}
} | @Test
public void testDeleteNonExisting() throws Exception {
fileSystem.delete(ImmutableList.of(testPath("MissingFile")));
} |
public static UserOperatorConfig buildFromMap(Map<String, String> map) {
Map<String, String> envMap = new HashMap<>(map);
envMap.keySet().retainAll(UserOperatorConfig.keyNames());
Map<String, Object> generatedMap = ConfigParameter.define(envMap, CONFIG_VALUES);
return new UserOperatorConfig(generatedMap);
} | @Test
public void testFromMapInvalidScramPasswordLengthThrows() {
Map<String, String> envVars = new HashMap<>(UserOperatorConfigTest.ENV_VARS);
envVars.put(UserOperatorConfig.SCRAM_SHA_PASSWORD_LENGTH.key(), "not_an_integer");
assertThrows(InvalidConfigurationException.class, () -> UserOperatorConfig.buildFromMap(envVars));
} |
public static Expression fromJson(String json) {
return fromJson(json, null);
} | @Test
public void invalidTerm() {
assertThatThrownBy(
() ->
ExpressionParser.fromJson(
"{\n"
+ " \"type\" : \"not\",\n"
+ " \"child\" : {\n"
+ " \"type\" : \"lt\",\n"
+ " \"term\" : 23,\n"
+ " \"values\" : [ \"a\" ]\n"
+ " }\n"
+ "}"))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Cannot parse reference (requires string or object): 23");
} |
@Secured(action = ActionTypes.READ)
@GetMapping("/services")
public Object listDetail(@RequestParam(required = false) boolean withInstances,
@RequestParam(defaultValue = Constants.DEFAULT_NAMESPACE_ID) String namespaceId,
@RequestParam(required = false) int pageNo, @RequestParam(required = false) int pageSize,
@RequestParam(name = "serviceNameParam", defaultValue = StringUtils.EMPTY) String serviceName,
@RequestParam(name = "groupNameParam", defaultValue = StringUtils.EMPTY) String groupName,
@RequestParam(name = "instance", defaultValue = StringUtils.EMPTY) String containedInstance,
@RequestParam(required = false) boolean hasIpCount) throws NacosException {
if (withInstances) {
return judgeCatalogService().pageListServiceDetail(namespaceId, groupName, serviceName, pageNo, pageSize);
}
return judgeCatalogService()
.pageListService(namespaceId, groupName, serviceName, pageNo, pageSize, containedInstance, hasIpCount);
} | @Test
void testListDetail() {
try {
when(catalogServiceV2.pageListServiceDetail(Constants.DEFAULT_NAMESPACE_ID, TEST_GROUP_NAME, TEST_SERVICE_NAME, 1,
10)).thenReturn(Collections.emptyList());
Object res = catalogController.listDetail(true, Constants.DEFAULT_NAMESPACE_ID, 1, 10, TEST_SERVICE_NAME, TEST_GROUP_NAME, null,
true);
assertTrue(res instanceof List);
assertEquals(0, ((List) res).size());
} catch (NacosException e) {
e.printStackTrace();
fail(e.getMessage());
}
} |
public static boolean cleanDirectory(File dir) {
if (dir.isDirectory()) {
String[] children = dir.list();
if (children != null) {
for (String aChildren : children) {
boolean success = cleanDirectory(new File(dir, aChildren));
if (!success) {
return false;
}
}
}
}
return dir.delete();
} | @Test
public void cleanDirectory() throws Exception {
String filePath = System.getProperty("java.io.tmpdir") + File.separator
+ "FileTest" + 1;
FileUtils.string2File(new File(filePath, "xx.tmp"), "helloworld!");
Assert.assertTrue(new File(filePath, "xx.tmp").exists());
String ct = FileUtils.file2String(new File(filePath, "xx.tmp"));
Assert.assertTrue(ct.equals("helloworld!"));
List<String> datas = FileUtils.readLines(new File(filePath, "xx.tmp"));
Assert.assertTrue(datas.size() == 1);
FileUtils.cleanDirectory(new File(filePath));
Assert.assertFalse(new File(filePath).exists());
} |
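A small sketch of the recursive contract of cleanDirectory, assuming string2File creates parent directories as in the test above: children are deleted depth-first before the directory itself. The path below is illustrative.
final File root = new File(System.getProperty("java.io.tmpdir"), "FileTestCleanDemo"); // hypothetical dir
FileUtils.string2File(new File(new File(root, "sub"), "a.txt"), "x");
Assert.assertTrue(FileUtils.cleanDirectory(root)); // removes a.txt, then sub, then the root dir
Assert.assertFalse(root.exists());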
public SegmentLineage(String tableNameWithType) {
_tableNameWithType = tableNameWithType;
_lineageEntries = new HashMap<>();
} | @Test
public void testSegmentLineage() {
SegmentLineage segmentLineage = new SegmentLineage("test_OFFLINE");
String id = SegmentLineageUtils.generateLineageEntryId();
segmentLineage.addLineageEntry(id,
new LineageEntry(Arrays.asList("s1", "s2", "s3"), Arrays.asList("s4", "s5"), LineageEntryState.COMPLETED,
11111L));
LineageEntry lineageEntry = segmentLineage.getLineageEntry(id);
Assert.assertEquals(lineageEntry.getSegmentsFrom(), Arrays.asList("s1", "s2", "s3"));
Assert.assertEquals(lineageEntry.getSegmentsTo(), Arrays.asList("s4", "s5"));
Assert.assertEquals(lineageEntry.getState(), LineageEntryState.COMPLETED);
Assert.assertEquals(lineageEntry.getTimestamp(), 11111L);
String id2 = SegmentLineageUtils.generateLineageEntryId();
segmentLineage.addLineageEntry(id2,
new LineageEntry(Arrays.asList("s6", "s6", "s8"), Arrays.asList("s9", "s10"), LineageEntryState.COMPLETED,
22222L));
LineageEntry lineageEntry2 = segmentLineage.getLineageEntry(id2);
Assert.assertEquals(lineageEntry2.getSegmentsFrom(), Arrays.asList("s6", "s6", "s8"));
Assert.assertEquals(lineageEntry2.getSegmentsTo(), Arrays.asList("s9", "s10"));
Assert.assertEquals(lineageEntry2.getState(), LineageEntryState.COMPLETED);
Assert.assertEquals(lineageEntry2.getTimestamp(), 22222L);
String id3 = SegmentLineageUtils.generateLineageEntryId();
segmentLineage.addLineageEntry(id3,
new LineageEntry(Arrays.asList("s5", "s9"), Arrays.asList("s11"), LineageEntryState.IN_PROGRESS, 33333L));
LineageEntry lineageEntry3 = segmentLineage.getLineageEntry(id3);
Assert.assertEquals(lineageEntry3.getSegmentsFrom(), Arrays.asList("s5", "s9"));
Assert.assertEquals(lineageEntry3.getSegmentsTo(), Arrays.asList("s11"));
Assert.assertEquals(lineageEntry3.getState(), LineageEntryState.IN_PROGRESS);
Assert.assertEquals(lineageEntry3.getTimestamp(), 33333L);
String id4 = SegmentLineageUtils.generateLineageEntryId();
segmentLineage.addLineageEntry(id4,
new LineageEntry(new ArrayList<>(), Arrays.asList("s12"), LineageEntryState.IN_PROGRESS, 44444L));
LineageEntry lineageEntry4 = segmentLineage.getLineageEntry(id4);
Assert.assertEquals(lineageEntry4.getSegmentsFrom(), new ArrayList<>());
Assert.assertEquals(lineageEntry4.getSegmentsTo(), Arrays.asList("s12"));
Assert.assertEquals(lineageEntry4.getState(), LineageEntryState.IN_PROGRESS);
Assert.assertEquals(lineageEntry4.getTimestamp(), 44444L);
// Test the conversion from the segment lineage to the znRecord
ZNRecord znRecord = segmentLineage.toZNRecord();
Assert.assertEquals(znRecord.getId(), "test_OFFLINE");
Map<String, List<String>> listFields = znRecord.getListFields();
List<String> entry = listFields.get(id);
Assert.assertEquals(entry.get(0), String.join(",", Arrays.asList("s1", "s2", "s3")));
Assert.assertEquals(entry.get(1), String.join(",", Arrays.asList("s4", "s5")));
Assert.assertEquals(entry.get(2), LineageEntryState.COMPLETED.toString());
Assert.assertEquals(entry.get(3), Long.toString(11111L));
List<String> entry2 = listFields.get(id2);
Assert.assertEquals(entry2.get(0), String.join(",", Arrays.asList("s6", "s6", "s8")));
Assert.assertEquals(entry2.get(1), String.join(",", Arrays.asList("s9", "s10")));
Assert.assertEquals(entry2.get(2), LineageEntryState.COMPLETED.toString());
Assert.assertEquals(entry2.get(3), Long.toString(22222L));
List<String> entry3 = listFields.get(id3);
Assert.assertEquals(entry3.get(0), String.join(",", Arrays.asList("s5", "s9")));
Assert.assertEquals(entry3.get(1), String.join(",", Arrays.asList("s11")));
Assert.assertEquals(entry3.get(2), LineageEntryState.IN_PROGRESS.toString());
Assert.assertEquals(entry3.get(3), Long.toString(33333L));
List<String> entry4 = listFields.get(id4);
Assert.assertEquals(entry4.get(0), "");
Assert.assertEquals(entry4.get(1), String.join(",", Arrays.asList("s12")));
Assert.assertEquals(entry4.get(2), LineageEntryState.IN_PROGRESS.toString());
Assert.assertEquals(entry4.get(3), Long.toString(44444L));
// Test the conversion from the znRecord to the segment lineage
SegmentLineage segmentLineageFromZNRecord = SegmentLineage.fromZNRecord(segmentLineage.toZNRecord());
Assert.assertEquals(segmentLineageFromZNRecord.getLineageEntry(id), lineageEntry);
Assert.assertEquals(segmentLineageFromZNRecord.getLineageEntry(id2), lineageEntry2);
Assert.assertEquals(segmentLineageFromZNRecord.getLineageEntry(id3), lineageEntry3);
Assert.assertEquals(segmentLineageFromZNRecord.getLineageEntry(id4), lineageEntry4);
// Try to delete by iterating through the lineage entry ids
for (String lineageId : segmentLineage.getLineageEntryIds()) {
segmentLineage.deleteLineageEntry(lineageId);
}
Assert.assertEquals(segmentLineage.getLineageEntryIds().size(), 0);
} |
public static Map<String, String> getSegmentationSourcesMap(final MiningModelCompilationDTO compilationDTO,
final List<KiePMMLModel> nestedModels) {
logger.debug("getSegmentationSourcesMap {}", compilationDTO.getModel().getSegmentation());
final Map<String, String> toReturn = getSegmentsSourcesMap(compilationDTO,
nestedModels);
return getSegmentationSourcesMapCommon(compilationDTO,
toReturn);
} | @Test
void getSegmentationSourcesMap() {
final List<KiePMMLModel> nestedModels = new ArrayList<>();
final CommonCompilationDTO<MiningModel> source =
CommonCompilationDTO.fromGeneratedPackageNameAndFields(PACKAGE_NAME,
pmml,
MINING_MODEL,
new PMMLCompilationContextMock(),
"FILENAME");
final MiningModelCompilationDTO compilationDTO =
MiningModelCompilationDTO.fromCompilationDTO(source);
final Map<String, String> retrieved = KiePMMLSegmentationFactory.getSegmentationSourcesMap(compilationDTO,
nestedModels);
assertThat(retrieved).isNotNull();
int expectedNestedModels = MINING_MODEL.getSegmentation().getSegments().size();
assertThat(nestedModels).hasSize(expectedNestedModels);
} |
public static HostInfo parseHostInfo(final String applicationServerId) {
if (applicationServerId == null || applicationServerId.trim().isEmpty()) {
return StreamsMetadataState.UNKNOWN_HOST;
}
final String serverId = applicationServerId.endsWith("/")
? applicationServerId.substring(0, applicationServerId.lastIndexOf("/"))
: applicationServerId;
final String host = getHost(serverId);
final Integer port = getPort(serverId);
if (host == null || port == null) {
throw new KsqlException(String.format(
"Error parsing host address %s. Expected format host:port.", applicationServerId));
}
return new HostInfo(host, port);
} | @Test
public void shouldReturnServerPortWithTrailingSlash() {
// When:
final HostInfo hostInfo = ServerUtil.parseHostInfo("http://localhost:8088/");
// Then:
assertThat(hostInfo.port(), Matchers.is(8088));
} |
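A complementary sketch of the trivial inputs handled by the guard at the top of parseHostInfo: null or blank ids map to StreamsMetadataState.UNKNOWN_HOST rather than throwing.
assertThat(ServerUtil.parseHostInfo(null), Matchers.is(StreamsMetadataState.UNKNOWN_HOST));
assertThat(ServerUtil.parseHostInfo("   "), Matchers.is(StreamsMetadataState.UNKNOWN_HOST));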
@Override
public GlobalCommitResponseProto convert2Proto(GlobalCommitResponse globalCommitResponse) {
final short typeCode = globalCommitResponse.getTypeCode();
final AbstractMessageProto abstractMessage = AbstractMessageProto.newBuilder().setMessageType(
MessageTypeProto.forNumber(typeCode)).build();
final String msg = globalCommitResponse.getMsg();
final AbstractResultMessageProto abstractResultMessageProto = AbstractResultMessageProto.newBuilder().setMsg(
msg == null ? "" : msg).setResultCode(ResultCodeProto.valueOf(globalCommitResponse.getResultCode().name()))
.setAbstractMessage(abstractMessage).build();
AbstractTransactionResponseProto abstractTransactionResponseProto = AbstractTransactionResponseProto
.newBuilder().setAbstractResultMessage(abstractResultMessageProto).setTransactionExceptionCode(
TransactionExceptionCodeProto.valueOf(globalCommitResponse.getTransactionExceptionCode().name()))
.build();
AbstractGlobalEndResponseProto abstractGlobalEndResponseProto = AbstractGlobalEndResponseProto.newBuilder()
.setAbstractTransactionResponse(abstractTransactionResponseProto).setGlobalStatus(
GlobalStatusProto.valueOf(globalCommitResponse.getGlobalStatus().name())).build();
GlobalCommitResponseProto result = GlobalCommitResponseProto.newBuilder().setAbstractGlobalEndResponse(
abstractGlobalEndResponseProto).build();
return result;
} | @Test
public void convert2Proto() {
GlobalCommitResponse globalCommitResponse = new GlobalCommitResponse();
globalCommitResponse.setGlobalStatus(GlobalStatus.AsyncCommitting);
globalCommitResponse.setMsg("msg");
globalCommitResponse.setResultCode(ResultCode.Failed);
globalCommitResponse.setTransactionExceptionCode(TransactionExceptionCode.BranchRegisterFailed);
GlobalCommitResponseConvertor convertor = new GlobalCommitResponseConvertor();
GlobalCommitResponseProto proto = convertor.convert2Proto(globalCommitResponse);
GlobalCommitResponse real = convertor.convert2Model(proto);
assertThat((real.getTypeCode())).isEqualTo(globalCommitResponse.getTypeCode());
assertThat((real.getMsg())).isEqualTo(globalCommitResponse.getMsg());
assertThat((real.getResultCode())).isEqualTo(globalCommitResponse.getResultCode());
assertThat((real.getTransactionExceptionCode())).isEqualTo(globalCommitResponse.getTransactionExceptionCode());
} |
@Override
public MigratablePipeline findPipelineToMigrate(LoadImbalance imbalance) {
Set<? extends MigratablePipeline> candidates = imbalance.getPipelinesOwnedBy(imbalance.srcOwner);
long migrationThreshold = (long) ((imbalance.maximumLoad - imbalance.minimumLoad)
* MAXIMUM_NO_OF_EVENTS_AFTER_MIGRATION_COEFFICIENT);
MigratablePipeline candidate = null;
long loadInSelectedPipeline = 0;
for (MigratablePipeline pipeline : candidates) {
long load = imbalance.getLoad(pipeline);
if (load > loadInSelectedPipeline) {
if (load < migrationThreshold) {
loadInSelectedPipeline = load;
candidate = pipeline;
}
}
}
return candidate;
} | @Test
public void testFindPipelineToMigrate() {
NioThread srcOwner = mock(NioThread.class);
NioThread dstOwner = mock(NioThread.class);
imbalance.srcOwner = srcOwner;
imbalance.dstOwner = dstOwner;
imbalance.minimumLoad = 100;
MigratablePipeline pipeline1 = mock(MigratablePipeline.class);
loadCounter.set(pipeline1, 100L);
ownerToPipelines.put(dstOwner, singleton(pipeline1));
imbalance.maximumLoad = 300;
MigratablePipeline pipeline2 = mock(MigratablePipeline.class);
MigratablePipeline pipeline3 = mock(MigratablePipeline.class);
loadCounter.set(pipeline2, 200L);
loadCounter.set(pipeline3, 100L);
ownerToPipelines.put(srcOwner, Set.of(pipeline2, pipeline3));
MigratablePipeline pipelineToMigrate = strategy.findPipelineToMigrate(imbalance);
assertEquals(pipeline3, pipelineToMigrate);
} |
public Optional<User> login(String nameOrEmail, String password) {
if (nameOrEmail == null || password == null) {
return Optional.empty();
}
User user = userDAO.findByName(nameOrEmail);
if (user == null) {
user = userDAO.findByEmail(nameOrEmail);
}
if (user != null && !user.isDisabled()) {
boolean authenticated = encryptionService.authenticate(password, user.getPassword(), user.getSalt());
if (authenticated) {
performPostLoginActivities(user);
return Optional.of(user);
}
}
return Optional.empty();
} | @Test
void callingLoginShouldNotReturnUserObjectIfCouldNotFindUserByNameOrEmail() {
Mockito.when(userDAO.findByName("test@test.com")).thenReturn(null);
Mockito.when(userDAO.findByEmail("test@test.com")).thenReturn(null);
Optional<User> user = userService.login("test@test.com", "password");
Assertions.assertFalse(user.isPresent());
} |
@Override
public String doSharding(final Collection<String> availableTargetNames, final PreciseShardingValue<Comparable<?>> shardingValue) {
ShardingSpherePreconditions.checkNotNull(shardingValue.getValue(), NullShardingValueException::new);
String tableNameSuffix = String.valueOf(doSharding(parseDate(shardingValue.getValue())));
return ShardingAutoTableAlgorithmUtils.findMatchedTargetName(availableTargetNames, tableNameSuffix, shardingValue.getDataNodeInfo()).orElse(null);
} | @Test
void assertRangeDoShardingWithoutUpperBound() {
List<String> availableTargetNames = Arrays.asList("t_order_0", "t_order_1", "t_order_2", "t_order_3", "t_order_4", "t_order_5");
Collection<String> actual = shardingAlgorithm.doSharding(availableTargetNames,
new RangeShardingValue<>("t_order", "create_time", DATA_NODE_INFO, Range.greaterThan("2020-01-01 00:00:09")));
assertThat(actual.size(), is(3));
assertTrue(actual.contains("t_order_3"));
assertTrue(actual.contains("t_order_4"));
assertTrue(actual.contains("t_order_5"));
} |
@Override
public Long createNotifyMessage(Long userId, Integer userType,
NotifyTemplateDO template, String templateContent, Map<String, Object> templateParams) {
NotifyMessageDO message = new NotifyMessageDO().setUserId(userId).setUserType(userType)
.setTemplateId(template.getId()).setTemplateCode(template.getCode())
.setTemplateType(template.getType()).setTemplateNickname(template.getNickname())
.setTemplateContent(templateContent).setTemplateParams(templateParams).setReadStatus(false);
notifyMessageMapper.insert(message);
return message.getId();
} | @Test
public void testCreateNotifyMessage_success() {
// prepare parameters
Long userId = randomLongId();
Integer userType = randomEle(UserTypeEnum.values()).getValue();
NotifyTemplateDO template = randomPojo(NotifyTemplateDO.class);
String templateContent = randomString();
Map<String, Object> templateParams = randomTemplateParams();
// mock methods
// call
Long messageId = notifyMessageService.createNotifyMessage(userId, userType,
template, templateContent, templateParams);
// assert
NotifyMessageDO message = notifyMessageMapper.selectById(messageId);
assertNotNull(message);
assertEquals(userId, message.getUserId());
assertEquals(userType, message.getUserType());
assertEquals(template.getId(), message.getTemplateId());
assertEquals(template.getCode(), message.getTemplateCode());
assertEquals(template.getType(), message.getTemplateType());
assertEquals(template.getNickname(), message.getTemplateNickname());
assertEquals(templateContent, message.getTemplateContent());
assertEquals(templateParams, message.getTemplateParams());
assertEquals(false, message.getReadStatus());
assertNull(message.getReadTime());
} |
@Override
public ListenableFuture<?> execute(CreateMaterializedView statement, TransactionManager transactionManager, Metadata metadata, AccessControl accessControl, Session session, List<Expression> parameters, WarningCollector warningCollector)
{
QualifiedObjectName viewName = createQualifiedObjectName(session, statement, statement.getName());
Optional<TableHandle> viewHandle = metadata.getMetadataResolver(session).getTableHandle(viewName);
if (viewHandle.isPresent()) {
if (!statement.isNotExists()) {
throw new SemanticException(MATERIALIZED_VIEW_ALREADY_EXISTS, statement, "Materialized view '%s' already exists", viewName);
}
return immediateFuture(null);
}
accessControl.checkCanCreateTable(session.getRequiredTransactionId(), session.getIdentity(), session.getAccessControlContext(), viewName);
accessControl.checkCanCreateView(session.getRequiredTransactionId(), session.getIdentity(), session.getAccessControlContext(), viewName);
Map<NodeRef<Parameter>, Expression> parameterLookup = parameterExtractor(statement, parameters);
Analyzer analyzer = new Analyzer(session, metadata, sqlParser, accessControl, Optional.empty(), parameters, parameterLookup, warningCollector);
Analysis analysis = analyzer.analyze(statement);
ConnectorId connectorId = metadata.getCatalogHandle(session, viewName.getCatalogName())
.orElseThrow(() -> new PrestoException(NOT_FOUND, "Catalog does not exist: " + viewName.getCatalogName()));
List<ColumnMetadata> columnMetadata = analysis.getOutputDescriptor(statement.getQuery())
.getVisibleFields().stream()
.map(field -> new ColumnMetadata(field.getName().get(), field.getType()))
.collect(toImmutableList());
Map<String, Expression> sqlProperties = mapFromProperties(statement.getProperties());
Map<String, Object> properties = metadata.getTablePropertyManager().getProperties(
connectorId,
viewName.getCatalogName(),
sqlProperties,
session,
metadata,
parameterLookup);
ConnectorTableMetadata viewMetadata = new ConnectorTableMetadata(
toSchemaTableName(viewName),
columnMetadata,
properties,
statement.getComment());
String sql = getFormattedSql(statement.getQuery(), sqlParser, Optional.of(parameters));
List<SchemaTableName> baseTables = analysis.getTableNodes().stream()
.map(table -> {
QualifiedObjectName tableName = createQualifiedObjectName(session, table, table.getName());
if (!viewName.getCatalogName().equals(tableName.getCatalogName())) {
throw new SemanticException(
NOT_SUPPORTED,
statement,
"Materialized view %s created from a base table in a different catalog %s is not supported.",
viewName, tableName);
}
return toSchemaTableName(tableName);
})
.distinct()
.collect(toImmutableList());
MaterializedViewColumnMappingExtractor extractor = new MaterializedViewColumnMappingExtractor(analysis, session);
MaterializedViewDefinition viewDefinition = new MaterializedViewDefinition(
sql,
viewName.getSchemaName(),
viewName.getObjectName(),
baseTables,
Optional.of(session.getUser()),
extractor.getMaterializedViewColumnMappings(),
extractor.getMaterializedViewDirectColumnMappings(),
extractor.getBaseTablesOnOuterJoinSide(),
Optional.empty());
try {
metadata.createMaterializedView(session, viewName.getCatalogName(), viewMetadata, viewDefinition, statement.isNotExists());
}
catch (PrestoException e) {
// connectors are not required to handle the ignoreExisting flag
if (!e.getErrorCode().equals(ALREADY_EXISTS.toErrorCode()) || !statement.isNotExists()) {
throw e;
}
}
return immediateFuture(null);
} | @Test
public void testCreateMaterializedViewNotExistsTrue()
{
SqlParser parser = new SqlParser();
String sql = String.format("CREATE MATERIALIZED VIEW IF NOT EXISTS %s AS SELECT 2021 AS col_0 FROM %s", MATERIALIZED_VIEW_A, TABLE_A);
CreateMaterializedView statement = (CreateMaterializedView) parser.createStatement(sql, ParsingOptions.builder().build());
QueryStateMachine stateMachine = QueryStateMachine.begin(
sql,
Optional.empty(),
testSession,
URI.create("fake://uri"),
new ResourceGroupId("test"),
Optional.empty(),
false,
transactionManager,
accessControl,
executorService,
metadata,
WarningCollector.NOOP);
WarningCollector warningCollector = stateMachine.getWarningCollector();
CreateMaterializedViewTask createMaterializedViewTask = new CreateMaterializedViewTask(parser);
getFutureValue(createMaterializedViewTask.execute(statement, transactionManager, metadata, accessControl, testSession, emptyList(), warningCollector));
assertEquals(metadata.getCreateMaterializedViewCallCount(), 1);
} |
static String toJavaName(String opensslName) {
if (opensslName == null) {
return null;
}
Matcher matcher = PATTERN.matcher(opensslName);
if (matcher.matches()) {
String group1 = matcher.group(1);
if (group1 != null) {
return group1.toUpperCase(Locale.ROOT) + "with" + matcher.group(2).toUpperCase(Locale.ROOT);
}
if (matcher.group(3) != null) {
return matcher.group(4).toUpperCase(Locale.ROOT) + "with" + matcher.group(3).toUpperCase(Locale.ROOT);
}
if (matcher.group(5) != null) {
return matcher.group(6).toUpperCase(Locale.ROOT) + "with" + matcher.group(5).toUpperCase(Locale.ROOT);
}
}
return null;
} | @Test
public void testInvalid() {
assertNull(SignatureAlgorithmConverter.toJavaName("ThisIsSomethingInvalid"));
} |
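A hedged companion to the invalid case: the null guard at the top of toJavaName returns null, and classic OpenSSL names of the <digest>With<cipher>Encryption shape are expected to map to the JCA <DIGEST>with<CIPHER> form via the first capture group; the concrete mapping below is an assumption about PATTERN, not a verified fixture.
assertNull(SignatureAlgorithmConverter.toJavaName(null));
// Assumed mapping: "sha256WithRSAEncryption" -> "SHA256withRSA"; verify against the actual PATTERN.
assertEquals("SHA256withRSA", SignatureAlgorithmConverter.toJavaName("sha256WithRSAEncryption"));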
@Override
public ConsumeMessageDirectlyResult consumeMessageDirectly(MessageExt msg, String brokerName) {
ConsumeMessageDirectlyResult result = new ConsumeMessageDirectlyResult();
result.setOrder(true);
List<MessageExt> msgs = new ArrayList<>();
msgs.add(msg);
MessageQueue mq = new MessageQueue();
mq.setBrokerName(brokerName);
mq.setTopic(msg.getTopic());
mq.setQueueId(msg.getQueueId());
ConsumeOrderlyContext context = new ConsumeOrderlyContext(mq);
this.defaultMQPushConsumerImpl.resetRetryAndNamespace(msgs, this.consumerGroup);
final long beginTime = System.currentTimeMillis();
log.info("consumeMessageDirectly receive new message: {}", msg);
try {
ConsumeOrderlyStatus status = this.messageListener.consumeMessage(msgs, context);
if (status != null) {
switch (status) {
case COMMIT:
result.setConsumeResult(CMResult.CR_COMMIT);
break;
case ROLLBACK:
result.setConsumeResult(CMResult.CR_ROLLBACK);
break;
case SUCCESS:
result.setConsumeResult(CMResult.CR_SUCCESS);
break;
case SUSPEND_CURRENT_QUEUE_A_MOMENT:
result.setConsumeResult(CMResult.CR_LATER);
break;
default:
break;
}
} else {
result.setConsumeResult(CMResult.CR_RETURN_NULL);
}
} catch (Throwable e) {
result.setConsumeResult(CMResult.CR_THROW_EXCEPTION);
result.setRemark(UtilAll.exceptionSimpleDesc(e));
log.warn("consumeMessageDirectly exception: {} Group: {} Msgs: {} MQ: {}",
UtilAll.exceptionSimpleDesc(e),
ConsumeMessageOrderlyService.this.consumerGroup,
msgs,
mq, e);
}
result.setAutoCommit(context.isAutoCommit());
result.setSpentTimeMills(System.currentTimeMillis() - beginTime);
log.info("consumeMessageDirectly Result: {}", result);
return result;
} | @Test
public void testConsumeMessageDirectly_WithException() {
MessageListenerOrderly listenerOrderly = new MessageListenerOrderly() {
@Override
public ConsumeOrderlyStatus consumeMessage(List<MessageExt> msgs, ConsumeOrderlyContext context) {
throw new RuntimeException();
}
};
ConsumeMessageOrderlyService consumeMessageOrderlyService = new ConsumeMessageOrderlyService(pushConsumer.getDefaultMQPushConsumerImpl(), listenerOrderly);
MessageExt msg = new MessageExt();
msg.setTopic(topic);
assertTrue(consumeMessageOrderlyService.consumeMessageDirectly(msg, brokerName).getConsumeResult().equals(CMResult.CR_THROW_EXCEPTION));
} |
public String getLogicColumnByCipherColumn(final String cipherColumnName) {
for (Entry<String, EncryptColumn> entry : columns.entrySet()) {
if (entry.getValue().getCipher().getName().equalsIgnoreCase(cipherColumnName)) {
return entry.getValue().getName();
}
}
throw new EncryptLogicColumnNotFoundException(cipherColumnName);
} | @Test
void assertGetLogicColumnByCipherColumn() {
assertThat(encryptTable.getLogicColumnByCipherColumn("cipherColumn"), is("logicColumn"));
} |
public static String toHex(long value) {
return Strings.padStart(UnsignedLongs.toString(value, 16), 16, '0');
} | @Test
public void toHex() throws Exception {
assertEquals("0f", Tools.toHex(15, 2));
assertEquals("ffff", Tools.toHex(65535, 4));
assertEquals("1000", Tools.toHex(4096, 4));
assertEquals("000000000000000f", Tools.toHex(15));
assertEquals("ffffffffffffffff", Tools.toHex(0xffffffffffffffffL));
assertEquals("0xffffffffffffffff", Tools.toHexWithPrefix(0xffffffffffffffffL));
} |
public boolean getBooleanProperty(String key, boolean defaultValue) {
return getBooleanProperty(key, defaultValue, false);
} | @Test
public void testBooleanProperty() {
TypedProperties p = createProperties();
assertEquals(true, p.getBooleanProperty("boolean", false));
assertEquals(true, p.getBooleanProperty("boolean_put_str", false));
assertEquals(true, p.getBooleanProperty("boolean_invalid", true));
assertEquals(true, p.getBooleanProperty("boolean_null", true));
} |
static Result coerceUserList(
final Collection<Expression> expressions,
final ExpressionTypeManager typeManager
) {
return coerceUserList(expressions, typeManager, Collections.emptyMap());
} | @Test
public void shouldHandleEmpty() {
// Given:
final ImmutableList<Expression> expressions = ImmutableList.of();
// When:
final Result result = CoercionUtil.coerceUserList(expressions, typeManager);
// Then:
assertThat(result.commonType(), is(Optional.empty()));
assertThat(result.expressions(), is(ImmutableList.of()));
} |
OrcBulkWriter(Vectorizer<T> vectorizer, Writer writer) {
this.vectorizer = checkNotNull(vectorizer);
this.writer = checkNotNull(writer);
this.rowBatch = vectorizer.getSchema().createRowBatch();
// Configure the vectorizer with the writer so that users can add
// metadata on the fly through the Vectorizer#vectorize(...) method.
this.vectorizer.setWriter(this.writer);
} | @Test
void testOrcBulkWriter(@TempDir File outDir) throws Exception {
final Properties writerProps = new Properties();
writerProps.setProperty("orc.compress", "LZ4");
final OrcBulkWriterFactory<Record> writer =
new OrcBulkWriterFactory<>(
new RecordVectorizer(schema), writerProps, new Configuration());
StreamingFileSink<Record> sink =
StreamingFileSink.forBulkFormat(new Path(outDir.toURI()), writer)
.withBucketAssigner(new UniqueBucketAssigner<>("test"))
.withBucketCheckInterval(10000)
.build();
try (OneInputStreamOperatorTestHarness<Record, Object> testHarness =
new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink), 1, 1, 0)) {
testHarness.setup();
testHarness.open();
int time = 0;
for (final Record record : input) {
testHarness.processElement(record, ++time);
}
testHarness.snapshot(1, ++time);
testHarness.notifyOfCompletedCheckpoint(1);
OrcBulkWriterTestUtil.validate(outDir, input);
}
} |
@Override
public AlterConfigsResult incrementalAlterConfigs(Map<ConfigResource, Collection<AlterConfigOp>> configs,
final AlterConfigsOptions options) {
final Map<ConfigResource, KafkaFutureImpl<Void>> allFutures = new HashMap<>();
// BROKER_LOGGER requests always go to a specific, constant broker or controller node.
//
// BROKER resource changes for a specific (non-default) resource go to either that specific
// node (if using bootstrap.servers), or directly to the active controller (if using
// bootstrap.controllers)
//
// All other requests go to the least loaded broker (if using bootstrap.servers) or the
// active controller (if using bootstrap.controllers)
final Collection<ConfigResource> unifiedRequestResources = new ArrayList<>();
for (ConfigResource resource : configs.keySet()) {
Integer node = nodeFor(resource);
if (metadataManager.usingBootstrapControllers()) {
if (!resource.type().equals(ConfigResource.Type.BROKER_LOGGER)) {
node = null;
}
}
if (node != null) {
NodeProvider nodeProvider = new ConstantNodeIdProvider(node, true);
allFutures.putAll(incrementalAlterConfigs(configs, options, Collections.singleton(resource), nodeProvider));
} else
unifiedRequestResources.add(resource);
}
if (!unifiedRequestResources.isEmpty())
allFutures.putAll(incrementalAlterConfigs(configs, options, unifiedRequestResources, new LeastLoadedBrokerOrActiveKController()));
return new AlterConfigsResult(new HashMap<>(allFutures));
} | @Test
public void testIncrementalAlterConfigs() throws Exception {
try (AdminClientUnitTestEnv env = mockClientEnv()) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
//test error scenarios
IncrementalAlterConfigsResponseData responseData = new IncrementalAlterConfigsResponseData();
responseData.responses().add(new AlterConfigsResourceResponse()
.setResourceName("")
.setResourceType(ConfigResource.Type.BROKER.id())
.setErrorCode(Errors.CLUSTER_AUTHORIZATION_FAILED.code())
.setErrorMessage("authorization error"));
responseData.responses().add(new AlterConfigsResourceResponse()
.setResourceName("metric1")
.setResourceType(ConfigResource.Type.CLIENT_METRICS.id())
.setErrorCode(Errors.INVALID_REQUEST.code())
.setErrorMessage("Subscription is not allowed"));
responseData.responses().add(new AlterConfigsResourceResponse()
.setResourceName("topic1")
.setResourceType(ConfigResource.Type.TOPIC.id())
.setErrorCode(Errors.INVALID_REQUEST.code())
.setErrorMessage("Config value append is not allowed for config"));
responseData.responses().add(new AlterConfigsResourceResponse()
.setResourceName("group1")
.setResourceType(ConfigResource.Type.GROUP.id())
.setErrorCode(Errors.INVALID_CONFIG.code())
.setErrorMessage("Unknown group config name: group.initial.rebalance.delay.ms"));
env.kafkaClient().prepareResponse(new IncrementalAlterConfigsResponse(responseData));
ConfigResource brokerResource = new ConfigResource(ConfigResource.Type.BROKER, "");
ConfigResource topicResource = new ConfigResource(ConfigResource.Type.TOPIC, "topic1");
ConfigResource metricResource = new ConfigResource(ConfigResource.Type.CLIENT_METRICS, "metric1");
ConfigResource groupResource = new ConfigResource(ConfigResource.Type.GROUP, "group1");
AlterConfigOp alterConfigOp1 = new AlterConfigOp(
new ConfigEntry("log.segment.bytes", "1073741"),
AlterConfigOp.OpType.SET);
AlterConfigOp alterConfigOp2 = new AlterConfigOp(
new ConfigEntry("compression.type", "gzip"),
AlterConfigOp.OpType.APPEND);
AlterConfigOp alterConfigOp3 = new AlterConfigOp(
new ConfigEntry("interval.ms", "1000"),
AlterConfigOp.OpType.APPEND);
AlterConfigOp alterConfigOp4 = new AlterConfigOp(
new ConfigEntry("group.initial.rebalance.delay.ms", "1000"),
AlterConfigOp.OpType.SET);
final Map<ConfigResource, Collection<AlterConfigOp>> configs = new HashMap<>();
configs.put(brokerResource, singletonList(alterConfigOp1));
configs.put(topicResource, singletonList(alterConfigOp2));
configs.put(metricResource, singletonList(alterConfigOp3));
configs.put(groupResource, singletonList(alterConfigOp4));
AlterConfigsResult result = env.adminClient().incrementalAlterConfigs(configs);
TestUtils.assertFutureError(result.values().get(brokerResource), ClusterAuthorizationException.class);
TestUtils.assertFutureError(result.values().get(topicResource), InvalidRequestException.class);
TestUtils.assertFutureError(result.values().get(metricResource), InvalidRequestException.class);
TestUtils.assertFutureError(result.values().get(groupResource), InvalidConfigurationException.class);
// Test a call where there are no errors.
responseData = new IncrementalAlterConfigsResponseData();
responseData.responses().add(new AlterConfigsResourceResponse()
.setResourceName("")
.setResourceType(ConfigResource.Type.BROKER.id())
.setErrorCode(Errors.NONE.code())
.setErrorMessage(ApiError.NONE.message()));
responseData.responses().add(new AlterConfigsResourceResponse()
.setResourceName("metric1")
.setResourceType(ConfigResource.Type.CLIENT_METRICS.id())
.setErrorCode(Errors.NONE.code())
.setErrorMessage(ApiError.NONE.message()));
responseData.responses().add(new AlterConfigsResourceResponse()
.setResourceName("group1")
.setResourceType(ConfigResource.Type.GROUP.id())
.setErrorCode(Errors.NONE.code())
.setErrorMessage(ApiError.NONE.message()));
final Map<ConfigResource, Collection<AlterConfigOp>> successConfig = new HashMap<>();
successConfig.put(brokerResource, singletonList(alterConfigOp1));
successConfig.put(metricResource, singletonList(alterConfigOp3));
successConfig.put(groupResource, singletonList(alterConfigOp4));
env.kafkaClient().prepareResponse(new IncrementalAlterConfigsResponse(responseData));
env.adminClient().incrementalAlterConfigs(successConfig).all().get();
}
} |
@Override
public synchronized List<PersistFile> pollFilesToPersist(long workerId)
throws InvalidPathException, AccessControlException {
List<PersistFile> filesToPersist = new ArrayList<>();
List<Long> fileIdsToPersist = new ArrayList<>();
if (!mWorkerToAsyncPersistFiles.containsKey(workerId)) {
return filesToPersist;
}
Set<Long> scheduledFiles = mWorkerToAsyncPersistFiles.get(workerId);
try {
for (long fileId : scheduledFiles) {
try {
FileInfo fileInfo = mFileSystemMasterView.getFileInfo(fileId);
if (fileInfo.isCompleted()) {
fileIdsToPersist.add(fileId);
List<Long> blockIds = new ArrayList<>();
for (FileBlockInfo fileBlockInfo : mFileSystemMasterView
.getFileBlockInfoList(mFileSystemMasterView.getPath(fileId))) {
blockIds.add(fileBlockInfo.getBlockInfo().getBlockId());
}
filesToPersist.add(new PersistFile(fileId, blockIds));
}
} catch (FileDoesNotExistException e) {
LOG.warn("FileId {} does not exist, ignore persistence it", fileId);
}
}
} catch (UnavailableException e) {
return filesToPersist;
}
mWorkerToAsyncPersistFiles.get(workerId).removeAll(fileIdsToPersist);
return filesToPersist;
} | @Test
public void persistenceFileWithBlocksOnMultipleWorkers() throws Exception {
DefaultAsyncPersistHandler handler =
new DefaultAsyncPersistHandler(new FileSystemMasterView(mFileSystemMaster));
AlluxioURI path = new AlluxioURI("/test");
List<FileBlockInfo> blockInfoList = new ArrayList<>();
BlockLocation location1 = new BlockLocation().setWorkerId(1);
blockInfoList.add(new FileBlockInfo()
.setBlockInfo(new BlockInfo().setLocations(Lists.newArrayList(location1))));
BlockLocation location2 = new BlockLocation().setWorkerId(2);
blockInfoList.add(new FileBlockInfo()
.setBlockInfo(new BlockInfo().setLocations(Lists.newArrayList(location2))));
long fileId = 2;
when(mFileSystemMaster.getFileId(path)).thenReturn(fileId);
when(mFileSystemMaster.getFileInfo(fileId))
.thenReturn(new FileInfo().setLength(1).setCompleted(true));
when(mFileSystemMaster.getFileBlockInfoList(path)).thenReturn(blockInfoList);
// no persist scheduled on any worker
assertEquals(0, handler.pollFilesToPersist(1).size());
assertEquals(0, handler.pollFilesToPersist(2).size());
} |
synchronized void add(int splitCount) {
// circular buffer: once the history is full, overwrite the oldest entry
int pos = count % history.length;
history[pos] = splitCount;
count += 1;
}
public void testThreeMoreThanFullHistory() {
EnumerationHistory history = new EnumerationHistory(3);
history.add(1);
history.add(2);
history.add(3);
history.add(4);
history.add(5);
history.add(6);
int[] expectedHistorySnapshot = {4, 5, 6};
testHistory(history, expectedHistorySnapshot);
} |
public void registerStrategy(BatchingStrategy<?, ?, ?> strategy) {
_strategies.add(strategy);
} | @Test
public void testBatchAndFailedSingleton() {
RecordingStrategy<Integer, Integer, String> strategy =
new RecordingStrategy<>((key, promise) -> {
if (key % 2 == 0) {
promise.done(String.valueOf(key));
} else {
promise.fail(new Exception());
}
}, key -> key % 2);
_batchingSupport.registerStrategy(strategy);
Task<String> task = Task.par(strategy.batchable(0), strategy.batchable(1).recover(e -> "failed"), strategy.batchable(2))
.map("concat", (s0, s1, s2) -> s0 + s1 + s2);
String result = runAndWait("TestBatchingSupport.testBatchAndFailedSingleton", task);
assertEquals(result, "0failed2");
assertTrue(strategy.getClassifiedKeys().contains(0));
assertTrue(strategy.getClassifiedKeys().contains(1));
assertTrue(strategy.getClassifiedKeys().contains(2));
assertEquals(strategy.getExecutedBatches().size(), 1);
assertEquals(strategy.getExecutedSingletons().size(), 1);
} |
@Override
public Set<Interface> getInterfacesByIp(IpAddress ip) {
return interfaces.values()
.stream()
.flatMap(Collection::stream)
.filter(intf -> intf.ipAddressesList()
.stream()
.anyMatch(ia -> ia.ipAddress().equals(ip)))
.collect(collectingAndThen(toSet(), ImmutableSet::copyOf));
} | @Test
public void testGetInterfacesByIp() throws Exception {
IpAddress ip = Ip4Address.valueOf("192.168.2.1");
Set<Interface> byIp = Collections.singleton(createInterface(2));
assertEquals(byIp, interfaceManager.getInterfacesByIp(ip));
} |
public static Throwable stripException(
Throwable throwableToStrip, Class<? extends Throwable> typeToStrip) {
while (typeToStrip.isAssignableFrom(throwableToStrip.getClass())
&& throwableToStrip.getCause() != null) {
throwableToStrip = throwableToStrip.getCause();
}
return throwableToStrip;
} | @Test
void testInvalidExceptionStripping() {
final FlinkException expectedException =
new FlinkException(new RuntimeException(new FlinkException("inner exception")));
final Throwable strippedException =
ExceptionUtils.stripException(expectedException, RuntimeException.class);
assertThat(strippedException).isEqualTo(expectedException);
} |
@Override
public void closeRewardActivity(Long id) {
// validate that the activity exists
RewardActivityDO dbRewardActivity = validateRewardActivityExists(id);
if (dbRewardActivity.getStatus().equals(PromotionActivityStatusEnum.CLOSE.getStatus())) { // an already-closed activity cannot be closed again
throw exception(REWARD_ACTIVITY_CLOSE_FAIL_STATUS_CLOSED);
}
if (dbRewardActivity.getStatus().equals(PromotionActivityStatusEnum.END.getStatus())) { // an activity that has already ended cannot be closed
throw exception(REWARD_ACTIVITY_CLOSE_FAIL_STATUS_END);
}
// update the status
RewardActivityDO updateObj = new RewardActivityDO().setId(id).setStatus(PromotionActivityStatusEnum.CLOSE.getStatus());
rewardActivityMapper.updateById(updateObj);
} | @Test
public void testCloseRewardActivity() {
// mock data
RewardActivityDO dbRewardActivity = randomPojo(RewardActivityDO.class, o -> o.setStatus(PromotionActivityStatusEnum.WAIT.getStatus()));
rewardActivityMapper.insert(dbRewardActivity);// @Sql: insert an existing record first
// prepare parameters
Long id = dbRewardActivity.getId();
// invoke
rewardActivityService.closeRewardActivity(id);
// verify the status
RewardActivityDO rewardActivity = rewardActivityMapper.selectById(id);
assertEquals(rewardActivity.getStatus(), PromotionActivityStatusEnum.CLOSE.getStatus());
} |
@Override
public void aggregate(Iterable<Integer> hashValues) {
for (int hash: hashValues) {
aggregate(hash);
}
} | @Test
public void requireThatSerializationRetainAllData() {
SparseSketch from = new SparseSketch();
from.aggregate(42);
from.aggregate(1337);
SparseSketch to = new SparseSketch();
BufferSerializer buffer = new BufferSerializer();
from.serialize(buffer);
buffer.flip();
to.deserialize(buffer);
assertEquals(from, to);
} |
public static String keyToString(Object key,
URLEscaper.Escaping escaping,
UriComponent.Type componentType,
boolean full,
ProtocolVersion version)
{
if (version.compareTo(AllProtocolVersions.RESTLI_PROTOCOL_2_0_0.getProtocolVersion()) >= 0)
{
return keyToStringV2(key, escaping, componentType, full);
}
else
{
return keyToStringV1(key, escaping, full);
}
} | @Test(dataProvider = TestConstants.RESTLI_PROTOCOL_1_2_PREFIX + "longKey")
public void testLongKeyToString(ProtocolVersion version, String expected)
{
Long longKey = 6L;
String longKeyString = URIParamUtils.keyToString(longKey, NO_ESCAPING, null, true, version);
Assert.assertEquals(longKeyString, expected);
} |
@Override
public CloseableHttpClient connect(final ProxyFinder proxy, final HostKeyCallback key, final LoginCallback prompt, final CancelCallback cancel) throws ConnectionCanceledException {
final HttpClientBuilder configuration = builder.build(proxy, this, prompt);
authorizationService = new OAuth2RequestInterceptor(configuration.build(), host, prompt)
.withRedirectUri(host.getProtocol().getOAuthRedirectUrl());
configuration.addInterceptorLast(authorizationService);
configuration.setServiceUnavailableRetryStrategy(new CustomServiceUnavailableRetryStrategy(host,
new ExecutionCountServiceUnavailableRetryStrategy(new OAuth2ErrorResponseInterceptor(host, authorizationService))));
return configuration.build();
} | @Test
public void testConnect() throws Exception {
assertNotEquals(StringUtils.EMPTY, session.getHost().getCredentials().getUsername());
} |
public void hasLength(int expectedLength) {
checkArgument(expectedLength >= 0, "expectedLength(%s) must be >= 0", expectedLength);
check("length()").that(checkNotNull(actual).length()).isEqualTo(expectedLength);
} | @Test
public void hasLengthNegative() {
try {
assertThat("kurt").hasLength(-1);
fail();
} catch (IllegalArgumentException expected) {
}
} |
@Override
public void writeBundleDataOnZooKeeper() {
updateBundleData();
// Write the bundle data to metadata store.
List<CompletableFuture<Void>> futures = new ArrayList<>();
// use synchronized to protect bundleArr.
synchronized (bundleArr) {
int updateBundleCount = selectTopKBundle();
bundleArr.stream().limit(updateBundleCount).forEach(entry -> futures.add(
pulsarResources.getLoadBalanceResources().getBundleDataResources().updateBundleData(
entry.getKey(), (BundleData) entry.getValue())));
}
// Write the time average broker data to metadata store.
for (Map.Entry<String, BrokerData> entry : loadData.getBrokerData().entrySet()) {
final String broker = entry.getKey();
final TimeAverageBrokerData data = entry.getValue().getTimeAverageData();
futures.add(pulsarResources.getLoadBalanceResources()
.getBrokerTimeAverageDataResources().updateTimeAverageBrokerData(broker, data));
}
try {
FutureUtil.waitForAll(futures).join();
} catch (Exception e) {
log.warn("Error when writing metadata data to store", e);
}
} | @Test
public void testFilterBundlesWhileWritingToMetadataStore() throws Exception {
Map<String, PulsarService> pulsarServices = new HashMap<>();
pulsarServices.put(pulsar1.getWebServiceAddress(), pulsar1);
pulsarServices.put(pulsar2.getWebServiceAddress(), pulsar2);
MetadataCache<BundleData> metadataCache = pulsar1.getLocalMetadataStore().getMetadataCache(BundleData.class);
String protocol = "http://";
PulsarService leaderBroker = pulsarServices.get(protocol + pulsar1.getLeaderElectionService().getCurrentLeader().get().getBrokerId());
ModularLoadManagerImpl loadManager = (ModularLoadManagerImpl) getField(
leaderBroker.getLoadManager().get(), "loadManager");
int topK = 1;
leaderBroker.getConfiguration().setLoadBalancerMaxNumberOfBundlesInBundleLoadReport(topK);
// there are two brokers in the cluster, so the total bundle count will be topK * 2
int exportBundleCount = topK * 2;
// create and configure bundle-data
final int totalBundles = 5;
final NamespaceBundle[] bundles = LoadBalancerTestingUtils.makeBundles(
nsFactory, "test", "test", "test", totalBundles);
LoadData loadData = (LoadData) getField(loadManager, "loadData");
for (int i = 0; i < totalBundles; i++) {
final BundleData bundleData = new BundleData(10, 1000);
final String bundleDataPath = String.format("%s/%s", BUNDLE_DATA_BASE_PATH, bundles[i]);
final TimeAverageMessageData longTermMessageData = new TimeAverageMessageData(1000);
longTermMessageData.setMsgThroughputIn(1000 * i);
longTermMessageData.setMsgThroughputOut(1000 * i);
longTermMessageData.setMsgRateIn(1000 * i);
longTermMessageData.setNumSamples(1000);
bundleData.setLongTermData(longTermMessageData);
loadData.getBundleData().put(bundles[i].toString(), bundleData);
loadData.getBrokerData().get(leaderBroker.getWebServiceAddress().substring(protocol.length()))
.getLocalData().getLastStats().put(bundles[i].toString(), new NamespaceBundleStats());
metadataCache.create(bundleDataPath, bundleData).join();
}
for (int i = 0; i < totalBundles; i++) {
final String bundleDataPath = String.format("%s/%s", BUNDLE_DATA_BASE_PATH, bundles[i]);
assertEquals(metadataCache.getWithStats(bundleDataPath).get().get().getStat().getVersion(), 0);
}
// update bundle data to zk and verify
loadManager.writeBundleDataOnZooKeeper();
int filterBundleCount = totalBundles - exportBundleCount;
for (int i = 0; i < filterBundleCount; i++) {
final String bundleDataPath = String.format("%s/%s", BUNDLE_DATA_BASE_PATH, bundles[i]);
assertEquals(metadataCache.getWithStats(bundleDataPath).get().get().getStat().getVersion(), 0);
}
for (int i = filterBundleCount; i < totalBundles; i++) {
final String bundleDataPath = String.format("%s/%s", BUNDLE_DATA_BASE_PATH, bundles[i]);
assertEquals(metadataCache.getWithStats(bundleDataPath).get().get().getStat().getVersion(), 1);
}
} |
public PaginationContext createPaginationContext(final TopProjectionSegment topProjectionSegment, final Collection<ExpressionSegment> expressions, final List<Object> params) {
Collection<AndPredicate> andPredicates = expressions.stream().flatMap(each -> ExpressionExtractUtils.getAndPredicates(each).stream()).collect(Collectors.toList());
Optional<ExpressionSegment> rowNumberPredicate = expressions.isEmpty() ? Optional.empty() : getRowNumberPredicate(andPredicates, topProjectionSegment.getAlias());
Optional<PaginationValueSegment> offset = rowNumberPredicate.isPresent() ? createOffsetWithRowNumber(rowNumberPredicate.get()) : Optional.empty();
PaginationValueSegment rowCount = topProjectionSegment.getTop();
return new PaginationContext(offset.orElse(null), rowCount, params);
} | @Test
void assertCreatePaginationContextWhenParameterMarkerRowNumberValueSegment() {
String name = "rowNumberAlias";
ColumnSegment left = new ColumnSegment(0, 10, new IdentifierValue(name));
ParameterMarkerExpressionSegment right = new ParameterMarkerExpressionSegment(0, 10, 0);
BinaryOperationExpression expression = new BinaryOperationExpression(0, 0, left, right, ">", null);
PaginationContext paginationContext = topPaginationContextEngine.createPaginationContext(
new TopProjectionSegment(0, 10, null, name), Collections.singletonList(expression), Collections.singletonList(1));
assertTrue(paginationContext.getOffsetSegment().isPresent());
PaginationValueSegment paginationValueSegment = paginationContext.getOffsetSegment().get();
assertThat(paginationValueSegment, instanceOf(ParameterMarkerRowNumberValueSegment.class));
ParameterMarkerRowNumberValueSegment parameterMarkerRowNumberValueSegment = (ParameterMarkerRowNumberValueSegment) paginationValueSegment;
assertThat(parameterMarkerRowNumberValueSegment.getStartIndex(), is(0));
assertThat(parameterMarkerRowNumberValueSegment.getStopIndex(), is(10));
assertThat(parameterMarkerRowNumberValueSegment.getParameterIndex(), is(0));
assertFalse(paginationContext.getRowCountSegment().isPresent());
} |
void runOnce() {
if (transactionManager != null) {
try {
transactionManager.maybeResolveSequences();
RuntimeException lastError = transactionManager.lastError();
// do not continue sending if the transaction manager is in a failed state
if (transactionManager.hasFatalError()) {
if (lastError != null)
maybeAbortBatches(lastError);
client.poll(retryBackoffMs, time.milliseconds());
return;
}
if (transactionManager.hasAbortableError() && shouldHandleAuthorizationError(lastError)) {
return;
}
// Check whether we need a new producerId. If so, we will enqueue an InitProducerId
// request which will be sent below
transactionManager.bumpIdempotentEpochAndResetIdIfNeeded();
if (maybeSendAndPollTransactionalRequest()) {
return;
}
} catch (AuthenticationException e) {
// This is already logged as error, but propagated here to perform any clean ups.
log.trace("Authentication exception while processing transactional request", e);
transactionManager.authenticationFailed(e);
}
}
long currentTimeMs = time.milliseconds();
long pollTimeout = sendProducerData(currentTimeMs);
client.poll(pollTimeout, currentTimeMs);
} | @Test
public void testCorrectHandlingOfDuplicateSequenceError() throws Exception {
final long producerId = 343434L;
TransactionManager transactionManager = createTransactionManager();
setupWithTransactionState(transactionManager);
prepareAndReceiveInitProducerId(producerId, Errors.NONE);
assertTrue(transactionManager.hasProducerId());
assertEquals(0, transactionManager.sequenceNumber(tp0));
// Send first ProduceRequest
Future<RecordMetadata> request1 = appendToAccumulator(tp0);
sender.runOnce();
String nodeId = client.requests().peek().destination();
Node node = new Node(Integer.parseInt(nodeId), "localhost", 0);
assertEquals(1, client.inFlightRequestCount());
assertEquals(1, transactionManager.sequenceNumber(tp0));
assertEquals(OptionalInt.empty(), transactionManager.lastAckedSequence(tp0));
// Send second ProduceRequest
Future<RecordMetadata> request2 = appendToAccumulator(tp0);
sender.runOnce();
assertEquals(2, client.inFlightRequestCount());
assertEquals(2, transactionManager.sequenceNumber(tp0));
assertEquals(OptionalInt.empty(), transactionManager.lastAckedSequence(tp0));
assertFalse(request1.isDone());
assertFalse(request2.isDone());
assertTrue(client.isReady(node, time.milliseconds()));
ClientRequest firstClientRequest = client.requests().peek();
ClientRequest secondClientRequest = (ClientRequest) client.requests().toArray()[1];
client.respondToRequest(secondClientRequest, produceResponse(tp0, 1000, Errors.NONE, 0));
sender.runOnce(); // receive response 1
assertEquals(OptionalLong.of(1000), transactionManager.lastAckedOffset(tp0));
assertEquals(OptionalInt.of(1), transactionManager.lastAckedSequence(tp0));
client.respondToRequest(firstClientRequest, produceResponse(tp0, ProduceResponse.INVALID_OFFSET, Errors.DUPLICATE_SEQUENCE_NUMBER, 0));
sender.runOnce(); // receive response 0
// Make sure that the last ack'd sequence doesn't change.
assertEquals(OptionalInt.of(1), transactionManager.lastAckedSequence(tp0));
assertEquals(OptionalLong.of(1000), transactionManager.lastAckedOffset(tp0));
assertFalse(client.hasInFlightRequests());
RecordMetadata unknownMetadata = request1.get();
assertFalse(unknownMetadata.hasOffset());
assertEquals(-1L, unknownMetadata.offset());
} |
public void setApplicationContext(final ApplicationContext applicationContext) {
this.applicationContext = applicationContext;
} | @Test
public void testSetCfgContext() throws NoSuchFieldException {
final ConfigurableApplicationContext cfgContext = mock(ConfigurableApplicationContext.class);
springBeanUtilsUnderTest.setApplicationContext(cfgContext);
assertNotNull(springBeanUtilsUnderTest.getClass().getDeclaredField("applicationContext"));
} |
public void addValueProviders(final String segmentName,
final RocksDB db,
final Cache cache,
final Statistics statistics) {
if (storeToValueProviders.isEmpty()) {
logger.debug("Adding metrics recorder of task {} to metrics recording trigger", taskId);
streamsMetrics.rocksDBMetricsRecordingTrigger().addMetricsRecorder(this);
} else if (storeToValueProviders.containsKey(segmentName)) {
throw new IllegalStateException("Value providers for store " + segmentName + " of task " + taskId +
" has been already added. This is a bug in Kafka Streams. " +
"Please open a bug report under https://issues.apache.org/jira/projects/KAFKA/issues");
}
verifyDbAndCacheAndStatistics(segmentName, db, cache, statistics);
logger.debug("Adding value providers for store {} of task {}", segmentName, taskId);
storeToValueProviders.put(segmentName, new DbAndCacheAndStatistics(db, cache, statistics));
} | @Test
public void shouldThrowIfValueProvidersForASegmentHasBeenAlreadyAdded() {
recorder.addValueProviders(SEGMENT_STORE_NAME_1, dbToAdd1, cacheToAdd1, statisticsToAdd1);
final Throwable exception = assertThrows(
IllegalStateException.class,
() -> recorder.addValueProviders(SEGMENT_STORE_NAME_1, dbToAdd1, cacheToAdd1, statisticsToAdd2)
);
assertThat(
exception.getMessage(),
is("Value providers for store " + SEGMENT_STORE_NAME_1 + " of task " + TASK_ID1 +
" has been already added. This is a bug in Kafka Streams. " +
"Please open a bug report under https://issues.apache.org/jira/projects/KAFKA/issues")
);
} |
public static RateLimiterRegistry of(Configuration configuration, CompositeCustomizer<RateLimiterConfigCustomizer> customizer){
CommonRateLimiterConfigurationProperties rateLimiterProperties = CommonsConfigurationRateLimiterConfiguration.of(configuration);
Map<String, RateLimiterConfig> rateLimiterConfigMap = rateLimiterProperties.getInstances()
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> rateLimiterProperties.createRateLimiterConfig(entry.getValue(), customizer, entry.getKey())));
return RateLimiterRegistry.of(rateLimiterConfigMap);
} | @Test
public void testRateLimiterRegistryFromYamlFile() throws ConfigurationException {
Configuration config = CommonsConfigurationUtil.getConfiguration(YAMLConfiguration.class, TestConstants.RESILIENCE_CONFIG_YAML_FILE_NAME);
RateLimiterRegistry registry = CommonsConfigurationRateLimiterRegistry.of(config, new CompositeCustomizer<>(List.of()));
Assertions.assertThat(registry.rateLimiter(TestConstants.BACKEND_A).getName()).isEqualTo(TestConstants.BACKEND_A);
Assertions.assertThat(registry.rateLimiter(TestConstants.BACKEND_B).getName()).isEqualTo(TestConstants.BACKEND_B);
} |
public static boolean seemDuplicates(FeedItem item1, FeedItem item2) {
if (sameAndNotEmpty(item1.getItemIdentifier(), item2.getItemIdentifier())) {
return true;
}
FeedMedia media1 = item1.getMedia();
FeedMedia media2 = item2.getMedia();
if (media1 == null || media2 == null) {
return false;
}
if (sameAndNotEmpty(media1.getStreamUrl(), media2.getStreamUrl())) {
return true;
}
return titlesLookSimilar(item1, item2)
&& datesLookSimilar(item1, item2)
&& durationsLookSimilar(media1, media2)
&& mimeTypeLooksSimilar(media1, media2);
} | @Test
public void testNoMediaType() {
assertTrue(FeedItemDuplicateGuesser.seemDuplicates(
item("id1", "Title", "example.com/episode1", 2 * DAYS, 5 * MINUTES, ""),
item("id2", "Title", "example.com/episode2", 2 * DAYS, 5 * MINUTES, "")));
} |