focal_method (stringlengths 13–60.9k) | test_case (stringlengths 25–109k) |
---|---|
@Override
public void checkTopicAccess(
final KsqlSecurityContext securityContext,
final String topicName,
final AclOperation operation
) {
checkAccess(new CacheKey(securityContext,
AuthObjectType.TOPIC,
topicName,
operation));
} | @Test
public void shouldThrowExceptionWhenBackendValidatorThrowsAnException() {
// Given:
doThrow(RuntimeException.class).when(backendValidator)
.checkTopicAccess(securityContext, TOPIC_1, AclOperation.READ);
// When:
assertThrows(
RuntimeException.class,
() -> cache.checkTopicAccess(securityContext, TOPIC_1, AclOperation.READ)
);
} |
Spec getSpec() {
return spec;
} | @Test
void basic() {
ProxyServer proxy = createTestServer(new MockConfigSource());
Spec spec = new Spec("localhost", 12345);
ConfigProxyRpcServer server = new ConfigProxyRpcServer(proxy, new Supervisor(new Transport()), spec);
assertEquals(spec, server.getSpec());
} |
public boolean ping() {
String checkPath = Joiner.on(PATH_DELIMITER).join(location,
joinPrefix(prefixRepo, name));
Status st = storage.checkPathExist(checkPath);
if (!st.ok()) {
errMsg = TimeUtils.longToTimeString(System.currentTimeMillis()) + ": " + st.getErrMsg();
return false;
}
// clear err msg
errMsg = null;
return true;
} | @Test
public void testPing() {
new Expectations() {
{
storage.checkPathExist(anyString);
minTimes = 0;
result = Status.OK;
}
};
repo = new Repository(10000, "repo", false, location, storage);
Assert.assertTrue(repo.ping());
Assert.assertTrue(repo.getErrorMsg() == null);
} |
@Override
public MapSettings setProperty(String key, String value) {
return (MapSettings) super.setProperty(key, value);
} | @Test
public void test_get_float() {
Settings settings = new MapSettings();
settings.setProperty("from_float", 3.14159f);
settings.setProperty("from_string", "3.14159");
assertThat(settings.getDouble("from_float")).isEqualTo(3.14159f, Offset.offset(0.00001));
assertThat(settings.getDouble("from_string")).isEqualTo(3.14159f, Offset.offset(0.00001));
assertThat(settings.getDouble("unknown")).isNull();
} |
public static ValueLabel formatBytes(long bytes) {
return new ValueLabel(bytes, BYTES_UNIT);
} | @Test
public void formatKiloBytes() {
vl = TopoUtils.formatBytes(2_000L);
assertEquals(AM_WM, TopoUtils.Magnitude.KILO, vl.magnitude());
assertEquals(AM_WL, "1.95 KB", vl.toString());
} |
@Override
public Stream<MappingField> resolveAndValidateFields(
boolean isKey,
List<MappingField> userFields,
Map<String, String> options,
InternalSerializationService serializationService
) {
Map<QueryPath, MappingField> fieldsByPath = extractFields(userFields, isKey);
PortableId portableId = getPortableId(fieldsByPath, options, isKey);
ClassDefinition classDefinition = serializationService.getPortableContext()
.lookupClassDefinition(portableId);
// Fallback option for the case when the portable objects have not been de/serialized yet
// and the user did not provide fields explicitly. In this case we try to
// manually create a Portable instance and register its ClassDefinition.
if (userFields.isEmpty() && classDefinition == null) {
SerializationServiceV1 ss = (SerializationServiceV1) serializationService;
// Try to create a Portable instance with the default constructor,
// register its ClassDefinition, and then throw the object away.
var tempPortableObj = ss.getPortableSerializer()
.createNewPortableInstance(portableId.getFactoryId(), portableId.getClassId());
if (tempPortableObj != null) {
try {
ss.getPortableContext().lookupOrRegisterClassDefinition(tempPortableObj);
} catch (Exception e) {
// If the default constructor doesn't make the Portable fields non-null, we're done:
// we can't register the class, so we interrupt the execution with an exception.
throw QueryException.error("Cannot create mapping for Portable type. "
+ "Please, provide the explicit definition for all columns.");
}
classDefinition = serializationService.getPortableContext().lookupClassDefinition(portableId);
}
}
return userFields.isEmpty()
? resolveFields(isKey, classDefinition)
: resolveAndValidateFields(isKey, fieldsByPath, classDefinition);
} | @Test
@Parameters({
"true, __key",
"false, this"
})
public void when_userDeclaresFields_then_fieldsNotAddedFromClassDefinition(boolean key, String prefix) {
InternalSerializationService ss = new DefaultSerializationServiceBuilder().build();
ClassDefinition classDefinition =
new ClassDefinitionBuilder(1, 2, 3)
.addIntField("field1")
.addStringField("field2")
.build();
ss.getPortableContext().registerClassDefinition(classDefinition);
Map<String, String> options = ImmutableMap.of(
(key ? OPTION_KEY_FACTORY_ID : OPTION_VALUE_FACTORY_ID), String.valueOf(classDefinition.getFactoryId()),
(key ? OPTION_KEY_CLASS_ID : OPTION_VALUE_CLASS_ID), String.valueOf(classDefinition.getClassId()),
(key ? OPTION_KEY_CLASS_VERSION : OPTION_VALUE_CLASS_VERSION), String.valueOf(classDefinition.getVersion())
);
Stream<MappingField> resolvedFields = INSTANCE.resolveAndValidateFields(
key,
singletonList(field("field2", QueryDataType.VARCHAR, prefix + ".field2")),
options,
ss
);
assertThat(resolvedFields).containsExactly(
field("field2", QueryDataType.VARCHAR, prefix + ".field2")
);
} |
public static <FnT extends DoFn<?, ?>> DoFnSignature getSignature(Class<FnT> fn) {
return signatureCache.computeIfAbsent(fn, DoFnSignatures::parseSignature);
} | @Test
public void testMismatchingElementType() throws Exception {
DoFnSignature sig =
DoFnSignatures.getSignature(
new DoFn<String, String>() {
@ProcessElement
public void process(@Element Integer element) {}
}.getClass());
assertThat(sig.processElement().extraParameters().size(), equalTo(1));
assertThat(
sig.processElement().extraParameters().get(0), instanceOf(SchemaElementParameter.class));
} |
@Override
public KeyValueIterator<Windowed<K>, V> all() {
return new KeyValueIteratorFacade<>(inner.all());
} | @Test
public void shouldReturnPlainKeyValuePairsOnAll() {
when(mockedKeyValueWindowTimestampIterator.next())
.thenReturn(KeyValue.pair(
new Windowed<>("key1", new TimeWindow(21L, 22L)),
ValueAndTimestamp.make("value1", 22L)))
.thenReturn(KeyValue.pair(
new Windowed<>("key2", new TimeWindow(42L, 43L)),
ValueAndTimestamp.make("value2", 100L)));
when(mockedWindowTimestampStore.all()).thenReturn(mockedKeyValueWindowTimestampIterator);
final KeyValueIterator<Windowed<String>, String> iterator = readOnlyWindowStoreFacade.all();
assertThat(iterator.next(), is(KeyValue.pair(new Windowed<>("key1", new TimeWindow(21L, 22L)), "value1")));
assertThat(iterator.next(), is(KeyValue.pair(new Windowed<>("key2", new TimeWindow(42L, 43L)), "value2")));
} |
void generate(MessageSpec message) throws Exception {
if (message.struct().versions().contains(Short.MAX_VALUE)) {
throw new RuntimeException("Message " + message.name() + " does " +
"not specify a maximum version.");
}
structRegistry.register(message);
schemaGenerator.generateSchemas(message);
messageFlexibleVersions = message.flexibleVersions();
generateClass(Optional.of(message),
message.dataClassName(),
message.struct(),
message.struct().versions());
headerGenerator.generate();
} | @Test
public void testInvalidNullDefaultForNullableStruct() throws Exception {
MessageSpec testMessageSpec = MessageGenerator.JSON_SERDE.readValue(String.join("", Arrays.asList(
"{",
" \"type\": \"request\",",
" \"name\": \"FooBar\",",
" \"validVersions\": \"0\",",
" \"flexibleVersions\": \"none\",",
" \"fields\": [",
" { \"name\": \"struct1\", \"type\": \"MyStruct\", \"versions\": \"0+\", \"nullableVersions\": \"0+\", ",
" \"default\": \"not-null\", \"fields\": [",
" { \"name\": \"field1\", \"type\": \"string\", \"versions\": \"0+\" }",
" ]",
" }",
" ]",
"}")), MessageSpec.class);
assertStringContains("Invalid default for struct field struct1. The only valid default for a struct field " +
"is the empty struct or null",
assertThrows(RuntimeException.class, () -> {
new MessageDataGenerator("org.apache.kafka.common.message").generate(testMessageSpec);
}).getMessage());
} |
@Override
public String toString() {
return "CSV Input ("
+ StringUtils.showControlCharacters(String.valueOf(getFieldDelimiter()))
+ ") "
+ Arrays.toString(getFilePaths());
} | @Test
void testPojoTypeWithMappingInformation() throws Exception {
File tempFile = File.createTempFile("CsvReaderPojoType", "tmp");
tempFile.deleteOnExit();
tempFile.setWritable(true);
OutputStreamWriter wrt = new OutputStreamWriter(new FileOutputStream(tempFile));
wrt.write("123,3.123,AAA,BBB\n");
wrt.write("456,1.123,BBB,AAA\n");
wrt.close();
@SuppressWarnings("unchecked")
PojoTypeInfo<PojoItem> typeInfo =
(PojoTypeInfo<PojoItem>) TypeExtractor.createTypeInfo(PojoItem.class);
CsvInputFormat<PojoItem> inputFormat =
new PojoCsvInputFormat<>(
new Path(tempFile.toURI().toString()),
typeInfo,
new String[] {"field1", "field3", "field2", "field4"});
inputFormat.configure(new Configuration());
FileInputSplit[] splits = inputFormat.createInputSplits(1);
inputFormat.open(splits[0]);
validatePojoItem(inputFormat);
} |
protected Timestamp convertBigNumberToTimestamp( BigDecimal bd ) {
if ( bd == null ) {
return null;
}
return convertIntegerToTimestamp( bd.longValue() );
} | @Test
public void testConvertBigNumberToTimestamp_DefaultMode() throws KettleValueException {
System.setProperty( Const.KETTLE_TIMESTAMP_NUMBER_CONVERSION_MODE,
Const.KETTLE_TIMESTAMP_NUMBER_CONVERSION_MODE_LEGACY );
ValueMetaTimestamp valueMetaTimestamp = new ValueMetaTimestamp();
Timestamp result =
valueMetaTimestamp.convertBigNumberToTimestamp( BigDecimal.valueOf( TIMESTAMP_AS_NANOSECONDS ) );
assertEquals( TIMESTAMP_WITH_NANOSECONDS, result );
System.setProperty( Const.KETTLE_TIMESTAMP_NUMBER_CONVERSION_MODE, "Something invalid!" );
valueMetaTimestamp = new ValueMetaTimestamp();
result = valueMetaTimestamp.convertBigNumberToTimestamp( BigDecimal.valueOf( TIMESTAMP_AS_NANOSECONDS ) );
assertEquals( TIMESTAMP_WITH_NANOSECONDS, result );
} |
@Bean
public CorsFilter corsFilter() {
UrlBasedCorsConfigurationSource source = new UrlBasedCorsConfigurationSource();
CorsConfiguration config = jHipsterProperties.getCors();
if (!CollectionUtils.isEmpty(config.getAllowedOrigins()) || !CollectionUtils.isEmpty(config.getAllowedOriginPatterns())) {
log.debug("Registering CORS filter");
source.registerCorsConfiguration("/api/**", config);
source.registerCorsConfiguration("/management/**", config);
source.registerCorsConfiguration("/v3/api-docs", config);
source.registerCorsConfiguration("/swagger-ui/**", config);
}
return new CorsFilter(source);
} | @Test
void shouldCorsFilterDeactivatedForNullAllowedOrigins() throws Exception {
props.getCors().setAllowedOrigins(null);
MockMvc mockMvc = MockMvcBuilders.standaloneSetup(new WebConfigurerTestController()).addFilters(webConfigurer.corsFilter()).build();
mockMvc
.perform(get("/api/test-cors").header(HttpHeaders.ORIGIN, "other.domain.com"))
.andExpect(status().isOk())
.andExpect(header().doesNotExist(HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN));
} |
public static TableMetadata newTableMetadata(
Schema schema,
PartitionSpec spec,
SortOrder sortOrder,
String location,
Map<String, String> properties) {
int formatVersion =
PropertyUtil.propertyAsInt(
properties, TableProperties.FORMAT_VERSION, DEFAULT_TABLE_FORMAT_VERSION);
return newTableMetadata(
schema, spec, sortOrder, location, persistedProperties(properties), formatVersion);
} | @Test
public void testNoReservedPropertyForTableMetadataCreation() {
Schema schema = new Schema(Types.NestedField.required(10, "x", Types.StringType.get()));
assertThatThrownBy(
() ->
TableMetadata.newTableMetadata(
schema,
PartitionSpec.unpartitioned(),
null,
"/tmp",
ImmutableMap.of(TableProperties.FORMAT_VERSION, "1"),
1))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage(
"Table properties should not contain reserved properties, but got {format-version=1}");
assertThatThrownBy(
() ->
TableMetadata.newTableMetadata(
schema,
PartitionSpec.unpartitioned(),
null,
"/tmp",
ImmutableMap.of(TableProperties.UUID, "uuid"),
1))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Table properties should not contain reserved properties, but got {uuid=uuid}");
} |
@Override
protected void doSaveSubscriberData(SubscriberMetadataIdentifier subscriberMetadataIdentifier, String urls) {
zkClient.createOrUpdate(getNodePath(subscriberMetadataIdentifier), urls, false);
} | @Test
void testDoSaveSubscriberData() throws ExecutionException, InterruptedException {
String interfaceName = "org.apache.dubbo.metadata.store.zookeeper.ZookeeperMetadataReport4TstService";
String version = "1.0.0";
String group = null;
String application = "etc-metadata-report-consumer-test";
String revision = "90980";
String protocol = "xxx";
URL url = generateURL(interfaceName, version, group, application);
SubscriberMetadataIdentifier subscriberMetadataIdentifier =
new SubscriberMetadataIdentifier(application, revision);
String r = JsonUtils.toJson(Arrays.asList(url.toString()));
zookeeperMetadataReport.doSaveSubscriberData(subscriberMetadataIdentifier, r);
String fileContent = zookeeperMetadataReport.zkClient.getContent(
zookeeperMetadataReport.getNodePath(subscriberMetadataIdentifier));
Assertions.assertNotNull(fileContent);
Assertions.assertEquals(fileContent, r);
} |
public String getContext() {
return context;
} | @Test
public void testBuilderAppendIfAbsent() {
Configuration conf = new Configuration();
conf.set(HADOOP_CALLER_CONTEXT_SEPARATOR_KEY, "$");
CallerContext.Builder builder = new CallerContext.Builder(null, conf);
builder.append("key1", "value1");
Assert.assertEquals("key1:value1",
builder.build().getContext());
// Append an existed key with different value.
builder.appendIfAbsent("key1", "value2");
String[] items = builder.build().getContext().split("\\$");
Assert.assertEquals(1, items.length);
Assert.assertEquals("key1:value1",
builder.build().getContext());
// Append an absent key.
builder.appendIfAbsent("key2", "value2");
String[] items2 = builder.build().getContext().split("\\$");
Assert.assertEquals(2, items2.length);
Assert.assertEquals("key1:value1$key2:value2",
builder.build().getContext());
// Append a key that is a substring of an existing key.
builder.appendIfAbsent("key", "value");
String[] items3 = builder.build().getContext().split("\\$");
Assert.assertEquals(3, items3.length);
Assert.assertEquals("key1:value1$key2:value2$key:value",
builder.build().getContext());
} |
public void onPeriodicEmit() {
updateCombinedWatermark();
} | @Test
void deferredOutputDoesNotImmediatelyAdvanceWatermark() {
TestingWatermarkOutput underlyingWatermarkOutput = createTestingWatermarkOutput();
WatermarkOutputMultiplexer multiplexer =
new WatermarkOutputMultiplexer(underlyingWatermarkOutput);
WatermarkOutput watermarkOutput1 = createDeferredOutput(multiplexer);
WatermarkOutput watermarkOutput2 = createDeferredOutput(multiplexer);
watermarkOutput1.emitWatermark(new Watermark(0));
watermarkOutput2.emitWatermark(new Watermark(1));
assertThat(underlyingWatermarkOutput.lastWatermark()).isNull();
multiplexer.onPeriodicEmit();
assertThat(underlyingWatermarkOutput.lastWatermark()).isEqualTo(new Watermark(0));
} |
@Override
public List<String> selectList(String text) {
List<String> results = new ArrayList<String>();
for (Selector selector : selectors) {
List<String> strings = selector.selectList(text);
results.addAll(strings);
}
return results;
} | @Test
public void testSelectList() {
String htmlContent = "<!DOCTYPE html>\n" +
"<html lang=\"en\">\n" +
"<head>\n" +
" <meta charset=\"UTF-8\">\n" +
" <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n" +
" <title>HTML with XPath</title>\n" +
"</head>\n" +
"<body>\n" +
" <div class=\"container\">\n" +
" <div class=\"item1\">Item 1</div>\n" +
" <div class=\"item2\">Item 2</div>\n" +
" </div>\n" +
"</body>\n" +
"</html>";
String expectedResult = "[<head>\n" +
" <meta charset=\"UTF-8\">\n" +
" <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n" +
" <title>HTML with XPath</title>\n" +
"</head>, <div class=\"item1\">\n" +
" Item 1\n" +
"</div>, <div class=\"item2\">\n" +
" Item 2\n" +
"</div>]";
List<Selector> selectors = new ArrayList<Selector>();
selectors.add(new CssSelector("head"));
selectors.add(new XpathSelector("//div[@class='item1']"));
selectors.add(new XpathSelector("//div[@class='item2']"));
OrSelector orSelector = new OrSelector(selectors);
List<String> result = orSelector.selectList(htmlContent);
assertEquals(expectedResult, result.toString());
} |
public static <T> T getProxy(Class<T> protocol,
long clientVersion,
InetSocketAddress addr, Configuration conf,
SocketFactory factory) throws IOException {
return getProtocolProxy(
protocol, clientVersion, addr, conf, factory).getProxy();
} | @Test
public void testClientWithoutServer() throws Exception {
TestRpcService proxy;
short invalidPort = 20;
InetSocketAddress invalidAddress = new InetSocketAddress(ADDRESS,
invalidPort);
long invalidClientVersion = 1L;
try {
proxy = RPC.getProxy(TestRpcService.class,
invalidClientVersion, invalidAddress, conf);
// Test echo method
proxy.echo(null, newEchoRequest("hello"));
fail("We should not have reached here");
} catch (ServiceException ioe) {
//this is what we expected
if (!(ioe.getCause() instanceof ConnectException)) {
fail("We should not have reached here");
}
}
} |
@NonNull
public String processShownotes() {
String shownotes = rawShownotes;
if (TextUtils.isEmpty(shownotes)) {
Log.d(TAG, "shownotesProvider contained no shownotes. Returning 'no shownotes' message");
shownotes = "<html><head></head><body><p id='apNoShownotes'>" + noShownotesLabel + "</p></body></html>";
}
// replace ASCII line breaks with HTML ones if shownotes don't contain HTML line breaks already
if (!LINE_BREAK_REGEX.matcher(shownotes).find() && !shownotes.contains("<p>")) {
shownotes = shownotes.replace("\n", "<br />");
}
Document document = Jsoup.parse(shownotes);
cleanCss(document);
document.head().appendElement("style").attr("type", "text/css").text(webviewStyle);
addTimecodes(document);
return document.toString();
} | @Test
public void testProcessShownotesAddTimecodeMmssNoChapters() {
final String timeStr = "10:11";
final long time = 10 * 60 * 1000 + 11 * 1000;
String shownotes = "<p> Some test text with a timecode " + timeStr + " here.</p>";
ShownotesCleaner t = new ShownotesCleaner(context, shownotes, 11 * 60 * 1000);
String res = t.processShownotes();
checkLinkCorrect(res, new long[]{time}, new String[]{timeStr});
} |
public static char[] asciiBytesToChar(byte[] bytes) {
char[] chars = new char[bytes.length];
for (int i = 0; i < bytes.length; i++) {
chars[i] = (char) bytes[i];
bytes[i] = '\0';
}
return chars;
} | @Test
public void testAsciiBytesToChar() {
byte[] asciiBytes = asciiString.getBytes(StandardCharsets.US_ASCII);
char[] asciiChars = SecretStoreUtil.asciiBytesToChar(asciiBytes);
assertThat(asciiChars).isEqualTo(asciiString.toCharArray());
assertThat(asciiBytes).containsOnly('\0');
} |
public static boolean equals(FlatRecordTraversalObjectNode left, FlatRecordTraversalObjectNode right) {
if (left == null && right == null) {
return true;
}
if (left == null || right == null) {
return false;
}
if (!left.getSchema().getName().equals(right.getSchema().getName())) {
return false;
}
extractCommonObjectSchema(left, right);
return compare(left, right);
} | @Test
public void differentSet() {
SimpleHollowDataset dataset = SimpleHollowDataset.fromClassDefinitions(Movie.class);
FakeHollowSchemaIdentifierMapper idMapper = new FakeHollowSchemaIdentifierMapper(dataset);
HollowObjectMapper objMapper = new HollowObjectMapper(HollowWriteStateCreator.createWithSchemas(dataset.getSchemas()));
FlatRecordWriter flatRecordWriter = new FlatRecordWriter(dataset, idMapper);
Movie movie1 = new Movie();
movie1.countries = new HashSet<>();
movie1.countries.add(new Country("US"));
movie1.countries.add(new Country("CA"));
Movie movie2 = new Movie();
movie2.countries = new HashSet<>();
movie2.countries.add(new Country("US"));
movie2.countries.add(new Country("CB"));
flatRecordWriter.reset();
objMapper.writeFlat(movie1, flatRecordWriter);
FlatRecord flatRecord1 = flatRecordWriter.generateFlatRecord();
flatRecordWriter.reset();
objMapper.writeFlat(movie2, flatRecordWriter);
FlatRecord flatRecord2 = flatRecordWriter.generateFlatRecord();
Assertions.assertThat(FlatRecordTraversalObjectNodeEquality.equals(new FlatRecordTraversalObjectNode(flatRecord1), new FlatRecordTraversalObjectNode(flatRecord2))).isFalse();
} |
@Override
@CacheEvict(cacheNames = "ai:video:config", key = "#updateReqVO.type")
public void updateAiVideoConfig(AiVideoConfigUpdateReqVO updateReqVO) {
// validate that the record exists
validateAiVideoConfigExists(updateReqVO.getId());
// update
AiVideoConfigDO updateObj = AiVideoConfigConvert.INSTANCE.convert(updateReqVO);
aiVideoConfigMapper.updateById(updateObj);
} | @Test
public void testUpdateAiVideoConfig_success() {
// mock data
AiVideoConfigDO dbAiVideoConfig = randomPojo(AiVideoConfigDO.class);
aiVideoConfigMapper.insert(dbAiVideoConfig); // @Sql: insert an existing record first
// prepare parameters
AiVideoConfigUpdateReqVO reqVO = randomPojo(AiVideoConfigUpdateReqVO.class, o -> {
o.setId(dbAiVideoConfig.getId()); // set the ID of the record to update
});
// invoke
aiVideoConfigService.updateAiVideoConfig(reqVO);
// verify that the update was applied correctly
AiVideoConfigDO aiVideoConfig = aiVideoConfigMapper.selectById(reqVO.getId()); // fetch the latest record
assertPojoEquals(reqVO, aiVideoConfig);
} |
public boolean fence(HAServiceTarget fromSvc) {
return fence(fromSvc, null);
} | @Test
public void testMultipleFencers() throws BadFencingConfigurationException {
NodeFencer fencer = setupFencer(
AlwaysSucceedFencer.class.getName() + "(foo)\n" +
AlwaysSucceedFencer.class.getName() + "(bar)\n");
assertTrue(fencer.fence(MOCK_TARGET));
// Only one call, since the first fencer succeeds
assertEquals(1, AlwaysSucceedFencer.fenceCalled);
assertEquals("foo", AlwaysSucceedFencer.callArgs.get(0));
} |
public static CustomWeighting.Parameters createWeightingParameters(CustomModel customModel, EncodedValueLookup lookup) {
String key = customModel.toString();
Class<?> clazz = customModel.isInternal() ? INTERNAL_CACHE.get(key) : null;
if (CACHE_SIZE > 0 && clazz == null)
clazz = CACHE.get(key);
if (clazz == null) {
clazz = createClazz(customModel, lookup);
if (customModel.isInternal()) {
INTERNAL_CACHE.put(key, clazz);
if (INTERNAL_CACHE.size() > 100) {
CACHE.putAll(INTERNAL_CACHE);
INTERNAL_CACHE.clear();
LoggerFactory.getLogger(CustomModelParser.class).warn("Internal cache must stay small but was "
+ INTERNAL_CACHE.size() + ". Cleared it. Misuse of CustomModel::internal?");
}
} else if (CACHE_SIZE > 0) {
CACHE.put(key, clazz);
}
}
try {
// The class does not need to be thread-safe as we create an instance per request
CustomWeightingHelper prio = (CustomWeightingHelper) clazz.getDeclaredConstructor().newInstance();
prio.init(customModel, lookup, CustomModel.getAreasAsMap(customModel.getAreas()));
return new CustomWeighting.Parameters(
prio::getSpeed, prio::calcMaxSpeed,
prio::getPriority, prio::calcMaxPriority,
customModel.getDistanceInfluence() == null ? 0 : customModel.getDistanceInfluence(),
customModel.getHeadingPenalty() == null ? Parameters.Routing.DEFAULT_HEADING_PENALTY : customModel.getHeadingPenalty());
} catch (ReflectiveOperationException ex) {
throw new IllegalArgumentException("Cannot compile expression " + ex.getMessage(), ex);
}
} | @Test
void setPriorityForRoadClass() {
CustomModel customModel = new CustomModel();
customModel.addToPriority(If("road_class == PRIMARY", MULTIPLY, "0.5"));
customModel.addToSpeed(If("true", LIMIT, "100"));
CustomWeighting.EdgeToDoubleMapping priorityMapping = CustomModelParser.createWeightingParameters(customModel, encodingManager).getEdgeToPriorityMapping();
BaseGraph graph = new BaseGraph.Builder(encodingManager).create();
EdgeIteratorState edge1 = graph.edge(0, 1).setDistance(100).set(roadClassEnc, RoadClass.PRIMARY);
EdgeIteratorState edge2 = graph.edge(1, 2).setDistance(100).set(roadClassEnc, RoadClass.SECONDARY);
assertEquals(0.5, priorityMapping.get(edge1, false), 1.e-6);
assertEquals(1.0, priorityMapping.get(edge2, false), 1.e-6);
} |
public ProtocolBuilder threadpool(String threadpool) {
this.threadpool = threadpool;
return getThis();
} | @Test
void threadpool() {
ProtocolBuilder builder = new ProtocolBuilder();
builder.threadpool("mockthreadpool");
Assertions.assertEquals("mockthreadpool", builder.build().getThreadpool());
} |
public Properties getProperties()
{
return properties;
} | @Test
public void testUriWithSslEnabledPathOnly()
throws SQLException
{
PrestoDriverUri parameters = createDriverUri("presto://localhost:8080/blackhole?SSL=true&SSLTrustStorePath=truststore.jks");
assertUriPortScheme(parameters, 8080, "https");
Properties properties = parameters.getProperties();
assertEquals(properties.getProperty(SSL_TRUST_STORE_PATH.getKey()), "truststore.jks");
assertNull(properties.getProperty(SSL_TRUST_STORE_PASSWORD.getKey()));
} |
@Override
@MethodNotAvailable
public V replace(K key, V newValue) {
throw new MethodNotAvailableException();
} | @Test(expected = MethodNotAvailableException.class)
public void testReplace() {
adapter.replace(23, "value");
} |
public boolean isEnabled() {
return enabled;
} | @Test
public void testWebsocketSyncPropertiesDefaultValue() {
assertThat(new WebsocketSyncProperties().isEnabled(), is(true));
} |
@VisibleForTesting
static void validateWorkerSettings(DataflowPipelineWorkerPoolOptions workerOptions) {
DataflowPipelineOptions dataflowOptions = workerOptions.as(DataflowPipelineOptions.class);
validateSdkContainerImageOptions(workerOptions);
GcpOptions gcpOptions = workerOptions.as(GcpOptions.class);
Preconditions.checkArgument(
gcpOptions.getZone() == null || gcpOptions.getWorkerRegion() == null,
"Cannot use option zone with workerRegion. Prefer either workerZone or workerRegion.");
Preconditions.checkArgument(
gcpOptions.getZone() == null || gcpOptions.getWorkerZone() == null,
"Cannot use option zone with workerZone. Prefer workerZone.");
Preconditions.checkArgument(
gcpOptions.getWorkerRegion() == null || gcpOptions.getWorkerZone() == null,
"workerRegion and workerZone options are mutually exclusive.");
boolean hasExperimentWorkerRegion = false;
if (dataflowOptions.getExperiments() != null) {
for (String experiment : dataflowOptions.getExperiments()) {
if (experiment.startsWith("worker_region")) {
hasExperimentWorkerRegion = true;
break;
}
}
}
Preconditions.checkArgument(
!hasExperimentWorkerRegion || gcpOptions.getWorkerRegion() == null,
"Experiment worker_region and option workerRegion are mutually exclusive.");
Preconditions.checkArgument(
!hasExperimentWorkerRegion || gcpOptions.getWorkerZone() == null,
"Experiment worker_region and option workerZone are mutually exclusive.");
if (gcpOptions.getZone() != null) {
LOG.warn("Option --zone is deprecated. Please use --workerZone instead.");
gcpOptions.setWorkerZone(gcpOptions.getZone());
gcpOptions.setZone(null);
}
} | @Test
public void testZoneAndWorkerRegionMutuallyExclusive() {
DataflowPipelineWorkerPoolOptions options =
PipelineOptionsFactory.as(DataflowPipelineWorkerPoolOptions.class);
options.setZone("us-east1-b");
options.setWorkerRegion("us-east1");
assertThrows(
IllegalArgumentException.class, () -> DataflowRunner.validateWorkerSettings(options));
} |
public FEELFnResult<List<Object>> invoke(@ParameterName("list") Object[] lists) {
if ( lists == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "lists", "cannot be null"));
}
final Set<Object> resultSet = new LinkedHashSet<>();
for ( final Object list : lists ) {
if ( list instanceof Collection ) {
resultSet.addAll((Collection) list);
} else {
resultSet.add(list);
}
}
// spec requires us to return a new list
return FEELFnResult.ofResult( new ArrayList<>(resultSet) );
} | @Test
void invokeMixedTypes() {
FunctionTestUtil.assertResultList(
unionFunction.invoke(new Object[]{"test", Arrays.asList(10, "test", 5), BigDecimal.TEN}),
Arrays.asList("test", 10, 5, BigDecimal.TEN));
} |
@Override
public void close() throws IOException {
InputFileBlockHolder.unset();
// close the current iterator
this.currentIterator.close();
// exhaust the task iterator
while (tasks.hasNext()) {
tasks.next();
}
} | @Test
public void testClosureWithoutAnyRead() throws IOException {
Integer totalTasks = 10;
Integer recordPerTask = 10;
List<FileScanTask> tasks = createFileScanTasks(totalTasks, recordPerTask);
ClosureTrackingReader reader = new ClosureTrackingReader(table, tasks);
reader.close();
tasks.forEach(
t ->
assertThat(reader.hasIterator(t))
.as("Iterator should not be created eagerly for tasks")
.isFalse());
} |
@POST
@Timed
@ApiOperation(
value = "Launch input on this node",
response = InputCreated.class
)
@ApiResponses(value = {
@ApiResponse(code = 404, message = "No such input type registered"),
@ApiResponse(code = 400, message = "Missing or invalid configuration"),
@ApiResponse(code = 400, message = "Type is exclusive and already has input running")
})
@RequiresPermissions(RestPermissions.INPUTS_CREATE)
@AuditEvent(type = AuditEventTypes.MESSAGE_INPUT_CREATE)
public Response create(@ApiParam(name = "JSON body", required = true)
@Valid @NotNull InputCreateRequest lr) throws ValidationException {
try {
throwBadRequestIfNotGlobal(lr);
// TODO Configuration type values need to be checked. See ConfigurationMapConverter.convertValues()
final MessageInput messageInput = messageInputFactory.create(lr, getCurrentUser().getName(), lr.node());
if (config.isCloud() && !messageInput.isCloudCompatible()) {
throw new BadRequestException(String.format(Locale.ENGLISH,
"The input type <%s> is not allowed in the cloud environment!", lr.type()));
}
messageInput.checkConfiguration();
final Input input = this.inputService.create(messageInput.asMap());
final String newId = inputService.save(input);
final URI inputUri = getUriBuilderToSelf().path(InputsResource.class)
.path("{inputId}")
.build(newId);
return Response.created(inputUri).entity(InputCreated.create(newId)).build();
} catch (NoSuchInputTypeException e) {
LOG.error("There is no such input type registered.", e);
throw new NotFoundException("There is no such input type registered.", e);
} catch (ConfigurationException e) {
LOG.error("Missing or invalid input configuration.", e);
throw new BadRequestException("Missing or invalid input configuration.", e);
}
} | @Test
public void testCreateNotGlobalInputInCloud() {
when(configuration.isCloud()).thenReturn(true);
when(inputCreateRequest.global()).thenReturn(false);
assertThatThrownBy(() -> inputsResource.create(inputCreateRequest)).isInstanceOf(BadRequestException.class)
.hasMessageContaining("Only global inputs");
} |
public void initialize(ConnectorContext ctx) {
context = ctx;
} | @Test
public void shouldInitializeContextWithTaskConfigs() {
List<Map<String, String>> taskConfigs = new ArrayList<>();
connector.initialize(context, taskConfigs);
assertableConnector.assertInitialized();
assertableConnector.assertContext(context);
assertableConnector.assertTaskConfigs(taskConfigs);
} |
public static BadRequestException create(String... errorMessages) {
return create(asList(errorMessages));
} | @Test
public void text_error() {
BadRequestException exception = BadRequestException.create("error");
assertThat(exception.getMessage()).isEqualTo("error");
} |
public abstract Duration parse(String text); | @Test
public void testLongCombined() {
Assert.assertEquals(Duration.parse("P2DT3H4M5S"), DurationStyle.LONG.parse("2 days 3 Hours\t 4 minute 5 seconds"));
} |
@VisibleForTesting
Object evaluate(final GenericRow row) {
return term.getValue(new TermEvaluationContext(row));
} | @Test
public void shouldEvaluateLikePredicate() {
// Given:
final Expression expression1 = new LikePredicate(
new StringLiteral("catdog"), new StringLiteral("ca%og"), Optional.empty()
);
final Expression expression2 = new LikePredicate(
new StringLiteral("cat%og"), new StringLiteral("cat\\%og"), Optional.empty()
);
final Expression expression3 = new LikePredicate(
new StringLiteral("cat%og"), new StringLiteral("cat\\%og"), Optional.of('\\')
);
// When:
InterpretedExpression interpreter1 = interpreter(expression1);
InterpretedExpression interpreter2 = interpreter(expression2);
InterpretedExpression interpreter3 = interpreter(expression3);
// Then:
assertThat(interpreter1.evaluate(ROW), is(true));
assertThat(interpreter2.evaluate(ROW), is(false));
assertThat(interpreter3.evaluate(ROW), is(true));
} |
@Override
public Set<RuleDescriptionSectionDto> generateSections(RulesDefinition.Rule rule) {
return getDescriptionInHtml(rule)
.map(this::generateSections)
.orElse(emptySet());
} | @Test
public void parse_returns_all_empty_fields_when_empty_description() {
when(rule.htmlDescription()).thenReturn("");
Set<RuleDescriptionSectionDto> results = generator.generateSections(rule);
assertThat(results).isEmpty();
} |
@Deactivate
public void deactivate() {
deviceService.removeListener(deviceListener);
store.unsetDelegate(delegate);
eventDispatcher.removeSink(AlarmEvent.class);
log.info("Stopped");
} | @Test
public void deactivate() throws Exception {
providerService.updateAlarmList(DEVICE_ID, ImmutableSet.of(ALARM_B, ALARM_A));
verifyGettingSetsOfAlarms(manager, 2, 2);
alarmStore.deactivate();
manager.removeListener(listener);
manager.deactivate();
NetTestTools.injectEventDispatcher(manager, null);
assertFalse("Store should not have delegate", alarmStore.hasDelegate());
} |
@Override
public ResourceReconcileResult tryReconcileClusterResources(
TaskManagerResourceInfoProvider taskManagerResourceInfoProvider) {
ResourceReconcileResult.Builder builder = ResourceReconcileResult.builder();
List<TaskManagerInfo> taskManagersIdleTimeout = new ArrayList<>();
List<TaskManagerInfo> taskManagersNonTimeout = new ArrayList<>();
long currentTime = System.currentTimeMillis();
taskManagerResourceInfoProvider
.getRegisteredTaskManagers()
.forEach(
taskManagerInfo -> {
if (taskManagerInfo.isIdle()
&& currentTime - taskManagerInfo.getIdleSince()
>= taskManagerTimeout.toMilliseconds()) {
taskManagersIdleTimeout.add(taskManagerInfo);
} else {
taskManagersNonTimeout.add(taskManagerInfo);
}
});
List<PendingTaskManager> pendingTaskManagersNonUse = new ArrayList<>();
List<PendingTaskManager> pendingTaskManagersInuse = new ArrayList<>();
taskManagerResourceInfoProvider
.getPendingTaskManagers()
.forEach(
pendingTaskManager -> {
if (pendingTaskManager.getPendingSlotAllocationRecords().isEmpty()) {
pendingTaskManagersNonUse.add(pendingTaskManager);
} else {
pendingTaskManagersInuse.add(pendingTaskManager);
}
});
ResourceProfile resourcesToKeep = ResourceProfile.ZERO;
ResourceProfile resourcesInTotal = ResourceProfile.ZERO;
boolean resourceFulfilled = false;
// check whether the available resources of used (pending) task managers are enough.
ResourceProfile resourcesAvailableOfNonIdle =
getAvailableResourceOfTaskManagers(taskManagersNonTimeout);
ResourceProfile resourcesInTotalOfNonIdle =
getTotalResourceOfTaskManagers(taskManagersNonTimeout);
resourcesToKeep = resourcesToKeep.merge(resourcesAvailableOfNonIdle);
resourcesInTotal = resourcesInTotal.merge(resourcesInTotalOfNonIdle);
if (isRequiredResourcesFulfilled(resourcesToKeep, resourcesInTotal)) {
resourceFulfilled = true;
} else {
ResourceProfile resourcesAvailableOfNonIdlePendingTaskManager =
getAvailableResourceOfPendingTaskManagers(pendingTaskManagersInuse);
ResourceProfile resourcesInTotalOfNonIdlePendingTaskManager =
getTotalResourceOfPendingTaskManagers(pendingTaskManagersInuse);
resourcesToKeep = resourcesToKeep.merge(resourcesAvailableOfNonIdlePendingTaskManager);
resourcesInTotal = resourcesInTotal.merge(resourcesInTotalOfNonIdlePendingTaskManager);
}
// try to reserve or release unused (pending) task managers
for (TaskManagerInfo taskManagerInfo : taskManagersIdleTimeout) {
if (resourceFulfilled
|| isRequiredResourcesFulfilled(resourcesToKeep, resourcesInTotal)) {
resourceFulfilled = true;
builder.addTaskManagerToRelease(taskManagerInfo);
} else {
resourcesToKeep = resourcesToKeep.merge(taskManagerInfo.getAvailableResource());
resourcesInTotal = resourcesInTotal.merge(taskManagerInfo.getTotalResource());
}
}
for (PendingTaskManager pendingTaskManager : pendingTaskManagersNonUse) {
if (resourceFulfilled
|| isRequiredResourcesFulfilled(resourcesToKeep, resourcesInTotal)) {
resourceFulfilled = true;
builder.addPendingTaskManagerToRelease(pendingTaskManager);
} else {
resourcesToKeep = resourcesToKeep.merge(pendingTaskManager.getUnusedResource());
resourcesInTotal =
resourcesInTotal.merge(pendingTaskManager.getTotalResourceProfile());
}
}
if (!resourceFulfilled) {
// fulfill required resources
tryFulFillRequiredResourcesWithAction(
resourcesToKeep, resourcesInTotal, builder::addPendingTaskManagerToAllocate);
}
return builder.build();
} | @Test
void testIdlePendingTaskManagerShouldBeReleased() {
final PendingTaskManager pendingTaskManager =
new PendingTaskManager(DEFAULT_SLOT_RESOURCE, 1);
final TaskManagerResourceInfoProvider taskManagerResourceInfoProvider =
TestingTaskManagerResourceInfoProvider.newBuilder()
.setPendingTaskManagersSupplier(
() -> Collections.singleton(pendingTaskManager))
.build();
ResourceReconcileResult result =
ANY_MATCHING_STRATEGY.tryReconcileClusterResources(taskManagerResourceInfoProvider);
assertThat(result.getPendingTaskManagersToRelease()).containsExactly(pendingTaskManager);
} |
static Collection<Method> getAllMethods(Class<?> owner, Predicate<? super Method> predicate) {
return getAll(owner, Class::getDeclaredMethods).filter(predicate).collect(toList());
} | @Test
public void getAllMethods() {
Collection<Method> methods = ReflectionUtils.getAllMethods(Child.class, named("overrideMe"));
assertThat(methods).containsOnly(
method(Child.class, "overrideMe"),
method(UpperMiddle.class, "overrideMe"),
method(LowerMiddle.class, "overrideMe"),
method(Parent.class, "overrideMe")
);
} |
public Type type() {
return pir != null ? Type.trTCM :
ebs != null ? Type.srTCM : Type.sr2CM;
} | @Test
public void testType() {
BandwidthProfile.Builder bwProfileBuilder = BandwidthProfile.builder()
.name("profile")
.cir(Bandwidth.bps(ONE_M))
.cbs((int) ONE_K)
.greenAction(getBuilder(Action.PASS).build())
.redAction(getBuilder(Action.DISCARD).build())
.colorAware(false);
assertEquals("wrong bw profile type",
bwProfileBuilder.build().type(),
BandwidthProfile.Type.sr2CM);
bwProfileBuilder.ebs((int) TWO_K)
.yellowAction(getBuilder(Action.REMARK)
.dscpClass(DscpClass.AF11)
.build());
assertEquals("wrong bw profile type",
bwProfileBuilder.build().type(),
BandwidthProfile.Type.srTCM);
bwProfileBuilder.ebs(null);
bwProfileBuilder.pir(Bandwidth.bps(TEN_M))
.pbs((int) TWO_K);
assertEquals("wrong bw profile type",
bwProfileBuilder.build().type(),
BandwidthProfile.Type.trTCM);
} |
@Override
public int getPriority() {
// if a server ability manager exists, you should choose the server one
return 0;
} | @Test
void testGetPriority() {
assertEquals(0, clientAbilityControlManager.getPriority());
} |
public String getString(String path) {
return ObjectConverter.convertObjectTo(get(path), String.class);
} | @Test
public void
parses_json_document_with_attribute_name_equal_to_properties() {
// Given
final String jsonWithPropertyAttribute = "[{\"properties\":\"test\"}]"; // properties is a reserved word in Groovy
// When
final String value = new JsonPath(jsonWithPropertyAttribute).getString("[0].properties");
// Then
assertThat(value, equalTo("test"));
} |
public static Boolean judge(final ConditionData conditionData, final String realData) {
if (Objects.isNull(conditionData) || StringUtils.isBlank(conditionData.getOperator())) {
return false;
}
PredicateJudge predicateJudge = newInstance(conditionData.getOperator());
if (!(predicateJudge instanceof BlankPredicateJudge) && StringUtils.isBlank(realData)) {
return false;
}
return predicateJudge.judge(conditionData, realData);
} | @Test
public void testPathPatternJudge() {
conditionData.setOperator(OperatorEnum.PATH_PATTERN.getAlias());
conditionData.setParamValue("/http/**");
assertTrue(PredicateJudgeFactory.judge(conditionData, "/http/**"));
assertTrue(PredicateJudgeFactory.judge(conditionData, "/http/test"));
assertTrue(PredicateJudgeFactory.judge(conditionData, "/http/test/test"));
assertFalse(PredicateJudgeFactory.judge(conditionData, "/http1/**"));
} |
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException {
// Add policies to all HTTP headers
for (String header : this.cspHeaders) {
((HttpServletResponse) response).setHeader(header, this.policies);
}
chain.doFilter(request, response);
} | @Test
public void set_content_security_headers() throws Exception {
doInit();
HttpServletRequest request = newRequest("/");
underTest.doFilter(request, response, chain);
verify(response).setHeader("Content-Security-Policy", EXPECTED);
verify(response).setHeader("X-Content-Security-Policy", EXPECTED);
verify(response).setHeader("X-WebKit-CSP", EXPECTED);
verify(chain).doFilter(request, response);
} |
public void getFields( RowMetaInterface row, String name, RowMetaInterface[] info, StepMeta nextStep,
VariableSpace space, Repository repository, IMetaStore metaStore ) {
// No values are added to the row in this type of step
// However, in the case of fixed-length records,
// the field precisions and lengths are altered!
for ( int i = 0; i < outputFields.length; i++ ) {
XMLField field = outputFields[i];
ValueMetaInterface v = row.searchValueMeta( field.getFieldName() );
if ( v != null ) {
v.setLength( field.getLength(), field.getPrecision() );
}
}
} | @Test
public void testGetFields() throws Exception {
XMLOutputMeta xmlOutputMeta = new XMLOutputMeta();
xmlOutputMeta.setDefault();
XMLField xmlField = new XMLField();
xmlField.setFieldName( "aField" );
xmlField.setLength( 10 );
xmlField.setPrecision( 3 );
xmlOutputMeta.setOutputFields( new XMLField[] { xmlField } );
RowMetaInterface row = mock( RowMetaInterface.class );
RowMetaInterface rmi = mock( RowMetaInterface.class );
StepMeta nextStep = mock( StepMeta.class );
Repository repo = mock( Repository.class );
IMetaStore metastore = mock( IMetaStore.class );
ValueMetaInterface vmi = mock( ValueMetaInterface.class );
when( row.searchValueMeta( "aField" ) ).thenReturn( vmi );
xmlOutputMeta.getFields( row, "", new RowMetaInterface[] { rmi }, nextStep, new Variables(), repo, metastore );
verify( vmi ).setLength( 10, 3 );
} |
public static CommandExecutor newInstance(final CommandPacketType commandPacketType, final PostgreSQLCommandPacket commandPacket,
final ConnectionSession connectionSession, final PortalContext portalContext) throws SQLException {
if (commandPacket instanceof SQLReceivedPacket) {
log.debug("Execute packet type: {}, sql: {}", commandPacketType, ((SQLReceivedPacket) commandPacket).getSQL());
} else {
log.debug("Execute packet type: {}", commandPacketType);
}
if (!(commandPacket instanceof PostgreSQLAggregatedCommandPacket)) {
return getCommandExecutor(commandPacketType, commandPacket, connectionSession, portalContext);
}
PostgreSQLAggregatedCommandPacket aggregatedCommandPacket = (PostgreSQLAggregatedCommandPacket) commandPacket;
if (aggregatedCommandPacket.isContainsBatchedStatements() && aggregatedCommandPacket.getPackets().stream().noneMatch(OpenGaussComBatchBindPacket.class::isInstance)) {
return new PostgreSQLAggregatedCommandExecutor(getExecutorsOfAggregatedBatchedStatements(aggregatedCommandPacket, connectionSession, portalContext));
}
List<CommandExecutor> result = new ArrayList<>(aggregatedCommandPacket.getPackets().size());
for (PostgreSQLCommandPacket each : aggregatedCommandPacket.getPackets()) {
result.add(getCommandExecutor((CommandPacketType) each.getIdentifier(), each, connectionSession, portalContext));
}
return new PostgreSQLAggregatedCommandExecutor(result);
} | @Test
void assertAggregatedPacketNotBatchedStatements() throws SQLException {
PostgreSQLComParsePacket parsePacket = mock(PostgreSQLComParsePacket.class);
when(parsePacket.getIdentifier()).thenReturn(PostgreSQLCommandPacketType.PARSE_COMMAND);
PostgreSQLComFlushPacket flushPacket = new PostgreSQLComFlushPacket(new PostgreSQLPacketPayload(Unpooled.wrappedBuffer(new byte[4]), StandardCharsets.UTF_8));
PostgreSQLComBindPacket bindPacket = mock(PostgreSQLComBindPacket.class);
when(bindPacket.getIdentifier()).thenReturn(PostgreSQLCommandPacketType.BIND_COMMAND);
PostgreSQLComDescribePacket describePacket = mock(PostgreSQLComDescribePacket.class);
when(describePacket.getIdentifier()).thenReturn(PostgreSQLCommandPacketType.DESCRIBE_COMMAND);
PostgreSQLComExecutePacket executePacket = mock(PostgreSQLComExecutePacket.class);
when(executePacket.getIdentifier()).thenReturn(PostgreSQLCommandPacketType.EXECUTE_COMMAND);
PostgreSQLComSyncPacket syncPacket = mock(PostgreSQLComSyncPacket.class);
when(syncPacket.getIdentifier()).thenReturn(PostgreSQLCommandPacketType.SYNC_COMMAND);
PostgreSQLAggregatedCommandPacket packet = mock(PostgreSQLAggregatedCommandPacket.class);
when(packet.isContainsBatchedStatements()).thenReturn(false);
when(packet.getPackets()).thenReturn(Arrays.asList(parsePacket, flushPacket, bindPacket, describePacket, executePacket, syncPacket));
CommandExecutor actual = OpenGaussCommandExecutorFactory.newInstance(null, packet, connectionSession, portalContext);
assertThat(actual, instanceOf(PostgreSQLAggregatedCommandExecutor.class));
Iterator<CommandExecutor> actualPacketsIterator = getExecutorsFromAggregatedCommandExecutor((PostgreSQLAggregatedCommandExecutor) actual).iterator();
assertThat(actualPacketsIterator.next(), instanceOf(PostgreSQLComParseExecutor.class));
assertThat(actualPacketsIterator.next(), instanceOf(PostgreSQLComFlushExecutor.class));
assertThat(actualPacketsIterator.next(), instanceOf(PostgreSQLComBindExecutor.class));
assertThat(actualPacketsIterator.next(), instanceOf(PostgreSQLComDescribeExecutor.class));
assertThat(actualPacketsIterator.next(), instanceOf(PostgreSQLComExecuteExecutor.class));
assertThat(actualPacketsIterator.next(), instanceOf(PostgreSQLComSyncExecutor.class));
assertFalse(actualPacketsIterator.hasNext());
} |
public List<Node> readNodes() {
CachingCurator.Session session = db.getSession();
return session.getChildren(nodesPath).stream()
.flatMap(hostname -> readNode(session, hostname).stream())
.toList();
} | @Test
public void can_read_stored_host_information() throws Exception {
String zkline = "{\"hostname\":\"host1\",\"state\":\"ready\",\"ipAddresses\":[\"127.0.0.1\"],\"additionalIpAddresses\":[\"127.0.0.2\"],\"openStackId\":\"7951bb9d-3989-4a60-a21c-13690637c8ea\",\"flavor\":\"default\",\"created\":1421054425159, \"type\":\"host\"}";
curator.framework().create().creatingParentsIfNeeded().forPath("/provision/v1/nodes/host1", zkline.getBytes());
List<Node> allocatedNodes = zkClient.readNodes();
assertEquals(1, allocatedNodes.size());
assertEquals(NodeType.host, allocatedNodes.get(0).type());
} |
@Override
public synchronized DeviceEvent removeDevice(DeviceId deviceId) {
final NodeId myId = clusterService.getLocalNode().id();
NodeId master = mastershipService.getMasterFor(deviceId);
// if there exists a master, forward
// if there is no master, try to become one and process
boolean relinquishAtEnd = false;
if (master == null) {
final MastershipRole myRole = mastershipService.getLocalRole(deviceId);
if (myRole != MastershipRole.NONE) {
relinquishAtEnd = true;
}
log.debug("Temporarily requesting role for {} to remove", deviceId);
if (mastershipService.requestRoleFor(deviceId).join() == MastershipRole.MASTER) {
master = myId;
}
}
boolean isMaster = myId.equals(master);
// If this node is not the master, forward the request.
if (!isMaster) {
log.debug("{} has control of {}, forwarding remove request",
master, deviceId);
// TODO check unicast return value
clusterCommunicator.unicast(deviceId, DEVICE_REMOVE_REQ, SERIALIZER::encode, master);
/* error log:
log.error("Failed to forward {} remove request to {}", deviceId, master, e);
*/
}
// If this node is the master, get a timestamp. Otherwise, default to the current device timestamp.
Timestamp timestamp = isMaster ? deviceClockService.getTimestamp(deviceId) : null;
DeviceEvent event = removeDeviceInternal(deviceId, timestamp);
// If this node is the master, update peers.
if (isMaster && event != null) {
log.debug("Notifying peers of a device removed topology event for deviceId: {}",
deviceId);
notifyPeers(new InternalDeviceRemovedEvent(deviceId, timestamp));
}
notifyDelegateIfNotNull(event);
// Relinquish mastership if acquired to remove the device.
if (relinquishAtEnd) {
log.debug("Relinquishing temporary role acquired for {}", deviceId);
mastershipService.relinquishMastership(deviceId);
}
return event;
} | @Test
public final void testRemoveDevice() {
putDevice(DID1, SW1, A1);
List<PortDescription> pds = Arrays.asList(
DefaultPortDescription.builder().withPortNumber(P1).isEnabled(true).annotations(A2).build()
);
deviceStore.updatePorts(PID, DID1, pds);
putDevice(DID2, SW1);
assertEquals(2, deviceStore.getDeviceCount());
assertEquals(1, deviceStore.getPorts(DID1).size());
assertAnnotationsEquals(deviceStore.getDevice(DID1).annotations(), A1);
assertAnnotationsEquals(deviceStore.getPort(DID1, P1).annotations(), A2);
Capture<InternalDeviceEvent> message = Capture.newInstance();
Capture<MessageSubject> subject = Capture.newInstance();
Capture<Function<InternalDeviceEvent, byte[]>> encoder = Capture.newInstance();
resetCommunicatorExpectingSingleBroadcast(message, subject, encoder);
DeviceEvent event = deviceStore.removeDevice(DID1);
assertEquals(DEVICE_REMOVED, event.type());
assertDevice(DID1, SW1, event.subject());
assertEquals(1, deviceStore.getDeviceCount());
assertEquals(0, deviceStore.getPorts(DID1).size());
verify(clusterCommunicator);
// TODO: verify broadcast message
assertTrue(message.hasCaptured());
// putBack Device, Port w/o annotation
putDevice(DID1, SW1);
List<PortDescription> pds2 = Arrays.asList(
DefaultPortDescription.builder().withPortNumber(P1).isEnabled(true).build()
);
deviceStore.updatePorts(PID, DID1, pds2);
// annotations should not survive
assertEquals(2, deviceStore.getDeviceCount());
assertEquals(1, deviceStore.getPorts(DID1).size());
assertAnnotationsEquals(deviceStore.getDevice(DID1).annotations());
assertAnnotationsEquals(deviceStore.getPort(DID1, P1).annotations());
} |
public <T extends BuildableManifestTemplate> ManifestTemplate getManifestListTemplate(
Class<T> manifestTemplateClass) throws IOException {
Preconditions.checkArgument(
manifestTemplateClass == V22ManifestTemplate.class,
"Build an OCI image index is not yet supported");
Preconditions.checkState(!images.isEmpty(), "no images given");
V22ManifestListTemplate manifestList = new V22ManifestListTemplate();
for (Image image : images) {
ImageToJsonTranslator imageTranslator = new ImageToJsonTranslator(image);
BlobDescriptor configDescriptor =
Digests.computeDigest(imageTranslator.getContainerConfiguration());
BuildableManifestTemplate manifestTemplate =
imageTranslator.getManifestTemplate(manifestTemplateClass, configDescriptor);
BlobDescriptor manifestDescriptor = Digests.computeDigest(manifestTemplate);
ManifestDescriptorTemplate manifest = new ManifestDescriptorTemplate();
manifest.setMediaType(manifestTemplate.getManifestMediaType());
manifest.setSize(manifestDescriptor.getSize());
manifest.setDigest(manifestDescriptor.getDigest().toString());
manifest.setPlatform(image.getArchitecture(), image.getOs());
manifestList.addManifest(manifest);
}
return manifestList;
} | @Test
public void testGetManifestListTemplate_emptyImagesList() throws IOException {
try {
new ManifestListGenerator(Collections.emptyList())
.getManifestListTemplate(V22ManifestTemplate.class);
Assert.fail();
} catch (IllegalStateException ex) {
Assert.assertEquals("no images given", ex.getMessage());
}
} |
@Override
@Deprecated
public <K1, V1> KStream<K1, V1> flatTransform(final org.apache.kafka.streams.kstream.TransformerSupplier<? super K, ? super V, Iterable<KeyValue<K1, V1>>> transformerSupplier,
final String... stateStoreNames) {
Objects.requireNonNull(transformerSupplier, "transformerSupplier can't be null");
final String name = builder.newProcessorName(TRANSFORM_NAME);
return flatTransform(transformerSupplier, Named.as(name), stateStoreNames);
} | @Test
@SuppressWarnings("deprecation")
public void shouldNotAllowNullNamedOnFlatTransform() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.flatTransform(flatTransformerSupplier, (Named) null));
assertThat(exception.getMessage(), equalTo("named can't be null"));
} |
@Override
public boolean test(final String resourceName) {
return resourceName.matches(blackList);
} | @Test
public void shouldIgnoreBlankLines() throws IOException {
writeBlacklist(ImmutableList.<String>builder().add("", "java.util", "").build());
final Blacklist blacklist = new Blacklist(this.blacklistFile);
assertFalse(blacklist.test("java.lang.Process"));
assertTrue(blacklist.test("java.util.List"));
} |
public static InternalLogger getInstance(Class<?> clazz) {
return getInstance(clazz.getName());
} | @Test
public void testTrace() {
final InternalLogger logger = InternalLoggerFactory.getInstance("mock");
logger.trace("a");
verify(mockLogger).trace("a");
} |
@Override
public void itemSet(String itemType, String itemId, JSONObject properties) {
} | @Test
public void itemSet() {
mSensorsAPI.setTrackEventCallBack(new SensorsDataTrackEventCallBack() {
@Override
public boolean onTrackEvent(String eventName, JSONObject eventProperties) {
Assert.fail();
return false;
}
});
mSensorsAPI.itemSet("item", "1232", null);
} |
public static ResourceCalculatorProcessTree getResourceCalculatorProcessTree(
String pid, Class<? extends ResourceCalculatorProcessTree> clazz, Configuration conf) {
if (clazz != null) {
try {
Constructor <? extends ResourceCalculatorProcessTree> c = clazz.getConstructor(String.class);
ResourceCalculatorProcessTree rctree = c.newInstance(pid);
rctree.setConf(conf);
rctree.initialize();
return rctree;
} catch(Exception e) {
throw new RuntimeException(e);
}
}
// No class given, try an OS-specific class
if (ProcfsBasedProcessTree.isAvailable()) {
return new ProcfsBasedProcessTree(pid);
}
if (WindowsBasedProcessTree.isAvailable()) {
return new WindowsBasedProcessTree(pid);
}
// Not supported on this system.
return null;
} | @Test
void testCreatedInstanceConfigured() {
ResourceCalculatorProcessTree tree;
Configuration conf = new Configuration();
tree = ResourceCalculatorProcessTree.getResourceCalculatorProcessTree("1", EmptyProcessTree.class, conf);
assertNotNull(tree);
assertThat(tree.getConf(), sameInstance(conf));
} |
public void execute() {
execute(LOG);
} | @Test
public void should_not_fail_if_default_profile_used_at_least_once() {
store.put("foo", new TestInputFileBuilder("foo", "src/Bar.java").setLanguage("java").build());
QProfileVerifier profileLogger = new QProfileVerifier(store, profiles);
profileLogger.execute();
} |
@Udf
public <T> List<T> remove(
@UdfParameter(description = "Array of values") final List<T> array,
@UdfParameter(description = "Value to remove") final T victim) {
if (array == null) {
return null;
}
return array.stream()
.filter(el -> !Objects.equals(el, victim))
.collect(Collectors.toList());
} | @Test
public void shouldReturnAllElementsIfNoMatches() {
final List<String> input1 = Arrays.asList("foo");
final String input2 = "bar";
final List<String> result = udf.remove(input1, input2);
assertThat(result, contains("foo"));
} |
@Override
public void createOrUpdate(final String path, final Object data) {
zkClient.createOrUpdate(path, data, CreateMode.PERSISTENT);
} | @Test
public void testOnMetaDataChangedCreate() throws UnsupportedEncodingException {
MetaData metaData = MetaData.builder().id(MOCK_ID).path(MOCK_PATH).appName(MOCK_APP_NAME).build();
String metaDataPath = DefaultPathConstants.buildMetaDataPath(URLEncoder.encode(metaData.getPath(), StandardCharsets.UTF_8));
zookeeperDataChangedListener.onMetaDataChanged(ImmutableList.of(metaData), DataEventTypeEnum.CREATE);
verify(zkClient, times(1)).createOrUpdate(metaDataPath, metaData, CreateMode.PERSISTENT);
} |
void executeMergeConfigTask(List<ConfigInfoChanged> configInfoList, int pageSize) {
for (ConfigInfoChanged configInfo : configInfoList) {
String dataId = configInfo.getDataId();
String group = configInfo.getGroup();
String tenant = configInfo.getTenant();
try {
List<ConfigInfoAggr> datumList = new ArrayList<>();
int rowCount = configInfoAggrPersistService.aggrConfigInfoCount(dataId, group, tenant);
int pageCount = (int) Math.ceil(rowCount * 1.0 / pageSize);
for (int pageNo = 1; pageNo <= pageCount; pageNo++) {
Page<ConfigInfoAggr> page = configInfoAggrPersistService.findConfigInfoAggrByPage(dataId, group,
tenant, pageNo, pageSize);
if (page != null) {
datumList.addAll(page.getPageItems());
LOGGER.info("[merge-query] {}, {}, size/total={}/{}", dataId, group, datumList.size(),
rowCount);
}
}
// merge
if (datumList.size() > 0) {
ConfigInfo cf = MergeTaskProcessor.merge(dataId, group, tenant, datumList);
String aggrContent = cf.getContent();
String localContentMD5 = ConfigCacheService.getContentMd5(GroupKey.getKey(dataId, group));
                    String aggrContentMD5 = MD5Utils.md5Hex(aggrContent, Constants.ENCODE);
                    if (!StringUtils.equals(localContentMD5, aggrContentMD5)) {
configInfoPersistService.insertOrUpdate(null, null, cf, null);
LOGGER.info("[merge-ok] {}, {}, size={}, length={}, md5={}, content={}", dataId, group,
datumList.size(), cf.getContent().length(), cf.getMd5(),
ContentUtils.truncateContent(cf.getContent()));
}
} else {
// remove config info
configInfoPersistService.removeConfigInfo(dataId, group, tenant, InetUtils.getSelfIP(), null);
LOGGER.warn("[merge-delete] delete config info because no datum. dataId=" + dataId + ", groupId="
+ group);
}
} catch (Throwable e) {
LOGGER.info("[merge-error] " + dataId + ", " + group + ", " + e.toString(), e);
}
FINISHED.incrementAndGet();
if (FINISHED.get() % 100 == 0) {
LOGGER.info("[all-merge-dump] {} / {}", FINISHED.get(), total);
}
}
LOGGER.info("[all-merge-dump] {} / {}", FINISHED.get(), total);
} | @Test
void executeMergeConfigTask() {
envUtilMockedStatic.when(() -> EnvUtil.getProperty(eq("nacos.config.retention.days"))).thenReturn("10");
ConfigInfoChanged hasDatum = new ConfigInfoChanged();
hasDatum.setDataId("hasDatumdataId1");
hasDatum.setTenant("tenant1");
hasDatum.setGroup("group1");
ConfigInfoChanged noDatum = new ConfigInfoChanged();
noDatum.setDataId("dataId1");
noDatum.setTenant("tenant1");
noDatum.setGroup("group1");
List<ConfigInfoChanged> configInfoList = new ArrayList<>();
configInfoList.add(hasDatum);
configInfoList.add(noDatum);
when(configInfoAggrPersistService.aggrConfigInfoCount(eq(hasDatum.getDataId()), eq(hasDatum.getGroup()),
eq(hasDatum.getTenant()))).thenReturn(2);
Page<ConfigInfoAggr> datumPage = new Page<>();
ConfigInfoAggr configInfoAggr1 = new ConfigInfoAggr();
configInfoAggr1.setContent("12344");
ConfigInfoAggr configInfoAggr2 = new ConfigInfoAggr();
configInfoAggr2.setContent("12345666");
datumPage.getPageItems().add(configInfoAggr1);
datumPage.getPageItems().add(configInfoAggr2);
when(configInfoAggrPersistService.findConfigInfoAggrByPage(eq(hasDatum.getDataId()), eq(hasDatum.getGroup()),
eq(hasDatum.getTenant()), anyInt(), anyInt())).thenReturn(datumPage);
when(configInfoAggrPersistService.aggrConfigInfoCount(eq(noDatum.getDataId()), eq(noDatum.getGroup()),
eq(noDatum.getTenant()))).thenReturn(0);
mergeDatumService.executeMergeConfigTask(configInfoList, 1000);
} |
@Override
public void dealSeckill(long seckillId, String userPhone, String note, String taskId) {
try {
InetAddress localHost = InetAddress.getLocalHost();
SuccessKilledDTO successKilled = new SuccessKilledDTO();
successKilled.setSeckillId(seckillId);
successKilled.setUserPhone(userPhone);
successKilled.setCreateTime(new Date());
successKilled.setServerIp(localHost.getHostAddress() + ":" + localHost.getHostName());
if (seckillService.reduceNumber(successKilled) < 1) {
Seckill seckill = seckillMapper.selectById(seckillId);
                log.debug("#dealSeckill current stock: {}, seckill activity id: {}, goods id: {}", seckill.getNumber(), seckill.getSeckillId(), seckill.getGoodsId());
if (stateMachineService.checkState(seckillId, States.IN_PROGRESS)) {
stateMachineService.feedMachine(Events.ACTIVITY_CALCULATE, seckillId);
                    // Under high concurrency the completion notice may be sent more than once; use a Redis flag as a lock
Boolean endFlag = redisService.setSeckillEndFlag(seckillId, taskId);
if (endFlag) {
streamBridge.send(DEFAULT_BINDING_NAME, MessageBuilder.withPayload(
SeckillMockResponseDTO.builder().seckillId(seckillId).note(note).status(true).taskId(taskId).build())
.build());
                        log.info("#dealSeckill goods sold out, latest seckill info: {}", seckill);
}
}
if (seckill.getNumber() <= 0) {
                    log.debug("#dealSeckill insufficient stock, the seckill cannot continue!");
}
}
} catch (UnknownHostException e) {
log.error(e.getMessage(), e);
}
} | @Test
void dealSeckill() {
long seckillId = 1001L;
Seckill seckill = new Seckill();
seckill.setNumber(0);
when(seckillMapper.selectById(seckillId)).thenReturn(seckill);
seckillProcedureExecutor.dealSeckill(seckillId, "123", "test", "1");
Seckill updateSeckill = new Seckill();
updateSeckill.setSeckillId(seckillId);
// updateSeckill.setStatus(SeckillStatusConstant.END);
verify(seckillMapper, times(1)).selectById(seckillId);
} |
@Override
public String toString() {
return GsonUtils.getInstance().toJson(this);
} | @Test
public void testToString() {
ConfigData<Object> configData = new ConfigData<>();
configData.setLastModifyTime(LAST_MODIFY_TIME);
configData.setMd5(MD5);
configData.setData(Collections.emptyList());
assertNotNull(configData.toString());
} |
public static InetSocketAddress getLocalSocketAddress(String host, int port) {
return isInvalidLocalHost(host) ? new InetSocketAddress(port) : new InetSocketAddress(host, port);
} | @Test
void testGetLocalSocketAddress() {
InetSocketAddress address = NetUtils.getLocalSocketAddress("localhost", 12345);
assertTrue(address.getAddress().isAnyLocalAddress());
assertEquals(address.getPort(), 12345);
address = NetUtils.getLocalSocketAddress("dubbo-addr", 12345);
assertEquals(address.getHostName(), "dubbo-addr");
assertEquals(address.getPort(), 12345);
} |
public static String randomCreditCode() {
final StringBuilder buf = new StringBuilder(18);
        // positions 1-2: registration management department and organization category codes
for (int i = 0; i < 2; i++) {
int num = RandomUtil.randomInt(BASE_CODE_ARRAY.length - 1);
buf.append(Character.toUpperCase(BASE_CODE_ARRAY[num]));
}
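        // positions 3-8: six-digit administrative division code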
for (int i = 2; i < 8; i++) {
int num = RandomUtil.randomInt(10);
buf.append(BASE_CODE_ARRAY[num]);
}
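        // positions 9-17: nine-character subject identification (organization) code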
for (int i = 8; i < 17; i++) {
int num = RandomUtil.randomInt(BASE_CODE_ARRAY.length - 1);
buf.append(BASE_CODE_ARRAY[num]);
}
final String code = buf.toString();
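        // position 18: append the check character computed from the first 17 characters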
return code + BASE_CODE_ARRAY[getParityBit(code)];
} | @Test
public void randomCreditCode() {
final String s = CreditCodeUtil.randomCreditCode();
assertTrue(CreditCodeUtil.isCreditCode(s));
} |
@Override
public String normalise(String text) {
if (Objects.isNull(text) || text.isEmpty()) {
throw new IllegalArgumentException("Text cannot be null or empty");
}
return text.trim()
.toLowerCase()
.replaceAll("\\p{Punct}", "")
.replaceAll("\\s+", " ");
} | @Description("Normalise, when text has multiple spaces between words, then return lowercased text with spaces minimised to one")
@Test
void normalise_WhenTextHasMultipleSpacesBetweenWords_ThenReturnLowercasedTextWithOneSpace() {
// When
        var result = textNormaliser.normalise("Hello   World");
// Then
assertThat(result).isEqualTo("hello world");
} |
@Override
public ResourceId resolve(String other, ResolveOptions resolveOptions) {
checkState(isDirectory(), "Expected this resource to be a directory, but was [%s]", toString());
if (resolveOptions == ResolveOptions.StandardResolveOptions.RESOLVE_DIRECTORY) {
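      // ".." resolves to the parent directory; the bucket root ("/") is its own parent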
if ("..".equals(other)) {
if ("/".equals(key)) {
return this;
}
int parentStopsAt = key.substring(0, key.length() - 1).lastIndexOf('/');
return fromComponents(scheme, bucket, key.substring(0, parentStopsAt + 1));
}
if ("".equals(other)) {
return this;
}
if (!other.endsWith("/")) {
other += "/";
}
if (S3_URI.matcher(other).matches()) {
return resolveFromUri(other);
}
return fromComponents(scheme, bucket, key + other);
}
if (resolveOptions == ResolveOptions.StandardResolveOptions.RESOLVE_FILE) {
checkArgument(
!other.endsWith("/"), "Cannot resolve a file with a directory path: [%s]", other);
checkArgument(!"..".equals(other), "Cannot resolve parent as file: [%s]", other);
if (S3_URI.matcher(other).matches()) {
return resolveFromUri(other);
}
return fromComponents(scheme, bucket, key + other);
}
throw new UnsupportedOperationException(
String.format("Unexpected StandardResolveOptions [%s]", resolveOptions));
} | @Test
public void testResolve() {
for (TestCase testCase : PATH_TEST_CASES) {
ResourceId resourceId = S3ResourceId.fromUri(testCase.baseUri);
ResourceId resolved = resourceId.resolve(testCase.relativePath, testCase.resolveOptions);
assertEquals(testCase.expectedResult, resolved.toString());
}
// Tests for common s3 paths.
assertEquals(
S3ResourceId.fromUri("s3://bucket/tmp/aa"),
S3ResourceId.fromUri("s3://bucket/tmp/").resolve("aa", RESOLVE_FILE));
assertEquals(
S3ResourceId.fromUri("s3://bucket/tmp/aa/bb/cc/"),
S3ResourceId.fromUri("s3://bucket/tmp/")
.resolve("aa", RESOLVE_DIRECTORY)
.resolve("bb", RESOLVE_DIRECTORY)
.resolve("cc", RESOLVE_DIRECTORY));
// Tests absolute path.
assertEquals(
S3ResourceId.fromUri("s3://bucket/tmp/aa"),
S3ResourceId.fromUri("s3://bucket/tmp/bb/").resolve("s3://bucket/tmp/aa", RESOLVE_FILE));
// Tests bucket with no ending '/'.
assertEquals(
S3ResourceId.fromUri("s3://my-bucket/tmp"),
S3ResourceId.fromUri("s3://my-bucket").resolve("tmp", RESOLVE_FILE));
// Tests path with unicode
assertEquals(
S3ResourceId.fromUri("s3://bucket/输出 目录/输出 文件01.txt"),
S3ResourceId.fromUri("s3://bucket/输出 目录/").resolve("输出 文件01.txt", RESOLVE_FILE));
} |
public static <K, V> Reshuffle<K, V> of() {
return new Reshuffle<>();
} | @Test
@Category(ValidatesRunner.class)
public void testReshuffleAfterFixedWindowsAndGroupByKey() {
PCollection<KV<String, Iterable<Integer>>> input =
pipeline
.apply(
Create.of(GBK_TESTABLE_KVS)
.withCoder(KvCoder.of(StringUtf8Coder.of(), VarIntCoder.of())))
.apply(Window.into(FixedWindows.of(Duration.standardMinutes(10L))))
.apply(GroupByKey.create());
PCollection<KV<String, Iterable<Integer>>> output = input.apply(Reshuffle.of());
PAssert.that(output).satisfies(new AssertThatHasExpectedContents());
assertEquals(input.getWindowingStrategy(), output.getWindowingStrategy());
pipeline.run();
} |
public EmptyRow() {
// No-op.
} | @Test
public void testEmptyRow() {
EmptyRow row = EmptyRow.INSTANCE;
assertEquals(0, row.getColumnCount());
} |
public CheckpointProperties(
boolean forced,
SnapshotType checkpointType,
boolean discardSubsumed,
boolean discardFinished,
boolean discardCancelled,
boolean discardFailed,
boolean discardSuspended,
boolean unclaimed) {
this.forced = forced;
this.checkpointType = checkNotNull(checkpointType);
this.discardSubsumed = discardSubsumed;
this.discardFinished = discardFinished;
this.discardCancelled = discardCancelled;
this.discardFailed = discardFailed;
this.discardSuspended = discardSuspended;
this.unclaimed = unclaimed;
} | @Test
void testCheckpointProperties() {
CheckpointProperties props =
CheckpointProperties.forCheckpoint(CheckpointRetentionPolicy.RETAIN_ON_FAILURE);
assertThat(props.forceCheckpoint()).isFalse();
assertThat(props.discardOnSubsumed()).isTrue();
assertThat(props.discardOnJobFinished()).isTrue();
assertThat(props.discardOnJobCancelled()).isTrue();
assertThat(props.discardOnJobFailed()).isFalse();
assertThat(props.discardOnJobSuspended()).isTrue();
props =
CheckpointProperties.forCheckpoint(
CheckpointRetentionPolicy.RETAIN_ON_CANCELLATION);
assertThat(props.forceCheckpoint()).isFalse();
assertThat(props.discardOnSubsumed()).isTrue();
assertThat(props.discardOnJobFinished()).isTrue();
assertThat(props.discardOnJobCancelled()).isFalse();
assertThat(props.discardOnJobFailed()).isFalse();
assertThat(props.discardOnJobSuspended()).isFalse();
} |
@Override
public ClientRequest transformRequest(ClientRequest request, ServiceInstance instance) {
if (instance != null) {
MetadataContextHolder.get().setLoadbalancer(LOAD_BALANCER_SERVICE_INSTANCE, instance);
}
return request;
} | @Test
public void test() throws Throwable {
transformer.transformRequest(clientRequest, serviceInstance);
assertThat(MetadataContextHolder.get().getLoadbalancerMetadata().get(LOAD_BALANCER_SERVICE_INSTANCE)).isEqualTo(serviceInstance);
} |
static <T> CheckedSupplier<T> decorateCheckedSupplier(Observation observation,
CheckedSupplier<T> supplier) {
return () -> observation.observeChecked(supplier::get);
} | @Test
public void shouldDecorateCheckedSupplier() throws Throwable {
given(helloWorldService.returnHelloWorldWithException()).willReturn("Hello world");
CheckedSupplier<String> timedSupplier = Observations
.decorateCheckedSupplier(observation, helloWorldService::returnHelloWorldWithException);
String value = timedSupplier.get();
assertThatObservationWasStartedAndFinishedWithoutErrors();
assertThat(value).isEqualTo("Hello world");
then(helloWorldService).should(times(1)).returnHelloWorldWithException();
} |
public static SerializableFunction<Row, Mutation> beamRowToMutationFn(
Mutation.Op operation, String table) {
return (row -> {
switch (operation) {
case INSERT:
return MutationUtils.createMutationFromBeamRows(Mutation.newInsertBuilder(table), row);
case DELETE:
return Mutation.delete(table, MutationUtils.createKeyFromBeamRow(row));
case UPDATE:
return MutationUtils.createMutationFromBeamRows(Mutation.newUpdateBuilder(table), row);
case REPLACE:
return MutationUtils.createMutationFromBeamRows(Mutation.newReplaceBuilder(table), row);
case INSERT_OR_UPDATE:
return MutationUtils.createMutationFromBeamRows(
Mutation.newInsertOrUpdateBuilder(table), row);
default:
throw new IllegalArgumentException(
String.format("Unknown mutation operation type: %s", operation));
}
});
} | @Test
public void testCreateInsertOrUpdateMutationFromRowWithNulls() {
Mutation expectedMutation = createMutationNulls(Mutation.Op.INSERT_OR_UPDATE);
Mutation mutation =
beamRowToMutationFn(Mutation.Op.INSERT_OR_UPDATE, TABLE).apply(WRITE_ROW_NULLS);
assertEquals(expectedMutation, mutation);
} |
public ProjectList searchProjects(String gitlabUrl, String personalAccessToken, @Nullable String projectName,
@Nullable Integer pageNumber, @Nullable Integer pageSize) {
String url = format("%s/projects?archived=false&simple=true&membership=true&order_by=name&sort=asc&search=%s%s%s",
gitlabUrl,
projectName == null ? "" : urlEncode(projectName),
pageNumber == null ? "" : format("&page=%d", pageNumber),
pageSize == null ? "" : format("&per_page=%d", pageSize)
);
LOG.debug("get projects : [{}]", url);
Request request = new Request.Builder()
.addHeader(PRIVATE_TOKEN, personalAccessToken)
.url(url)
.get()
.build();
try (Response response = client.newCall(request).execute()) {
Headers headers = response.headers();
checkResponseIsSuccessful(response, "Could not get projects from GitLab instance");
List<Project> projectList = Project.parseJsonArray(response.body().string());
int returnedPageNumber = parseAndGetIntegerHeader(headers.get("X-Page"));
int returnedPageSize = parseAndGetIntegerHeader(headers.get("X-Per-Page"));
String xtotal = headers.get("X-Total");
Integer totalProjects = Strings.isEmpty(xtotal) ? null : parseAndGetIntegerHeader(xtotal);
return new ProjectList(projectList, returnedPageNumber, returnedPageSize, totalProjects);
} catch (JsonSyntaxException e) {
throw new IllegalArgumentException("Could not parse GitLab answer to search projects. Got a non-json payload as result.");
} catch (IOException e) {
logException(url, e);
throw new IllegalStateException(e.getMessage(), e);
}
} | @Test
public void search_projects_dont_fail_if_no_x_total() throws InterruptedException {
MockResponse projects = new MockResponse()
.setResponseCode(200)
.setBody("[\n"
+ " {\n"
+ " \"id\": 1,\n"
+ " \"name\": \"SonarQube example 1\",\n"
+ " \"name_with_namespace\": \"SonarSource / SonarQube / SonarQube example 1\",\n"
+ " \"path\": \"sonarqube-example-1\",\n"
+ " \"path_with_namespace\": \"sonarsource/sonarqube/sonarqube-example-1\",\n"
+ " \"web_url\": \"https://example.gitlab.com/sonarsource/sonarqube/sonarqube-example-1\"\n"
+ " }"
+ "]");
projects.addHeader("X-Page", 1);
projects.addHeader("X-Per-Page", 10);
server.enqueue(projects);
ProjectList projectList = underTest.searchProjects(gitlabUrl, "pat", "example", 1, 10);
assertThat(projectList.getPageNumber()).isOne();
assertThat(projectList.getPageSize()).isEqualTo(10);
assertThat(projectList.getTotal()).isNull();
assertThat(projectList.getProjects()).hasSize(1);
assertThat(projectList.getProjects()).extracting(
Project::getId, Project::getName, Project::getNameWithNamespace, Project::getPath, Project::getPathWithNamespace, Project::getWebUrl).containsExactly(
tuple(1L, "SonarQube example 1", "SonarSource / SonarQube / SonarQube example 1", "sonarqube-example-1", "sonarsource/sonarqube/sonarqube-example-1",
"https://example.gitlab.com/sonarsource/sonarqube/sonarqube-example-1"));
RecordedRequest projectGitlabRequest = server.takeRequest(10, TimeUnit.SECONDS);
String gitlabUrlCall = projectGitlabRequest.getRequestUrl().toString();
assertThat(gitlabUrlCall).isEqualTo(server.url("") + "projects?archived=false&simple=true&membership=true&order_by=name&sort=asc&search=example&page=1&per_page=10");
assertThat(projectGitlabRequest.getMethod()).isEqualTo("GET");
} |
@Override
public String getUnderFSType() {
return "obs";
} | @Test
public void getUnderFSType() {
Assert.assertEquals("obs", mOBSUnderFileSystem.getUnderFSType());
} |
public void addValueProviders(final String segmentName,
final RocksDB db,
final Cache cache,
final Statistics statistics) {
if (storeToValueProviders.isEmpty()) {
logger.debug("Adding metrics recorder of task {} to metrics recording trigger", taskId);
streamsMetrics.rocksDBMetricsRecordingTrigger().addMetricsRecorder(this);
} else if (storeToValueProviders.containsKey(segmentName)) {
throw new IllegalStateException("Value providers for store " + segmentName + " of task " + taskId +
" has been already added. This is a bug in Kafka Streams. " +
"Please open a bug report under https://issues.apache.org/jira/projects/KAFKA/issues");
}
verifyDbAndCacheAndStatistics(segmentName, db, cache, statistics);
logger.debug("Adding value providers for store {} of task {}", segmentName, taskId);
storeToValueProviders.put(segmentName, new DbAndCacheAndStatistics(db, cache, statistics));
} | @Test
public void shouldSetStatsLevelToExceptDetailedTimersWhenValueProvidersWithStatisticsAreAdded() {
recorder.addValueProviders(SEGMENT_STORE_NAME_1, dbToAdd1, cacheToAdd1, statisticsToAdd1);
verify(statisticsToAdd1).setStatsLevel(StatsLevel.EXCEPT_DETAILED_TIMERS);
} |
@VisibleForTesting
static String generateMigrationFile(QualifiedVersion version, String template, List<ChangelogEntry> entries) throws IOException {
final Map<Boolean, Map<String, List<ChangelogEntry.Deprecation>>> deprecationsByNotabilityByArea = entries.stream()
.map(ChangelogEntry::getDeprecation)
.filter(Objects::nonNull)
.sorted(comparing(ChangelogEntry.Deprecation::getTitle))
.collect(
groupingBy(
ChangelogEntry.Deprecation::isNotable,
TreeMap::new,
groupingBy(ChangelogEntry.Deprecation::getArea, TreeMap::new, toList())
)
);
final Map<Boolean, Map<String, List<ChangelogEntry.Breaking>>> breakingByNotabilityByArea = entries.stream()
.map(ChangelogEntry::getBreaking)
.filter(Objects::nonNull)
.sorted(comparing(ChangelogEntry.Breaking::getTitle))
.collect(
groupingBy(
ChangelogEntry.Breaking::isNotable,
TreeMap::new,
groupingBy(ChangelogEntry.Breaking::getArea, TreeMap::new, toList())
)
);
final Map<String, Object> bindings = new HashMap<>();
bindings.put("breakingByNotabilityByArea", breakingByNotabilityByArea);
bindings.put("deprecationsByNotabilityByArea", deprecationsByNotabilityByArea);
bindings.put("isElasticsearchSnapshot", version.isSnapshot());
bindings.put("majorDotMinor", version.major() + "." + version.minor());
bindings.put("majorDotMinorDotRevision", version.major() + "." + version.minor() + "." + version.revision());
bindings.put("majorMinor", String.valueOf(version.major()) + version.minor());
bindings.put("nextMajor", (version.major() + 1) + ".0");
bindings.put("version", version);
return TemplateUtils.render(template, bindings);
} | @Test
public void generateIndexFile_rendersCorrectMarkup() throws Exception {
// given:
final String template = getResource("/templates/breaking-changes.asciidoc");
final String expectedOutput = getResource(
"/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.generateMigrationFile.asciidoc"
);
final List<ChangelogEntry> entries = getEntries();
// when:
final String actualOutput = BreakingChangesGenerator.generateMigrationFile(
QualifiedVersion.of("8.4.0-SNAPSHOT"),
template,
entries
);
// then:
assertThat(actualOutput, equalTo(expectedOutput));
} |
@Override
public List<AdminUserDO> getUserListByNickname(String nickname) {
return userMapper.selectListByNickname(nickname);
} | @Test
public void testGetUserListByNickname() {
        // mock data
AdminUserDO user = randomAdminUserDO(o -> o.setNickname("芋头"));
userMapper.insert(user);
        // a nickname that does not match
userMapper.insert(randomAdminUserDO(o -> o.setNickname("源码")));
        // prepare the parameter
String nickname = "芋";
        // invoke
List<AdminUserDO> result = userService.getUserListByNickname(nickname);
        // assert
assertEquals(1, result.size());
assertEquals(user, result.get(0));
} |
@Override
public boolean retryRequest(IOException exception, int executionCount, HttpContext ctx) {
        log.fine(() -> String.format("retryRequest(exception='%s', executionCount='%d', ctx='%s')",
exception.getClass().getName(), executionCount, ctx));
HttpClientContext clientCtx = HttpClientContext.adapt(ctx);
if (!predicate.test(exception, clientCtx)) {
log.fine(() -> String.format("Not retrying for '%s'", ctx));
return false;
}
if (executionCount > maxRetries) {
log.fine(() -> String.format("Max retries exceeded for '%s'", ctx));
retryFailedConsumer.onRetryFailed(exception, executionCount, clientCtx);
return false;
}
Duration delay = delaySupplier.getDelay(executionCount);
log.fine(() -> String.format("Retrying after %s for '%s'", delay, ctx));
retryConsumer.onRetry(exception, delay, executionCount, clientCtx);
sleeper.sleep(delay);
return true;
} | @SuppressWarnings("unchecked")
@Test
void retry_consumers_are_invoked() {
RetryConsumer<IOException> retryConsumer = (RetryConsumer<IOException>) mock(RetryConsumer.class);
RetryFailedConsumer<IOException> retryFailedConsumer = (RetryFailedConsumer<IOException>) mock(RetryFailedConsumer.class);
Duration delay = Duration.ofSeconds(10);
int maxRetries = 5;
DelayedConnectionLevelRetryHandler handler = DelayedConnectionLevelRetryHandler.Builder
.withFixedDelay(delay, maxRetries)
.withSleeper(mock(Sleeper.class))
.onRetry(retryConsumer)
.onRetryFailed(retryFailedConsumer)
.build();
IOException exception = new IOException();
HttpClientContext ctx = new HttpClientContext();
int lastExecutionCount = maxRetries + 1;
for (int i = 1; i <= lastExecutionCount; i++) {
handler.retryRequest(exception, i, ctx);
}
verify(retryFailedConsumer).onRetryFailed(exception, lastExecutionCount, ctx);
for (int i = 1; i < lastExecutionCount; i++) {
verify(retryConsumer).onRetry(exception, delay, i, ctx);
}
} |
@Override
public void filter(ContainerRequestContext requestContext) throws IOException {
if (!requestContext.getUriInfo().getPath().endsWith(targetPath)) {
return;
}
final List<MediaType> acceptedFormats = requestContext.getAcceptableMediaTypes();
final Map<MediaType, ExportFormat> exportFormatCandidates = supportedFormats.entrySet()
.stream()
.filter(entry -> acceptedFormats.stream().anyMatch(acceptedFormat -> entry.getKey().isCompatible(acceptedFormat)))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
if (exportFormatCandidates.isEmpty()) {
requestContext.abortWith(Response.status(Response.Status.UNSUPPORTED_MEDIA_TYPE).build());
return;
}
final Map<MediaType, Optional<String>> candidateErrors = exportFormatCandidates.entrySet()
.stream()
.collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().hasError()));
if (candidateErrors.values().stream().allMatch(Optional::isPresent)) {
final String errorMessage = candidateErrors.values().stream()
.map(optionalError -> optionalError.orElse(""))
.collect(Collectors.joining("\n"));
requestContext.abortWith(Response.status(Response.Status.UNSUPPORTED_MEDIA_TYPE)
.entity(errorMessage)
.type(MoreMediaTypes.TEXT_PLAIN_TYPE)
.build());
return;
}
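        // narrow the Accept header to the formats that reported no error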
final List<String> allowedMediaTypes = candidateErrors.entrySet().stream()
.filter(entry -> !entry.getValue().isPresent())
.map(Map.Entry::getKey)
.map(MediaType::toString)
.collect(Collectors.toList());
requestContext.getHeaders().put(HttpHeaders.ACCEPT, allowedMediaTypes);
} | @Test
void doesNothingIfRequestPathDoesNotMatch() throws Exception {
final ContainerRequestFilter filter = new MessageExportFormatFilter(Collections.emptySet());
final ContainerRequestContext requestContext = mockRequestContextForNonMatchingPath();
filter.filter(requestContext);
verifyRequestNotAborted(requestContext);
} |
public void clearForTask() {
MDC.remove(MDC_CE_TASK_UUID);
} | @Test
public void clearForTask_removes_task_uuid_from_MDC() {
MDC.put(MDC_CE_TASK_UUID, "some_value");
underTest.clearForTask();
assertThat(MDC.get(MDC_CE_TASK_UUID)).isNull();
} |
protected static void checkNormalWithComma(String configKey, String configValue) throws SofaRpcRuntimeException {
checkPattern(configKey, configValue, NORMAL_COMMA, "only allow a-zA-Z0-9 '-' '_' '.' ','");
} | @Test
public void checkNormalWithComma() {
ConfigValueHelper.checkNormalWithComma("aaa", "123abc-_.,");
try {
ConfigValueHelper.checkNormalWithComma("aaa", "123abc-_.,!");
Assert.fail();
} catch (Exception e) {
Assert.assertTrue(e.getMessage().contains("aaa"));
}
} |
public SqlType getExpressionSqlType(final Expression expression) {
return getExpressionSqlType(expression, Collections.emptyMap());
} | @Test
public void shouldGetCorrectSchemaForSearchedCaseWhenStruct() {
// Given:
final Expression expression = new SearchedCaseExpression(
ImmutableList.of(
new WhenClause(
new ComparisonExpression(Type.EQUAL, TestExpressions.COL0, new IntegerLiteral(10)),
ADDRESS
)
),
Optional.empty()
);
// When:
final SqlType result = expressionTypeManager.getExpressionSqlType(expression);
// Then:
final SqlType sqlType = SCHEMA.findColumn(ADDRESS.getColumnName()).get().type();
assertThat(result, is(sqlType));
} |
static MinMax findMinMax(MinMax minMax, List<Statement> statements, EncodedValueLookup lookup) {
List<List<Statement>> groups = CustomModelParser.splitIntoGroup(statements);
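        // each group is one if/else-if/else chain; update the running min/max bounds per group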
for (List<Statement> group : groups) findMinMaxForGroup(minMax, group, lookup);
return minMax;
} | @Test
public void testFindMax() {
List<Statement> statements = new ArrayList<>();
statements.add(If("true", LIMIT, "100"));
assertEquals(100, findMinMax(new MinMax(0, 120), statements, lookup).max);
statements.add(Else(LIMIT, "20"));
assertEquals(100, findMinMax(new MinMax(0, 120), statements, lookup).max);
statements = new ArrayList<>();
statements.add(If("road_environment == BRIDGE", LIMIT, "85"));
statements.add(Else(LIMIT, "100"));
assertEquals(100, findMinMax(new MinMax(0, 120), statements, lookup).max);
// find bigger speed than stored max_speed (30) in server-side custom_models
statements = new ArrayList<>();
statements.add(If("true", MULTIPLY, "2"));
statements.add(If("true", LIMIT, "35"));
assertEquals(35, findMinMax(new MinMax(0, 30), statements, lookup).max);
} |
@Override
public void writeMetrics(MetricQueryResults metricQueryResults) throws Exception {
final long metricTimestamp = System.currentTimeMillis() / 1000L;
Socket socket = new Socket(InetAddress.getByName(address), port);
BufferedWriter writer =
new BufferedWriter(new OutputStreamWriter(socket.getOutputStream(), charset));
StringBuilder messagePayload = new StringBuilder();
Iterable<MetricResult<Long>> counters = metricQueryResults.getCounters();
Iterable<MetricResult<GaugeResult>> gauges = metricQueryResults.getGauges();
Iterable<MetricResult<DistributionResult>> distributions =
metricQueryResults.getDistributions();
for (MetricResult<Long> counter : counters) {
messagePayload.append(new CounterMetricMessage(counter, "value", metricTimestamp).toString());
}
for (MetricResult<GaugeResult> gauge : gauges) {
messagePayload.append(new GaugeMetricMessage(gauge, "value").toString());
}
for (MetricResult<DistributionResult> distribution : distributions) {
messagePayload.append(
new DistributionMetricMessage(distribution, "min", metricTimestamp).toString());
messagePayload.append(
new DistributionMetricMessage(distribution, "max", metricTimestamp).toString());
messagePayload.append(
new DistributionMetricMessage(distribution, "count", metricTimestamp).toString());
messagePayload.append(
new DistributionMetricMessage(distribution, "sum", metricTimestamp).toString());
messagePayload.append(
new DistributionMetricMessage(distribution, "mean", metricTimestamp).toString());
}
writer.write(messagePayload.toString());
writer.flush();
writer.close();
socket.close();
} | @Test
public void testWriteMetricsWithCommittedSupported() throws Exception {
MetricQueryResults metricQueryResults = new CustomMetricQueryResults(true);
MetricsOptions pipelineOptions = PipelineOptionsFactory.create().as(MetricsOptions.class);
pipelineOptions.setMetricsGraphitePort(port);
pipelineOptions.setMetricsGraphiteHost("127.0.0.1");
MetricsGraphiteSink metricsGraphiteSink = new MetricsGraphiteSink(pipelineOptions);
CountDownLatch countDownLatch = new CountDownLatch(1);
graphiteServer.setCountDownLatch(countDownLatch);
metricsGraphiteSink.writeMetrics(metricQueryResults);
countDownLatch.await();
String join = String.join("\n", graphiteServer.getMessages());
String regexpr =
"beam.counter.ns1.n1.s1.committed.value 10 [0-9]+\\n"
+ "beam.counter.ns1.n1.s1.attempted.value 20 [0-9]+\\n"
+ "beam.gauge.ns1.n3.s3.committed.value 100 [0-9]+\\n"
+ "beam.gauge.ns1.n3.s3.attempted.value 120 [0-9]+\\n"
+ "beam.distribution.ns1.n2.s2.committed.min 5 [0-9]+\\n"
+ "beam.distribution.ns1.n2.s2.attempted.min 3 [0-9]+\\n"
+ "beam.distribution.ns1.n2.s2.committed.max 8 [0-9]+\\n"
+ "beam.distribution.ns1.n2.s2.attempted.max 9 [0-9]+\\n"
+ "beam.distribution.ns1.n2.s2.committed.count 2 [0-9]+\\n"
+ "beam.distribution.ns1.n2.s2.attempted.count 4 [0-9]+\\n"
+ "beam.distribution.ns1.n2.s2.committed.sum 10 [0-9]+\\n"
+ "beam.distribution.ns1.n2.s2.attempted.sum 25 [0-9]+\\n"
+ "beam.distribution.ns1.n2.s2.committed.mean 5.0 [0-9]+\\n"
+ "beam.distribution.ns1.n2.s2.attempted.mean 6.25 [0-9]+";
assertTrue(join.matches(regexpr));
} |
public static AuditManagerS3A createAndStartAuditManager(
Configuration conf,
IOStatisticsStore iostatistics) {
AuditManagerS3A auditManager;
if (conf.getBoolean(AUDIT_ENABLED, AUDIT_ENABLED_DEFAULT)) {
auditManager = new ActiveAuditManagerS3A(
requireNonNull(iostatistics));
} else {
LOG.debug("auditing is disabled");
auditManager = stubAuditManager();
}
auditManager.init(conf);
auditManager.start();
LOG.debug("Started Audit Manager {}", auditManager);
return auditManager;
} | @Test
public void testLoggingAuditorBinding() throws Throwable {
AuditManagerS3A manager = AuditIntegration.createAndStartAuditManager(
AuditTestSupport.loggingAuditConfig(),
ioStatistics);
OperationAuditor auditor = manager.getAuditor();
assertServiceStateStarted(auditor);
manager.close();
assertServiceStateStopped(auditor);
} |
@Override
public FSDataOutputStream create(Path path, boolean overwrite, int bufferSize, short replication,
long blockSize, Progressable progress) throws IOException {
String confUmask = mAlluxioConf.getString(PropertyKey.SECURITY_AUTHORIZATION_PERMISSION_UMASK);
Mode mode = ModeUtils.applyFileUMask(Mode.defaults(), confUmask);
return this.create(path, new FsPermission(mode.toShort()), overwrite, bufferSize, replication,
blockSize, progress);
} | @Test
public void concurrentInitialize() throws Exception {
List<Thread> threads = new ArrayList<>();
final org.apache.hadoop.conf.Configuration conf = getConf();
for (int i = 0; i < 100; i++) {
Thread t = new Thread(() -> {
URI uri = URI.create(Constants.HEADER + "randomhost:410/");
try {
org.apache.hadoop.fs.FileSystem.get(uri, conf);
} catch (IOException e) {
fail();
}
});
threads.add(t);
}
for (Thread t : threads) {
t.start();
}
for (Thread t : threads) {
t.join();
}
} |
Optional<ImageMetadataTemplate> retrieveMetadata(ImageReference imageReference)
throws IOException, CacheCorruptedException {
Path imageDirectory = cacheStorageFiles.getImageDirectory(imageReference);
Path metadataPath = imageDirectory.resolve("manifests_configs.json");
if (!Files.exists(metadataPath)) {
return Optional.empty();
}
ImageMetadataTemplate metadata;
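    // hold the per-image lock file while reading so a concurrent writer cannot corrupt the result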
try (LockFile ignored = LockFile.lock(imageDirectory.resolve("lock"))) {
metadata = JsonTemplateMapper.readJsonFromFile(metadataPath, ImageMetadataTemplate.class);
}
verifyImageMetadata(metadata, imageDirectory);
return Optional.of(metadata);
} | @Test
public void testRetrieveMetadata_ociSingleManifest()
throws IOException, URISyntaxException, CacheCorruptedException {
setupCachedMetadataOci(cacheDirectory);
ImageMetadataTemplate metadata =
cacheStorageReader.retrieveMetadata(ImageReference.of("test", "image", "tag")).get();
Assert.assertNull(metadata.getManifestList());
Assert.assertEquals(1, metadata.getManifestsAndConfigs().size());
OciManifestTemplate manifestTemplate =
(OciManifestTemplate) metadata.getManifestsAndConfigs().get(0).getManifest();
Assert.assertEquals(2, manifestTemplate.getSchemaVersion());
Assert.assertEquals(
"8c662931926fa990b41da3c9f42663a537ccd498130030f9149173a0493832ad",
manifestTemplate.getContainerConfiguration().getDigest().getHash());
} |
public static List<String> splitToWhiteSpaceSeparatedTokens(String input) {
if (input == null) {
return new ArrayList<>();
}
StringTokenizer tokenizer = new StringTokenizer(input.trim(), QUOTE_CHAR + WHITESPACE, true);
List<String> tokens = new ArrayList<>();
StringBuilder quotedText = new StringBuilder();
while (tokenizer.hasMoreTokens()) {
String token = tokenizer.nextToken();
if (QUOTE_CHAR.equals(token)) {
// if we have a quote, add the next tokens to the quoted text
// until the quoting has finished
quotedText.append(QUOTE_CHAR);
String buffer = quotedText.toString();
if (isSingleQuoted(buffer) || isDoubleQuoted(buffer)) {
tokens.add(buffer.substring(1, buffer.length() - 1));
quotedText = new StringBuilder();
}
} else if (WHITESPACE.equals(token)) {
// a white space, if in quote, add the white space, otherwise
// skip it
if (quotedText.length() > 0) {
quotedText.append(WHITESPACE);
}
} else {
if (quotedText.length() > 0) {
quotedText.append(token);
} else {
tokens.add(token);
}
}
}
if (quotedText.length() > 0) {
throw new IllegalArgumentException("Invalid quoting found in args " + quotedText);
}
return tokens;
} | @Test
public void testTwoDoubleQuotes() {
List<String> args = splitToWhiteSpaceSeparatedTokens("\"\"arg0\"\" \"\"arg1\"\"");
assertEquals("\"arg0\"", args.get(0));
assertEquals("\"arg1\"", args.get(1));
} |
@Override
public Map<ExecutionAttemptID, ExecutionSlotAssignment> allocateSlotsFor(
List<ExecutionAttemptID> executionAttemptIds) {
final Map<ExecutionVertexID, ExecutionAttemptID> vertexIdToExecutionId = new HashMap<>();
executionAttemptIds.forEach(
executionId ->
vertexIdToExecutionId.put(executionId.getExecutionVertexId(), executionId));
checkState(
vertexIdToExecutionId.size() == executionAttemptIds.size(),
"SlotSharingExecutionSlotAllocator does not support one execution vertex to have multiple concurrent executions");
final List<ExecutionVertexID> vertexIds =
executionAttemptIds.stream()
.map(ExecutionAttemptID::getExecutionVertexId)
.collect(Collectors.toList());
return allocateSlotsForVertices(vertexIds).stream()
.collect(
Collectors.toMap(
vertexAssignment ->
vertexIdToExecutionId.get(
vertexAssignment.getExecutionVertexId()),
vertexAssignment ->
new ExecutionSlotAssignment(
vertexIdToExecutionId.get(
vertexAssignment.getExecutionVertexId()),
vertexAssignment.getLogicalSlotFuture())));
} | @Test
void testSlotRequestProfileFromExecutionSlotSharingGroup() {
final ResourceProfile resourceProfile1 = ResourceProfile.fromResources(1, 10);
final ResourceProfile resourceProfile2 = ResourceProfile.fromResources(2, 20);
final AllocationContext context =
AllocationContext.newBuilder()
.addGroupAndResource(resourceProfile1, EV1, EV3)
.addGroupAndResource(resourceProfile2, EV2, EV4)
.build();
context.allocateSlotsFor(EV1, EV2);
assertThat(context.getSlotProvider().getRequests()).hasSize(2);
assertThat(
context.getSlotProvider().getRequests().values().stream()
.map(PhysicalSlotRequest::getSlotProfile)
.map(SlotProfile::getPhysicalSlotResourceProfile)
.collect(Collectors.toList()))
.containsExactlyInAnyOrder(resourceProfile1, resourceProfile2);
} |
public static String getHostAddress()
throws SocketException, UnknownHostException {
boolean isIPv6Preferred = Boolean.parseBoolean(System.getProperty("java.net.preferIPv6Addresses"));
DatagramSocket ds = new DatagramSocket();
try {
ds.connect(isIPv6Preferred ? Inet6Address.getByName(DUMMY_OUT_IPV6) : Inet4Address.getByName(DUMMY_OUT_IPV4),
HTTP_PORT);
} catch (java.io.UncheckedIOException e) {
LOGGER.warn(e.getMessage());
if (isIPv6Preferred) {
LOGGER.warn("No IPv6 route available on host, falling back to IPv4");
ds.connect(Inet4Address.getByName(DUMMY_OUT_IPV4), HTTP_PORT);
} else {
LOGGER.warn("No IPv4 route available on host, falling back to IPv6");
ds.connect(Inet6Address.getByName(DUMMY_OUT_IPV6), HTTP_PORT);
}
}
InetAddress localAddress = ds.getLocalAddress();
if (localAddress.isAnyLocalAddress()) {
localAddress = isIPv6Preferred ? getLocalIPv6Address() : InetAddress.getLocalHost();
}
return localAddress.getHostAddress();
} | @Test(description = "Test getHostAddress with preferIPv6Addresses=true in IPv6 only environment")
public void testGetHostAddressIPv6EnvIPv6Preferred() {
System.setProperty("java.net.preferIPv6Addresses", "true");
InetAddress mockInetAddress = mock(InetAddress.class);
when(mockInetAddress.isAnyLocalAddress()).thenReturn(false);
when(mockInetAddress.getHostAddress()).thenReturn(LOCAL_ADDRESS_IPV6);
try (MockedConstruction<DatagramSocket> mockedConstructionDatagramSocket = mockConstruction(DatagramSocket.class,
initDatagramSocket(mockInetAddress, NetworkEnv.IPV6))) {
String hostAddress = NetUtils.getHostAddress();
DatagramSocket mockDatagramSocket = mockedConstructionDatagramSocket.constructed().get(0);
assertEquals(LOCAL_ADDRESS_IPV6, hostAddress);
assertEquals(1, mockedConstructionDatagramSocket.constructed().size());
verify(mockDatagramSocket, times(1)).connect(any(), anyInt());
} catch (SocketException | UnknownHostException e) {
Assert.fail("Should not throw: " + e.getMessage());
}
} |
public static String identifyDriver(String nameContainsProductInfo) {
return identifyDriver(nameContainsProductInfo, null);
} | @Test
public void identifyDriverTest(){
Map<String,String> map = new HashMap<>(25);
map.put("mysql",DRIVER_MYSQL_V6);
map.put("cobar",DRIVER_MYSQL_V6);
map.put("oracle",DRIVER_ORACLE);
map.put("postgresql",DRIVER_POSTGRESQL);
map.put("sqlite",DRIVER_SQLLITE3);
map.put("sqlserver",DRIVER_SQLSERVER);
map.put("microsoft",DRIVER_SQLSERVER);
        // ambiguous in unit tests
//map.put("hive2",DRIVER_HIVE2);
//map.put("hive",DRIVER_HIVE);
map.put("h2",DRIVER_H2);
map.put("derby",DRIVER_DERBY);
map.put("hsqldb",DRIVER_HSQLDB);
map.put("dm",DRIVER_DM7);
map.put("kingbase8",DRIVER_KINGBASE8);
map.put("ignite",DRIVER_IGNITE_THIN);
map.put("clickhouse",DRIVER_CLICK_HOUSE);
map.put("highgo",DRIVER_HIGHGO);
map.put("db2",DRIVER_DB2);
map.put("xugu",DRIVER_XUGU);
map.put("phoenix",DRIVER_PHOENIX);
map.put("zenith",DRIVER_GAUSS);
map.put("gbase",DRIVER_GBASE);
map.put("oscar",DRIVER_OSCAR);
map.put("sybase",DRIVER_SYBASE);
map.put("mariadb",DRIVER_MARIADB);
map.forEach((k,v) -> assertEquals(v,
DialectFactory.identifyDriver(k+ RandomUtil.randomString(2),null) ));
} |
@Override
public void store(Measure newMeasure) {
saveMeasure(newMeasure.inputComponent(), (DefaultMeasure<?>) newMeasure);
} | @Test
public void shouldIgnoreMeasuresOnFolders() {
underTest.store(new DefaultMeasure()
.on(new DefaultInputDir("foo", "bar"))
.forMetric(CoreMetrics.LINES)
.withValue(10));
verifyNoMoreInteractions(reportPublisher);
} |
@GetMapping("/login")
public ShenyuAdminResult loginDashboardUser(final String userName, final String password, @RequestParam(required = false) final String clientId) {
LoginDashboardUserVO loginVO = dashboardUserService.login(userName, password, clientId);
return Optional.ofNullable(loginVO)
.map(loginStatus -> {
if (Boolean.TRUE.equals(loginStatus.getEnabled())) {
return ShenyuAdminResult.success(ShenyuResultMessage.PLATFORM_LOGIN_SUCCESS, loginVO);
}
return ShenyuAdminResult.error(ShenyuResultMessage.LOGIN_USER_DISABLE_ERROR);
}).orElse(ShenyuAdminResult.error(ShenyuResultMessage.PLATFORM_LOGIN_ERROR));
} | @Test
public void testLoginDashboardUser() throws Exception {
final String loginUri = "/platform/login?userName=admin&password=123456";
LoginDashboardUserVO loginDashboardUserVO = LoginDashboardUserVO.buildLoginDashboardUserVO(dashboardUserVO);
given(this.dashboardUserService.login(eq("admin"), eq("123456"), isNull())).willReturn(loginDashboardUserVO);
this.mockMvc.perform(MockMvcRequestBuilders.request(HttpMethod.GET, loginUri))
.andExpect(status().isOk())
.andExpect(jsonPath("$.code", is(CommonErrorCode.SUCCESSFUL)))
.andExpect(jsonPath("$.message", is(ShenyuResultMessage.PLATFORM_LOGIN_SUCCESS)))
.andExpect(jsonPath("$.data.id", is(loginDashboardUserVO.getId())))
.andReturn();
} |
public boolean isSymmetric() {
if (!isSquare()) {
return false;
} else {
for (int i = 0; i < dim1; i++) {
for (int j = i + 1; j < dim1; j++) {
if (Double.compare(get(i,j),get(j,i)) != 0) {
return false;
}
}
}
return true;
}
} | @Test
public void symmetricTest() {
assertFalse(generateA().isSymmetric());
assertFalse(generateB().isSymmetric());
assertTrue(generateSymmetric().isSymmetric());
} |
protected boolean isNodeEmpty(JsonNode json) {
if (json.isArray()) {
return isListEmpty((ArrayNode) json);
} else if (json.isObject()) {
return isObjectEmpty((ObjectNode) json);
} else {
return isEmptyText(json);
}
} | @Test
public void isNodeEmpty_textNode() {
assertThat(expressionEvaluator.isNodeEmpty(new TextNode(""))).isTrue();
assertThat(expressionEvaluator.isNodeEmpty(new TextNode(null))).isTrue();
assertThat(expressionEvaluator.isNodeEmpty(new TextNode(VALUE))).isFalse();
} |
@Override
public void registerRemote(final RemoteInstance remoteInstance) throws ServiceRegisterException {
try {
this.port = remoteInstance.getAddress().getPort();
healthChecker.health();
} catch (Throwable e) {
healthChecker.unHealth(e);
throw new ServiceRegisterException(e.getMessage());
}
} | @Test
public void registerRemote() throws Exception {
RemoteInstance instance = new RemoteInstance(addressA);
withEnvironmentVariable(SELF_UID, SELF_UID).execute(() -> {
providerA = createProvider(SELF_UID);
coordinatorA = getClusterCoordinator(providerA);
coordinatorA.start();
});
doReturn(Optional.of(Collections.singletonList(podA)))
.when(NamespacedPodListInformer.INFORMER)
.listPods();
ClusterMockWatcher watcher = new ClusterMockWatcher();
coordinatorA.registerWatcher(watcher);
coordinatorA.registerRemote(instance);
KubernetesCoordinator.K8sResourceEventHandler listener = coordinatorA.new K8sResourceEventHandler();
listener.onAdd(podA);
List<RemoteInstance> remoteInstances = watcher.getRemoteInstances();
assertEquals(1, remoteInstances.size());
assertEquals(1, coordinatorA.queryRemoteNodes().size());
Address queryAddress = remoteInstances.get(0).getAddress();
assertEquals(addressA, queryAddress);
assertTrue(queryAddress.isSelf());
} |
@Override
public PostgreSQLTypeUnspecifiedSQLParameter parse(final String value) {
return new PostgreSQLTypeUnspecifiedSQLParameter(value);
} | @Test
void assertParse() {
assertThat(new PostgreSQLUnspecifiedValueParser().parse("1").toString(), is("1"));
} |
@Override
public DataflowPipelineJob run(Pipeline pipeline) {
// Multi-language pipelines and pipelines that include upgrades should automatically be upgraded
// to Runner v2.
if (DataflowRunner.isMultiLanguagePipeline(pipeline) || includesTransformUpgrades(pipeline)) {
List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList());
if (!experiments.contains("use_runner_v2")) {
LOG.info(
"Automatically enabling Dataflow Runner v2 since the pipeline used cross-language"
+ " transforms or pipeline needed a transform upgrade.");
options.setExperiments(
ImmutableList.<String>builder().addAll(experiments).add("use_runner_v2").build());
}
}
if (useUnifiedWorker(options)) {
if (hasExperiment(options, "disable_runner_v2")
|| hasExperiment(options, "disable_runner_v2_until_2023")
|| hasExperiment(options, "disable_prime_runner_v2")) {
throw new IllegalArgumentException(
"Runner V2 both disabled and enabled: at least one of ['beam_fn_api', 'use_unified_worker', 'use_runner_v2', 'use_portable_job_submission'] is set and also one of ['disable_runner_v2', 'disable_runner_v2_until_2023', 'disable_prime_runner_v2'] is set.");
}
List<String> experiments =
new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true
if (!experiments.contains("use_runner_v2")) {
experiments.add("use_runner_v2");
}
if (!experiments.contains("use_unified_worker")) {
experiments.add("use_unified_worker");
}
if (!experiments.contains("beam_fn_api")) {
experiments.add("beam_fn_api");
}
if (!experiments.contains("use_portable_job_submission")) {
experiments.add("use_portable_job_submission");
}
options.setExperiments(ImmutableList.copyOf(experiments));
}
logWarningIfPCollectionViewHasNonDeterministicKeyCoder(pipeline);
logWarningIfBigqueryDLQUnused(pipeline);
if (shouldActAsStreaming(pipeline)) {
options.setStreaming(true);
if (useUnifiedWorker(options)) {
options.setEnableStreamingEngine(true);
List<String> experiments =
new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true
if (!experiments.contains("enable_streaming_engine")) {
experiments.add("enable_streaming_engine");
}
if (!experiments.contains("enable_windmill_service")) {
experiments.add("enable_windmill_service");
}
}
}
if (!ExperimentalOptions.hasExperiment(options, "disable_projection_pushdown")) {
ProjectionPushdownOptimizer.optimize(pipeline);
}
LOG.info(
"Executing pipeline on the Dataflow Service, which will have billing implications "
+ "related to Google Compute Engine usage and other Google Cloud Services.");
DataflowPipelineOptions dataflowOptions = options.as(DataflowPipelineOptions.class);
String workerHarnessContainerImageURL = DataflowRunner.getContainerImageForJob(dataflowOptions);
// This incorrectly puns the worker harness container image (which implements v1beta3 API)
// with the SDK harness image (which implements Fn API).
//
// The same Environment is used in different and contradictory ways, depending on whether
// it is a v1 or v2 job submission.
RunnerApi.Environment defaultEnvironmentForDataflow =
Environments.createDockerEnvironment(workerHarnessContainerImageURL);
// The SdkComponents for portable an non-portable job submission must be kept distinct. Both
// need the default environment.
SdkComponents portableComponents = SdkComponents.create();
portableComponents.registerEnvironment(
defaultEnvironmentForDataflow
.toBuilder()
.addAllDependencies(getDefaultArtifacts())
.addAllCapabilities(Environments.getJavaCapabilities())
.build());
RunnerApi.Pipeline portablePipelineProto =
PipelineTranslation.toProto(pipeline, portableComponents, false);
// Note that `stageArtifacts` has to be called before `resolveArtifact` because
// `resolveArtifact` updates local paths to staged paths in pipeline proto.
portablePipelineProto = resolveAnyOfEnvironments(portablePipelineProto);
List<DataflowPackage> packages = stageArtifacts(portablePipelineProto);
portablePipelineProto = resolveArtifacts(portablePipelineProto);
portablePipelineProto = applySdkEnvironmentOverrides(portablePipelineProto, options);
if (LOG.isDebugEnabled()) {
LOG.debug(
"Portable pipeline proto:\n{}",
TextFormat.printer().printToString(portablePipelineProto));
}
// Stage the portable pipeline proto, retrieving the staged pipeline path, then update
// the options on the new job
// TODO: add an explicit `pipeline` parameter to the submission instead of pipeline options
LOG.info("Staging portable pipeline proto to {}", options.getStagingLocation());
byte[] serializedProtoPipeline = portablePipelineProto.toByteArray();
DataflowPackage stagedPipeline =
options.getStager().stageToFile(serializedProtoPipeline, PIPELINE_FILE_NAME);
dataflowOptions.setPipelineUrl(stagedPipeline.getLocation());
if (useUnifiedWorker(options)) {
LOG.info("Skipping v1 transform replacements since job will run on v2.");
} else {
// Now rewrite things to be as needed for v1 (mutates the pipeline)
// This way the job submitted is valid for v1 and v2, simultaneously
replaceV1Transforms(pipeline);
}
// Capture the SdkComponents for look up during step translations
SdkComponents dataflowV1Components = SdkComponents.create();
dataflowV1Components.registerEnvironment(
defaultEnvironmentForDataflow
.toBuilder()
.addAllDependencies(getDefaultArtifacts())
.addAllCapabilities(Environments.getJavaCapabilities())
.build());
// No need to perform transform upgrading for the Runner v1 proto.
RunnerApi.Pipeline dataflowV1PipelineProto =
PipelineTranslation.toProto(pipeline, dataflowV1Components, true, false);
if (LOG.isDebugEnabled()) {
LOG.debug(
"Dataflow v1 pipeline proto:\n{}",
TextFormat.printer().printToString(dataflowV1PipelineProto));
}
// Set a unique client_request_id in the CreateJob request.
// This is used to ensure idempotence of job creation across retried
// attempts to create a job. Specifically, if the service returns a job with
// a different client_request_id, it means the returned one is a different
// job previously created with the same job name, and that the job creation
// has been effectively rejected. The SDK should return
// Error::Already_Exists to user in that case.
int randomNum = new Random().nextInt(9000) + 1000;
String requestId =
DateTimeFormat.forPattern("YYYYMMddHHmmssmmm")
.withZone(DateTimeZone.UTC)
.print(DateTimeUtils.currentTimeMillis())
+ "_"
+ randomNum;
JobSpecification jobSpecification =
translator.translate(
pipeline, dataflowV1PipelineProto, dataflowV1Components, this, packages);
if (!isNullOrEmpty(dataflowOptions.getDataflowWorkerJar()) && !useUnifiedWorker(options)) {
List<String> experiments =
firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList());
if (!experiments.contains("use_staged_dataflow_worker_jar")) {
dataflowOptions.setExperiments(
ImmutableList.<String>builder()
.addAll(experiments)
.add("use_staged_dataflow_worker_jar")
.build());
}
}
Job newJob = jobSpecification.getJob();
try {
newJob
.getEnvironment()
.setSdkPipelineOptions(
MAPPER.readValue(MAPPER_WITH_MODULES.writeValueAsBytes(options), Map.class));
} catch (IOException e) {
throw new IllegalArgumentException(
"PipelineOptions specified failed to serialize to JSON.", e);
}
newJob.setClientRequestId(requestId);
DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo();
String version = dataflowRunnerInfo.getVersion();
checkState(
!"${pom.version}".equals(version),
"Unable to submit a job to the Dataflow service with unset version ${pom.version}");
LOG.info("Dataflow SDK version: {}", version);
newJob.getEnvironment().setUserAgent((Map) dataflowRunnerInfo.getProperties());
// The Dataflow Service may write to the temporary directory directly, so
// must be verified.
if (!isNullOrEmpty(options.getGcpTempLocation())) {
newJob
.getEnvironment()
.setTempStoragePrefix(
dataflowOptions.getPathValidator().verifyPath(options.getGcpTempLocation()));
}
newJob.getEnvironment().setDataset(options.getTempDatasetId());
if (options.getWorkerRegion() != null) {
newJob.getEnvironment().setWorkerRegion(options.getWorkerRegion());
}
if (options.getWorkerZone() != null) {
newJob.getEnvironment().setWorkerZone(options.getWorkerZone());
}
if (options.getFlexRSGoal()
== DataflowPipelineOptions.FlexResourceSchedulingGoal.COST_OPTIMIZED) {
newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_COST_OPTIMIZED");
} else if (options.getFlexRSGoal()
== DataflowPipelineOptions.FlexResourceSchedulingGoal.SPEED_OPTIMIZED) {
newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_SPEED_OPTIMIZED");
}
// Represent the minCpuPlatform pipeline option as an experiment, if not already present.
if (!isNullOrEmpty(dataflowOptions.getMinCpuPlatform())) {
List<String> experiments =
firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList());
List<String> minCpuFlags =
experiments.stream()
.filter(p -> p.startsWith("min_cpu_platform"))
.collect(Collectors.toList());
if (minCpuFlags.isEmpty()) {
dataflowOptions.setExperiments(
ImmutableList.<String>builder()
.addAll(experiments)
.add("min_cpu_platform=" + dataflowOptions.getMinCpuPlatform())
.build());
} else {
LOG.warn(
"Flag min_cpu_platform is defined in both top level PipelineOption, "
+ "as well as under experiments. Proceed using {}.",
minCpuFlags.get(0));
}
}
newJob
.getEnvironment()
.setExperiments(
ImmutableList.copyOf(
firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList())));
// Set the Docker container image that executes Dataflow worker harness, residing in Google
// Container Registry. Translator is guaranteed to create a worker pool prior to this point.
// For runner_v1, only worker_harness_container is set.
// For runner_v2, both worker_harness_container and sdk_harness_container are set to the same
// value.
String containerImage = getContainerImageForJob(options);
for (WorkerPool workerPool : newJob.getEnvironment().getWorkerPools()) {
workerPool.setWorkerHarnessContainerImage(containerImage);
}
configureSdkHarnessContainerImages(options, portablePipelineProto, newJob);
newJob.getEnvironment().setVersion(getEnvironmentVersion(options));
if (hooks != null) {
hooks.modifyEnvironmentBeforeSubmission(newJob.getEnvironment());
}
// enable upload_graph when the graph is too large
byte[] jobGraphBytes = DataflowPipelineTranslator.jobToString(newJob).getBytes(UTF_8);
int jobGraphByteSize = jobGraphBytes.length;
if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES
&& !hasExperiment(options, "upload_graph")
&& !useUnifiedWorker(options)) {
List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList());
options.setExperiments(
ImmutableList.<String>builder().addAll(experiments).add("upload_graph").build());
LOG.info(
"The job graph size ({} in bytes) is larger than {}. Automatically add "
+ "the upload_graph option to experiments.",
jobGraphByteSize,
CREATE_JOB_REQUEST_LIMIT_BYTES);
}
if (hasExperiment(options, "upload_graph") && useUnifiedWorker(options)) {
ArrayList<String> experiments = new ArrayList<>(options.getExperiments());
while (experiments.remove("upload_graph")) {}
options.setExperiments(experiments);
LOG.warn(
"The upload_graph experiment was specified, but it does not apply "
+ "to runner v2 jobs. Option has been automatically removed.");
}
// Upload the job to GCS and remove the graph object from the API call. The graph
// will be downloaded from GCS by the service.
if (hasExperiment(options, "upload_graph")) {
DataflowPackage stagedGraph =
options.getStager().stageToFile(jobGraphBytes, DATAFLOW_GRAPH_FILE_NAME);
newJob.getSteps().clear();
newJob.setStepsLocation(stagedGraph.getLocation());
}
if (!isNullOrEmpty(options.getDataflowJobFile())
|| !isNullOrEmpty(options.getTemplateLocation())) {
boolean isTemplate = !isNullOrEmpty(options.getTemplateLocation());
if (isTemplate) {
checkArgument(
isNullOrEmpty(options.getDataflowJobFile()),
"--dataflowJobFile and --templateLocation are mutually exclusive.");
}
String fileLocation =
firstNonNull(options.getTemplateLocation(), options.getDataflowJobFile());
checkArgument(
fileLocation.startsWith("/") || fileLocation.startsWith("gs://"),
"Location must be local or on Cloud Storage, got %s.",
fileLocation);
ResourceId fileResource = FileSystems.matchNewResource(fileLocation, false /* isDirectory */);
String workSpecJson = DataflowPipelineTranslator.jobToString(newJob);
try (PrintWriter printWriter =
new PrintWriter(
new BufferedWriter(
new OutputStreamWriter(
Channels.newOutputStream(FileSystems.create(fileResource, MimeTypes.TEXT)),
UTF_8)))) {
printWriter.print(workSpecJson);
LOG.info("Printed job specification to {}", fileLocation);
} catch (IOException ex) {
String error = String.format("Cannot create output file at %s", fileLocation);
if (isTemplate) {
throw new RuntimeException(error, ex);
} else {
LOG.warn(error, ex);
}
}
if (isTemplate) {
LOG.info("Template successfully created.");
return new DataflowTemplateJob();
}
}
String jobIdToUpdate = null;
if (options.isUpdate()) {
jobIdToUpdate = getJobIdFromName(options.getJobName());
newJob.setTransformNameMapping(options.getTransformNameMapping());
newJob.setReplaceJobId(jobIdToUpdate);
}
if (options.getCreateFromSnapshot() != null && !options.getCreateFromSnapshot().isEmpty()) {
newJob.setTransformNameMapping(options.getTransformNameMapping());
newJob.setCreatedFromSnapshotId(options.getCreateFromSnapshot());
}
Job jobResult;
try {
jobResult = dataflowClient.createJob(newJob);
} catch (GoogleJsonResponseException e) {
String errorMessages = "Unexpected errors";
if (e.getDetails() != null) {
if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES) {
errorMessages =
"The size of the serialized JSON representation of the pipeline "
+ "exceeds the allowable limit. "
+ "For more information, please see the documentation on job submission:\n"
+ "https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#jobs";
} else {
errorMessages = e.getDetails().getMessage();
}
}
throw new RuntimeException("Failed to create a workflow job: " + errorMessages, e);
} catch (IOException e) {
throw new RuntimeException("Failed to create a workflow job", e);
}
// Use a raw client for post-launch monitoring, as status calls may fail
// regularly and need not be retried automatically.
DataflowPipelineJob dataflowPipelineJob =
new DataflowPipelineJob(
DataflowClient.create(options),
jobResult.getId(),
options,
jobSpecification != null ? jobSpecification.getStepNames() : Collections.emptyMap(),
portablePipelineProto);
// If the service returned client request id, the SDK needs to compare it
// with the original id generated in the request, if they are not the same
// (i.e., the returned job is not created by this request), throw
// DataflowJobAlreadyExistsException or DataflowJobAlreadyUpdatedException
// depending on whether this is a reload or not.
if (jobResult.getClientRequestId() != null
&& !jobResult.getClientRequestId().isEmpty()
&& !jobResult.getClientRequestId().equals(requestId)) {
// If updating a job.
if (options.isUpdate()) {
throw new DataflowJobAlreadyUpdatedException(
dataflowPipelineJob,
String.format(
"The job named %s with id: %s has already been updated into job id: %s "
+ "and cannot be updated again.",
newJob.getName(), jobIdToUpdate, jobResult.getId()));
} else {
throw new DataflowJobAlreadyExistsException(
dataflowPipelineJob,
String.format(
"There is already an active job named %s with id: %s. If you want to submit a"
+ " second job, try again by setting a different name using --jobName.",
newJob.getName(), jobResult.getId()));
}
}
LOG.info(
"To access the Dataflow monitoring console, please navigate to {}",
MonitoringUtil.getJobMonitoringPageURL(
options.getProject(), options.getRegion(), jobResult.getId()));
LOG.info("Submitted job: {}", jobResult.getId());
LOG.info(
"To cancel the job using the 'gcloud' tool, run:\n> {}",
MonitoringUtil.getGcloudCancelCommand(options, jobResult.getId()));
return dataflowPipelineJob;
} | @Test
public void testUpdate() throws IOException {
DataflowPipelineOptions options = buildPipelineOptions();
options.setUpdate(true);
options.setJobName("oldJobName");
Pipeline p = buildDataflowPipeline(options);
DataflowPipelineJob job = (DataflowPipelineJob) p.run();
assertEquals("newid", job.getJobId());
ArgumentCaptor<Job> jobCaptor = ArgumentCaptor.forClass(Job.class);
Mockito.verify(mockJobs).create(eq(PROJECT_ID), eq(REGION_ID), jobCaptor.capture());
assertValidJob(jobCaptor.getValue());
} |
public static String from(Query query) {
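        // Unwrap the JPA Query to Hibernate's AbstractProducedQuery, fetch (or compile) the HQL
        // query plan from the session factory's plan cache, and return the first SQL statement
        // that the plan generates.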
AbstractProducedQuery abstractProducedQuery = query.unwrap(AbstractProducedQuery.class);
String[] sqls = abstractProducedQuery
.getProducer()
.getFactory()
.getQueryPlanCache()
.getHQLQueryPlan(abstractProducedQuery.getQueryString(), false, Collections.emptyMap())
.getSqlStrings();
return sqls.length > 0 ? sqls[0] : null;
} | @Test
public void testCriteriaAPI() {
doInJPA(entityManager -> {
CriteriaBuilder builder = entityManager.getCriteriaBuilder();
CriteriaQuery<PostComment> criteria = builder.createQuery(PostComment.class);
Root<PostComment> postComment = criteria.from(PostComment.class);
Join<PostComment, Post> post = postComment.join("post");
criteria.where(
builder.like(post.get("title"), "%Java%")
);
criteria.orderBy(
builder.asc(postComment.get("id"))
);
Query criteriaQuery = entityManager.createQuery(criteria);
String sql = SQLExtractor.from(criteriaQuery);
assertNotNull(sql);
LOGGER.info(
"The Criteria API query: [\n{}\n]\ngenerates the following SQL query: [\n{}\n]",
criteriaQuery.unwrap(org.hibernate.query.Query.class).getQueryString(),
sql
);
});
} |
public Span nextSpan(TraceContextOrSamplingFlags extracted) {
if (extracted == null) throw new NullPointerException("extracted == null");
TraceContext context = extracted.context();
if (context != null) return newChild(context);
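    // Only a trace ID (no span ID) was extracted: create a local root span that continues the
    // extracted trace, propagating its sampling flags and extra state.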
TraceIdContext traceIdContext = extracted.traceIdContext();
if (traceIdContext != null) {
return _toSpan(null, decorateContext(
InternalPropagation.instance.flags(extracted.traceIdContext()),
traceIdContext.traceIdHigh(),
traceIdContext.traceId(),
0L,
0L,
0L,
extracted.extra()
));
}
SamplingFlags samplingFlags = extracted.samplingFlags();
List<Object> extra = extracted.extra();
TraceContext parent = currentTraceContext.get();
int flags;
long traceIdHigh = 0L, traceId = 0L, localRootId = 0L, spanId = 0L;
if (parent != null) {
// At this point, we didn't extract trace IDs, but do have a trace in progress. Since typical
// trace sampling is up front, we retain the decision from the parent.
flags = InternalPropagation.instance.flags(parent);
traceIdHigh = parent.traceIdHigh();
traceId = parent.traceId();
localRootId = parent.localRootId();
spanId = parent.spanId();
extra = concat(extra, parent.extra());
} else {
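      // No extracted IDs and no trace in progress: fall back to the sampling decision carried
      // by the extracted flags.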
flags = InternalPropagation.instance.flags(samplingFlags);
}
return _toSpan(parent,
decorateContext(flags, traceIdHigh, traceId, localRootId, spanId, 0L, extra));
} | @Test void nextSpan_extractedTraceContext_extra() {
TraceContextOrSamplingFlags extracted = TraceContextOrSamplingFlags.newBuilder(context)
.addExtra(1L).build();
assertThat(tracer.nextSpan(extracted).context().extra())
.contains(1L);
} |
@Override
public void onKey(
int primaryCode, Keyboard.Key key, int multiTapIndex, int[] nearByKeyCodes, boolean fromUI) {
mParentListener.listener().onKey(primaryCode, key, multiTapIndex, nearByKeyCodes, fromUI);
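    // Dismiss the keyboard after a one-shot key press (anything but DELETE), or whenever ENTER
    // is pressed.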
if ((mInOneShot && primaryCode != KeyCodes.DELETE) || primaryCode == KeyCodes.ENTER) {
mKeyboardDismissAction.run();
}
} | @Test
public void testOnKey() {
final AnyKeyboard.AnyKey key = Mockito.mock(AnyKeyboard.AnyKey.class);
final int[] nearByKeyCodes = {3};
mUnderTest.onKey(1, key, 2, nearByKeyCodes, true);
final InOrder inOrder = Mockito.inOrder(mMockParentListener, mMockKeyboardDismissAction);
inOrder
.verify(mMockParentListener)
.onKey(
Mockito.eq(1),
Mockito.same(key),
Mockito.eq(2),
Mockito.same(nearByKeyCodes),
Mockito.eq(true));
inOrder.verifyNoMoreInteractions();
} |