focal_method (string, lengths 13–60.9k) | test_case (string, lengths 25–109k)
---|---
@ApiOperation(value = "Assign entity view to edge (assignEntityViewToEdge)",
notes = "Creates assignment of an existing entity view to an instance of The Edge. " +
EDGE_ASSIGN_ASYNC_FIRST_STEP_DESCRIPTION +
"Second, remote edge service will receive a copy of assignment entity view " +
EDGE_ASSIGN_RECEIVE_STEP_DESCRIPTION +
"Third, once entity view will be delivered to edge service, it's going to be available for usage on remote edge instance.")
@PreAuthorize("hasAuthority('TENANT_ADMIN')")
@RequestMapping(value = "/edge/{edgeId}/entityView/{entityViewId}", method = RequestMethod.POST)
@ResponseBody
public EntityView assignEntityViewToEdge(@PathVariable(EDGE_ID) String strEdgeId,
@PathVariable(ENTITY_VIEW_ID) String strEntityViewId) throws ThingsboardException {
checkParameter(EDGE_ID, strEdgeId);
checkParameter(ENTITY_VIEW_ID, strEntityViewId);
EdgeId edgeId = new EdgeId(toUUID(strEdgeId));
Edge edge = checkEdgeId(edgeId, Operation.READ);
EntityViewId entityViewId = new EntityViewId(toUUID(strEntityViewId));
checkEntityViewId(entityViewId, Operation.READ);
return tbEntityViewService.assignEntityViewToEdge(getTenantId(), getCurrentUser().getCustomerId(),
entityViewId, edge, getCurrentUser());
}
|
@Test
public void testAssignEntityViewToEdge() throws Exception {
Edge edge = constructEdge("My edge", "default");
Edge savedEdge = doPost("/api/edge", edge, Edge.class);
EntityView savedEntityView = getNewSavedEntityView("My entityView");
doPost("/api/edge/" + savedEdge.getId().getId().toString()
+ "/device/" + testDevice.getId().getId().toString(), Device.class);
doPost("/api/edge/" + savedEdge.getId().getId().toString()
+ "/entityView/" + savedEntityView.getId().getId().toString(), EntityView.class);
PageData<EntityView> pageData = doGetTypedWithPageLink("/api/edge/" + savedEdge.getId().getId().toString() + "/entityViews?",
PAGE_DATA_ENTITY_VIEW_TYPE_REF, new PageLink(100));
Assert.assertEquals(1, pageData.getData().size());
doDelete("/api/edge/" + savedEdge.getId().getId().toString()
+ "/entityView/" + savedEntityView.getId().getId().toString(), EntityView.class);
pageData = doGetTypedWithPageLink("/api/edge/" + savedEdge.getId().getId().toString() + "/entityViews?",
PAGE_DATA_ENTITY_VIEW_TYPE_REF, new PageLink(100));
Assert.assertEquals(0, pageData.getData().size());
}
|
@Override
public ParDoFn create(
PipelineOptions options,
CloudObject cloudUserFn,
List<SideInputInfo> sideInputInfos,
TupleTag<?> mainOutputTag,
Map<TupleTag<?>, Integer> outputTupleTagsToReceiverIndices,
DataflowExecutionContext<?> executionContext,
DataflowOperationContext operationContext)
throws Exception {
Coder<?> coder =
CloudObjects.coderFromCloudObject(
CloudObject.fromSpec(Structs.getObject(cloudUserFn, PropertyNames.ENCODING)));
Object key =
CoderUtils.decodeFromByteArray(
coder, Structs.getBytes(cloudUserFn, WorkerPropertyNames.ENCODED_KEY));
return new PairWithConstantKeyParDoFn(key);
}
|
@Test
public void testConversionOfRecord() throws Exception {
ParDoFn parDoFn =
new PairWithConstantKeyDoFnFactory()
.create(
null /* pipeline options */,
CloudObject.fromSpec(
ImmutableMap.of(
PropertyNames.OBJECT_TYPE_NAME, "PairWithConstantKeyDoFn",
WorkerPropertyNames.ENCODED_KEY,
StringUtils.byteArrayToJsonString(
CoderUtils.encodeToByteArray(BigEndianIntegerCoder.of(), 42)),
PropertyNames.ENCODING,
ImmutableMap.of(
PropertyNames.OBJECT_TYPE_NAME, "kind:fixed_big_endian_int32"))),
null /* side input infos */,
null /* main output tag */,
null /* output tag to receiver index */,
null /* execution context */,
null /* operation context */);
List<Object> outputReceiver = new ArrayList<>();
parDoFn.startBundle(outputReceiver::add);
parDoFn.processElement(valueInGlobalWindow(43));
assertThat(outputReceiver, contains(valueInGlobalWindow(KV.of(42, 43))));
}
|
@Override
public final ChannelPipeline addBefore(String baseName, String name, ChannelHandler handler) {
return addBefore(null, baseName, name, handler);
}
|
@Test
@Timeout(value = 3000, unit = TimeUnit.MILLISECONDS)
public void testAddBefore() throws Throwable {
ChannelPipeline pipeline1 = new LocalChannel().pipeline();
ChannelPipeline pipeline2 = new LocalChannel().pipeline();
EventLoopGroup defaultGroup = new DefaultEventLoopGroup(2);
try {
EventLoop eventLoop1 = defaultGroup.next();
EventLoop eventLoop2 = defaultGroup.next();
eventLoop1.register(pipeline1.channel()).syncUninterruptibly();
eventLoop2.register(pipeline2.channel()).syncUninterruptibly();
CountDownLatch latch = new CountDownLatch(2 * 10);
for (int i = 0; i < 10; i++) {
eventLoop1.execute(new TestTask(pipeline2, latch));
eventLoop2.execute(new TestTask(pipeline1, latch));
}
latch.await();
} finally {
defaultGroup.shutdownGracefully();
}
}
|
@Override
public void registerStore(final StateStore store,
final StateRestoreCallback stateRestoreCallback,
final CommitCallback commitCallback) {
final String storeName = store.name();
// TODO (KAFKA-12887): we should not trigger user's exception handler for illegal-argument but always
// fail-crash; in this case we would not need to immediately close the state store before throwing
if (CHECKPOINT_FILE_NAME.equals(storeName)) {
store.close();
throw new IllegalArgumentException(format("%sIllegal store name: %s, which collides with the pre-defined " +
"checkpoint file name", logPrefix, storeName));
}
if (stores.containsKey(storeName)) {
store.close();
throw new IllegalArgumentException(format("%sStore %s has already been registered.", logPrefix, storeName));
}
if (stateRestoreCallback instanceof StateRestoreListener) {
log.warn("The registered state restore callback is also implementing the state restore listener interface, " +
"which is not expected and would be ignored");
}
final StateStoreMetadata storeMetadata = isLoggingEnabled(storeName) ?
new StateStoreMetadata(
store,
getStorePartition(storeName),
stateRestoreCallback,
commitCallback,
converterForStore(store)) :
new StateStoreMetadata(store, commitCallback);
// register the store first, so that if an exception is thrown later, this state store will still be
// closed when `close` is eventually called on the state manager
stores.put(storeName, storeMetadata);
if (!stateUpdaterEnabled) {
maybeRegisterStoreWithChangelogReader(storeName);
}
log.debug("Registered state store {} to its state manager", storeName);
}
|
@Test
public void shouldPreserveStreamsExceptionOnCloseIfStoreThrows() {
final StreamsException exception = new StreamsException("KABOOM!");
final ProcessorStateManager stateManager = getStateManager(Task.TaskType.ACTIVE);
final MockKeyValueStore stateStore = new MockKeyValueStore(persistentStoreName, true) {
@Override
public void close() {
throw exception;
}
};
stateManager.registerStore(stateStore, stateStore.stateRestoreCallback, null);
final StreamsException thrown = assertThrows(StreamsException.class, stateManager::close);
assertEquals(exception, thrown);
}
|
@Override
public LoginIdentityContext getLoginIdentityContext(RequestResource resource) {
LoginIdentityContext result = new LoginIdentityContext();
if (!ramContext.validate() || notFountInjector(resource.getType())) {
return result;
}
resourceInjectors.get(resource.getType()).doInject(resource, ramContext, result);
return result;
}
|
@Test
void testGetLoginIdentityContextWithoutLogin() {
LoginIdentityContext actual = ramClientAuthService.getLoginIdentityContext(resource);
assertTrue(actual.getAllKey().isEmpty());
verify(mockResourceInjector, never()).doInject(resource, ramContext, actual);
}
|
public EvictionConfig getEvictionConfig() {
return evictionConfig;
}
|
@Test(expected = IllegalArgumentException.class)
public void testMaxSize_whenValueIsNegative_thenThrowException() {
config.getEvictionConfig().setSize(-1);
}
|
@Override
public void close() throws IOException {
close(true);
}
|
@Test
public void testStagingDirectoryCreation() throws IOException {
S3FileIOProperties newStagingDirectoryAwsProperties =
new S3FileIOProperties(
ImmutableMap.of(S3FileIOProperties.STAGING_DIRECTORY, newTmpDirectory));
S3OutputStream stream =
new S3OutputStream(s3, randomURI(), newStagingDirectoryAwsProperties, nullMetrics());
stream.close();
}
|
public OkHttpClient get(boolean keepAlive, boolean skipTLSVerify) {
try {
return cache.get(Parameters.fromBoolean(keepAlive, skipTLSVerify));
} catch (ExecutionException e) {
throw new RuntimeException(e);
}
}
|
@Test
public void testWithTlsVerifyNoKeepAlive() throws IOException, NoSuchAlgorithmException, KeyStoreException, KeyManagementException {
final ParameterizedHttpClientProvider provider = new ParameterizedHttpClientProvider(client(null), clientCertificates.sslSocketFactory(), clientCertificates.trustManager());
final OkHttpClient okHttpClient = provider.get(false, false);
assertThat(okHttpClient.sslSocketFactory().createSocket().getOption(StandardSocketOptions.SO_KEEPALIVE)).isFalse();
try (Response response = okHttpClient.newCall(new Request.Builder().url(server.url("/")).get().build()).execute()) {
assertThat(response.isSuccessful()).isTrue();
}
}
|
@Override
public synchronized DeviceEvent createOrUpdateDevice(ProviderId providerId,
DeviceId deviceId,
DeviceDescription deviceDescription) {
NodeId localNode = clusterService.getLocalNode().id();
NodeId deviceNode = mastershipService.getMasterFor(deviceId);
boolean isMaster = localNode.equals(deviceNode);
// Process device update only if we're the master,
// otherwise signal the actual master.
DeviceEvent deviceEvent = null;
// If this node is the master for the device, acquire a new timestamp. Otherwise,
// use a 0,0 or tombstone timestamp to create the device if it doesn't already exist.
Timestamp newTimestamp;
try {
newTimestamp = isMaster
? deviceClockService.getTimestamp(deviceId)
: removalRequest.getOrDefault(deviceId, DEFAULT_TIMESTAMP);
} catch (IllegalStateException e) {
newTimestamp = removalRequest.getOrDefault(deviceId, DEFAULT_TIMESTAMP);
isMaster = false;
}
final Timestamped<DeviceDescription> deltaDesc = new Timestamped<>(deviceDescription, newTimestamp);
final Timestamped<DeviceDescription> mergedDesc;
final Map<ProviderId, DeviceDescriptions> device = getOrCreateDeviceDescriptionsMap(deviceId);
synchronized (device) {
deviceEvent = createOrUpdateDeviceInternal(providerId, deviceId, deltaDesc);
if (deviceEvent == null) {
return null;
}
mergedDesc = device.get(providerId).getDeviceDesc();
}
// If this node is the master for the device, update peers.
if (isMaster) {
log.debug("Notifying peers of a device update topology event for providerId: {} and deviceId: {}",
providerId, deviceId);
notifyPeers(new InternalDeviceEvent(providerId, deviceId, mergedDesc));
}
notifyDelegateIfNotNull(deviceEvent);
return deviceEvent;
}
|
@Test
public final void testCreateOrUpdateDevice() throws IOException {
DeviceDescription description =
new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
HW, SW1, SN, CID);
Capture<InternalDeviceEvent> message = Capture.newInstance();
Capture<MessageSubject> subject = Capture.newInstance();
Capture<Function<InternalDeviceEvent, byte[]>> encoder = Capture.newInstance();
resetCommunicatorExpectingSingleBroadcast(message, subject, encoder);
DeviceEvent event = deviceStore.createOrUpdateDevice(PID, DID1, description);
assertEquals(DEVICE_ADDED, event.type());
assertDevice(DID1, SW1, event.subject());
verify(clusterCommunicator);
assertInternalDeviceEvent(NID1, DID1, PID, description, message, subject, encoder);
DeviceDescription description2 =
new DefaultDeviceDescription(DID1.uri(), SWITCH, MFR,
HW, SW2, SN, CID);
resetCommunicatorExpectingSingleBroadcast(message, subject, encoder);
DeviceEvent event2 = deviceStore.createOrUpdateDevice(PID, DID1, description2);
assertEquals(DEVICE_UPDATED, event2.type());
assertDevice(DID1, SW2, event2.subject());
verify(clusterCommunicator);
assertInternalDeviceEvent(NID1, DID1, PID, description2, message, subject, encoder);
reset(clusterCommunicator);
assertNull("No change expected", deviceStore.createOrUpdateDevice(PID, DID1, description2));
}
|
@Override
@SuppressWarnings({"CastCanBeRemovedNarrowingVariableType", "unchecked"})
public E poll() {
final E[] buffer = consumerBuffer;
final long index = consumerIndex;
final long mask = consumerMask;
final long offset = modifiedCalcElementOffset(index, mask);
Object e = lvElement(buffer, offset);// LoadLoad
if (e == null) {
if (index != lvProducerIndex(this)) {
// poll() == null iff queue is empty, null element is not strong enough indicator, so we
// must check the producer index. If the queue is indeed not empty we spin until element is
// visible.
do {
e = lvElement(buffer, offset);
} while (e == null);
} else {
return null;
}
}
if (e == JUMP) {
final E[] nextBuffer = getNextBuffer(buffer, mask);
return newBufferPoll(nextBuffer, index);
}
soElement(buffer, offset, null);
soConsumerIndex(this, index + 2);
return (E) e;
}
|
@Test(dataProvider = "empty")
public void poll_whenEmpty(MpscGrowableArrayQueue<Integer> queue) {
assertThat(queue.poll()).isNull();
}
|
public ScalarFn loadScalarFunction(List<String> functionPath, String jarPath) {
String functionFullName = String.join(".", functionPath);
try {
FunctionDefinitions functionDefinitions = loadJar(jarPath);
if (!functionDefinitions.scalarFunctions().containsKey(functionPath)) {
throw new IllegalArgumentException(
String.format(
"No implementation of scalar function %s found in %s.%n"
+ " 1. Create a class implementing %s and annotate it with @AutoService(%s.class).%n"
+ " 2. Add function %s to the class's userDefinedScalarFunctions implementation.",
functionFullName,
jarPath,
UdfProvider.class.getSimpleName(),
UdfProvider.class.getSimpleName(),
functionFullName));
}
return functionDefinitions.scalarFunctions().get(functionPath);
} catch (IOException e) {
throw new RuntimeException(
String.format(
"Failed to load user-defined scalar function %s from %s", functionFullName, jarPath),
e);
}
}
|
@Test
public void testLoadUnregisteredScalarFunctionThrowsRuntimeException() {
JavaUdfLoader udfLoader = new JavaUdfLoader();
thrown.expect(RuntimeException.class);
thrown.expectMessage(
String.format("No implementation of scalar function notRegistered found in %s.", jarPath));
udfLoader.loadScalarFunction(Collections.singletonList("notRegistered"), jarPath);
}
|
public Collection<DatabaseType> getAllBranchDatabaseTypes() {
return ShardingSphereServiceLoader.getServiceInstances(DatabaseType.class)
.stream().filter(each -> each.getTrunkDatabaseType().map(optional -> optional == databaseType).orElse(false)).collect(Collectors.toList());
}
|
@Test
void assertGetAllBranchDatabaseTypes() {
Collection<DatabaseType> actual = new DatabaseTypeRegistry(TypedSPILoader.getService(DatabaseType.class, "TRUNK")).getAllBranchDatabaseTypes();
assertThat(actual, is(Collections.singletonList(TypedSPILoader.getService(DatabaseType.class, "BRANCH"))));
}
|
@Override
public boolean match(String attributeValue) {
if (attributeValue == null) {
return false;
}
switch (type) {
case Equals:
return attributeValue.equals(value);
case StartsWith:
return (length == -1 || length == attributeValue.length()) && attributeValue.startsWith(value);
case EndsWith:
return (length == -1 || length == attributeValue.length()) && attributeValue.endsWith(value);
case Contains:
return attributeValue.contains(value);
case Regexp:
return regexPattern.matcher(attributeValue).matches();
default:
throw new IllegalStateException("Unexpected type " + type);
}
}
|
@Test
public void testEscapeChar() {
assertTrue(new LikeCondition("a\\%b").match("a%b"));
assertFalse(new LikeCondition("a\\%b").match("aXb"));
assertFalse(new LikeCondition("a\\%b").match("ab"));
assertTrue(new LikeCondition("a\\\\b").match("a\\b"));
assertTrue(new LikeCondition("a~%b", '~').match("a%b"));
assertFalse(new LikeCondition("a~%b", '~').match("aXb"));
assertFalse(new LikeCondition("a~%b", '~').match("ab"));
assertTrue(new LikeCondition("a~~b", '~').match("a~b"));
}
|
@Override
public String toString() {
return namespace + "/" + name;
}
|
@Test
public void testToString() {
NamespaceAndName nan = new NamespaceAndName("namespace1", "name1");
assertThat(nan.toString(), is("namespace1/name1"));
}
|
@Override
public void writeTo(ByteBuf byteBuf) throws LispWriterException {
WRITER.writeTo(byteBuf, this);
}
|
@Test
public void testSerialization() throws LispReaderException, LispWriterException,
LispParseError, DeserializationException {
ByteBuf byteBuf = Unpooled.buffer();
ReferralRecordWriter writer = new ReferralRecordWriter();
writer.writeTo(byteBuf, record1);
ReferralRecordReader reader = new ReferralRecordReader();
LispReferralRecord deserialized = reader.readFrom(byteBuf);
new EqualsTester()
.addEqualityGroup(record1, deserialized).testEquals();
}
|
public SQLRewriteResult rewrite(final QueryContext queryContext, final RouteContext routeContext, final ConnectionContext connectionContext) {
SQLRewriteContext sqlRewriteContext = createSQLRewriteContext(queryContext, routeContext, connectionContext);
SQLTranslatorRule rule = globalRuleMetaData.getSingleRule(SQLTranslatorRule.class);
return routeContext.getRouteUnits().isEmpty()
? new GenericSQLRewriteEngine(rule, database, globalRuleMetaData).rewrite(sqlRewriteContext, queryContext)
: new RouteSQLRewriteEngine(rule, database, globalRuleMetaData).rewrite(sqlRewriteContext, routeContext, queryContext);
}
|
@Test
void assertRewriteForRouteSQLRewriteResult() {
ShardingSphereDatabase database = new ShardingSphereDatabase(DefaultDatabase.LOGIC_NAME, TypedSPILoader.getService(DatabaseType.class, "H2"), mockResourceMetaData(),
mock(RuleMetaData.class), Collections.singletonMap("test", mock(ShardingSphereSchema.class)));
SQLTranslatorRule sqlTranslatorRule = mock(SQLTranslatorRule.class);
when(sqlTranslatorRule.translate(any(), any(), any(), any(), any(), any())).thenReturn(new SQLTranslatorContext("", Collections.emptyList()));
SQLRewriteEntry sqlRewriteEntry = new SQLRewriteEntry(database, new RuleMetaData(Collections.singleton(sqlTranslatorRule)), new ConfigurationProperties(new Properties()));
RouteContext routeContext = new RouteContext();
RouteUnit firstRouteUnit = mock(RouteUnit.class);
when(firstRouteUnit.getDataSourceMapper()).thenReturn(new RouteMapper("ds", "ds_0"));
RouteUnit secondRouteUnit = mock(RouteUnit.class);
when(secondRouteUnit.getDataSourceMapper()).thenReturn(new RouteMapper("ds", "ds_1"));
routeContext.getRouteUnits().addAll(Arrays.asList(firstRouteUnit, secondRouteUnit));
RouteSQLRewriteResult sqlRewriteResult = (RouteSQLRewriteResult) sqlRewriteEntry.rewrite(createQueryContext(), routeContext, mock(ConnectionContext.class));
assertThat(sqlRewriteResult.getSqlRewriteUnits().size(), is(2));
}
|
@Override
public boolean complete() {
if (snapshotInProgress) {
return false;
}
while (emitFromTraverser(pendingTraverser)) {
try {
Message t = consumer.receiveNoWait();
if (t == null) {
pendingTraverser = eventTimeMapper.flatMapIdle();
break;
}
if (guarantee == EXACTLY_ONCE) {
// We don't know whether the messages with the restored IDs were acknowledged in the previous
// execution or not. They are acknowledged in phase-2 of the snapshot which might not be executed.
// If we receive a message with a restored ID, we ignore it. But if we don't receive some ID,
// we can never safely throw it out.
// In order to avoid storing the restored IDs forever, we set a timeout after which we clear the
// collection. We start the timeout after receiving the first message, at which time we know the
// broker is working. We assume it will redeliver the messages promptly; if it doesn't, we assume
// they were acknowledged in the previous execution or delivered to another processor in this
// execution.
if (restoredIdsExpiration == Long.MAX_VALUE) {
restoredIdsExpiration = System.nanoTime() + RESTORED_IDS_TTL;
} else if (!restoredIds.isEmpty() && restoredIdsExpiration <= System.nanoTime()) {
restoredIds = emptySet();
}
Object msgId = messageIdFn.apply(t);
if (msgId == null) {
throw new JetException("Received a message without an ID. All messages must have an ID, " +
"you can specify an extracting function using "
+ JmsSourceBuilder.class.getSimpleName() + ".messageIdFn()");
}
seenIds.add(msgId);
if (restoredIds.remove(msgId)) {
getLogger().fine("Redelivered message dropped: %s", t);
continue;
}
}
T projectedItem = projectionFn.apply(t);
pendingTraverser = projectedItem != null
? eventTimeMapper.flatMapEvent(projectedItem, 0, handleJmsTimestamp(t))
: eventTimeMapper.flatMapIdle();
} catch (JMSException e) {
throw sneakyThrow(e);
}
}
return false;
}
|
@Test
public void when_projectionToNull_then_filteredOut() throws Exception {
String queueName = randomString();
logger.info("using queue: " + queueName);
String message1 = sendMessage(queueName, true);
String message2 = sendMessage(queueName, true);
initializeProcessor(queueName, true, m -> {
String msg = ((TextMessage) m).getText();
return msg.equals(message1) ? null : msg;
});
Queue<Object> queue = outbox.queue(0);
List<Object> actualOutput = new ArrayList<>();
assertTrueEventually(() -> {
outbox.reset();
processor.complete();
Object item = queue.poll();
if (item != null) {
actualOutput.add(item);
}
assertEquals(singletonList(message2), actualOutput);
});
}
|
void addGetModelForKieBaseMethod(StringBuilder sb) {
sb.append(
" public java.util.List<Model> getModelsForKieBase(String kieBaseName) {\n");
if (!modelMethod.getKieBaseNames().isEmpty()) {
sb.append( " switch (kieBaseName) {\n");
for (String kBase : modelMethod.getKieBaseNames()) {
sb.append(" case \"" + kBase + "\": ");
List<String> models = modelsByKBase.get(kBase);
String collected = null;
if (models != null) {
collected = models.stream()
.map(element -> "new " + element + "()")
.collect(Collectors.joining(","));
}
sb.append(collected != null && !collected.isEmpty() ?
"return java.util.Arrays.asList( " + collected + " );\n" :
"return getModels();\n");
}
sb.append(" }\n");
}
sb.append(
" throw new IllegalArgumentException(\"Unknown KieBase: \" + kieBaseName);\n" +
" }\n" +
"\n" );
}
|
@Test
public void addGetModelForKieBaseMethodEmptyModelsByKBaseValuesTest() {
KieBaseModel kieBaseModel = getKieBaseModel("ModelTest");
Map<String, KieBaseModel> kBaseModels = new HashMap<>();
kBaseModels.put("default-kie", kieBaseModel);
Map<String, List<String>> modelsByKBase = new HashMap<>();
modelsByKBase.put("default-kie", Collections.emptyList());
ModelSourceClass modelSourceClass = new ModelSourceClass(RELEASE_ID, kBaseModels, modelsByKBase);
StringBuilder sb = new StringBuilder();
modelSourceClass.addGetModelForKieBaseMethod(sb);
String retrieved = sb.toString();
String expected = "switch (kieBaseName) {";
assertThat(retrieved.contains(expected)).isTrue();
expected = "case \"default-kie\": return getModels();";
assertThat(retrieved.contains(expected)).isTrue();
}
|
@Override
protected String generatePoetStringTypes() {
StringBuilder symbolBuilder = new StringBuilder();
if (getMethodReturnType().equals(theContract)) {
symbolBuilder.append(" %L = %T.");
} else {
symbolBuilder.append("val %L = %L.");
}
symbolBuilder
.append(method.getName())
.append("(")
.append(getPoetFormatSpecifier())
.append(").send()");
return symbolBuilder.toString();
}
|
@Test
public void testGenerateJavaPoetStringTypesWhenReturnTypeIsNotContract() {
List<Method> listOfFilteredMethods = MethodFilter.extractValidMethods(greeterContractClass);
Method newGreeting =
listOfFilteredMethods.stream()
.filter(m -> m.getName().equals("newGreeting"))
.collect(Collectors.toList())
.get(0);
KotlinParser parser =
new KotlinParser(greeterContractClass, newGreeting, new KotlinMappingHelper());
assertEquals("val %L = %L.newGreeting(%S).send()", parser.generatePoetStringTypes());
}
|
@Override
public V load(K key) {
awaitSuccessfulInit();
try (SqlResult queryResult = sqlService.execute(queries.load(), key)) {
Iterator<SqlRow> it = queryResult.iterator();
V value = null;
if (it.hasNext()) {
SqlRow sqlRow = it.next();
if (it.hasNext()) {
throw new IllegalStateException("multiple matching rows for a key " + key);
}
// If there is a single column as the value, return that column as the value
if (queryResult.getRowMetadata().getColumnCount() == 2 && genericMapStoreProperties.singleColumnAsValue) {
value = sqlRow.getObject(1);
} else {
//noinspection unchecked
value = (V) toGenericRecord(sqlRow, genericMapStoreProperties);
}
}
return value;
}
}
|
@Test
public void givenTableMultipleColumns_whenLoad_thenReturnGenericRecord() {
var spec = new ObjectSpec(mapName,
col("id", INT),
col("name", STRING),
col("age", INT),
col("address", STRING));
objectProvider.createObject(spec);
objectProvider.insertItems(spec, 1);
mapLoader = createMapLoader();
GenericRecord genericRecord = mapLoader.load(0);
assertThat(genericRecord.getInt32("id")).isZero();
assertThat(genericRecord.getString("name")).isEqualTo("name-0");
assertThat(genericRecord.getInt32("age")).isEqualTo(2);
assertThat(genericRecord.getString("address")).isEqualTo("address-0");
}
|
public Date parseString(String dateString) throws ParseException {
if (dateString == null || dateString.isEmpty()) {
return null;
}
Matcher xep82WoMillisMatcher = xep80DateTimeWoMillisPattern.matcher(dateString);
Matcher xep82Matcher = xep80DateTimePattern.matcher(dateString);
if (xep82WoMillisMatcher.matches() || xep82Matcher.matches()) {
String rfc822Date;
// Convert the ISO 8601 time zone string to a RFC822 compatible format
// since SimpleDateFormat supports ISO8601 only with Java7 or higher
if (dateString.charAt(dateString.length() - 1) == 'Z') {
rfc822Date = dateString.replace("Z", "+0000");
} else {
// If the time zone wasn't specified with 'Z', then it's in
// ISO8601 format (i.e. '(+|-)HH:mm')
// RFC822 needs a similar format just without the colon (i.e.
// '(+|-)HHmm)'), so remove it
int lastColon = dateString.lastIndexOf(':');
rfc822Date = dateString.substring(0, lastColon) + dateString.substring(lastColon + 1);
}
if (xep82WoMillisMatcher.matches()) {
synchronized (dateTimeFormatWoMillies) {
return dateTimeFormatWoMillies.parse(rfc822Date);
}
} else {
// OF-898: Replace any number of millisecond-characters with at most three of them.
rfc822Date = rfc822Date.replaceAll("(\\.[0-9]{3})[0-9]*", "$1");
synchronized (dateTimeFormat) {
return dateTimeFormat.parse(rfc822Date);
}
}
}
throw new ParseException("Date String could not be parsed: \"" + dateString + "\"", 0);
}
|
@Test
public void testFormatManySecondFractions() throws Exception
{
// Setup fixture
final String testValue = "2015-03-19T22:54:15.841473+00:00"; // Thu, 19 Mar 2015 22:54:15.841473 GMT
// Execute system under test
final Date result = xmppDateTimeFormat.parseString(testValue);
// Verify results
long expected = 1426805655841L; // Epoch value of Thu, 19 Mar 2015 22:54:15 GMT
assertEquals( expected, result.getTime() );
}
|
InputStream getInternal(@Nullable JobID jobId, BlobKey blobKey) throws IOException {
if (this.socket.isClosed()) {
throw new IllegalStateException(
"BLOB Client is not connected. "
+ "Client has been shut down or encountered an error before.");
}
if (LOG.isDebugEnabled()) {
LOG.debug("GET BLOB {}/{} from {}.", jobId, blobKey, socket.getLocalSocketAddress());
}
try {
OutputStream os = this.socket.getOutputStream();
InputStream is = this.socket.getInputStream();
// Send GET header
sendGetHeader(os, jobId, blobKey);
receiveAndCheckGetResponse(is);
return new BlobInputStream(is, blobKey, os);
} catch (Throwable t) {
BlobUtils.closeSilently(socket, LOG);
throw new IOException("GET operation failed: " + t.getMessage(), t);
}
}
|
@Test
void testSocketTimeout() throws IOException {
Configuration clientConfig = getBlobClientConfig();
int oldSoTimeout = clientConfig.get(BlobServerOptions.SO_TIMEOUT);
clientConfig.set(BlobServerOptions.SO_TIMEOUT, 50);
try (final TestBlobServer testBlobServer =
new TestBlobServer(
clientConfig,
tempDir.resolve("test_server").toFile(),
new VoidBlobStore(),
10_000L)) {
testBlobServer.start();
InetSocketAddress serverAddress =
new InetSocketAddress("localhost", testBlobServer.getPort());
try (BlobClient client = new BlobClient(serverAddress, clientConfig)) {
client.getInternal(new JobID(), BlobKey.createKey(TRANSIENT_BLOB));
fail("Should throw an exception.");
} catch (Throwable t) {
assertThat(ExceptionUtils.findThrowable(t, java.net.SocketTimeoutException.class))
.isPresent();
}
} finally {
clientConfig.set(BlobServerOptions.SO_TIMEOUT, oldSoTimeout);
}
}
|
public void clean(final Date now) {
List<String> files = this.findFiles();
List<String> expiredFiles = this.filterFiles(files, this.createExpiredFileFilter(now));
for (String f : expiredFiles) {
this.delete(new File(f));
}
if (this.totalSizeCap != CoreConstants.UNBOUNDED_TOTAL_SIZE_CAP && this.totalSizeCap > 0) {
this.capTotalSize(files);
}
List<String> emptyDirs = this.findEmptyDirs();
for (String dir : emptyDirs) {
this.delete(new File(dir));
}
}
|
@Test
public void keepsRecentFilesAndOlderFilesWithinTotalSizeCap() {
setupSizeCapTest();
remover.clean(EXPIRY);
for (File f : recentFiles) {
verify(fileProvider, never()).deleteFile(f);
}
for (File f : Arrays.asList(expiredFiles).subList(0, MAX_HISTORY - NUM_FILES_TO_KEEP)) {
verify(fileProvider, never()).deleteFile(f);
}
}
|
@Override
public void registerRemote(RemoteInstance remoteInstance) throws ServiceRegisterException {
if (needUsingInternalAddr()) {
remoteInstance = new RemoteInstance(new Address(config.getInternalComHost(), config.getInternalComPort(), true));
}
this.selfAddress = remoteInstance.getAddress();
String host = remoteInstance.getAddress().getHost();
int port = remoteInstance.getAddress().getPort();
try {
namingService.registerInstance(config.getServiceName(), host, port);
healthChecker.health();
} catch (Throwable e) {
healthChecker.unHealth(e);
throw new ServiceRegisterException(e.getMessage());
}
}
|
@Test
public void registerRemoteUsingInternal() throws NacosException {
nacosConfig.setInternalComHost(internalAddress.getHost());
nacosConfig.setInternalComPort(internalAddress.getPort());
registerRemote(internalAddress);
}
|
boolean isWriteEnclosureForValueMetaInterface( ValueMetaInterface v ) {
return ( isWriteEnclosed( v ) )
|| isEnclosureFixDisabledAndContainsSeparatorOrEnclosure( v.getName().getBytes() );
}
|
@Test
public void testWriteEnclosedForValueMetaInterfaceWithEnclosureForcedAndEnclosureFixDisabled() {
TextFileOutputData data = new TextFileOutputData();
data.binaryEnclosure = new byte[]{101};
data.binarySeparator = new byte[]{101};
data.writer = new ByteArrayOutputStream();
TextFileOutputMeta meta = getTextFileOutputMeta();
meta.setEnclosureForced(false);
meta.setEnclosureFixDisabled(false);
TextFileOutput textFileOutput = getTextFileOutput(data, meta);
ValueMetaBase valueMetaInterface = getValueMetaInterface();
assertTrue(textFileOutput.isWriteEnclosureForValueMetaInterface(valueMetaInterface));
}
|
static AuthorizationResult getDefaultResult(Map<String, ?> configs) {
Object configValue = configs.get(ALLOW_EVERYONE_IF_NO_ACL_IS_FOUND_CONFIG);
if (configValue == null) return DENIED;
return Boolean.parseBoolean(configValue.toString().trim()) ? ALLOWED : DENIED;
}
|
@Test
public void testGetDefaultResult() {
assertEquals(DENIED, getDefaultResult(Collections.emptyMap()));
assertEquals(ALLOWED, getDefaultResult(Collections.singletonMap(
ALLOW_EVERYONE_IF_NO_ACL_IS_FOUND_CONFIG, "true")));
assertEquals(DENIED, getDefaultResult(Collections.singletonMap(
ALLOW_EVERYONE_IF_NO_ACL_IS_FOUND_CONFIG, "false")));
}
|
public static void init(Meter meter, Supplier<AttributesBuilder> attributesBuilderSupplier,
MessageStoreConfig storeConfig, MessageStoreFetcher fetcher,
FlatFileStore flatFileStore, MessageStore next) {
TieredStoreMetricsManager.attributesBuilderSupplier = attributesBuilderSupplier;
apiLatency = meter.histogramBuilder(HISTOGRAM_API_LATENCY)
.setDescription("Tiered store rpc latency")
.setUnit("milliseconds")
.ofLongs()
.build();
providerRpcLatency = meter.histogramBuilder(HISTOGRAM_PROVIDER_RPC_LATENCY)
.setDescription("Tiered store rpc latency")
.setUnit("milliseconds")
.ofLongs()
.build();
uploadBytes = meter.histogramBuilder(HISTOGRAM_UPLOAD_BYTES)
.setDescription("Tiered store upload buffer size")
.setUnit("bytes")
.ofLongs()
.build();
downloadBytes = meter.histogramBuilder(HISTOGRAM_DOWNLOAD_BYTES)
.setDescription("Tiered store download buffer size")
.setUnit("bytes")
.ofLongs()
.build();
dispatchBehind = meter.gaugeBuilder(GAUGE_DISPATCH_BEHIND)
.setDescription("Tiered store dispatch behind message count")
.ofLongs()
.buildWithCallback(measurement -> {
for (FlatMessageFile flatFile : flatFileStore.deepCopyFlatFileToList()) {
MessageQueue mq = flatFile.getMessageQueue();
long maxOffset = next.getMaxOffsetInQueue(mq.getTopic(), mq.getQueueId());
long maxTimestamp = next.getMessageStoreTimeStamp(mq.getTopic(), mq.getQueueId(), maxOffset - 1);
if (maxTimestamp > 0 && System.currentTimeMillis() - maxTimestamp > TimeUnit.HOURS.toMillis(flatFile.getFileReservedHours())) {
continue;
}
Attributes commitLogAttributes = newAttributesBuilder()
.put(LABEL_TOPIC, mq.getTopic())
.put(LABEL_QUEUE_ID, mq.getQueueId())
.put(LABEL_FILE_TYPE, FileSegmentType.COMMIT_LOG.name().toLowerCase())
.build();
Attributes consumeQueueAttributes = newAttributesBuilder()
.put(LABEL_TOPIC, mq.getTopic())
.put(LABEL_QUEUE_ID, mq.getQueueId())
.put(LABEL_FILE_TYPE, FileSegmentType.CONSUME_QUEUE.name().toLowerCase())
.build();
measurement.record(Math.max(maxOffset - flatFile.getConsumeQueueMaxOffset(), 0), consumeQueueAttributes);
}
});
dispatchLatency = meter.gaugeBuilder(GAUGE_DISPATCH_LATENCY)
.setDescription("Tiered store dispatch latency")
.setUnit("seconds")
.ofLongs()
.buildWithCallback(measurement -> {
for (FlatMessageFile flatFile : flatFileStore.deepCopyFlatFileToList()) {
MessageQueue mq = flatFile.getMessageQueue();
long maxOffset = next.getMaxOffsetInQueue(mq.getTopic(), mq.getQueueId());
long maxTimestamp = next.getMessageStoreTimeStamp(mq.getTopic(), mq.getQueueId(), maxOffset - 1);
if (maxTimestamp > 0 && System.currentTimeMillis() - maxTimestamp > TimeUnit.HOURS.toMillis(flatFile.getFileReservedHours())) {
continue;
}
Attributes commitLogAttributes = newAttributesBuilder()
.put(LABEL_TOPIC, mq.getTopic())
.put(LABEL_QUEUE_ID, mq.getQueueId())
.put(LABEL_FILE_TYPE, FileSegmentType.COMMIT_LOG.name().toLowerCase())
.build();
Attributes consumeQueueAttributes = newAttributesBuilder()
.put(LABEL_TOPIC, mq.getTopic())
.put(LABEL_QUEUE_ID, mq.getQueueId())
.put(LABEL_FILE_TYPE, FileSegmentType.CONSUME_QUEUE.name().toLowerCase())
.build();
long consumeQueueDispatchOffset = flatFile.getConsumeQueueMaxOffset();
long consumeQueueDispatchLatency = next.getMessageStoreTimeStamp(mq.getTopic(), mq.getQueueId(), consumeQueueDispatchOffset);
if (maxOffset <= consumeQueueDispatchOffset || consumeQueueDispatchLatency < 0) {
measurement.record(0, consumeQueueAttributes);
} else {
measurement.record(System.currentTimeMillis() - consumeQueueDispatchLatency, consumeQueueAttributes);
}
}
});
messagesDispatchTotal = meter.counterBuilder(COUNTER_MESSAGES_DISPATCH_TOTAL)
.setDescription("Total number of dispatch messages")
.build();
messagesOutTotal = meter.counterBuilder(COUNTER_MESSAGES_OUT_TOTAL)
.setDescription("Total number of outgoing messages")
.build();
fallbackTotal = meter.counterBuilder(COUNTER_GET_MESSAGE_FALLBACK_TOTAL)
.setDescription("Total times of fallback to next store when getting message")
.build();
cacheCount = meter.gaugeBuilder(GAUGE_CACHE_COUNT)
.setDescription("Tiered store cache message count")
.ofLongs()
.buildWithCallback(measurement -> {
if (fetcher instanceof MessageStoreFetcherImpl) {
long count = ((MessageStoreFetcherImpl) fetcher).getFetcherCache().stats().loadCount();
measurement.record(count, newAttributesBuilder().build());
}
});
cacheBytes = meter.gaugeBuilder(GAUGE_CACHE_BYTES)
.setDescription("Tiered store cache message bytes")
.setUnit("bytes")
.ofLongs()
.buildWithCallback(measurement -> {
if (fetcher instanceof MessageStoreFetcherImpl) {
long count = ((MessageStoreFetcherImpl) fetcher).getFetcherCache().estimatedSize();
measurement.record(count, newAttributesBuilder().build());
}
});
cacheAccess = meter.counterBuilder(COUNTER_CACHE_ACCESS)
.setDescription("Tiered store cache access count")
.build();
cacheHit = meter.counterBuilder(COUNTER_CACHE_HIT)
.setDescription("Tiered store cache hit count")
.build();
storageSize = meter.gaugeBuilder(GAUGE_STORAGE_SIZE)
.setDescription("Broker storage size")
.setUnit("bytes")
.ofLongs()
.buildWithCallback(measurement -> {
Map<String, Map<FileSegmentType, Long>> topicFileSizeMap = new HashMap<>();
try {
MetadataStore metadataStore = flatFileStore.getMetadataStore();
metadataStore.iterateFileSegment(fileSegment -> {
Map<FileSegmentType, Long> subMap =
topicFileSizeMap.computeIfAbsent(fileSegment.getPath(), k -> new HashMap<>());
FileSegmentType fileSegmentType =
FileSegmentType.valueOf(fileSegment.getType());
Long size = subMap.computeIfAbsent(fileSegmentType, k -> 0L);
subMap.put(fileSegmentType, size + fileSegment.getSize());
});
} catch (Exception e) {
log.error("Failed to get storage size", e);
}
topicFileSizeMap.forEach((topic, subMap) -> {
subMap.forEach((fileSegmentType, size) -> {
Attributes attributes = newAttributesBuilder()
.put(LABEL_TOPIC, topic)
.put(LABEL_FILE_TYPE, fileSegmentType.name().toLowerCase())
.build();
measurement.record(size, attributes);
});
});
});
storageMessageReserveTime = meter.gaugeBuilder(GAUGE_STORAGE_MESSAGE_RESERVE_TIME)
.setDescription("Broker message reserve time")
.setUnit("milliseconds")
.ofLongs()
.buildWithCallback(measurement -> {
for (FlatMessageFile flatFile : flatFileStore.deepCopyFlatFileToList()) {
long timestamp = flatFile.getMinStoreTimestamp();
if (timestamp > 0) {
MessageQueue mq = flatFile.getMessageQueue();
Attributes attributes = newAttributesBuilder()
.put(LABEL_TOPIC, mq.getTopic())
.put(LABEL_QUEUE_ID, mq.getQueueId())
.build();
measurement.record(System.currentTimeMillis() - timestamp, attributes);
}
}
});
}
|
@Test
public void init() {
MessageStoreConfig storeConfig = new MessageStoreConfig();
storeConfig.setTieredBackendServiceProvider(PosixFileSegment.class.getName());
TieredMessageStore messageStore = Mockito.mock(TieredMessageStore.class);
Mockito.when(messageStore.getStoreConfig()).thenReturn(storeConfig);
Mockito.when(messageStore.getFlatFileStore()).thenReturn(Mockito.mock(FlatFileStore.class));
MessageStoreFetcherImpl fetcher = Mockito.spy(new MessageStoreFetcherImpl(messageStore));
TieredStoreMetricsManager.init(
OpenTelemetrySdk.builder().build().getMeter(""),
null, storeConfig, fetcher,
Mockito.mock(FlatFileStore.class), Mockito.mock(DefaultMessageStore.class));
}
|
public static Long jsToInteger( Object value, Class<?> clazz ) {
if ( Number.class.isAssignableFrom( clazz ) ) {
return ( (Number) value ).longValue();
} else {
String classType = clazz.getName();
if ( classType.equalsIgnoreCase( "java.lang.String" ) ) {
return ( new Long( (String) value ) );
} else if ( classType.equalsIgnoreCase( JS_UNDEFINED ) ) {
return null;
} else if ( classType.equalsIgnoreCase( JS_NATIVE_NUM ) ) {
Number nb = Context.toNumber( value );
return nb.longValue();
} else if ( classType.equalsIgnoreCase( JS_NATIVE_JAVA_OBJ ) ) {
// Is it a Value?
//
try {
Value v = (Value) Context.jsToJava( value, Value.class );
return v.getInteger();
} catch ( Exception e2 ) {
String string = Context.toString( value );
return Long.parseLong( Const.trim( string ) );
}
} else {
return Long.parseLong( value.toString() );
}
}
}
|
@Test
public void jsToInteger_NativeJavaObject_Double() throws Exception {
Scriptable value = getDoubleValue();
assertEquals( LONG_ONE, JavaScriptUtils.jsToInteger( value, NativeJavaObject.class ) );
}
|
public void printLine(String line) {
context.console().printLine(line);
}
|
@Test
public void shouldDelegatePrintLineToConsole() {
String line = "some line";
consoleLogger.printLine(line);
verify(mockedConsole).printLine(line);
}
|
public long getDefaultValue() {
return defaultValue;
}
|
@Test
public void getDefaultValue_ReturnsDefaultValue() {
assertEquals(50, longRangeAttribute.getDefaultValue());
}
|
public static Connection OpenConnection( String serveur, int port, String username, String password,
boolean useKey, String keyFilename, String passPhrase, int timeOut, VariableSpace space, String proxyhost,
int proxyport, String proxyusername, String proxypassword ) throws KettleException {
Connection conn = null;
char[] content = null;
boolean isAuthenticated = false;
try {
// perform some checks
if ( useKey ) {
if ( Utils.isEmpty( keyFilename ) ) {
throw new KettleException( BaseMessages.getString( SSHMeta.PKG, "SSH.Error.PrivateKeyFileMissing" ) );
}
FileObject keyFileObject = KettleVFS.getFileObject( keyFilename );
if ( !keyFileObject.exists() ) {
throw new KettleException( BaseMessages.getString( SSHMeta.PKG, "SSH.Error.PrivateKeyNotExist", keyFilename ) );
}
FileContent keyFileContent = keyFileObject.getContent();
CharArrayWriter charArrayWriter = new CharArrayWriter( (int) keyFileContent.getSize() );
try ( InputStream in = keyFileContent.getInputStream() ) {
IOUtils.copy( in, charArrayWriter );
}
content = charArrayWriter.toCharArray();
}
// Create a new connection
conn = createConnection( serveur, port );
/* We want to connect through a HTTP proxy */
if ( !Utils.isEmpty( proxyhost ) ) {
/* Now connect */
// if the proxy requires basic authentication:
if ( !Utils.isEmpty( proxyusername ) ) {
conn.setProxyData( new HTTPProxyData( proxyhost, proxyport, proxyusername, proxypassword ) );
} else {
conn.setProxyData( new HTTPProxyData( proxyhost, proxyport ) );
}
}
// and connect
if ( timeOut == 0 ) {
conn.connect();
} else {
conn.connect( null, 0, timeOut * 1000 );
}
// authenticate
if ( useKey ) {
isAuthenticated =
conn.authenticateWithPublicKey( username, content, space.environmentSubstitute( passPhrase ) );
} else {
isAuthenticated = conn.authenticateWithPassword( username, password );
}
if ( isAuthenticated == false ) {
throw new KettleException( BaseMessages.getString( SSHMeta.PKG, "SSH.Error.AuthenticationFailed", username ) );
}
} catch ( Exception e ) {
// Something wrong happened
// do not forget to disconnect if connected
if ( conn != null ) {
conn.close();
}
throw new KettleException( BaseMessages.getString( SSHMeta.PKG, "SSH.Error.ErrorConnecting", serveur, username ), e );
}
return conn;
}
|
@Test
public void testOpenConnectionTimeOut() throws Exception {
when( connection.authenticateWithPassword( username, password ) ).thenReturn( true );
assertNotNull( SSHData.OpenConnection( server, port, username, password, false, null,
null, 100, null, null, proxyPort, proxyUsername, proxyPassword ) );
verify( connection ).connect( isNull(), eq( 0 ), eq( 100 * 1000 ) );
}
|
public void free() {
try {
POSIX.munmap(baseAddress, mmappedLength);
} catch (IOException e) {
LOG.warn(this + ": failed to munmap", e);
}
LOG.trace(this + ": freed");
}
|
@Test(timeout=60000)
public void testStartupShutdown() throws Exception {
File path = new File(TEST_BASE, "testStartupShutdown");
path.mkdirs();
SharedFileDescriptorFactory factory =
SharedFileDescriptorFactory.create("shm_",
new String[] { path.getAbsolutePath() } );
FileInputStream stream =
factory.createDescriptor("testStartupShutdown", 4096);
ShortCircuitShm shm = new ShortCircuitShm(ShmId.createRandom(), stream);
shm.free();
stream.close();
FileUtil.fullyDelete(path);
}
|
@Override
public BasicTypeDefine reconvert(Column column) {
BasicTypeDefine.BasicTypeDefineBuilder builder =
BasicTypeDefine.builder()
.name(column.getName())
.nullable(column.isNullable())
.comment(column.getComment())
.defaultValue(column.getDefaultValue());
switch (column.getDataType().getSqlType()) {
case BOOLEAN:
builder.columnType(PG_BOOLEAN);
builder.dataType(PG_BOOLEAN);
break;
case TINYINT:
case SMALLINT:
builder.columnType(PG_SMALLINT);
builder.dataType(PG_SMALLINT);
break;
case INT:
builder.columnType(PG_INTEGER);
builder.dataType(PG_INTEGER);
break;
case BIGINT:
builder.columnType(PG_BIGINT);
builder.dataType(PG_BIGINT);
break;
case FLOAT:
builder.columnType(PG_REAL);
builder.dataType(PG_REAL);
break;
case DOUBLE:
builder.columnType(PG_DOUBLE_PRECISION);
builder.dataType(PG_DOUBLE_PRECISION);
break;
case DECIMAL:
if (column.getSourceType() != null
&& column.getSourceType().equalsIgnoreCase(PG_MONEY)) {
builder.columnType(PG_MONEY);
builder.dataType(PG_MONEY);
} else {
DecimalType decimalType = (DecimalType) column.getDataType();
long precision = decimalType.getPrecision();
int scale = decimalType.getScale();
if (precision <= 0) {
precision = DEFAULT_PRECISION;
scale = DEFAULT_SCALE;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which is precision less than 0, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
precision,
scale);
} else if (precision > MAX_PRECISION) {
scale = (int) Math.max(0, scale - (precision - MAX_PRECISION));
precision = MAX_PRECISION;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which exceeds the maximum precision of {}, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
MAX_PRECISION,
precision,
scale);
}
if (scale < 0) {
scale = 0;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which is scale less than 0, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
precision,
scale);
} else if (scale > MAX_SCALE) {
scale = MAX_SCALE;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
MAX_SCALE,
precision,
scale);
}
builder.columnType(String.format("%s(%s,%s)", PG_NUMERIC, precision, scale));
builder.dataType(PG_NUMERIC);
builder.precision(precision);
builder.scale(scale);
}
break;
case BYTES:
builder.columnType(PG_BYTEA);
builder.dataType(PG_BYTEA);
break;
case STRING:
if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
builder.columnType(PG_TEXT);
builder.dataType(PG_TEXT);
} else if (column.getColumnLength() <= MAX_VARCHAR_LENGTH) {
builder.columnType(
String.format("%s(%s)", PG_VARCHAR, column.getColumnLength()));
builder.dataType(PG_VARCHAR);
} else {
builder.columnType(PG_TEXT);
builder.dataType(PG_TEXT);
}
break;
case DATE:
builder.columnType(PG_DATE);
builder.dataType(PG_DATE);
break;
case TIME:
Integer timeScale = column.getScale();
if (timeScale != null && timeScale > MAX_TIME_SCALE) {
timeScale = MAX_TIME_SCALE;
log.warn(
"The time column {} type time({}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to time({})",
column.getName(),
column.getScale(),
MAX_TIME_SCALE,
timeScale);
}
if (timeScale != null && timeScale > 0) {
builder.columnType(String.format("%s(%s)", PG_TIME, timeScale));
} else {
builder.columnType(PG_TIME);
}
builder.dataType(PG_TIME);
builder.scale(timeScale);
break;
case TIMESTAMP:
Integer timestampScale = column.getScale();
if (timestampScale != null && timestampScale > MAX_TIMESTAMP_SCALE) {
timestampScale = MAX_TIMESTAMP_SCALE;
log.warn(
"The timestamp column {} type timestamp({}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to timestamp({})",
column.getName(),
column.getScale(),
MAX_TIMESTAMP_SCALE,
timestampScale);
}
if (timestampScale != null && timestampScale > 0) {
builder.columnType(String.format("%s(%s)", PG_TIMESTAMP, timestampScale));
} else {
builder.columnType(PG_TIMESTAMP);
}
builder.dataType(PG_TIMESTAMP);
builder.scale(timestampScale);
break;
case ARRAY:
ArrayType arrayType = (ArrayType) column.getDataType();
SeaTunnelDataType elementType = arrayType.getElementType();
switch (elementType.getSqlType()) {
case BOOLEAN:
builder.columnType(PG_BOOLEAN_ARRAY);
builder.dataType(PG_BOOLEAN_ARRAY);
break;
case TINYINT:
case SMALLINT:
builder.columnType(PG_SMALLINT_ARRAY);
builder.dataType(PG_SMALLINT_ARRAY);
break;
case INT:
builder.columnType(PG_INTEGER_ARRAY);
builder.dataType(PG_INTEGER_ARRAY);
break;
case BIGINT:
builder.columnType(PG_BIGINT_ARRAY);
builder.dataType(PG_BIGINT_ARRAY);
break;
case FLOAT:
builder.columnType(PG_REAL_ARRAY);
builder.dataType(PG_REAL_ARRAY);
break;
case DOUBLE:
builder.columnType(PG_DOUBLE_PRECISION_ARRAY);
builder.dataType(PG_DOUBLE_PRECISION_ARRAY);
break;
case BYTES:
builder.columnType(PG_BYTEA);
builder.dataType(PG_BYTEA);
break;
case STRING:
builder.columnType(PG_TEXT_ARRAY);
builder.dataType(PG_TEXT_ARRAY);
break;
default:
throw CommonError.convertToConnectorTypeError(
DatabaseIdentifier.POSTGRESQL,
elementType.getSqlType().name(),
column.getName());
}
break;
default:
throw CommonError.convertToConnectorTypeError(
DatabaseIdentifier.POSTGRESQL,
column.getDataType().getSqlType().name(),
column.getName());
}
return builder.build();
}
|
@Test
public void testReconvertDouble() {
Column column =
PhysicalColumn.builder().name("test").dataType(BasicType.DOUBLE_TYPE).build();
BasicTypeDefine typeDefine = PostgresTypeConverter.INSTANCE.reconvert(column);
Assertions.assertEquals(column.getName(), typeDefine.getName());
Assertions.assertEquals(
PostgresTypeConverter.PG_DOUBLE_PRECISION, typeDefine.getColumnType());
Assertions.assertEquals(
PostgresTypeConverter.PG_DOUBLE_PRECISION, typeDefine.getDataType());
}
|
@GetMapping(value = ApiConstants.PROMETHEUS_CONTROLLER_SERVICE_PATH, produces = "application/json; charset=UTF-8")
public ResponseEntity<String> metricNamespaceService(@PathVariable("namespaceId") String namespaceId,
@PathVariable("service") String service) throws NacosException {
ArrayNode arrayNode = getServiceArrayNode(namespaceId, s -> s.getName().equals(service));
return ResponseEntity.ok().body(arrayNode.toString());
}
|
@Test
public void testMetricNamespaceService() throws Exception {
when(instanceServiceV2.listAllInstances(nameSpace, NamingUtils.getGroupedName(name, group))).thenReturn(testInstanceList);
String prometheusNamespaceServicePath = ApiConstants.PROMETHEUS_CONTROLLER_SERVICE_PATH.replace("{namespaceId}", nameSpace);
prometheusNamespaceServicePath = prometheusNamespaceServicePath.replace("{service}", service.getName());
MockHttpServletRequestBuilder builder = MockMvcRequestBuilders.get(prometheusNamespaceServicePath);
MockHttpServletResponse response = mockMvc.perform(builder).andReturn().getResponse();
assertEquals(200, response.getStatus());
assertEquals(testInstanceList.size(), JacksonUtils.toObj(response.getContentAsString()).size());
}
|
@Override
public Set<NodeHealth> readAll() {
long clusterTime = hzMember.getClusterTime();
long timeout = clusterTime - TIMEOUT_30_SECONDS;
Map<UUID, TimestampedNodeHealth> sqHealthState = readReplicatedMap();
Set<UUID> hzMemberUUIDs = hzMember.getMemberUuids();
Set<NodeHealth> existingNodeHealths = sqHealthState.entrySet().stream()
.filter(outOfDate(timeout))
.filter(ofNonExistentMember(hzMemberUUIDs))
.map(entry -> entry.getValue().getNodeHealth())
.collect(Collectors.toSet());
if (LOG.isTraceEnabled()) {
LOG.trace("Reading {} and keeping {}", new HashMap<>(sqHealthState), existingNodeHealths);
}
return ImmutableSet.copyOf(existingNodeHealths);
}
|
@Test
public void readAll_logs_message_for_each_non_existing_member_ignored_if_TRACE() {
logging.setLevel(Level.TRACE);
Map<String, TimestampedNodeHealth> map = new HashMap<>();
String memberUuid1 = randomAlphanumeric(44);
String memberUuid2 = randomAlphanumeric(44);
map.put(memberUuid1, new TimestampedNodeHealth(randomNodeHealth(), clusterTime - 1));
map.put(memberUuid2, new TimestampedNodeHealth(randomNodeHealth(), clusterTime - 1));
when(hazelcastMember.getClusterTime()).thenReturn(clusterTime);
doReturn(map).when(hazelcastMember).getReplicatedMap(MAP_SQ_HEALTH_STATE);
underTest.readAll();
assertThat(logging.getLogs()).hasSize(3);
assertThat(logging.getLogs(Level.TRACE))
.containsOnly(
"Reading " + new HashMap<>(map) + " and keeping []",
"Ignoring NodeHealth of member " + memberUuid1 + " because it is not part of the cluster at the moment",
"Ignoring NodeHealth of member " + memberUuid2 + " because it is not part of the cluster at the moment");
}
|
public static String truncateContent(String content) {
if (content == null) {
return "";
} else if (content.length() <= SHOW_CONTENT_SIZE) {
return content;
} else {
return content.substring(0, SHOW_CONTENT_SIZE) + "...";
}
}
|
@Test
void testTruncateContentNull() {
String actual = ContentUtils.truncateContent(null);
assertEquals("", actual);
}
|
@Override
Class<?> getReturnType() {
return null;
}
|
@Test
public void test_getReturnType() {
Class<?> returnType = NullGetter.NULL_GETTER.getReturnType();
assertNull(returnType);
}
|
public static String baToHexString(byte[] ba) {
StringBuilder sb = new StringBuilder(ba.length * 2);
for (byte b : ba) {
int j = b & 0xff;
if (j < 16) {
sb.append('0'); // $NON-NLS-1$ add zero padding
}
sb.append(Integer.toHexString(j));
}
return sb.toString();
}
|
@Test
public void testbaToHexString() {
assertEquals("", JOrphanUtils.baToHexString(new byte[]{}));
assertEquals("00", JOrphanUtils.baToHexString(new byte[]{0}));
assertEquals("0f107f8081ff", JOrphanUtils.baToHexString(new byte[]{15, 16, 127, -128, -127, -1}));
}
|
public Map<String, String> requestPenReset(PenRequest request) throws PenRequestException, SharedServiceClientException, SoapValidationException {
final List<PenRequestStatus> result = repository.findByBsnAndDocTypeAndSequenceNo(request.getBsn(), request.getDocType(), request.getSequenceNo());
checkIfTooSoonOrTooOften(result);
final Map<String, String> response = new HashMap<>();
final PenAanvraagEIDRequest rdwRequest = new PenAanvraagEIDRequest();
rdwRequest.setEIDSTATINFO(eidstatinfoBuilder(request.getBsn(), request.getSequenceNo()));
final PenAanvraagEIDResponse rdwResponse = client.penRequest(rdwRequest);
logger.info("The SOAP pen request was successful");
if ( !"OK".equals(rdwResponse.getResponseDescription())) {
String errorMessage = "Connection with RDW was successful, but the response description was not equal to OK";
logger.error(errorMessage);
throw new PenRequestException("DWS10", errorMessage);
}
createPenRequestStatus(request.getBsn(), request.getSequenceNo(), DocumentType.DRIVING_LICENCE);
response.putAll(statusOK);
return response;
}
|
@Test
public void penRequestWithConfigurationsErrorThrowsDWS3Exception() throws PenRequestException, SharedServiceClientException, SoapValidationException {
Mockito.when(ssMock.getSSConfigInt(speedRequest)).thenThrow(new SharedServiceClientException("DWS3"));
mockStatusList.add(status);
Mockito.when(mockRepository.findByBsnAndDocTypeAndSequenceNo(request.getBsn(), request.getDocType(), request.getSequenceNo())).thenReturn(mockStatusList);
Exception exception = assertThrows(SharedServiceClientException.class, () -> {
service.requestPenReset(request);
});
assertEquals("DWS3", exception.getMessage());
}
|
public static <T> Write<T> write(String jdbcUrl, String table) {
return new AutoValue_ClickHouseIO_Write.Builder<T>()
.jdbcUrl(jdbcUrl)
.table(table)
.properties(new Properties())
.maxInsertBlockSize(DEFAULT_MAX_INSERT_BLOCK_SIZE)
.initialBackoff(DEFAULT_INITIAL_BACKOFF)
.maxRetries(DEFAULT_MAX_RETRIES)
.maxCumulativeBackoff(DEFAULT_MAX_CUMULATIVE_BACKOFF)
.build()
.withInsertDeduplicate(true)
.withInsertDistributedSync(true);
}
|
@Test
public void testArrayOfPrimitiveTypes() throws Exception {
Schema schema =
Schema.of(
Schema.Field.of("f0", FieldType.array(FieldType.DATETIME)),
Schema.Field.of("f1", FieldType.array(FieldType.DATETIME)),
Schema.Field.of("f2", FieldType.array(FieldType.FLOAT)),
Schema.Field.of("f3", FieldType.array(FieldType.DOUBLE)),
Schema.Field.of("f4", FieldType.array(FieldType.BYTE)),
Schema.Field.of("f5", FieldType.array(FieldType.INT16)),
Schema.Field.of("f6", FieldType.array(FieldType.INT32)),
Schema.Field.of("f7", FieldType.array(FieldType.INT64)),
Schema.Field.of("f8", FieldType.array(FieldType.STRING)),
Schema.Field.of("f9", FieldType.array(FieldType.INT16)),
Schema.Field.of("f10", FieldType.array(FieldType.INT32)),
Schema.Field.of("f11", FieldType.array(FieldType.INT64)),
Schema.Field.of("f12", FieldType.array(FieldType.INT64)),
Schema.Field.of("f13", FieldType.array(FieldType.STRING)),
Schema.Field.of("f14", FieldType.array(FieldType.STRING)),
Schema.Field.of("f15", FieldType.array(FieldType.BOOLEAN)));
Row row1 =
Row.withSchema(schema)
.addArray(
new DateTime(2030, 10, 1, 0, 0, 0, DateTimeZone.UTC),
new DateTime(2031, 10, 1, 0, 0, 0, DateTimeZone.UTC))
.addArray(
new DateTime(2030, 10, 9, 8, 7, 6, DateTimeZone.UTC),
new DateTime(2031, 10, 9, 8, 7, 6, DateTimeZone.UTC))
.addArray(2.2f, 3.3f)
.addArray(3.3, 4.4)
.addArray((byte) 4, (byte) 5)
.addArray((short) 5, (short) 6)
.addArray(6, 7)
.addArray(7L, 8L)
.addArray("eight", "nine")
.addArray((short) 9, (short) 10)
.addArray(10, 11)
.addArray(11L, 12L)
.addArray(12L, 13L)
.addArray("abc", "cde")
.addArray("cde", "abc")
.addArray(true, false)
.build();
executeSql(
"CREATE TABLE test_array_of_primitive_types ("
+ "f0 Array(Date),"
+ "f1 Array(DateTime),"
+ "f2 Array(Float32),"
+ "f3 Array(Float64),"
+ "f4 Array(Int8),"
+ "f5 Array(Int16),"
+ "f6 Array(Int32),"
+ "f7 Array(Int64),"
+ "f8 Array(String),"
+ "f9 Array(UInt8),"
+ "f10 Array(UInt16),"
+ "f11 Array(UInt32),"
+ "f12 Array(UInt64),"
+ "f13 Array(Enum8('abc' = 1, 'cde' = 2)),"
+ "f14 Array(Enum16('abc' = -1, 'cde' = -2)),"
+ "f15 Array(Bool)"
+ ") ENGINE=Log");
pipeline
.apply(Create.of(row1).withRowSchema(schema))
.apply(write("test_array_of_primitive_types"));
pipeline.run().waitUntilFinish();
try (ResultSet rs = executeQuery("SELECT * FROM test_array_of_primitive_types")) {
rs.next();
assertEquals("[2030-10-01, 2031-10-01]", rs.getString("f0"));
assertEquals("[2030-10-09T08:07:06, 2031-10-09T08:07:06]", rs.getString("f1"));
// Since comparing float/double values is not precise, we compare the string representation
assertEquals("[2.2,3.3]", rs.getString("f2"));
assertEquals("[3.3,4.4]", rs.getString("f3"));
assertArrayEquals(new byte[] {4, 5}, (byte[]) rs.getArray("f4").getArray());
assertArrayEquals(new short[] {5, 6}, (short[]) rs.getArray("f5").getArray());
assertArrayEquals(new int[] {6, 7}, (int[]) rs.getArray("f6").getArray());
assertArrayEquals(new long[] {7L, 8L}, (long[]) rs.getArray("f7").getArray());
assertArrayEquals(new String[] {"eight", "nine"}, (String[]) rs.getArray("f8").getArray());
assertArrayEquals(new byte[] {9, 10}, (byte[]) rs.getArray("f9").getArray());
assertArrayEquals(new short[] {10, 11}, (short[]) rs.getArray("f10").getArray());
assertArrayEquals(new int[] {11, 12}, (int[]) rs.getArray("f11").getArray());
assertArrayEquals(new long[] {12L, 13L}, (long[]) rs.getArray("f12").getArray());
assertArrayEquals(new String[] {"abc", "cde"}, (String[]) rs.getArray("f13").getArray());
assertArrayEquals(new String[] {"cde", "abc"}, (String[]) rs.getArray("f14").getArray());
assertArrayEquals(new boolean[] {true, false}, (boolean[]) rs.getArray("f15").getArray());
}
}
|
@Override
public DenseMatrix matrixMultiply(Matrix other) {
if (dim2 == other.getDimension1Size()) {
if (other instanceof DenseMatrix) {
DenseMatrix otherDense = (DenseMatrix) other;
double[][] output = new double[dim1][otherDense.dim2];
for (int i = 0; i < dim1; i++) {
for (int j = 0; j < otherDense.dim2; j++) {
output[i][j] = columnRowDot(i,j,otherDense);
}
}
return new DenseMatrix(output);
} else if (other instanceof DenseSparseMatrix) {
DenseSparseMatrix otherSparse = (DenseSparseMatrix) other;
int otherDim2 = otherSparse.getDimension2Size();
double[][] output = new double[dim1][otherDim2];
for (int i = 0; i < dim1; i++) {
for (int j = 0; j < otherDim2; j++) {
output[i][j] = columnRowDot(i,j,otherSparse);
}
}
return new DenseMatrix(output);
} else {
throw new IllegalArgumentException("Unknown matrix type " + other.getClass().getName());
}
} else {
throw new IllegalArgumentException("Invalid matrix dimensions, this.shape=" + Arrays.toString(shape) + ", other.shape = " + Arrays.toString(other.getShape()));
}
}
|
@Test
public void matrixMultiplyTest() {
//4x4 matrices
DenseMatrix a = generateA();
DenseMatrix b = generateB();
DenseMatrix c = generateC();
//4x7 matrix
DenseMatrix d = generateD();
//7x3 matrix
DenseMatrix e = generateE();
//3x4 matrix
DenseMatrix f = generateF();
//4x7 output
DenseMatrix ad = generateAD();
assertEquals(ad,a.matrixMultiply(d));
DenseMatrix bd = generateBD();
assertEquals(bd,b.matrixMultiply(d));
DenseMatrix cd = generateCD();
assertEquals(cd,c.matrixMultiply(d));
//3x4 output
DenseMatrix fa = generateFA();
assertEquals(fa,f.matrixMultiply(a));
DenseMatrix fb = generateFB();
assertEquals(fb,f.matrixMultiply(b));
DenseMatrix fc = generateFC();
assertEquals(fc,f.matrixMultiply(c));
//4x3 output
DenseMatrix de = generateDE();
assertEquals(de,d.matrixMultiply(e));
//3x7 output
DenseMatrix fd = generateFD();
assertEquals(fd,f.matrixMultiply(d));
//7x4 output
DenseMatrix ef = generateEF();
assertEquals(ef,e.matrixMultiply(f));
//4x4 output
DenseMatrix def = generateDEF();
assertEquals(def,d.matrixMultiply(e).matrixMultiply(f));
}
|
int numRemoteSources(EdgeDef edge) {
Set<Address> edgeSources = sources.get(edge.edgeId());
return edgeSources.contains(localAddress) ? edgeSources.size() - 1 : edgeSources.size();
}
|
@Test
public void when_traversingDAGAndCheckNumberOfConnections() {
VertexDef vertex_1 = createMockVertex(1);
VertexDef vertex_2 = createMockVertex(2);
List<VertexDef> vertices = Arrays.asList(vertex_1, vertex_2);
EdgeDef edge_1_2 = createEdge(vertex_1, vertex_2, null);
DagNodeUtil util = new DagNodeUtil(vertices, allAddresses, node1Address);
assertEquals(0, util.numRemoteSources(edge_1_2));
}
|
@Override
public DataflowPipelineJob run(Pipeline pipeline) {
// Multi-language pipelines and pipelines that include upgrades should automatically be upgraded
// to Runner v2.
if (DataflowRunner.isMultiLanguagePipeline(pipeline) || includesTransformUpgrades(pipeline)) {
List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList());
if (!experiments.contains("use_runner_v2")) {
LOG.info(
"Automatically enabling Dataflow Runner v2 since the pipeline used cross-language"
+ " transforms or pipeline needed a transform upgrade.");
options.setExperiments(
ImmutableList.<String>builder().addAll(experiments).add("use_runner_v2").build());
}
}
if (useUnifiedWorker(options)) {
if (hasExperiment(options, "disable_runner_v2")
|| hasExperiment(options, "disable_runner_v2_until_2023")
|| hasExperiment(options, "disable_prime_runner_v2")) {
throw new IllegalArgumentException(
"Runner V2 both disabled and enabled: at least one of ['beam_fn_api', 'use_unified_worker', 'use_runner_v2', 'use_portable_job_submission'] is set and also one of ['disable_runner_v2', 'disable_runner_v2_until_2023', 'disable_prime_runner_v2'] is set.");
}
List<String> experiments =
new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true
if (!experiments.contains("use_runner_v2")) {
experiments.add("use_runner_v2");
}
if (!experiments.contains("use_unified_worker")) {
experiments.add("use_unified_worker");
}
if (!experiments.contains("beam_fn_api")) {
experiments.add("beam_fn_api");
}
if (!experiments.contains("use_portable_job_submission")) {
experiments.add("use_portable_job_submission");
}
options.setExperiments(ImmutableList.copyOf(experiments));
}
logWarningIfPCollectionViewHasNonDeterministicKeyCoder(pipeline);
logWarningIfBigqueryDLQUnused(pipeline);
if (shouldActAsStreaming(pipeline)) {
options.setStreaming(true);
if (useUnifiedWorker(options)) {
options.setEnableStreamingEngine(true);
List<String> experiments =
new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true
if (!experiments.contains("enable_streaming_engine")) {
experiments.add("enable_streaming_engine");
}
if (!experiments.contains("enable_windmill_service")) {
experiments.add("enable_windmill_service");
}
}
}
if (!ExperimentalOptions.hasExperiment(options, "disable_projection_pushdown")) {
ProjectionPushdownOptimizer.optimize(pipeline);
}
LOG.info(
"Executing pipeline on the Dataflow Service, which will have billing implications "
+ "related to Google Compute Engine usage and other Google Cloud Services.");
DataflowPipelineOptions dataflowOptions = options.as(DataflowPipelineOptions.class);
String workerHarnessContainerImageURL = DataflowRunner.getContainerImageForJob(dataflowOptions);
// This incorrectly puns the worker harness container image (which implements v1beta3 API)
// with the SDK harness image (which implements Fn API).
//
// The same Environment is used in different and contradictory ways, depending on whether
// it is a v1 or v2 job submission.
RunnerApi.Environment defaultEnvironmentForDataflow =
Environments.createDockerEnvironment(workerHarnessContainerImageURL);
    // The SdkComponents for portable and non-portable job submission must be kept distinct. Both
// need the default environment.
SdkComponents portableComponents = SdkComponents.create();
portableComponents.registerEnvironment(
defaultEnvironmentForDataflow
.toBuilder()
.addAllDependencies(getDefaultArtifacts())
.addAllCapabilities(Environments.getJavaCapabilities())
.build());
RunnerApi.Pipeline portablePipelineProto =
PipelineTranslation.toProto(pipeline, portableComponents, false);
// Note that `stageArtifacts` has to be called before `resolveArtifact` because
// `resolveArtifact` updates local paths to staged paths in pipeline proto.
portablePipelineProto = resolveAnyOfEnvironments(portablePipelineProto);
List<DataflowPackage> packages = stageArtifacts(portablePipelineProto);
portablePipelineProto = resolveArtifacts(portablePipelineProto);
portablePipelineProto = applySdkEnvironmentOverrides(portablePipelineProto, options);
if (LOG.isDebugEnabled()) {
LOG.debug(
"Portable pipeline proto:\n{}",
TextFormat.printer().printToString(portablePipelineProto));
}
// Stage the portable pipeline proto, retrieving the staged pipeline path, then update
// the options on the new job
// TODO: add an explicit `pipeline` parameter to the submission instead of pipeline options
LOG.info("Staging portable pipeline proto to {}", options.getStagingLocation());
byte[] serializedProtoPipeline = portablePipelineProto.toByteArray();
DataflowPackage stagedPipeline =
options.getStager().stageToFile(serializedProtoPipeline, PIPELINE_FILE_NAME);
dataflowOptions.setPipelineUrl(stagedPipeline.getLocation());
if (useUnifiedWorker(options)) {
LOG.info("Skipping v1 transform replacements since job will run on v2.");
} else {
// Now rewrite things to be as needed for v1 (mutates the pipeline)
// This way the job submitted is valid for v1 and v2, simultaneously
replaceV1Transforms(pipeline);
}
// Capture the SdkComponents for look up during step translations
SdkComponents dataflowV1Components = SdkComponents.create();
dataflowV1Components.registerEnvironment(
defaultEnvironmentForDataflow
.toBuilder()
.addAllDependencies(getDefaultArtifacts())
.addAllCapabilities(Environments.getJavaCapabilities())
.build());
// No need to perform transform upgrading for the Runner v1 proto.
RunnerApi.Pipeline dataflowV1PipelineProto =
PipelineTranslation.toProto(pipeline, dataflowV1Components, true, false);
if (LOG.isDebugEnabled()) {
LOG.debug(
"Dataflow v1 pipeline proto:\n{}",
TextFormat.printer().printToString(dataflowV1PipelineProto));
}
// Set a unique client_request_id in the CreateJob request.
// This is used to ensure idempotence of job creation across retried
// attempts to create a job. Specifically, if the service returns a job with
// a different client_request_id, it means the returned one is a different
// job previously created with the same job name, and that the job creation
// has been effectively rejected. The SDK should return
    // Error::Already_Exists to the user in that case.
int randomNum = new Random().nextInt(9000) + 1000;
String requestId =
DateTimeFormat.forPattern("YYYYMMddHHmmssmmm")
.withZone(DateTimeZone.UTC)
.print(DateTimeUtils.currentTimeMillis())
+ "_"
+ randomNum;
JobSpecification jobSpecification =
translator.translate(
pipeline, dataflowV1PipelineProto, dataflowV1Components, this, packages);
if (!isNullOrEmpty(dataflowOptions.getDataflowWorkerJar()) && !useUnifiedWorker(options)) {
List<String> experiments =
firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList());
if (!experiments.contains("use_staged_dataflow_worker_jar")) {
dataflowOptions.setExperiments(
ImmutableList.<String>builder()
.addAll(experiments)
.add("use_staged_dataflow_worker_jar")
.build());
}
}
Job newJob = jobSpecification.getJob();
try {
newJob
.getEnvironment()
.setSdkPipelineOptions(
MAPPER.readValue(MAPPER_WITH_MODULES.writeValueAsBytes(options), Map.class));
} catch (IOException e) {
throw new IllegalArgumentException(
"PipelineOptions specified failed to serialize to JSON.", e);
}
newJob.setClientRequestId(requestId);
DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo();
String version = dataflowRunnerInfo.getVersion();
checkState(
!"${pom.version}".equals(version),
"Unable to submit a job to the Dataflow service with unset version ${pom.version}");
LOG.info("Dataflow SDK version: {}", version);
newJob.getEnvironment().setUserAgent((Map) dataflowRunnerInfo.getProperties());
// The Dataflow Service may write to the temporary directory directly, so
    // it must be verified.
if (!isNullOrEmpty(options.getGcpTempLocation())) {
newJob
.getEnvironment()
.setTempStoragePrefix(
dataflowOptions.getPathValidator().verifyPath(options.getGcpTempLocation()));
}
newJob.getEnvironment().setDataset(options.getTempDatasetId());
if (options.getWorkerRegion() != null) {
newJob.getEnvironment().setWorkerRegion(options.getWorkerRegion());
}
if (options.getWorkerZone() != null) {
newJob.getEnvironment().setWorkerZone(options.getWorkerZone());
}
if (options.getFlexRSGoal()
== DataflowPipelineOptions.FlexResourceSchedulingGoal.COST_OPTIMIZED) {
newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_COST_OPTIMIZED");
} else if (options.getFlexRSGoal()
== DataflowPipelineOptions.FlexResourceSchedulingGoal.SPEED_OPTIMIZED) {
newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_SPEED_OPTIMIZED");
}
// Represent the minCpuPlatform pipeline option as an experiment, if not already present.
if (!isNullOrEmpty(dataflowOptions.getMinCpuPlatform())) {
List<String> experiments =
firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList());
List<String> minCpuFlags =
experiments.stream()
.filter(p -> p.startsWith("min_cpu_platform"))
.collect(Collectors.toList());
if (minCpuFlags.isEmpty()) {
dataflowOptions.setExperiments(
ImmutableList.<String>builder()
.addAll(experiments)
.add("min_cpu_platform=" + dataflowOptions.getMinCpuPlatform())
.build());
} else {
LOG.warn(
"Flag min_cpu_platform is defined in both top level PipelineOption, "
+ "as well as under experiments. Proceed using {}.",
minCpuFlags.get(0));
}
}
newJob
.getEnvironment()
.setExperiments(
ImmutableList.copyOf(
firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList())));
// Set the Docker container image that executes Dataflow worker harness, residing in Google
// Container Registry. Translator is guaranteed to create a worker pool prior to this point.
// For runner_v1, only worker_harness_container is set.
// For runner_v2, both worker_harness_container and sdk_harness_container are set to the same
// value.
String containerImage = getContainerImageForJob(options);
for (WorkerPool workerPool : newJob.getEnvironment().getWorkerPools()) {
workerPool.setWorkerHarnessContainerImage(containerImage);
}
configureSdkHarnessContainerImages(options, portablePipelineProto, newJob);
newJob.getEnvironment().setVersion(getEnvironmentVersion(options));
if (hooks != null) {
hooks.modifyEnvironmentBeforeSubmission(newJob.getEnvironment());
}
// enable upload_graph when the graph is too large
byte[] jobGraphBytes = DataflowPipelineTranslator.jobToString(newJob).getBytes(UTF_8);
int jobGraphByteSize = jobGraphBytes.length;
if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES
&& !hasExperiment(options, "upload_graph")
&& !useUnifiedWorker(options)) {
List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList());
options.setExperiments(
ImmutableList.<String>builder().addAll(experiments).add("upload_graph").build());
LOG.info(
"The job graph size ({} in bytes) is larger than {}. Automatically add "
+ "the upload_graph option to experiments.",
jobGraphByteSize,
CREATE_JOB_REQUEST_LIMIT_BYTES);
}
if (hasExperiment(options, "upload_graph") && useUnifiedWorker(options)) {
ArrayList<String> experiments = new ArrayList<>(options.getExperiments());
while (experiments.remove("upload_graph")) {}
options.setExperiments(experiments);
LOG.warn(
"The upload_graph experiment was specified, but it does not apply "
+ "to runner v2 jobs. Option has been automatically removed.");
}
// Upload the job to GCS and remove the graph object from the API call. The graph
// will be downloaded from GCS by the service.
if (hasExperiment(options, "upload_graph")) {
DataflowPackage stagedGraph =
options.getStager().stageToFile(jobGraphBytes, DATAFLOW_GRAPH_FILE_NAME);
newJob.getSteps().clear();
newJob.setStepsLocation(stagedGraph.getLocation());
}
if (!isNullOrEmpty(options.getDataflowJobFile())
|| !isNullOrEmpty(options.getTemplateLocation())) {
boolean isTemplate = !isNullOrEmpty(options.getTemplateLocation());
if (isTemplate) {
checkArgument(
isNullOrEmpty(options.getDataflowJobFile()),
"--dataflowJobFile and --templateLocation are mutually exclusive.");
}
String fileLocation =
firstNonNull(options.getTemplateLocation(), options.getDataflowJobFile());
checkArgument(
fileLocation.startsWith("/") || fileLocation.startsWith("gs://"),
"Location must be local or on Cloud Storage, got %s.",
fileLocation);
ResourceId fileResource = FileSystems.matchNewResource(fileLocation, false /* isDirectory */);
String workSpecJson = DataflowPipelineTranslator.jobToString(newJob);
try (PrintWriter printWriter =
new PrintWriter(
new BufferedWriter(
new OutputStreamWriter(
Channels.newOutputStream(FileSystems.create(fileResource, MimeTypes.TEXT)),
UTF_8)))) {
printWriter.print(workSpecJson);
LOG.info("Printed job specification to {}", fileLocation);
} catch (IOException ex) {
String error = String.format("Cannot create output file at %s", fileLocation);
if (isTemplate) {
throw new RuntimeException(error, ex);
} else {
LOG.warn(error, ex);
}
}
if (isTemplate) {
LOG.info("Template successfully created.");
return new DataflowTemplateJob();
}
}
String jobIdToUpdate = null;
if (options.isUpdate()) {
jobIdToUpdate = getJobIdFromName(options.getJobName());
newJob.setTransformNameMapping(options.getTransformNameMapping());
newJob.setReplaceJobId(jobIdToUpdate);
}
if (options.getCreateFromSnapshot() != null && !options.getCreateFromSnapshot().isEmpty()) {
newJob.setTransformNameMapping(options.getTransformNameMapping());
newJob.setCreatedFromSnapshotId(options.getCreateFromSnapshot());
}
Job jobResult;
try {
jobResult = dataflowClient.createJob(newJob);
} catch (GoogleJsonResponseException e) {
String errorMessages = "Unexpected errors";
if (e.getDetails() != null) {
if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES) {
errorMessages =
"The size of the serialized JSON representation of the pipeline "
+ "exceeds the allowable limit. "
+ "For more information, please see the documentation on job submission:\n"
+ "https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#jobs";
} else {
errorMessages = e.getDetails().getMessage();
}
}
throw new RuntimeException("Failed to create a workflow job: " + errorMessages, e);
} catch (IOException e) {
throw new RuntimeException("Failed to create a workflow job", e);
}
// Use a raw client for post-launch monitoring, as status calls may fail
// regularly and need not be retried automatically.
DataflowPipelineJob dataflowPipelineJob =
new DataflowPipelineJob(
DataflowClient.create(options),
jobResult.getId(),
options,
jobSpecification != null ? jobSpecification.getStepNames() : Collections.emptyMap(),
portablePipelineProto);
// If the service returned client request id, the SDK needs to compare it
// with the original id generated in the request, if they are not the same
// (i.e., the returned job is not created by this request), throw
// DataflowJobAlreadyExistsException or DataflowJobAlreadyUpdatedException
// depending on whether this is a reload or not.
if (jobResult.getClientRequestId() != null
&& !jobResult.getClientRequestId().isEmpty()
&& !jobResult.getClientRequestId().equals(requestId)) {
// If updating a job.
if (options.isUpdate()) {
throw new DataflowJobAlreadyUpdatedException(
dataflowPipelineJob,
String.format(
"The job named %s with id: %s has already been updated into job id: %s "
+ "and cannot be updated again.",
newJob.getName(), jobIdToUpdate, jobResult.getId()));
} else {
throw new DataflowJobAlreadyExistsException(
dataflowPipelineJob,
String.format(
"There is already an active job named %s with id: %s. If you want to submit a"
+ " second job, try again by setting a different name using --jobName.",
newJob.getName(), jobResult.getId()));
}
}
LOG.info(
"To access the Dataflow monitoring console, please navigate to {}",
MonitoringUtil.getJobMonitoringPageURL(
options.getProject(), options.getRegion(), jobResult.getId()));
LOG.info("Submitted job: {}", jobResult.getId());
LOG.info(
"To cancel the job using the 'gcloud' tool, run:\n> {}",
MonitoringUtil.getGcloudCancelCommand(options, jobResult.getId()));
return dataflowPipelineJob;
}
|
@Test
public void testRunWithFiles() throws IOException {
// Test that the function DataflowRunner.stageFiles works as expected.
final String cloudDataflowDataset = "somedataset";
// Create some temporary files.
File temp1 = File.createTempFile("DataflowRunnerTest-", ".txt");
temp1.deleteOnExit();
File temp2 = File.createTempFile("DataflowRunnerTest2-", ".txt");
temp2.deleteOnExit();
String overridePackageName = "alias.txt";
when(mockGcsUtil.getObjects(anyListOf(GcsPath.class)))
.thenReturn(
ImmutableList.of(
GcsUtil.StorageObjectOrIOException.create(new FileNotFoundException("some/path"))));
DataflowPipelineOptions options = PipelineOptionsFactory.as(DataflowPipelineOptions.class);
options.setFilesToStage(
ImmutableList.of(
temp1.getAbsolutePath(), overridePackageName + "=" + temp2.getAbsolutePath()));
options.setStagingLocation(VALID_STAGING_BUCKET);
options.setTempLocation(VALID_TEMP_BUCKET);
options.setTempDatasetId(cloudDataflowDataset);
options.setProject(PROJECT_ID);
options.setRegion(REGION_ID);
options.setJobName("job");
options.setDataflowClient(buildMockDataflow(mockJobs));
options.setGcsUtil(mockGcsUtil);
options.setGcpCredential(new TestCredential());
when(mockGcsUtil.create(any(GcsPath.class), any(GcsUtil.CreateOptions.class)))
.then(
invocation ->
FileChannel.open(
Files.createTempFile("channel-", ".tmp"),
StandardOpenOption.CREATE,
StandardOpenOption.WRITE,
StandardOpenOption.DELETE_ON_CLOSE));
Pipeline p = buildDataflowPipeline(options);
DataflowPipelineJob job = (DataflowPipelineJob) p.run();
assertEquals("newid", job.getJobId());
ArgumentCaptor<Job> jobCaptor = ArgumentCaptor.forClass(Job.class);
Mockito.verify(mockJobs).create(eq(PROJECT_ID), eq(REGION_ID), jobCaptor.capture());
Job workflowJob = jobCaptor.getValue();
assertValidJob(workflowJob);
assertEquals(2, workflowJob.getEnvironment().getWorkerPools().get(0).getPackages().size());
DataflowPackage workflowPackage1 =
workflowJob.getEnvironment().getWorkerPools().get(0).getPackages().get(0);
assertThat(workflowPackage1.getName(), endsWith(getFileExtension(temp1.getAbsolutePath())));
DataflowPackage workflowPackage2 =
workflowJob.getEnvironment().getWorkerPools().get(0).getPackages().get(1);
assertEquals(overridePackageName, workflowPackage2.getName());
assertEquals(
GcsPath.fromUri(VALID_TEMP_BUCKET).toResourceName(),
workflowJob.getEnvironment().getTempStoragePrefix());
assertEquals(cloudDataflowDataset, workflowJob.getEnvironment().getDataset());
assertEquals(
DataflowRunnerInfo.getDataflowRunnerInfo().getName(),
workflowJob.getEnvironment().getUserAgent().get("name"));
assertEquals(
DataflowRunnerInfo.getDataflowRunnerInfo().getVersion(),
workflowJob.getEnvironment().getUserAgent().get("version"));
}
|
@Override
public void createTable(Table table) {
validateTableType(table);
// first assert the table name is unique
if (tables.containsKey(table.getName())) {
throw new IllegalArgumentException("Duplicate table name: " + table.getName());
}
// invoke the provider's create
providers.get(table.getType()).createTable(table);
// store to the global metastore
tables.put(table.getName(), table);
}
|
@Test(expected = IllegalArgumentException.class)
public void testCreateTable_duplicatedName() throws Exception {
Table table = mockTable("person");
store.createTable(table);
store.createTable(table);
}
|
@Override
public Progress getProgress() {
// If current tracking range is no longer growable, get progress as a normal range.
if (range.getTo() != Long.MAX_VALUE || range.getTo() == range.getFrom()) {
return super.getProgress();
}
    // Convert to BigDecimal in computation to prevent overflow, which may result in loss of
// precision.
BigDecimal estimateRangeEnd = BigDecimal.valueOf(rangeEndEstimator.estimate());
if (lastAttemptedOffset == null) {
return Progress.from(
0,
estimateRangeEnd
.subtract(BigDecimal.valueOf(range.getFrom()), MathContext.DECIMAL128)
.max(BigDecimal.ZERO)
.doubleValue());
}
BigDecimal workRemaining =
estimateRangeEnd
.subtract(BigDecimal.valueOf(lastAttemptedOffset), MathContext.DECIMAL128)
.max(BigDecimal.ZERO);
BigDecimal totalWork =
estimateRangeEnd
.max(BigDecimal.valueOf(lastAttemptedOffset))
.subtract(BigDecimal.valueOf(range.getFrom()), MathContext.DECIMAL128);
return Progress.from(
totalWork.subtract(workRemaining, MathContext.DECIMAL128).doubleValue(),
workRemaining.doubleValue());
}
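
As a brief aside, here is a minimal, self-contained sketch of the BigDecimal arithmetic above with hypothetical values (range start 10, last attempted offset 25, estimated range end 20); the class and variable names are illustrative and not part of the tracker's API.

import java.math.BigDecimal;
import java.math.MathContext;

// Illustrative only: mirrors the progress math above with hypothetical values.
public class ProgressMathSketch {
  public static void main(String[] args) {
    BigDecimal from = BigDecimal.valueOf(10);
    BigDecimal lastAttempted = BigDecimal.valueOf(25);
    BigDecimal estimateRangeEnd = BigDecimal.valueOf(20);
    // Remaining work is clamped at zero when the estimate lags behind the last attempted offset.
    BigDecimal workRemaining =
        estimateRangeEnd.subtract(lastAttempted, MathContext.DECIMAL128).max(BigDecimal.ZERO); // 0
    // Total work uses max(estimate, lastAttempted) so completed work is never understated.
    BigDecimal totalWork =
        estimateRangeEnd.max(lastAttempted).subtract(from, MathContext.DECIMAL128); // 15
    System.out.println(
        "completed=" + totalWork.subtract(workRemaining, MathContext.DECIMAL128)
            + ", remaining=" + workRemaining); // completed=15, remaining=0
  }
}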
|
@Test
public void testProgressBeforeStart() throws Exception {
SimpleEstimator simpleEstimator = new SimpleEstimator();
GrowableOffsetRangeTracker tracker = new GrowableOffsetRangeTracker(10L, simpleEstimator);
simpleEstimator.setEstimateRangeEnd(20);
Progress currentProcess = tracker.getProgress();
assertEquals(0, currentProcess.getWorkCompleted(), 0.001);
assertEquals(10, currentProcess.getWorkRemaining(), 0.001);
simpleEstimator.setEstimateRangeEnd(15);
currentProcess = tracker.getProgress();
assertEquals(0, currentProcess.getWorkCompleted(), 0.001);
assertEquals(5, currentProcess.getWorkRemaining(), 0.001);
simpleEstimator.setEstimateRangeEnd(5);
currentProcess = tracker.getProgress();
assertEquals(0, currentProcess.getWorkCompleted(), 0.001);
assertEquals(0, currentProcess.getWorkRemaining(), 0.001);
}
|
@Override
public void accept(ServerWebExchange exchange, CachedResponse cachedResponse) {
ServerHttpResponse response = exchange.getResponse();
long calculatedMaxAgeInSeconds = calculateMaxAgeInSeconds(exchange.getRequest(), cachedResponse,
configuredTimeToLive);
rewriteCacheControlMaxAge(response.getHeaders(), calculatedMaxAgeInSeconds);
}
|
@Test
void maxAgeIsDecreasedByTimePassed_whenFilterIsAppliedAfterSecondsLater() {
Duration timeToLive = Duration.ofSeconds(30);
CachedResponse inputCachedResponse = CachedResponse.create(HttpStatus.OK).timestamp(clock.instant()).build();
SetMaxAgeHeaderAfterCacheExchangeMutator toTest = new SetMaxAgeHeaderAfterCacheExchangeMutator(timeToLive,
clock, false);
toTest.accept(inputExchange, inputCachedResponse);
Optional<Long> firstMaxAgeSeconds = parseMaxAge(inputExchange.getResponse());
SetMaxAgeHeaderAfterCacheExchangeMutator toTestSecondsLater = new SetMaxAgeHeaderAfterCacheExchangeMutator(
timeToLive, clockSecondsLater, false);
toTestSecondsLater.accept(inputExchange, inputCachedResponse);
Optional<Long> secondMaxAgeSeconds = parseMaxAge(inputExchange.getResponse());
assertThat(firstMaxAgeSeconds).contains(timeToLive.getSeconds());
assertThat(secondMaxAgeSeconds).contains(timeToLive.getSeconds() - SECONDS_LATER);
}
|
public Configuration getConf() {
return conf;
}
|
@Test
public void testConfiguration() throws Exception {
Configuration conf = new Configuration(false);
conf.set("a", "A");
MyKeyProvider kp = new MyKeyProvider(conf);
Assert.assertEquals("A", kp.getConf().get("a"));
}
|
public void alterResource(AlterResourceStmt stmt) throws DdlException {
this.writeLock();
try {
            // check if the target resource exists.
String name = stmt.getResourceName();
Resource resource = this.getResource(name);
if (resource == null) {
throw new DdlException("Resource(" + name + ") does not exist");
}
// 1. alter the resource properties
// 2. clear the cache
// 3. update the edit log
if (resource instanceof HiveResource) {
((HiveResource) resource).alterProperties(stmt.getProperties());
} else if (resource instanceof HudiResource) {
((HudiResource) resource).alterProperties(stmt.getProperties());
} else if (resource instanceof IcebergResource) {
((IcebergResource) resource).alterProperties(stmt.getProperties());
} else {
throw new DdlException("Alter resource statement only support external hive/hudi/iceberg now");
}
if (resource.needMappingCatalog()) {
String type = resource.getType().name().toLowerCase(Locale.ROOT);
String catalogName = getResourceMappingCatalogName(resource.getName(), type);
DropCatalogStmt dropCatalogStmt = new DropCatalogStmt(catalogName);
GlobalStateMgr.getCurrentState().getCatalogMgr().dropCatalog(dropCatalogStmt);
Map<String, String> properties = Maps.newHashMap(stmt.getProperties());
properties.put("type", type);
String uriInProperties = stmt.getProperties().get(HIVE_METASTORE_URIS);
String uris = uriInProperties == null ? resource.getHiveMetastoreURIs() : uriInProperties;
properties.put(HIVE_METASTORE_URIS, uris);
GlobalStateMgr.getCurrentState().getCatalogMgr().createCatalog(type, catalogName, "mapping catalog", properties);
}
GlobalStateMgr.getCurrentState().getEditLog().logCreateResource(resource);
} finally {
this.writeUnLock();
}
}
|
@Test(expected = DdlException.class)
public void testAlterResourcePropertyNotExist(@Injectable EditLog editLog, @Mocked GlobalStateMgr globalStateMgr)
throws UserException {
ResourceMgr mgr = new ResourceMgr();
// add hive resource
name = "hive0";
type = "hive";
addHiveResource(mgr, editLog, globalStateMgr);
// alter hive resource
Map<String, String> properties = new HashMap<>();
properties.put("hive.metastore.uris.xxx", "thrift://10.10.44.xxx:9083");
AlterResourceStmt stmt = new AlterResourceStmt(name, properties);
com.starrocks.sql.analyzer.Analyzer.analyze(stmt, connectContext);
mgr.alterResource(stmt);
}
|
@Override
public byte readByte(@Nonnull String fieldName) throws IOException {
FieldDefinition fd = cd.getField(fieldName);
if (fd == null) {
return 0;
}
validateTypeCompatibility(fd, BYTE);
return super.readByte(fieldName);
}
|
@Test
public void testReadByte() throws Exception {
byte aByte = reader.readByte("byte");
assertEquals(1, aByte);
assertEquals(0, reader.readByte("NO SUCH FIELD"));
}
|
public static KieSession loadKieSession(ModelLocalUriId modelLocalUriId, EfestoRuntimeContext context) {
logger.debug("loadKieSession {} {}", modelLocalUriId, context);
Optional<GeneratedExecutableResource> generatedExecutableResourceOpt =
GeneratedResourceUtils.getGeneratedExecutableResource(modelLocalUriId,
context.getGeneratedResourcesMap());
GeneratedExecutableResource finalResource = generatedExecutableResourceOpt.orElseThrow(
() -> new KieRuntimeServiceException("Can not find expected GeneratedExecutableResource for " + modelLocalUriId));
List<Model> models = new ArrayList<>();
for (String className : finalResource.getFullClassNames()) {
models.add(loadModel(className, context));
}
logger.debug("models {}", models);
KieBase kieBase = KieBaseBuilder.createKieBaseFromModel(models);
logger.debug("kieBase {}", kieBase);
try {
KieSession toReturn = kieBase.newKieSession();
logger.debug("toReturn {}", toReturn);
return toReturn;
} catch (Exception e) {
String errorMessage = String.format("Failed to create new session from %s due to %s", kieBase,
e.getMessage());
logger.error(errorMessage, e);
throw new KieRuntimeServiceException(errorMessage, e);
}
}
|
@Disabled("DROOLS-7090 : In this test, there is no RuntimeService for drl so this cannot find IndexFile.drl_json")
@Test
void loadKieSession() {
EfestoRuntimeContext context = EfestoRuntimeContextUtils.buildWithParentClassLoader(Thread.currentThread().getContextClassLoader());
ModelLocalUriId localUri = new ModelLocalUriId(LocalUri.parse("/drl" + basePath));
KieSession retrieved = EfestoKieSessionUtil.loadKieSession(localUri, context);
assertThat(retrieved).isNotNull();
assertThat(retrieved.getIdentifier()).isZero();
}
|
@Override
public Optional<SimpleAddress> selectAddress(Optional<String> addressSelectionContext)
{
if (addressSelectionContext.isPresent()) {
return addressSelectionContext
.map(HostAndPort::fromString)
.map(SimpleAddress::new);
}
List<HostAndPort> catalogServers = internalNodeManager.getCatalogServers().stream()
.filter(node -> node.getThriftPort().isPresent())
.map(catalogServerNode -> {
HostAddress hostAndPort = catalogServerNode.getHostAndPort();
return HostAndPort.fromParts(hostAndPort.getHostText(), catalogServerNode.getThriftPort().getAsInt());
})
.collect(toImmutableList());
return hostSelector.apply(catalogServers).map(SimpleAddress::new);
}
|
@Test
public void testAddressSelectionNoContext()
{
InMemoryNodeManager internalNodeManager = new InMemoryNodeManager();
RandomCatalogServerAddressSelector selector = new RandomCatalogServerAddressSelector(internalNodeManager, hostAndPorts -> Optional.of(hostAndPorts.get(0)));
internalNodeManager.addNode(
CONNECTOR_ID,
new InternalNode(
"1",
URI.create("local://localhost:123/1"),
OptionalInt.empty(),
"1",
false,
false,
true,
false));
internalNodeManager.addNode(
CONNECTOR_ID,
new InternalNode(
"2",
URI.create("local://localhost:456/1"),
OptionalInt.of(2),
"1",
false,
false,
true,
false));
internalNodeManager.addNode(
CONNECTOR_ID,
new InternalNode(
"3",
URI.create("local://localhost:789/2"),
OptionalInt.of(3),
"1",
false,
false,
true,
false));
Optional<SimpleAddressSelector.SimpleAddress> address = selector.selectAddress(Optional.empty());
assertTrue(address.isPresent());
assertEquals(address.get().getHostAndPort(), HostAndPort.fromParts("localhost", 2));
}
|
public ScanResults run(ScanTarget scanTarget) throws ExecutionException, InterruptedException {
return runAsync(scanTarget).get();
}
|
@Test
public void run_whenServiceFingerprinterSucceeded_fillsReconnaissanceReportWithFingerprintResult()
throws ExecutionException, InterruptedException {
Injector injector =
Guice.createInjector(
new FakeUtcClockModule(),
new FakePluginExecutionModule(),
new FakePortScannerBootstrapModule(),
new FakeServiceFingerprinterBootstrapModule(),
new FakeVulnDetectorBootstrapModule(),
new FakeVulnDetectorBootstrapModule2());
scanningWorkflow = injector.getInstance(DefaultScanningWorkflow.class);
ScanResults scanResults = scanningWorkflow.run(buildScanTarget());
assertThat(scanResults.getScanStatus()).isEqualTo(ScanStatus.SUCCEEDED);
assertThat(scanResults.getReconnaissanceReport().getNetworkServicesList())
.containsExactly(
FakeServiceFingerprinter.addWebServiceContext(
FakePortScanner.getFakeNetworkService(buildScanTarget().getNetworkEndpoint())));
}
|
public abstract boolean compare(A actual, E expected);
|
@Test
public void testFormattingDiffsUsing_compare() {
// The compare behaviour should be the same as the wrapped correspondence.
assertThat(LENGTHS_WITH_DIFF.compare("foo", 3)).isTrue();
assertThat(LENGTHS_WITH_DIFF.compare("foot", 4)).isTrue();
assertThat(LENGTHS_WITH_DIFF.compare("foo", 4)).isFalse();
}
|
@Override
public Boolean update(List<ModifyRequest> modifyRequests, BiConsumer<Boolean, Throwable> consumer) {
return update(transactionTemplate, jdbcTemplate, modifyRequests, consumer);
}
|
@Test
void testUpdate2() {
List<ModifyRequest> modifyRequests = new ArrayList<>();
ModifyRequest modifyRequest1 = new ModifyRequest();
String sql = "UPDATE config_info SET data_id = 'test' WHERE id = ?;";
modifyRequest1.setSql(sql);
Object[] args = new Object[] {1};
modifyRequest1.setArgs(args);
modifyRequests.add(modifyRequest1);
when(transactionTemplate.execute(any(TransactionCallback.class))).thenReturn(true);
assertTrue(operate.update(modifyRequests, biConsumer));
}
|
static KiePMMLDerivedField getKiePMMLDerivedField(final DerivedField derivedField,
final List<Field<?>> fields) {
        DataType dataType = derivedField.getDataType() != null ? derivedField.getDataType() : getDataType(fields, derivedField.getName());
OP_TYPE opType = derivedField.getOpType() != null ? OP_TYPE.byName(derivedField.getOpType().value()) : null;
return KiePMMLDerivedField.builder(derivedField.getName(),
getKiePMMLExtensions(derivedField.getExtensions()),
DATA_TYPE.byName(dataType.value()),
opType,
getKiePMMLExpression(derivedField.getExpression()))
.withDisplayName(derivedField.getDisplayName())
.build();
}
|
@Test
void getKiePMMLDerivedField() {
final String fieldName = "fieldName";
final DerivedField toConvert = getDerivedField(fieldName);
KiePMMLDerivedField retrieved = KiePMMLDerivedFieldInstanceFactory.getKiePMMLDerivedField(toConvert,
Collections.emptyList());
commonVerifyKiePMMLDerivedField(retrieved, toConvert);
}
|
@VisibleForTesting
ExportResult<MusicContainerResource> exportPlaylistItems(
TokensAndUrlAuthData authData,
IdOnlyContainerResource playlistData,
Optional<PaginationData> paginationData, UUID jobId)
throws IOException, InvalidTokenException, PermissionDeniedException, ParseException {
String playlistId = playlistData.getId();
Optional<String> paginationToken =
paginationData.map((PaginationData value) -> ((StringPaginationToken) value).getToken());
PlaylistItemExportResponse playlistItemExportResponse =
getOrCreateMusicHttpApi(authData).exportPlaylistItems(playlistId, paginationToken);
PaginationData nextPageData = null;
if (!Strings.isNullOrEmpty(playlistItemExportResponse.getNextPageToken())) {
nextPageData = new StringPaginationToken(playlistItemExportResponse.getNextPageToken());
}
ContinuationData continuationData = new ContinuationData(nextPageData);
MusicContainerResource containerResource = null;
GooglePlaylistItem[] googlePlaylistItems = playlistItemExportResponse.getPlaylistItems();
List<MusicPlaylistItem> playlistItems = new ArrayList<>();
if (googlePlaylistItems != null && googlePlaylistItems.length > 0) {
for (GooglePlaylistItem googlePlaylistItem : googlePlaylistItems) {
playlistItems.add(convertPlaylistItem(playlistId, googlePlaylistItem));
monitor.debug(
() ->
String.format(
"%s: Google Music exporting playlist item in %s : [track title: %s, track isrc: %s]",
jobId, playlistId,
googlePlaylistItem.getTrack().getTitle(),
googlePlaylistItem.getTrack().getIsrc()));
}
containerResource = new MusicContainerResource(null, playlistItems, null, null);
}
return new ExportResult<>(ResultType.CONTINUE, containerResource, continuationData);
}
|
@Test
public void exportPlaylistItemFirstSet()
throws IOException, InvalidTokenException, PermissionDeniedException, ParseException {
GooglePlaylistItem playlistItem = setUpSinglePlaylistItem("t1_isrc", "r1_icpn");
when(playlistItemExportResponse.getPlaylistItems())
.thenReturn(new GooglePlaylistItem[]{playlistItem});
when(playlistItemExportResponse.getNextPageToken()).thenReturn(PLAYLIST_ITEM_TOKEN);
IdOnlyContainerResource idOnlyContainerResource = new IdOnlyContainerResource("p1_id");
ExportResult<MusicContainerResource> result =
googleMusicExporter.exportPlaylistItems(null, idOnlyContainerResource, Optional.empty(),
uuid);
// Check results
// Verify correct methods were called
verify(musicHttpApi).exportPlaylistItems("p1_id", Optional.empty());
verify(playlistItemExportResponse).getPlaylistItems();
// Check pagination
ContinuationData continuationData = result.getContinuationData();
StringPaginationToken paginationToken =
(StringPaginationToken) continuationData.getPaginationData();
assertThat(paginationToken.getToken()).isEqualTo(PLAYLIST_ITEM_TOKEN);
// Check playlist field of container (should be empty)
Collection<MusicPlaylist> actualPlaylists = result.getExportedData().getPlaylists();
assertThat(actualPlaylists).isEmpty();
// Check playlistItems field of container
List<MusicPlaylistItem> actualPlaylistItems = result.getExportedData().getPlaylistItems();
assertThat(
actualPlaylistItems.stream()
.map(MusicPlaylistItem::getPlaylistId)
.collect(Collectors.toList()))
.containsExactly("p1_id"); // for download
assertThat(
actualPlaylistItems.stream()
.map(MusicPlaylistItem::getTrack)
.collect(Collectors.toList()))
.containsExactly(
new MusicRecording("t1_isrc", null, 0L, new MusicRelease("r1_icpn", null, null), null,
true));
}
|
static void processResources(KieCompilerService kieCompilerService, EfestoResource toProcess, EfestoCompilationContext context) {
List<EfestoCompilationOutput> efestoCompilationOutputList = kieCompilerService.processResource(toProcess, context);
for (EfestoCompilationOutput compilationOutput : efestoCompilationOutputList) {
if (compilationOutput instanceof EfestoCallableOutput) {
populateContext(context, (EfestoCallableOutput) compilationOutput);
if (compilationOutput instanceof EfestoCallableOutputClassesContainer) {
EfestoCallableOutputClassesContainer classesContainer =
(EfestoCallableOutputClassesContainer) compilationOutput;
context.loadClasses(classesContainer.getCompiledClassesMap());
context.addGeneratedClasses(classesContainer.getModelLocalUriId().asModelLocalUriId(),
classesContainer.getCompiledClassesMap());
}
} else if (compilationOutput instanceof EfestoResource) {
processResourceWithContext((EfestoResource) compilationOutput, context);
}
}
}
|
@Test
void processResourcesWithRedirect() {
KieCompilerService kieCompilerServiceMock = mock(MockKieCompilerServiceF.class);
EfestoResource toProcess = new MockEfestoInputF();
EfestoCompilationContext context =
EfestoCompilationContextUtils.buildWithParentClassLoader(Thread.currentThread().getContextClassLoader());
CompilationManagerUtils.processResources(kieCompilerServiceMock, toProcess, context);
verify(kieCompilerServiceMock, times(1)).processResource(toProcess, context);
}
|
@Override
    @Transactional(rollbackFor = Exception.class) // use a transaction so that any exception rolls back the whole import
public UserImportRespVO importUserList(List<UserImportExcelVO> importUsers, boolean isUpdateSupport) {
        // 1.1 Validate the parameters
if (CollUtil.isEmpty(importUsers)) {
throw exception(USER_IMPORT_LIST_IS_EMPTY);
}
        // 1.2 The initial password must not be empty
String initPassword = configApi.getConfigValueByKey(USER_INIT_PASSWORD_KEY);
if (StrUtil.isEmpty(initPassword)) {
throw exception(USER_IMPORT_INIT_PASSWORD);
}
        // 2. Iterate over the users, creating or updating them one by one
UserImportRespVO respVO = UserImportRespVO.builder().createUsernames(new ArrayList<>())
.updateUsernames(new ArrayList<>()).failureUsernames(new LinkedHashMap<>()).build();
importUsers.forEach(importUser -> {
            // 2.1.1 Validate that the fields meet the requirements
try {
ValidationUtils.validate(BeanUtils.toBean(importUser, UserSaveReqVO.class).setPassword(initPassword));
} catch (ConstraintViolationException ex){
respVO.getFailureUsernames().put(importUser.getUsername(), ex.getMessage());
return;
}
            // 2.1.2 Run the business validations and record the reason if any of them fail
try {
validateUserForCreateOrUpdate(null, null, importUser.getMobile(), importUser.getEmail(),
importUser.getDeptId(), null);
} catch (ServiceException ex) {
respVO.getFailureUsernames().put(importUser.getUsername(), ex.getMessage());
return;
}
            // 2.2.1 If the user does not exist yet, insert it
AdminUserDO existUser = userMapper.selectByUsername(importUser.getUsername());
if (existUser == null) {
userMapper.insert(BeanUtils.toBean(importUser, AdminUserDO.class)
                        .setPassword(encodePassword(initPassword)).setPostIds(new HashSet<>())); // set the default password and an empty set of post IDs
respVO.getCreateUsernames().add(importUser.getUsername());
return;
}
            // 2.2.2 If the user exists, check whether updating is allowed
if (!isUpdateSupport) {
respVO.getFailureUsernames().put(importUser.getUsername(), USER_USERNAME_EXISTS.getMsg());
return;
}
AdminUserDO updateUser = BeanUtils.toBean(importUser, AdminUserDO.class);
updateUser.setId(existUser.getId());
userMapper.updateById(updateUser);
respVO.getUpdateUsernames().add(importUser.getUsername());
});
return respVO;
}
|
@Test
public void testImportUserList_02() {
        // Prepare the parameters
UserImportExcelVO importUser = randomPojo(UserImportExcelVO.class, o -> {
            o.setStatus(randomEle(CommonStatusEnum.values()).getStatus()); // keep status within the valid range
            o.setSex(randomEle(SexEnum.values()).getSex()); // keep sex within the valid range
o.setEmail(randomEmail());
o.setMobile(randomMobile());
});
        // mock the deptService methods
DeptDO dept = randomPojo(DeptDO.class, o -> {
o.setId(importUser.getDeptId());
o.setStatus(CommonStatusEnum.ENABLE.getStatus());
});
when(deptService.getDept(eq(dept.getId()))).thenReturn(dept);
        // mock the passwordEncoder methods
when(passwordEncoder.encode(eq("yudaoyuanma"))).thenReturn("java");
        // Invoke
UserImportRespVO respVO = userService.importUserList(newArrayList(importUser), true);
        // Assert
assertEquals(1, respVO.getCreateUsernames().size());
AdminUserDO user = userMapper.selectByUsername(respVO.getCreateUsernames().get(0));
assertPojoEquals(importUser, user);
assertEquals("java", user.getPassword());
assertEquals(0, respVO.getUpdateUsernames().size());
assertEquals(0, respVO.getFailureUsernames().size());
}
|
public Schema addToSchema(Schema schema) {
validate(schema);
schema.addProp(LOGICAL_TYPE_PROP, name);
schema.setLogicalType(this);
return schema;
}
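
For context, a short hedged sketch that exercises the method above through Avro's public LogicalTypes API (the same calls the test below uses): it attaches a decimal(9, 2) logical type to a bytes schema and prints the resulting JSON, which is expected to carry the logicalType property added by addToSchema.

import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;

// Sketch: build a bytes schema, attach decimal(9, 2), and print its JSON representation.
public class DecimalSchemaSketch {
  public static void main(String[] args) {
    Schema schema = Schema.create(Schema.Type.BYTES);
    LogicalTypes.decimal(9, 2).addToSchema(schema);
    System.out.println(schema.toString(true)); // pretty-printed JSON including "logicalType": "decimal"
  }
}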
|
@Test
void bytesDecimalToFromJson() {
Schema schema = Schema.create(Schema.Type.BYTES);
LogicalTypes.decimal(9, 2).addToSchema(schema);
Schema parsed = new Schema.Parser().parse(schema.toString(true));
assertEquals(schema, parsed, "Constructed and parsed schemas should match");
}
|
public String getValue() {
return value;
}
|
@Test
public void shouldHandleDoubleValueAsAString() throws Exception {
final ConfigurationValue configurationValue = new ConfigurationValue(3.1428571429D);
assertThat(configurationValue.getValue(), is("3.1428571429"));
}
|
public synchronized Value get(String key) throws IOException {
checkNotClosed();
Entry entry = lruEntries.get(key);
if (entry == null) {
return null;
}
if (!entry.readable) {
return null;
}
for (File file : entry.cleanFiles) {
// A file must have been deleted manually!
if (!file.exists()) {
return null;
}
}
redundantOpCount++;
journalWriter.append(READ);
journalWriter.append(' ');
journalWriter.append(key);
journalWriter.append('\n');
if (journalRebuildRequired()) {
executorService.submit(cleanupCallable);
}
return new Value(key, entry.sequenceNumber, entry.cleanFiles, entry.lengths);
}
|
@Test public void aggressiveClearingHandlesRead() throws Exception {
deleteDirectory(cacheDir);
assertThat(cache.get("a")).isNull();
}
|
public List<CreateIdStatus<K>> getElements()
{
return _collection;
}
|
@Test(dataProvider = "provideKeys")
public <K> void testCreate(K[] keys)
{
ProtocolVersion version = AllProtocolVersions.BASELINE_PROTOCOL_VERSION;
List<CreateIdStatus<K>> elements = new ArrayList<>();
elements.add(new CreateIdStatus<>(HttpStatus.S_201_CREATED.getCode(), keys[0], null, version));
elements.add(new CreateIdStatus<>(HttpStatus.S_201_CREATED.getCode(), keys[1], null, version));
ErrorResponse error = new ErrorResponse().setMessage("3");
elements.add(new CreateIdStatus<>(HttpStatus.S_500_INTERNAL_SERVER_ERROR.getCode(), keys[2], error, version));
BatchCreateIdResponse<K> batchResp = new BatchCreateIdResponse<>(elements);
Assert.assertEquals(batchResp.getElements(), elements);
}
|
public static String replaceAll(CharSequence content, String regex, String replacementTemplate) {
final Pattern pattern = Pattern.compile(regex, Pattern.DOTALL);
return replaceAll(content, pattern, replacementTemplate);
}
|
@Test
public void issuesI5TQDRTest(){
final Pattern patternIp = Pattern.compile("((2(5[0-5]|[0-4]\\d))|[0-1]?\\d{1,2})\\.((2(5[0-5]|[0-4]\\d))|[0-1]?\\d{1,2})\\.((2(5[0-5]|[0-4]\\d))"
+ "|[0-1]?\\d{1,2})\\.((2(5[0-5]|[0-4]\\d))|[0-1]?\\d{1,2})");
final String s = ReUtil.replaceAll("1.2.3.4", patternIp, "$1.**.**.$10");
assertEquals("1.**.**.4", s);
}
|
public synchronized void put(Integer e) {
LOGGER.info("putting");
sourceList.add(e);
LOGGER.info("notifying");
notify();
}
|
@Test
void testPut() {
var g = new GuardedQueue();
g.put(12);
assertEquals(Integer.valueOf(12), g.get());
}
|
public static String elasticsearchMessage(String indexName, String messageId) {
checkArgument("indexName", indexName);
checkArgument("messageId", messageId);
return String.join(":", ES_MESSAGE, indexName, messageId);
}
|
@Test
public void elasticsearchMessage() {
assertThat(EventOriginContext.elasticsearchMessage("graylog_0", "b5e53442-12bb-4374-90ed-c325c0d979ce"))
.isEqualTo("urn:graylog:message:es:graylog_0:b5e53442-12bb-4374-90ed-c325c0d979ce");
assertThatCode(() -> EventOriginContext.elasticsearchMessage("", "b5e53442-12bb-4374-90ed-c325c0d979ce"))
.hasMessageContaining("indexName")
.isInstanceOf(IllegalArgumentException.class);
assertThatCode(() -> EventOriginContext.elasticsearchMessage(null, "b5e53442-12bb-4374-90ed-c325c0d979ce"))
.hasMessageContaining("indexName")
.isInstanceOf(IllegalArgumentException.class);
assertThatCode(() -> EventOriginContext.elasticsearchMessage("graylog_0", ""))
.hasMessageContaining("messageId")
.isInstanceOf(IllegalArgumentException.class);
assertThatCode(() -> EventOriginContext.elasticsearchMessage("graylog_0", null))
.hasMessageContaining("messageId")
.isInstanceOf(IllegalArgumentException.class);
}
|
@Override
public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler) {
if (handler instanceof HandlerMethod handlerMethod) {
preHandle(handlerMethod, request);
} else {
LOGGER.debug("Handler is not a HandlerMethod, skipping deprecated check.");
}
return true;
}
|
@Test
public void preHandle_whenNotHandlerMethod_shouldLogDebugMessage() {
DeprecatedHandler handler = new DeprecatedHandler(userSession);
boolean result = handler.preHandle(mock(HttpServletRequest.class), mock(HttpServletResponse.class), "Not handler");
assertThat(result).isTrue();
assertThat(logTester.logs(Level.DEBUG)).contains("Handler is not a HandlerMethod, skipping deprecated check.");
}
|
@VisibleForTesting
protected static PrivateKey loadPrivateKey(File file, String password) throws IOException, GeneralSecurityException {
try (final InputStream is = Files.newInputStream(file.toPath())) {
final byte[] keyBytes = ByteStreams.toByteArray(is);
final String keyString = new String(keyBytes, StandardCharsets.US_ASCII);
final Matcher m = KEY_PATTERN.matcher(keyString);
byte[] encoded = keyBytes;
if (m.matches()) {
if (!Strings.isNullOrEmpty(m.group(1))) {
throw new IllegalArgumentException("Unsupported key type PKCS#1, please convert to PKCS#8");
}
encoded = BaseEncoding.base64().decode(m.group(3).replaceAll("[\\r\\n]", ""));
}
final EncodedKeySpec keySpec = createKeySpec(encoded, password);
if (keySpec == null) {
throw new IllegalArgumentException("Unsupported key type: " + file);
}
final String[] keyAlgorithms = {"RSA", "DSA", "EC"};
for (String keyAlgorithm : keyAlgorithms) {
try {
@SuppressWarnings("InsecureCryptoUsage") final KeyFactory keyFactory = KeyFactory.getInstance(keyAlgorithm);
return keyFactory.generatePrivate(keySpec);
} catch (InvalidKeySpecException e) {
LOG.debug("Loading {} private key from \"{}\" failed", keyAlgorithm, file, e);
}
}
throw new IllegalArgumentException("Unsupported key type: " + file);
}
}
|
@Test
public void testLoadPrivateKey() throws Exception {
if (exceptionClass != null) {
expectedException.expect(exceptionClass);
expectedException.expectMessage(exceptionMessage);
}
final File keyFile = resourceToFile(keyFileName);
final PrivateKey privateKey = KeyUtil.loadPrivateKey(keyFile, keyPassword);
assertThat(privateKey).isNotNull();
}
|
public void setSelectLength( int[] selectLength ) {
if ( selectLength.length > selectFields.length ) {
resizeSelectFields( selectLength.length );
}
for ( int i = 0; i < selectFields.length; i++ ) {
if ( i < selectLength.length ) {
selectFields[i].setLength( selectLength[i] );
} else {
selectFields[i].setLength( UNDEFINED );
}
}
}
|
@Test
public void setSelectLength() {
selectValuesMeta.setSelectLength( new int[] { 1, 2 } );
assertArrayEquals( new int[] { 1, 2 }, selectValuesMeta.getSelectLength() );
}
|
State getState() {
return state;
}
|
@Test
public void verify_failed_restart_resulting_in_hard_stop_cycle() {
assertThat(underTest.getState()).isEqualTo(INIT);
verifyMoveTo(STARTING);
verifyMoveTo(OPERATIONAL);
verifyMoveTo(RESTARTING);
verifyMoveTo(HARD_STOPPING);
verifyMoveTo(FINALIZE_STOPPING);
verifyMoveTo(STOPPED);
}
|
@Override
public Map<String, String> evaluate(FunctionArgs args, EvaluationContext context) {
final String value = valueParam.required(args, context);
if (Strings.isNullOrEmpty(value)) {
return Collections.emptyMap();
}
final CharMatcher kvPairsMatcher = splitParam.optional(args, context).orElse(CharMatcher.whitespace());
final CharMatcher kvDelimMatcher = valueSplitParam.optional(args, context).orElse(CharMatcher.anyOf("="));
Splitter outerSplitter = Splitter.on(DelimiterCharMatcher.withQuoteHandling(kvPairsMatcher))
.omitEmptyStrings()
.trimResults();
final Splitter entrySplitter = Splitter.on(kvDelimMatcher)
.omitEmptyStrings()
.limit(2)
.trimResults();
return new MapSplitter(outerSplitter,
entrySplitter,
ignoreEmptyValuesParam.optional(args, context).orElse(true),
trimCharactersParam.optional(args, context).orElse(CharMatcher.none()),
trimValueCharactersParam.optional(args, context).orElse(CharMatcher.none()),
allowDupeKeysParam.optional(args, context).orElse(true),
duplicateHandlingParam.optional(args, context).orElse(TAKE_FIRST))
.split(value);
}
|
@Test
void testDefaultSettingsTakeFirst() {
final Map<String, Expression> arguments = Collections.singletonMap("value", valueExpression);
Map<String, String> result = classUnderTest.evaluate(new FunctionArgs(classUnderTest, arguments), evaluationContext);
Map<String, String> expectedResult = new HashMap<>();
expectedResult.put("test", "do");
expectedResult.put("number", "12345");
assertThat(result).containsExactlyInAnyOrderEntriesOf(expectedResult);
}
|
@Override
public int hashCode() {
return Objects.hash(
threadName,
threadState,
activeTasks,
standbyTasks,
mainConsumerClientId,
restoreConsumerClientId,
producerClientIds,
adminClientId);
}
|
@Test
public void shouldNotBeEqualIfDifferInConsumerClientId() {
final ThreadMetadata differRestoreConsumerClientId = new ThreadMetadataImpl(
THREAD_NAME,
THREAD_STATE,
MAIN_CONSUMER_CLIENT_ID,
"different",
PRODUCER_CLIENT_IDS,
ADMIN_CLIENT_ID,
ACTIVE_TASKS,
STANDBY_TASKS
);
assertThat(threadMetadata, not(equalTo(differRestoreConsumerClientId)));
assertThat(threadMetadata.hashCode(), not(equalTo(differRestoreConsumerClientId.hashCode())));
}
|
@Override
public boolean match(Message msg, StreamRule rule) {
Double msgVal = getDouble(msg.getField(rule.getField()));
if (msgVal == null) {
return false;
}
Double ruleVal = getDouble(rule.getValue());
if (ruleVal == null) {
return false;
}
return rule.getInverted() ^ (msgVal < ruleVal);
}
|
@Test
public void testSuccessfullInvertedMatchWithEqualValues() {
StreamRule rule = getSampleRule();
rule.setValue("-9001");
rule.setInverted(true);
Message msg = getSampleMessage();
msg.addField("something", "-9001");
StreamRuleMatcher matcher = getMatcher(rule);
assertTrue(matcher.match(msg, rule));
}
|
@Operation(summary = "start an application for an app coming from a request station", tags = { SwaggerConfig.RS_ACTIVATE_WITH_APP }, operationId = "request_station_session",
parameters = {@Parameter(ref = "API-V"), @Parameter(ref = "OS-T"), @Parameter(ref = "APP-V"), @Parameter(ref = "OS-V"), @Parameter(ref = "REL-T")})
@PostMapping(value = "request_station/request_session", produces = "application/json")
@ResponseBody
public AppResponse startRequestStationAppApplication(@Valid @RequestBody RsStartAppApplicationRequest request) throws FlowNotDefinedException, NoSuchAlgorithmException, FlowStateNotDefinedException, IOException, SharedServiceClientException {
return service.startFlow(ApplyForAppAtRequestStationFlow.NAME, Action.RS_START_APP_APPLICATION, request);
}
|
@Test
void validateIfCorrectProcessesAreCalledRequestStationRequestSession() throws FlowNotDefinedException, NoSuchAlgorithmException, IOException, FlowStateNotDefinedException, SharedServiceClientException {
RsStartAppApplicationRequest request = new RsStartAppApplicationRequest();
activationController.startRequestStationAppApplication(request);
verify(flowService, times(1)).startFlow(anyString(), any(Action.class), any(RsStartAppApplicationRequest.class));
}
|
public Project getProject(String gitlabUrl, String pat, Long gitlabProjectId) {
String url = format("%s/projects/%s", gitlabUrl, gitlabProjectId);
LOG.debug("get project : [{}]", url);
Request request = new Request.Builder()
.addHeader(PRIVATE_TOKEN, pat)
.get()
.url(url)
.build();
try (Response response = client.newCall(request).execute()) {
checkResponseIsSuccessful(response);
String body = response.body().string();
LOG.trace("loading project payload result : [{}]", body);
return new GsonBuilder().create().fromJson(body, Project.class);
} catch (JsonSyntaxException e) {
throw new IllegalArgumentException("Could not parse GitLab answer to retrieve a project. Got a non-json payload as result.");
} catch (IOException e) {
logException(url, e);
throw new IllegalStateException(e.getMessage(), e);
}
}
|
@Test
public void get_project_details() throws InterruptedException {
MockResponse projectResponse = new MockResponse()
.setResponseCode(200)
.setBody("""
{\
"id": 1234,\
"name": "SonarQube example 2",\
"name_with_namespace": "SonarSource / SonarQube / SonarQube example 2",\
"path": "sonarqube-example-2",\
"path_with_namespace": "sonarsource/sonarqube/sonarqube-example-2",\
"web_url": "https://example.gitlab.com/sonarsource/sonarqube/sonarqube-example-2"\
}""");
server.enqueue(projectResponse);
Project project = underTest.getProject(gitlabUrl, "pat", 1234L);
RecordedRequest projectGitlabRequest = server.takeRequest(10, TimeUnit.SECONDS);
String gitlabUrlCall = projectGitlabRequest.getRequestUrl().toString();
assertThat(project).isNotNull();
assertThat(gitlabUrlCall).isEqualTo(
server.url("") + "projects/1234");
assertThat(projectGitlabRequest.getMethod()).isEqualTo("GET");
}
|
@Override
public void streamRequest(final StreamRequest request, final Callback<StreamResponse> callback)
{
streamRequest(request, new RequestContext(), callback);
}
|
@Test
public void testStreamWithFailout() throws URISyntaxException, InterruptedException {
setupRedirectStrategy(true);
sendAndVerifyStreamRequest();
ArgumentCaptor<StreamRequest> requestArgumentCaptor = ArgumentCaptor.forClass(StreamRequest.class);
verify(_d2Client, times(1)).streamRequest(requestArgumentCaptor.capture(), any(), any());
assertEquals(requestArgumentCaptor.getValue().getURI().toString(), REDIRECTED_URI);
}
|
@Override
public String selectForUpdateSkipLocked() {
return supportsSelectForUpdateSkipLocked ? " FOR UPDATE SKIP LOCKED" : "";
}
|
@Test
void mySQL8DoesNotSupportSelectForUpdateSkipLocked() {
assertThat(new MySqlDialect("MySQL", "8.0.0").selectForUpdateSkipLocked()).isEmpty();
}
|
@Override
public Object parse(final String property, final Object value) {
if (property.equalsIgnoreCase(KsqlConstants.LEGACY_RUN_SCRIPT_STATEMENTS_CONTENT)) {
validator.validate(property, value);
return value;
}
final ConfigItem configItem = resolver.resolve(property, true)
.orElseThrow(() -> new PropertyNotFoundException(property));
final Object parsedValue = configItem.parseValue(value);
validator.validate(configItem.getPropertyName(), parsedValue);
return parsedValue;
}
|
@Test
public void shouldCallResolverForOtherProperties() {
// When:
parser.parse(KsqlConfig.KSQL_SERVICE_ID_CONFIG, "100");
// Then:
verify(resolver).resolve(KsqlConfig.KSQL_SERVICE_ID_CONFIG, true);
}
|
@Override
public Iterator<IndexKeyEntries> getSqlRecordIteratorBatch(@Nonnull Comparable value, boolean descending) {
return getSqlRecordIteratorBatch(value, descending, null);
}
|
@Test
public void getSqlRecordIteratorBatchLeftIncludedRightExcludedDescending() {
var expectedKeyOrder = List.of(6, 3, 0);
var result = store.getSqlRecordIteratorBatch(0, true, 1, false, true);
assertResult(expectedKeyOrder, result);
}
|
public static boolean validateAssuranceLevel(int assuranceLevel) {
return numberMap.entrySet()
.stream()
.filter(entry -> Objects.equals(entry.getValue(), assuranceLevel))
.findFirst()
.map(Map.Entry::getKey)
.isPresent();
}
|
@Test
void unknownAssuranceLevel() {
boolean result = LevelOfAssurance.validateAssuranceLevel(40);
assertFalse(result);
}
|
public static String toAbsolute(String baseURL, String relativeURL) {
String relURL = relativeURL;
// Relative to protocol
if (relURL.startsWith("//")) {
return StringUtils.substringBefore(baseURL, "//") + "//"
+ StringUtils.substringAfter(relURL, "//");
}
// Relative to domain name
if (relURL.startsWith("/")) {
return getRoot(baseURL) + relURL;
}
        // Relative to full page URL minus ? or #
if (relURL.startsWith("?") || relURL.startsWith("#")) {
// this is a relative url and should have the full page base
return baseURL.replaceFirst("(.*?)([\\?\\#])(.*)", "$1") + relURL;
}
// Relative to last directory/segment
if (!relURL.contains("://")) {
String base = baseURL.replaceFirst("(.*?)([\\?\\#])(.*)", "$1");
if (StringUtils.countMatches(base, '/') > 2) {
base = base.replaceFirst("(.*/)(.*)", "$1");
}
if (base.endsWith("/")) {
// This is a URL relative to the last URL segment
relURL = base + relURL;
} else {
relURL = base + "/" + relURL;
}
}
// Not detected as relative, so return as is
return relURL;
}
|
@Test
public void testToAbsoluteRelativeToFullPageURL() {
s = "?name=john";
t = "https://www.example.com/a/b/c.html?name=john";
assertEquals(t, HttpURL.toAbsolute(absURL, s));
}
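The branch comments in toAbsolute map directly onto the four resolution rules. Below is a minimal illustrative sketch, assuming the HttpURL entry point used by the test above and a hypothetical base URL; the expected outputs follow from the branches shown, not from project fixtures.
public class ToAbsoluteSketch {
    public static void main(String[] args) {
        // Hypothetical base URL chosen to exercise every branch of toAbsolute(...).
        String base = "https://www.example.com/a/b/c.html?x=1#top";

        // Protocol-relative: keep the base scheme, take host and path from the reference.
        System.out.println(HttpURL.toAbsolute(base, "//cdn.example.org/lib.js"));
        // -> https://cdn.example.org/lib.js

        // Root-relative: resolved against the domain root (getRoot(base)).
        System.out.println(HttpURL.toAbsolute(base, "/img/logo.png"));
        // -> https://www.example.com/img/logo.png

        // Fragment-relative: appended after stripping the base's ?/# tail.
        System.out.println(HttpURL.toAbsolute(base, "#section2"));
        // -> https://www.example.com/a/b/c.html#section2

        // Segment-relative: resolved against the last directory of the base.
        System.out.println(HttpURL.toAbsolute(base, "d.html"));
        // -> https://www.example.com/a/b/d.html
    }
}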
|
@Override
public GlobalTransaction beginTransaction(TransactionInfo txInfo) throws TransactionalExecutor.ExecutionException {
GlobalTransaction tx = GlobalTransactionContext.getCurrentOrCreate();
try {
triggerBeforeBegin(tx);
tx.begin(txInfo.getTimeOut(), txInfo.getName());
triggerAfterBegin(tx);
} catch (TransactionException txe) {
throw new TransactionalExecutor.ExecutionException(tx, txe, TransactionalExecutor.Code.BeginFailure);
}
return tx;
}
|
@Test
public void testBeginTransaction() {
MockedStatic<GlobalTransactionContext> enhancedServiceLoader = Mockito.mockStatic(GlobalTransactionContext.class);
enhancedServiceLoader.when(GlobalTransactionContext::getCurrentOrCreate).thenReturn(new MockGlobalTransaction());
MockedStatic<TransactionHookManager> enhancedTransactionHookManager = Mockito.mockStatic(TransactionHookManager.class);
enhancedTransactionHookManager.when(TransactionHookManager::getHooks).thenReturn(Collections.singletonList(new MockTransactionHook()));
TransactionInfo transactionInfo = new TransactionInfo();
Assertions.assertDoesNotThrow(() -> sagaTransactionalTemplate.beginTransaction(transactionInfo));
enhancedServiceLoader.close();
enhancedTransactionHookManager.close();
}
|
static void verifyFixInvalidValues(final List<KiePMMLMiningField> notTargetMiningFields,
final PMMLRequestData requestData) {
logger.debug("verifyInvalidValues {} {}", notTargetMiningFields, requestData);
final Collection<ParameterInfo> requestParams = requestData.getRequestParams();
final List<ParameterInfo> toRemove = new ArrayList<>();
notTargetMiningFields.forEach(miningField -> {
ParameterInfo parameterInfo = requestParams.stream()
.filter(paramInfo -> miningField.getName().equals(paramInfo.getName()))
.findFirst()
.orElse(null);
if (parameterInfo != null) {
boolean match = isMatching(parameterInfo, miningField);
if (!match) {
manageInvalidValues(miningField, parameterInfo, toRemove);
}
toRemove.forEach(requestData::removeRequestParam);
}
});
}
|
@Test
void verifyFixInvalidValuesInvalidAsValueWithoutReplacement() {
assertThatExceptionOfType(KiePMMLException.class).isThrownBy(() -> {
KiePMMLMiningField miningField0 = KiePMMLMiningField.builder("FIELD-0", null)
.withDataType(DATA_TYPE.STRING)
.withInvalidValueTreatmentMethod(INVALID_VALUE_TREATMENT_METHOD.AS_VALUE)
.withAllowedValues(Arrays.asList("123", "124", "125"))
.build();
KiePMMLMiningField miningField1 = KiePMMLMiningField.builder("FIELD-1", null)
.withDataType(DATA_TYPE.DOUBLE)
.withInvalidValueTreatmentMethod(INVALID_VALUE_TREATMENT_METHOD.AS_VALUE)
.withAllowedValues(Arrays.asList("1.23", "12.4", "1.25"))
.build();
List<KiePMMLInterval> intervals = Arrays.asList(new KiePMMLInterval(0.0, 12.4, CLOSURE.CLOSED_CLOSED),
new KiePMMLInterval(12.6, 14.5, CLOSURE.OPEN_CLOSED));
KiePMMLMiningField miningField2 = KiePMMLMiningField.builder("FIELD-2", null)
.withDataType(DATA_TYPE.DOUBLE)
.withInvalidValueTreatmentMethod(INVALID_VALUE_TREATMENT_METHOD.AS_VALUE)
.withIntervals(intervals)
.build();
List<KiePMMLMiningField> miningFields = Arrays.asList(miningField0, miningField1, miningField2);
PMMLRequestData pmmlRequestData = new PMMLRequestData("123", "modelName");
pmmlRequestData.addRequestParam("FIELD-0", "122");
pmmlRequestData.addRequestParam("FIELD-1", 12.5);
pmmlRequestData.addRequestParam("FIELD-2", 14.6);
PreProcess.verifyFixInvalidValues(miningFields, pmmlRequestData);
});
}
|
public static SqlType fromValue(final BigDecimal value) {
// SqlDecimal does not support negative scale:
final BigDecimal decimal = value.scale() < 0
? value.setScale(0, BigDecimal.ROUND_UNNECESSARY)
: value;
/* We can't use BigDecimal.precision() directly for all cases, since it defines
* precision differently from SQL Decimal.
* In particular, if the decimal is between -0.1 and 0.1, BigDecimal precision can be
* lower than scale, which is disallowed in SQL Decimal. For example, 0.005 in
* BigDecimal has a precision,scale of 1,3; whereas we expect 4,3.
* If the decimal is in (-1,1) but outside (-0.1,0.1), the code doesn't throw, but
* gives lower precision than expected (e.g., 0.8 has precision 1 instead of 2).
* To account for this edge case, we just take the scale and add one and use that
* for the precision instead. This works since BigDecimal defines scale as the
* number of digits to the right of the period; which is one lower than the precision for
* anything in the range (-1, 1).
* This covers the case where BigDecimal has a value of 0.
* Note: This solution differs from the SQL definition in that it returns (4, 3) for
* both "0.005" and ".005", whereas SQL expects (3, 3) for the latter. This is unavoidable
* if we use BigDecimal as an intermediate representation, since the two strings are parsed
* identically by it to have precision 1.
*/
if (decimal.compareTo(BigDecimal.ONE) < 0 && decimal.compareTo(BigDecimal.ONE.negate()) > 0) {
return SqlTypes.decimal(decimal.scale() + 1, decimal.scale());
}
return SqlTypes.decimal(decimal.precision(), Math.max(decimal.scale(), 0));
}
|
@Test
public void shouldGetSchemaFromDecimal10_5() {
// When:
final SqlType schema = DecimalUtil.fromValue(new BigDecimal("12345.12345"));
// Then:
assertThat(schema, is(SqlTypes.decimal(10, 5)));
}
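The comment in fromValue motivates two adjustments: values in (-1, 1) get precision scale + 1, and negative scales are rescaled to 0. A short illustrative sketch restating those edge cases as hypothetical expectations, using the same hamcrest style as the surrounding test:
@Test
public void shouldHandleSmallAndNegativeScaleValues() {
    // 0.005: BigDecimal reports precision 1, scale 3 -> adjusted to (scale + 1, scale) = (4, 3).
    assertThat(DecimalUtil.fromValue(new BigDecimal("0.005")), is(SqlTypes.decimal(4, 3)));
    // 0.8: would otherwise report precision 1 -> adjusted to (2, 1).
    assertThat(DecimalUtil.fromValue(new BigDecimal("0.8")), is(SqlTypes.decimal(2, 1)));
    // 1e3 has negative scale -3; it is rescaled to 1000 -> (4, 0).
    assertThat(DecimalUtil.fromValue(new BigDecimal("1e3")), is(SqlTypes.decimal(4, 0)));
    // Zero falls in (-1, 1) with scale 0 -> (1, 0).
    assertThat(DecimalUtil.fromValue(BigDecimal.ZERO), is(SqlTypes.decimal(1, 0)));
}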
|
public Span nextSpan(Message message) {
TraceContextOrSamplingFlags extracted =
extractAndClearTraceIdProperties(processorExtractor, message, message);
Span result = tracer.nextSpan(extracted); // Processor spans use the normal sampler.
// When an upstream context was not present, lookup keys are unlikely added
if (extracted.context() == null && !result.isNoop()) {
// simplify code by re-using an existing MessagingRequest impl
tagQueueOrTopic(new MessageConsumerRequest(message, destination(message)), result);
}
return result;
}
|
@Test void nextSpan_shouldnt_tag_queue_when_incoming_context() throws JMSException {
setStringProperty(message, "b3", "0000000000000001-0000000000000002-1");
message.setJMSDestination(createDestination("foo", TYPE.QUEUE));
jmsTracing.nextSpan(message).start().finish();
assertThat(testSpanHandler.takeLocalSpan().tags()).isEmpty();
}
|
private static void addSearchChain(SearchChainResolver.Builder builder,
FederationConfig.Target target,
FederationConfig.Target.SearchChain searchChain)
{
String id = target.id();
if (!id.equals(searchChain.searchChainId()))
throw new RuntimeException("Invalid federation config, " + id + " != " + searchChain.searchChainId());
ComponentId searchChainId = ComponentId.fromString(id);
builder.addSearchChain(searchChainId, federationOptions(searchChain), searchChain.documentTypes());
// Here we make synthetic SearchChain proxies for all cluster.schema combinations possible
        // Given a source of the form searchcluster.schema, this rewrites it to source=searchcluster with restrict=schema.
// TODO Consider solving this in the config model by making many synthetic search clusters
for (String schema : searchChain.documentTypes()) {
String virtualChainId = id + "." + schema;
builder.addSearchChain(ComponentId.fromString(virtualChainId),
new SearchChaininvocationProxy(searchChainId, federationOptions(searchChain).setUseByDefault(false), schema));
}
}
|
@Test
void require_that_optional_search_chains_does_not_delay_federation() {
BlockingSearcher blockingSearcher = new BlockingSearcher();
FederationTester tester = new FederationTester();
tester.addSearchChain("chain1", new AddHitSearcher());
tester.addOptionalSearchChain("chain2", blockingSearcher);
Result result = tester.searchAndFill();
assertEquals(2, result.getHitCount());
assertTrue(result.hits().get(0) instanceof HitGroup);
assertTrue(result.hits().get(1) instanceof HitGroup);
HitGroup chain1Result = (HitGroup) result.hits().get(0);
HitGroup chain2Result = (HitGroup) result.hits().get(1);
// Verify chain1 result: One filled hit
assertEquals(1, chain1Result.size());
assertFilled(getFirstHit(chain1Result));
// Verify chain2 result: A timeout error
assertEquals(1, chain2Result.size());
assertNotNull(chain2Result.getErrorHit());
ErrorHit errorHit = chain2Result.getErrorHit();
assertEquals(1, errorHit.errors().size());
ErrorMessage error = errorHit.errors().iterator().next();
assertEquals("chain2", error.getSource());
assertEquals(ErrorMessage.timeoutCode, error.getCode());
assertEquals("Timed out", error.getMessage());
assertEquals("Error in execution of chain 'chain2': Chain timed out.", error.getDetailedMessage());
}
|
public static Set<Result> anaylze(String log) {
Set<Result> results = new HashSet<>();
for (Rule rule : Rule.values()) {
Matcher matcher = rule.pattern.matcher(log);
if (matcher.find()) {
results.add(new Result(rule, log, matcher));
}
}
return results;
}
|
@Test
public void jdk9() throws IOException {
CrashReportAnalyzer.Result result = findResultByRule(
CrashReportAnalyzer.anaylze(loadLog("/logs/java9.txt")),
CrashReportAnalyzer.Rule.JDK_9);
}
|
private AccessLog(String logFormat, Object... args) {
Objects.requireNonNull(logFormat, "logFormat");
this.logFormat = logFormat;
this.args = args;
}
|
@Test
void accessLogDefaultFormat() {
disposableServer = createServer()
.handle((req, resp) -> {
resp.withConnection(conn -> {
ChannelHandler handler = conn.channel().pipeline().get(NettyPipeline.AccessLogHandler);
resp.header(ACCESS_LOG_HANDLER, handler != null ? FOUND : NOT_FOUND);
});
return resp.send();
})
.accessLog(true)
.bindNow();
Tuple2<String, String> response = getHttpClientResponse(URI_1);
assertAccessLogging(response, true, false, null);
}
|
@Override
public Collection<MepEntry> getAllMeps(MdId mdName, MaIdShort maName)
throws CfmConfigException {
//Will throw IllegalArgumentException if ma does not exist
cfmMdService.getMaintenanceAssociation(mdName, maName);
Collection<Mep> mepStoreCollection = mepStore.getAllMeps();
Collection<MepEntry> mepEntryCollection = new ArrayList<>();
for (Mep mep : mepStoreCollection) {
if (mep.mdId().equals(mdName) && mep.maId().equals(maName)) {
DeviceId mepDeviceId = mep.deviceId();
if (deviceService.getDevice(mepDeviceId) == null) {
log.warn("Device not found/available " + mepDeviceId +
" for MEP: " + mdName + "/" + maName + "/" + mep.mepId());
continue;
} else if (!deviceService.getDevice(mepDeviceId)
.is(CfmMepProgrammable.class)) {
throw new CfmConfigException("Device " + mepDeviceId +
" does not support CfmMepProgrammable behaviour.");
}
log.debug("Retrieving MEP results for Mep {} in MD {}, MA {} "
+ "on Device {}", mep.mepId(), mdName, maName, mepDeviceId);
mepEntryCollection.add(deviceService
.getDevice(mepDeviceId)
.as(CfmMepProgrammable.class)
.getMep(mdName, maName, mep.mepId()));
}
}
return mepEntryCollection;
}
|
@Test
public void testGetAllMeps() throws CfmConfigException {
expect(mdService.getMaintenanceAssociation(MDNAME1, MANAME1))
.andReturn(Optional.ofNullable(ma1))
.anyTimes();
replay(mdService);
expect(deviceService.getDevice(DEVICE_ID1)).andReturn(device1).anyTimes();
expect(deviceService.getDevice(DEVICE_ID2)).andReturn(device2).anyTimes();
replay(deviceService);
expect(driverService.getDriver(DEVICE_ID1)).andReturn(testDriver).anyTimes();
expect(driverService.getDriver(DEVICE_ID2)).andReturn(testDriver).anyTimes();
replay(driverService);
Collection<MepEntry> mepEntries = mepManager.getAllMeps(MDNAME1, MANAME1);
assertEquals(4, mepEntries.size());
}
|
static void populateOutputFields(final PMML4Result toUpdate,
final ProcessingDTO processingDTO) {
logger.debug("populateOutputFields {} {}", toUpdate, processingDTO);
for (KiePMMLOutputField outputField : processingDTO.getOutputFields()) {
Object variableValue = outputField.evaluate(processingDTO);
if (variableValue != null) {
String variableName = outputField.getName();
toUpdate.addResultVariable(variableName, variableValue);
processingDTO.addKiePMMLNameValue(new KiePMMLNameValue(variableName, variableValue));
}
}
}
|
@Test
void populateTransformedOutputField1() {
KiePMMLOutputField outputField = KiePMMLOutputField.builder(OUTPUT_NAME, Collections.emptyList())
.withResultFeature(RESULT_FEATURE.TRANSFORMED_VALUE)
.build();
KiePMMLTestingModel kiePMMLModel = testingModelBuilder(outputField).build();
ProcessingDTO processingDTO = buildProcessingDTOWithDefaultNameValues(kiePMMLModel);
PMML4Result toUpdate = new PMML4Result();
PostProcess.populateOutputFields(toUpdate, processingDTO);
assertThat(toUpdate.getResultVariables()).isEmpty();
}
|
public static TimestampExtractionPolicy create(
final KsqlConfig ksqlConfig,
final LogicalSchema schema,
final Optional<TimestampColumn> timestampColumn
) {
if (!timestampColumn.isPresent()) {
return new MetadataTimestampExtractionPolicy(getDefaultTimestampExtractor(ksqlConfig));
}
final ColumnName col = timestampColumn.get().getColumn();
final Optional<String> timestampFormat = timestampColumn.get().getFormat();
final Column column = schema.findColumn(col)
.orElseThrow(() -> new KsqlException(
"The TIMESTAMP column set in the WITH clause does not exist in the schema: '"
+ col.toString(FormatOptions.noEscape()) + "'"));
final SqlBaseType tsColumnType = column.type().baseType();
if (tsColumnType == SqlBaseType.STRING) {
final String format = timestampFormat.orElseThrow(() -> new KsqlException(
"A String timestamp field has been specified without"
+ " also specifying the "
+ CommonCreateConfigs.TIMESTAMP_FORMAT_PROPERTY.toLowerCase()));
return new StringTimestampExtractionPolicy(col, format);
}
if (timestampFormat.isPresent()) {
throw new KsqlException("'" + CommonCreateConfigs.TIMESTAMP_FORMAT_PROPERTY
+ "' set in the WITH clause can only be used "
+ "when the timestamp column is of type STRING.");
}
if (tsColumnType == SqlBaseType.BIGINT) {
return new LongColumnTimestampExtractionPolicy(col);
}
if (tsColumnType == SqlBaseType.TIMESTAMP) {
return new TimestampColumnTimestampExtractionPolicy(col);
}
throw new KsqlException(
"Timestamp column, " + col + ", should be LONG(INT64), TIMESTAMP,"
+ " or a String with a "
+ CommonCreateConfigs.TIMESTAMP_FORMAT_PROPERTY.toLowerCase()
+ " specified.");
}
|
@Test
public void shouldCreateTimestampPolicyWhenTimestampFieldIsOfTypeTimestamp() {
// Given:
final String timestamp = "timestamp";
final LogicalSchema schema = schemaBuilder2
.valueColumn(ColumnName.of(timestamp.toUpperCase()), SqlTypes.TIMESTAMP)
.build();
// When:
final TimestampExtractionPolicy result = TimestampExtractionPolicyFactory
.create(
ksqlConfig,
schema,
Optional.of(
new TimestampColumn(
ColumnName.of(timestamp.toUpperCase()),
Optional.empty()
)
)
);
// Then:
assertThat(result, instanceOf(TimestampColumnTimestampExtractionPolicy.class));
assertThat(result.getTimestampField(),
equalTo(ColumnName.of(timestamp.toUpperCase())));
}
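create also rejects a STRING timestamp column that lacks a format. A hypothetical sketch of that branch, reusing the fixture names from the test above (assertThrows and containsString imports assumed); the expected message is the one built in the code:
@Test
public void shouldThrowWhenStringTimestampColumnHasNoFormat() {
    // Given: a hypothetical STRING timestamp column declared without a format.
    final LogicalSchema schema = schemaBuilder2
        .valueColumn(ColumnName.of("TS"), SqlTypes.STRING)
        .build();

    // When:
    final KsqlException e = assertThrows(
        KsqlException.class,
        () -> TimestampExtractionPolicyFactory.create(
            ksqlConfig,
            schema,
            Optional.of(new TimestampColumn(ColumnName.of("TS"), Optional.empty()))));

    // Then:
    assertThat(e.getMessage(),
        containsString("A String timestamp field has been specified without"));
}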
|
static Optional<Catalog> loadCatalog(Configuration conf, String catalogName) {
String catalogType = getCatalogType(conf, catalogName);
if (NO_CATALOG_TYPE.equalsIgnoreCase(catalogType)) {
return Optional.empty();
} else {
String name = catalogName == null ? ICEBERG_DEFAULT_CATALOG_NAME : catalogName;
return Optional.of(CatalogUtil.buildIcebergCatalog(name,
getCatalogProperties(conf, name, catalogType), conf));
}
}
|
@Test
public void testDefaultCatalogProperties() {
String catalogProperty = "io.manifest.cache-enabled";
// Set global property
final String defaultCatalogProperty = InputFormatConfig.CATALOG_DEFAULT_CONFIG_PREFIX + catalogProperty;
conf.setBoolean(defaultCatalogProperty, true);
HiveCatalog defaultCatalog = (HiveCatalog) Catalogs.loadCatalog(conf, null).get();
Assert.assertEquals("true", defaultCatalog.properties().get(catalogProperty));
Assert.assertEquals("true",
defaultCatalog.newTableOps(TableIdentifier.of("default", "iceberg"))
.io().properties().get(catalogProperty));
// set property at catalog level, and that should take precedence over the global property.
conf.setBoolean(
String.format("%s%s.%s", InputFormatConfig.CATALOG_CONFIG_PREFIX, Catalogs.ICEBERG_DEFAULT_CATALOG_NAME,
catalogProperty), false);
defaultCatalog = (HiveCatalog) Catalogs.loadCatalog(conf, null).get();
Assert.assertEquals("false", defaultCatalog.properties().get(catalogProperty));
Assert.assertEquals("false",
defaultCatalog.newTableOps(TableIdentifier.of("default", "iceberg"))
.io().properties().get(catalogProperty));
}
|
@Override
public E putIfAbsent(String key, E value) {
return computeIfAbsent(key, k -> value);
}
|
@Test
public void putIfAbsent_cacheHit_noCacheUpdate() {
Function<String, Integer> mappingFunctionMock = Mockito.mock(Function.class);
doReturn(36).when(mutableEntryMock).getValue();
entryProcessorMock = new CacheRegistryStore.AtomicComputeProcessor<>();
entryProcessorArgMock = mappingFunctionMock;
Integer cacheResult = classUnderTest.putIfAbsent(CACHE_KEY, 36);
verify(mutableEntryMock, never()).setValue(any());
assertEquals(Integer.valueOf(36), cacheResult);
}
|
public long removePublication(final long registrationId)
{
final long correlationId = toDriverCommandBuffer.nextCorrelationId();
final int index = toDriverCommandBuffer.tryClaim(REMOVE_PUBLICATION, RemoveMessageFlyweight.length());
if (index < 0)
{
throw new AeronException("could not write remove publication command");
}
removeMessage
.wrap(toDriverCommandBuffer.buffer(), index)
.registrationId(registrationId)
.clientId(clientId)
.correlationId(correlationId);
toDriverCommandBuffer.commit(index);
return correlationId;
}
|
@Test
void threadSendsRemoveChannelMessage()
{
conductor.removePublication(CORRELATION_ID);
assertReadsOneMessage(
(msgTypeId, buffer, index, length) ->
{
final RemoveMessageFlyweight message = new RemoveMessageFlyweight();
message.wrap(buffer, index);
assertEquals(REMOVE_PUBLICATION, msgTypeId);
assertEquals(CORRELATION_ID, message.registrationId());
}
);
}
|
@Override
public boolean rejoinNeededOrPending() {
if (!subscriptions.hasAutoAssignedPartitions())
return false;
// we need to rejoin if we performed the assignment and metadata has changed;
// also for those owned-but-no-longer-existed partitions we should drop them as lost
if (assignmentSnapshot != null && !assignmentSnapshot.matches(metadataSnapshot)) {
final String fullReason = String.format("cached metadata has changed from %s at the beginning of the rebalance to %s",
assignmentSnapshot, metadataSnapshot);
requestRejoinIfNecessary("cached metadata has changed", fullReason);
return true;
}
// we need to join if our subscription has changed since the last join
if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) {
final String fullReason = String.format("subscription has changed from %s at the beginning of the rebalance to %s",
joinedSubscription, subscriptions.subscription());
requestRejoinIfNecessary("subscription has changed", fullReason);
return true;
}
return super.rejoinNeededOrPending();
}
|
@Test
public void testRebalanceInProgressOnSyncGroup() {
subscriptions.subscribe(singleton(topic1), Optional.of(rebalanceListener));
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
// join initially, but let coordinator rebalance on sync
client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
client.prepareResponse(syncGroupResponse(Collections.emptyList(), Errors.REBALANCE_IN_PROGRESS));
// then let the full join/sync finish successfully
client.prepareResponse(joinGroupFollowerResponse(2, consumerId, "leader", Errors.NONE));
client.prepareResponse(syncGroupResponse(singletonList(t1p), Errors.NONE));
coordinator.joinGroupIfNeeded(time.timer(Long.MAX_VALUE));
assertFalse(coordinator.rejoinNeededOrPending());
assertEquals(singleton(t1p), subscriptions.assignedPartitions());
}
|
public FileConfig getFile() {
return file;
}
|
@Test
public void testFileConfig() {
ShenyuConfig.FileConfig fileConfig = config.getFile();
fileConfig.setMaxSize(10);
fileConfig.setEnabled(true);
Boolean enabled = fileConfig.getEnabled();
Integer maxSize = fileConfig.getMaxSize();
notEmptyElements(maxSize, enabled);
}
|
@Override
public Cursor<byte[]> scan(RedisClusterNode node, ScanOptions options) {
return new ScanCursor<byte[]>(0, options) {
private RedisClient client = getEntry(node);
@Override
protected ScanIteration<byte[]> doScan(long cursorId, ScanOptions options) {
if (isQueueing() || isPipelined()) {
throw new UnsupportedOperationException("'SSCAN' cannot be called in pipeline / transaction mode.");
}
if (client == null) {
return null;
}
List<Object> args = new ArrayList<Object>();
args.add(Long.toUnsignedString(cursorId));
if (options.getPattern() != null) {
args.add("MATCH");
args.add(options.getPattern());
}
if (options.getCount() != null) {
args.add("COUNT");
args.add(options.getCount());
}
RFuture<ListScanResult<byte[]>> f = executorService.readAsync(client, ByteArrayCodec.INSTANCE, RedisCommands.SCAN, args.toArray());
ListScanResult<byte[]> res = syncFuture(f);
String pos = res.getPos();
client = res.getRedisClient();
if ("0".equals(pos)) {
client = null;
}
return new ScanIteration<byte[]>(Long.parseUnsignedLong(pos), res.getValues());
}
}.open();
}
|
@Test
public void testScan() {
testInCluster(connection -> {
Map<byte[], byte[]> map = new HashMap<>();
for (int i = 0; i < 10000; i++) {
map.put(RandomString.make(32).getBytes(), RandomString.make(32).getBytes(StandardCharsets.UTF_8));
}
connection.mSet(map);
Cursor<byte[]> b = connection.scan(ScanOptions.scanOptions().build());
Set<String> sett = new HashSet<>();
int counter = 0;
while (b.hasNext()) {
byte[] tt = b.next();
sett.add(new String(tt));
counter++;
}
assertThat(sett.size()).isEqualTo(map.size());
assertThat(counter).isEqualTo(map.size());
});
}
|
public OffsetRange[] getNextOffsetRanges(Option<String> lastCheckpointStr, long sourceLimit, HoodieIngestionMetrics metrics) {
// Come up with final set of OffsetRanges to read (account for new partitions, limit number of events)
long maxEventsToReadFromKafka = getLongWithAltKeys(props, KafkaSourceConfig.MAX_EVENTS_FROM_KAFKA_SOURCE);
long numEvents;
if (sourceLimit == Long.MAX_VALUE) {
numEvents = maxEventsToReadFromKafka;
LOG.info("SourceLimit not configured, set numEvents to default value : {}", maxEventsToReadFromKafka);
} else {
numEvents = sourceLimit;
}
long minPartitions = getLongWithAltKeys(props, KafkaSourceConfig.KAFKA_SOURCE_MIN_PARTITIONS);
LOG.info("getNextOffsetRanges set config {} to {}", KafkaSourceConfig.KAFKA_SOURCE_MIN_PARTITIONS.key(), minPartitions);
return getNextOffsetRanges(lastCheckpointStr, numEvents, minPartitions, metrics);
}
|
@Test
public void testGetNextOffsetRangesFromMultiplePartitions() {
HoodieTestDataGenerator dataGenerator = new HoodieTestDataGenerator();
testUtils.createTopic(testTopicName, 2);
testUtils.sendMessages(testTopicName, Helpers.jsonifyRecords(dataGenerator.generateInserts("000", 1000)));
KafkaOffsetGen kafkaOffsetGen = new KafkaOffsetGen(getConsumerConfigs("earliest", KAFKA_CHECKPOINT_TYPE_STRING));
OffsetRange[] nextOffsetRanges = kafkaOffsetGen.getNextOffsetRanges(Option.empty(), 499, metrics);
assertEquals(3, nextOffsetRanges.length);
assertEquals(0, nextOffsetRanges[0].fromOffset());
assertEquals(249, nextOffsetRanges[0].untilOffset());
assertEquals(249, nextOffsetRanges[1].fromOffset());
assertEquals(250, nextOffsetRanges[1].untilOffset());
assertEquals(0, nextOffsetRanges[2].fromOffset());
assertEquals(249, nextOffsetRanges[2].untilOffset());
}
|
static UnixResolverOptions parseEtcResolverOptions() throws IOException {
return parseEtcResolverOptions(new File(ETC_RESOLV_CONF_FILE));
}
|
@Test
public void timeoutOptionIsParsedIfPresent(@TempDir Path tempDir) throws IOException {
File f = buildFile(tempDir, "search localdomain\n" +
"nameserver 127.0.0.11\n" +
"options timeout:0\n");
assertEquals(0, parseEtcResolverOptions(f).timeout());
f = buildFile(tempDir, "search localdomain\n" +
"nameserver 127.0.0.11\n" +
"options foo:bar timeout:124\n");
assertEquals(124, parseEtcResolverOptions(f).timeout());
}
|
public Optional<Long> getTokenTimeout(
final Optional<String> token,
final KsqlConfig ksqlConfig,
final Optional<KsqlAuthTokenProvider> authTokenProvider
) {
final long maxTimeout =
ksqlConfig.getLong(KsqlConfig.KSQL_WEBSOCKET_CONNECTION_MAX_TIMEOUT_MS);
if (maxTimeout > 0) {
if (authTokenProvider.isPresent() && token.isPresent()) {
try {
final long tokenTimeout = authTokenProvider.get()
.getLifetimeMs(StringUtils.removeStart(token.get(), BEARER)) - clock.millis();
return Optional.of(Math.min(tokenTimeout, maxTimeout));
} catch (final Exception e) {
log.error(e.getMessage());
}
}
return Optional.of(maxTimeout);
} else {
return Optional.empty();
}
}
|
@Test
public void shouldReturnMaxTimeout() {
// Given:
when(authTokenProvider.getLifetimeMs(TOKEN)).thenReturn(50000000L);
// Then:
assertThat(authenticationUtil.getTokenTimeout(Optional.of(TOKEN), ksqlConfig, Optional.of(authTokenProvider)), equalTo(Optional.of(60000L)));
}
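Since getTokenTimeout returns the smaller of the remaining token lifetime and the configured maximum, a shorter-lived token should win. A hypothetical companion case, assuming the fixture clock is fixed at epoch zero (the 60000L expectation above is consistent with that, but it is an assumption):
@Test
public void shouldReturnTokenLifetimeWhenShorterThanMaxTimeout() {
    // Given: a token expiring in 30s while the configured max is 60s
    // (assumes clock.millis() == 0 in this fixture).
    when(authTokenProvider.getLifetimeMs(TOKEN)).thenReturn(30000L);

    // Then: min(30000 - clock.millis(), 60000) = 30000.
    assertThat(
        authenticationUtil.getTokenTimeout(Optional.of(TOKEN), ksqlConfig, Optional.of(authTokenProvider)),
        equalTo(Optional.of(30000L)));
}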
|
public void writeShortMethodDescriptor(MethodReference methodReference) throws IOException {
writeSimpleName(methodReference.getName());
writer.write('(');
for (CharSequence paramType: methodReference.getParameterTypes()) {
writeType(paramType);
}
writer.write(')');
writeType(methodReference.getReturnType());
}
|
@Test
public void testWriteShortMethodDescriptor() throws IOException {
DexFormattedWriter writer = new DexFormattedWriter(output);
writer.writeShortMethodDescriptor(getMethodReference());
Assert.assertEquals("methodName(Lparam1;Lparam2;)Lreturn/type;", output.toString());
}
|