focal_method (string, length 13–60.9k) | test_case (string, length 25–109k)
---|---
public static TableElements of(final TableElement... elements) {
return new TableElements(ImmutableList.copyOf(elements));
} | @Test
public void shouldThrowOnDuplicateKeyValueColumns() {
// Given:
final List<TableElement> elements = ImmutableList.of(
tableElement("v0", INT_TYPE, KEY_CONSTRAINT),
tableElement("v0", INT_TYPE),
tableElement("v1", INT_TYPE, PRIMARY_KEY_CONSTRAINT),
tableElement("v1", INT_TYPE)
);
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> TableElements.of(elements)
);
// Then:
assertThat(e.getMessage(), containsString(
"Duplicate column names:"));
assertThat(e.getMessage(), containsString(
"v0"));
assertThat(e.getMessage(), containsString(
"v1"));
} |
public static String get(@NonNull SymbolRequest request) {
String name = request.getName();
String title = request.getTitle();
String tooltip = request.getTooltip();
String htmlTooltip = request.getHtmlTooltip();
String classes = request.getClasses();
String pluginName = request.getPluginName();
String id = request.getId();
String identifier = (pluginName == null || pluginName.isBlank()) ? "core" : pluginName;
String symbol = SYMBOLS
.computeIfAbsent(identifier, key -> new ConcurrentHashMap<>())
.computeIfAbsent(name, key -> loadSymbol(identifier, key));
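// Each replacement below injects an attribute into the opening <svg> tag; the plain-text tooltip is only applied when no HTML tooltip is supplied.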
if ((tooltip != null && !tooltip.isBlank()) && (htmlTooltip == null || htmlTooltip.isBlank())) {
symbol = symbol.replaceAll("<svg", Matcher.quoteReplacement("<svg tooltip=\"" + Functions.htmlAttributeEscape(tooltip) + "\""));
}
if (htmlTooltip != null && !htmlTooltip.isBlank()) {
symbol = symbol.replaceAll("<svg", Matcher.quoteReplacement("<svg data-html-tooltip=\"" + Functions.htmlAttributeEscape(htmlTooltip) + "\""));
}
if (id != null && !id.isBlank()) {
symbol = symbol.replaceAll("<svg", Matcher.quoteReplacement("<svg id=\"" + Functions.htmlAttributeEscape(id) + "\""));
}
if (classes != null && !classes.isBlank()) {
symbol = symbol.replaceAll("<svg", "<svg class=\"" + Functions.htmlAttributeEscape(classes) + "\"");
}
if (title != null && !title.isBlank()) {
symbol = "<span class=\"jenkins-visually-hidden\">" + Util.xmlEscape(title) + "</span>" + symbol;
}
return symbol;
} | @Test
@DisplayName("IDs in symbol should not be removed")
@Issue("JENKINS-70730")
void getSymbol_idInSymbolIsPresent() {
String symbol = Symbol.get(new SymbolRequest.Builder()
.withId("some-random-id")
.withName("with-id").build());
assertThat(symbol, containsString("id=\"a\""));
} |
@Override
public boolean containsKey(K key) {
return map.containsKey(key);
} | @Test
public void testContainsKey() {
map.put(23, "value-23");
assertTrue(adapter.containsKey(23));
assertFalse(adapter.containsKey(42));
} |
public Node parse() throws ScanException {
if (tokenList == null || tokenList.isEmpty())
return null;
return E();
} | @Test
public void withDefault() throws ScanException {
Tokenizer tokenizer = new Tokenizer("${b:-c}");
Parser parser = new Parser(tokenizer.tokenize());
Node node = parser.parse();
Node witness = new Node(Node.Type.VARIABLE, new Node(Node.Type.LITERAL, "b"));
witness.defaultPart = new Node(Node.Type.LITERAL, "c");
assertEquals(witness, node);
} |
@Override
public void select(final List<Local> files) {
if(log.isDebugEnabled()) {
log.debug(String.format("Select files for %s", files));
}
previews.clear();
for(final Local selected : files) {
previews.add(new QLPreviewItem() {
@Override
public NSURL previewItemURL() {
return NSURL.fileURLWithPath(selected.getAbsolute());
}
@Override
public String previewItemTitle() {
return selected.getDisplayName();
}
});
}
} | @Test
public void testSelect() {
QuickLook q = new QuartzQuickLook();
final List<Local> files = new ArrayList<Local>();
files.add(new NullLocal("f"));
files.add(new NullLocal("b"));
q.select(files);
} |
public static String formatSql(final AstNode root) {
final StringBuilder builder = new StringBuilder();
new Formatter(builder).process(root, 0);
return StringUtils.stripEnd(builder.toString(), "\n");
} | @Test
public void shouldFormatStructWithReservedWords() {
// Given:
final Statement statement = parseSingle("CREATE STREAM s (foo STRUCT<`END` VARCHAR>) WITH (kafka_topic='foo', value_format='JSON');");
// When:
final String result = SqlFormatter.formatSql(statement);
// Then:
assertThat(result, is("CREATE STREAM S (FOO STRUCT<`END` STRING>) WITH (KAFKA_TOPIC='foo', VALUE_FORMAT='JSON');"));
} |
public Optional<ComputationState> getIfPresent(String computationId) {
return Optional.ofNullable(computationCache.getIfPresent(computationId));
} | @Test
public void testGetIfPresent_computationStateNotCached() {
Optional<ComputationState> computationState =
computationStateCache.getIfPresent("computationId");
assertFalse(computationState.isPresent());
verifyNoInteractions(configFetcher);
} |
public static <T> boolean listEquals(List<T> left, List<T> right) {
if (left == null) {
return right == null;
} else {
if (right == null) {
return false;
}
if (left.size() != right.size()) {
return false;
}
List<T> ltmp = new ArrayList<T>(left);
List<T> rtmp = new ArrayList<T>(right);
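// Sizes are equal at this point, so the lists match as multisets iff every element of left removes one occurrence from the copy of right.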
for (T t : ltmp) {
rtmp.remove(t);
}
return rtmp.isEmpty();
}
} | @Test
public void testListEquals() {
List<String> left = new ArrayList<>();
List<String> right = new ArrayList<>();
Assert.assertTrue(CommonUtils.listEquals(null, null));
Assert.assertFalse(CommonUtils.listEquals(left, null));
Assert.assertFalse(CommonUtils.listEquals(null, right));
Assert.assertTrue(CommonUtils.listEquals(left, right));
left.add("111");
left.add("111");
Assert.assertFalse(CommonUtils.listEquals(left, right));
right.add("222");
right.add("111");
Assert.assertFalse(CommonUtils.listEquals(left, right));
left.remove("111");
left.add("222");
Assert.assertTrue(CommonUtils.listEquals(left, right));
} |
@Nonnull
public static String cutOffAtFirst(@Nonnull String text, char cutoff) {
int i = text.indexOf(cutoff);
if (i < 0) return text;
return text.substring(0, i);
} | @Test
void testCutOffAtFirst() {
// chars
assertEquals("", StringUtil.cutOffAtFirst("", 'd'));
assertEquals("abc", StringUtil.cutOffAtFirst("abcdefg", 'd'));
assertEquals("abc", StringUtil.cutOffAtFirst("abcdefgd", 'd'));
// strings
assertEquals("", StringUtil.cutOffAtFirst("", "d"));
assertEquals("abc", StringUtil.cutOffAtFirst("abcdefg", "d"));
assertEquals("abc", StringUtil.cutOffAtFirst("abcdefgd", "d"));
} |
@Override
protected void registerMetadata(final MetaDataRegisterDTO dto) {
if (dto.isRegisterMetaData()) {
MetaDataService metaDataService = getMetaDataService();
MetaDataDO exist = metaDataService.findByPath(dto.getPath());
metaDataService.saveOrUpdateMetaData(exist, dto);
}
} | @Test
public void testRegisterMetadata() {
MetaDataDO metaDataDO = MetaDataDO.builder().build();
when(metaDataService.findByPath(any())).thenReturn(metaDataDO);
MetaDataRegisterDTO metaDataDTO = MetaDataRegisterDTO.builder().registerMetaData(true).build();
shenyuClientRegisterDivideService.registerMetadata(metaDataDTO);
verify(metaDataService).saveOrUpdateMetaData(metaDataDO, metaDataDTO);
} |
public void execute() {
Profiler stepProfiler = Profiler.create(LOGGER).logTimeLast(true);
boolean allStepsExecuted = false;
try {
executeSteps(stepProfiler);
allStepsExecuted = true;
} finally {
if (listener != null) {
executeListener(allStepsExecuted);
}
}
} | @Test
public void execute_calls_listener_finished_method_with_all_step_runs() {
new ComputationStepExecutor(mockComputationSteps(computationStep1, computationStep2), taskInterrupter, listener)
.execute();
verify(listener).finished(true);
verifyNoMoreInteractions(listener);
} |
@Override
public void run() {
if (!redoService.isConnected()) {
LogUtils.NAMING_LOGGER.warn("Grpc Connection is disconnect, skip current redo task");
return;
}
try {
redoForInstances();
redoForSubscribes();
} catch (Exception e) {
LogUtils.NAMING_LOGGER.warn("Redo task run with unexpected exception: ", e);
}
} | @Test
void testRunRedoDeRegisterSubscriberWithClientDisabled() throws NacosException {
when(clientProxy.isEnable()).thenReturn(false);
Set<SubscriberRedoData> mockData = generateMockSubscriberData(true, true, false);
when(redoService.findSubscriberRedoData()).thenReturn(mockData);
redoTask.run();
verify(clientProxy, never()).doUnsubscribe(SERVICE, GROUP, CLUSTER);
} |
@Override
public BufferWithSubpartition getNextBuffer(@Nullable MemorySegment transitBuffer) {
checkState(isFinished, "Sort buffer is not ready to be read.");
checkState(!isReleased, "Sort buffer is already released.");
if (!hasRemaining()) {
freeSegments.add(transitBuffer);
return null;
}
int numBytesRead = 0;
Buffer.DataType bufferDataType = Buffer.DataType.DATA_BUFFER;
int currentReadingSubpartitionId = subpartitionReadOrder[readOrderIndex];
do {
// Get the buffer index and offset from the index entry
int toReadBufferIndex = getSegmentIndexFromPointer(readIndexEntryAddress);
int toReadOffsetInBuffer = getSegmentOffsetFromPointer(readIndexEntryAddress);
// Get the lengthAndDataType buffer according to the buffer index
MemorySegment toReadBuffer = segments.get(toReadBufferIndex);
// From the lengthAndDataType buffer, read and get the length and the data type
long lengthAndDataType = toReadBuffer.getLong(toReadOffsetInBuffer);
int recordLength = getSegmentIndexFromPointer(lengthAndDataType);
Buffer.DataType dataType =
Buffer.DataType.values()[getSegmentOffsetFromPointer(lengthAndDataType)];
// If the buffer is an event and some data has been read, return it directly to ensure
// that the event will occupy one buffer independently
if (dataType.isEvent() && numBytesRead > 0) {
break;
}
bufferDataType = dataType;
// Get the next index entry address and move the read position forward
long nextReadIndexEntryAddress = toReadBuffer.getLong(toReadOffsetInBuffer + 8);
toReadOffsetInBuffer += INDEX_ENTRY_SIZE;
// Allocate a temp buffer for the event, recycle the original buffer
if (bufferDataType.isEvent()) {
freeSegments.add(transitBuffer);
transitBuffer = MemorySegmentFactory.allocateUnpooledSegment(recordLength);
}
if (!isPartialRecordAllowed
&& !isLastBufferPartialRecord
&& numBytesRead > 0
&& numBytesRead + recordLength > transitBuffer.size()) {
break;
}
// Start reading data from the data buffer
numBytesRead +=
copyRecordOrEvent(
transitBuffer,
numBytesRead,
toReadBufferIndex,
toReadOffsetInBuffer,
recordLength);
if (recordRemainingBytes == 0) {
// move to next subpartition if the current subpartition has been finished
if (readIndexEntryAddress
== lastIndexEntryAddresses[currentReadingSubpartitionId]) {
isLastBufferPartialRecord = false;
updateReadSubpartitionAndIndexEntryAddress();
break;
}
readIndexEntryAddress = nextReadIndexEntryAddress;
if (isLastBufferPartialRecord) {
isLastBufferPartialRecord = false;
break;
}
} else {
isLastBufferPartialRecord = true;
}
} while (numBytesRead < transitBuffer.size() && bufferDataType.isBuffer());
if (!isPartialRecordAllowed
&& !isLastBufferPartialRecord
&& bufferDataType == Buffer.DataType.DATA_BUFFER) {
bufferDataType = Buffer.DataType.DATA_BUFFER_WITH_CLEAR_END;
}
numTotalBytesRead += numBytesRead;
return new BufferWithSubpartition(
new NetworkBuffer(
transitBuffer,
bufferDataType.isBuffer() ? bufferRecycler : FreeingBufferRecycler.INSTANCE,
bufferDataType,
numBytesRead),
currentReadingSubpartitionId);
} | @Test
void testBufferIsRecycledWhenGetEvent() throws Exception {
int numSubpartitions = 10;
int bufferPoolSize = 512;
int bufferSizeBytes = 1024;
int numBuffersForSort = 20;
int subpartitionId = 0;
Random random = new Random(1234);
NetworkBufferPool globalPool = new NetworkBufferPool(bufferPoolSize, bufferSizeBytes);
BufferPool bufferPool = globalPool.createBufferPool(bufferPoolSize, bufferPoolSize);
LinkedList<MemorySegment> segments = new LinkedList<>();
for (int i = 0; i < numBuffersForSort; ++i) {
segments.add(bufferPool.requestMemorySegmentBlocking());
}
TieredStorageSortBuffer sortBuffer =
new TieredStorageSortBuffer(
segments,
bufferPool,
numSubpartitions,
bufferSizeBytes,
numBuffersForSort,
true);
byte[] bytes = new byte[1];
random.nextBytes(bytes);
ByteBuffer dataRecord = ByteBuffer.wrap(bytes);
sortBuffer.append(dataRecord, subpartitionId, Buffer.DataType.DATA_BUFFER);
ByteBuffer eventRecord = ByteBuffer.wrap(bytes);
sortBuffer.append(eventRecord, subpartitionId, Buffer.DataType.EVENT_BUFFER);
sortBuffer.finish();
MemorySegment memorySegment = bufferPool.requestMemorySegmentBlocking();
BufferWithSubpartition bufferWithSubpartition = sortBuffer.getNextBuffer(memorySegment);
assertThat(bufferWithSubpartition.getBuffer().isBuffer()).isTrue();
assertThat(bufferWithSubpartition.getSubpartitionIndex()).isEqualTo(subpartitionId);
bufferWithSubpartition.getBuffer().recycleBuffer();
assertThat(bufferPool.bestEffortGetNumOfUsedBuffers()).isEqualTo(numBuffersForSort);
bufferWithSubpartition = sortBuffer.getNextBuffer(memorySegment);
assertThat(bufferWithSubpartition.getBuffer().isBuffer()).isFalse();
assertThat(bufferWithSubpartition.getSubpartitionIndex()).isEqualTo(subpartitionId);
assertThat(bufferPool.bestEffortGetNumOfUsedBuffers()).isEqualTo(numBuffersForSort);
} |
public String compile(final String xls,
final String template,
int startRow,
int startCol) {
return compile( xls,
template,
InputType.XLS,
startRow,
startCol );
} | @Test
public void testLoadCsv() {
final String drl = converter.compile("/data/ComplexWorkbook.drl.csv",
"/templates/test_template2.drl",
InputType.CSV,
10,
2);
assertThat(drl).isNotNull();
assertThat(drl.indexOf("myObject.setIsValid(1, 2)") > 0).isTrue();
assertThat(drl.indexOf("myObject.size () > 2") > 0).isTrue();
assertThat(drl.indexOf("Foo(myObject.getColour().equals(red),\n myObject.size () > 1") > 0).isTrue();
} |
public static InstrumentedThreadFactory privilegedThreadFactory(MetricRegistry registry, String name) {
return new InstrumentedThreadFactory(Executors.privilegedThreadFactory(), registry, name);
} | @Test
public void testPrivilegedThreadFactory() throws Exception {
final ThreadFactory threadFactory = InstrumentedExecutors.privilegedThreadFactory(registry);
threadFactory.newThread(new NoopRunnable());
final Field delegateField = InstrumentedThreadFactory.class.getDeclaredField("delegate");
delegateField.setAccessible(true);
final ThreadFactory delegate = (ThreadFactory) delegateField.get(threadFactory);
assertThat(delegate.getClass().getCanonicalName()).isEqualTo("java.util.concurrent.Executors.PrivilegedThreadFactory");
} |
@VisibleForTesting
static boolean isCompressed(String contentEncoding) {
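// Substring match: also covers variants such as "x-gzip" and comma-separated lists like "gzip, br".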
return contentEncoding.contains(HttpHeaderValues.GZIP.toString())
|| contentEncoding.contains(HttpHeaderValues.DEFLATE.toString())
|| contentEncoding.contains(HttpHeaderValues.BR.toString())
|| contentEncoding.contains(HttpHeaderValues.COMPRESS.toString());
} | @Test
void detectsDeflate() {
assertTrue(HttpUtils.isCompressed("deflate"));
} |
@Override
public Output run(RunContext runContext) throws Exception {
String taskSpec = runContext.render(this.spec);
try {
Task task = OBJECT_MAPPER.readValue(taskSpec, Task.class);
if (task instanceof TemplatedTask) {
throw new IllegalArgumentException("The templated task cannot be of type 'io.kestra.plugin.core.templating.TemplatedTask'");
}
if (task instanceof RunnableTask<?> runnableTask) {
return runnableTask.run(runContext);
}
throw new IllegalArgumentException("The templated task must be a runnable task");
} catch (JsonProcessingException e) {
throw new IllegalVariableEvaluationException(e);
}
} | @Test
void templatedFlowable() {
RunContext runContext = runContextFactory.of();
TemplatedTask templatedTask = TemplatedTask.builder()
.id("template")
.type(TemplatedTask.class.getName())
.spec("""
type: io.kestra.plugin.core.flow.Pause
delay: PT10S""")
.build();
var exception = assertThrows(IllegalArgumentException.class, () -> templatedTask.run(runContext));
assertThat(exception.getMessage(), is("The templated task must be a runnable task"));
} |
public static PutMessageResult checkBeforePutMessage(BrokerController brokerController, final MessageExt msg) {
if (brokerController.getMessageStore().isShutdown()) {
LOG.warn("message store has shutdown, so putMessage is forbidden");
return new PutMessageResult(PutMessageStatus.SERVICE_NOT_AVAILABLE, null);
}
if (!brokerController.getMessageStoreConfig().isDuplicationEnable() && BrokerRole.SLAVE == brokerController.getMessageStoreConfig().getBrokerRole()) {
long value = PRINT_TIMES.getAndIncrement();
if ((value % 50000) == 0) {
LOG.warn("message store is in slave mode, so putMessage is forbidden ");
}
return new PutMessageResult(PutMessageStatus.SERVICE_NOT_AVAILABLE, null);
}
if (!brokerController.getMessageStore().getRunningFlags().isWriteable()) {
long value = PRINT_TIMES.getAndIncrement();
if ((value % 50000) == 0) {
LOG.warn("message store is not writeable, so putMessage is forbidden " + brokerController.getMessageStore().getRunningFlags().getFlagBits());
}
return new PutMessageResult(PutMessageStatus.SERVICE_NOT_AVAILABLE, null);
} else {
PRINT_TIMES.set(0);
}
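// Non-retry topics must fit in Byte.MAX_VALUE (127) bytes; retry topics are exempt from that limit but still bounded by MAX_TOPIC_LENGTH below.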
final byte[] topicData = msg.getTopic().getBytes(MessageDecoder.CHARSET_UTF8);
boolean retryTopic = msg.getTopic() != null && msg.getTopic().startsWith(MixAll.RETRY_GROUP_TOPIC_PREFIX);
if (!retryTopic && topicData.length > Byte.MAX_VALUE) {
LOG.warn("putMessage message topic[{}] length too long {}, but it is not supported by broker",
msg.getTopic(), topicData.length);
return new PutMessageResult(PutMessageStatus.MESSAGE_ILLEGAL, null);
}
if (topicData.length > MAX_TOPIC_LENGTH) {
LOG.warn("putMessage message topic[{}] length too long {}, but it is not supported by broker",
msg.getTopic(), topicData.length);
return new PutMessageResult(PutMessageStatus.MESSAGE_ILLEGAL, null);
}
if (msg.getBody() == null) {
LOG.warn("putMessage message topic[{}], but message body is null", msg.getTopic());
return new PutMessageResult(PutMessageStatus.MESSAGE_ILLEGAL, null);
}
if (brokerController.getMessageStore().isOSPageCacheBusy()) {
return new PutMessageResult(PutMessageStatus.OS_PAGE_CACHE_BUSY, null);
}
return null;
} | @Test
public void testCheckBeforePutMessage() {
BrokerController brokerController = Mockito.mock(BrokerController.class);
MessageStore messageStore = Mockito.mock(MessageStore.class);
MessageStoreConfig messageStoreConfig = new MessageStoreConfig();
RunningFlags runningFlags = Mockito.mock(RunningFlags.class);
Mockito.when(brokerController.getMessageStore()).thenReturn(messageStore);
Mockito.when(brokerController.getMessageStore().isShutdown()).thenReturn(false);
Mockito.when(brokerController.getMessageStoreConfig()).thenReturn(messageStoreConfig);
Mockito.when(messageStore.getRunningFlags()).thenReturn(runningFlags);
Mockito.when(messageStore.getRunningFlags().isWriteable()).thenReturn(true);
MessageExt messageExt = new MessageExt();
messageExt.setTopic(RandomStringUtils.randomAlphabetic(Byte.MAX_VALUE).toUpperCase());
messageExt.setBody(RandomStringUtils.randomAlphabetic(Byte.MAX_VALUE).toUpperCase().getBytes());
Assert.assertNull(HookUtils.checkBeforePutMessage(brokerController, messageExt));
messageExt.setTopic(RandomStringUtils.randomAlphabetic(Byte.MAX_VALUE + 1).toUpperCase());
Assert.assertEquals(PutMessageStatus.MESSAGE_ILLEGAL, Objects.requireNonNull(
HookUtils.checkBeforePutMessage(brokerController, messageExt)).getPutMessageStatus());
messageExt.setTopic(MixAll.RETRY_GROUP_TOPIC_PREFIX +
RandomStringUtils.randomAlphabetic(Byte.MAX_VALUE + 1).toUpperCase());
Assert.assertNull(HookUtils.checkBeforePutMessage(brokerController, messageExt));
messageExt.setTopic(MixAll.RETRY_GROUP_TOPIC_PREFIX +
RandomStringUtils.randomAlphabetic(255 - MixAll.RETRY_GROUP_TOPIC_PREFIX.length()).toUpperCase());
Assert.assertNull(HookUtils.checkBeforePutMessage(brokerController, messageExt));
messageExt.setTopic(MixAll.RETRY_GROUP_TOPIC_PREFIX +
RandomStringUtils.randomAlphabetic(256 - MixAll.RETRY_GROUP_TOPIC_PREFIX.length()).toUpperCase());
Assert.assertEquals(PutMessageStatus.MESSAGE_ILLEGAL, Objects.requireNonNull(
HookUtils.checkBeforePutMessage(brokerController, messageExt)).getPutMessageStatus());
} |
@Override
public Object convertData( ValueMetaInterface meta2, Object data2 ) throws KettleValueException {
switch ( meta2.getType() ) {
case TYPE_STRING:
return convertStringToInternetAddress( meta2.getString( data2 ) );
case TYPE_INTEGER:
return convertIntegerToInternetAddress( meta2.getInteger( data2 ) );
case TYPE_NUMBER:
return convertNumberToInternetAddress( meta2.getNumber( data2 ) );
case TYPE_BIGNUMBER:
return convertBigNumberToInternetAddress( meta2.getBigNumber( data2 ) );
case TYPE_INET:
return ( (ValueMetaInternetAddress) meta2 ).getInternetAddress( data2 );
default:
throw new KettleValueException( meta2.toStringMeta() + " : can't be converted to an Internet Address" );
}
} | @Test
public void testConvertNumberIPv4() throws Exception {
ValueMetaInterface vmia = new ValueMetaInternetAddress( "Test" );
ValueMetaNumber vmn = new ValueMetaNumber( "aNumber" );
Object convertedIPv4 = vmia.convertData( vmn, InetAddress2BigInteger( SAMPLE_IPV4_AS_BYTES ).doubleValue() );
assertNotNull( convertedIPv4 );
assertTrue( convertedIPv4 instanceof InetAddress );
assertArrayEquals( SAMPLE_IPV4_AS_BYTES, ( (InetAddress) convertedIPv4 ).getAddress() );
} |
public static List<Criterion> parse(String filter) {
return StreamSupport.stream(CRITERIA_SPLITTER.split(filter).spliterator(), false)
.map(FilterParser::parseCriterion)
.toList();
} | @Test
public void parse_filter_having_value_containing_operator_characters() {
List<Criterion> criterion = FilterParser.parse("languages IN (java, python, <null>)");
assertThat(criterion)
.extracting(Criterion::getKey, Criterion::getOperator, Criterion::getValues, Criterion::getValue)
.containsOnly(
tuple("languages", IN, asList("java", "python", "<null>"), null));
} |
@Override
public <T> T clone(T object) {
if (object instanceof String) {
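// Strings are immutable, so the same reference can be shared safely.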
return object;
} else if (object instanceof Collection) {
Object firstElement = findFirstNonNullElement((Collection) object);
if (firstElement != null && !(firstElement instanceof Serializable)) {
JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), firstElement.getClass());
return objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
}
} else if (object instanceof Map) {
Map.Entry firstEntry = this.findFirstNonNullEntry((Map) object);
if (firstEntry != null) {
Object key = firstEntry.getKey();
Object value = firstEntry.getValue();
if (!(key instanceof Serializable) || !(value instanceof Serializable)) {
JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), key.getClass(), value.getClass());
return (T) objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
}
}
} else if (object instanceof JsonNode) {
return (T) ((JsonNode) object).deepCopy();
}
if (object instanceof Serializable) {
try {
return (T) SerializationHelper.clone((Serializable) object);
} catch (SerializationException e) {
//it is possible that object itself implements java.io.Serializable, but underlying structure does not
//in this case we switch to the other JSON marshaling strategy which doesn't use the Java serialization
}
}
return jsonClone(object);
} | @Test
public void should_clone_map_of_non_serializable_value() {
Map<String, NonSerializableObject> original = new HashMap<>();
original.put("key", new NonSerializableObject("value"));
Object cloned = serializer.clone(original);
assertEquals(original, cloned);
assertNotSame(original, cloned);
} |
public SchemaMapping fromParquet(MessageType parquetSchema) {
List<Type> fields = parquetSchema.getFields();
List<TypeMapping> mappings = fromParquet(fields);
List<Field> arrowFields = fields(mappings);
return new SchemaMapping(new Schema(arrowFields), parquetSchema, mappings);
} | @Test
public void testRepeatedParquetToArrow() {
Schema arrow = converter.fromParquet(Paper.schema).getArrowSchema();
assertEquals(paperArrowSchema, arrow);
} |
public B lazy(Boolean lazy) {
this.lazy = lazy;
return getThis();
} | @Test
void lazy() {
ReferenceBuilder builder = new ReferenceBuilder();
builder.lazy(true);
Assertions.assertTrue(builder.build().getLazy());
builder.lazy(false);
Assertions.assertFalse(builder.build().getLazy());
} |
@Override
public List<TransferItem> list(final Session<?> session, final Path remote,
final Local directory, final ListProgressListener listener) throws BackgroundException {
if(log.isDebugEnabled()) {
log.debug(String.format("List children for %s", directory));
}
if(directory.isSymbolicLink()) {
final Symlink symlink = session.getFeature(Symlink.class);
if(new UploadSymlinkResolver(symlink, roots).resolve(directory)) {
if(log.isDebugEnabled()) {
log.debug(String.format("Do not list children for symbolic link %s", directory));
}
// We can resolve the target of the symbolic link and will create a link on the remote system
// using the symlink feature of the session
return Collections.emptyList();
}
}
final List<TransferItem> children = new ArrayList<>();
for(Local local : directory.list().filter(comparator, filter)) {
children.add(new TransferItem(new Path(remote, local.getName(),
local.isDirectory() ? EnumSet.of(Path.Type.directory) : EnumSet.of(Path.Type.file)), local));
}
return children;
} | @Test
public void testList() throws Exception {
final NullLocal local = new NullLocal("t") {
@Override
public AttributedList<Local> list() {
AttributedList<Local> l = new AttributedList<>();
l.add(new NullLocal(this.getAbsolute(), "c"));
return l;
}
};
final Path root = new Path("/t", EnumSet.of(Path.Type.file));
Transfer t = new UploadTransfer(new Host(new TestProtocol()), root, local);
assertEquals(Collections.singletonList(new TransferItem(new Path("/t/c", EnumSet.of(Path.Type.file)), new NullLocal("t", "c"))),
t.list(new NullSession(new Host(new TestProtocol())), root, local, new DisabledListProgressListener()));
} |
@Override
public byte[] serialize(final String topic, final List<?> values) {
if (values == null) {
return null;
}
final T single = extractOnlyColumn(values, topic);
return inner.serialize(topic, single);
} | @Test
public void shouldSerializeNewStyleNulls() {
// When:
final byte[] result = serializer.serialize(TOPIC, HEADERS, null);
// Then:
assertThat(result, is(nullValue()));
} |
@Operation(summary = "Get all certificates based on conditions")
@PostMapping(value = "/search", consumes = "application/json")
@ResponseBody
public Page<Certificate> search(@RequestBody CertSearchRequest request,
@RequestParam(name = "page", defaultValue = "0") int pageIndex,
@RequestParam(name = "size", defaultValue = "30") int pageSize) {
return certificateService.searchAll(request, pageIndex, pageSize);
} | @Test
public void getAllCertificatesBasedOnConditions() {
CertSearchRequest request = new CertSearchRequest();
when(certificateServiceMock.searchAll(request, 1, 10)).thenReturn(getPageCertificates());
Page<Certificate> result = controllerMock.search(request, 1, 10);
verify(certificateServiceMock, times(1)).searchAll(any(CertSearchRequest.class), anyInt(), anyInt());
assertNotNull(result);
assertEquals(1, result.getTotalPages());
assertEquals(2, result.getContent().size());
} |
public NugetPackage parse(InputStream stream) throws NuspecParseException {
try {
final DocumentBuilder db = XmlUtils.buildSecureDocumentBuilder();
final Document d = db.parse(stream);
final XPath xpath = XPathFactory.newInstance().newXPath();
final NugetPackage nuspec = new NugetPackage();
if (xpath.evaluate("/package/metadata/id", d, XPathConstants.NODE) == null
|| xpath.evaluate("/package/metadata/version", d, XPathConstants.NODE) == null
|| xpath.evaluate("/package/metadata/authors", d, XPathConstants.NODE) == null
|| xpath.evaluate("/package/metadata/description", d, XPathConstants.NODE) == null) {
throw new NuspecParseException("Invalid Nuspec format");
}
nuspec.setId(xpath.evaluate("/package/metadata/id", d));
nuspec.setVersion(xpath.evaluate("/package/metadata/version", d));
nuspec.setAuthors(xpath.evaluate("/package/metadata/authors", d));
nuspec.setOwners(getOrNull((Node) xpath.evaluate("/package/metadata/owners", d, XPathConstants.NODE)));
nuspec.setLicenseUrl(getOrNull((Node) xpath.evaluate("/package/metadata/licenseUrl", d, XPathConstants.NODE)));
nuspec.setTitle(getOrNull((Node) xpath.evaluate("/package/metadata/title", d, XPathConstants.NODE)));
nuspec.setDescription(xpath.evaluate("/package/metadata/description", d));
return nuspec;
} catch (ParserConfigurationException | SAXException | IOException | XPathExpressionException | NuspecParseException e) {
throw new NuspecParseException("Unable to parse nuspec", e);
}
} | @Test(expected = NuspecParseException.class)
public void testNotNuspec() throws Exception {
XPathNuspecParser parser = new XPathNuspecParser();
//InputStream is = XPathNuspecParserTest.class.getClassLoader().getResourceAsStream("suppressions.xml");
InputStream is = BaseTest.getResourceAsStream(this, "suppressions.xml");
parser.parse(is);
} |
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
PCollectionsImmutableSet<?> that = (PCollectionsImmutableSet<?>) o;
return Objects.equals(underlying(), that.underlying());
} | @Test
public void testEquals() {
final MapPSet<Object> mock = mock(MapPSet.class);
assertEquals(new PCollectionsImmutableSet<>(mock), new PCollectionsImmutableSet<>(mock));
final MapPSet<Object> someOtherMock = mock(MapPSet.class);
assertNotEquals(new PCollectionsImmutableSet<>(mock), new PCollectionsImmutableSet<>(someOtherMock));
} |
public Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(final Set<TopicPartition> partitions,
final Timer timer) {
if (partitions.isEmpty()) return Collections.emptyMap();
final Generation generationForOffsetRequest = generationIfStable();
if (pendingCommittedOffsetRequest != null &&
!pendingCommittedOffsetRequest.sameRequest(partitions, generationForOffsetRequest)) {
// if we were waiting for a different request, then just clear it.
pendingCommittedOffsetRequest = null;
}
long attempts = 0L;
do {
if (!ensureCoordinatorReady(timer)) return null;
// contact coordinator to fetch committed offsets
final RequestFuture<Map<TopicPartition, OffsetAndMetadata>> future;
if (pendingCommittedOffsetRequest != null) {
future = pendingCommittedOffsetRequest.response;
} else {
future = sendOffsetFetchRequest(partitions);
pendingCommittedOffsetRequest = new PendingCommittedOffsetRequest(partitions, generationForOffsetRequest, future);
}
client.poll(future, timer);
if (future.isDone()) {
pendingCommittedOffsetRequest = null;
if (future.succeeded()) {
return future.value();
} else if (!future.isRetriable()) {
throw future.exception();
} else {
timer.sleep(retryBackoff.backoff(attempts++));
}
} else {
return null;
}
} while (timer.notExpired());
return null;
} | @Test
public void testFetchCommittedOffsets() {
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
long offset = 500L;
String metadata = "blahblah";
Optional<Integer> leaderEpoch = Optional.of(15);
OffsetFetchResponse.PartitionData data = new OffsetFetchResponse.PartitionData(offset, leaderEpoch,
metadata, Errors.NONE);
client.prepareResponse(offsetFetchResponse(Errors.NONE, singletonMap(t1p, data)));
Map<TopicPartition, OffsetAndMetadata> fetchedOffsets = coordinator.fetchCommittedOffsets(singleton(t1p),
time.timer(Long.MAX_VALUE));
assertNotNull(fetchedOffsets);
assertEquals(new OffsetAndMetadata(offset, leaderEpoch, metadata), fetchedOffsets.get(t1p));
} |
@Override
public String toString() {
return toString(true);
} | @Test
public void testToStringHumanWithQuota() {
long length = Long.MAX_VALUE;
long fileCount = 222222222;
long directoryCount = 33333;
long quota = 222256578;
long spaceConsumed = 1073741825;
long spaceQuota = 1;
ContentSummary contentSummary = new ContentSummary.Builder().length(length).
fileCount(fileCount).directoryCount(directoryCount).quota(quota).
spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build();
String expected = " 212.0 M 1023 1 "
+ " -1 G 32.6 K 211.9 M 8.0 E ";
assertEquals(expected, contentSummary.toString(true, true));
} |
@Override
public void write(T record) {
recordConsumer.startMessage();
try {
messageWriter.writeTopLevelMessage(record);
} catch (RuntimeException e) {
Message m = (record instanceof Message.Builder) ? ((Message.Builder) record).build() : (Message) record;
LOG.error("Cannot write message {}: {}", e.getMessage(), m);
throw e;
}
recordConsumer.endMessage();
} | @Test
public void testMessageRecursion() {
RecordConsumer readConsumerMock = Mockito.mock(RecordConsumer.class);
Configuration conf = new Configuration();
ProtoSchemaConverter.setMaxRecursion(conf, 1);
ProtoWriteSupport<Trees.BinaryTree> spyWriter =
createReadConsumerInstance(Trees.BinaryTree.class, readConsumerMock, conf);
Trees.BinaryTree.Builder msg = Trees.BinaryTree.newBuilder();
Trees.BinaryTree.Builder cur = msg;
for (int i = 0; i < 10; ++i) {
cur.getValueBuilder().setTypeUrl("" + i);
cur = cur.getRightBuilder();
}
Trees.BinaryTree built = msg.build();
spyWriter.write(built);
InOrder inOrder = Mockito.inOrder(readConsumerMock);
inOrder.verify(readConsumerMock).startMessage();
inOrder.verify(readConsumerMock).startField("value", 0);
inOrder.verify(readConsumerMock).startGroup();
inOrder.verify(readConsumerMock).startField("type_url", 0);
inOrder.verify(readConsumerMock).addBinary(Binary.fromConstantByteArray("0".getBytes()));
inOrder.verify(readConsumerMock).endField("type_url", 0);
inOrder.verify(readConsumerMock).startField("value", 1);
inOrder.verify(readConsumerMock).addBinary(Binary.fromConstantByteArray("".getBytes()));
inOrder.verify(readConsumerMock).endField("value", 1);
inOrder.verify(readConsumerMock).endGroup();
inOrder.verify(readConsumerMock).endField("value", 0);
inOrder.verify(readConsumerMock).startField("right", 2);
inOrder.verify(readConsumerMock).startGroup();
inOrder.verify(readConsumerMock).startField("value", 0);
inOrder.verify(readConsumerMock).startGroup();
inOrder.verify(readConsumerMock).startField("type_url", 0);
inOrder.verify(readConsumerMock).addBinary(Binary.fromConstantByteArray("1".getBytes()));
inOrder.verify(readConsumerMock).endField("type_url", 0);
inOrder.verify(readConsumerMock).startField("value", 1);
inOrder.verify(readConsumerMock).addBinary(Binary.fromConstantByteArray("".getBytes()));
inOrder.verify(readConsumerMock).endField("value", 1);
inOrder.verify(readConsumerMock).endGroup();
inOrder.verify(readConsumerMock).endField("value", 0);
inOrder.verify(readConsumerMock).startField("right", 2);
inOrder.verify(readConsumerMock)
.addBinary(
Binary.fromConstantByteArray(built.getRight().getRight().toByteArray()));
inOrder.verify(readConsumerMock).endField("right", 2);
inOrder.verify(readConsumerMock).endGroup();
inOrder.verify(readConsumerMock).endField("right", 2);
inOrder.verify(readConsumerMock).endMessage();
Mockito.verifyNoMoreInteractions(readConsumerMock);
} |
public boolean isResolverViewId() {
return viewId.contains(SEPARATOR);
} | @Test
public void testStandardView() {
final ViewResolverDecoder decoder = new ViewResolverDecoder("62068954bd0cd7035876fcec");
assertFalse(decoder.isResolverViewId());
assertThatThrownBy(decoder::getResolverName)
.isExactlyInstanceOf(IllegalArgumentException.class);
assertThatThrownBy(decoder::getViewId)
.isExactlyInstanceOf(IllegalArgumentException.class);
} |
@Override
public boolean updateEntryExpiration(K key, long ttl, TimeUnit ttlUnit, long maxIdleTime, TimeUnit maxIdleUnit) {
return get(updateEntryExpirationAsync(key, ttl, ttlUnit, maxIdleTime, maxIdleUnit));
} | @Test
public void testUpdateEntryExpiration() throws InterruptedException {
RMapCache<Integer, Integer> cache = redisson.getMapCache("testUpdateEntryExpiration");
cache.put(1, 2, 3, TimeUnit.SECONDS);
Thread.sleep(2000);
long ttl = cache.remainTimeToLive(1);
assertThat(ttl).isBetween(900L, 1000L);
assertThat(cache.updateEntryExpiration(1, 2, TimeUnit.SECONDS, -1, TimeUnit.SECONDS)).isTrue();
long ttl2 = cache.remainTimeToLive(1);
assertThat(ttl2).isBetween(1900L, 2000L);
Thread.sleep(2000);
assertThat(cache.updateEntryExpiration(1, 2, TimeUnit.SECONDS, -1, TimeUnit.SECONDS)).isFalse();
} |
public String getProgress(final boolean running, final long size, final long transferred) {
return this.getProgress(System.currentTimeMillis(), running, size, transferred);
} | @Test
public void testStopped() {
final long start = System.currentTimeMillis();
Speedometer m = new Speedometer(start, true);
assertEquals("0 B of 1.0 MB", m.getProgress(true, 1000000L, 0L));
assertEquals("500.0 KB of 1.0 MB", m.getProgress(false, 1000000L, 1000000L / 2));
} |
@Override
public Optional<ConfigItem> resolve(final String propertyName, final boolean strict) {
if (propertyName.startsWith(KSQL_REQUEST_CONFIG_PROPERTY_PREFIX)) {
return resolveRequestConfig(propertyName);
} else if (propertyName.startsWith(KSQL_CONFIG_PROPERTY_PREFIX)
&& !propertyName.startsWith(KSQL_STREAMS_PREFIX)) {
return resolveKsqlConfig(propertyName);
}
return resolveStreamsConfig(propertyName, strict);
} | @Test
public void shouldResolveKsqlPrefixedProducerConfig() {
assertThat(resolver.resolve(
KsqlConfig.KSQL_STREAMS_PREFIX + ProducerConfig.BUFFER_MEMORY_CONFIG, true),
is(resolvedItem(ProducerConfig.BUFFER_MEMORY_CONFIG, PRODUCER_CONFIG_DEF)));
} |
@Override
public Iterable<RedisClusterNode> clusterGetNodes() {
return read(null, StringCodec.INSTANCE, CLUSTER_NODES);
} | @Test
public void testClusterGetNodes() {
Iterable<RedisClusterNode> nodes = connection.clusterGetNodes();
assertThat(nodes).hasSize(6);
for (RedisClusterNode redisClusterNode : nodes) {
assertThat(redisClusterNode.getLinkState()).isNotNull();
assertThat(redisClusterNode.getFlags()).isNotEmpty();
assertThat(redisClusterNode.getHost()).isNotNull();
assertThat(redisClusterNode.getPort()).isNotNull();
assertThat(redisClusterNode.getId()).isNotNull();
assertThat(redisClusterNode.getType()).isNotNull();
if (redisClusterNode.getType() == NodeType.MASTER) {
assertThat(redisClusterNode.getSlotRange().getSlots()).isNotEmpty();
} else {
assertThat(redisClusterNode.getMasterId()).isNotNull();
}
}
} |
public static int countArguments(String sql) {
int argCount = 0;
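// Naive scan: counts every '?', including any that appear inside string literals.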
for (int i = 0; i < sql.length(); i++) {
if (sql.charAt(i) == '?') {
argCount++;
}
}
return argCount;
} | @Test
public void countArguments() {
assertThat(SqlLogFormatter.countArguments("select * from issues")).isZero();
assertThat(SqlLogFormatter.countArguments("select * from issues where id=?")).isOne();
assertThat(SqlLogFormatter.countArguments("select * from issues where id=? and kee=?")).isEqualTo(2);
} |
public PluginWrapper(PluginManager parent, File archive, Manifest manifest, URL baseResourceURL,
ClassLoader classLoader, File disableFile,
List<Dependency> dependencies, List<Dependency> optionalDependencies) {
this.parent = parent;
this.manifest = manifest;
this.shortName = Util.intern(computeShortName(manifest, archive.getName()));
this.baseResourceURL = baseResourceURL;
this.classLoader = classLoader;
this.disableFile = disableFile;
this.active = !disableFile.exists();
this.dependencies = dependencies;
this.optionalDependencies = optionalDependencies;
for (Dependency d : optionalDependencies) {
assert d.optional : d + " included among optionalDependencies of " + shortName + " but was not marked optional";
}
this.archive = archive;
} | @Test
public void dependencyFailedToLoad() {
pluginWrapper("dependency").version("5").buildFailed();
PluginWrapper pw = pluginWrapper("dependee").deps("dependency:3").buildLoaded();
final IOException ex = assertThrows(IOException.class, pw::resolvePluginDependencies);
assertContains(ex, "Failed to load: Dependee (dependee 42)", "Failed to load: Dependency (dependency 5)");
} |
public static Version of(int major, int minor) {
if (major == UNKNOWN_VERSION && minor == UNKNOWN_VERSION) {
return UNKNOWN;
} else {
return new Version(major, minor);
}
} | @Test(expected = AssertionError.class)
@RequireAssertEnabled
public void construct_withNegativeMajor() {
Version.of(-1, 1);
} |
@Override
public OAuth2AccessTokenDO checkAccessToken(String accessToken) {
OAuth2AccessTokenDO accessTokenDO = getAccessToken(accessToken);
if (accessTokenDO == null) {
throw exception0(GlobalErrorCodeConstants.UNAUTHORIZED.getCode(), "Access token does not exist");
}
if (DateUtils.isExpired(accessTokenDO.getExpiresTime())) {
throw exception0(GlobalErrorCodeConstants.UNAUTHORIZED.getCode(), "Access token has expired");
}
return accessTokenDO;
} | @Test
public void testCheckAccessToken_null() {
// Invoke, then assert the expected error
assertServiceException(() -> oauth2TokenService.checkAccessToken(randomString()),
new ErrorCode(401, "Access token does not exist"));
} |
public static void destroyAll() {
synchronized (globalLock) {
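// Iterate over a copy, since each destroy() removes the instance from allInstances.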
for (FrameworkModel frameworkModel : new ArrayList<>(allInstances)) {
frameworkModel.destroy();
}
}
} | @Test
void destroyAll() {
FrameworkModel frameworkModel = new FrameworkModel();
frameworkModel.defaultApplication();
frameworkModel.newApplication();
FrameworkModel.destroyAll();
Assertions.assertTrue(FrameworkModel.getAllInstances().isEmpty());
Assertions.assertTrue(frameworkModel.isDestroyed());
try {
frameworkModel.defaultApplication();
Assertions.fail("Cannot create new application after framework model destroyed");
} catch (Exception e) {
Assertions.assertEquals("FrameworkModel is destroyed", e.getMessage(), StringUtils.toString(e));
}
try {
frameworkModel.newApplication();
Assertions.fail("Cannot create new application after framework model destroyed");
} catch (Exception e) {
Assertions.assertEquals("FrameworkModel is destroyed", e.getMessage(), StringUtils.toString(e));
}
} |
public List<ScanFilterData> createScanFilterDataForBeaconParser(BeaconParser beaconParser, List<Identifier> identifiers) {
ArrayList<ScanFilterData> scanFilters = new ArrayList<ScanFilterData>();
long typeCode = beaconParser.getMatchingBeaconTypeCode();
int startOffset = beaconParser.getMatchingBeaconTypeCodeStartOffset();
int endOffset = beaconParser.getMatchingBeaconTypeCodeEndOffset();
byte[] typeCodeBytes = BeaconParser.longToByteArray(typeCode, endOffset-startOffset+1);
if (identifiers != null && identifiers.size() > 0 && identifiers.get(0) != null && beaconParser.getMatchingBeaconTypeCode() == 0x0215) {
// If the type code is 0x0215 (iBeacon), we also allow adding identifiers to the filter
for (int manufacturer : beaconParser.getHardwareAssistManufacturers()) {
ScanFilterData sfd = new ScanFilterData();
sfd.manufacturer = manufacturer;
int length = 18;
if (identifiers.size() == 2) {
length = 20;
}
if (identifiers.size() == 3) {
length = 22;
}
sfd.filter = new byte[length];
sfd.filter[0] = typeCodeBytes[0];
sfd.filter[1] = typeCodeBytes[1];
byte[] idBytes = identifiers.get(0).toByteArray();
for (int i = 0; i < idBytes.length; i++) {
sfd.filter[i+2] = idBytes[i];
}
if (identifiers.size() > 1 && identifiers.get(1) != null) {
idBytes = identifiers.get(1).toByteArray();
for (int i = 0; i < idBytes.length; i++) {
sfd.filter[i+18] = idBytes[i];
}
}
if (identifiers.size() > 2 && identifiers.get(2) != null) {
idBytes = identifiers.get(2).toByteArray();
for (int i = 0; i < idBytes.length; i++) {
sfd.filter[i+20] = idBytes[i];
}
}
sfd.mask = new byte[length];
for (int i = 0 ; i < length; i++) {
sfd.mask[i] = (byte) 0xff;
}
sfd.serviceUuid = null;
sfd.serviceUuid128Bit = new byte[0];
scanFilters.add(sfd);
return scanFilters;
}
}
for (int manufacturer : beaconParser.getHardwareAssistManufacturers()) {
ScanFilterData sfd = new ScanFilterData();
Long serviceUuid = beaconParser.getServiceUuid();
// Note: the -2 here is because we want the filter and mask to start after the
// two-byte manufacturer code, and the beacon parser expression is based on offsets
// from the start of the two byte code
int length = endOffset + 1 - 2;
byte[] filter = new byte[0];
byte[] mask = new byte[0];
if (length > 0) {
filter = new byte[length];
mask = new byte[length];
for (int layoutIndex = 2; layoutIndex <= endOffset; layoutIndex++) {
int filterIndex = layoutIndex-2;
if (layoutIndex < startOffset) {
filter[filterIndex] = 0;
mask[filterIndex] = 0;
} else {
filter[filterIndex] = typeCodeBytes[layoutIndex-startOffset];
mask[filterIndex] = (byte) 0xff;
}
}
}
sfd.manufacturer = manufacturer;
sfd.filter = filter;
sfd.mask = mask;
sfd.serviceUuid = serviceUuid;
sfd.serviceUuid128Bit = beaconParser.getServiceUuid128Bit();
scanFilters.add(sfd);
}
return scanFilters;
} | @Test
public void testScanFilterWithIdentifiers() throws Exception {
org.robolectric.shadows.ShadowLog.stream = System.err;
BeaconParser parser = new BeaconParser().setBeaconLayout("m:2-3=0215,i:4-19,i:20-21,i:22-23,p:24-24");
parser.setHardwareAssistManufacturerCodes(new int[] {0x004c});
BeaconManager.setManifestCheckingDisabled(true); // no manifest available in robolectric
ArrayList<Identifier> identifiers = new ArrayList<Identifier>();
identifiers.add(Identifier.parse("2F234454-CF6D-4A0F-ADF2-F4911BA9FFA6"));
identifiers.add(Identifier.parse("0x0102"));
identifiers.add(Identifier.parse("0x0304"));
List<ScanFilterUtils.ScanFilterData> scanFilterDatas = new ScanFilterUtils().createScanFilterDataForBeaconParser(parser, identifiers);
assertEquals("scanFilters should be of correct size", 1, scanFilterDatas.size());
ScanFilterUtils.ScanFilterData sfd = scanFilterDatas.get(0);
assertEquals("manufacturer should be right", 0x004c, sfd.manufacturer);
assertEquals("mask length should be right", 22, sfd.mask.length);
assertArrayEquals("mask should be right", new byte[] {(byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff}, sfd.mask);
assertArrayEquals("filter should be right", new byte[] {(byte)0x02, (byte)0x15, (byte)0x2F, (byte)0x23, (byte)0x44, (byte)0x54, (byte)0xCF, (byte)0x6D, (byte)0x4A, (byte)0x0F, (byte)0xAD, (byte)0xF2, (byte)0xF4, (byte)0x91, (byte)0x1B, (byte)0xA9, (byte)0xFF, (byte)0xA6, (byte)0x01, (byte)0x02, (byte)0x03, (byte)0x04}, sfd.filter);
} |
static String buildRedirect(String redirectUri, Map<String, Object> params) {
String paramsString = params.entrySet()
.stream()
.map(e -> e.getKey() + "=" + urlEncode(String.valueOf(e.getValue())))
.collect(Collectors.joining("&"));
if (redirectUri.contains("?")) {
return redirectUri + "&" + paramsString;
}
return redirectUri + "?" + paramsString;
} | @Test
public void testBuildRedirect() {
String url = OAuth2AuthorizeController.buildRedirect("http://hsweb.me/callback", Collections.singletonMap("code", "1234"));
assertEquals("http://hsweb.me/callback?code=1234", url);
} |
void decode(int streamId, ByteBuf in, Http2Headers headers, boolean validateHeaders) throws Http2Exception {
Http2HeadersSink sink = new Http2HeadersSink(
streamId, headers, maxHeaderListSize, validateHeaders);
// Check for dynamic table size updates, which must occur at the beginning:
// https://www.rfc-editor.org/rfc/rfc7541.html#section-4.2
decodeDynamicTableSizeUpdates(in);
decode(in, sink);
// Now that we've read all of our headers we can perform the validation steps. We must
// delay throwing until this point to prevent dynamic table corruption.
sink.finish();
} | @Test
public void testIncompleteHeaderFieldRepresentation() throws Http2Exception {
// Incomplete Literal Header Field with Incremental Indexing
byte[] input = {(byte) 0x40};
final ByteBuf in = Unpooled.wrappedBuffer(input);
try {
assertThrows(Http2Exception.class, new Executable() {
@Override
public void execute() throws Throwable {
hpackDecoder.decode(0, in, mockHeaders, true);
}
});
} finally {
in.release();
}
} |
public static String formatSql(final AstNode root) {
final StringBuilder builder = new StringBuilder();
new Formatter(builder).process(root, 0);
return StringUtils.stripEnd(builder.toString(), "\n");
} | @Test
public void shouldFormatTumblingWindowWithRetention() {
// Given:
final String statementString = "CREATE STREAM S AS SELECT ITEMID, COUNT(*) FROM ORDERS WINDOW TUMBLING (SIZE 7 DAYS, RETENTION 14 DAYS) GROUP BY ITEMID;";
final Statement statement = parseSingle(statementString);
final String result = SqlFormatter.formatSql(statement);
assertThat(result, is("CREATE STREAM S AS SELECT\n"
+ " ITEMID,\n"
+ " COUNT(*)\n"
+ "FROM ORDERS ORDERS\n"
+ "WINDOW TUMBLING ( SIZE 7 DAYS , RETENTION 14 DAYS ) \n"
+ "GROUP BY ITEMID\n"
+ "EMIT CHANGES"));
} |
public String callServer(String api, Map<String, String> params, Map<String, String> body, String curServer,
String method) throws NacosException {
long start = System.currentTimeMillis();
long end = 0;
String namespace = params.get(CommonParams.NAMESPACE_ID);
String group = params.get(CommonParams.GROUP_NAME);
String serviceName = params.get(CommonParams.SERVICE_NAME);
params.putAll(getSecurityHeaders(namespace, group, serviceName));
Header header = NamingHttpUtil.builderHeader();
String url;
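// curServer may be a full http(s) URL or a bare host[:port]; build a complete request URL either way.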
if (curServer.startsWith(HTTPS_PREFIX) || curServer.startsWith(HTTP_PREFIX)) {
url = curServer + api;
} else {
if (!InternetAddressUtil.containsPort(curServer)) {
curServer = curServer + InternetAddressUtil.IP_PORT_SPLITER + serverPort;
}
url = NamingHttpClientManager.getInstance().getPrefix() + curServer + api;
}
try {
HttpRestResult<String> restResult = nacosRestTemplate.exchangeForm(url, header,
Query.newInstance().initParams(params), body, method, String.class);
end = System.currentTimeMillis();
MetricsMonitor.getNamingRequestMonitor(method, url, String.valueOf(restResult.getCode()))
.observe(end - start);
if (restResult.ok()) {
return restResult.getData();
}
if (HttpStatus.SC_NOT_MODIFIED == restResult.getCode()) {
return StringUtils.EMPTY;
}
throw new NacosException(restResult.getCode(), restResult.getMessage());
} catch (NacosException e) {
NAMING_LOGGER.error("[NA] failed to request", e);
throw e;
} catch (Exception e) {
NAMING_LOGGER.error("[NA] failed to request", e);
throw new NacosException(NacosException.SERVER_ERROR, e);
}
} | @Test
void testCallServerFail304() throws Exception {
//given
NacosRestTemplate nacosRestTemplate = mock(NacosRestTemplate.class);
when(nacosRestTemplate.exchangeForm(any(), any(), any(), any(), any(), any())).thenAnswer(invocationOnMock -> {
// answer with a 304 Not Modified response
HttpRestResult<Object> res = new HttpRestResult<Object>();
res.setMessage("redirect");
res.setCode(304);
return res;
});
final Field nacosRestTemplateField = NamingHttpClientProxy.class.getDeclaredField("nacosRestTemplate");
nacosRestTemplateField.setAccessible(true);
nacosRestTemplateField.set(clientProxy, nacosRestTemplate);
String api = "/api";
Map<String, String> params = new HashMap<>();
Map<String, String> body = new HashMap<>();
String method = HttpMethod.GET;
String curServer = "127.0.0.1";
//when
String s = clientProxy.callServer(api, params, body, curServer, method);
//then
assertEquals("", s);
} |
public MethodBuilder stat(Integer stat) {
this.stat = stat;
return getThis();
} | @Test
void stat() {
MethodBuilder builder = MethodBuilder.newBuilder();
builder.stat(1);
Assertions.assertEquals(1, builder.build().getStat());
} |
@Override
public KeyVersion createKey(final String name, final byte[] material,
final Options options) throws IOException {
return doOp(new ProviderCallable<KeyVersion>() {
@Override
public KeyVersion call(KMSClientProvider provider) throws IOException {
return provider.createKey(name, material, options);
}
}, nextIdx(), false);
} | @Test
public void testClientRetriesWithRuntimeException() throws Exception {
Configuration conf = new Configuration();
conf.setInt(
CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, 3);
KMSClientProvider p1 = mock(KMSClientProvider.class);
when(p1.createKey(Mockito.anyString(), Mockito.any(Options.class)))
.thenThrow(new RuntimeException("p1"));
KMSClientProvider p2 = mock(KMSClientProvider.class);
when(p2.createKey(Mockito.anyString(), Mockito.any(Options.class)))
.thenThrow(new IOException("p2"));
when(p1.getKMSUrl()).thenReturn("p1");
when(p2.getKMSUrl()).thenReturn("p2");
LoadBalancingKMSClientProvider kp = new LoadBalancingKMSClientProvider(
new KMSClientProvider[] {p1, p2}, 0, conf);
try {
kp.createKey("test3", new Options(conf));
fail("Should fail since provider p1 threw RuntimeException");
} catch (Exception e) {
assertTrue(e instanceof RuntimeException);
}
verify(p1, Mockito.times(1)).createKey(Mockito.eq("test3"),
Mockito.any(Options.class));
verify(p2, Mockito.never()).createKey(Mockito.eq("test3"),
Mockito.any(Options.class));
} |
private void initialize() {
log.debug("Initializing share partition: {}-{}", groupId, topicIdPartition);
// Initialize the share partition by reading the state from the persister.
ReadShareGroupStateResult response;
try {
response = persister.readState(new ReadShareGroupStateParameters.Builder()
.setGroupTopicPartitionData(new GroupTopicPartitionData.Builder<PartitionIdLeaderEpochData>()
.setGroupId(this.groupId)
.setTopicsData(Collections.singletonList(new TopicData<>(topicIdPartition.topicId(),
Collections.singletonList(PartitionFactory.newPartitionIdLeaderEpochData(topicIdPartition.partition(), 0)))))
.build())
.build()
).get();
} catch (InterruptedException | ExecutionException e) {
log.error("Failed to initialize the share partition: {}-{}", groupId, topicIdPartition, e);
throw new IllegalStateException(String.format("Failed to initialize the share partition %s-%s", groupId, topicIdPartition), e);
}
if (response == null || response.topicsData() == null || response.topicsData().size() != 1) {
log.error("Failed to initialize the share partition: {}-{}. Invalid state found: {}.",
groupId, topicIdPartition, response);
throw new IllegalStateException(String.format("Failed to initialize the share partition %s-%s", groupId, topicIdPartition));
}
TopicData<PartitionAllData> state = response.topicsData().get(0);
if (state.topicId() != topicIdPartition.topicId() || state.partitions().size() != 1
|| state.partitions().get(0).partition() != topicIdPartition.partition()) {
log.error("Failed to initialize the share partition: {}-{}. Invalid topic partition response: {}.",
groupId, topicIdPartition, response);
throw new IllegalStateException(String.format("Failed to initialize the share partition %s-%s", groupId, topicIdPartition));
}
PartitionAllData partitionData = state.partitions().get(0);
// Set the state epoch and end offset from the persisted state.
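// A startOffset of -1 means no offset has been persisted yet; fall back to 0.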
startOffset = partitionData.startOffset() != -1 ? partitionData.startOffset() : 0;
stateEpoch = partitionData.stateEpoch();
List<PersisterStateBatch> stateBatches = partitionData.stateBatches();
for (PersisterStateBatch stateBatch : stateBatches) {
if (stateBatch.firstOffset() < startOffset) {
log.error("Invalid state batch found for the share partition: {}-{}. The base offset: {}"
+ " is less than the start offset: {}.", groupId, topicIdPartition,
stateBatch.firstOffset(), startOffset);
throw new IllegalStateException(String.format("Failed to initialize the share partition %s-%s", groupId, topicIdPartition));
}
InFlightBatch inFlightBatch = new InFlightBatch(EMPTY_MEMBER_ID, stateBatch.firstOffset(),
stateBatch.lastOffset(), RecordState.forId(stateBatch.deliveryState()), stateBatch.deliveryCount(), null);
cachedState.put(stateBatch.firstOffset(), inFlightBatch);
}
// Update the endOffset of the partition.
if (!cachedState.isEmpty()) {
// If the cachedState is not empty, findNextFetchOffset flag is set to true so that any AVAILABLE records
// in the cached state are not missed
findNextFetchOffset.set(true);
endOffset = cachedState.lastEntry().getValue().lastOffset();
// In case the persister read state RPC result contains no AVAILABLE records, we can update cached state
// and start/end offsets.
maybeUpdateCachedStateAndOffsets();
} else {
endOffset = partitionData.startOffset();
}
} | @Test
public void testInitialize() {
Persister persister = Mockito.mock(Persister.class);
ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class);
Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList(
new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList(
PartitionFactory.newPartitionAllData(0, 3, 5L, Errors.NONE.code(), Errors.NONE.message(),
Arrays.asList(
new PersisterStateBatch(5L, 10L, RecordState.AVAILABLE.id, (short) 2),
new PersisterStateBatch(11L, 15L, RecordState.ARCHIVED.id, (short) 3)))))));
Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult));
SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build();
assertFalse(sharePartition.cachedState().isEmpty());
assertEquals(5, sharePartition.startOffset());
assertEquals(15, sharePartition.endOffset());
assertEquals(3, sharePartition.stateEpoch());
assertEquals(5, sharePartition.nextFetchOffset());
assertEquals(2, sharePartition.cachedState().size());
assertNotNull(sharePartition.cachedState().get(5L));
assertNotNull(sharePartition.cachedState().get(11L));
assertEquals(10, sharePartition.cachedState().get(5L).lastOffset());
assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(5L).batchState());
assertEquals(2, sharePartition.cachedState().get(5L).batchDeliveryCount());
assertNull(sharePartition.cachedState().get(5L).offsetState());
assertEquals(15, sharePartition.cachedState().get(11L).lastOffset());
assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(11L).batchState());
assertEquals(3, sharePartition.cachedState().get(11L).batchDeliveryCount());
assertNull(sharePartition.cachedState().get(11L).offsetState());
} |
public static StatementExecutorResponse execute(
final ConfiguredStatement<DropConnector> statement,
final SessionProperties sessionProperties,
final KsqlExecutionContext executionContext,
final ServiceContext serviceContext
) {
final String connectorName = statement.getStatement().getConnectorName();
final boolean ifExists = statement.getStatement().getIfExists();
final ConnectResponse<String> response =
serviceContext.getConnectClient().delete(connectorName);
if (response.error().isPresent()) {
if (ifExists && response.httpCode() == HttpStatus.SC_NOT_FOUND) {
return StatementExecutorResponse.handled(Optional.of(
new WarningEntity(statement.getMaskedStatementText(),
"Connector '" + connectorName + "' does not exist.")));
} else {
final String errorMsg = "Failed to drop connector: " + response.error().get();
throw new KsqlRestException(EndpointResponse.create()
.status(response.httpCode())
.entity(new KsqlErrorMessage(Errors.toErrorCode(response.httpCode()), errorMsg))
.build()
);
}
}
return StatementExecutorResponse.handled(Optional.of(
new DropConnectorEntity(statement.getMaskedStatementText(), connectorName)));
} | @Test
public void shouldReturnOnSuccess() {
// Given:
when(connectClient.delete(anyString()))
.thenReturn(ConnectResponse.success("foo", HttpStatus.SC_OK));
// When:
final Optional<KsqlEntity> response = DropConnectorExecutor
.execute(DROP_CONNECTOR_CONFIGURED, mock(SessionProperties.class), null, serviceContext).getEntity();
// Then:
assertThat("expected response", response.isPresent());
assertThat(((DropConnectorEntity) response.get()).getConnectorName(), is("foo"));
} |
public synchronized Topology addSink(final String name,
final String topic,
final String... parentNames) {
internalTopologyBuilder.addSink(name, topic, null, null, null, parentNames);
return this;
} | @Test
public void shouldNotAllowNullTopicWhenAddingSink() {
assertThrows(NullPointerException.class, () -> topology.addSink("name", (String) null));
} |
@Override
public PollResult poll(long currentTimeMs) {
return pollInternal(
prepareFetchRequests(),
this::handleFetchSuccess,
this::handleFetchFailure
);
} | @Test
public void testUpdatePositionWithLastRecordMissingFromBatch() {
buildFetcher();
MemoryRecords records = MemoryRecords.withRecords(Compression.NONE,
new SimpleRecord("0".getBytes(), "v".getBytes()),
new SimpleRecord("1".getBytes(), "v".getBytes()),
new SimpleRecord("2".getBytes(), "v".getBytes()),
new SimpleRecord(null, "value".getBytes()));
// Remove the last record to simulate compaction
MemoryRecords.FilterResult result = records.filterTo(tp0, new MemoryRecords.RecordFilter(0, 0) {
@Override
protected BatchRetentionResult checkBatchRetention(RecordBatch batch) {
return new BatchRetentionResult(BatchRetention.DELETE_EMPTY, false);
}
@Override
protected boolean shouldRetainRecord(RecordBatch recordBatch, Record record) {
return record.key() != null;
}
}, ByteBuffer.allocate(1024), Integer.MAX_VALUE, BufferSupplier.NO_CACHING);
result.outputBuffer().flip();
MemoryRecords compactedRecords = MemoryRecords.readableRecords(result.outputBuffer());
assignFromUser(singleton(tp0));
subscriptions.seek(tp0, 0);
assertEquals(1, sendFetches());
client.prepareResponse(fullFetchResponse(tidp0, compactedRecords, Errors.NONE, 100L, 0));
networkClientDelegate.poll(time.timer(0));
assertTrue(fetcher.hasCompletedFetches());
Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> allFetchedRecords = fetchRecords();
assertTrue(allFetchedRecords.containsKey(tp0));
List<ConsumerRecord<byte[], byte[]>> fetchedRecords = allFetchedRecords.get(tp0);
assertEquals(3, fetchedRecords.size());
for (int i = 0; i < 3; i++) {
assertEquals(Integer.toString(i), new String(fetchedRecords.get(i).key()));
}
// The next offset should point to the next batch
assertEquals(4L, subscriptions.position(tp0).offset);
} |
public Set<Device> getDevicesFromPath(String path) throws IOException {
MutableInt counter = new MutableInt(0);
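// Walk only the immediate children (depth 1); VE devices appear as files named "veslot<N>".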
try (Stream<Path> stream = Files.walk(Paths.get(path), 1)) {
return stream.filter(p -> p.toFile().getName().startsWith("veslot"))
.map(p -> toDevice(p, counter))
.collect(Collectors.toSet());
}
} | @Test
public void testDeviceStateNumberTooHigh() throws IOException {
createVeSlotFile(0);
createOsStateFile(5);
when(mockCommandExecutor.getOutput())
.thenReturn("8:1:character special file");
when(udevUtil.getSysPath(anyInt(), anyChar())).thenReturn(testFolder);
Set<Device> devices = discoverer.getDevicesFromPath(testFolder);
assertEquals("Number of devices", 1, devices.size());
Device device = devices.iterator().next();
assertEquals("Device ID", 0, device.getId());
assertEquals("Major number", 8, device.getMajorNumber());
assertEquals("Minor number", 1, device.getMinorNumber());
assertEquals("Status", "Unknown (5)", device.getStatus());
assertFalse("Device should not be healthy", device.isHealthy());
} |
@Override
public String getPonLinks(String target) {
DriverHandler handler = handler();
NetconfController controller = handler.get(NetconfController.class);
MastershipService mastershipService = handler.get(MastershipService.class);
DeviceId ncDeviceId = handler.data().deviceId();
checkNotNull(controller, "Netconf controller is null");
String reply = null;
if (!mastershipService.isLocalMaster(ncDeviceId)) {
log.warn("Not master for {} Use {} to execute command",
ncDeviceId,
mastershipService.getMasterFor(ncDeviceId));
return null;
}
try {
StringBuilder request = new StringBuilder();
request.append(VOLT_NE_OPEN + VOLT_NE_NAMESPACE);
request.append(ANGLE_RIGHT + NEW_LINE);
request.append(buildStartTag(VOLT_PORTS));
if (target != null) {
int pon;
try {
pon = Integer.parseInt(target);
if (pon <= ZERO) {
log.error("Invalid integer for ponlink-id:{}", target);
return null;
}
} catch (NumberFormatException e) {
log.error("Non-number input for ponlink-id:{}", target);
return null;
}
request.append(buildStartTag(GPON_PONLINK_PORTS))
.append(buildStartTag(GPON_PONLINK_PORT))
.append(buildStartTag(PONLINK_ID, false))
.append(target)
.append(buildEndTag(PONLINK_ID))
.append(buildEndTag(GPON_PONLINK_PORT))
.append(buildEndTag(GPON_PONLINK_PORTS));
} else {
request.append(buildEmptyTag(GPON_PONLINK_PORTS));
}
request.append(buildEndTag(VOLT_PORTS));
request.append(VOLT_NE_CLOSE);
reply = controller
.getDevicesMap()
.get(ncDeviceId)
.getSession()
.get(request.toString(), REPORT_ALL);
} catch (NetconfException e) {
log.error("Cannot communicate to device {} exception {}", ncDeviceId, e);
}
return reply;
} | @Test
public void testValidGetPonLinks() throws Exception {
String reply;
String target;
for (int i = ZERO; i < VALID_GET_TCS.length; i++) {
target = VALID_GET_TCS[i];
currentKey = i;
reply = voltConfig.getPonLinks(target);
assertNotNull("Incorrect response for VALID_GET_TCS", reply);
}
} |
@Override
public InstancePublishInfo getInstancePublishInfo(Service service) {
return publishers.get(service);
} | @Test
void getInstancePublishInfo() {
addServiceInstance();
InstancePublishInfo publishInfo = abstractClient.getInstancePublishInfo(service);
assertNotNull(publishInfo);
} |
@VisibleForTesting
void validateUsernameUnique(Long id, String username) {
if (StrUtil.isBlank(username)) {
return;
}
AdminUserDO user = userMapper.selectByUsername(username);
if (user == null) {
return;
}
// If id is null, there is no need to check whether the existing user has the same id
if (id == null) {
throw exception(USER_USERNAME_EXISTS);
}
if (!user.getId().equals(id)) {
throw exception(USER_USERNAME_EXISTS);
}
} | @Test
public void testValidateUsernameUnique_usernameExistsForCreate() {
// Prepare parameters
String username = randomString();
// Mock data
userMapper.insert(randomAdminUserDO(o -> o.setUsername(username)));
// Invoke and verify the expected exception
assertServiceException(() -> userService.validateUsernameUnique(null, username),
USER_USERNAME_EXISTS);
} |
@CheckForNull
static String checkEventCategory(@Nullable String category) {
if (category == null) {
return null;
}
checkArgument(category.length() <= MAX_CATEGORY_LENGTH, "Event category length (%s) is longer than the maximum authorized (%s). '%s' was provided.",
category.length(), MAX_CATEGORY_LENGTH, category);
return category;
} | @Test
void fail_if_category_longer_than_50() {
assertThatThrownBy(() -> EventValidator.checkEventCategory(repeat("a", 50 + 1)))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Event category length (51) is longer than the maximum authorized (50). " +
"'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' was provided.");
} |
@Override
public void execute(Context context) {
executeForBranch(treeRootHolder.getRoot());
} | @Test
public void givenUpgradeEventWithDifferentSqVersion_whenStepIsExecuted_thenANewUpgradeEventIsCreated() {
when(sonarQubeVersion.get()).thenReturn(Version.parse("10.3"));
when(dbClient.eventDao()).thenReturn(mock());
when(dbClient.eventDao().selectSqUpgradesByMostRecentFirst(any(), any())).thenReturn(getUpgradeEvents("10.2", "10.1"));
underTest.execute(new TestComputationStepContext());
verify(eventRepository, times(1)).add(eventArgumentCaptor.capture());
verifyNoMoreInteractions(eventRepository);
assertThat(eventArgumentCaptor.getAllValues())
.extracting(Event::getCategory, Event::getName)
.containsExactly(tuple(Event.Category.SQ_UPGRADE, "10.3"));
} |
@Override
public int run(InputStream in, PrintStream out, PrintStream err, List<String> args) throws Exception {
if (args.isEmpty()) {
printHelp(out);
return 0;
}
OutputStream output = out;
if (args.size() > 1) {
output = Util.fileOrStdout(args.get(args.size() - 1), out);
args = args.subList(0, args.size() - 1);
}
DataFileWriter<GenericRecord> writer = new DataFileWriter<>(new GenericDatumWriter<>());
Schema schema = null;
Map<String, byte[]> metadata = new TreeMap<>();
String inputCodec = null;
for (String inFile : expandsInputFiles(args)) {
InputStream input = Util.fileOrStdin(inFile, in);
DataFileStream<GenericRecord> reader = new DataFileStream<>(input, new GenericDatumReader<>());
if (schema == null) {
// this is the first file - set up the writer, and store the
// Schema & metadata we'll use.
schema = reader.getSchema();
for (String key : reader.getMetaKeys()) {
if (!DataFileWriter.isReservedMeta(key)) {
byte[] metadatum = reader.getMeta(key);
metadata.put(key, metadatum);
writer.setMeta(key, metadatum);
}
}
inputCodec = reader.getMetaString(DataFileConstants.CODEC);
if (inputCodec == null) {
inputCodec = DataFileConstants.NULL_CODEC;
}
writer.setCodec(CodecFactory.fromString(inputCodec));
writer.create(schema, output);
} else {
// check that we're appending to the same schema & metadata.
if (!schema.equals(reader.getSchema())) {
err.println("input files have different schemas");
reader.close();
return 1;
}
for (String key : reader.getMetaKeys()) {
if (!DataFileWriter.isReservedMeta(key)) {
byte[] metadatum = reader.getMeta(key);
byte[] writersMetadatum = metadata.get(key);
if (!Arrays.equals(metadatum, writersMetadatum)) {
err.println("input files have different non-reserved metadata");
reader.close();
return 2;
}
}
}
String thisCodec = reader.getMetaString(DataFileConstants.CODEC);
if (thisCodec == null) {
thisCodec = DataFileConstants.NULL_CODEC;
}
if (!inputCodec.equals(thisCodec)) {
err.println("input files have different codecs");
reader.close();
return 3;
}
}
writer.appendAllFrom(reader, /* recompress */ false);
reader.close();
}
writer.close();
return 0;
} | @Test
void globPatternConcat() throws Exception {
Map<String, String> metadata = new HashMap<>();
for (int i = 0; i < 3; i++) {
generateData(name.getMethodName() + "-" + i + ".avro", Type.STRING, metadata, DEFLATE);
}
File output = new File(OUTPUT_DIR, name.getMethodName() + ".avro");
List<String> args = asList(new File(INPUT_DIR, "/*").getAbsolutePath(), output.getAbsolutePath());
int returnCode = new ConcatTool().run(System.in, System.out, System.err, args);
assertEquals(0, returnCode);
assertEquals(ROWS_IN_INPUT_FILES * 3, numRowsInFile(output));
} |
@Override public void accept(K key, V value) {
subject.onNext(Map.entry(key, value));
} | @Test
public void singleKey_mostRecent() {
var timeInWriteBehind = new AtomicReference<ZonedDateTime>();
var numberOfEntries = new AtomicInteger();
// Given this cache...
var writer = new WriteBehindCacheWriter.Builder<Long, ZonedDateTime>()
.coalesce(BinaryOperator.maxBy(ZonedDateTime::compareTo))
.bufferTime(Duration.ofSeconds(1))
.writeAction(entries -> {
// We might get here before the cache has been written to,
// so just wait for the next time we are called
if (entries.isEmpty()) {
return;
}
var zonedDateTime = entries.values().iterator().next();
timeInWriteBehind.set(zonedDateTime);
numberOfEntries.set(entries.size());
}).build();
Cache<Long, ZonedDateTime> cache = Caffeine.newBuilder().build();
// When these cache updates happen...
var latest = ZonedDateTime.now().truncatedTo(DAYS);
for (int i = 0; i < 4; i++) {
latest = latest.plusNanos(200);
var value = latest;
cache.asMap().compute(1L, (key, oldValue) -> {
writer.accept(key, value);
return value;
});
}
// Then the write behind action gets 1 entry to write with the most recent time
await().untilAtomic(numberOfEntries, is(1));
await().untilAtomic(timeInWriteBehind, is(latest));
} |
@Override
public void destroy() {
servletConfig = null;
} | @Test
public void testDestroy() {
new ReportServlet().destroy();
} |
@Override
public void removeProxySelector(final DiscoveryHandlerDTO discoveryHandlerDTO, final ProxySelectorDTO proxySelectorDTO) {
DataChangedEvent dataChangedEvent = new DataChangedEvent(ConfigGroupEnum.PROXY_SELECTOR, DataEventTypeEnum.DELETE,
Collections.singletonList(DiscoveryTransfer.INSTANCE.mapToData(proxySelectorDTO)));
eventPublisher.publishEvent(dataChangedEvent);
} | @Test
public void testRemoveProxySelector() {
doNothing().when(eventPublisher).publishEvent(any(DataChangedEvent.class));
localDiscoveryProcessor.removeProxySelector(new DiscoveryHandlerDTO(), new ProxySelectorDTO());
verify(eventPublisher).publishEvent(any(DataChangedEvent.class));
} |
public static URI toURI(URL url) throws UtilException {
return toURI(url, false);
} | @Test
public void issue3676Test() {
String fileFullName = "/Uploads/20240601/aaaa.txt";
final URI uri = URLUtil.toURI(fileFullName);
final URI resolve = uri.resolve(".");
assertEquals("/Uploads/20240601/", resolve.toString());
} |
@Override
public Iterator<QueryableEntry> iterator() {
return new It();
} | @Test(expected = UnsupportedOperationException.class)
public void testIterator_empty_remove() {
result.iterator().remove();
} |
@Override
public boolean isTraceEnabled() {
return logger.isTraceEnabled();
} | @Test
public void testIsTraceEnabled() {
Logger mockLogger = mock(Logger.class);
when(mockLogger.getName()).thenReturn("foo");
when(mockLogger.isTraceEnabled()).thenReturn(true);
InternalLogger logger = new Slf4JLogger(mockLogger);
assertTrue(logger.isTraceEnabled());
verify(mockLogger).getName();
verify(mockLogger).isTraceEnabled();
} |
public static String toString(int flags) {
if (flags == TMNOFLAGS) {
return "TMNOFLAGS";
}
StringBuilder result = new StringBuilder();
if (hasFlag(flags, TMENDRSCAN)) {
add(result, "TMENDRSCAN");
}
if (hasFlag(flags, TMFAIL)) {
add(result, "TMFAIL");
}
if (hasFlag(flags, TMJOIN)) {
add(result, "TMJOIN");
}
if (hasFlag(flags, TMONEPHASE)) {
add(result, "TMONEPHASE");
}
if (hasFlag(flags, TMRESUME)) {
add(result, "TMRESUME");
}
if (hasFlag(flags, TMSTARTRSCAN)) {
add(result, "TMSTARTRSCAN");
}
if (hasFlag(flags, TMSUCCESS)) {
add(result, "TMSUCCESS");
}
if (hasFlag(flags, TMSUSPEND)) {
add(result, "TMSUSPEND");
}
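// Clear every standard XA flag bit; anything left over is non-standard and rendered as hex below.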
int nonStandardFlags = flags
& ~TMENDRSCAN
& ~TMFAIL
& ~TMJOIN
& ~TMONEPHASE
& ~TMRESUME
& ~TMSTARTRSCAN
& ~TMSUCCESS
& ~TMSUSPEND;
if (nonStandardFlags != 0) {
add(result, String.format("0x%08x", nonStandardFlags));
}
return result.toString();
} | @Test
public void test() throws Exception {
assertThat(XASupport.toString(flags), is(expectedResult));
} |
public static TransferAction forName(final String name) {
return registry.get(name);
} | @Test
public void testForName() {
assertEquals(TransferAction.overwrite.hashCode(),
TransferAction.forName(TransferAction.overwrite.name()).hashCode());
} |
@Override
public ObjectNode encode(Criterion criterion, CodecContext context) {
EncodeCriterionCodecHelper encoder = new EncodeCriterionCodecHelper(criterion, context);
return encoder.encode();
} | @Test
public void matchUdpSrcTest() {
Criterion criterion = Criteria.matchUdpSrc(tpPort);
ObjectNode result = criterionCodec.encode(criterion, context);
assertThat(result, matchesCriterion(criterion));
} |
@Override
public void run() {
while (!schedulerState.isShuttingDown()) {
if (!schedulerState.isPaused()) {
try {
toRun.run();
} catch (Throwable e) {
LOG.error("Unhandled exception. Will keep running.", e);
schedulerListeners.onSchedulerEvent(SchedulerEventType.UNEXPECTED_ERROR);
}
}
try {
waitBetweenRuns.doWait();
} catch (InterruptedException interruptedException) {
if (schedulerState.isShuttingDown()) {
LOG.debug("Thread '{}' interrupted due to shutdown.", Thread.currentThread().getName());
} else {
LOG.error("Unexpected interruption of thread. Will keep running.", interruptedException);
schedulerListeners.onSchedulerEvent(SchedulerEventType.UNEXPECTED_ERROR);
}
}
}
} | @Test
public void should_wait_on_runtime_exception() {
Assertions.assertTimeoutPreemptively(
Duration.ofSeconds(1),
() -> {
runnable.setAction(
() -> {
throw new RuntimeException();
});
runUntilShutdown.run();
assertThat(countingWaiter.counter, is(2));
});
} |
public WithJsonPath(JsonPath jsonPath, Matcher<T> resultMatcher) {
this.jsonPath = jsonPath;
this.resultMatcher = resultMatcher;
} | @Test
public void shouldMatchJsonPathEvaluatedToDoubleValue() {
assertThat(BOOKS_JSON, withJsonPath(compile("$.store.bicycle.price"), equalTo(19.95)));
assertThat(BOOKS_JSON, withJsonPath("$.store.bicycle.price", equalTo(19.95)));
} |
@PostMapping("/account_status")
@Operation(summary = "Get the data of aan an account")
public DAccountDataResult getAccountData(@RequestBody DAccountRequest request) {
AppSession appSession = validate(request);
long accountId = appSession.getAccountId();
AccountDataResult result = accountService.getAccountData(accountId);
return DAccountDataResult.copyFrom(result);
} | @Test
public void validRequest() {
DAccountRequest request = new DAccountRequest();
request.setAppSessionId("id");
AccountDataResult result = new AccountDataResult();
result.setStatus(Status.OK);
result.setError("error");
result.setEmailStatus(EmailStatus.NOT_VERIFIED);
result.setClassifiedDeceased(true);
result.setSetting2Factor(true);
result.setUnreadNotifications(1);
result.setCurrentEmailAddress("email");
when(accountService.getAccountData(eq(1L))).thenReturn(result);
DAccountDataResult accountData = accountDataController.getAccountData(request);
assertEquals(Status.OK, accountData.getStatus());
assertEquals("error", accountData.getError());
assertEquals(EmailStatus.NOT_VERIFIED, accountData.getEmailStatus());
assertEquals(true, accountData.getClassifiedDeceased());
assertEquals(1, accountData.getUnreadNotifications());
assertEquals(true, accountData.getSetting2Factor());
assertEquals("email", accountData.getCurrentEmailAddress());
} |
public ReadOperation getReadOperation() {
if (operations == null || operations.isEmpty()) {
throw new IllegalStateException("Map task has no operation.");
}
Operation readOperation = operations.get(0);
if (!(readOperation instanceof ReadOperation)) {
throw new IllegalStateException("First operation in the map task is not a ReadOperation.");
}
return (ReadOperation) readOperation;
} | @Test
public void testNoOperation() throws Exception {
// Test MapTaskExecutor without a single operation.
ExecutionStateTracker stateTracker = ExecutionStateTracker.newForTest();
try (MapTaskExecutor executor =
new MapTaskExecutor(new ArrayList<Operation>(), counterSet, stateTracker)) {
thrown.expect(IllegalStateException.class);
thrown.expectMessage("has no operation");
executor.getReadOperation();
}
} |
protected void maybeCloseFetchSessions(final Timer timer) {
final List<RequestFuture<ClientResponse>> requestFutures = sendFetchesInternal(
prepareCloseFetchSessionRequests(),
this::handleCloseFetchSessionSuccess,
this::handleCloseFetchSessionFailure
);
// Poll to ensure that request has been written to the socket. Wait until either the timer has expired or until
// all requests have received a response.
while (timer.notExpired() && !requestFutures.stream().allMatch(RequestFuture::isDone)) {
client.poll(timer, null, true);
timer.update();
}
if (!requestFutures.stream().allMatch(RequestFuture::isDone)) {
// we ran out of time before completing all futures. It is ok since we don't want to block the shutdown
// here.
log.debug("All requests couldn't be sent in the specific timeout period {}ms. " +
"This may result in unnecessary fetch sessions at the broker. Consider increasing the timeout passed for " +
"KafkaConsumer.close(Duration timeout)", timer.timeoutMs());
}
} | @Test
public void testCloseShouldBeIdempotent() {
buildFetcher();
fetcher.close();
fetcher.close();
fetcher.close();
verify(fetcher, times(1)).maybeCloseFetchSessions(any(Timer.class));
} |
@Override
public Object createWebSocket(final JettyServerUpgradeRequest request, final JettyServerUpgradeResponse response) {
try {
Optional<WebSocketAuthenticator<T>> authenticator = Optional.ofNullable(environment.getAuthenticator());
final ReusableAuth<T> authenticated;
if (authenticator.isPresent()) {
authenticated = authenticator.get().authenticate(request);
if (authenticated.invalidCredentialsProvided()) {
response.sendForbidden("Unauthorized");
return null;
}
} else {
authenticated = ReusableAuth.anonymous();
}
return new WebSocketResourceProvider<>(getRemoteAddress(request),
remoteAddressPropertyName,
this.jerseyApplicationHandler,
this.environment.getRequestLog(),
authenticated,
this.environment.getMessageFactory(),
ofNullable(this.environment.getConnectListener()),
this.environment.getIdleTimeout());
} catch (AuthenticationException | IOException e) {
logger.warn("Authentication failure", e);
try {
response.sendError(500, "Failure");
} catch (IOException ignored) {
}
return null;
}
} | @Test
void testErrorAuthorization() throws AuthenticationException, IOException {
when(environment.getAuthenticator()).thenReturn(authenticator);
when(authenticator.authenticate(eq(request))).thenThrow(new AuthenticationException("database failure"));
when(environment.jersey()).thenReturn(jerseyEnvironment);
WebSocketResourceProviderFactory<Account> factory = new WebSocketResourceProviderFactory<>(environment,
Account.class,
mock(WebSocketConfiguration.class),
REMOTE_ADDRESS_PROPERTY_NAME);
Object connection = factory.createWebSocket(request, response);
assertNull(connection);
verify(response).sendError(eq(500), eq("Failure"));
verify(authenticator).authenticate(eq(request));
} |
@Operation(summary = "queryResourceListPaging", description = "QUERY_RESOURCE_LIST_PAGING_NOTES")
@Parameters({
@Parameter(name = "type", description = "RESOURCE_TYPE", required = true, schema = @Schema(implementation = ResourceType.class)),
@Parameter(name = "fullName", description = "RESOURCE_FULLNAME", required = true, schema = @Schema(implementation = String.class, example = "bucket_name/tenant_name/type/ds")),
@Parameter(name = "searchVal", description = "SEARCH_VAL", schema = @Schema(implementation = String.class)),
@Parameter(name = "pageNo", description = "PAGE_NO", required = true, schema = @Schema(implementation = int.class, example = "1")),
@Parameter(name = "pageSize", description = "PAGE_SIZE", required = true, schema = @Schema(implementation = int.class, example = "20"))
})
@GetMapping()
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_RESOURCES_LIST_PAGING)
public Result<PageInfo<StorageEntity>> queryResourceListPaging(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "fullName") String fullName,
@RequestParam(value = "tenantCode") String tenantCode,
@RequestParam(value = "type") ResourceType type,
@RequestParam("pageNo") Integer pageNo,
@RequestParam(value = "searchVal", required = false) String searchVal,
@RequestParam("pageSize") Integer pageSize) {
checkPageParams(pageNo, pageSize);
searchVal = ParameterUtils.handleEscapes(searchVal);
return resourceService.queryResourceListPaging(loginUser, fullName, tenantCode, type, searchVal, pageNo,
pageSize);
} | @Test
public void testQueryResourceListPaging() throws Exception {
Result mockResult = new Result<>();
mockResult.setCode(Status.SUCCESS.getCode());
Mockito.when(resourcesService.queryResourceListPaging(
Mockito.any(), Mockito.anyString(), Mockito.anyString(), Mockito.any(),
Mockito.anyString(), Mockito.anyInt(), Mockito.anyInt()))
.thenReturn(mockResult);
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("type", String.valueOf(ResourceType.FILE));
paramsMap.add("id", "123");
paramsMap.add("pageNo", "1");
paramsMap.add("searchVal", "test");
paramsMap.add("pageSize", "1");
paramsMap.add("fullName", "dolphinscheduler/resourcePath");
paramsMap.add("tenantCode", "123");
MvcResult mvcResult = mockMvc.perform(get("/resources")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assertions.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
} |
long addNewRecording(
final long startPosition,
final long stopPosition,
final long startTimestamp,
final long stopTimestamp,
final int imageInitialTermId,
final int segmentFileLength,
final int termBufferLength,
final int mtuLength,
final int sessionId,
final int streamId,
final String strippedChannel,
final String originalChannel,
final String sourceIdentity)
{
final int frameLength = recordingDescriptorFrameLength(strippedChannel, originalChannel, sourceIdentity);
final int recordingDescriptorOffset = nextRecordingDescriptorOffset;
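// Grow the catalog if the next descriptor would overflow the current capacity.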
if (recordingDescriptorOffset + frameLength > capacity)
{
growCatalog(MAX_CATALOG_LENGTH, frameLength);
}
final long recordingId = nextRecordingId;
catalogBuffer.wrap(catalogByteBuffer, recordingDescriptorOffset, frameLength);
descriptorEncoder
.wrap(catalogBuffer, DESCRIPTOR_HEADER_LENGTH)
.recordingId(recordingId)
.startTimestamp(startTimestamp)
.stopTimestamp(stopTimestamp)
.startPosition(startPosition)
.stopPosition(stopPosition)
.initialTermId(imageInitialTermId)
.segmentFileLength(segmentFileLength)
.termBufferLength(termBufferLength)
.mtuLength(mtuLength)
.sessionId(sessionId)
.streamId(streamId)
.strippedChannel(strippedChannel)
.originalChannel(originalChannel)
.sourceIdentity(sourceIdentity);
final int recordingLength = frameLength - DESCRIPTOR_HEADER_LENGTH;
descriptorHeaderEncoder
.wrap(catalogBuffer, 0)
.length(recordingLength)
.checksum(computeRecordingDescriptorChecksum(recordingDescriptorOffset, recordingLength))
.state(VALID);
catalogHeaderEncoder.nextRecordingId(recordingId + 1);
forceWrites(catalogChannel);
nextRecordingId = recordingId + 1;
nextRecordingDescriptorOffset = recordingDescriptorOffset + frameLength;
catalogIndex.add(recordingId, recordingDescriptorOffset);
return recordingId;
} | @Test
void shouldAppendToExistingIndex()
{
final long newRecordingId;
try (Catalog catalog = new Catalog(archiveDir, null, 0, CAPACITY, () -> 3L, null, segmentFileBuffer))
{
newRecordingId = catalog.addNewRecording(
32,
128,
21,
42,
5,
SEGMENT_LENGTH,
TERM_LENGTH,
MTU_LENGTH,
9,
4,
"channelJ",
"channelJ?tag=f",
"sourceN");
}
try (Catalog catalog = new Catalog(archiveDir, clock))
{
verifyRecordingForId(
catalog,
recordingOneId,
160,
0L,
0L, // updated from NULL_POSITION when Catalog was created for write
0L,
3L, // updated from NULL_TIMESTAMP when Catalog was created for write
0,
SEGMENT_LENGTH,
TERM_LENGTH,
MTU_LENGTH,
6,
1,
"channelG",
"channelG?tag=f",
"sourceA");
verifyRecordingForId(
catalog,
newRecordingId,
160,
32,
128,
21,
42,
5,
SEGMENT_LENGTH,
TERM_LENGTH,
MTU_LENGTH,
9,
4,
"channelJ",
"channelJ?tag=f",
"sourceN");
}
} |
@Override
public CoordinatorRecord deserialize(
ByteBuffer keyBuffer,
ByteBuffer valueBuffer
) throws RuntimeException {
final short recordType = readVersion(keyBuffer, "key");
final ApiMessage keyMessage = apiMessageKeyFor(recordType);
readMessage(keyMessage, keyBuffer, recordType, "key");
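// A null value buffer represents a tombstone: a record with a key but no value payload.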
if (valueBuffer == null) {
return new CoordinatorRecord(new ApiMessageAndVersion(keyMessage, recordType), null);
}
final ApiMessage valueMessage = apiMessageValueFor(recordType);
final short valueVersion = readVersion(valueBuffer, "value");
readMessage(valueMessage, valueBuffer, valueVersion, "value");
return new CoordinatorRecord(
new ApiMessageAndVersion(keyMessage, recordType),
new ApiMessageAndVersion(valueMessage, valueVersion)
);
} | @Test
public void testDeserializeWithInvalidValueBytes() {
GroupCoordinatorRecordSerde serde = new GroupCoordinatorRecordSerde();
ApiMessageAndVersion key = new ApiMessageAndVersion(
new ConsumerGroupMetadataKey().setGroupId("foo"),
(short) 3
);
ByteBuffer keyBuffer = MessageUtil.toVersionPrefixedByteBuffer(key.version(), key.message());
ByteBuffer valueBuffer = ByteBuffer.allocate(2);
valueBuffer.putShort((short) 0);
valueBuffer.rewind();
RuntimeException ex =
assertThrows(RuntimeException.class,
() -> serde.deserialize(keyBuffer, valueBuffer));
assertTrue(ex.getMessage().startsWith("Could not read record with version 0 from value's buffer due to"),
ex.getMessage());
} |
@Override
public CloseableIterator<String> readLines(Component file) {
requireNonNull(file, "Component should not be null");
checkArgument(file.getType() == FILE, "Component '%s' is not a file", file);
Optional<CloseableIterator<String>> linesIteratorOptional = reportReader.readFileSource(file.getReportAttributes().getRef());
checkState(linesIteratorOptional.isPresent(), "File '%s' has no source code", file);
CloseableIterator<String> lineIterator = linesIteratorOptional.get();
return new ComponentLinesCloseableIterator(file, lineIterator, file.getFileAttributes().getLines());
} | @Test
public void read_lines_throws_ISE_when_sourceLines_has_more_elements_then_lineCount() {
reportReader.putFileSourceLines(FILE_REF, "line1", "line2", "line3");
assertThatThrownBy(() -> consume(underTest.readLines(createComponent(2))))
.isInstanceOf(IllegalStateException.class)
.hasMessage("Source of file 'ReportComponent{ref=2, key='FILE_KEY', type=FILE}' has at least one more line than the expected number (2)");
} |
public FloatArrayAsIterable usingTolerance(double tolerance) {
return new FloatArrayAsIterable(tolerance(tolerance), iterableSubject());
} | @Test
public void usingTolerance_contains_success() {
assertThat(array(1.1f, TOLERABLE_2POINT2, 3.2f))
.usingTolerance(DEFAULT_TOLERANCE)
.contains(2.2f);
} |
@Override
protected String getRootKey() {
return Constants.HEADER_OSS + mBucketName;
} | @Test
public void testGetRootKey() {
Assert.assertEquals(Constants.HEADER_OSS + BUCKET_NAME, mOSSUnderFileSystem.getRootKey());
} |
public FEELFnResult<Boolean> invoke(@ParameterName("negand") Object negand) {
if ( negand != null && !(negand instanceof Boolean) ) {
return FEELFnResult.ofError( new InvalidParametersEvent( Severity.ERROR, "negand", "must be a boolean value" ) );
}
return FEELFnResult.ofResult( negand == null ? null : !((Boolean) negand) );
} | @Test
void invokeFalse() {
FunctionTestUtil.assertResult(notFunction.invoke(false), true);
} |
@Override
public UserAccount updateDb(final UserAccount userAccount) {
Document id = new Document(USER_ID, userAccount.getUserId());
Document dataSet = new Document(USER_NAME, userAccount.getUserName())
.append(ADD_INFO, userAccount.getAdditionalInfo());
db.getCollection(CachingConstants.USER_ACCOUNT)
.updateOne(id, new Document("$set", dataSet));
return userAccount;
} | @Test
void updateDb() {
MongoCollection<Document> mongoCollection = mock(MongoCollection.class);
when(db.getCollection(CachingConstants.USER_ACCOUNT)).thenReturn(mongoCollection);
assertDoesNotThrow(() -> mongoDb.updateDb(userAccount));
} |
public static String getBirthByIdCard(String idcard) {
return getBirth(idcard);
} | @Test
public void getBirthByIdCardTest() {
String birth = IdcardUtil.getBirthByIdCard(ID_18);
assertEquals(birth, "19781216");
String birth2 = IdcardUtil.getBirthByIdCard(ID_15);
assertEquals(birth2, "19880730");
} |
@Override
public ChannelFuture writeAndFlush(Object msg) {
CompletableFuture<Void> processFuture = new CompletableFuture<>();
try {
if (msg instanceof RemotingCommand) {
ProxyContext context = ProxyContext.createForInner(this.getClass())
.setRemoteAddress(remoteAddress)
.setLocalAddress(localAddress);
RemotingCommand command = (RemotingCommand) msg;
if (command.getExtFields() == null) {
command.setExtFields(new HashMap<>());
}
switch (command.getCode()) {
case RequestCode.CHECK_TRANSACTION_STATE: {
CheckTransactionStateRequestHeader header = (CheckTransactionStateRequestHeader) command.readCustomHeader();
MessageExt messageExt = MessageDecoder.decode(ByteBuffer.wrap(command.getBody()), true, false, false);
RelayData<TransactionData, Void> relayData = this.proxyRelayService.processCheckTransactionState(context, command, header, messageExt);
processFuture = this.processCheckTransaction(header, messageExt, relayData.getProcessResult(), relayData.getRelayFuture());
break;
}
case RequestCode.GET_CONSUMER_RUNNING_INFO: {
GetConsumerRunningInfoRequestHeader header = (GetConsumerRunningInfoRequestHeader) command.readCustomHeader();
CompletableFuture<ProxyRelayResult<ConsumerRunningInfo>> relayFuture = this.proxyRelayService.processGetConsumerRunningInfo(context, command, header);
processFuture = this.processGetConsumerRunningInfo(command, header, relayFuture);
break;
}
case RequestCode.CONSUME_MESSAGE_DIRECTLY: {
ConsumeMessageDirectlyResultRequestHeader header = (ConsumeMessageDirectlyResultRequestHeader) command.readCustomHeader();
MessageExt messageExt = MessageDecoder.decode(ByteBuffer.wrap(command.getBody()), true, false, false);
processFuture = this.processConsumeMessageDirectly(command, header, messageExt,
this.proxyRelayService.processConsumeMessageDirectly(context, command, header));
break;
}
default:
break;
}
} else {
processFuture = processOtherMessage(msg);
}
} catch (Throwable t) {
log.error("process failed. msg:{}", msg, t);
processFuture.completeExceptionally(t);
}
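// Bridge the CompletableFuture result onto a Netty channel promise for the caller.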
DefaultChannelPromise promise = new DefaultChannelPromise(this, GlobalEventExecutor.INSTANCE);
processFuture.thenAccept(ignore -> promise.setSuccess())
.exceptionally(t -> {
promise.setFailure(t);
return null;
});
return promise;
} | @Test
public void testWriteAndFlush() throws Exception {
when(this.proxyRelayService.processCheckTransactionState(any(), any(), any(), any()))
.thenReturn(new RelayData<>(mock(TransactionData.class), new CompletableFuture<>()));
ArgumentCaptor<ConsumeMessageDirectlyResultRequestHeader> consumeMessageDirectlyArgumentCaptor =
ArgumentCaptor.forClass(ConsumeMessageDirectlyResultRequestHeader.class);
when(this.proxyRelayService.processConsumeMessageDirectly(any(), any(), consumeMessageDirectlyArgumentCaptor.capture()))
.thenReturn(new CompletableFuture<>());
ArgumentCaptor<GetConsumerRunningInfoRequestHeader> getConsumerRunningInfoArgumentCaptor =
ArgumentCaptor.forClass(GetConsumerRunningInfoRequestHeader.class);
when(this.proxyRelayService.processGetConsumerRunningInfo(any(), any(), getConsumerRunningInfoArgumentCaptor.capture()))
.thenReturn(new CompletableFuture<>());
CheckTransactionStateRequestHeader checkTransactionStateRequestHeader = new CheckTransactionStateRequestHeader();
checkTransactionStateRequestHeader.setTransactionId(MessageClientIDSetter.createUniqID());
RemotingCommand checkTransactionRequest = RemotingCommand.createRequestCommand(RequestCode.CHECK_TRANSACTION_STATE, checkTransactionStateRequestHeader);
MessageExt transactionMessageExt = new MessageExt();
transactionMessageExt.setTopic("topic");
transactionMessageExt.setTags("tags");
transactionMessageExt.setBornHost(NetworkUtil.string2SocketAddress("127.0.0.2:8888"));
transactionMessageExt.setStoreHost(NetworkUtil.string2SocketAddress("127.0.0.1:10911"));
transactionMessageExt.setBody(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
transactionMessageExt.setMsgId(MessageClientIDSetter.createUniqID());
checkTransactionRequest.setBody(MessageDecoder.encode(transactionMessageExt, false));
GetConsumerRunningInfoRequestHeader consumerRunningInfoRequestHeader = new GetConsumerRunningInfoRequestHeader();
consumerRunningInfoRequestHeader.setConsumerGroup("group");
consumerRunningInfoRequestHeader.setClientId("clientId");
RemotingCommand consumerRunningInfoRequest = RemotingCommand.createRequestCommand(RequestCode.GET_CONSUMER_RUNNING_INFO, consumerRunningInfoRequestHeader);
ConsumeMessageDirectlyResultRequestHeader consumeMessageDirectlyResultRequestHeader = new ConsumeMessageDirectlyResultRequestHeader();
consumeMessageDirectlyResultRequestHeader.setConsumerGroup("group");
consumeMessageDirectlyResultRequestHeader.setClientId("clientId");
MessageExt consumeMessageDirectlyMessageExt = new MessageExt();
consumeMessageDirectlyMessageExt.setTopic("topic");
consumeMessageDirectlyMessageExt.setTags("tags");
consumeMessageDirectlyMessageExt.setBornHost(NetworkUtil.string2SocketAddress("127.0.0.2:8888"));
consumeMessageDirectlyMessageExt.setStoreHost(NetworkUtil.string2SocketAddress("127.0.0.1:10911"));
consumeMessageDirectlyMessageExt.setBody(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
consumeMessageDirectlyMessageExt.setMsgId(MessageClientIDSetter.createUniqID());
RemotingCommand consumeMessageDirectlyResult = RemotingCommand.createRequestCommand(RequestCode.CONSUME_MESSAGE_DIRECTLY, consumeMessageDirectlyResultRequestHeader);
consumeMessageDirectlyResult.setBody(MessageDecoder.encode(consumeMessageDirectlyMessageExt, false));
MockProxyChannel channel = new MockProxyChannel(this.proxyRelayService, null, "127.0.0.2:8888", "127.0.0.1:10911") {
@Override
protected CompletableFuture<Void> processOtherMessage(Object msg) {
return CompletableFuture.completedFuture(null);
}
@Override
protected CompletableFuture<Void> processCheckTransaction(CheckTransactionStateRequestHeader header,
MessageExt messageExt, TransactionData transactionData, CompletableFuture<ProxyRelayResult<Void>> responseFuture) {
assertEquals(checkTransactionStateRequestHeader, header);
assertArrayEquals(transactionMessageExt.getBody(), messageExt.getBody());
return CompletableFuture.completedFuture(null);
}
@Override
protected CompletableFuture<Void> processGetConsumerRunningInfo(RemotingCommand command,
GetConsumerRunningInfoRequestHeader header,
CompletableFuture<ProxyRelayResult<ConsumerRunningInfo>> responseFuture) {
assertEquals(consumerRunningInfoRequestHeader, getConsumerRunningInfoArgumentCaptor.getValue());
assertEquals(consumerRunningInfoRequestHeader, header);
return CompletableFuture.completedFuture(null);
}
@Override
protected CompletableFuture<Void> processConsumeMessageDirectly(RemotingCommand command,
ConsumeMessageDirectlyResultRequestHeader header, MessageExt messageExt,
CompletableFuture<ProxyRelayResult<ConsumeMessageDirectlyResult>> responseFuture) {
assertEquals(consumeMessageDirectlyResultRequestHeader, consumeMessageDirectlyArgumentCaptor.getValue());
assertEquals(consumeMessageDirectlyResultRequestHeader, header);
assertArrayEquals(consumeMessageDirectlyMessageExt.getBody(), messageExt.getBody());
return CompletableFuture.completedFuture(null);
}
};
assertTrue(channel.writeAndFlush(checkTransactionRequest).isSuccess());
assertTrue(channel.writeAndFlush(consumerRunningInfoRequest).isSuccess());
assertTrue(channel.writeAndFlush(consumeMessageDirectlyResult).isSuccess());
} |
public static Instant earlier(Instant time1, Instant time2) {
return time1.isBefore(time2) ? time1 : time2;
} | @Test
public void earlier() {
Instant t1 = Instant.now(); // earlier
Instant t2 = t1.plusSeconds(1); // later
assertEquals(t1, TimeUtils.earlier(t1, t2));
assertEquals(t1, TimeUtils.earlier(t2, t1));
assertEquals(t1, TimeUtils.earlier(t1, t1));
assertEquals(t2, TimeUtils.earlier(t2, t2));
} |
public static MessageDigest digest(String algorithm) {
final Matcher matcher = WITH_PATTERN.matcher(algorithm);
final String digestAlgorithm = matcher.matches() ? matcher.group(1) : algorithm;
try {
return MessageDigest.getInstance(digestAlgorithm);
} catch (NoSuchAlgorithmException e) {
throw new CryptoException("Invalid algorithm", e);
}
} | @Test
public void digestUsingName() {
assertEquals("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS",
Hex.toHexString(DigestUtils.digest("SHA-256").digest(new byte[0]))
);
} |
@Override
public SelDouble assignOps(SelOp op, SelType rhs) {
SelTypeUtil.checkTypeMatch(this.type(), rhs.type());
double another = ((SelDouble) rhs).val;
switch (op) {
case ASSIGN:
this.val = another;
return this;
case ADD_ASSIGN:
this.val += another;
return this;
case SUB_ASSIGN:
this.val -= another;
return this;
case MUL_ASSIGN:
this.val *= another;
return this;
case DIV_ASSIGN:
this.val /= another;
return this;
case MOD_ASSIGN:
this.val %= another;
return this;
default:
throw new UnsupportedOperationException(
"float/Float/double/Doubles DO NOT support assignment operation " + op);
}
} | @Test
public void testAssignOps() {
SelDouble obj = SelDouble.of(2.2);
SelDouble res = one.assignOps(SelOp.ASSIGN, obj);
assertEquals(2.2, res.doubleVal(), 0.01);
res = one.assignOps(SelOp.ASSIGN, SelDouble.of(3.3));
assertEquals(2.2, obj.doubleVal(), 0.01);
assertEquals(3.3, res.doubleVal(), 0.01);
res = one.assignOps(SelOp.ADD_ASSIGN, obj);
assertEquals(5.5, res.doubleVal(), 0.01);
res = one.assignOps(SelOp.SUB_ASSIGN, obj);
assertEquals(3.3, res.doubleVal(), 0.01);
res = one.assignOps(SelOp.MUL_ASSIGN, obj);
assertEquals(7.26, res.doubleVal(), 0.01);
res = one.assignOps(SelOp.DIV_ASSIGN, obj);
assertEquals(3.3, res.doubleVal(), 0.01);
res = one.assignOps(SelOp.MOD_ASSIGN, obj);
assertEquals(1.1, res.doubleVal(), 0.01);
} |
@Nonnull
@Override
public Result addChunk(ByteBuf buffer) {
final byte[] readable = new byte[buffer.readableBytes()];
buffer.readBytes(readable, buffer.readerIndex(), buffer.readableBytes());
final GELFMessage msg = new GELFMessage(readable);
final ByteBuf aggregatedBuffer;
switch (msg.getGELFType()) {
case CHUNKED:
try {
chunkCounter.inc();
aggregatedBuffer = checkForCompletion(msg);
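// Null means more chunks are still outstanding; report a valid but empty result.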
if (aggregatedBuffer == null) {
return VALID_EMPTY_RESULT;
}
} catch (IllegalArgumentException | IllegalStateException | IndexOutOfBoundsException e) {
log.debug("Invalid gelf message chunk, dropping message.", e);
return INVALID_RESULT;
}
break;
case ZLIB:
case GZIP:
case UNCOMPRESSED:
aggregatedBuffer = Unpooled.wrappedBuffer(readable);
break;
case UNSUPPORTED:
return INVALID_RESULT;
default:
return INVALID_RESULT;
}
return new Result(aggregatedBuffer, true);
} | @Test
public void outOfOrderChunks() {
final ByteBuf[] chunks = createChunkedMessage(4096 + 512, 1024); // creates 5 chunks
CodecAggregator.Result result = null;
for (int i = chunks.length - 1; i >= 0; i--) {
result = aggregator.addChunk(chunks[i]);
if (i != 0) {
assertNull("message still incomplete", result.getMessage());
}
}
assertNotNull(result);
assertNotNull("first chunk should've completed the message", result.getMessage());
assertEquals(1, counterValueNamed(metricRegistry, COMPLETE_MESSAGES));
assertEquals(5, counterValueNamed(metricRegistry, CHUNK_COUNTER));
assertEquals(0, counterValueNamed(metricRegistry, WAITING_MESSAGES));
assertEquals(0, counterValueNamed(metricRegistry, EXPIRED_CHUNKS));
assertEquals(0, counterValueNamed(metricRegistry, EXPIRED_MESSAGES));
assertEquals(0, counterValueNamed(metricRegistry, DUPLICATE_CHUNKS));
} |
public void popMessageAsync(
final String brokerName, final String addr, final PopMessageRequestHeader requestHeader,
final long timeoutMillis, final PopCallback popCallback
) throws RemotingException, InterruptedException {
final RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.POP_MESSAGE, requestHeader);
this.remotingClient.invokeAsync(addr, request, timeoutMillis, new InvokeCallback() {
@Override
public void operationComplete(ResponseFuture responseFuture) {
}
@Override
public void operationSucceed(RemotingCommand response) {
try {
PopResult popResult = MQClientAPIImpl.this.processPopResponse(brokerName, response, requestHeader.getTopic(), requestHeader);
popCallback.onSuccess(popResult);
} catch (Exception e) {
popCallback.onException(e);
}
}
@Override
public void operationFail(Throwable throwable) {
popCallback.onException(throwable);
}
});
} | @Test
public void testPopLmqMessage_async() throws Exception {
final long popTime = System.currentTimeMillis();
final int invisibleTime = 10 * 1000;
final String lmqTopic = MixAll.LMQ_PREFIX + "lmq1";
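// Stub the async invoke so the callback receives a synthesized FOUND response carrying one LMQ message.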
doAnswer((Answer<Void>) mock -> {
InvokeCallback callback = mock.getArgument(3);
RemotingCommand request = mock.getArgument(1);
ResponseFuture responseFuture = new ResponseFuture(null, request.getOpaque(), 3 * 1000, null, null);
RemotingCommand response = RemotingCommand.createResponseCommand(PopMessageResponseHeader.class);
response.setCode(ResponseCode.SUCCESS);
response.setOpaque(request.getOpaque());
PopMessageResponseHeader responseHeader = (PopMessageResponseHeader) response.readCustomHeader();
responseHeader.setInvisibleTime(invisibleTime);
responseHeader.setPopTime(popTime);
responseHeader.setReviveQid(0);
responseHeader.setRestNum(1);
StringBuilder startOffsetInfo = new StringBuilder(64);
ExtraInfoUtil.buildStartOffsetInfo(startOffsetInfo, topic, 0, 0L);
responseHeader.setStartOffsetInfo(startOffsetInfo.toString());
StringBuilder msgOffsetInfo = new StringBuilder(64);
ExtraInfoUtil.buildMsgOffsetInfo(msgOffsetInfo, topic, 0, Collections.singletonList(0L));
responseHeader.setMsgOffsetInfo(msgOffsetInfo.toString());
response.setRemark("FOUND");
response.makeCustomHeaderToNet();
MessageExt message = new MessageExt();
message.setQueueId(3);
message.setFlag(0);
message.setQueueOffset(5L);
message.setCommitLogOffset(11111L);
message.setSysFlag(0);
message.setBornTimestamp(System.currentTimeMillis());
message.setBornHost(new InetSocketAddress("127.0.0.1", 10));
message.setStoreTimestamp(System.currentTimeMillis());
message.setStoreHost(new InetSocketAddress("127.0.0.1", 11));
message.setBody("body".getBytes());
message.setTopic(topic);
message.putUserProperty("key", "value");
message.putUserProperty(MessageConst.PROPERTY_INNER_MULTI_DISPATCH, lmqTopic);
message.getProperties().put(MessageConst.PROPERTY_INNER_MULTI_QUEUE_OFFSET, String.valueOf(0));
response.setBody(MessageDecoder.encode(message, false));
responseFuture.setResponseCommand(response);
callback.operationSucceed(responseFuture.getResponseCommand());
return null;
}).when(remotingClient).invokeAsync(anyString(), any(RemotingCommand.class), anyLong(), any(InvokeCallback.class));
final CountDownLatch done = new CountDownLatch(1);
final PopMessageRequestHeader requestHeader = new PopMessageRequestHeader();
requestHeader.setTopic(lmqTopic);
mqClientAPI.popMessageAsync(brokerName, brokerAddr, requestHeader, 10 * 1000, new PopCallback() {
@Override
public void onSuccess(PopResult popResult) {
assertThat(popResult.getPopStatus()).isEqualTo(PopStatus.FOUND);
assertThat(popResult.getRestNum()).isEqualTo(1);
assertThat(popResult.getInvisibleTime()).isEqualTo(invisibleTime);
assertThat(popResult.getPopTime()).isEqualTo(popTime);
assertThat(popResult.getMsgFoundList()).size().isEqualTo(1);
assertThat(popResult.getMsgFoundList().get(0).getTopic()).isEqualTo(lmqTopic);
assertThat(popResult.getMsgFoundList().get(0).getProperty(MessageConst.PROPERTY_INNER_MULTI_DISPATCH))
.isEqualTo(lmqTopic);
done.countDown();
}
@Override
public void onException(Throwable e) {
Assertions.fail("want no exception but got one", e);
done.countDown();
}
});
done.await();
} |
public static AscendingLongIterator and(AscendingLongIterator[] iterators) {
return new AndIterator(iterators);
} | @Test
public void testAnd() {
long seed = System.nanoTime();
System.out.println(getClass().getSimpleName() + ".testAnd seed: " + seed);
actual.add(new SparseBitSet());
expected.add(new TreeSet<>());
verifyAnd();
actual.clear();
expected.clear();
generate(0, 100, 1);
verifyAnd();
generate(100, 100, 1);
verifyAnd();
generate(0, 75000, 1);
verifyAnd();
generate(100, 40000, 2);
verifyAnd();
generate(200, 30000, 3);
verifyAnd();
actual.add(new SparseBitSet());
expected.add(new TreeSet<>());
verifyAnd();
actual.remove(actual.size() - 1);
expected.remove(expected.size() - 1);
generate(2000000, 30000, 3);
verifyAnd();
actual.remove(actual.size() - 1);
expected.remove(expected.size() - 1);
generateRandom(seed, 10000, 50000);
verifyAnd();
generateRandom(seed, 500000, -1);
verifyAnd();
} |
@SuppressWarnings("unchecked")
@Override
public void configure(Map<String, ?> configs, boolean isKey) {
if (inner != null) {
log.error("Could not configure ListSerializer as the parameter has already been set -- inner: {}", inner);
throw new ConfigException("List serializer was already initialized using a non-default constructor");
}
final String innerSerdePropertyName = isKey ? CommonClientConfigs.DEFAULT_LIST_KEY_SERDE_INNER_CLASS : CommonClientConfigs.DEFAULT_LIST_VALUE_SERDE_INNER_CLASS;
final Object innerSerdeClassOrName = configs.get(innerSerdePropertyName);
if (innerSerdeClassOrName == null) {
throw new ConfigException("Not able to determine the serializer class because it was neither passed via the constructor nor set in the config.");
}
try {
if (innerSerdeClassOrName instanceof String) {
inner = Utils.newInstance((String) innerSerdeClassOrName, Serde.class).serializer();
} else if (innerSerdeClassOrName instanceof Class) {
inner = (Serializer<Inner>) ((Serde) Utils.newInstance((Class) innerSerdeClassOrName)).serializer();
} else {
throw new KafkaException("Could not create a serializer class instance using \"" + innerSerdePropertyName + "\" property.");
}
inner.configure(configs, isKey);
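// Fixed-length inner serializers permit a constant-size encoding; everything else is length-prefixed per entry.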
serStrategy = FIXED_LENGTH_SERIALIZERS.contains(inner.getClass()) ? SerializationStrategy.CONSTANT_SIZE : SerializationStrategy.VARIABLE_SIZE;
} catch (final ClassNotFoundException e) {
throw new ConfigException(innerSerdePropertyName, innerSerdeClassOrName, "Serializer class " + innerSerdeClassOrName + " could not be found.");
}
} | @Test
public void testListValueSerializerNoArgConstructorsShouldThrowKafkaExceptionDueInvalidClass() {
props.put(CommonClientConfigs.DEFAULT_LIST_VALUE_SERDE_INNER_CLASS, new FakeObject());
final KafkaException exception = assertThrows(
KafkaException.class,
() -> listSerializer.configure(props, false)
);
assertEquals("Could not create a serializer class instance using \"" + CommonClientConfigs.DEFAULT_LIST_VALUE_SERDE_INNER_CLASS + "\" property.", exception.getMessage());
} |
@Override
public BeamSqlTable buildBeamSqlTable(Table table) {
return new MongoDbTable(table);
} | @Test
public void testBuildBeamSqlTable_withUsernameOnly() {
Table table = fakeTable("TEST", "mongodb://username@localhost:27017/database/collection");
BeamSqlTable sqlTable = provider.buildBeamSqlTable(table);
assertNotNull(sqlTable);
assertTrue(sqlTable instanceof MongoDbTable);
MongoDbTable mongoTable = (MongoDbTable) sqlTable;
assertEquals("mongodb://username@localhost:27017", mongoTable.dbUri);
assertEquals("database", mongoTable.dbName);
assertEquals("collection", mongoTable.dbCollection);
} |
public synchronized void clean() {
try {
cleanStateAndTaskDirectoriesCalledByUser();
} catch (final Exception e) {
throw new StreamsException(e);
}
try {
if (stateDir.exists()) {
Utils.delete(globalStateDir().getAbsoluteFile());
}
} catch (final IOException exception) {
log.error(
String.format("%s Failed to delete global state directory of %s due to an unexpected exception",
logPrefix(), appId),
exception
);
throw new StreamsException(exception);
}
try {
if (hasPersistentStores && stateDir.exists() && !stateDir.delete()) {
log.warn(
String.format("%s Failed to delete state store directory of %s for it is not empty",
logPrefix(), stateDir.getAbsolutePath())
);
}
} catch (final SecurityException exception) {
log.error(
String.format("%s Failed to delete state store directory of %s due to an unexpected exception",
logPrefix(), stateDir.getAbsolutePath()),
exception
);
throw new StreamsException(exception);
}
} | @Test
public void shouldRemoveEmptyNamedTopologyDirsWhenCallingClean() throws IOException {
initializeStateDirectory(true, true);
final File namedTopologyDir = new File(appDir, "__topology1__");
assertThat(namedTopologyDir.mkdir(), is(true));
assertThat(namedTopologyDir.exists(), is(true));
directory.clean();
assertThat(namedTopologyDir.exists(), is(false));
} |
@Override
public int size() {
return count(members, selector);
} | @Test
public void testSizeWhenThisLiteMembersSelected() {
Collection<MemberImpl> collection = new MemberSelectingCollection<>(members, LITE_MEMBER_SELECTOR);
assertEquals(2, collection.size());
} |
@Override
protected int command() {
if (!validateConfigFilePresent()) {
return 1;
}
final MigrationConfig config;
try {
config = MigrationConfig.load(getConfigFile());
} catch (KsqlException | MigrationException e) {
LOGGER.error(e.getMessage());
return 1;
}
return command(getMigrationsDir(getConfigFile(), config));
} | @Test
public void shouldCreateWithExplicitVersion() {
// Given:
command = PARSER.parse(DESCRIPTION, "-v", "12");
// When:
final int result = command.command(migrationsDir);
// Then:
assertThat(result, is(0));
final File expectedFile = new File(Paths.get(migrationsDir, "V000012__" + EXPECTED_FILE_SUFFIX).toString());
assertThat(expectedFile.exists(), is(true));
assertThat(expectedFile.isDirectory(), is(false));
} |
@Override
public String toString() {
return "CSV Input ("
+ StringUtils.showControlCharacters(String.valueOf(getFieldDelimiter()))
+ ") "
+ Arrays.toString(getFilePaths());
} | @Test
void testPojoType() throws Exception {
File tempFile = File.createTempFile("CsvReaderPojoType", "tmp");
tempFile.deleteOnExit();
tempFile.setWritable(true);
OutputStreamWriter wrt = new OutputStreamWriter(new FileOutputStream(tempFile));
wrt.write("123,AAA,3.123,BBB\n");
wrt.write("456,BBB,1.123,AAA\n");
wrt.close();
@SuppressWarnings("unchecked")
PojoTypeInfo<PojoItem> typeInfo =
(PojoTypeInfo<PojoItem>) TypeExtractor.createTypeInfo(PojoItem.class);
CsvInputFormat<PojoItem> inputFormat =
new PojoCsvInputFormat<PojoItem>(new Path(tempFile.toURI().toString()), typeInfo);
inputFormat.configure(new Configuration());
FileInputSplit[] splits = inputFormat.createInputSplits(1);
inputFormat.open(splits[0]);
validatePojoItem(inputFormat);
} |
@Override
public Subnet subnet(String subnetId) {
checkArgument(!Strings.isNullOrEmpty(subnetId), ERR_NULL_SUBNET_ID);
return osNetworkStore.subnet(subnetId);
} | @Test
public void testGetSubnetById() {
createBasicNetworks();
assertTrue("Subnet did not match", target.subnet(SUBNET_ID) != null);
assertTrue("Subnet did not match", target.subnet(UNKNOWN_ID) == null);
} |
@Override
public void setPermission(final Path file, final TransferStatus status) throws BackgroundException {
try {
// Read owner from bucket
final AccessControlList list = this.toAcl(status.getAcl());
final Path bucket = containerService.getContainer(file);
if(containerService.isContainer(file)) {
session.getClient().putBucketAcl(bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), list);
}
else {
session.getClient().putObjectAcl(bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file), list);
}
}
catch(ServiceException e) {
final BackgroundException failure = new S3ExceptionMappingService().map("Cannot change permissions of {0}", e, file);
if(file.isDirectory()) {
if(failure instanceof NotfoundException) {
// No placeholder file may exist but we just have a common prefix
return;
}
}
throw failure;
}
} | @Test(expected = NotfoundException.class)
public void testWriteNotFound() throws Exception {
final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
final Path test = new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final S3AccessControlListFeature f = new S3AccessControlListFeature(session);
final Acl acl = new Acl();
acl.addAll(new Acl.Owner("80b9982b7b08045ee86680cc47f43c84bf439494a89ece22b5330f8a49477cf6"), new Acl.Role(Acl.Role.FULL));
acl.addAll(new Acl.GroupUser(Acl.GroupUser.EVERYONE), new Acl.Role(Acl.Role.READ));
f.setPermission(test, acl);
} |
public GoPluginBundleDescriptor build(BundleOrPluginFileDetails bundleOrPluginJarFile) {
if (!bundleOrPluginJarFile.exists()) {
throw new RuntimeException(format("Plugin or bundle jar does not exist: %s", bundleOrPluginJarFile.file()));
}
String defaultId = bundleOrPluginJarFile.file().getName();
GoPluginBundleDescriptor goPluginBundleDescriptor = new GoPluginBundleDescriptor(GoPluginDescriptor.builder()
.version("1")
.id(defaultId)
.bundleLocation(bundleOrPluginJarFile.extractionLocation())
.pluginJarFileLocation(bundleOrPluginJarFile.file().getAbsolutePath())
.isBundledPlugin(bundleOrPluginJarFile.isBundledPlugin())
.build());
try {
if (bundleOrPluginJarFile.isBundleJar()) {
return GoPluginBundleDescriptorParser.parseXML(bundleOrPluginJarFile.getBundleXml(), bundleOrPluginJarFile);
}
if (bundleOrPluginJarFile.isPluginJar()) {
return GoPluginDescriptorParser.parseXML(bundleOrPluginJarFile.getPluginXml(), bundleOrPluginJarFile);
}
goPluginBundleDescriptor.markAsInvalid(List.of(format("Plugin with ID (%s) is not valid. The plugin does not seem to contain plugin.xml or gocd-bundle.xml", defaultId)), new RuntimeException("The plugin does not seem to contain plugin.xml or gocd-bundle.xml"));
} catch (Exception e) {
log.warn("Unable to load the jar file {}", bundleOrPluginJarFile.file(), e);
final String message = requireNonNullElse(e.getMessage(), e.getClass().getCanonicalName());
String cause = e.getCause() != null ? format("%s. Cause: %s", message, e.getCause().getMessage()) : message;
goPluginBundleDescriptor.markAsInvalid(List.of(format("Plugin with ID (%s) is not valid: %s", defaultId, cause)), e);
}
return goPluginBundleDescriptor;
} | @Test
void shouldThrowExceptionForInvalidPluginIfThePluginJarDoesNotExist() {
BundleOrPluginFileDetails bundleOrPluginFileDetails = new BundleOrPluginFileDetails(new File("not-existing.jar"), true, pluginDirectory);
assertThatCode(() -> goPluginBundleDescriptorBuilder.build(bundleOrPluginFileDetails))
.isInstanceOf(RuntimeException.class);
} |
public static CharSequence unescapeCsv(CharSequence value) {
int length = checkNotNull(value, "value").length();
if (length == 0) {
return value;
}
int last = length - 1;
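// A value is treated as quoted only when it both starts and ends with a double quote and is longer than one character.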
boolean quoted = isDoubleQuote(value.charAt(0)) && isDoubleQuote(value.charAt(last)) && length != 1;
if (!quoted) {
validateCsvFormat(value);
return value;
}
StringBuilder unescaped = InternalThreadLocalMap.get().stringBuilder();
for (int i = 1; i < last; i++) {
char current = value.charAt(i);
if (current == DOUBLE_QUOTE) {
if (isDoubleQuote(value.charAt(i + 1)) && (i + 1) != last) {
// Followed by a double-quote but not the last character
// Just skip the next double-quote
i++;
} else {
// Not followed by a double-quote or the following double-quote is the last character
throw newInvalidEscapedCsvFieldException(value, i);
}
}
unescaped.append(current);
}
return unescaped.toString();
} | @Test
public void unescapeCsvWithCRAndWithoutQuote() {
assertThrows(IllegalArgumentException.class, new Executable() {
@Override
public void execute() {
unescapeCsv("\r");
}
});
} |