repo (string) | pull_number (int64) | instance_id (string) | issue_numbers (sequence) | base_commit (string) | patch (string) | test_patch (string) | problem_statement (string) | hints_text (string) | created_at (timestamp[ns, tz=UTC]) | version (float64)
---|---|---|---|---|---|---|---|---|---|---
apollographql/apollo-client | 8372 | apollographql__apollo-client-8372 | ["8370"] | 175321b1aedbd21daa82f93ddddadfbd4fcaeb37 | diff --git a/src/cache/inmemory/writeToStore.ts b/src/cache/inmemory/writeToStore.ts
--- a/src/cache/inmemory/writeToStore.ts
+++ b/src/cache/inmemory/writeToStore.ts
@@ -1,4 +1,4 @@
-import { SelectionSetNode, FieldNode } from 'graphql';
+import { SelectionSetNode, FieldNode, SelectionNode } from 'graphql';
import { invariant, InvariantError } from 'ts-invariant';
import { equal } from '@wry/equality';
@@ -39,6 +39,11 @@ export interface WriteContext extends ReadMergeModifyContext {
merge<T>(existing: T, incoming: T): T;
// If true, merge functions will be called with undefined existing data.
overwrite: boolean;
+ incomingById: Map<string, {
+ fields: StoreObject;
+ mergeTree: MergeTree;
+ selections: Set<SelectionNode>;
+ }>;
};
interface ProcessSelectionSetOptions {
@@ -70,28 +75,75 @@ export class StoreWriter {
...variables!,
};
+ const context: WriteContext = {
+ store,
+ written: Object.create(null),
+ merge<T>(existing: T, incoming: T) {
+ return merger.merge(existing, incoming) as T;
+ },
+ variables,
+ varString: canonicalStringify(variables),
+ fragmentMap: createFragmentMap(getFragmentDefinitions(query)),
+ overwrite: !!overwrite,
+ incomingById: new Map,
+ };
+
const ref = this.processSelectionSet({
result: result || Object.create(null),
dataId,
selectionSet: operationDefinition.selectionSet,
mergeTree: { map: new Map },
- context: {
- store,
- written: Object.create(null),
- merge<T>(existing: T, incoming: T) {
- return merger.merge(existing, incoming) as T;
- },
- variables,
- varString: canonicalStringify(variables),
- fragmentMap: createFragmentMap(getFragmentDefinitions(query)),
- overwrite: !!overwrite,
- },
+ context,
});
if (!isReference(ref)) {
throw new InvariantError(`Could not identify object ${JSON.stringify(result)}`);
}
+ // So far, the store has not been modified, so now it's time to process
+ // context.incomingById and merge those incoming fields into context.store.
+ context.incomingById.forEach(({ fields, mergeTree, selections }, dataId) => {
+ const entityRef = makeReference(dataId);
+
+ if (mergeTree.map.size) {
+ fields = this.applyMerges(mergeTree, entityRef, fields, context);
+ }
+
+ if (process.env.NODE_ENV !== "production" && !context.overwrite) {
+ const hasSelectionSet = (storeFieldName: string) =>
+ fieldsWithSelectionSets.has(fieldNameFromStoreName(storeFieldName));
+ const fieldsWithSelectionSets = new Set<string>();
+ selections.forEach(selection => {
+ if (isField(selection) && selection.selectionSet) {
+ fieldsWithSelectionSets.add(selection.name.value);
+ }
+ });
+
+ const hasMergeFunction = (storeFieldName: string) => {
+ const childTree = mergeTree.map.get(storeFieldName);
+ return Boolean(childTree && childTree.info && childTree.info.merge);
+ };
+
+ Object.keys(fields).forEach(storeFieldName => {
+ // If a merge function was defined for this field, trust that it
+ // did the right thing about (not) clobbering data. If the field
+ // has no selection set, it's a scalar field, so it doesn't need
+ // a merge function (even if it's an object, like JSON data).
+ if (hasSelectionSet(storeFieldName) &&
+ !hasMergeFunction(storeFieldName)) {
+ warnAboutDataLoss(
+ entityRef,
+ fields,
+ storeFieldName,
+ context.store,
+ );
+ }
+ });
+ }
+
+ store.merge(dataId, fields);
+ });
+
// Any IDs written explicitly to the cache will be retained as
// reachable root IDs for garbage collection purposes. Although this
// logic includes root IDs like ROOT_QUERY and ROOT_MUTATION, their
@@ -170,9 +222,9 @@ export class StoreWriter {
incomingFields.__typename = typename;
}
- const workSet = new Set(selectionSet.selections);
+ const selections = new Set(selectionSet.selections);
- workSet.forEach(selection => {
+ selections.forEach(selection => {
if (!shouldInclude(selection, context.variables)) return;
if (isField(selection)) {
@@ -192,9 +244,41 @@ export class StoreWriter {
let incomingValue =
this.processFieldValue(value, selection, context, childTree);
- const childTypename = selection.selectionSet
- && context.store.getFieldValue<string>(incomingValue as StoreObject, "__typename")
- || void 0;
+ // To determine if this field holds a child object with a merge
+ // function defined in its type policy (see PR #7070), we need to
+ // figure out the child object's __typename.
+ let childTypename: string | undefined;
+
+ // The field's value can be an object that has a __typename only if
+ // the field has a selection set. Otherwise incomingValue is scalar.
+ if (selection.selectionSet) {
+ // We attempt to find the child __typename first in context.store,
+ // but the child object may not exist in the store yet, likely
+ // because it's being written for the first time, during this very
+ // call to writeToStore. Note: if incomingValue is a non-normalized
+ // StoreObject (not a Reference), getFieldValue will read from that
+ // object's properties to find its __typename.
+ childTypename = context.store.getFieldValue<string>(
+ incomingValue as StoreObject | Reference,
+ "__typename",
+ );
+
+ // If the child object is being written for the first time, but
+ // incomingValue is a Reference, then the entity that Reference
+ // identifies should have an entry in context.incomingById, which
+ // likely contains a __typename field we can use. After all, how
+ // could we know the object's ID if it had no __typename? If we
+ // wrote data into context.store as each processSelectionSet call
+ // finished processing an entity object, the child object would
+ // already be in context.store, so we wouldn't need this extra
+ // check, but holding all context.store.merge calls until after
+ // we've finished all processSelectionSet work is cleaner and solves
+ // other problems, such as issue #8370.
+ if (!childTypename && isReference(incomingValue)) {
+ const info = context.incomingById.get(incomingValue.__ref);
+ childTypename = info && info.fields.__typename;
+ }
+ }
const merge = policies.getMergeFunction(
typename,
@@ -257,53 +341,29 @@ export class StoreWriter {
// __typename strings produced by server/schema changes, which
// would otherwise be breaking changes.
policies.fragmentMatches(fragment, typename, result, context.variables)) {
- fragment.selectionSet.selections.forEach(workSet.add, workSet);
+ fragment.selectionSet.selections.forEach(selections.add, selections);
}
}
});
if ("string" === typeof dataId) {
- const entityRef = makeReference(dataId);
-
- if (mergeTree.map.size) {
- incomingFields = this.applyMerges(mergeTree, entityRef, incomingFields, context);
- }
-
- if (process.env.NODE_ENV !== "production" && !context.overwrite) {
- const hasSelectionSet = (storeFieldName: string) =>
- fieldsWithSelectionSets.has(fieldNameFromStoreName(storeFieldName));
- const fieldsWithSelectionSets = new Set<string>();
- workSet.forEach(selection => {
- if (isField(selection) && selection.selectionSet) {
- fieldsWithSelectionSets.add(selection.name.value);
- }
- });
-
- const hasMergeFunction = (storeFieldName: string) => {
- const childTree = mergeTree.map.get(storeFieldName);
- return Boolean(childTree && childTree.info && childTree.info.merge);
- };
-
- Object.keys(incomingFields).forEach(storeFieldName => {
- // If a merge function was defined for this field, trust that it
- // did the right thing about (not) clobbering data. If the field
- // has no selection set, it's a scalar field, so it doesn't need
- // a merge function (even if it's an object, like JSON data).
- if (hasSelectionSet(storeFieldName) &&
- !hasMergeFunction(storeFieldName)) {
- warnAboutDataLoss(
- entityRef,
- incomingFields,
- storeFieldName,
- context.store,
- );
- }
+ const previous = context.incomingById.get(dataId);
+ if (previous) {
+ previous.fields = context.merge(previous.fields, incomingFields);
+ previous.mergeTree = mergeMergeTrees(previous.mergeTree, mergeTree);
+ // Add all previous SelectionNode objects, rather than creating a new
+ // Set, since the original unmerged selections Set is not going to be
+ // needed again (only the merged Set).
+ previous.selections.forEach(selections.add, selections);
+ previous.selections = selections;
+ } else {
+ context.incomingById.set(dataId, {
+ fields: incomingFields,
+ mergeTree,
+ selections,
});
}
-
- context.store.merge(dataId, incomingFields);
-
- return entityRef;
+ return makeReference(dataId);
}
return incomingFields;
@@ -388,11 +448,13 @@ export class StoreWriter {
};
mergeTree.map.forEach((childTree, storeFieldName) => {
+ const eVal = getValue(e, storeFieldName);
+ const iVal = getValue(i, storeFieldName);
+ // If we have no incoming data, leave any existing data untouched.
+ if (void 0 === iVal) return;
if (getStorageArgs) {
getStorageArgs.push(storeFieldName);
}
- const eVal = getValue(e, storeFieldName);
- const iVal = getValue(i, storeFieldName);
const aVal = this.applyMerges(
childTree,
eVal,
@@ -444,14 +506,59 @@ function getChildMergeTree(
return map.get(name)!;
}
+function mergeMergeTrees(
+ left: MergeTree | undefined,
+ right: MergeTree | undefined,
+): MergeTree {
+ if (left === right || !right || mergeTreeIsEmpty(right)) return left!;
+ if (!left || mergeTreeIsEmpty(left)) return right;
+
+ const info = left.info && right.info ? {
+ ...left.info,
+ ...right.info,
+ } : left.info || right.info;
+
+ const needToMergeMaps = left.map.size && right.map.size;
+ const map = needToMergeMaps ? new Map :
+ left.map.size ? left.map : right.map;
+
+ const merged = { info, map };
+
+ if (needToMergeMaps) {
+ const remainingRightKeys = new Set(right.map.keys());
+
+ left.map.forEach((leftTree, key) => {
+ merged.map.set(
+ key,
+ mergeMergeTrees(leftTree, right.map.get(key)),
+ );
+ remainingRightKeys.delete(key);
+ });
+
+ remainingRightKeys.forEach(key => {
+ merged.map.set(
+ key,
+ mergeMergeTrees(
+ right.map.get(key),
+ left.map.get(key),
+ ),
+ );
+ });
+ }
+
+ return merged;
+}
+
+function mergeTreeIsEmpty(tree: MergeTree | undefined): boolean {
+ return !tree || !(tree.info || tree.map.size);
+}
+
function maybeRecycleChildMergeTree(
{ map }: MergeTree,
name: string | number,
) {
const childTree = map.get(name);
- if (childTree &&
- !childTree.info &&
- !childTree.map.size) {
+ if (childTree && mergeTreeIsEmpty(childTree)) {
emptyMergeTreePool.push(childTree);
map.delete(name);
}
| diff --git a/src/cache/inmemory/__tests__/__snapshots__/writeToStore.ts.snap b/src/cache/inmemory/__tests__/__snapshots__/writeToStore.ts.snap
--- a/src/cache/inmemory/__tests__/__snapshots__/writeToStore.ts.snap
+++ b/src/cache/inmemory/__tests__/__snapshots__/writeToStore.ts.snap
@@ -34,6 +34,70 @@ Object {
}
`;
+exports[`writing to the store correctly merges fragment fields along multiple paths 1`] = `
+Object {
+ "Item:0f47f85d-8081-466e-9121-c94069a77c3e": Object {
+ "__typename": "Item",
+ "id": "0f47f85d-8081-466e-9121-c94069a77c3e",
+ "value": Object {
+ "__typename": "Container",
+ "value": Object {
+ "__typename": "Value",
+ "item": Object {
+ "__ref": "Item:6dc3530b-6731-435e-b12a-0089d0ae05ac",
+ },
+ },
+ },
+ },
+ "Item:6dc3530b-6731-435e-b12a-0089d0ae05ac": Object {
+ "__typename": "Item",
+ "id": "6dc3530b-6731-435e-b12a-0089d0ae05ac",
+ "value": Object {
+ "__typename": "Container",
+ "text": "Hello World",
+ "value": Object {
+ "__typename": "Value",
+ },
+ },
+ },
+ "ROOT_QUERY": Object {
+ "__typename": "Query",
+ "item({\\"id\\":\\"123\\"})": Object {
+ "__ref": "Item:0f47f85d-8081-466e-9121-c94069a77c3e",
+ },
+ },
+}
+`;
+
+exports[`writing to the store should respect id fields added by fragments 1`] = `
+Object {
+ "AType:a-id": Object {
+ "__typename": "AType",
+ "b": Array [
+ Object {
+ "__ref": "BType:b-id",
+ },
+ ],
+ "id": "a-id",
+ },
+ "BType:b-id": Object {
+ "__typename": "BType",
+ "c": Object {
+ "__typename": "CType",
+ "title": "Your experience",
+ "titleSize": null,
+ },
+ "id": "b-id",
+ },
+ "ROOT_QUERY": Object {
+ "__typename": "Query",
+ "a": Object {
+ "__ref": "AType:a-id",
+ },
+ },
+}
+`;
+
exports[`writing to the store user objects should be able to have { __typename: "Mutation" } 1`] = `
Object {
"Gene:{\\"id\\":\\"SLC45A2\\"}": Object {
diff --git a/src/cache/inmemory/__tests__/writeToStore.ts b/src/cache/inmemory/__tests__/writeToStore.ts
--- a/src/cache/inmemory/__tests__/writeToStore.ts
+++ b/src/cache/inmemory/__tests__/writeToStore.ts
@@ -1402,6 +1402,145 @@ describe('writing to the store', () => {
});
});
+ it('correctly merges fragment fields along multiple paths', () => {
+ const cache = new InMemoryCache({
+ typePolicies: {
+ Container: {
+ // Uncommenting this line fixes the test, but should not be necessary,
+ // since the Container response object in question has the same
+ // identity along both paths.
+ // merge: true,
+ },
+ },
+ });
+
+ const query = gql`
+ query Query {
+ item(id: "123") {
+ id
+ value {
+ ...ContainerFragment
+ }
+ }
+ }
+
+ fragment ContainerFragment on Container {
+ value {
+ ...ValueFragment
+ item {
+ id
+ value {
+ text
+ }
+ }
+ }
+ }
+
+ fragment ValueFragment on Value {
+ item {
+ ...ItemFragment
+ }
+ }
+
+ fragment ItemFragment on Item {
+ value {
+ value {
+ __typename
+ }
+ }
+ }
+ `;
+
+ const data = {
+ item: {
+ __typename: "Item",
+ id: "0f47f85d-8081-466e-9121-c94069a77c3e",
+ value: {
+ __typename: "Container",
+ value: {
+ __typename: "Value",
+ item: {
+ __typename: "Item",
+ id: "6dc3530b-6731-435e-b12a-0089d0ae05ac",
+ value: {
+ __typename: "Container",
+ text: "Hello World",
+ value: {
+ __typename: "Value"
+ },
+ },
+ },
+ },
+ },
+ },
+ };
+
+ cache.writeQuery({
+ query,
+ data,
+ });
+
+ expect(cache.readQuery({ query })).toEqual(data);
+ expect(cache.extract()).toMatchSnapshot();
+ });
+
+ it('should respect id fields added by fragments', () => {
+ const query = gql`
+ query ABCQuery {
+ __typename
+ a {
+ __typename
+ id
+ ...SharedFragment
+ b {
+ __typename
+ c {
+ __typename
+ title
+ titleSize
+ }
+ }
+ }
+ }
+ fragment SharedFragment on AShared {
+ __typename
+ b {
+ __typename
+ id
+ c {
+ __typename
+ }
+ }
+ }
+ `;
+
+ const data = {
+ __typename: "Query",
+ a: {
+ __typename: "AType",
+ id: "a-id",
+ b: [{
+ __typename: "BType",
+ id: "b-id",
+ c: {
+ __typename: "CType",
+ title: "Your experience",
+ titleSize: null
+ },
+ }],
+ },
+ };
+
+ const cache = new InMemoryCache({
+ possibleTypes: { AShared: ["AType"] }
+ });
+
+ cache.writeQuery({ query, data });
+ expect(cache.readQuery({ query })).toEqual(data);
+
+ expect(cache.extract()).toMatchSnapshot();
+ });
+
it('should allow a union of objects of a different type, when overwriting a generated id with a real id', () => {
const dataWithPlaceholder = {
author: {
| Cache does not merge fragments correctly
**Intended outcome:**
Complex fragment merges work as though they were all specified in a single large query
**Actual outcome:**
Fragment merges can result in the cache dropping fields, which ends up returning an empty value from useQuery when ``returnPartialData`` is off, and missing fields when ``returnPartialData`` is on.
**How to reproduce the issue:**
https://github.com/chrbala/apollo-bug-repro/tree/data-missing
As the repo is presented, the data comes back empty, and there is a warning about cache data getting lost. Note that the [data comes back correctly](https://github.com/chrbala/apollo-bug-repro/blob/data-missing/src/index.jsx#L49) from the "server", but is corrupted in the cache.
* If ``returnPartialData`` is used, the query returns a value, but the ``text`` field is missing from the data.
* If the ``text`` field [on line 86](https://github.com/chrbala/apollo-bug-repro/blob/data-missing/src/index.jsx#L86) is replaced with ``__typename`` (the same as the contents of ``ItemFragment``), the query succeeds. This is likely because there are no conflicts for the item.
* If ``...ValueFragment`` is removed from [line 82](https://github.com/chrbala/apollo-bug-repro/blob/data-missing/src/index.jsx#L82), the query succeeds. This is likely because there are no conflicts for the item.
* If the ``id`` field is removed from [line 84](https://github.com/chrbala/apollo-bug-repro/blob/data-missing/src/index.jsx#L84), the query succeeds. This is likely because the item is included in the parent's item in the cache and is no longer normalized.
There appears to be a very broad issue (#8063) that shares this outcome, but this seems to be one of what are likely many root causes, so I thought it was appropriate to open a separate issue here.
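The test added in this PR notes that uncommenting `merge: true` for the `Container` type "fixes the test, but should not be necessary". Until the fix, that type-level merge was a workable mitigation; a minimal sketch, using the `Container` type from the reproduction above:
```ts
import { InMemoryCache } from "@apollo/client";

// Workaround sketch only: deep-merge Container objects written along
// multiple fragment paths, instead of letting the last write clobber fields.
// With this PR's fix, the explicit merge should no longer be necessary.
const cache = new InMemoryCache({
  typePolicies: {
    Container: {
      merge: true,
    },
  },
});
```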
**Versions**
System:
OS: macOS Mojave 10.14.6
Binaries:
Node: 12.13.1 - /usr/local/bin/node
Yarn: 1.22.10 - /usr/local/bin/yarn
npm: 6.12.1 - /usr/local/bin/npm
Browsers:
Chrome: 91.0.4472.77
Firefox: 89.0
Safari: 14.1
npmPackages:
@apollo/client: ^3.3.19 => 3.3.20
| 2021-06-11T22:38:24Z | 3.4 |
|
apollographql/apollo-client | 7581 | apollographql__apollo-client-7581 | ["7509"] | 00284d20f8038367dbb9b63790f2aaad0389813e | diff --git a/src/react/hooks/useReactiveVar.ts b/src/react/hooks/useReactiveVar.ts
--- a/src/react/hooks/useReactiveVar.ts
+++ b/src/react/hooks/useReactiveVar.ts
@@ -5,11 +5,18 @@ export function useReactiveVar<T>(rv: ReactiveVar<T>): T {
const value = rv();
// We don't actually care what useState thinks the value of the variable
// is, so we take only the update function from the returned array.
- const mute = rv.onNextChange(useState(value)[1]);
+ const [, setValue] = useState(value);
+ // We subscribe to variable updates on initial mount and when the value has
+ // changed. This avoids a subtle bug in React.StrictMode where multiple listeners
+ // are added, leading to inconsistent updates.
+ useEffect(() => rv.onNextChange(setValue), [value]);
// Once the component is unmounted, ignore future updates. Note that the
- // useEffect function returns the mute function without calling it,
+ // above useEffect function returns a mute function without calling it,
// allowing it to be called when the component unmounts. This is
- // equivalent to useEffect(() => () => mute(), []), but shorter.
- useEffect(() => mute, []);
+ // equivalent to the following, but shorter:
+ // useEffect(() => {
+ // const mute = rv.onNextChange(setValue);
+ // return () => mute();
+ // }, [value])
return value;
}
| diff --git a/src/react/hooks/__tests__/useReactiveVar.test.tsx b/src/react/hooks/__tests__/useReactiveVar.test.tsx
--- a/src/react/hooks/__tests__/useReactiveVar.test.tsx
+++ b/src/react/hooks/__tests__/useReactiveVar.test.tsx
@@ -1,4 +1,4 @@
-import React from "react";
+import React, { useEffect } from "react";
import { render, wait, act } from "@testing-library/react";
import { itAsync } from "../../../testing";
@@ -13,25 +13,23 @@ describe("useReactiveVar Hook", () => {
function Component() {
const count = useReactiveVar(counterVar);
- switch (++renderCount) {
- case 1:
- expect(count).toBe(0);
- act(() => {
+ useEffect(() => {
+ switch (++renderCount) {
+ case 1:
+ expect(count).toBe(0);
counterVar(count + 1);
- });
- break;
- case 2:
- expect(count).toBe(1);
- act(() => {
+ break;
+ case 2:
+ expect(count).toBe(1);
counterVar(counterVar() + 2);
- });
- break;
- case 3:
- expect(count).toBe(3);
- break;
- default:
- reject(`too many (${renderCount}) renders`);
- }
+ break;
+ case 3:
+ expect(count).toBe(3);
+ break;
+ default:
+ reject(`too many (${renderCount}) renders`);
+ }
+ });
return null;
}
@@ -129,20 +127,13 @@ describe("useReactiveVar Hook", () => {
function Component() {
const count = useReactiveVar(counterVar);
- switch (++renderCount) {
- case 1:
- expect(count).toBe(0);
- act(() => {
- counterVar(count + 1);
- });
- break;
- case 2:
- expect(count).toBe(1);
- act(() => {
- counterVar(counterVar() + 2);
- });
- break;
- case 3:
+ useEffect(() => {
+ if (count < 3) {
+ expect(count).toBe(renderCount++);
+ counterVar(count + 1);
+ }
+
+ if (count === 3) {
expect(count).toBe(3);
setTimeout(() => {
unmount();
@@ -151,10 +142,8 @@ describe("useReactiveVar Hook", () => {
attemptedUpdateAfterUnmount = true;
}, 10);
}, 10);
- break;
- default:
- reject(`too many (${renderCount}) renders`);
- }
+ }
+ });
return null;
}
 | When React.StrictMode wraps ApolloProvider or is in the same component as ApolloProvider, using useEffect with async/await and reactive variables does not work.
**Intended outcome:**
For [example](https://codesandbox.io/s/wispy-cherry-e1t9d?file=/src/index.js), click the good button, and the console output looks like this.

**Actual outcome:**
Refresh the browser and click the bad button; the console output looks like this.

If React.StrictMode is moved into App.js, it works.
The arrangement below is bad too:
```tsx
<ApolloProvider client={client}>
<StrictMode>
<App />
</StrictMode>
</ApolloProvider>,
```
**How to reproduce the issue:**
https://codesandbox.io/s/wispy-cherry-e1t9d?file=/src/index.js
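For reference, a minimal usage sketch of the hook whose subscription logic this PR changes (the counter names are illustrative, mirroring the PR's tests):
```tsx
import React from "react";
import { makeVar, useReactiveVar } from "@apollo/client";

const counterVar = makeVar(0);

function Counter() {
  // With the fix, the listener is (re)registered inside a useEffect keyed on
  // the value, so StrictMode's double render no longer leaves extra listeners.
  const count = useReactiveVar(counterVar);
  return <button onClick={() => counterVar(count + 1)}>{count}</button>;
}
```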
**Versions**
| 2021-01-14T17:55:22Z | 3.3 |
|
apollographql/apollo-client | 7657 | apollographql__apollo-client-7657 | ["7593"] | 541d333c524c0d1384e1d51c81d1c5cf668ac2e8 | diff --git a/src/cache/inmemory/inMemoryCache.ts b/src/cache/inmemory/inMemoryCache.ts
--- a/src/cache/inmemory/inMemoryCache.ts
+++ b/src/cache/inmemory/inMemoryCache.ts
@@ -20,7 +20,7 @@ import {
import { StoreReader } from './readFromStore';
import { StoreWriter } from './writeToStore';
import { EntityStore, supportsResultCaching } from './entityStore';
-import { makeVar, forgetCache } from './reactiveVars';
+import { makeVar, forgetCache, recallCache } from './reactiveVars';
import {
defaultDataIdFromObject,
PossibleTypesMap,
@@ -194,6 +194,19 @@ export class InMemoryCache extends ApolloCache<NormalizedCacheObject> {
}
public watch(watch: Cache.WatchOptions): () => void {
+ if (!this.watches.size) {
+ // In case we previously called forgetCache(this) because
+ // this.watches became empty (see below), reattach this cache to any
+ // reactive variables on which it previously depended. It might seem
+ // paradoxical that we're able to recall something we supposedly
+ // forgot, but the point of calling forgetCache(this) is to silence
+ // useless broadcasts while this.watches is empty, and to allow the
+ // cache to be garbage collected. If, however, we manage to call
+ // recallCache(this) here, this cache object must not have been
+ // garbage collected yet, and should resume receiving updates from
+ // reactive variables, now that it has a watcher to notify.
+ recallCache(this);
+ }
this.watches.add(watch);
if (watch.immediate) {
this.maybeBroadcastWatch(watch);
diff --git a/src/cache/inmemory/reactiveVars.ts b/src/cache/inmemory/reactiveVars.ts
--- a/src/cache/inmemory/reactiveVars.ts
+++ b/src/cache/inmemory/reactiveVars.ts
@@ -35,10 +35,20 @@ const varsByCache = new WeakMap<ApolloCache<any>, Set<ReactiveVar<any>>>();
export function forgetCache(cache: ApolloCache<any>) {
const vars = varsByCache.get(cache);
- if (vars) {
- consumeAndIterate(vars, rv => rv.forgetCache(cache));
- varsByCache.delete(cache);
- }
+ if (vars) vars.forEach(rv => rv.forgetCache(cache));
+}
+
+// Calling forgetCache(cache) serves to silence broadcasts and allows the
+// cache to be garbage collected. However, the varsByCache WeakMap
+// preserves the set of reactive variables that were previously associated
+// with this cache, which makes it possible to "recall" the cache at a
+// later time, by reattaching it to those variables. If the cache has been
+// garbage collected in the meantime, because it is no longer reachable,
+// you won't be able to call recallCache(cache), and the cache will
+// automatically disappear from the varsByCache WeakMap.
+export function recallCache(cache: ApolloCache<any>) {
+ const vars = varsByCache.get(cache);
+ if (vars) vars.forEach(rv => rv.attachCache(cache));
}
export function makeVar<T>(value: T): ReactiveVar<T> {
@@ -86,14 +96,7 @@ export function makeVar<T>(value: T): ReactiveVar<T> {
return rv;
};
- rv.forgetCache = cache => {
- const deleted = caches.delete(cache);
- if (deleted) {
- const vars = varsByCache.get(cache);
- if (vars) vars.delete(rv);
- }
- return deleted;
- };
+ rv.forgetCache = cache => caches.delete(cache);
return rv;
}
| diff --git a/src/cache/inmemory/__tests__/cache.ts b/src/cache/inmemory/__tests__/cache.ts
--- a/src/cache/inmemory/__tests__/cache.ts
+++ b/src/cache/inmemory/__tests__/cache.ts
@@ -2658,6 +2658,104 @@ describe("ReactiveVar and makeVar", () => {
expect(spy).toBeCalledWith(cache);
});
+ it("should recall forgotten vars once cache has watches again", () => {
+ const { cache, nameVar, query } = makeCacheAndVar(false);
+ const spy = jest.spyOn(nameVar, "forgetCache");
+
+ const diffs: Cache.DiffResult<any>[] = [];
+ const watch = (immediate = true) => cache.watch({
+ query,
+ optimistic: true,
+ immediate,
+ callback(diff) {
+ diffs.push(diff);
+ },
+ });
+
+ const unwatchers = [
+ watch(),
+ watch(),
+ watch(),
+ ];
+
+ const names = () => diffs.map(diff => diff.result.onCall.name);
+
+ expect(diffs.length).toBe(3);
+ expect(names()).toEqual([
+ "Ben",
+ "Ben",
+ "Ben",
+ ]);
+
+ expect(cache["watches"].size).toBe(3);
+ expect(spy).not.toBeCalled();
+
+ unwatchers.pop()!();
+ expect(cache["watches"].size).toBe(2);
+ expect(spy).not.toBeCalled();
+
+ unwatchers.shift()!();
+ expect(cache["watches"].size).toBe(1);
+ expect(spy).not.toBeCalled();
+
+ nameVar("Hugh");
+ expect(names()).toEqual([
+ "Ben",
+ "Ben",
+ "Ben",
+ "Hugh",
+ ]);
+
+ unwatchers.pop()!();
+ expect(cache["watches"].size).toBe(0);
+ expect(spy).toBeCalledTimes(1);
+ expect(spy).toBeCalledWith(cache);
+
+ // This update is ignored because the cache no longer has any watchers.
+ nameVar("ignored");
+ expect(names()).toEqual([
+ "Ben",
+ "Ben",
+ "Ben",
+ "Hugh",
+ ]);
+
+ // Call watch(false) to avoid immediate delivery of the "ignored" name.
+ unwatchers.push(watch(false));
+ expect(cache["watches"].size).toBe(1);
+ expect(names()).toEqual([
+ "Ben",
+ "Ben",
+ "Ben",
+ "Hugh",
+ ]);
+
+ // This is the test that would fail if cache.watch did not call
+ // recallCache(cache) upon re-adding the first watcher.
+ nameVar("Jenn");
+ expect(names()).toEqual([
+ "Ben",
+ "Ben",
+ "Ben",
+ "Hugh",
+ "Jenn",
+ ]);
+
+ unwatchers.forEach(cancel => cancel());
+ expect(spy).toBeCalledTimes(2);
+ expect(spy).toBeCalledWith(cache);
+
+ // Ignored again because all watchers have been cancelled.
+ nameVar("also ignored");
+ expect(names()).toEqual([
+ "Ben",
+ "Ben",
+ "Ben",
+ "Hugh",
+ "Jenn",
+ ]);
+ });
+
it("should broadcast only once for multiple reads of same variable", () => {
const nameVar = makeVar("Ben");
const cache = new InMemoryCache({
 | Reactive vars stop broadcasting changes to the cache when a query becomes inactive and then active again (e.g. via navigation)
**Intended outcome:**
Changes made to local state managed with reactive variables should trigger an update on every active query that depends on that variable, regardless of whether the query was previously deactivated.
**Actual outcome:**
If a query was deactivated and then activated again, update notifications are no longer triggered.
**How to reproduce the issue:**
[CodeSandbox](https://codesandbox.io/s/apollogqlstatereactivevars-3u1op?file=/src/Home.js)
1. Click the "Click to increment" button several times - the change notification is broadcast and the component re-renders with the correct counter.
2. Navigate to "About" and go back to "Home".
3. Click the button again several times - **no update**.
4. If we now navigate to "About" and back to "Home" again, everything works again, and in fact we are back at step one.
Based on my research, when a query is deactivated the cache is detached from the reactive variable (forgetCache), but when the query is activated again the cache is not re-attached the way it is during the initial render or after the second navigation.
P.S. This works correctly if we use the useReactiveVar hook.
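A condensed sketch of the lifecycle described above, mirroring the test added in this PR (the field and variable names are illustrative):
```ts
import { InMemoryCache, makeVar, gql } from "@apollo/client";

const nameVar = makeVar("Ben");

const cache = new InMemoryCache({
  typePolicies: {
    Query: {
      fields: {
        // Read-function shorthand: reading this field makes the cache a
        // dependent of nameVar.
        name: () => nameVar(),
      },
    },
  },
});

const query = gql`query { name }`;
const watchOptions = {
  query,
  optimistic: true,
  callback: (diff: any) => console.log(diff.result),
};

const unwatch = cache.watch(watchOptions);
unwatch();                 // watches becomes empty -> forgetCache(cache)

cache.watch(watchOptions); // with this fix, recallCache(cache) re-attaches
nameVar("Hugh");           // ...so this update is broadcast again
```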
**Versions**
reproduced with: 3.3.6, 3.3.7
| @WojciechKulpa Thanks for the reproduction. I'll take a look at this soon (this week).
Any updates on this? We are facing the same issue and are looking forward to a fix for that! :)
@benjamn Cool if I re-assign this to myself? There's another issue related to reactive vars (#7609) that I'm going to look into, so might as well look into this too. | 2021-02-04T23:28:02Z | 3.3 |
apollographql/apollo-client | 7146 | apollographql__apollo-client-7146 | ["7145", "6154"] | 92094fa18edf52ce1c63e57e39c4e64fda150132 | diff --git a/src/core/ObservableQuery.ts b/src/core/ObservableQuery.ts
--- a/src/core/ObservableQuery.ts
+++ b/src/core/ObservableQuery.ts
@@ -620,6 +620,10 @@ once, rather than every time you call fetchMore.`);
},
};
+ public hasObservers() {
+ return this.observers.size > 0;
+ }
+
private tearDownQuery() {
const { queryManager } = this;
diff --git a/src/core/QueryManager.ts b/src/core/QueryManager.ts
--- a/src/core/QueryManager.ts
+++ b/src/core/QueryManager.ts
@@ -563,7 +563,7 @@ export class QueryManager<TStore> {
const observableQueryPromises: Promise<ApolloQueryResult<any>>[] = [];
this.queries.forEach(({ observableQuery }, queryId) => {
- if (observableQuery) {
+ if (observableQuery && observableQuery.hasObservers()) {
const fetchPolicy = observableQuery.options.fetchPolicy;
observableQuery.resetLastResults();
| diff --git a/src/core/__tests__/QueryManager/index.ts b/src/core/__tests__/QueryManager/index.ts
--- a/src/core/__tests__/QueryManager/index.ts
+++ b/src/core/__tests__/QueryManager/index.ts
@@ -3506,9 +3506,18 @@ describe('QueryManager', () => {
}
}
`;
-
- const queryManager = mockQueryManager(reject);
+ const data = {
+ author: {
+ firstName: 'John',
+ lastName: 'Smith',
+ },
+ };
+ const queryManager = mockQueryManager(reject, {
+ request: { query },
+ result: { data }
+ });
const obs = queryManager.watchQuery<any>({ query });
+ obs.subscribe({});
obs.refetch = resolve as any;
queryManager.resetStore();
@@ -3536,6 +3545,7 @@ describe('QueryManager', () => {
let refetchCount = 0;
const obs = queryManager.watchQuery(options);
+ obs.subscribe({});
obs.refetch = () => {
++refetchCount;
return null as never;
@@ -3570,6 +3580,41 @@ describe('QueryManager', () => {
let refetchCount = 0;
+ const obs = queryManager.watchQuery(options);
+ obs.subscribe({});
+ obs.refetch = () => {
+ ++refetchCount;
+ return null as never;
+ };
+
+ queryManager.resetStore();
+
+ setTimeout(() => {
+ expect(refetchCount).toEqual(0);
+ resolve();
+ }, 50);
+ });
+
+ itAsync('should not call refetch on a non-subscribed Observable if the store is reset', (resolve, reject) => {
+ const query = gql`
+ query {
+ author {
+ firstName
+ lastName
+ }
+ }
+ `;
+
+ const queryManager = createQueryManager({
+ link: mockSingleLink().setOnError(reject),
+ });
+
+ const options = {
+ query,
+ } as WatchQueryOptions;
+
+ let refetchCount = 0;
+
const obs = queryManager.watchQuery(options);
obs.refetch = () => {
++refetchCount;
@@ -3906,10 +3951,19 @@ describe('QueryManager', () => {
}
}
`;
-
- const queryManager = mockQueryManager(reject);
+ const data = {
+ author: {
+ firstName: 'John',
+ lastName: 'Smith',
+ },
+ };
+ const queryManager = mockQueryManager(reject, {
+ request: { query },
+ result: { data },
+ });
const obs = queryManager.watchQuery({ query });
+ obs.subscribe({});
obs.refetch = resolve as any;
queryManager.reFetchObservableQueries();
@@ -3937,6 +3991,7 @@ describe('QueryManager', () => {
let refetchCount = 0;
const obs = queryManager.watchQuery(options);
+ obs.subscribe({});
obs.refetch = () => {
++refetchCount;
return null as never;
@@ -3972,6 +4027,7 @@ describe('QueryManager', () => {
let refetchCount = 0;
const obs = queryManager.watchQuery(options);
+ obs.subscribe({});
obs.refetch = () => {
++refetchCount;
return null as never;
@@ -4007,6 +4063,7 @@ describe('QueryManager', () => {
let refetchCount = 0;
const obs = queryManager.watchQuery(options);
+ obs.subscribe({});
obs.refetch = () => {
++refetchCount;
return null as never;
@@ -4021,6 +4078,40 @@ describe('QueryManager', () => {
}, 50);
});
+ itAsync('should not call refetch on a non-subscribed Observable', (resolve, reject) => {
+ const query = gql`
+ query {
+ author {
+ firstName
+ lastName
+ }
+ }
+ `;
+
+ const queryManager = createQueryManager({
+ link: mockSingleLink().setOnError(reject),
+ });
+
+ const options = {
+ query
+ } as WatchQueryOptions;
+
+ let refetchCount = 0;
+
+ const obs = queryManager.watchQuery(options);
+ obs.refetch = () => {
+ ++refetchCount;
+ return null as never;
+ };
+
+ queryManager.reFetchObservableQueries();
+
+ setTimeout(() => {
+ expect(refetchCount).toEqual(0);
+ resolve();
+ }, 50);
+ });
+
itAsync('should NOT throw an error on an inflight query() if the observed queries are refetched', (resolve, reject) => {
let queryManager: QueryManager<NormalizedCacheObject>;
const query = gql`
| `resetStore` fetches `watchQuery` that has never been subscribed to
**Intended outcome:**
Calling ApolloClient.resetStore() should not execute queries that are not actively subscribed to.
**Actual outcome:**
Calling ApolloClient.resetStore() causes a `watchQuery` that has been assigned to a variable but _never subscribed to_ to fetch data.
**How to reproduce the issue:**
Follow instructions on [my Apollo-Angular StackBlitz reproduction](https://stackblitz.com/edit/apollo-client-3-reset-store?file=app/list.component.ts).
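A minimal sketch of the reported behavior (the query is hypothetical, and `client` stands in for the app's ApolloClient):
```ts
import { ApolloClient, InMemoryCache, gql } from "@apollo/client";

const client = new ApolloClient({ uri: "/graphql", cache: new InMemoryCache() });
const LIST = gql`query { items { id } }`; // illustrative query

async function demo() {
  // Assigned but never subscribed to:
  const observable = client.watchQuery({ query: LIST });

  await client.resetStore();
  // Before this fix, resetStore() refetched `observable` despite it having no
  // subscribers; with the fix, only queries with active observers refetch.
}
```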
**Versions**
```
System:
OS: macOS 10.15.6
Binaries:
Node: 12.18.4 - ~/.nvm/versions/node/v12.18.4/bin/node
Yarn: 1.22.5 - ~/.nvm/versions/node/v12.18.4/bin/yarn
npm: 6.14.8 - ~/.nvm/versions/node/v12.18.4/bin/npm
Browsers:
Chrome: 86.0.4240.75
Firefox: 81.0.1
Safari: 14.0
npmPackages:
@apollo/client: 3.2.2 => 3.2.2
apollo-angular: 2.0.4 => 2.0.4
apollo-server-express: 2.18.1 => 2.18.1
```
[3.0 beta-43]: reFetchObservableQueries causes skipped query to be fetched
We have some logic that checks which workspace a user is in when navigating through our app:
```javascript
const nodeId = UrlHelper.getNodeIdFromUrl(router);
const { data: nodeData, error: nodeError, loading: nodeLoading } = useQuery<
CurrentNodeQuery,
CurrentNodeQueryVariables
>(currentNodeQuery, {
skip: !nodeId,
variables: {
id: IdConverter.encodeId(nodeId)
},
errorPolicy: 'all'
});
```
If no workspace can be found, it's skipped.
If the user is anonymous, navigates elsewhere, and decides to log in, we clear the cache and refetch queries:
```javascript
await client.clearStore();
... other stuff...
await client.reFetchObservableQueries(false);
```
Doing this re-executes the query to check the workspace. However, at that point the "skip" logic is ignored and the query fails because there is no workspace in the context, causing the workspace ID to be null. Therefore we get the following error:
`"[GraphQL error]: Message: Field 'id' of variable 'id' has coerced Null value for NonNull type 'ID!', Location: [{"line":1,"column":19}], Path: undefined`
**Intended outcome:**
Ideally only non-skipped queries would be re-executed, or the skip logic would be re-run somehow (a callback function for "skip", perhaps?). This doesn't seem to be happening.
**Actual outcome:**
"reFetchObservableQueries" is refetching all queries, regardless of whether they should be skipped or not.
**How to reproduce the issue:**
I'm working on some reproduction code.
**Versions**
System:
OS: macOS 10.15.4
Binaries:
Node: 12.14.1 - ~/.nvm/versions/node/v12.14.1/bin/node
Yarn: 1.22.0 - ~/.nvm/versions/node/v12.14.1/bin/yarn
npm: 6.13.4 - ~/.nvm/versions/node/v12.14.1/bin/npm
Browsers:
Chrome: 81.0.4044.92
Firefox: 75.0
Safari: 13.1
npmPackages:
@apollo/client: ^3.0.0-beta.43 => 3.0.0-beta.43
@apollo/link-batch-http: ^2.0.0-beta.3 => 2.0.0-beta.3
@apollo/link-context: ^2.0.0-beta.3 => 2.0.0-beta.3
@apollo/link-error: ^2.0.0-beta.3 => 2.0.0-beta.3
@apollo/react-common: ^4.0.0-beta.1 => 4.0.0-beta.1
@apollo/react-components: ^4.0.0-beta.1 => 4.0.0-beta.1
@apollo/react-hoc: ^4.0.0-beta.1 => 4.0.0-beta.1
@apollo/react-ssr: ^4.0.0-beta.1 => 4.0.0-beta.1
apollo-link-log: ^1.1.3 => 1.1.3
apollo-server: ^2.11.0 => 2.11.0
react-apollo: ^3.1.3 => 3.1.3
|
This is affecting me as well; I arrived at the same conclusion as OP: reFetchObservableQueries is not respecting the skip option.
[The official guide here](https://www.apollographql.com/docs/react/v3.0-beta/networking/authentication/#reset-store-on-logout) recommends calling `client.resetStore()` on user logout, but it will unintentionally cause all skipped queries to fire due to this bug.
possibly related issues:
https://github.com/apollographql/apollo-client/issues/6342
https://github.com/apollographql/apollo-client/issues/6190
https://github.com/apollographql/react-apollo/issues/3492
Yep, broken. See https://github.com/apollographql/react-apollo/issues/3492#issuecomment-644385154.
I noticed that `client.resetStore()` is also executing skipped queries, which is very problematic. See: https://github.com/apollographql/apollo-client/issues/7113
Appears to have been caused by #6999 which was intended to fix #6796
What I don't understand is the use case/motivation behind #6796 in the first place. Why would you want to `refetch` a skipped query? If it's a workaround due to the mentioned `useLazyQyery` limitation, it seems wrong to accommodate that via `useQuery`. | 2020-10-10T22:22:54Z | 3.2 |
apollographql/apollo-client | 7075 | apollographql__apollo-client-7075 | ["7071"] | 91299ccb5ce089e2a358dc983f342944a4974035 | diff --git a/src/cache/inmemory/writeToStore.ts b/src/cache/inmemory/writeToStore.ts
--- a/src/cache/inmemory/writeToStore.ts
+++ b/src/cache/inmemory/writeToStore.ts
@@ -284,6 +284,15 @@ export class StoreWriter {
}
if (process.env.NODE_ENV !== "production") {
+ const hasSelectionSet = (storeFieldName: string) =>
+ fieldsWithSelectionSets.has(fieldNameFromStoreName(storeFieldName));
+ const fieldsWithSelectionSets = new Set<string>();
+ workSet.forEach(selection => {
+ if (isField(selection) && selection.selectionSet) {
+ fieldsWithSelectionSets.add(selection.name.value);
+ }
+ });
+
const hasMergeFunction = (storeFieldName: string) => {
const childTree = mergeTree.map.get(storeFieldName);
return Boolean(childTree && childTree.info && childTree.info.merge);
@@ -291,8 +300,11 @@ export class StoreWriter {
Object.keys(incomingFields).forEach(storeFieldName => {
// If a merge function was defined for this field, trust that it
- // did the right thing about (not) clobbering data.
- if (!hasMergeFunction(storeFieldName)) {
+ // did the right thing about (not) clobbering data. If the field
+ // has no selection set, it's a scalar field, so it doesn't need
+ // a merge function (even if it's an object, like JSON data).
+ if (hasSelectionSet(storeFieldName) &&
+ !hasMergeFunction(storeFieldName)) {
warnAboutDataLoss(
entityRef,
incomingFields,
| diff --git a/src/cache/inmemory/__tests__/__snapshots__/writeToStore.ts.snap b/src/cache/inmemory/__tests__/__snapshots__/writeToStore.ts.snap
--- a/src/cache/inmemory/__tests__/__snapshots__/writeToStore.ts.snap
+++ b/src/cache/inmemory/__tests__/__snapshots__/writeToStore.ts.snap
@@ -1,5 +1,39 @@
// Jest Snapshot v1, https://goo.gl/fbAQLP
+exports[`writing to the store "Cache data maybe lost..." warnings should not warn when scalar fields are updated 1`] = `
+Object {
+ "ROOT_QUERY": Object {
+ "__typename": "Query",
+ "currentTime({\\"tz\\":\\"UTC-5\\"})": Object {
+ "localeString": "9/25/2020, 1:08:33 PM",
+ },
+ "someJSON": Object {
+ "foos": Array [
+ "bar",
+ "baz",
+ ],
+ "oyez": 3,
+ },
+ },
+}
+`;
+
+exports[`writing to the store "Cache data maybe lost..." warnings should not warn when scalar fields are updated 2`] = `
+Object {
+ "ROOT_QUERY": Object {
+ "__typename": "Query",
+ "currentTime({\\"tz\\":\\"UTC-5\\"})": Object {
+ "msSinceEpoch": 1601053713081,
+ },
+ "someJSON": Object {
+ "asdf": "middle",
+ "qwer": "upper",
+ "zxcv": "lower",
+ },
+ },
+}
+`;
+
exports[`writing to the store user objects should be able to have { __typename: "Mutation" } 1`] = `
Object {
"Gene:{\\"id\\":\\"SLC45A2\\"}": Object {
diff --git a/src/cache/inmemory/__tests__/writeToStore.ts b/src/cache/inmemory/__tests__/writeToStore.ts
--- a/src/cache/inmemory/__tests__/writeToStore.ts
+++ b/src/cache/inmemory/__tests__/writeToStore.ts
@@ -1582,6 +1582,72 @@ describe('writing to the store', () => {
});
});
+ describe('"Cache data maybe lost..." warnings', () => {
+ const { warn } = console;
+ let warnings: any[][] = [];
+
+ beforeEach(() => {
+ warnings.length = 0;
+ console.warn = (...args: any[]) => {
+ warnings.push(args);
+ };
+ });
+
+ afterEach(() => {
+ console.warn = warn;
+ });
+
+ it("should not warn when scalar fields are updated", () => {
+ const cache = new InMemoryCache;
+
+ const query = gql`
+ query {
+ someJSON
+ currentTime(tz: "UTC-5")
+ }
+ `;
+
+ expect(warnings).toEqual([]);
+
+ const date = new Date(1601053713081);
+
+ cache.writeQuery({
+ query,
+ data: {
+ someJSON: {
+ oyez: 3,
+ foos: ["bar", "baz"],
+ },
+ currentTime: {
+ localeString: date.toLocaleString("en-US", {
+ timeZone: "America/New_York",
+ }),
+ },
+ },
+ });
+
+ expect(cache.extract()).toMatchSnapshot();
+ expect(warnings).toEqual([]);
+
+ cache.writeQuery({
+ query,
+ data: {
+ someJSON: {
+ qwer: "upper",
+ asdf: "middle",
+ zxcv: "lower",
+ },
+ currentTime: {
+ msSinceEpoch: date.getTime(),
+ },
+ },
+ });
+
+ expect(cache.extract()).toMatchSnapshot();
+ expect(warnings).toEqual([]);
+ });
+ });
+
describe('writeResultToStore shape checking', () => {
const query = gql`
query {
| Merge warnings reported for JSONObject custom scalar
**Intended outcome:**
My understanding is that all scalar values (custom or otherwise) should be treated opaquely and replaced when updating. Such updates should not trigger a data loss merge warning.
**Actual outcome:**
When merging objects containing fields of the custom scalar type JSONObject, merge warnings are generated in the console for these fields.
**How to reproduce the issue:**
Include the [JSONObject custom scalar](https://github.com/Urigo/graphql-scalars) in your schema and resolver map and use it in a type definition:
```
scalar JSONObject
type Deployment {
id: ID!
state: JSONObject
}
```
Then execute a query to fetch an object that uses JSONObject, and update the data in the cache so the merge algorithm runs:
```
query DeploymentQuery($id: ID) {
deployment(id: $id) {
id
state
}
}
```
Notice that when a deployment object with a different value for `state` is merged, you see warnings in the console about potential data loss for the `state` field.
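Until the fix, one workable mitigation (sketched here, not taken from the original report) was a field-level merge function that simply replaces the opaque scalar value, since defining any merge function tells the cache the replacement is intentional and suppresses the data-loss warning for that field:
```ts
import { InMemoryCache } from "@apollo/client";

const cache = new InMemoryCache({
  typePolicies: {
    Deployment: {
      fields: {
        state: {
          // Replace the opaque JSON blob wholesale.
          merge: (_existing, incoming) => incoming,
        },
      },
    },
  },
});
```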
**Versions**
```
System:
OS: macOS 10.15.6
Binaries:
Node: 12.18.2 - ~/.nvm/versions/node/v12.18.2/bin/node
npm: 6.14.5 - ~/.nvm/versions/node/v12.18.2/bin/npm
Browsers:
Chrome: 85.0.4183.121
Firefox: 79.0
Safari: 14.0
npmPackages:
@apollo/client: ^3.2.0 => 3.2.0
apollo-angular: ^2.0.3 => 2.0.4
```
| 2020-09-25T17:18:16Z | 3.3 |
|
apollographql/apollo-client | 7055 | apollographql__apollo-client-7055 | ["7040"] | 8d77510e1eee197442c4c56410c2a5d8d00b9eda | diff --git a/src/core/QueryManager.ts b/src/core/QueryManager.ts
--- a/src/core/QueryManager.ts
+++ b/src/core/QueryManager.ts
@@ -972,7 +972,8 @@ export class QueryManager<TStore> {
if (process.env.NODE_ENV !== 'production' &&
isNonEmptyArray(diff.missing) &&
- !equal(data, {})) {
+ !equal(data, {}) &&
+ !returnPartialData) {
invariant.warn(`Missing cache result fields: ${
diff.missing.map(m => m.path.join('.')).join(', ')
}`, diff.missing);
| diff --git a/src/core/__tests__/QueryManager/index.ts b/src/core/__tests__/QueryManager/index.ts
--- a/src/core/__tests__/QueryManager/index.ts
+++ b/src/core/__tests__/QueryManager/index.ts
@@ -5228,5 +5228,107 @@ describe('QueryManager', () => {
expect(queryManager['inFlightLinkObservables'].size).toBe(0)
});
- })
+ });
+
+ describe('missing cache field warnings', () => {
+ const originalWarn = console.warn;
+ let warnCount = 0;
+
+ beforeEach(() => {
+ warnCount = 0;
+ console.warn = (...args: any[]) => {
+ warnCount += 1;
+ };
+ });
+
+ afterEach(() => {
+ console.warn = originalWarn;
+ });
+
+ function validateWarnings(
+ resolve: (result?: any) => void,
+ reject: (reason?: any) => void,
+ returnPartialData = false,
+ expectedWarnCount = 1
+ ) {
+ const query1 = gql`
+ query {
+ car {
+ make
+ model
+ id
+ __typename
+ }
+ }
+ `;
+
+ const query2 = gql`
+ query {
+ car {
+ make
+ model
+ vin
+ id
+ __typename
+ }
+ }
+ `;
+
+ const data1 = {
+ car: {
+ make: 'Ford',
+ model: 'Pinto',
+ id: 123,
+ __typename: 'Car'
+ },
+ };
+
+ const queryManager = mockQueryManager(
+ reject,
+ {
+ request: { query: query1 },
+ result: { data: data1 },
+ },
+ );
+
+ const observable1 = queryManager.watchQuery<any>({ query: query1 });
+ const observable2 = queryManager.watchQuery<any>({
+ query: query2,
+ fetchPolicy: 'cache-only',
+ returnPartialData,
+ });
+
+ return observableToPromise(
+ { observable: observable1 },
+ result => {
+ expect(result).toEqual({
+ loading: false,
+ data: data1,
+ networkStatus: NetworkStatus.ready,
+ });
+ },
+ ).then(() => {
+ observableToPromise(
+ { observable: observable2 },
+ result => {
+ expect(result).toEqual({
+ data: data1,
+ loading: false,
+ networkStatus: NetworkStatus.ready,
+ partial: true,
+ });
+ expect(warnCount).toBe(expectedWarnCount);
+ },
+ ).then(resolve, reject)
+ });
+ }
+
+ itAsync('should show missing cache result fields warning when returnPartialData is false', (resolve, reject) => {
+ validateWarnings(resolve, reject, false, 1);
+ });
+
+ itAsync('should not show missing cache result fields warning when returnPartialData is true', (resolve, reject) => {
+ validateWarnings(resolve, reject, true, 0);
+ });
+ });
});
| returnPartialData usage leads to "Missing cache result fields" warning
We are upgrading from 2.x to 3.2.0. Everything is upgraded and working as expected, but in a few situations we have a query pattern where we...
1. Load a list of a given type in a list view
2. have a detail view that loads single records from the list with additional fields
3. uses `returnPartialData: true` and handles missing data gracefully in the UI
These still _appear_ to work as expected but now report `Missing cache result fields` and list the fields that are not in the list view query. I _think_ this is a mis-reported warning, as everything appears to work, and if I set `returnPartialData: false` the warnings go away.
Note also that this is a read-only API with no mutations, and all the objects/queries in question include an `id` field, so I don't think we're doing anything crazy cache-wise.
**Intended outcome:**
Running a query that is a partial cache hit with `returnPartialData: true` should not result in warnings for the fields that are missing as you explicitly requested partial data.
**Actual outcome:**
Warnings are shown for missing fields when partial data is returned.
**How to reproduce the issue:**
List Query...
```javascript
const LIST_SCENES = gql`
query ListScenes {
scenes { id, title }
}
`;
useQuery(LIST_SCENES);
```
Detail Query....
```javascript
const GET_SCENE = gql`
query GetScene($sceneId: String!) {
scene(id: $sceneId) { id, title, content }
}
`;
useQuery(GET_SCENE, { returnPartialData: true, variables: { sceneId: "my-scene-id" } });
// => WARN: Missing cache result fields: scene.content [{"clientOnly": false, "message": "Can't find field 'content' on Scene:my-scene-id object", "path": ["scene", "content"] ...
```
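A sketch of the "handles missing data gracefully" part of the pattern (the component shape is assumed, not from the original report; `GET_SCENE` is the query defined above):
```tsx
import React from "react";
import { useQuery } from "@apollo/client";

function SceneDetail({ sceneId }: { sceneId: string }) {
  const { data } = useQuery(GET_SCENE, {
    returnPartialData: true,
    variables: { sceneId },
  });

  // `content` may be absent while only the list query's partial entry is
  // cached; render the fields that are already available.
  return (
    <article>
      <h1>{data?.scene?.title}</h1>
      <p>{data?.scene?.content ?? "Loading…"}</p>
    </article>
  );
}
```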
**Versions**
```
System:
OS: macOS 10.15.6
Binaries:
Node: 13.9.0 - ~/.asdf/installs/nodejs/13.9.0/bin/node
Yarn: 1.22.5 - ~/dev/portal/app/node_modules/.bin/yarn
npm: 6.13.7 - ~/.asdf/installs/nodejs/13.9.0/bin/npm
Browsers:
Chrome: 85.0.4183.102
Safari: 14.0
npmPackages:
@apollo/client: ^3.2.0 => 3.2.0
apollo: ^2.30.3 => 2.30.3
apollo-link-queue: ^3.0.0 => 3.0.0
apollo-link-timeout: ^2.0.1 => 2.0.1
apollo-utilities: ^1.3.4 => 1.3.4
```
| @andykent Totally agree with this! We'll get this fixed soon, but for now I can assure you these are indeed just warnings, so you don't need to do anything about them. Sorry for the annoyance.
@benjamn Great news, thanks for the confirmation. | 2020-09-22T19:30:12Z | 3.2 |
apollographql/apollo-client | 6,587 | apollographql__apollo-client-6587 | [
"5877"
] | 0c0821f425b93a21b38704f624e1e5df846e4d03 | diff --git a/src/react/data/QueryData.ts b/src/react/data/QueryData.ts
--- a/src/react/data/QueryData.ts
+++ b/src/react/data/QueryData.ts
@@ -279,18 +279,6 @@ export class QueryData<TData, TVariables> extends OperationData {
return;
}
- // If we skipped previously, `previousResult.data` is set to undefined.
- // When this subscription is run after skipping, Apollo Client sends
- // the last query result data alongside the `loading` true state. This
- // means the previous skipped `data` of undefined and the incoming
- // data won't match, which would normally mean we want to trigger a
- // render to show the new data. In this case however we're already
- // showing the loading state, and want to avoid triggering an
- // additional and unnecessary render showing the same loading state.
- if (this.previousOptions.skip) {
- return;
- }
-
onNewData();
},
error: error => {
| diff --git a/src/react/hooks/__tests__/useQuery.test.tsx b/src/react/hooks/__tests__/useQuery.test.tsx
--- a/src/react/hooks/__tests__/useQuery.test.tsx
+++ b/src/react/hooks/__tests__/useQuery.test.tsx
@@ -2069,4 +2069,45 @@ describe('useQuery Hook', () => {
}).then(resolve, reject);
});
});
+
+ describe('Skipping', () => {
+ itAsync('should skip running a query when `skip` is `true`', (resolve, reject) => {
+ let renderCount = 0;
+
+ const Component = () => {
+ const [skip, setSkip] = useState(true);
+ const { loading, data } = useQuery(CAR_QUERY, { skip });
+
+ switch (++renderCount) {
+ case 1:
+ expect(loading).toBeFalsy();
+ expect(data).toBeUndefined();
+ setTimeout(() => setSkip(false));
+ break;
+ case 2:
+ expect(loading).toBeTruthy();
+ expect(data).toBeUndefined();
+ break;
+ case 3:
+ expect(loading).toBeFalsy();
+ expect(data).toEqual(CAR_RESULT_DATA);
+ break;
+ default:
+ reject("too many renders");
+ }
+
+ return null;
+ };
+
+ render(
+ <MockedProvider mocks={CAR_MOCKS}>
+ <Component />
+ </MockedProvider>
+ );
+
+ return wait(() => {
+ expect(renderCount).toBe(3);
+ }).then(resolve, reject);
+ });
+ });
});
| [3.0-beta] Changing skip to true on a hook ignores results from "fast" data sources
In a situation where:
- we use a hook with `skip: true`
- we switch the `skip` to false
- the link responds without a delay (it may be a Promise, but it cannot use `setTimeout`) - like a caching mechanism, mock, etc.
**Intended outcome:**
The query is executed and results are displayed
**Actual outcome:**
The query is executed, but results are *not* displayed - they're "loading" forever
Switching `skip` back to `true` and then to `false` again makes it work as intended.
**How to reproduce the issue:**
See this sandbox for a reproduction:
https://codesandbox.io/s/goofy-spence-0l96j
Click the `toggle skip` - you should see the results, but you'll see the "Loading..." screen. If you change the version of the package to `beta.14` - it will work again.
**Versions**
3.0.0-beta.29
Version 3.0.0-beta.14 works as expected, so the regression was introduced in or after beta.15.
**Potential Solution**
I was able to fix this by checking `this.options.skip` in this line instead:
https://github.com/apollographql/apollo-client/blob/master/src/react/data/QueryData.ts#L296
Currently, when the result of the query arrives, `previousOptions` still holds `skip: true`, so the result is ignored. `previousOptions` is set by the hook via `afterExecute`, but it seems that this is too late for the skipping logic to work as intended.
| 2020-07-13T13:46:55Z | 3 |
|
apollographql/apollo-client | 6,710 | apollographql__apollo-client-6710 | [
"6659",
"6659"
] | cabe6bb6b056e554f1209ca7ba2199f49be18060 | diff --git a/src/core/ObservableQuery.ts b/src/core/ObservableQuery.ts
--- a/src/core/ObservableQuery.ts
+++ b/src/core/ObservableQuery.ts
@@ -18,15 +18,9 @@ import {
WatchQueryOptions,
FetchMoreQueryOptions,
SubscribeToMoreOptions,
- ErrorPolicy,
} from './watchQueryOptions';
-import { QueryStoreValue } from './QueryInfo';
import { Reobserver } from './Reobserver';
-
-export type ApolloCurrentQueryResult<T> = ApolloQueryResult<T> & {
- error?: ApolloError;
- partial?: boolean;
-};
+import { QueryInfo } from './QueryInfo';
export interface FetchMoreOptions<
TData = any,
@@ -45,14 +39,6 @@ export interface UpdateQueryOptions<TVariables> {
variables?: TVariables;
}
-export const hasError = (
- storeValue: QueryStoreValue,
- policy: ErrorPolicy = 'none',
-) => storeValue && (
- storeValue.networkError ||
- (policy === 'none' && isNonEmptyArray(storeValue.graphQLErrors))
-);
-
let warnedAboutUpdateQuery = false;
export class ObservableQuery<
@@ -77,12 +63,15 @@ export class ObservableQuery<
private lastResult: ApolloQueryResult<TData>;
private lastResultSnapshot: ApolloQueryResult<TData>;
private lastError: ApolloError;
+ private queryInfo: QueryInfo;
constructor({
queryManager,
+ queryInfo,
options,
}: {
queryManager: QueryManager<any>;
+ queryInfo: QueryInfo;
options: WatchQueryOptions<TVariables>;
}) {
super((observer: Observer<ApolloQueryResult<TData>>) =>
@@ -101,6 +90,8 @@ export class ObservableQuery<
// related classes
this.queryManager = queryManager;
+
+ this.queryInfo = queryInfo;
}
public result(): Promise<ApolloQueryResult<TData>> {
@@ -134,26 +125,11 @@ export class ObservableQuery<
});
}
- public getCurrentResult(): ApolloCurrentQueryResult<TData> {
- const {
- lastResult,
- lastError,
- options: { fetchPolicy },
- } = this;
-
- const isNetworkFetchPolicy =
- fetchPolicy === 'network-only' ||
- fetchPolicy === 'no-cache';
-
- const networkStatus =
- lastError ? NetworkStatus.error :
- lastResult ? lastResult.networkStatus :
- isNetworkFetchPolicy ? NetworkStatus.loading :
- NetworkStatus.ready;
-
- const result: ApolloCurrentQueryResult<TData> = {
- data: !lastError && lastResult && lastResult.data || void 0,
- error: lastError,
+ public getCurrentResult(): ApolloQueryResult<TData> {
+ const { lastResult, lastError } = this;
+ const networkStatus = this.queryInfo.networkStatus || NetworkStatus.ready;
+ const result: ApolloQueryResult<TData> = {
+ ...(lastError ? { error: lastError } : lastResult),
loading: isNetworkRequestInFlight(networkStatus),
networkStatus,
};
@@ -162,51 +138,40 @@ export class ObservableQuery<
return result;
}
- const { data, partial } = this.getCurrentQueryResult();
- Object.assign(result, { data, partial });
-
- const queryStoreValue = this.queryManager.getQueryStoreValue(this.queryId);
- if (queryStoreValue) {
- const { networkStatus } = queryStoreValue;
-
- if (hasError(queryStoreValue, this.options.errorPolicy)) {
- return Object.assign(result, {
- data: void 0,
- networkStatus,
- error: new ApolloError({
- graphQLErrors: queryStoreValue.graphQLErrors,
- networkError: queryStoreValue.networkError,
- }),
- });
- }
-
- // Variables might have been added dynamically at query time, when
- // using `@client @export(as: "varname")` for example. When this happens,
- // the variables have been updated in the query store, but not updated on
- // the original `ObservableQuery`. We'll update the observable query
- // variables here to match, so retrieving from the cache doesn't fail.
- if (queryStoreValue.variables) {
- this.options.variables = {
- ...this.options.variables,
- ...(queryStoreValue.variables as TVariables),
- };
- }
-
- Object.assign(result, {
- loading: isNetworkRequestInFlight(networkStatus),
- networkStatus,
- });
-
- if (queryStoreValue.graphQLErrors && this.options.errorPolicy === 'all') {
- result.errors = queryStoreValue.graphQLErrors;
+ const { fetchPolicy = 'cache-first' } = this.options;
+ if (fetchPolicy === 'no-cache' ||
+ fetchPolicy === 'network-only') {
+ result.partial = false;
+ } else if (
+ !result.data ||
+ // If this.options.query has @client(always: true) fields, we cannot
+ // trust result.data, since it was read from the cache without
+ // running local resolvers (and it's too late to run resolvers now,
+ // since we must return a result synchronously). TODO In the future
+ // (after Apollo Client 3.0), we should find a way to trust
+ // this.lastResult in more cases, and read from the cache only in
+ // cases when no result has been received yet.
+ !this.queryManager.transform(this.options.query).hasForcedResolvers
+ ) {
+ const diff = this.queryInfo.getDiff();
+ result.partial = !diff.complete;
+ result.data = (
+ diff.complete ||
+ this.options.returnPartialData
+ ) ? diff.result : void 0;
+ // If the cache diff is complete, and we're using a FetchPolicy that
+ // terminates after a complete cache read, we can assume the next
+ // result we receive will have NetworkStatus.ready and !loading.
+ if (diff.complete &&
+ result.networkStatus === NetworkStatus.loading &&
+ (fetchPolicy === 'cache-first' ||
+ fetchPolicy === 'cache-only')) {
+ result.networkStatus = NetworkStatus.ready;
+ result.loading = false;
}
}
- if (partial) {
- this.resetLastResults();
- } else {
- this.updateLastResult(result);
- }
+ this.updateLastResult(result);
return result;
}
@@ -235,11 +200,7 @@ export class ObservableQuery<
}
public resetQueryStoreErrors() {
- const queryStore = this.queryManager.getQueryStoreValue(this.queryId);
- if (queryStore) {
- queryStore.networkError = undefined;
- queryStore.graphQLErrors = [];
- }
+ this.queryManager.resetErrors(this.queryId);
}
/**
@@ -307,21 +268,20 @@ export class ObservableQuery<
if (combinedOptions.notifyOnNetworkStatusChange) {
const currentResult = this.getCurrentResult();
- const queryInfo = this.queryManager.getQueryStoreValue(this.queryId);
- if (queryInfo) {
- // If we neglect to update queryInfo.networkStatus here,
- // getCurrentResult may return a loading:false result while
- // fetchMore is in progress, since getCurrentResult also consults
- // queryInfo.networkStatus. Note: setting queryInfo.networkStatus
- // to an in-flight status means that QueryInfo#shouldNotify will
- // return false while fetchMore is in progress, which is why we
- // call this.reobserve() explicitly in the .finally callback after
- // fetchMore (below), since the cache write will not automatically
- // trigger a notification, even though it does trigger a cache
- // broadcast. This is a good thing, because it means we won't see
- // intervening query notifications while fetchMore is pending.
- queryInfo.networkStatus = NetworkStatus.fetchMore;
- }
+
+ // If we neglect to update queryInfo.networkStatus here,
+ // getCurrentResult may return a loading:false result while
+ // fetchMore is in progress, since getCurrentResult also consults
+ // queryInfo.networkStatus. Note: setting queryInfo.networkStatus
+ // to an in-flight status means that QueryInfo#shouldNotify will
+ // return false while fetchMore is in progress, which is why we
+ // call this.reobserve() explicitly in the .finally callback after
+ // fetchMore (below), since the cache write will not automatically
+ // trigger a notification, even though it does trigger a cache
+ // broadcast. This is a good thing, because it means we won't see
+ // intervening query notifications while fetchMore is pending.
+ this.queryInfo.networkStatus = NetworkStatus.fetchMore;
+
// Simulate a loading result for the original query with
// networkStatus === NetworkStatus.fetchMore.
this.observer.next!({
@@ -508,8 +468,15 @@ once, rather than every time you call fetchMore.`);
) => TData,
): void {
const { queryManager } = this;
- const previousResult = this.getCurrentQueryResult(false).data;
- const newResult = mapFn(previousResult!, {
+ const { result } = queryManager.cache.diff<TData>({
+ query: this.options.query,
+ variables: this.variables,
+ previousResult: this.lastResult?.data,
+ returnPartialData: true,
+ optimistic: false,
+ });
+
+ const newResult = mapFn(result!, {
variables: (this as any).variables,
});
@@ -524,49 +491,6 @@ once, rather than every time you call fetchMore.`);
}
}
- private getCurrentQueryResult(
- optimistic: boolean = true,
- ): {
- data?: TData;
- partial: boolean;
- } {
- const { fetchPolicy } = this.options;
- const lastData = this.lastResult?.data;
- if (fetchPolicy === 'no-cache' ||
- fetchPolicy === 'network-only') {
- return {
- data: lastData,
- partial: false,
- };
- }
-
- let { result, complete } = this.queryManager.cache.diff<TData>({
- query: this.options.query,
- variables: this.variables,
- previousResult: this.lastResult?.data,
- returnPartialData: true,
- optimistic,
- });
-
- if (lastData &&
- !this.lastError &&
- // If this.options.query has @client(always: true) fields, we
- // cannot trust result, since it was read from the cache without
- // running local resolvers (and it's too late to run resolvers
- // now, since we must return a result synchronously). TODO In the
- // future (after Apollo Client 3.0), we should find a way to trust
- // this.lastResult in more cases, and read from the cache only in
- // cases when no result has been received yet.
- this.queryManager.transform(this.options.query).hasForcedResolvers) {
- result = lastData;
- }
-
- return {
- data: (complete || this.options.returnPartialData) ? result : void 0,
- partial: !complete,
- };
- }
-
public startPolling(pollInterval: number) {
this.getReobserver().updateOptions({ pollInterval });
}
@@ -659,7 +583,8 @@ once, rather than every time you call fetchMore.`);
);
},
// Avoid polling during SSR and when the query is already in flight.
- !queryManager.ssrMode && (() => !queryManager.checkInFlight(queryId)),
+ !queryManager.ssrMode && (
+ () => !isNetworkRequestInFlight(this.queryInfo.networkStatus))
);
}
diff --git a/src/core/QueryInfo.ts b/src/core/QueryInfo.ts
--- a/src/core/QueryInfo.ts
+++ b/src/core/QueryInfo.ts
@@ -29,7 +29,7 @@ export type QueryStoreValue = Pick<QueryInfo,
// this.queries Map. QueryInfo objects store the latest results and errors
// for the given query, and are responsible for reporting those results to
// the corresponding ObservableQuery, via the QueryInfo.notify method.
-// Results are reported asynchronously whenever setDirty marks the
+// Results are reported asynchronously whenever setDiff marks the
// QueryInfo object as dirty, though a call to the QueryManager's
// broadcastQueries method may trigger the notification before it happens
// automatically. This class used to be a simple interface type without
@@ -65,6 +65,10 @@ export class QueryInfo {
networkStatus = NetworkStatus.setVariables;
}
+ if (!equal(query.variables, this.variables)) {
+ this.diff = null;
+ }
+
Object.assign(this, {
document: query.document,
variables: query.variables,
@@ -86,25 +90,33 @@ export class QueryInfo {
private dirty: boolean = false;
- public setDirty(): this {
- if (!this.dirty) {
- this.dirty = true;
- if (!this.notifyTimeout) {
- this.notifyTimeout = setTimeout(() => this.notify(), 0);
- }
- }
- return this;
- }
-
private notifyTimeout?: ReturnType<typeof setTimeout>;
private diff: Cache.DiffResult<any> | null = null;
+ getDiff(variables = this.variables): Cache.DiffResult<any> {
+ if (this.diff && equal(variables, this.variables)) {
+ return this.diff;
+ }
+
+ this.updateWatch(this.variables = variables);
+
+ return this.diff = this.cache.diff({
+ query: this.document!,
+ variables,
+ returnPartialData: true,
+ optimistic: true,
+ });
+ }
+
setDiff(diff: Cache.DiffResult<any> | null) {
const oldDiff = this.diff;
this.diff = diff;
if (!this.dirty && diff?.result !== oldDiff?.result) {
- this.setDirty();
+ this.dirty = true;
+ if (!this.notifyTimeout) {
+ this.notifyTimeout = setTimeout(() => this.notify(), 0);
+ }
}
}
@@ -121,6 +133,7 @@ export class QueryInfo {
(this as any).observableQuery = oq;
if (oq) {
+ oq["queryInfo"] = this;
this.listeners.add(this.oqListener = () => oq.reobserve());
} else {
delete this.oqListener;
@@ -163,14 +176,6 @@ export class QueryInfo {
// QueryInfo.prototype.
delete this.cancel;
- this.variables =
- this.networkStatus =
- this.networkError =
- this.graphQLErrors =
- this.lastWatch =
- this.lastWrittenResult =
- this.lastWrittenVars = void 0;
-
const oq = this.observableQuery;
if (oq) oq.stopPolling();
}
@@ -181,7 +186,11 @@ export class QueryInfo {
private lastWatch?: Cache.WatchOptions;
- public updateWatch<TVars = Record<string, any>>(variables: TVars): this {
+ private updateWatch(variables = this.variables) {
+ const oq = this.observableQuery;
+ if (oq && oq.options.fetchPolicy === "no-cache") {
+ return;
+ }
if (!this.lastWatch ||
this.lastWatch.query !== this.document ||
!equal(variables, this.lastWatch.variables)) {
@@ -193,7 +202,6 @@ export class QueryInfo {
callback: diff => this.setDiff(diff),
});
}
- return this;
}
private lastWrittenResult?: FetchResult<any>;
@@ -286,6 +294,10 @@ export class QueryInfo {
optimistic: true,
});
+ // Any time we're about to update this.diff, we need to make
+ // sure we've started watching the cache.
+ this.updateWatch(options.variables);
+
// If we're allowed to write to the cache, and we can read a
// complete result from the cache, update result.data to be the
// result from the cache, rather than the raw network result.
diff --git a/src/core/QueryManager.ts b/src/core/QueryManager.ts
--- a/src/core/QueryManager.ts
+++ b/src/core/QueryManager.ts
@@ -33,7 +33,6 @@ import {
import { ObservableQuery } from './ObservableQuery';
import { NetworkStatus, isNetworkRequestInFlight } from './networkStatus';
import {
- QueryListener,
ApolloQueryResult,
OperationVariables,
MutationQueryReducer,
@@ -357,8 +356,12 @@ export class QueryManager<TStore> {
return store;
}
- public getQueryStoreValue(queryId: string): QueryStoreValue | undefined {
- return queryId ? this.queries.get(queryId) : undefined;
+ public resetErrors(queryId: string) {
+ const queryInfo = this.queries.get(queryId);
+ if (queryInfo) {
+ queryInfo.networkError = undefined;
+ queryInfo.graphQLErrors = [];
+ }
}
private transformCache = new (canUseWeakMap ? WeakMap : Map)<
@@ -440,12 +443,16 @@ export class QueryManager<TStore> {
options.notifyOnNetworkStatusChange = false;
}
+ const queryInfo = new QueryInfo(this.cache);
const observable = new ObservableQuery<T, TVariables>({
queryManager: this,
+ queryInfo,
options,
});
- this.getQuery(observable.queryId).init({
+ this.queries.set(observable.queryId, queryInfo);
+
+ queryInfo.init({
document: options.query,
observableQuery: observable,
variables: options.variables,
@@ -510,10 +517,6 @@ export class QueryManager<TStore> {
if (queryInfo) queryInfo.stop();
}
- public addQueryListener(queryId: string, listener: QueryListener) {
- this.getQuery(queryId).listeners.add(listener);
- }
-
public clearStore(): Promise<void> {
// Before we have sent the reset action to the store, we can no longer
// rely on the results returned by in-flight requests since these may
@@ -833,24 +836,6 @@ export class QueryManager<TStore> {
context = {},
} = options;
- if (fetchPolicy === "cache-and-network" ||
- fetchPolicy === "network-only") {
- // When someone chooses cache-and-network or network-only as their
- // initial FetchPolicy, they almost certainly do not want future cache
- // updates to trigger unconditional network requests, which is what
- // repeatedly applying the cache-and-network or network-only policies
- // would seem to require. Instead, when the cache reports an update
- // after the initial network request, subsequent network requests should
- // be triggered only if the cache result is incomplete. This behavior
- // corresponds exactly to switching to a cache-first FetchPolicy, so we
- // modify options.fetchPolicy here for the next fetchQueryObservable
- // call, using the same options object that the Reobserver always passes
- // to fetchQueryObservable. Note: if these FetchPolicy transitions get
- // much more complicated, we might consider using some sort of state
- // machine to capture the transition rules.
- options.fetchPolicy = "cache-first";
- }
-
const mightUseNetwork =
fetchPolicy === "cache-first" ||
fetchPolicy === "cache-and-network" ||
@@ -921,7 +906,27 @@ export class QueryManager<TStore> {
: fromVariables(normalized.variables!)
);
- concast.cleanup(() => this.fetchCancelFns.delete(queryId));
+ concast.cleanup(() => {
+ this.fetchCancelFns.delete(queryId);
+
+ if (fetchPolicy === "cache-and-network" ||
+ fetchPolicy === "network-only") {
+ // When someone chooses cache-and-network or network-only as their
+ // initial FetchPolicy, they almost certainly do not want future cache
+ // updates to trigger unconditional network requests, which is what
+ // repeatedly applying the cache-and-network or network-only policies
+ // would seem to require. Instead, when the cache reports an update
+ // after the initial network request, subsequent network requests should
+ // be triggered only if the cache result is incomplete. This behavior
+ // corresponds exactly to switching to a cache-first FetchPolicy, so we
+ // modify options.fetchPolicy here for the next fetchQueryObservable
+ // call, using the same options object that the Reobserver always passes
+ // to fetchQueryObservable. Note: if these FetchPolicy transitions get
+ // much more complicated, we might consider using some sort of state
+ // machine to capture the transition rules.
+ options.fetchPolicy = "cache-first";
+ }
+ });
return concast;
}
@@ -948,15 +953,10 @@ export class QueryManager<TStore> {
variables,
lastRequestId: this.generateRequestId(),
networkStatus,
- }).updateWatch(variables);
-
- const readCache = () => this.cache.diff<any>({
- query,
- variables,
- returnPartialData: true,
- optimistic: true,
});
+ const readCache = () => queryInfo.getDiff(variables);
+
const resultsFromCache = (
diff: Cache.DiffResult<TData>,
networkStatus = queryInfo.networkStatus || NetworkStatus.loading,
@@ -975,6 +975,7 @@ export class QueryManager<TStore> {
data,
loading: isNetworkRequestInFlight(networkStatus),
networkStatus,
+ ...(diff.complete ? null : { partial: true }),
} as ApolloQueryResult<TData>);
if (this.transform(query).hasForcedResolvers) {
@@ -1065,16 +1066,6 @@ export class QueryManager<TStore> {
clientAwareness: this.clientAwareness,
};
}
-
- public checkInFlight(queryId: string): boolean {
- const query = this.getQueryStoreValue(queryId);
- return (
- !!query &&
- !!query.networkStatus &&
- query.networkStatus !== NetworkStatus.ready &&
- query.networkStatus !== NetworkStatus.error
- );
- }
}
function markMutationResult<TStore, TData>(
diff --git a/src/core/index.ts b/src/core/index.ts
--- a/src/core/index.ts
+++ b/src/core/index.ts
@@ -9,7 +9,6 @@ export {
ObservableQuery,
FetchMoreOptions,
UpdateQueryOptions,
- ApolloCurrentQueryResult,
} from './ObservableQuery';
export {
QueryBaseOptions,
diff --git a/src/core/types.ts b/src/core/types.ts
--- a/src/core/types.ts
+++ b/src/core/types.ts
@@ -1,6 +1,7 @@
import { DocumentNode, GraphQLError } from 'graphql';
import { FetchResult } from '../link/core';
+import { ApolloError } from '../errors';
import { QueryInfo } from './QueryInfo';
import { NetworkStatus } from './networkStatus';
import { Resolver } from './LocalState';
@@ -18,8 +19,13 @@ export type PureQueryOptions = {
export type ApolloQueryResult<T> = {
data?: T;
errors?: ReadonlyArray<GraphQLError>;
+ error?: ApolloError;
loading: boolean;
networkStatus: NetworkStatus;
+ // If result.data was read from the cache with missing fields,
+ // result.partial will be true. Otherwise, result.partial will be falsy
+ // (usually because the property is absent from the result object).
+ partial?: boolean;
};
// This is part of the public API, people write these functions in `updateQueries`.
| diff --git a/src/core/__tests__/ObservableQuery.ts b/src/core/__tests__/ObservableQuery.ts
--- a/src/core/__tests__/ObservableQuery.ts
+++ b/src/core/__tests__/ObservableQuery.ts
@@ -1176,6 +1176,7 @@ describe('ObservableQuery', () => {
},
loading: true,
networkStatus: NetworkStatus.loading,
+ partial: true,
});
} else if (handleCount === 2) {
expect(result).toEqual({
@@ -1395,8 +1396,8 @@ describe('ObservableQuery', () => {
});
expect(stripSymbols(observable.getCurrentResult())).toEqual({
data: dataOne,
- loading: true,
- networkStatus: NetworkStatus.loading,
+ loading: false,
+ networkStatus: NetworkStatus.ready,
partial: false,
});
}).then(resolve, reject);
@@ -1597,6 +1598,7 @@ describe('ObservableQuery', () => {
data: dataOne,
loading: true,
networkStatus: 1,
+ partial: true,
});
} else if (handleCount === 2) {
@@ -1885,11 +1887,11 @@ describe('ObservableQuery', () => {
observable.subscribe({
error() {
const { queryManager } = (observable as any);
- const queryStore = queryManager.getQueryStoreValue(observable.queryId);
- expect(queryStore.graphQLErrors).toEqual([graphQLError]);
+ const queryInfo = queryManager["queries"].get(observable.queryId);
+ expect(queryInfo.graphQLErrors).toEqual([graphQLError]);
observable.resetQueryStoreErrors();
- expect(queryStore.graphQLErrors).toEqual([]);
+ expect(queryInfo.graphQLErrors).toEqual([]);
resolve();
}
@@ -1907,10 +1909,10 @@ describe('ObservableQuery', () => {
observable.subscribe({
next() {
const { queryManager } = (observable as any);
- const queryStore = queryManager.getQueryStoreValue(observable.queryId);
- queryStore.networkError = networkError;
+ const queryInfo = queryManager["queries"].get(observable.queryId);
+ queryInfo.networkError = networkError;
observable.resetQueryStoreErrors();
- expect(queryStore.networkError).toBeUndefined();
+ expect(queryInfo.networkError).toBeUndefined();
resolve();
}
});
diff --git a/src/core/__tests__/QueryManager/index.ts b/src/core/__tests__/QueryManager/index.ts
--- a/src/core/__tests__/QueryManager/index.ts
+++ b/src/core/__tests__/QueryManager/index.ts
@@ -191,8 +191,15 @@ describe('QueryManager', () => {
function getCurrentQueryResult<TData, TVars>(
observableQuery: ObservableQuery<TData, TVars>,
- ): ReturnType<ObservableQuery<TData, TVars>["getCurrentQueryResult"]> {
- return (observableQuery as any).getCurrentQueryResult();
+ ): {
+ data?: TData;
+ partial: boolean;
+ } {
+ const result = observableQuery.getCurrentResult();
+ return {
+ data: result.data,
+ partial: !!result.partial,
+ };
}
itAsync('handles GraphQL errors', (resolve, reject) => {
@@ -2128,6 +2135,7 @@ describe('QueryManager', () => {
data: {},
loading: true,
networkStatus: NetworkStatus.loading,
+ partial: true,
});
},
result => {
@@ -2202,6 +2210,7 @@ describe('QueryManager', () => {
loading: true,
networkStatus: NetworkStatus.loading,
data: {},
+ partial: true,
});
} else if (count === 2) {
expect(result).toEqual({
@@ -2220,6 +2229,7 @@ describe('QueryManager', () => {
data: {
info: {},
},
+ partial: true,
});
} else if (count === 4) {
expect(result).toEqual({
@@ -2243,6 +2253,7 @@ describe('QueryManager', () => {
loading: true,
networkStatus: NetworkStatus.loading,
data: {},
+ partial: true,
});
} else if (count === 2) {
expect(result).toEqual({
diff --git a/src/core/__tests__/fetchPolicies.ts b/src/core/__tests__/fetchPolicies.ts
--- a/src/core/__tests__/fetchPolicies.ts
+++ b/src/core/__tests__/fetchPolicies.ts
@@ -494,6 +494,7 @@ describe('cache-and-network', function() {
data: {},
loading: true,
networkStatus: NetworkStatus.setVariables,
+ partial: true,
});
} else if (count === 3) {
expect(result).toEqual({
@@ -520,6 +521,7 @@ describe('cache-and-network', function() {
data: {},
loading: true,
networkStatus: NetworkStatus.setVariables,
+ partial: true,
});
} else if (count === 7) {
expect(result).toEqual({
diff --git a/src/react/components/__tests__/client/Query.test.tsx b/src/react/components/__tests__/client/Query.test.tsx
--- a/src/react/components/__tests__/client/Query.test.tsx
+++ b/src/react/components/__tests__/client/Query.test.tsx
@@ -1674,9 +1674,6 @@ describe('Query component', () => {
});
break;
case 4:
- expect(props.loading).toBeTruthy();
- break;
- case 5:
// Good result should be received without any errors.
expect(props.error).toBeFalsy();
expect(props.data.allPeople).toBeTruthy();
@@ -1698,7 +1695,7 @@ describe('Query component', () => {
</Query>
);
- return wait(() => expect(count).toBe(6)).then(resolve, reject);
+ return wait(() => expect(count).toBe(5)).then(resolve, reject);
}
);
diff --git a/src/react/hoc/__tests__/mutations/recycled-queries.test.tsx b/src/react/hoc/__tests__/mutations/recycled-queries.test.tsx
--- a/src/react/hoc/__tests__/mutations/recycled-queries.test.tsx
+++ b/src/react/hoc/__tests__/mutations/recycled-queries.test.tsx
@@ -162,9 +162,6 @@ describe('graphql(mutation) update queries', () => {
});
break;
case 3:
- expect(this.props.data!.loading).toBeTruthy();
- break;
- case 4:
expect(stripSymbols(this.props.data!.todo_list)).toEqual({
id: '123',
title: 'how to apollo',
@@ -228,7 +225,6 @@ describe('graphql(mutation) update queries', () => {
expect(todoUpdateQueryCount).toBe(2);
expect(queryMountCount).toBe(2);
expect(queryUnmountCount).toBe(2);
- expect(queryRenderCount).toBe(5);
}, 5);
}, 5);
}, 5);
@@ -236,7 +232,7 @@ describe('graphql(mutation) update queries', () => {
}, 5);
return wait(() => {
- expect(queryRenderCount).toBe(5);
+ expect(queryRenderCount).toBe(4);
});
});
@@ -359,12 +355,6 @@ describe('graphql(mutation) update queries', () => {
);
break;
case 3:
- expect(this.props.data!.loading).toBeTruthy();
- expect(stripSymbols(this.props.data!.todo_list)).toEqual(
- updatedData.todo_list
- );
- break;
- case 4:
expect(this.props.data!.loading).toBeFalsy();
expect(stripSymbols(this.props.data!.todo_list)).toEqual(
updatedData.todo_list
@@ -405,7 +395,7 @@ describe('graphql(mutation) update queries', () => {
});
return wait(() => {
- expect(queryRenderCount).toBe(5);
+ expect(queryRenderCount).toBe(4);
});
});
});
diff --git a/src/react/hoc/__tests__/queries/observableQuery.test.tsx b/src/react/hoc/__tests__/queries/observableQuery.test.tsx
--- a/src/react/hoc/__tests__/queries/observableQuery.test.tsx
+++ b/src/react/hoc/__tests__/queries/observableQuery.test.tsx
@@ -321,9 +321,6 @@ describe('[queries] observableQuery', () => {
expect(stripSymbols(allPeople)).toEqual(data.allPeople);
break;
case 3:
- expect(loading).toBe(true);
- break;
- case 4:
expect(loading).toBe(false);
expect(stripSymbols(allPeople)).toEqual(data.allPeople);
break;
diff --git a/src/react/hoc/__tests__/queries/skip.test.tsx b/src/react/hoc/__tests__/queries/skip.test.tsx
--- a/src/react/hoc/__tests__/queries/skip.test.tsx
+++ b/src/react/hoc/__tests__/queries/skip.test.tsx
@@ -619,32 +619,35 @@ describe('[queries] skip', () => {
})(
class extends React.Component<any> {
render() {
- switch (count) {
- case 0:
- expect(this.props.data.loading).toBeTruthy();
+ switch (++count) {
+ case 1:
+ expect(this.props.data.loading).toBe(true);
expect(ranQuery).toBe(1);
break;
- case 1:
- expect(this.props.data.loading).toBeFalsy();
+ case 2:
+ expect(this.props.data.loading).toBe(false);
expect(ranQuery).toBe(1);
setTimeout(() => {
this.props.setSkip(true);
});
break;
- case 2:
+ case 3:
expect(this.props.data).toBeUndefined();
expect(ranQuery).toBe(1);
setTimeout(() => {
this.props.setSkip(false);
});
break;
- case 3:
- expect(this.props.data!.loading).toBeFalsy();
- expect(ranQuery).toBe(2);
+ case 4:
+ expect(this.props.data!.loading).toBe(true);
+ expect(ranQuery).toBe(3);
+ break;
+ case 5:
+ expect(this.props.data!.loading).toBe(false);
+ expect(ranQuery).toBe(3);
break;
default:
}
- count += 1;
return null;
}
}
@@ -669,7 +672,7 @@ describe('[queries] skip', () => {
);
await wait(() => {
- expect(count).toEqual(4);
+ expect(count).toEqual(5);
});
});
| v3.0: loading: true for first render always, even if all data is available in cache
**Intended outcome:**
While attempting to upgrade to @apollo/client@3.0, I found that my application was rendering a lot more often than it was in 2.0. We have several components hooked into the same query; they usually render only once if the query has already completed earlier.
**Actual outcome:**
In 3.0 it appears that every query will have `loading: true` even if the cache data can satisfy the query. It will then immediately re-render with `loading: false`.
**How to reproduce the issue:**
I recreated the 3.0 issue here: https://codesandbox.io/s/serene-yalow-lvfm9; look at the console and you'll observe the behavior.
Compared to 2.0: https://codesandbox.io/s/determined-galois-e1w2w; if you look at the console, the same query renders only once, with `loading: false`.
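A minimal sketch of the reproduction, with hypothetical names (the exact code lives in the sandboxes above):
```ts
// Hypothetical reconstruction of the sandbox; ALL_PEOPLE and the
// component name are assumptions, not the sandbox's exact code.
import { gql, useQuery } from "@apollo/client";

const ALL_PEOPLE = gql`
  query AllPeople {
    people {
      id
      name
    }
  }
`;

export function People() {
  const { loading, data } = useQuery(ALL_PEOPLE);
  // With the result already in the cache, 2.x logs a single render with
  // loading: false, while 3.0.2 logs loading: true first and then
  // immediately re-renders with loading: false.
  console.log("render:", loading, data?.people?.length);
  return null;
}
```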
**Versions**
@apollo/client@3.0.2
| 2020-07-27T17:17:33Z | 3.1 |
|
apollographql/apollo-client | 6,589 | apollographql__apollo-client-6589 | [
"6122"
] | b5407a3a7a0b92cde72de31253f1da78da430ddf | diff --git a/src/react/data/QueryData.ts b/src/react/data/QueryData.ts
--- a/src/react/data/QueryData.ts
+++ b/src/react/data/QueryData.ts
@@ -412,7 +412,13 @@ export class QueryData<TData, TVariables> extends OperationData {
const { data, loading, error } = this.previousData.result;
if (!loading) {
- const { query, variables, onCompleted, onError } = this.getOptions();
+ const {
+ query,
+ variables,
+ onCompleted,
+ onError,
+ skip
+ } = this.getOptions();
// No changes, so we won't call onError/onCompleted.
if (
@@ -424,7 +430,7 @@ export class QueryData<TData, TVariables> extends OperationData {
return;
}
- if (onCompleted && !error) {
+ if (onCompleted && !error && !skip) {
onCompleted(data);
} else if (onError && error) {
onError(error);
| diff --git a/src/react/hooks/__tests__/useQuery.test.tsx b/src/react/hooks/__tests__/useQuery.test.tsx
--- a/src/react/hooks/__tests__/useQuery.test.tsx
+++ b/src/react/hooks/__tests__/useQuery.test.tsx
@@ -1798,6 +1798,27 @@ describe('useQuery Hook', () => {
expect(renderCount).toBe(3);
}).then(resolve, reject);
});
+
+ itAsync('should not call onCompleted if skip is true', (resolve, reject) => {
+ function Component() {
+ const { loading } = useQuery(CAR_QUERY, {
+ skip: true,
+ onCompleted() {
+ fail('should not call onCompleted!');
+ }
+ });
+ expect(loading).toBeFalsy();
+ return null;
+ }
+
+ render(
+ <MockedProvider mocks={CAR_MOCKS}>
+ <Component />
+ </MockedProvider>
+ );
+
+ return wait().then(resolve, reject);
+ });
});
describe('Optimistic data', () => {
| Function in onCompleted fires when skip is true
Hi all, I'm experiencing the same issue as reported here by @xiaoyu-tamu:
https://github.com/apollographql/react-apollo/issues/3814
I believe it was reported in the wrong place (under the old react-apollo repo), so I am reposting here for visibility.
**Intended outcome:**
Function specified in `onCompleted` should not fire when the `skip` option is _**true**_.
**Actual outcome:**
Function specified in `onCompleted` fires even when the `skip` option is _**true**_.
**How to reproduce the issue:**
@xiaoyu-tamu's minimal codesandbox
https://codesandbox.io/s/jolly-mclean-rtrwj
```
const { loading, data } = useQuery(ALL_PEOPLE, {
  skip: true,
  onCompleted: () => {
    alert("skipped but onComplete fired");
  }
});
```
**Versions**
3.0.0-beta.24 (codesandbox minimal reproduction)
3.0.0-beta.43 (reproduced in my own project)
System:
  OS: macOS 10.15.3
Binaries:
  Node: 10.16.0 - ~/.nvm/versions/node/v10.16.0/bin/node
  Yarn: 1.22.4 - ~/Desktop/{redacted}/node_modules/.bin/yarn
  npm: 6.9.0 - ~/.nvm/versions/node/v10.16.0/bin/npm
Browsers:
  Chrome: 80.0.3987.163
  Firefox: 74.0
  Safari: 13.0.5
npmPackages:
  @apollo/client: ^3.0.0-beta.43 => 3.0.0-beta.43
  apollo-link-error: ^1.1.12 => 1.1.12
  apollo-link-token-refresh: ^0.2.7 => 0.2.7
| Yeah, can verify it's still happening on 3.0.0-beta.31
Hi, I am also experiencing this issue.
**Versions**
apollo-client: 2.6.8
@apollo/react-hooks: 3.1.5
Same here with @apollo/react-hooks 3.1.5
It is still in `@apollo/client` 3.0.0-beta.44 | 2020-07-14T00:57:26Z | 3 |
apollographql/apollo-client | 6,691 | apollographql__apollo-client-6691 | [
"6685"
] | 7989d5e98bd8b07c60c7f731abcc34e8636ef1fc | diff --git a/src/cache/inmemory/policies.ts b/src/cache/inmemory/policies.ts
--- a/src/cache/inmemory/policies.ts
+++ b/src/cache/inmemory/policies.ts
@@ -198,12 +198,13 @@ export const defaultDataIdFromObject = (
_id !== void 0 ? { _id } :
void 0;
}
- const idValue = id || _id;
- if (idValue !== void 0) {
+ // If there is no object.id, fall back to object._id.
+ if (id === void 0) id = _id;
+ if (id !== void 0) {
return `${__typename}:${(
- typeof idValue === "number" ||
- typeof idValue === "string"
- ) ? idValue : JSON.stringify(idValue)}`;
+ typeof id === "number" ||
+ typeof id === "string"
+ ) ? id : JSON.stringify(id)}`;
}
}
};
@@ -372,7 +373,12 @@ export class Policies {
const old = this.rootTypenamesById[rootId];
if (typename !== old) {
invariant(!old || old === which, `Cannot change root ${which} __typename more than once`);
+ // First, delete any old __typename associated with this rootId from
+ // rootIdsByTypename.
+ if (old) delete this.rootIdsByTypename[old];
+ // Now make this the only __typename that maps to this rootId.
this.rootIdsByTypename[typename] = rootId;
+ // Finally, update the __typename associated with this rootId.
this.rootTypenamesById[rootId] = typename;
}
}
| diff --git a/src/cache/inmemory/__tests__/__snapshots__/policies.ts.snap b/src/cache/inmemory/__tests__/__snapshots__/policies.ts.snap
--- a/src/cache/inmemory/__tests__/__snapshots__/policies.ts.snap
+++ b/src/cache/inmemory/__tests__/__snapshots__/policies.ts.snap
@@ -1,5 +1,55 @@
// Jest Snapshot v1, https://goo.gl/fbAQLP
+exports[`type policies can alter the root query __typename 1`] = `
+Object {
+ "Query:0": Object {
+ "__typename": "Query",
+ "id": 0,
+ },
+ "Query:1": Object {
+ "__typename": "Query",
+ "id": 1,
+ },
+ "Query:2": Object {
+ "__typename": "Query",
+ "id": 2,
+ },
+ "Query:3": Object {
+ "__typename": "Query",
+ "id": 3,
+ },
+ "ROOT_QUERY": Object {
+ "__typename": "RootQuery",
+ "items": Array [
+ Object {
+ "id": 0,
+ "query": Object {
+ "__ref": "Query:0",
+ },
+ },
+ Object {
+ "id": 1,
+ "query": Object {
+ "__ref": "Query:1",
+ },
+ },
+ Object {
+ "id": 2,
+ "query": Object {
+ "__ref": "Query:2",
+ },
+ },
+ Object {
+ "id": 3,
+ "query": Object {
+ "__ref": "Query:3",
+ },
+ },
+ ],
+ },
+}
+`;
+
exports[`type policies field policies can handle Relay-style pagination 1`] = `
Object {
"Artist:{\\"href\\":\\"/artist/jean-michel-basquiat\\"}": Object {
diff --git a/src/cache/inmemory/__tests__/policies.ts b/src/cache/inmemory/__tests__/policies.ts
--- a/src/cache/inmemory/__tests__/policies.ts
+++ b/src/cache/inmemory/__tests__/policies.ts
@@ -3968,4 +3968,63 @@ describe("type policies", function () {
// Unchanged because the merge function prefers the existing object.
expect(cache.extract()).toEqual(snapshot);
});
+
+ it("can alter the root query __typename", function () {
+ const cache = new InMemoryCache({
+ typePolicies: {
+ RootQuery: {
+ queryType: true,
+ },
+ }
+ });
+
+ const ALL_ITEMS = gql`
+ query Items {
+ __typename
+ items {
+ id
+ query {
+ id
+ }
+ }
+ }
+ `;
+
+ function makeItem(id: number) {
+ return {
+ id,
+ query: {
+ __typename: "Query",
+ id,
+ },
+ };
+ }
+
+ cache.writeQuery({
+ query: ALL_ITEMS,
+ data: {
+ __typename: "RootQuery",
+ items: [
+ makeItem(0),
+ makeItem(1),
+ makeItem(2),
+ makeItem(3),
+ ],
+ },
+ });
+
+ expect(cache.extract()).toMatchSnapshot();
+
+ expect(cache.readQuery({
+ query: ALL_ITEMS,
+ })).toEqual({
+ __typename: "RootQuery",
+ items: [
+ makeItem(0),
+ makeItem(1),
+ makeItem(2),
+ makeItem(3),
+ ],
+ });
+ });
});
| A __typename of Query leads to incorrect caching
**Intended outcome:**
Getting the correct results from a query
**Actual outcome:**
If I use a type whose `__typename` is `Query`, I get incorrect results instead: each nested reference to the type is incorrectly resolved as the first instance in the result. Specifically (from the repro case):
Query:
```
query Items {
  items {
    id
    query { id }
  }
}
```
the data returned for the GraphQL operation is
```
[
  { "id": 0, "query": { "id": 0 } },
  { "id": 1, "query": { "id": 1 } },
  { "id": 2, "query": { "id": 2 } }
]
```
but the data from useQuery is:
```
{
  "items": [
    {
      "__typename": "Item",
      "id": "0",
      "query": {
        "id": "0"
      }
    },
    {
      "__typename": "Item",
      "id": "1",
      "query": {
        "id": "0"
      }
    },
    {
      "__typename": "Item",
      "id": "2",
      "query": {
        "id": "0"
      }
    }
  ]
}
```
Note how each nested `query` incorrectly has an `id` of `0` (instead of an id matching its `item`).
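The regression test in the patch above exercises the fix by declaring a differently named root type. As a sketch of that configuration (assuming a schema whose root operation type is declared as `RootQuery` rather than `Query`):
```ts
import { InMemoryCache } from "@apollo/client";

// Sketch based on the regression test above: mark RootQuery as the
// operation root, so objects whose __typename is "Query" normalize as
// ordinary "Query:<id>" entities instead of colliding with ROOT_QUERY.
const cache = new InMemoryCache({
  typePolicies: {
    RootQuery: {
      queryType: true,
    },
  },
});
```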
**How to reproduce the issue:**
See https://github.com/uhoh-itsmaciek/react-apollo-error-template -- thanks for the awesome template!
**Versions**
```
System:
  OS: Linux 5.3 Ubuntu 19.10 (Eoan Ermine)
Binaries:
  Node: 13.8.0 - ~/n/bin/node
  Yarn: 1.22.4 - ~/n/bin/yarn
  npm: 6.14.4 - ~/n/bin/npm
Browsers:
  Chrome: 84.0.4147.89
  Firefox: 78.0.2
npmPackages:
  @apollo/client: ^3.0.0 => 3.0.2
npmGlobalPackages:
  apollo: 2.18.3
```
| 2020-07-23T23:48:04Z | 3.1 |
|
apollographql/apollo-client | 6,448 | apollographql__apollo-client-6448 | [
"6307",
"6444"
] | 67796e597baf20aac0ac7a6bd4345e9f1bb84662 | diff --git a/src/core/QueryInfo.ts b/src/core/QueryInfo.ts
--- a/src/core/QueryInfo.ts
+++ b/src/core/QueryInfo.ts
@@ -165,7 +165,10 @@ export class QueryInfo {
this.variables =
this.networkStatus =
this.networkError =
- this.graphQLErrors = void 0;
+ this.graphQLErrors =
+ this.lastWatch =
+ this.lastWrittenResult =
+ this.lastWrittenVars = void 0;
const oq = this.observableQuery;
if (oq) oq.stopPolling();
@@ -192,6 +195,9 @@ export class QueryInfo {
return this;
}
+ private lastWrittenResult?: FetchResult<any>;
+ private lastWrittenVars?: WatchQueryOptions["variables"];
+
public markResult<T>(
result: FetchResult<T>,
options: Pick<WatchQueryOptions,
@@ -200,6 +206,8 @@ export class QueryInfo {
| "errorPolicy">,
allowCacheWrite: boolean,
) {
+ this.graphQLErrors = isNonEmptyArray(result.errors) ? result.errors : [];
+
if (options.fetchPolicy === 'no-cache') {
this.diff = { result: result.data, complete: true };
@@ -218,11 +226,57 @@ export class QueryInfo {
// of writeQuery, so we can store the new diff quietly and ignore
// it when we receive it redundantly from the watch callback.
this.cache.performTransaction(cache => {
- cache.writeQuery({
- query: this.document!,
- data: result.data as T,
- variables: options.variables,
- });
+ if (equal(result, this.lastWrittenResult) &&
+ equal(options.variables, this.lastWrittenVars)) {
+ // If result is the same as the last result we received from
+ // the network (and the variables match too), avoid writing
+ // result into the cache again. The wisdom of skipping this
+ // cache write is far from obvious, since any cache write
+ // could be the one that puts the cache back into a desired
+ // state, fixing corruption or missing data. However, if we
+ // always write every network result into the cache, we enable
+ // feuds between queries competing to update the same data in
+ // incompatible ways, which can lead to an endless cycle of
+ // cache broadcasts and useless network requests. As with any
+ // feud, eventually one side must step back from the brink,
+ // letting the other side(s) have the last word(s). There may
+ // be other points where we could break this cycle, such as
+ // silencing the broadcast for cache.writeQuery (not a good
+ // idea, since it just delays the feud a bit) or somehow
+ // avoiding the network request that just happened (also bad,
+ // because the server could return useful new data). All
+ // options considered, skipping this cache write seems to be
+ // the least damaging place to break the cycle, because it
+ // reflects the intuition that we recently wrote this exact
+ // result into the cache, so the cache *should* already/still
+ // contain this data. If some other query has clobbered that
+ // data in the meantime, that's too bad, but there will be no
+ // winners if every query blindly reverts to its own version
+ // of the data. This approach also gives the network a chance
+ // to return new data, which will be written into the cache as
+ // usual, notifying only those queries that are directly
+ // affected by the cache updates, as usual. In the future, an
+ // even more sophisticated cache could perhaps prevent or
+ // mitigate the clobbering somehow, but that would make this
+ // particular cache write even less important, and thus
+ // skipping it would be even safer than it is today.
+ if (this.diff && this.diff.complete) {
+ // Reuse data from the last good (complete) diff that we
+ // received, when possible.
+ result.data = this.diff.result;
+ return;
+ }
+ // If the previous this.diff was incomplete, fall through to
+ // re-reading the latest data with cache.diff, below.
+ } else {
+ cache.writeQuery({
+ query: this.document!,
+ data: result.data as T,
+ variables: options.variables,
+ });
+ this.lastWrittenResult = result;
+ this.lastWrittenVars = options.variables;
+ }
const diff = cache.diff<T>({
query: this.document!,
@@ -241,10 +295,11 @@ export class QueryInfo {
result.data = diff.result;
}
});
+
+ } else {
+ this.lastWrittenResult = this.lastWrittenVars = void 0;
}
}
-
- this.graphQLErrors = isNonEmptyArray(result.errors) ? result.errors : [];
}
public markReady() {
@@ -254,6 +309,7 @@ export class QueryInfo {
public markError(error: ApolloError) {
this.networkStatus = NetworkStatus.error;
+ this.lastWrittenResult = this.lastWrittenVars = void 0;
if (error.graphQLErrors) {
this.graphQLErrors = error.graphQLErrors;
| diff --git a/src/core/__tests__/QueryManager/index.ts b/src/core/__tests__/QueryManager/index.ts
--- a/src/core/__tests__/QueryManager/index.ts
+++ b/src/core/__tests__/QueryManager/index.ts
@@ -36,6 +36,7 @@ import observableToPromise, {
import subscribeAndCount from '../../../utilities/testing/subscribeAndCount';
import { stripSymbols } from '../../../utilities/testing/stripSymbols';
import { itAsync } from '../../../utilities/testing/itAsync';
+import { ApolloClient } from '../../../ApolloClient';
interface MockedMutation {
reject: (reason: any) => any;
@@ -2049,19 +2050,6 @@ describe('QueryManager', () => {
networkStatus: NetworkStatus.ready,
});
},
- result => {
- expect(stripSymbols(result)).toEqual({
- data: {
- ...data2,
- author: {
- ...data2.author,
- id: data1.author.id,
- },
- },
- loading: false,
- networkStatus: NetworkStatus.ready,
- });
- },
),
]).then(resolve, reject);
});
@@ -2163,6 +2151,132 @@ describe('QueryManager', () => {
]).then(resolve, reject);
});
+ itAsync("should not write unchanged network results to cache", (resolve, reject) => {
+ const cache = new InMemoryCache({
+ typePolicies: {
+ Query: {
+ fields: {
+ info: {
+ merge(_, incoming) {
+ return incoming;
+ },
+ },
+ },
+ },
+ },
+ });
+
+ const client = new ApolloClient({
+ cache,
+ link: new ApolloLink(operation => new Observable((observer: Observer<FetchResult>) => {
+ switch (operation.operationName) {
+ case "A":
+ observer.next!({ data: { info: { a: "ay" }}});
+ break;
+ case "B":
+ observer.next!({ data: { info: { b: "bee" }}});
+ break;
+ }
+ observer.complete!();
+ })),
+ });
+
+ const queryA = gql`query A { info { a } }`;
+ const queryB = gql`query B { info { b } }`;
+
+ const obsA = client.watchQuery({
+ query: queryA,
+ returnPartialData: true,
+ });
+
+ const obsB = client.watchQuery({
+ query: queryB,
+ returnPartialData: true,
+ });
+
+ subscribeAndCount(reject, obsA, (count, result) => {
+ if (count === 1) {
+ expect(result).toEqual({
+ loading: true,
+ networkStatus: NetworkStatus.loading,
+ data: {},
+ });
+ } else if (count === 2) {
+ expect(result).toEqual({
+ loading: false,
+ networkStatus: NetworkStatus.ready,
+ data: {
+ info: {
+ a: "ay",
+ },
+ },
+ });
+ } else if (count === 3) {
+ expect(result).toEqual({
+ loading: true,
+ networkStatus: NetworkStatus.loading,
+ data: {
+ info: {},
+ },
+ });
+ } else if (count === 4) {
+ expect(result).toEqual({
+ loading: false,
+ networkStatus: NetworkStatus.ready,
+ data: {
+ info: {
+ a: "ay",
+ },
+ },
+ });
+ setTimeout(resolve, 100);
+ } else {
+ reject(new Error(`Unexpected ${JSON.stringify({count,result})}`));
+ }
+ });
+
+ subscribeAndCount(reject, obsB, (count, result) => {
+ if (count === 1) {
+ expect(result).toEqual({
+ loading: true,
+ networkStatus: NetworkStatus.loading,
+ data: {},
+ });
+ } else if (count === 2) {
+ expect(result).toEqual({
+ loading: false,
+ networkStatus: NetworkStatus.ready,
+ data: {
+ info: {
+ b: "bee",
+ },
+ },
+ });
+ } else if (count === 3) {
+ expect(result).toEqual({
+ loading: true,
+ networkStatus: NetworkStatus.loading,
+ data: {
+ info: {},
+ },
+ });
+ } else if (count === 4) {
+ expect(result).toEqual({
+ loading: false,
+ networkStatus: NetworkStatus.ready,
+ data: {
+ info: {
+ b: "bee",
+ },
+ },
+ });
+ setTimeout(resolve, 100);
+ } else {
+ reject(new Error(`Unexpected ${JSON.stringify({count,result})}`));
+ }
+ });
+ });
+
itAsync('should not error when replacing unidentified data with a normalized ID', (resolve, reject) => {
const queryWithoutId = gql`
query {
| Fetching data again and again if more queries without id are executed
The problem started after upgrading from beta 45 to beta 48 and is related to missing `id` in queries.
**Intended outcome:**
I have two components displaying a list of items, each with its own query. The queries are very similar and differ only in their arguments.
```tsx
function MyRecentIncidents() {
  const {loading, error, data} = useQuery<MyRecentIncidentsQuery>(MY_RECENT_INCIDENTS);
  console.log('[Query 1] loading:', loading, 'error:', error, 'data:', data);
  return null;
}

const MY_RECENT_INCIDENTS = gql`
  query MyRecentIncidents {
    viewer {
      incidents(first: 5, createdByViewer: true) {
        nodes {
          id
        }
      }
    }
  }
`;

function RecentIncidents() {
  const {loading, error, data} = useQuery<RecentIncidentsQuery>(RECENT_INCIDENTS);
  console.log('[Query 2] loading:', loading, 'error:', error, 'data:', data);
  return null;
}

const RECENT_INCIDENTS = gql`
  query RecentIncidents {
    viewer {
      incidents(first: 5) {
        nodes {
          id
        }
      }
    }
  }
`;

const client = new ApolloClient({
  cache: new InMemoryCache(),
  uri: process.env.REACT_APP_API_GRAPHQL_URL,
});

export default function App() {
  return (
    <ApolloProvider client={client}>
      <MyRecentIncidents/>
      <RecentIncidents/>
    </ApolloProvider>
  );
}
```
It should fetch each query just once, render the items, and do nothing more.
**Actual outcome:**
After initial queries complete, a new request to the server is started. Once completed, _the other_ query starts. Then the first one. And again and again…
However, the `loading` status doesn't change and the components don't rerender; it just fetches data in the background.
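This matches the write "feud" described in the long comment in the patch above: with no `id` selected, both queries store their data in the same unnormalized `ROOT_QUERY.viewer` object, and each network result clobbers the other query's field. A rough sketch of the clobbering (the data shapes are simplified assumptions, not the real schema):
```ts
import { gql, InMemoryCache } from "@apollo/client";

const cache = new InMemoryCache();

const MINE = gql`{ viewer { incidents(first: 5, createdByViewer: true) { nodes { id } } } }`;
const ALL = gql`{ viewer { incidents(first: 5) { nodes { id } } } }`;

cache.writeQuery({
  query: MINE,
  data: { viewer: { incidents: { nodes: [{ __typename: "Incident", id: 1 }] } } },
});

// viewer has no id, so it lives directly under ROOT_QUERY.viewer; with
// no merge function, this second write replaces the whole viewer object
// and drops the first query's incidents(...) field:
cache.writeQuery({
  query: ALL,
  data: { viewer: { incidents: { nodes: [{ __typename: "Incident", id: 2 }] } } },
});

// The first query's cached result is now incomplete, so it refetches
// and clobbers the second query's field in turn: the endless loop.
const { complete } = cache.diff({ query: MINE, optimistic: true, returnPartialData: true });
console.log(complete); // false
```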

**My observations:**
The code above worked in beta 45; the problem occurs in beta 48+. I didn't try 46 or 47. It's still present in the just-released beta 49.
Querying the `id` field on `viewer`, like
```tsx
const RECENT_INCIDENTS = gql`
  query RecentIncidents {
    viewer {
      id # <--- this fixes it
      incidents(first: 5) {
        nodes {
          id
        }
      }
    }
  }
`;
```
in both queries fixes the problem.
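Besides selecting `id`, the regression test in the patch above points at another escape hatch: a custom `merge` function for the unnormalized field. A sketch for this schema (`viewer` here is an assumption; the test itself uses a synthetic `info` field):
```ts
import { InMemoryCache } from "@apollo/client";

const cache = new InMemoryCache({
  typePolicies: {
    Query: {
      fields: {
        viewer: {
          merge(existing, incoming, { mergeObjects }) {
            // Shallow-merge incoming viewer data into the existing object
            // instead of replacing it, so the two queries stop discarding
            // each other's incidents(...) fields.
            return existing ? mergeObjects(existing, incoming) : incoming;
          },
        },
      },
    },
  },
});
```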
**Versions**
```
System:
  OS: Windows 10 10.0.18363
Binaries:
  Node: 12.13.0 - C:\Program Files\nodejs\node.EXE
  Yarn: 1.21.1 - C:\Program Files (x86)\Yarn\bin\yarn.CMD
  npm: 6.12.0 - C:\Program Files\nodejs\npm.CMD
Browsers:
  Edge: 44.18362.449.0
npmPackages:
  @apollo/client: ^3.0.0-beta.48 => 3.0.0-beta.48
  @apollo/link-context: ^2.0.0-beta.3 => 2.0.0-beta.3
  @apollo/link-error: ^2.0.0-beta.3 => 2.0.0-beta.3
```
endless loop of requests after updating past 3.0.0-beta.45
I have a similar problem to #6434, but even without any failed queries.
Just an endless loop of requests. Here's a preview of what the "initiator" tab looks like for one of the requests:
<details>
```
| (anonymous) | @ | VM13:1
-- | -- | -- | --
| (anonymous) | @ | createHttpLink.js:78
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| (anonymous) | @ | bundle.esm.js:11
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| (anonymous) | @ | Observable.js:319
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| complete | @ | Concast.js:56
| ./node_modules/@apollo/client/utilities/observables/Concast.js.Concast.start | @ | Concast.js:79
| Concast | @ | Concast.js:71
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.getObservableFromLink | @ | QueryManager.js:420
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.getResultsFromLink | @ | QueryManager.js:459
| resultsFromLink | @ | QueryManager.js:577
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.fetchQueryByPolicy | @ | QueryManager.js:605
| fromVariables | @ | QueryManager.js:526
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.fetchQueryObservable | @ | QueryManager.js:533
| (anonymous) | @ | ObservableQuery.js:341
| ./node_modules/@apollo/client/core/Reobserver.js.Reobserver.reobserve | @ | Reobserver.js:18
| ./node_modules/@apollo/client/core/ObservableQuery.js.ObservableQuery.reobserve | @ | ObservableQuery.js:346
| listeners.add.oqListener | @ | QueryInfo.js:64
| (anonymous) | @ | QueryInfo.js:77
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.notify | @ | QueryInfo.js:77
| (anonymous) | @ | QueryInfo.js:44
| setTimeout (async) | |
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.setDirty | @ | QueryInfo.js:44
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.setDiff | @ | QueryInfo.js:53
| callback | @ | QueryInfo.js:117
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.broadcastWatch | @ | inMemoryCache.js:224
| InMemoryCache._this.maybeBroadcastWatch.Object.makeCacheKey | @ | inMemoryCache.js:26
| recomputeNewValue | @ | bundle.esm.js:247
| Slot.withValue | @ | context.esm.js:69
| reallyRecompute | @ | bundle.esm.js:233
| ./node_modules/optimism/lib/bundle.esm.js.Entry.recompute | @ | bundle.esm.js:160
| optimistic | @ | bundle.esm.js:479
| (anonymous) | @ | inMemoryCache.js:218
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.broadcastWatches | @ | inMemoryCache.js:218
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.performTransaction | @ | inMemoryCache.js:198
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.markResult | @ | QueryInfo.js:135
| (anonymous) | @ | QueryManager.js:467
| (anonymous) | @ | asyncMap.js:13
| (anonymous) | @ | asyncMap.js:13
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| (anonymous) | @ | iteration.js:4
| iterateObserversSafely | @ | iteration.js:4
| next | @ | Concast.js:25
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| next | @ | Observable.js:327
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| next | @ | bundle.esm.js:29
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| (anonymous) | @ | createHttpLink.js:85
| Promise.then (async) | |
| (anonymous) | @ | createHttpLink.js:84
| … (the same cycle repeats; two verbatim repetitions elided) | |
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| (anonymous) | @ | bundle.esm.js:11
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| (anonymous) | @ | Observable.js:319
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| complete | @ | Concast.js:56
| ./node_modules/@apollo/client/utilities/observables/Concast.js.Concast.start | @ | Concast.js:79
| Concast | @ | Concast.js:71
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.getObservableFromLink | @ | QueryManager.js:420
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.getResultsFromLink | @ | QueryManager.js:459
| resultsFromLink | @ | QueryManager.js:577
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.fetchQueryByPolicy | @ | QueryManager.js:605
| fromVariables | @ | QueryManager.js:526
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.fetchQueryObservable | @ | QueryManager.js:533
| (anonymous) | @ | ObservableQuery.js:341
| ./node_modules/@apollo/client/core/Reobserver.js.Reobserver.reobserve | @ | Reobserver.js:18
| ./node_modules/@apollo/client/core/ObservableQuery.js.ObservableQuery.reobserve | @ | ObservableQuery.js:346
| listeners.add.oqListener | @ | QueryInfo.js:64
| (anonymous) | @ | QueryInfo.js:77
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.notify | @ | QueryInfo.js:77
| (anonymous) | @ | QueryInfo.js:44
| setTimeout (async) | |
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.setDirty | @ | QueryInfo.js:44
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.setDiff | @ | QueryInfo.js:53
| callback | @ | QueryInfo.js:117
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.broadcastWatch | @ | inMemoryCache.js:224
| InMemoryCache._this.maybeBroadcastWatch.Object.makeCacheKey | @ | inMemoryCache.js:26
| recomputeNewValue | @ | bundle.esm.js:247
| Slot.withValue | @ | context.esm.js:69
| reallyRecompute | @ | bundle.esm.js:233
| ./node_modules/optimism/lib/bundle.esm.js.Entry.recompute | @ | bundle.esm.js:160
| optimistic | @ | bundle.esm.js:479
| (anonymous) | @ | inMemoryCache.js:218
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.broadcastWatches | @ | inMemoryCache.js:218
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.performTransaction | @ | inMemoryCache.js:198
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.markResult | @ | QueryInfo.js:135
| (anonymous) | @ | QueryManager.js:467
| (anonymous) | @ | asyncMap.js:13
| (anonymous) | @ | asyncMap.js:13
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| (anonymous) | @ | iteration.js:4
| iterateObserversSafely | @ | iteration.js:4
| next | @ | Concast.js:25
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| next | @ | Observable.js:327
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| next | @ | bundle.esm.js:29
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| (anonymous) | @ | createHttpLink.js:85
| Promise.then (async) | |
| (anonymous) | @ | createHttpLink.js:84
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| (anonymous) | @ | bundle.esm.js:11
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| (anonymous) | @ | Observable.js:319
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| complete | @ | Concast.js:56
| ./node_modules/@apollo/client/utilities/observables/Concast.js.Concast.start | @ | Concast.js:79
| Concast | @ | Concast.js:71
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.getObservableFromLink | @ | QueryManager.js:420
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.getResultsFromLink | @ | QueryManager.js:459
| resultsFromLink | @ | QueryManager.js:577
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.fetchQueryByPolicy | @ | QueryManager.js:605
| fromVariables | @ | QueryManager.js:526
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.fetchQueryObservable | @ | QueryManager.js:533
| (anonymous) | @ | ObservableQuery.js:341
| ./node_modules/@apollo/client/core/Reobserver.js.Reobserver.reobserve | @ | Reobserver.js:18
| ./node_modules/@apollo/client/core/ObservableQuery.js.ObservableQuery.reobserve | @ | ObservableQuery.js:346
| listeners.add.oqListener | @ | QueryInfo.js:64
| (anonymous) | @ | QueryInfo.js:77
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.notify | @ | QueryInfo.js:77
| (anonymous) | @ | QueryInfo.js:44
| setTimeout (async) | |
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.setDirty | @ | QueryInfo.js:44
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.setDiff | @ | QueryInfo.js:53
| callback | @ | QueryInfo.js:117
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.broadcastWatch | @ | inMemoryCache.js:224
| InMemoryCache._this.maybeBroadcastWatch.Object.makeCacheKey | @ | inMemoryCache.js:26
| recomputeNewValue | @ | bundle.esm.js:247
| Slot.withValue | @ | context.esm.js:69
| reallyRecompute | @ | bundle.esm.js:233
| ./node_modules/optimism/lib/bundle.esm.js.Entry.recompute | @ | bundle.esm.js:160
| optimistic | @ | bundle.esm.js:479
| (anonymous) | @ | inMemoryCache.js:218
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.broadcastWatches | @ | inMemoryCache.js:218
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.performTransaction | @ | inMemoryCache.js:198
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.markResult | @ | QueryInfo.js:135
| (anonymous) | @ | QueryManager.js:467
| (anonymous) | @ | asyncMap.js:13
| (anonymous) | @ | asyncMap.js:13
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| (anonymous) | @ | iteration.js:4
| iterateObserversSafely | @ | iteration.js:4
| next | @ | Concast.js:25
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| next | @ | Observable.js:327
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| next | @ | bundle.esm.js:29
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| (anonymous) | @ | createHttpLink.js:85
| Promise.then (async) | |
| (anonymous) | @ | createHttpLink.js:84
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| (anonymous) | @ | bundle.esm.js:11
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| (anonymous) | @ | Observable.js:319
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| complete | @ | Concast.js:56
| ./node_modules/@apollo/client/utilities/observables/Concast.js.Concast.start | @ | Concast.js:79
| Concast | @ | Concast.js:71
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.getObservableFromLink | @ | QueryManager.js:420
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.getResultsFromLink | @ | QueryManager.js:459
| resultsFromLink | @ | QueryManager.js:577
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.fetchQueryByPolicy | @ | QueryManager.js:605
| fromVariables | @ | QueryManager.js:526
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.fetchQueryObservable | @ | QueryManager.js:533
| (anonymous) | @ | ObservableQuery.js:341
| ./node_modules/@apollo/client/core/Reobserver.js.Reobserver.reobserve | @ | Reobserver.js:18
| ./node_modules/@apollo/client/core/ObservableQuery.js.ObservableQuery.reobserve | @ | ObservableQuery.js:346
| listeners.add.oqListener | @ | QueryInfo.js:64
| (anonymous) | @ | QueryInfo.js:77
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.notify | @ | QueryInfo.js:77
| (anonymous) | @ | QueryInfo.js:44
| setTimeout (async) | |
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.setDirty | @ | QueryInfo.js:44
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.setDiff | @ | QueryInfo.js:53
| callback | @ | QueryInfo.js:117
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.broadcastWatch | @ | inMemoryCache.js:224
| InMemoryCache._this.maybeBroadcastWatch.Object.makeCacheKey | @ | inMemoryCache.js:26
| recomputeNewValue | @ | bundle.esm.js:247
| Slot.withValue | @ | context.esm.js:69
| reallyRecompute | @ | bundle.esm.js:233
| ./node_modules/optimism/lib/bundle.esm.js.Entry.recompute | @ | bundle.esm.js:160
| optimistic | @ | bundle.esm.js:479
| (anonymous) | @ | inMemoryCache.js:218
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.broadcastWatches | @ | inMemoryCache.js:218
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.performTransaction | @ | inMemoryCache.js:198
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.markResult | @ | QueryInfo.js:135
| (anonymous) | @ | QueryManager.js:467
| (anonymous) | @ | asyncMap.js:13
| (anonymous) | @ | asyncMap.js:13
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| (anonymous) | @ | iteration.js:4
| iterateObserversSafely | @ | iteration.js:4
| next | @ | Concast.js:25
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| next | @ | Observable.js:327
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| next | @ | bundle.esm.js:29
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| (anonymous) | @ | createHttpLink.js:85
| Promise.then (async) | |
| (anonymous) | @ | createHttpLink.js:84
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| (anonymous) | @ | bundle.esm.js:11
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| (anonymous) | @ | Observable.js:319
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| complete | @ | Concast.js:56
| ./node_modules/@apollo/client/utilities/observables/Concast.js.Concast.start | @ | Concast.js:79
| Concast | @ | Concast.js:71
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.getObservableFromLink | @ | QueryManager.js:420
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.getResultsFromLink | @ | QueryManager.js:459
| resultsFromLink | @ | QueryManager.js:577
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.fetchQueryByPolicy | @ | QueryManager.js:605
| fromVariables | @ | QueryManager.js:526
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.fetchQueryObservable | @ | QueryManager.js:533
| (anonymous) | @ | ObservableQuery.js:341
| ./node_modules/@apollo/client/core/Reobserver.js.Reobserver.reobserve | @ | Reobserver.js:18
| ./node_modules/@apollo/client/core/ObservableQuery.js.ObservableQuery.reobserve | @ | ObservableQuery.js:346
| listeners.add.oqListener | @ | QueryInfo.js:64
| (anonymous) | @ | QueryInfo.js:77
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.notify | @ | QueryInfo.js:77
| (anonymous) | @ | QueryInfo.js:44
| setTimeout (async) | |
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.setDirty | @ | QueryInfo.js:44
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.setDiff | @ | QueryInfo.js:53
| callback | @ | QueryInfo.js:117
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.broadcastWatch | @ | inMemoryCache.js:224
| InMemoryCache._this.maybeBroadcastWatch.Object.makeCacheKey | @ | inMemoryCache.js:26
| recomputeNewValue | @ | bundle.esm.js:247
| Slot.withValue | @ | context.esm.js:69
| reallyRecompute | @ | bundle.esm.js:233
| ./node_modules/optimism/lib/bundle.esm.js.Entry.recompute | @ | bundle.esm.js:160
| optimistic | @ | bundle.esm.js:479
| (anonymous) | @ | inMemoryCache.js:218
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.broadcastWatches | @ | inMemoryCache.js:218
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.performTransaction | @ | inMemoryCache.js:198
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.markResult | @ | QueryInfo.js:135
| (anonymous) | @ | QueryManager.js:467
| (anonymous) | @ | asyncMap.js:13
| (anonymous) | @ | asyncMap.js:13
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| (anonymous) | @ | iteration.js:4
| iterateObserversSafely | @ | iteration.js:4
| next | @ | Concast.js:25
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| next | @ | Observable.js:327
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| next | @ | bundle.esm.js:29
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| (anonymous) | @ | createHttpLink.js:85
| Promise.then (async) | |
| (anonymous) | @ | createHttpLink.js:84
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| (anonymous) | @ | bundle.esm.js:11
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| (anonymous) | @ | Observable.js:319
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| complete | @ | Concast.js:56
| ./node_modules/@apollo/client/utilities/observables/Concast.js.Concast.start | @ | Concast.js:79
| Concast | @ | Concast.js:71
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.getObservableFromLink | @ | QueryManager.js:420
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.getResultsFromLink | @ | QueryManager.js:459
| resultsFromLink | @ | QueryManager.js:577
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.fetchQueryByPolicy | @ | QueryManager.js:605
| fromVariables | @ | QueryManager.js:526
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.fetchQueryObservable | @ | QueryManager.js:533
| (anonymous) | @ | ObservableQuery.js:341
| ./node_modules/@apollo/client/core/Reobserver.js.Reobserver.reobserve | @ | Reobserver.js:18
| ./node_modules/@apollo/client/core/ObservableQuery.js.ObservableQuery.reobserve | @ | ObservableQuery.js:346
| listeners.add.oqListener | @ | QueryInfo.js:64
| (anonymous) | @ | QueryInfo.js:77
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.notify | @ | QueryInfo.js:77
| (anonymous) | @ | QueryInfo.js:44
| setTimeout (async) | |
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.setDirty | @ | QueryInfo.js:44
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.setDiff | @ | QueryInfo.js:53
| callback | @ | QueryInfo.js:117
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.broadcastWatch | @ | inMemoryCache.js:224
| InMemoryCache._this.maybeBroadcastWatch.Object.makeCacheKey | @ | inMemoryCache.js:26
| recomputeNewValue | @ | bundle.esm.js:247
| Slot.withValue | @ | context.esm.js:69
| reallyRecompute | @ | bundle.esm.js:233
| ./node_modules/optimism/lib/bundle.esm.js.Entry.recompute | @ | bundle.esm.js:160
| optimistic | @ | bundle.esm.js:479
| (anonymous) | @ | inMemoryCache.js:218
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.broadcastWatches | @ | inMemoryCache.js:218
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.performTransaction | @ | inMemoryCache.js:198
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.markResult | @ | QueryInfo.js:135
| (anonymous) | @ | QueryManager.js:467
| (anonymous) | @ | asyncMap.js:13
| (anonymous) | @ | asyncMap.js:13
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| (anonymous) | @ | iteration.js:4
| iterateObserversSafely | @ | iteration.js:4
| next | @ | Concast.js:25
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| next | @ | Observable.js:327
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| next | @ | bundle.esm.js:29
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| (anonymous) | @ | createHttpLink.js:85
| Promise.then (async) | |
| (anonymous) | @ | createHttpLink.js:84
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| (anonymous) | @ | bundle.esm.js:11
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| (anonymous) | @ | Observable.js:319
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| complete | @ | Concast.js:56
| ./node_modules/@apollo/client/utilities/observables/Concast.js.Concast.start | @ | Concast.js:79
| Concast | @ | Concast.js:71
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.getObservableFromLink | @ | QueryManager.js:420
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.getResultsFromLink | @ | QueryManager.js:459
| resultsFromLink | @ | QueryManager.js:577
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.fetchQueryByPolicy | @ | QueryManager.js:605
| fromVariables | @ | QueryManager.js:526
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.fetchQueryObservable | @ | QueryManager.js:533
| (anonymous) | @ | ObservableQuery.js:341
| ./node_modules/@apollo/client/core/Reobserver.js.Reobserver.reobserve | @ | Reobserver.js:18
| ./node_modules/@apollo/client/core/ObservableQuery.js.ObservableQuery.reobserve | @ | ObservableQuery.js:346
| listeners.add.oqListener | @ | QueryInfo.js:64
| (anonymous) | @ | QueryInfo.js:77
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.notify | @ | QueryInfo.js:77
| (anonymous) | @ | QueryInfo.js:44
| setTimeout (async) | |
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.setDirty | @ | QueryInfo.js:44
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.setDiff | @ | QueryInfo.js:53
| callback | @ | QueryInfo.js:117
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.broadcastWatch | @ | inMemoryCache.js:224
| InMemoryCache._this.maybeBroadcastWatch.Object.makeCacheKey | @ | inMemoryCache.js:26
| recomputeNewValue | @ | bundle.esm.js:247
| Slot.withValue | @ | context.esm.js:69
| reallyRecompute | @ | bundle.esm.js:233
| ./node_modules/optimism/lib/bundle.esm.js.Entry.recompute | @ | bundle.esm.js:160
| optimistic | @ | bundle.esm.js:479
| (anonymous) | @ | inMemoryCache.js:218
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.broadcastWatches | @ | inMemoryCache.js:218
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.performTransaction | @ | inMemoryCache.js:198
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.markResult | @ | QueryInfo.js:135
| (anonymous) | @ | QueryManager.js:467
| (anonymous) | @ | asyncMap.js:13
| (anonymous) | @ | asyncMap.js:13
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| (anonymous) | @ | iteration.js:4
| iterateObserversSafely | @ | iteration.js:4
| next | @ | Concast.js:25
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| next | @ | Observable.js:327
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| next | @ | bundle.esm.js:29
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| (anonymous) | @ | createHttpLink.js:85
| Promise.then (async) | |
| (anonymous) | @ | createHttpLink.js:84
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| (anonymous) | @ | bundle.esm.js:11
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| (anonymous) | @ | Observable.js:319
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| complete | @ | Concast.js:56
| ./node_modules/@apollo/client/utilities/observables/Concast.js.Concast.start | @ | Concast.js:79
| Concast | @ | Concast.js:71
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.getObservableFromLink | @ | QueryManager.js:420
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.getResultsFromLink | @ | QueryManager.js:459
| resultsFromLink | @ | QueryManager.js:577
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.fetchQueryByPolicy | @ | QueryManager.js:605
| fromVariables | @ | QueryManager.js:526
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.fetchQueryObservable | @ | QueryManager.js:533
| (anonymous) | @ | ObservableQuery.js:341
| ./node_modules/@apollo/client/core/Reobserver.js.Reobserver.reobserve | @ | Reobserver.js:18
| ./node_modules/@apollo/client/core/ObservableQuery.js.ObservableQuery.reobserve | @ | ObservableQuery.js:346
| listeners.add.oqListener | @ | QueryInfo.js:64
| (anonymous) | @ | QueryInfo.js:77
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.notify | @ | QueryInfo.js:77
| (anonymous) | @ | QueryInfo.js:44
| setTimeout (async) | |
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.setDirty | @ | QueryInfo.js:44
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.setDiff | @ | QueryInfo.js:53
| callback | @ | QueryInfo.js:117
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.broadcastWatch | @ | inMemoryCache.js:224
| InMemoryCache._this.maybeBroadcastWatch.Object.makeCacheKey | @ | inMemoryCache.js:26
| recomputeNewValue | @ | bundle.esm.js:247
| Slot.withValue | @ | context.esm.js:69
| reallyRecompute | @ | bundle.esm.js:233
| ./node_modules/optimism/lib/bundle.esm.js.Entry.recompute | @ | bundle.esm.js:160
| optimistic | @ | bundle.esm.js:479
| (anonymous) | @ | inMemoryCache.js:218
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.broadcastWatches | @ | inMemoryCache.js:218
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.performTransaction | @ | inMemoryCache.js:198
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.markResult | @ | QueryInfo.js:135
| (anonymous) | @ | QueryManager.js:467
| (anonymous) | @ | asyncMap.js:13
| (anonymous) | @ | asyncMap.js:13
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| (anonymous) | @ | iteration.js:4
| iterateObserversSafely | @ | iteration.js:4
| next | @ | Concast.js:25
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| next | @ | Observable.js:327
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| next | @ | bundle.esm.js:29
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| (anonymous) | @ | createHttpLink.js:85
| Promise.then (async) | |
| (anonymous) | @ | createHttpLink.js:84
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| (anonymous) | @ | bundle.esm.js:11
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| (anonymous) | @ | Observable.js:319
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| complete | @ | Concast.js:56
| ./node_modules/@apollo/client/utilities/observables/Concast.js.Concast.start | @ | Concast.js:79
| Concast | @ | Concast.js:71
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.getObservableFromLink | @ | QueryManager.js:420
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.getResultsFromLink | @ | QueryManager.js:459
| resultsFromLink | @ | QueryManager.js:577
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.fetchQueryByPolicy | @ | QueryManager.js:605
| fromVariables | @ | QueryManager.js:526
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.fetchQueryObservable | @ | QueryManager.js:533
| (anonymous) | @ | ObservableQuery.js:341
| ./node_modules/@apollo/client/core/Reobserver.js.Reobserver.reobserve | @ | Reobserver.js:18
| ./node_modules/@apollo/client/core/ObservableQuery.js.ObservableQuery.reobserve | @ | ObservableQuery.js:346
| listeners.add.oqListener | @ | QueryInfo.js:64
| (anonymous) | @ | QueryInfo.js:77
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.notify | @ | QueryInfo.js:77
| (anonymous) | @ | QueryInfo.js:44
| setTimeout (async) | |
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.setDirty | @ | QueryInfo.js:44
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.setDiff | @ | QueryInfo.js:53
| callback | @ | QueryInfo.js:117
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.broadcastWatch | @ | inMemoryCache.js:224
| InMemoryCache._this.maybeBroadcastWatch.Object.makeCacheKey | @ | inMemoryCache.js:26
| recomputeNewValue | @ | bundle.esm.js:247
| Slot.withValue | @ | context.esm.js:69
| reallyRecompute | @ | bundle.esm.js:233
| ./node_modules/optimism/lib/bundle.esm.js.Entry.recompute | @ | bundle.esm.js:160
| optimistic | @ | bundle.esm.js:479
| (anonymous) | @ | inMemoryCache.js:218
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.broadcastWatches | @ | inMemoryCache.js:218
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.performTransaction | @ | inMemoryCache.js:198
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.markResult | @ | QueryInfo.js:135
| (anonymous) | @ | QueryManager.js:467
| (anonymous) | @ | asyncMap.js:13
| (anonymous) | @ | asyncMap.js:13
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| (anonymous) | @ | iteration.js:4
| iterateObserversSafely | @ | iteration.js:4
| next | @ | Concast.js:25
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| next | @ | Observable.js:327
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| next | @ | bundle.esm.js:29
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| (anonymous) | @ | createHttpLink.js:85
| Promise.then (async) | |
| (anonymous) | @ | createHttpLink.js:84
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| (anonymous) | @ | bundle.esm.js:11
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| (anonymous) | @ | Observable.js:319
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| complete | @ | Concast.js:56
| ./node_modules/@apollo/client/utilities/observables/Concast.js.Concast.start | @ | Concast.js:79
| Concast | @ | Concast.js:71
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.getObservableFromLink | @ | QueryManager.js:420
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.getResultsFromLink | @ | QueryManager.js:459
| resultsFromLink | @ | QueryManager.js:577
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.fetchQueryByPolicy | @ | QueryManager.js:605
| fromVariables | @ | QueryManager.js:526
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.fetchQueryObservable | @ | QueryManager.js:533
| (anonymous) | @ | ObservableQuery.js:341
| ./node_modules/@apollo/client/core/Reobserver.js.Reobserver.reobserve | @ | Reobserver.js:18
| ./node_modules/@apollo/client/core/ObservableQuery.js.ObservableQuery.reobserve | @ | ObservableQuery.js:346
| listeners.add.oqListener | @ | QueryInfo.js:64
| (anonymous) | @ | QueryInfo.js:77
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.notify | @ | QueryInfo.js:77
| (anonymous) | @ | QueryInfo.js:44
| setTimeout (async) | |
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.setDirty | @ | QueryInfo.js:44
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.setDiff | @ | QueryInfo.js:53
| callback | @ | QueryInfo.js:117
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.broadcastWatch | @ | inMemoryCache.js:224
| InMemoryCache._this.maybeBroadcastWatch.Object.makeCacheKey | @ | inMemoryCache.js:26
| recomputeNewValue | @ | bundle.esm.js:247
| Slot.withValue | @ | context.esm.js:69
| reallyRecompute | @ | bundle.esm.js:233
| ./node_modules/optimism/lib/bundle.esm.js.Entry.recompute | @ | bundle.esm.js:160
| optimistic | @ | bundle.esm.js:479
| (anonymous) | @ | inMemoryCache.js:218
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.broadcastWatches | @ | inMemoryCache.js:218
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.performTransaction | @ | inMemoryCache.js:198
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.markResult | @ | QueryInfo.js:135
| (anonymous) | @ | QueryManager.js:467
| (anonymous) | @ | asyncMap.js:13
| (anonymous) | @ | asyncMap.js:13
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| (anonymous) | @ | iteration.js:4
| iterateObserversSafely | @ | iteration.js:4
| next | @ | Concast.js:25
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| next | @ | Observable.js:327
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| next | @ | bundle.esm.js:29
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| (anonymous) | @ | createHttpLink.js:85
| Promise.then (async) | |
| (anonymous) | @ | createHttpLink.js:84
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| (anonymous) | @ | bundle.esm.js:11
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| (anonymous) | @ | Observable.js:319
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| complete | @ | Concast.js:56
| ./node_modules/@apollo/client/utilities/observables/Concast.js.Concast.start | @ | Concast.js:79
| Concast | @ | Concast.js:71
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.getObservableFromLink | @ | QueryManager.js:420
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.getResultsFromLink | @ | QueryManager.js:459
| resultsFromLink | @ | QueryManager.js:577
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.fetchQueryByPolicy | @ | QueryManager.js:605
| fromVariables | @ | QueryManager.js:526
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.fetchQueryObservable | @ | QueryManager.js:533
| (anonymous) | @ | ObservableQuery.js:341
| ./node_modules/@apollo/client/core/Reobserver.js.Reobserver.reobserve | @ | Reobserver.js:18
| ./node_modules/@apollo/client/core/ObservableQuery.js.ObservableQuery.reobserve | @ | ObservableQuery.js:346
| listeners.add.oqListener | @ | QueryInfo.js:64
| (anonymous) | @ | QueryInfo.js:77
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.notify | @ | QueryInfo.js:77
| (anonymous) | @ | QueryInfo.js:44
| setTimeout (async) | |
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.setDirty | @ | QueryInfo.js:44
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.setDiff | @ | QueryInfo.js:53
| callback | @ | QueryInfo.js:117
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.broadcastWatch | @ | inMemoryCache.js:224
| InMemoryCache._this.maybeBroadcastWatch.Object.makeCacheKey | @ | inMemoryCache.js:26
| recomputeNewValue | @ | bundle.esm.js:247
| Slot.withValue | @ | context.esm.js:69
| reallyRecompute | @ | bundle.esm.js:233
| ./node_modules/optimism/lib/bundle.esm.js.Entry.recompute | @ | bundle.esm.js:160
| optimistic | @ | bundle.esm.js:479
| (anonymous) | @ | inMemoryCache.js:218
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.broadcastWatches | @ | inMemoryCache.js:218
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.performTransaction | @ | inMemoryCache.js:198
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.markResult | @ | QueryInfo.js:135
| (anonymous) | @ | QueryManager.js:467
| (anonymous) | @ | asyncMap.js:13
| (anonymous) | @ | asyncMap.js:13
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| (anonymous) | @ | iteration.js:4
| iterateObserversSafely | @ | iteration.js:4
| next | @ | Concast.js:25
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| next | @ | Observable.js:327
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| next | @ | bundle.esm.js:29
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| (anonymous) | @ | createHttpLink.js:85
| Promise.then (async) | |
| (anonymous) | @ | createHttpLink.js:84
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| (anonymous) | @ | bundle.esm.js:11
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| (anonymous) | @ | Observable.js:319
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| complete | @ | Concast.js:56
| ./node_modules/@apollo/client/utilities/observables/Concast.js.Concast.start | @ | Concast.js:79
| Concast | @ | Concast.js:71
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.getObservableFromLink | @ | QueryManager.js:420
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.getResultsFromLink | @ | QueryManager.js:459
| resultsFromLink | @ | QueryManager.js:577
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.fetchQueryByPolicy | @ | QueryManager.js:605
| fromVariables | @ | QueryManager.js:526
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.fetchQueryObservable | @ | QueryManager.js:533
| (anonymous) | @ | ObservableQuery.js:341
| ./node_modules/@apollo/client/core/Reobserver.js.Reobserver.reobserve | @ | Reobserver.js:18
| ./node_modules/@apollo/client/core/ObservableQuery.js.ObservableQuery.reobserve | @ | ObservableQuery.js:346
| listeners.add.oqListener | @ | QueryInfo.js:64
| (anonymous) | @ | QueryInfo.js:77
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.notify | @ | QueryInfo.js:77
| (anonymous) | @ | QueryInfo.js:44
| setTimeout (async) | |
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.setDirty | @ | QueryInfo.js:44
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.setDiff | @ | QueryInfo.js:53
| callback | @ | QueryInfo.js:117
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.broadcastWatch | @ | inMemoryCache.js:224
| InMemoryCache._this.maybeBroadcastWatch.Object.makeCacheKey | @ | inMemoryCache.js:26
| recomputeNewValue | @ | bundle.esm.js:247
| Slot.withValue | @ | context.esm.js:69
| reallyRecompute | @ | bundle.esm.js:233
| ./node_modules/optimism/lib/bundle.esm.js.Entry.recompute | @ | bundle.esm.js:160
| optimistic | @ | bundle.esm.js:479
| (anonymous) | @ | inMemoryCache.js:218
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.broadcastWatches | @ | inMemoryCache.js:218
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.performTransaction | @ | inMemoryCache.js:198
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.markResult | @ | QueryInfo.js:135
| (anonymous) | @ | QueryManager.js:467
| (anonymous) | @ | asyncMap.js:13
| (anonymous) | @ | asyncMap.js:13
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| (anonymous) | @ | iteration.js:4
| iterateObserversSafely | @ | iteration.js:4
| next | @ | Concast.js:25
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| next | @ | Observable.js:327
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| next | @ | bundle.esm.js:29
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| (anonymous) | @ | createHttpLink.js:85
| Promise.then (async) | |
| (anonymous) | @ | createHttpLink.js:84
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| (anonymous) | @ | bundle.esm.js:11
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| (anonymous) | @ | Observable.js:319
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| complete | @ | Concast.js:56
| ./node_modules/@apollo/client/utilities/observables/Concast.js.Concast.start | @ | Concast.js:79
| Concast | @ | Concast.js:71
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.getObservableFromLink | @ | QueryManager.js:420
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.getResultsFromLink | @ | QueryManager.js:459
| resultsFromLink | @ | QueryManager.js:577
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.fetchQueryByPolicy | @ | QueryManager.js:605
| fromVariables | @ | QueryManager.js:526
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.fetchQueryObservable | @ | QueryManager.js:533
| (anonymous) | @ | ObservableQuery.js:341
| ./node_modules/@apollo/client/core/Reobserver.js.Reobserver.reobserve | @ | Reobserver.js:18
| ./node_modules/@apollo/client/core/ObservableQuery.js.ObservableQuery.reobserve | @ | ObservableQuery.js:346
| listeners.add.oqListener | @ | QueryInfo.js:64
| (anonymous) | @ | QueryInfo.js:77
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.notify | @ | QueryInfo.js:77
| (anonymous) | @ | QueryInfo.js:44
| setTimeout (async) | |
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.setDirty | @ | QueryInfo.js:44
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.setDiff | @ | QueryInfo.js:53
| callback | @ | QueryInfo.js:117
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.broadcastWatch | @ | inMemoryCache.js:224
| InMemoryCache._this.maybeBroadcastWatch.Object.makeCacheKey | @ | inMemoryCache.js:26
| recomputeNewValue | @ | bundle.esm.js:247
| Slot.withValue | @ | context.esm.js:69
| reallyRecompute | @ | bundle.esm.js:233
| ./node_modules/optimism/lib/bundle.esm.js.Entry.recompute | @ | bundle.esm.js:160
| optimistic | @ | bundle.esm.js:479
| (anonymous) | @ | inMemoryCache.js:218
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.broadcastWatches | @ | inMemoryCache.js:218
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.performTransaction | @ | inMemoryCache.js:198
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.markResult | @ | QueryInfo.js:135
| (anonymous) | @ | QueryManager.js:467
| (anonymous) | @ | asyncMap.js:13
| (anonymous) | @ | asyncMap.js:13
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| (anonymous) | @ | iteration.js:4
| iterateObserversSafely | @ | iteration.js:4
| next | @ | Concast.js:25
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| next | @ | Observable.js:327
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| next | @ | bundle.esm.js:29
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| (anonymous) | @ | createHttpLink.js:85
| Promise.then (async) | |
| (anonymous) | @ | createHttpLink.js:84
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| (anonymous) | @ | bundle.esm.js:11
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| (anonymous) | @ | Observable.js:319
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| complete | @ | Concast.js:56
| ./node_modules/@apollo/client/utilities/observables/Concast.js.Concast.start | @ | Concast.js:79
| Concast | @ | Concast.js:71
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.getObservableFromLink | @ | QueryManager.js:420
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.getResultsFromLink | @ | QueryManager.js:459
| resultsFromLink | @ | QueryManager.js:577
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.fetchQueryByPolicy | @ | QueryManager.js:605
| fromVariables | @ | QueryManager.js:526
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.fetchQueryObservable | @ | QueryManager.js:533
| (anonymous) | @ | ObservableQuery.js:341
| ./node_modules/@apollo/client/core/Reobserver.js.Reobserver.reobserve | @ | Reobserver.js:18
| ./node_modules/@apollo/client/core/ObservableQuery.js.ObservableQuery.reobserve | @ | ObservableQuery.js:346
| listeners.add.oqListener | @ | QueryInfo.js:64
| (anonymous) | @ | QueryInfo.js:77
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.notify | @ | QueryInfo.js:77
| (anonymous) | @ | QueryInfo.js:44
| setTimeout (async) | |
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.setDirty | @ | QueryInfo.js:44
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.setDiff | @ | QueryInfo.js:53
| callback | @ | QueryInfo.js:117
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.broadcastWatch | @ | inMemoryCache.js:224
| InMemoryCache._this.maybeBroadcastWatch.Object.makeCacheKey | @ | inMemoryCache.js:26
| recomputeNewValue | @ | bundle.esm.js:247
| Slot.withValue | @ | context.esm.js:69
| reallyRecompute | @ | bundle.esm.js:233
| ./node_modules/optimism/lib/bundle.esm.js.Entry.recompute | @ | bundle.esm.js:160
| optimistic | @ | bundle.esm.js:479
| (anonymous) | @ | inMemoryCache.js:218
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.broadcastWatches | @ | inMemoryCache.js:218
| ./node_modules/@apollo/client/cache/inmemory/inMemoryCache.js.InMemoryCache.performTransaction | @ | inMemoryCache.js:198
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.markResult | @ | QueryInfo.js:135
| (anonymous) | @ | QueryManager.js:467
| (anonymous) | @ | asyncMap.js:13
| (anonymous) | @ | asyncMap.js:13
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| (anonymous) | @ | iteration.js:4
| iterateObserversSafely | @ | iteration.js:4
| next | @ | Concast.js:25
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| next | @ | Observable.js:327
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| next | @ | bundle.esm.js:29
| notifySubscription | @ | Observable.js:135
| onNotify | @ | Observable.js:179
| next | @ | Observable.js:235
| (anonymous) | @ | createHttpLink.js:85
| Promise.then (async) | |
| (anonymous) | @ | createHttpLink.js:84
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| (anonymous) | @ | bundle.esm.js:11
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| (anonymous) | @ | Observable.js:319
| Subscription | @ | Observable.js:197
| subscribe | @ | Observable.js:279
| complete | @ | Concast.js:56
| ./node_modules/@apollo/client/utilities/observables/Concast.js.Concast.start | @ | Concast.js:79
| Concast | @ | Concast.js:71
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.getObservableFromLink | @ | QueryManager.js:420
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.getResultsFromLink | @ | QueryManager.js:459
| resultsFromLink | @ | QueryManager.js:577
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.fetchQueryByPolicy | @ | QueryManager.js:605
| fromVariables | @ | QueryManager.js:526
| ./node_modules/@apollo/client/core/QueryManager.js.QueryManager.fetchQueryObservable | @ | QueryManager.js:533
| (anonymous) | @ | ObservableQuery.js:341
| ./node_modules/@apollo/client/core/Reobserver.js.Reobserver.reobserve | @ | Reobserver.js:18
| ./node_modules/@apollo/client/core/ObservableQuery.js.ObservableQuery.reobserve | @ | ObservableQuery.js:346
| listeners.add.oqListener | @ | QueryInfo.js:64
| (anonymous) | @ | QueryInfo.js:77
| ./node_modules/@apollo/client/core/QueryInfo.js.QueryInfo.notify | @ | QueryInfo.js:77
| (anonymous) | @ | QueryInfo.js:44
```
</details>
My issue started happening exactly with version `3.0.0-beta.46`, and the latest version I tested, `3.0.0-rc.4`, still reproduces the bug.
Running my app with `3.0.0-beta.45` works just fine, tested without any other code changes. That version performs great, without any endless requests going out.
**Intended outcome:**
No request loop.
**Actual outcome:**
There is an endless loop of requests going from my browser to the GraphQL endpoint.
**How to reproduce the issue:**
Not sure, but I will gladly test out any potential fix. We have an SSR app with a couple of queries hooked into several components in React Router. More specifically, the page suffering from the issue renders a https://github.com/JustFly1984/react-google-maps-api component that displays markers on the map.
It also has a side view with a regular list, and that list is backed by another GraphQL query.
**Versions**
```
System:
OS: Linux 5.4 Ubuntu 20.04 LTS (Focal Fossa)
Binaries:
Node: 12.14.1 - ~/.nvm/versions/node/v12.14.1/bin/node
Yarn: 1.22.4 - ~/.yarn/bin/yarn
npm: 6.13.4 - ~/.nvm/versions/node/v12.14.1/bin/npm
Browsers:
Chrome: 83.0.4103.97
Firefox: 77.0.1
npmPackages:
@apollo/client: 3.0.0-beta.46 => 3.0.0-beta.46
@apollo/gateway: ^0.14.1 => 0.14.1
@apollo/link-batch-http: ^2.0.0-beta.3 => 2.0.0-beta.3
@apollo/link-error: ^2.0.0-beta.3 => 2.0.0-beta.3
@apollo/link-ws: ^2.0.0-beta.3 => 2.0.0-beta.3
@apollo/react-ssr: ^3.1.5 => 3.1.5
@apollo/server: ^3.0.0-alpha.1 => 3.0.0-alpha.1
apollo-client: ^2.6.10 => 2.6.10
apollo-link-http: ^1.5.17 => 1.5.17
apollo-server: ^2.14.4 => 2.14.4
```
| Ran into the same issue. I've narrowed it down to being introduced in `v3.0.0-beta.46`.
@vhenzl Can you try this?
```ts
new InMemoryCache({
typePolicies: {
// Assuming the __typename for the viewer object is "Viewer":
Viewer: {
// This means the Viewer object is a singleton whose identity
// is independent of any of its fields (other than __typename),
// so the cache can assume that any two Viewer objects are merely
// different views of the same object, and safely merge their
// fields rather than just replacing one with the other, which is
// what seems to be happening right now. This is different from
// keyFields: false in that it allows the Viewer singleton object
// to be normalized, like other entity objects, rather than treating
// it as unidentified data.
keyFields: [],
},
},
})
```
You could also write `keyFields: ["id"]` if there are going to be multiple different `Viewer` objects within the same application, and the cache will enforce that the `id` field is always present.
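For illustration, that alternative would look something like this (a sketch only, assuming the same `Viewer` typename):
```ts
new InMemoryCache({
  typePolicies: {
    Viewer: {
      // Normalize each Viewer by its id; the cache will complain if a
      // Viewer result ever arrives without an id field selected.
      keyFields: ["id"],
    },
  },
})
```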
@benjamn Thanks for the answer. Unfortunately, I can't use the `keyFields: []` trick, as `viewer` is a `User` and can appear again anywhere in the tree. So the only option for me is enforcing `id` on `User`.
Is this new behaviour considered a bug, or is it intended and expected?
If it's expected, my guess is that the same problem can occur for any type anywhere in the query, not just for top-level fields. Right?
Then the safest way to prevent it would be to enforce querying the `id` field for all types that have it, and possibly to autogenerate the whole `typePolicies` object from the schema with a tool like [@graphql-codegen](https://github.com/dotansimha/graphql-code-generator). Would you agree?
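A minimal sketch of that autogeneration idea, assuming a plain graphql-js schema and the policy that every object type declaring an `id` field should be keyed by it (the helper name is invented):
```ts
import { buildSchema, isObjectType } from 'graphql';
import { TypePolicies } from '@apollo/client';

// Hypothetical helper: emit keyFields: ['id'] for every object type
// in the schema that declares an id field.
function typePoliciesFromSchema(sdl: string): TypePolicies {
  const schema = buildSchema(sdl);
  const policies: TypePolicies = {};
  for (const type of Object.values(schema.getTypeMap())) {
    if (
      isObjectType(type) &&
      !type.name.startsWith('__') && // skip introspection types
      type.getFields()['id']
    ) {
      policies[type.name] = { keyFields: ['id'] };
    }
  }
  return policies;
}
```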
Most likely a bug: local mutations doing `cache.modify` will also cause backend queries to be dispatched (with `id`), like:
```ts
export const toggleComments = (
  root: {},
  variables: {
    id: string,
    commentsOpenColumnIndex: number | null
  },
  { cache }: LocalResolverContext<NormalizedCacheObject, {}>
) => {
  const id = cache.identify({
    __typename: 'ReportType',
    id: variables.id
  });
  if (id) {
    // Note: beta-era signature, cache.modify(modifiers, id).
    cache.modify({
      _commentsOpenColumnIndex: _currentValue => variables.commentsOpenColumnIndex
    }, id);
    return true;
  } else {
    return false;
  }
};
```
also working fine on `3.0.0-beta.45`
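For readers on later stable 3.x releases: `cache.modify` was changed to take a single options object, so the equivalent call would read roughly like this (same field name as above, `id` coming from `cache.identify`):
```ts
cache.modify({
  id, // e.g. the value returned by cache.identify(...)
  fields: {
    _commentsOpenColumnIndex: () => variables.commentsOpenColumnIndex,
  },
});
```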
This issue exists for me in beta 50. Adding `id` to the GraphQL query also resolved the bug.
| 2020-06-16T17:01:37Z | 3 |
apollographql/apollo-client | 6,353 | apollographql__apollo-client-6353 | [
"6305"
] | 75caa44265a540eaef844fdff41a3c4075842d97 | diff --git a/src/core/QueryManager.ts b/src/core/QueryManager.ts
--- a/src/core/QueryManager.ts
+++ b/src/core/QueryManager.ts
@@ -836,6 +836,24 @@ export class QueryManager<TStore> {
context = {},
} = options;
+ if (fetchPolicy === "cache-and-network" ||
+ fetchPolicy === "network-only") {
+ // When someone chooses cache-and-network or network-only as their
+ // initial FetchPolicy, they almost certainly do not want future cache
+ // updates to trigger unconditional network requests, which is what
+ // repeatedly applying the cache-and-network or network-only policies
+ // would seem to require. Instead, when the cache reports an update
+ // after the initial network request, subsequent network requests should
+ // be triggered only if the cache result is incomplete. This behavior
+ // corresponds exactly to switching to a cache-first FetchPolicy, so we
+ // modify options.fetchPolicy here for the next fetchQueryObservable
+ // call, using the same options object that the Reobserver always passes
+ // to fetchQueryObservable. Note: if these FetchPolicy transitions get
+ // much more complicated, we might consider using some sort of state
+ // machine to capture the transition rules.
+ options.fetchPolicy = "cache-first";
+ }
+
const mightUseNetwork =
fetchPolicy === "cache-first" ||
fetchPolicy === "cache-and-network" ||
| diff --git a/src/core/__tests__/ObservableQuery.ts b/src/core/__tests__/ObservableQuery.ts
--- a/src/core/__tests__/ObservableQuery.ts
+++ b/src/core/__tests__/ObservableQuery.ts
@@ -933,15 +933,29 @@ describe('ObservableQuery', () => {
});
describe('refetch', () => {
- type TFQO = QueryManager<any>["fetchQueryObservable"];
function mockFetchQuery(queryManager: QueryManager<any>) {
- const origFetchQuery: TFQO = (queryManager as any).fetchQueryObservable;
- return (queryManager as any).fetchQueryObservable = jest.fn<
- ReturnType<TFQO>,
- Parameters<TFQO>
+ const fetchQueryObservable = queryManager.fetchQueryObservable;
+ const fetchQueryByPolicy: QueryManager<any>["fetchQueryByPolicy"] =
+ (queryManager as any).fetchQueryByPolicy;
+
+ const mock = <T extends
+ | typeof fetchQueryObservable
+ | typeof fetchQueryByPolicy
+ >(original: T) => jest.fn<
+ ReturnType<T>,
+ Parameters<T>
>(function () {
- return origFetchQuery.apply(queryManager, arguments);
+ return original.apply(queryManager, arguments);
});
+
+ const mocks = {
+ fetchQueryObservable: mock(fetchQueryObservable),
+ fetchQueryByPolicy: mock(fetchQueryByPolicy),
+ };
+
+ Object.assign(queryManager, mocks);
+
+ return mocks;
}
itAsync('calls fetchRequest with fetchPolicy `network-only` when using a non-networked fetch policy', (resolve, reject) => {
@@ -964,15 +978,24 @@ describe('ObservableQuery', () => {
fetchPolicy: 'cache-first',
});
- const mocked = mockFetchQuery(queryManager);
+ const mocks = mockFetchQuery(queryManager);
subscribeAndCount(reject, observable, handleCount => {
if (handleCount === 1) {
observable.refetch(differentVariables);
} else if (handleCount === 2) {
- expect(mocked.mock.calls[1][1].fetchPolicy).toEqual(
- 'network-only',
- );
+ const fqbpCalls = mocks.fetchQueryByPolicy.mock.calls;
+ expect(fqbpCalls.length).toBe(2);
+ expect(fqbpCalls[1][1].fetchPolicy).toEqual('network-only');
+ // Although the options.fetchPolicy we passed just now to
+ // fetchQueryByPolicy should have been network-only,
+ // observable.options.fetchPolicy should now be updated to
+ // cache-first, since network-only (and cache-and-network) fetch
+ // policies fall back to cache-first after the first request.
+ expect(observable.options.fetchPolicy).toBe('cache-first');
+ const fqoCalls = mocks.fetchQueryObservable.mock.calls;
+ expect(fqoCalls.length).toBe(2);
+ expect(fqoCalls[1][1].fetchPolicy).toEqual('cache-first');
resolve();
}
});
@@ -1000,15 +1023,24 @@ describe('ObservableQuery', () => {
fetchPolicy: 'no-cache',
});
- const mocked = mockFetchQuery(queryManager);
+ const mocks = mockFetchQuery(queryManager);
subscribeAndCount(reject, observable, handleCount => {
if (handleCount === 1) {
observable.refetch(differentVariables);
} else if (handleCount === 2) {
- expect(
- mocked.mock.calls[1][1].fetchPolicy,
- ).toEqual('no-cache');
+ const fqbpCalls = mocks.fetchQueryByPolicy.mock.calls;
+ expect(fqbpCalls.length).toBe(2);
+ expect(fqbpCalls[1][1].fetchPolicy).toBe('no-cache');
+
+ // Unlike network-only or cache-and-network, the no-cache
+ // FetchPolicy does not switch to cache-first after the first
+ // network request.
+ expect(observable.options.fetchPolicy).toBe('no-cache');
+ const fqoCalls = mocks.fetchQueryObservable.mock.calls;
+ expect(fqoCalls.length).toBe(2);
+ expect(fqoCalls[1][1].fetchPolicy).toBe('no-cache');
+
resolve();
}
});
@@ -1159,34 +1191,35 @@ describe('ObservableQuery', () => {
networkStatus: NetworkStatus.ready,
});
+ const oldLinkObs = linkObservable;
// Make the next network request fail.
linkObservable = errorObservable;
observable.refetch().then(
- result => {
- expect(result).toEqual({
- data: {
- counter: 3,
- name: 'Ben',
- },
- });
+ () => {
+ reject(new Error('should have gotten an error'));
},
+
error => {
expect(error).toBe(intentionalNetworkFailure);
+
+ // Switch back from errorObservable.
+ linkObservable = oldLinkObs;
+
+ observable.refetch().then(result => {
+ expect(result).toEqual({
+ data: {
+ counter: 3,
+ name: 'Ben',
+ },
+ loading: false,
+ networkStatus: NetworkStatus.ready,
+ });
+ resolve();
+ }, reject);
},
);
- } else if (handleCount === 3) {
- expect(result).toEqual({
- data: {
- counter: 3,
- name: 'Ben',
- },
- loading: true,
- networkStatus: NetworkStatus.refetch,
- });
-
- resolve();
- } else if (handleCount > 4) {
+ } else if (handleCount > 2) {
reject(new Error('should not get here'));
}
},
@@ -1545,38 +1578,38 @@ describe('ObservableQuery', () => {
},
);
- queryManager.query({ query, variables }).then(() => {
+ queryManager.query({ query, variables }).then(result => {
+ expect(result).toEqual({
+ data: dataOne,
+ loading: false,
+ networkStatus: NetworkStatus.ready,
+ });
+
const observable = queryManager.watchQuery({
query,
variables,
fetchPolicy: 'network-only',
});
- expect(stripSymbols(observable.getCurrentResult())).toEqual({
- data: undefined,
+
+ expect(observable.getCurrentResult()).toEqual({
+ data: void 0,
loading: true,
networkStatus: 1,
partial: false,
});
subscribeAndCount(reject, observable, (handleCount, subResult) => {
- const {
- data,
- loading,
- networkStatus,
- } = observable.getCurrentResult();
-
if (handleCount === 1) {
expect(subResult).toEqual({
- data,
- loading,
- networkStatus,
+ loading: true,
+ networkStatus: NetworkStatus.loading,
partial: false,
});
} else if (handleCount === 2) {
- expect(stripSymbols(subResult)).toEqual({
+ expect(subResult).toEqual({
data: dataTwo,
loading: false,
- networkStatus: 7,
+ networkStatus: NetworkStatus.ready,
});
resolve();
}
| Cache-and-network should not make a network call when watched cached items change via subscription/mutation
*This is new behavior as of beta.46 and has prevented us from moving past beta.45 (most likely a result of https://github.com/apollographql/apollo-client/pull/6221).*
**Intended outcome:**
A query with `fetchPolicy: cache-and-network` should not make a network call when a cached item it is watching changes. It should simply re-render with the data from the cache.
**Actual outcome:**
When an individual cached item changes (via subscription or mutation), any query with `fetchPolicy: cache-and-network` that contains that cached item makes a network call to refetch.
**How to reproduce the issue:**
As instructed, I've created a full reproduction here: https://github.com/jebonfig/react-apollo-error-template
**Notes**
`cache-and-network` is an important fetch policy in our app: it gives us quick renders from the cache on subsequent component mounts, and then immediately follows up with the server for consistency.
A common example use case is:
- a `cache-and-network` list of cars query
- a `cache-only` query which pulls an individual car from the cars query data via typePolicy
- *A quick side note: I've also noticed new warnings ("Missing cache result fields: person"). If I've specified a `cache-only` fetch policy on the query, and a type policy to pull the item from the list, then the item not being in the cache is a perfectly valid scenario; I'd argue a warning here is unnecessary noise.*
- a `useSubscription` watching the list of cars
When we receive a subscription update for an individual car, that car is updated in the cache with the new data as expected. However, the new, undesired behavior is that the list query then unnecessarily refetches from the network instead of just re-rendering from the cache updates that have already been applied. The setup is sketched below.
You might say this is intended, given the beta.46 changes mentioned above. If that's the case, how can I retain the initial on-mount query behavior of `cache-and-network`, _AND_ the `cache-first` updating behavior when responding to mutations and subscriptions?
**In other words, I want my query to be `cache-and-network` on initial component mount, but `cache-first` when the cached data my query is watching has been changed by a mutation or subscription.**
**Versions**
```
System:
OS: macOS High Sierra 10.13.6
Binaries:
Node: 11.2.0 - /usr/local/bin/node
npm: 6.4.1 - /usr/local/bin/npm
Browsers:
Chrome: 81.0.4044.138
Safari: 13.1
npmPackages:
@apollo/client: 3.0.0-beta.48 => 3.0.0-beta.48
@apollo/link-error: ^2.0.0-beta.3 => 2.0.0-beta.3
@apollo/link-ws: ^2.0.0-beta.3 => 2.0.0-beta.3
```
| The problem also occurs with the `"network-only"` fetch policy. Whenever the cache is updated (either manually or by a mutation), the query is refetched. | 2020-05-28T23:57:44Z | 3 |
apollographql/apollo-client | 5,116 | apollographql__apollo-client-5116 | [
"5733",
"5733"
] | 854b1c4c86326d5873c27edb3a897ef1f7f72f44 | diff --git a/config/dangerfile.ts b/config/dangerfile.ts
deleted file mode 100644
--- a/config/dangerfile.ts
+++ /dev/null
@@ -1,120 +0,0 @@
-const { includes } = require('lodash');
-const fs = require('fs');
-
-// Setup
-const github = danger.github;
-const pr = github.pr;
-const commits = github.commits;
-const modified = danger.git.modified_files;
-const bodyAndTitle = (pr.body + pr.title).toLowerCase();
-console.log(commits.map(({ sha }) => sha));
-
-// Custom modifiers for people submitting PRs to be able to say "skip this"
-const trivialPR = bodyAndTitle.includes('trivial');
-const acceptedNoTests = bodyAndTitle.includes('skip new tests');
-
-const typescriptOnly = (file: string) => includes(file, '.ts');
-const filesOnly = (file: string) =>
- fs.existsSync(file) && fs.lstatSync(file).isFile();
-
-// Custom subsets of known files
-const modifiedAppFiles = modified
- .filter(p => includes(p, 'src/') || includes(p, 'test/'))
- .filter(p => filesOnly(p) && typescriptOnly(p));
-
-// Takes a list of file paths, and converts it into clickable links
-const linkableFiles = paths => {
- const repoURL = pr.head.repo.html_url;
- const ref = pr.head.ref;
- const links = paths.map(path => {
- return createLink(`${repoURL}/blob/${ref}/${path}`, path);
- });
- return toSentence(links);
-};
-
-// ["1", "2", "3"] to "1, 2 and 3"
-const toSentence = (array: Array<string>): string => {
- if (array.length === 1) {
- return array[0];
- }
- return array.slice(0, array.length - 1).join(', ') + ' and ' + array.pop();
-};
-
-// ("/href/thing", "name") to "<a href="/href/thing">name</a>"
-const createLink = (href: string, text: string): string =>
- `<a href='${href}'>${text}</a>`;
-
-// Raise about missing code inside files
-const raiseIssueAboutPaths = (
- type: Function,
- paths: string[],
- codeToInclude: string,
-) => {
- if (paths.length > 0) {
- const files = linkableFiles(paths);
- const strict = '<code>' + codeToInclude + '</code>';
- type(`Please ensure that ${strict} is enabled on: ${files}`);
- }
-};
-
-console.log('GitHub PR Username:', pr && pr.user && pr.user.login);
-
-const githubBotUsernames = ['greenkeeper', 'renovate[bot]'];
-
-const isBot =
- pr && pr.user && pr.user.login && githubBotUsernames.includes(pr.user.login);
-
-// Rules
-if (!isBot) {
- // make sure someone else reviews these changes
- // const someoneAssigned = danger.github.pr.assignee;
- // if (someoneAssigned === null) {
- // warn(
- // 'Please assign someone to merge this PR, and optionally include people who should review.'
- // );
- // }
-
- // When there are app-changes and it's not a PR marked as trivial, expect
- // there to be CHANGELOG changes.
- const changelogChanges = modified.some(x => x.indexOf('CHANGELOG') > -1);
- if (modifiedAppFiles.length > 0 && !trivialPR && !changelogChanges) {
- fail('No CHANGELOG added.');
- }
-
- // No PR is too small to warrant a paragraph or two of summary
- if (pr.body.length === 0) {
- fail('Please add a description to your PR.');
- }
-
- const hasAppChanges = modifiedAppFiles.length > 0;
-
- const testChanges = modifiedAppFiles.filter(filepath =>
- filepath.includes('test'),
- );
- const hasTestChanges = testChanges.length > 0;
-
- // Warn when there is a big PR
- const bigPRThreshold = 500;
- if (github.pr.additions + github.pr.deletions > bigPRThreshold) {
- warn(':exclamation: Big PR');
- }
-
- // Warn if there are library changes, but not tests
- if (hasAppChanges && !hasTestChanges) {
- warn(
- "There are library changes, but not tests. That's OK as long as you're refactoring existing code",
- );
- }
-
- // Be careful of leaving testing shortcuts in the codebase
- const onlyTestFiles = testChanges.filter(x => {
- const content = fs.readFileSync(x).toString();
- return (
- content.includes('it.only') ||
- content.includes('describe.only') ||
- content.includes('fdescribe') ||
- content.includes('fit(')
- );
- });
- raiseIssueAboutPaths(fail, onlyTestFiles, 'an `only` was left in the test');
-}
diff --git a/config/jest.config.settings.js b/config/jest.config.js
similarity index 69%
rename from config/jest.config.settings.js
rename to config/jest.config.js
--- a/config/jest.config.settings.js
+++ b/config/jest.config.js
@@ -1,21 +1,19 @@
module.exports = {
+ rootDir: '..',
transform: {
'.(ts|tsx)': 'ts-jest',
},
-
globals: {
'ts-jest': {
diagnostics: false,
},
},
-
moduleFileExtensions: ['ts', 'tsx', 'js', 'json'],
testURL: 'http://localhost',
-
- testMatch: ['<rootDir>/src/**/__tests__/**/*.ts'],
testPathIgnorePatterns: [
'/node_modules/',
- '/lib/',
- '<rootDir>/lib/',
+ '/dist/'
],
+ modulePathIgnorePatterns: ['/dist/'],
+ setupFiles: ['<rootDir>/src/config/jest/setup.ts'],
};
diff --git a/config/prepareDist.js b/config/prepareDist.js
new file mode 100644
--- /dev/null
+++ b/config/prepareDist.js
@@ -0,0 +1,111 @@
+// The Apollo Client source that is published to npm is located in the
+// "dist" directory. This utility script is called when building Apollo Client,
+// to make sure the "dist" directory is prepared for publishing.
+//
+// This script will:
+//
+// - Copy the current root package.json into "dist" after adjusting it for
+// publishing.
+// - Copy the supporting files from the root into "dist" (e.g. `README.MD`,
+// `LICENSE`, etc.).
+// - Create a new `package.json` for each sub-set bundle we support, and
+// store it in the appropriate dist sub-directory.
+
+const fs = require('fs');
+const recast = require('recast');
+
+const distRoot = `${__dirname}/../dist`;
+
+
+/* @apollo/client */
+
+const packageJson = require('../package.json');
+
+// The root package.json is marked as private to prevent publishing
+// from happening in the root of the project. This sets the package back to
+// public so it can be published from the "dist" directory.
+packageJson.private = false;
+
+// Remove package.json items that we don't need to publish
+delete packageJson.scripts;
+delete packageJson.bundlesize;
+
+// The root package.json points to the CJS/ESM source in "dist", to support
+// on-going package development (e.g. running tests, supporting npm link, etc.).
+// When publishing from "dist" however, we need to update the package.json
+// to point to the files within the same directory.
+const distPackageJson = JSON.stringify(packageJson, (_key, value) => {
+ if (typeof value === 'string' && value.startsWith('./dist/')) {
+ const parts = value.split('/');
+ parts.splice(1, 1); // remove dist
+ return parts.join('/');
+ }
+ return value;
+}, 2) + "\n";
+
+// Save the modified package.json to "dist"
+fs.writeFileSync(`${distRoot}/package.json`, distPackageJson);
+
+// Copy supporting files into "dist"
+const srcDir = `${__dirname}/..`;
+const destDir = `${srcDir}/dist`;
+fs.copyFileSync(`${srcDir}/README.md`, `${destDir}/README.md`);
+fs.copyFileSync(`${srcDir}/LICENSE`, `${destDir}/LICENSE`);
+
+
+/* @apollo/client/core, @apollo/client/cache, @apollo/client/utilities */
+
+function buildPackageJson(bundleName) {
+ return JSON.stringify({
+ name: `@apollo/client/${bundleName}`,
+ main: `${bundleName}.cjs.js`,
+ module: 'index.js',
+ types: 'index.d.ts',
+ }, null, 2) + "\n";
+}
+
+function loadExportNames(bundleName) {
+ const indexSrc =
+ fs.readFileSync(`${distRoot}/${bundleName}/index.js`);
+ const exportNames = [];
+ recast.visit(recast.parse(indexSrc), {
+ visitExportSpecifier(path) {
+ exportNames.push(path.value.exported.name);
+ return false;
+ },
+ });
+ return exportNames;
+}
+
+function writeCjsIndex(bundleName, exportNames, includeNames = true) {
+ const filterPrefix = includeNames ? '' : '!';
+ fs.writeFileSync(`${distRoot}/${bundleName}/${bundleName}.cjs.js`, [
+ "const allExports = require('../apollo-client.cjs');",
+ `const names = new Set(${JSON.stringify(exportNames)});`,
+ "Object.keys(allExports).forEach(name => {",
+ ` if (${filterPrefix}names.has(name)) {`,
+ " exports[name] = allExports[name];",
+ " }",
+ "});",
+ "",
+ ].join('\n'));
+}
+
+// Create `core`, `cache` and `utilities` bundle package.json files, storing
+// them in their associated dist directory. This helps provide a way for the
+// Apollo Client core to be used without React (via `@apollo/client/core`),
+// and AC's cache and utilities to be used by themselves
+// (`@apollo/client/cache` and `@apollo/client/utilities`), via the
+// `core.cjs.js`, `cache.cjs.js` and `utilities.cjs.js` CommonJS entry point
+// files that only include the exports needed for each bundle.
+
+fs.writeFileSync(`${distRoot}/core/package.json`, buildPackageJson('core'));
+writeCjsIndex('core', loadExportNames('react'), false);
+
+fs.writeFileSync(`${distRoot}/cache/package.json`, buildPackageJson('cache'));
+writeCjsIndex('cache', loadExportNames('cache'));
+
+fs.writeFileSync(
+ `${distRoot}/utilities/package.json`,
+ buildPackageJson('utilities')
+);
diff --git a/config/rollup.config.js b/config/rollup.config.js
--- a/config/rollup.config.js
+++ b/config/rollup.config.js
@@ -1,152 +1,169 @@
import nodeResolve from 'rollup-plugin-node-resolve';
-import typescriptPlugin from 'rollup-plugin-typescript2';
-import typescript from 'typescript';
-import path from 'path';
-import fs from 'fs';
-import { transformSync } from '@babel/core';
-import cjsModulesTransform from '@babel/plugin-transform-modules-commonjs';
-import umdModulesTransform from '@babel/plugin-transform-modules-umd';
import invariantPlugin from 'rollup-plugin-invariant';
import { terser as minify } from 'rollup-plugin-terser';
+import cjs from 'rollup-plugin-commonjs';
+import fs from 'fs';
-function onwarn(message) {
- const suppressed = ['UNRESOLVED_IMPORT', 'THIS_IS_UNDEFINED'];
+import packageJson from '../package.json';
- if (!suppressed.find(code => message.code === code)) {
- return console.warn(message.message);
- }
-}
+const distDir = './dist';
-const defaultGlobals = {
- 'apollo-client': 'apollo.core',
- 'apollo-cache': 'apolloCache.core',
- 'apollo-link': 'apolloLink.core',
- 'apollo-link-dedup': 'apolloLink.dedup',
- 'apollo-utilities': 'apollo.utilities',
- 'graphql-anywhere': 'graphqlAnywhere',
- 'graphql-anywhere/lib/async': 'graphqlAnywhere.async',
- 'apollo-boost': 'apollo.boost',
- 'tslib': 'tslib',
+const globals = {
+ tslib: 'tslib',
'ts-invariant': 'invariant',
+ 'symbol-observable': '$$observable',
+ 'graphql/language/printer': 'print',
+ optimism: 'optimism',
+ 'graphql/language/visitor': 'visitor',
+ 'graphql/execution/execute': 'execute',
+ 'graphql-tag': 'graphqlTag',
+ 'fast-json-stable-stringify': 'stringify',
+ '@wry/equality': 'wryEquality',
+ graphql: 'graphql',
+ react: 'React',
+ 'zen-observable': 'Observable',
};
-export function rollup({
- name,
- input = './src/index.ts',
- outputPrefix = 'bundle',
- extraGlobals = {},
-}) {
- const projectDir = path.join(__filename, '..');
- console.info(`Building project esm ${projectDir}`);
- const tsconfig = `${projectDir}/tsconfig.json`;
+const hasOwn = Object.prototype.hasOwnProperty;
- const globals = {
- ...defaultGlobals,
- ...extraGlobals,
+function external(id) {
+ return hasOwn.call(globals, id);
+}
+
+function prepareESM(input, outputDir) {
+ return {
+ input,
+ external,
+ output: {
+ dir: outputDir,
+ format: 'esm',
+ sourcemap: true,
+ },
+ // The purpose of this job is to ensure each `./dist` ESM file is run
+ // through the `invariantPlugin`, with any resulting changes added
+ // directly back into each ESM file. By setting `preserveModules`
+ // to `true`, we're making sure Rollup doesn't attempt to create a single
+ // combined ESM bundle with the final result of running this job.
+ preserveModules: true,
+ plugins: [
+ nodeResolve(),
+ invariantPlugin({
+ // Instead of completely stripping InvariantError messages in
+ // production, this option assigns a numeric code to the
+ // production version of each error (unique to the call/throw
+ // location), which makes it much easier to trace production
+ // errors back to the unminified code where they were thrown,
+ // where the full error string can be found. See #4519.
+ errorCodes: true,
+ }),
+ cjs({
+ namedExports: {
+ 'graphql-tag': ['gql'],
+ },
+ }),
+ ],
};
+}
- function external(id) {
- return Object.prototype.hasOwnProperty.call(globals, id);
- }
+function prepareCJS(input, output) {
+ return {
+ input,
+ external,
+ output: {
+ file: output,
+ format: 'cjs',
+ sourcemap: true,
+ exports: 'named',
+ },
+ plugins: [
+ nodeResolve(),
+ cjs({
+ namedExports: {
+ 'graphql-tag': ['gql'],
+ },
+ }),
+ ],
+ };
+}
- function outputFile(format) {
- return './lib/' + outputPrefix + '.' + format + '.js';
- }
+function prepareCJSMinified(input) {
+ return {
+ input,
+ output: {
+ file: input.replace('.js', '.min.js'),
+ format: 'cjs',
+ },
+ plugins: [
+ minify({
+ mangle: {
+ toplevel: true,
+ },
+ compress: {
+ global_defs: {
+ '@process.env.NODE_ENV': JSON.stringify('production'),
+ },
+ },
+ }),
+ ],
+ };
+}
- function fromSource(format) {
- return {
- input,
- external,
- output: {
- file: outputFile(format),
- format,
- sourcemap: true,
- },
- plugins: [
- nodeResolve({
- extensions: ['.ts', '.tsx'],
- module: true,
- }),
- typescriptPlugin({ typescript, tsconfig }),
- invariantPlugin({
- // Instead of completely stripping InvariantError messages in
- // production, this option assigns a numeric code to the
- // production version of each error (unique to the call/throw
- // location), which makes it much easier to trace production
- // errors back to the unminified code where they were thrown,
- // where the full error string can be found. See #4519.
- errorCodes: true,
- }),
- ],
- onwarn,
- };
- }
+function prepareUtilities() {
+ const utilsDistDir = `${distDir}/utilities`;
+ return {
+ input: `${utilsDistDir}/index.js`,
+ external,
+ output: {
+ file: `${utilsDistDir}/utilities.cjs.js`,
+ format: 'cjs',
+ sourcemap: true,
+ exports: 'named',
+ },
+ plugins: [
+ nodeResolve(),
+ ],
+ };
+}
- function fromESM(toFormat) {
- return {
- input: outputFile('esm'),
- output: {
- file: outputFile(toFormat),
- format: 'esm',
- sourcemap: false,
- },
- // The UMD bundle expects `this` to refer to the global object. By default
- // Rollup replaces `this` with `undefined`, but this default behavior can
- // be overridden with the `context` option.
- context: 'this',
- plugins: [{
- transform(source, id) {
- const output = transformSync(source, {
- inputSourceMap: JSON.parse(fs.readFileSync(id + '.map')),
- sourceMaps: true,
- plugins: [
- [toFormat === 'umd' ? umdModulesTransform : cjsModulesTransform, {
- loose: true,
- allowTopLevelThis: true,
- }],
- ],
- });
+// Build a separate CJS only `testing.js` bundle, that includes React
+// testing utilities like `MockedProvider` (testing utilities are kept out of
+// the main `apollo-client` bundle). This bundle can be accessed directly
+// like:
+//
+// import { MockedProvider } from '@apollo/client/testing';
+function prepareTesting() {
+ const bundleName = 'testing';
- // There doesn't seem to be any way to get Rollup to emit a source map
- // that goes all the way back to the source file (rather than just to
- // the bundle.esm.js intermediate file), so we pass sourcemap:false in
- // the output options above, and manually write the CJS and UMD source
- // maps here.
- fs.writeFileSync(
- outputFile(toFormat) + '.map',
- JSON.stringify(output.map),
- );
+ // Create a type file for the new testing bundle that points to the existing
+ // `react/testing` type definitions.
+ fs.writeFileSync(
+ `${distDir}/${bundleName}.d.ts`,
+ "export * from './utilities/testing';"
+ );
- return {
- code: output.code,
- };
- }
- }],
- }
- }
+ return {
+ input: `${distDir}/utilities/testing/index.js`,
+ external,
+ output: {
+ file: `${distDir}/${bundleName}.js`,
+ format: 'cjs',
+ },
+ plugins: [
+ nodeResolve({
+ extensions: ['.js', '.jsx'],
+ }),
+ ],
+ };
+}
+function rollup() {
return [
- fromSource('esm'),
- fromESM('cjs'),
- fromESM('umd'),
- {
- input: outputFile('cjs'),
- output: {
- file: outputFile('cjs.min'),
- format: 'esm',
- },
- plugins: [
- minify({
- mangle: {
- toplevel: true,
- },
- compress: {
- global_defs: {
- '@process.env.NODE_ENV': JSON.stringify('production'),
- },
- },
- }),
- ],
- },
+ prepareESM(packageJson.module, distDir),
+ prepareCJS(packageJson.module, packageJson.main),
+ prepareCJSMinified(packageJson.main),
+ prepareUtilities(),
+ prepareTesting(),
];
}
+
+export default rollup();
diff --git a/docs/gatsby-config.js b/docs/gatsby-config.js
--- a/docs/gatsby-config.js
+++ b/docs/gatsby-config.js
@@ -11,23 +11,27 @@ module.exports = {
subtitle: 'Client (React)',
description: 'A guide to using the Apollo GraphQL Client with React',
githubRepo: 'apollographql/apollo-client',
- defaultVersion: 2.6,
+ defaultVersion: 3.0,
+ /*
versions: {
'3.0 beta': 'release-3.0',
- 2.5: 'version-2.5',
- 2.4: 'version-2.4',
+ '2.6': 'version-2.6',
+ '2.5': 'version-2.5',
+ '2.4': 'version-2.4',
},
+ */
checkLinksOptions: {
exceptions: [
- '/api/apollo-client/',
- '/v3.0-beta/api/core/',
- '/v2.5/api/apollo-client/',
- '/v2.4/api/apollo-client/',
+ '/api/core/',
+ '/v2.4/api/core/',
+ '/v2.5/api/core/',
+ '/v2.6/api/core/',
+ '/v3.0/api/core/'
],
},
typescriptApiBox: {
data: require('./docs.json'),
- filepathPrefix: 'packages/apollo-client/src/',
+ filepathPrefix: 'src/',
},
sidebarCategories: {
null: ['index', 'why-apollo', 'get-started'],
@@ -59,15 +63,17 @@ module.exports = {
'integrations/meteor',
'integrations/webpack',
],
- Networking: ['networking/network-layer', 'networking/authentication'],
- 'API Reference': [
- 'api/apollo-client',
+ Networking: [
+ 'networking/network-layer',
+ 'networking/authentication',
+ ],
+ 'Apollo Client API': [
+ 'api/core',
'api/react-hooks',
- 'api/react-ssr',
'api/react-testing',
+ 'api/react-ssr',
'api/react-components',
- 'api/react-hoc',
- 'api/react-common',
+ 'api/react-hoc'
],
Migrating: ['migrating/hooks-migration', 'migrating/boost-migration'],
},
diff --git a/examples/bundling/no-tree-shaking/rollup-ac2/rollup.config.js b/examples/bundling/no-tree-shaking/rollup-ac2/rollup.config.js
new file mode 100644
--- /dev/null
+++ b/examples/bundling/no-tree-shaking/rollup-ac2/rollup.config.js
@@ -0,0 +1,69 @@
+import node from 'rollup-plugin-node-resolve';
+import babel from 'rollup-plugin-babel';
+import cjs from 'rollup-plugin-commonjs';
+import replace from 'rollup-plugin-replace';
+import { terser as minify } from 'rollup-plugin-terser';
+import gzipPlugin from 'rollup-plugin-gzip'
+
+function build({ outputPrefix, externals = [], gzip = false }) {
+ return {
+ input: './src/index.js',
+ output: {
+ file: `./public/js/${outputPrefix}.min.js`,
+ format: 'cjs'
+ },
+ external(id) {
+ return externals.indexOf(id) >= 0;
+ },
+ treeshake: false,
+ plugins: [
+ node(),
+ replace({
+ 'process.env.NODE_ENV': JSON.stringify('production')
+ }),
+ babel({
+ exclude: 'node_modules/**',
+ presets: [require('@babel/preset-react')]
+ }),
+ cjs({
+ namedExports: {
+ 'react': [
+ 'useRef',
+ 'useContext',
+ 'useReducer',
+ 'useEffect',
+ 'useState'
+ ],
+ 'react-dom': [
+ 'render',
+ ],
+ }
+ }),
+ minify({
+ mangle: {
+ toplevel: true
+ },
+ compress: {
+ dead_code: false,
+ global_defs: {
+ '@process.env.NODE_ENV': JSON.stringify('production')
+ }
+ }
+ }),
+ gzip && gzipPlugin()
+ ]
+ };
+}
+
+export default [
+ build({
+ externals: [],
+ outputPrefix: 'app',
+ gzip: true
+ }),
+ build({
+ externals: ['react', 'react-dom'],
+ outputPrefix: 'app-no-react',
+ gzip: true
+ }),
+];
diff --git a/examples/bundling/no-tree-shaking/rollup-ac2/src/App.js b/examples/bundling/no-tree-shaking/rollup-ac2/src/App.js
new file mode 100644
--- /dev/null
+++ b/examples/bundling/no-tree-shaking/rollup-ac2/src/App.js
@@ -0,0 +1,35 @@
+import React from "react";
+import { useQuery } from "@apollo/react-hooks";
+import gql from "graphql-tag";
+
+const ALL_COUNTRIES = gql`
+ query AllCountries {
+ countries {
+ code
+ name
+ emoji
+ }
+ }
+`;
+
+export default function App() {
+ const {
+ loading,
+ data: { countries } = {}
+ } = useQuery(ALL_COUNTRIES);
+
+ return (
+ <main>
+ <h1>Countries</h1>
+ {loading ? (
+ <p>Loading…</p>
+ ) : (
+ <ul>
+ {countries.map(country => (
+ <li key={country.code}>{country.emoji} {country.name}</li>
+ ))}
+ </ul>
+ )}
+ </main>
+ );
+}
diff --git a/examples/bundling/no-tree-shaking/rollup-ac2/src/index.js b/examples/bundling/no-tree-shaking/rollup-ac2/src/index.js
new file mode 100644
--- /dev/null
+++ b/examples/bundling/no-tree-shaking/rollup-ac2/src/index.js
@@ -0,0 +1,19 @@
+import React from "react";
+import { render } from "react-dom";
+import { ApolloClient } from "apollo-client";
+import { InMemoryCache } from "apollo-cache-inmemory";
+import { ApolloProvider } from "@apollo/react-hooks";
+
+import App from "./App";
+
+const client = new ApolloClient({
+ uri: 'https://countries.trevorblades.com/',
+ cache: new InMemoryCache(),
+});
+
+render(
+ <ApolloProvider client={client}>
+ <App />
+ </ApolloProvider>,
+ document.getElementById("root")
+);
diff --git a/examples/bundling/no-tree-shaking/rollup-ac3-no-react/rollup.config.js b/examples/bundling/no-tree-shaking/rollup-ac3-no-react/rollup.config.js
new file mode 100644
--- /dev/null
+++ b/examples/bundling/no-tree-shaking/rollup-ac3-no-react/rollup.config.js
@@ -0,0 +1,68 @@
+import node from 'rollup-plugin-node-resolve';
+import babel from 'rollup-plugin-babel';
+import cjs from 'rollup-plugin-commonjs';
+import replace from 'rollup-plugin-replace';
+import { terser } from 'rollup-plugin-terser';
+import gzipPlugin from 'rollup-plugin-gzip'
+
+function build({ outputPrefix, externals = [], minify = false, gzip = false }) {
+ return {
+ input: './src/index.js',
+ output: {
+ file: `./public/js/${outputPrefix}${minify ? '.min' : ''}.js`,
+ format: 'cjs',
+ sourcemap: true
+ },
+ external(id) {
+ return externals.indexOf(id) >= 0;
+ },
+ treeshake: false,
+ plugins: [
+ node(),
+ replace({
+ 'process.env.NODE_ENV': JSON.stringify('production')
+ }),
+ babel({
+ exclude: 'node_modules/**'
+ }),
+ cjs({
+ namedExports: {
+ 'react': [
+ 'useRef',
+ 'useContext',
+ 'useReducer',
+ 'useEffect',
+ 'useState'
+ ]
+ }
+ }),
+ minify && terser({
+ mangle: {
+ toplevel: true
+ },
+ compress: {
+ dead_code: false,
+ global_defs: {
+ '@process.env.NODE_ENV': JSON.stringify('production')
+ }
+ }
+ }),
+ gzip && gzipPlugin()
+ ]
+ };
+}
+
+export default [
+ build({
+ externals: [],
+ outputPrefix: 'app',
+ minify: false,
+ gzip: false
+ }),
+ build({
+ externals: [],
+ outputPrefix: 'app',
+ minify: true,
+ gzip: true
+ })
+];
diff --git a/examples/bundling/no-tree-shaking/rollup-ac3-no-react/src/index.js b/examples/bundling/no-tree-shaking/rollup-ac3-no-react/src/index.js
new file mode 100644
--- /dev/null
+++ b/examples/bundling/no-tree-shaking/rollup-ac3-no-react/src/index.js
@@ -0,0 +1,39 @@
+import { ApolloClient, InMemoryCache, gql } from "@apollo/client";
+
+const ALL_COUNTRIES = gql`
+ query AllCountries {
+ countries @client {
+ code
+ name
+ emoji
+ }
+ }
+`;
+
+const client = new ApolloClient({
+ cache: new InMemoryCache(),
+ resolvers: {
+ Query: {
+ countries() {
+ return [
+ {
+ code: "AD",
+ emoji: "🇦🇩",
+ name: "Andorra",
+ __typename: "Country"
+ },
+ {
+ code: "AE",
+ emoji: "🇦🇪",
+ name: "United Arab Emirates",
+ __typename: "Country"
+ }
+ ];
+ }
+ }
+ }
+});
+
+client.query({ query: ALL_COUNTRIES }).then(response => {
+ console.log(response);
+});
diff --git a/examples/bundling/no-tree-shaking/rollup-ac3/rollup.config.js b/examples/bundling/no-tree-shaking/rollup-ac3/rollup.config.js
new file mode 100644
--- /dev/null
+++ b/examples/bundling/no-tree-shaking/rollup-ac3/rollup.config.js
@@ -0,0 +1,69 @@
+import node from 'rollup-plugin-node-resolve';
+import babel from 'rollup-plugin-babel';
+import cjs from 'rollup-plugin-commonjs';
+import replace from 'rollup-plugin-replace';
+import { terser as minify } from 'rollup-plugin-terser';
+import gzipPlugin from 'rollup-plugin-gzip'
+
+function build({ outputPrefix, externals = [], gzip = false }) {
+ return {
+ input: './src/index.js',
+ output: {
+ file: `./public/js/${outputPrefix}.min.js`,
+ format: 'cjs'
+ },
+ external(id) {
+ return externals.indexOf(id) >= 0;
+ },
+ treeshake: false,
+ plugins: [
+ node(),
+ replace({
+ 'process.env.NODE_ENV': JSON.stringify('production')
+ }),
+ babel({
+ exclude: 'node_modules/**',
+ presets: [require('@babel/preset-react')]
+ }),
+ cjs({
+ namedExports: {
+ 'react': [
+ 'useRef',
+ 'useContext',
+ 'useReducer',
+ 'useEffect',
+ 'useState'
+ ],
+ 'react-dom': [
+ 'render',
+ ],
+ }
+ }),
+ minify({
+ mangle: {
+ toplevel: true
+ },
+ compress: {
+ dead_code: false,
+ global_defs: {
+ '@process.env.NODE_ENV': JSON.stringify('production')
+ }
+ }
+ }),
+ gzip && gzipPlugin()
+ ]
+ };
+}
+
+export default [
+ build({
+ externals: [],
+ outputPrefix: 'app',
+ gzip: true
+ }),
+ build({
+ externals: ['react', 'react-dom'],
+ outputPrefix: 'app-no-react',
+ gzip: true
+ }),
+];
diff --git a/examples/bundling/no-tree-shaking/rollup-ac3/src/App.js b/examples/bundling/no-tree-shaking/rollup-ac3/src/App.js
new file mode 100644
--- /dev/null
+++ b/examples/bundling/no-tree-shaking/rollup-ac3/src/App.js
@@ -0,0 +1,34 @@
+import React from "react";
+import { gql, useQuery } from "@apollo/client";
+
+const ALL_COUNTRIES = gql`
+ query AllCountries {
+ countries {
+ code
+ name
+ emoji
+ }
+ }
+`;
+
+export default function App() {
+ const {
+ loading,
+ data: { countries } = {}
+ } = useQuery(ALL_COUNTRIES);
+
+ return (
+ <main>
+ <h1>Countries</h1>
+ {loading ? (
+ <p>Loading…</p>
+ ) : (
+ <ul>
+ {countries.map(country => (
+ <li key={country.code}>{country.emoji} {country.name}</li>
+ ))}
+ </ul>
+ )}
+ </main>
+ );
+}
diff --git a/examples/bundling/no-tree-shaking/rollup-ac3/src/index.js b/examples/bundling/no-tree-shaking/rollup-ac3/src/index.js
new file mode 100644
--- /dev/null
+++ b/examples/bundling/no-tree-shaking/rollup-ac3/src/index.js
@@ -0,0 +1,17 @@
+import React from "react";
+import { render } from "react-dom";
+import { ApolloClient, ApolloProvider, InMemoryCache } from "@apollo/client";
+
+import App from "./App";
+
+const client = new ApolloClient({
+ uri: 'https://countries.trevorblades.com/',
+ cache: new InMemoryCache(),
+});
+
+render(
+ <ApolloProvider client={client}>
+ <App />
+ </ApolloProvider>,
+ document.getElementById("root")
+);
diff --git a/examples/bundling/tree-shaking/rollup-ac2/rollup.config.js b/examples/bundling/tree-shaking/rollup-ac2/rollup.config.js
new file mode 100644
--- /dev/null
+++ b/examples/bundling/tree-shaking/rollup-ac2/rollup.config.js
@@ -0,0 +1,70 @@
+import node from 'rollup-plugin-node-resolve';
+import babel from 'rollup-plugin-babel';
+import cjs from 'rollup-plugin-commonjs';
+import replace from 'rollup-plugin-replace';
+import { terser as minify } from 'rollup-plugin-terser';
+import gzipPlugin from 'rollup-plugin-gzip'
+
+function build({ outputPrefix, externals = [], gzip = false }) {
+ return {
+ input: './src/index.js',
+ output: {
+ file: `./public/js/${outputPrefix}.min.js`,
+ format: 'cjs',
+ sourcemap: true
+ },
+ external(id) {
+ return externals.indexOf(id) >= 0;
+ },
+ treeshake: true,
+ plugins: [
+ node(),
+ replace({
+ 'process.env.NODE_ENV': JSON.stringify('production')
+ }),
+ babel({
+ exclude: 'node_modules/**',
+ presets: [require('@babel/preset-react')]
+ }),
+ cjs({
+ namedExports: {
+ 'react': [
+ 'useRef',
+ 'useContext',
+ 'useReducer',
+ 'useEffect',
+ 'useState'
+ ],
+ 'react-dom': [
+ 'render',
+ ],
+ }
+ }),
+ minify({
+ mangle: {
+ toplevel: true
+ },
+ compress: {
+ dead_code: true,
+ global_defs: {
+ '@process.env.NODE_ENV': JSON.stringify('production')
+ }
+ }
+ }),
+ gzip && gzipPlugin()
+ ]
+ };
+}
+
+export default [
+ build({
+ externals: [],
+ outputPrefix: 'app',
+ gzip: true
+ }),
+ build({
+ externals: ['react', 'react-dom'],
+ outputPrefix: 'app-no-react',
+ gzip: true
+ }),
+];
diff --git a/examples/bundling/tree-shaking/rollup-ac2/src/App.js b/examples/bundling/tree-shaking/rollup-ac2/src/App.js
new file mode 100644
--- /dev/null
+++ b/examples/bundling/tree-shaking/rollup-ac2/src/App.js
@@ -0,0 +1,35 @@
+import React from "react";
+import { useQuery } from "@apollo/react-hooks";
+import gql from "graphql-tag";
+
+const ALL_COUNTRIES = gql`
+ query AllCountries {
+ countries {
+ code
+ name
+ emoji
+ }
+ }
+`;
+
+export default function App() {
+ const {
+ loading,
+ data: { countries } = {}
+ } = useQuery(ALL_COUNTRIES);
+
+ return (
+ <main>
+ <h1>Countries</h1>
+ {loading ? (
+ <p>Loading…</p>
+ ) : (
+ <ul>
+ {countries.map(country => (
+ <li key={country.code}>{country.emoji} {country.name}</li>
+ ))}
+ </ul>
+ )}
+ </main>
+ );
+}
diff --git a/examples/bundling/tree-shaking/rollup-ac2/src/index.js b/examples/bundling/tree-shaking/rollup-ac2/src/index.js
new file mode 100644
--- /dev/null
+++ b/examples/bundling/tree-shaking/rollup-ac2/src/index.js
@@ -0,0 +1,19 @@
+import React from "react";
+import { render } from "react-dom";
+import { ApolloClient } from "apollo-client";
+import { InMemoryCache } from "apollo-cache-inmemory";
+import { ApolloProvider } from "@apollo/react-hooks";
+
+import App from "./App";
+
+const client = new ApolloClient({
+ uri: 'https://countries.trevorblades.com/',
+ cache: new InMemoryCache(),
+});
+
+render(
+ <ApolloProvider client={client}>
+ <App />
+ </ApolloProvider>,
+ document.getElementById("root")
+);
diff --git a/examples/bundling/tree-shaking/rollup-ac3-no-react/rollup.config.js b/examples/bundling/tree-shaking/rollup-ac3-no-react/rollup.config.js
new file mode 100644
--- /dev/null
+++ b/examples/bundling/tree-shaking/rollup-ac3-no-react/rollup.config.js
@@ -0,0 +1,68 @@
+import node from 'rollup-plugin-node-resolve';
+import babel from 'rollup-plugin-babel';
+import cjs from 'rollup-plugin-commonjs';
+import replace from 'rollup-plugin-replace';
+import { terser } from 'rollup-plugin-terser';
+import gzipPlugin from 'rollup-plugin-gzip'
+
+function build({ outputPrefix, externals = [], minify = false, gzip = false }) {
+ return {
+ input: './src/index.js',
+ output: {
+ file: `./public/js/${outputPrefix}${minify ? '.min' : ''}.js`,
+ format: 'cjs',
+ sourcemap: true
+ },
+ external(id) {
+ return externals.indexOf(id) >= 0;
+ },
+ treeshake: true,
+ plugins: [
+ node(),
+ replace({
+ 'process.env.NODE_ENV': JSON.stringify('production')
+ }),
+ babel({
+ exclude: 'node_modules/**'
+ }),
+ cjs({
+ namedExports: {
+ 'react': [
+ 'useRef',
+ 'useContext',
+ 'useReducer',
+ 'useEffect',
+ 'useState'
+ ]
+ }
+ }),
+ minify && terser({
+ mangle: {
+ toplevel: true
+ },
+ compress: {
+ dead_code: true,
+ global_defs: {
+ '@process.env.NODE_ENV': JSON.stringify('production')
+ }
+ }
+ }),
+ gzip && gzipPlugin()
+ ]
+ };
+}
+
+export default [
+ build({
+ externals: [],
+ outputPrefix: 'app',
+ minify: false,
+ gzip: false
+ }),
+ build({
+ externals: [],
+ outputPrefix: 'app',
+ minify: true,
+ gzip: true
+ })
+];
diff --git a/examples/bundling/tree-shaking/rollup-ac3-no-react/src/index.js b/examples/bundling/tree-shaking/rollup-ac3-no-react/src/index.js
new file mode 100644
--- /dev/null
+++ b/examples/bundling/tree-shaking/rollup-ac3-no-react/src/index.js
@@ -0,0 +1,39 @@
+import { ApolloClient, InMemoryCache, gql } from "@apollo/client";
+
+const ALL_COUNTRIES = gql`
+ query AllCountries {
+ countries @client {
+ code
+ name
+ emoji
+ }
+ }
+`;
+
+const client = new ApolloClient({
+ cache: new InMemoryCache(),
+ resolvers: {
+ Query: {
+ countries() {
+ return [
+ {
+ code: "AD",
+ emoji: "🇦🇩",
+ name: "Andorra",
+ __typename: "Country"
+ },
+ {
+ code: "AE",
+ emoji: "🇦🇪",
+ name: "United Arab Emirates",
+ __typename: "Country"
+ }
+ ];
+ }
+ }
+ }
+});
+
+client.query({ query: ALL_COUNTRIES }).then(response => {
+ console.log(response);
+});
diff --git a/examples/bundling/tree-shaking/rollup-ac3/rollup.config.js b/examples/bundling/tree-shaking/rollup-ac3/rollup.config.js
new file mode 100644
--- /dev/null
+++ b/examples/bundling/tree-shaking/rollup-ac3/rollup.config.js
@@ -0,0 +1,70 @@
+import node from 'rollup-plugin-node-resolve';
+import babel from 'rollup-plugin-babel';
+import cjs from 'rollup-plugin-commonjs';
+import replace from 'rollup-plugin-replace';
+import { terser as minify } from 'rollup-plugin-terser';
+import gzipPlugin from 'rollup-plugin-gzip'
+
+function build({ outputPrefix, externals = [], gzip = false }) {
+ return {
+ input: './src/index.js',
+ output: {
+ file: `./public/js/${outputPrefix}.min.js`,
+ format: 'cjs',
+ sourcemap: true
+ },
+ external(id) {
+ return externals.indexOf(id) >= 0;
+ },
+ treeshake: true,
+ plugins: [
+ node(),
+ replace({
+ 'process.env.NODE_ENV': JSON.stringify('production')
+ }),
+ babel({
+ exclude: 'node_modules/**',
+ presets: [require('@babel/preset-react')]
+ }),
+ cjs({
+ namedExports: {
+ 'react': [
+ 'useRef',
+ 'useContext',
+ 'useReducer',
+ 'useEffect',
+ 'useState'
+ ],
+ 'react-dom': [
+ 'render',
+ ],
+ }
+ }),
+ minify({
+ mangle: {
+ toplevel: true
+ },
+ compress: {
+ dead_code: true,
+ global_defs: {
+ '@process.env.NODE_ENV': JSON.stringify('production')
+ }
+ }
+ }),
+ gzip && gzipPlugin()
+ ]
+ };
+}
+
+export default [
+ build({
+ externals: [],
+ outputPrefix: 'app',
+ gzip: true
+ }),
+ build({
+ externals: ['react', 'react-dom'],
+ outputPrefix: 'app-no-react',
+ gzip: true
+ })
+];
diff --git a/examples/bundling/tree-shaking/rollup-ac3/src/App.js b/examples/bundling/tree-shaking/rollup-ac3/src/App.js
new file mode 100644
--- /dev/null
+++ b/examples/bundling/tree-shaking/rollup-ac3/src/App.js
@@ -0,0 +1,34 @@
+import React from "react";
+import { gql, useQuery } from "@apollo/client";
+
+const ALL_COUNTRIES = gql`
+ query AllCountries {
+ countries {
+ code
+ name
+ emoji
+ }
+ }
+`;
+
+export default function App() {
+ const {
+ loading,
+ data: { countries } = {}
+ } = useQuery(ALL_COUNTRIES);
+
+ return (
+ <main>
+ <h1>Countries</h1>
+ {loading ? (
+ <p>Loading…</p>
+ ) : (
+ <ul>
+ {countries.map(country => (
+ <li key={country.code}>{country.emoji} {country.name}</li>
+ ))}
+ </ul>
+ )}
+ </main>
+ );
+}
diff --git a/examples/bundling/tree-shaking/rollup-ac3/src/index.js b/examples/bundling/tree-shaking/rollup-ac3/src/index.js
new file mode 100644
--- /dev/null
+++ b/examples/bundling/tree-shaking/rollup-ac3/src/index.js
@@ -0,0 +1,17 @@
+import React from "react";
+import { render } from "react-dom";
+import { ApolloClient, ApolloProvider, InMemoryCache } from "@apollo/client";
+
+import App from "./App";
+
+const client = new ApolloClient({
+ uri: 'https://countries.trevorblades.com/',
+ cache: new InMemoryCache(),
+});
+
+render(
+ <ApolloProvider client={client}>
+ <App />
+ </ApolloProvider>,
+ document.getElementById("root")
+);
diff --git a/examples/bundling/tree-shaking/webpack-ac3/src/App.js b/examples/bundling/tree-shaking/webpack-ac3/src/App.js
new file mode 100644
--- /dev/null
+++ b/examples/bundling/tree-shaking/webpack-ac3/src/App.js
@@ -0,0 +1,34 @@
+import React from "react";
+import { gql, useQuery } from "@apollo/client";
+
+const ALL_COUNTRIES = gql`
+ query AllCountries {
+ countries {
+ code
+ name
+ emoji
+ }
+ }
+`;
+
+export default function App() {
+ const {
+ loading,
+ data: { countries } = {}
+ } = useQuery(ALL_COUNTRIES);
+
+ return (
+ <main>
+ <h1>Countries</h1>
+ {loading ? (
+ <p>Loading…</p>
+ ) : (
+ <ul>
+ {countries.map(country => (
+ <li key={country.code}>{country.emoji} {country.name}</li>
+ ))}
+ </ul>
+ )}
+ </main>
+ );
+}
diff --git a/examples/bundling/tree-shaking/webpack-ac3/src/index.js b/examples/bundling/tree-shaking/webpack-ac3/src/index.js
new file mode 100644
--- /dev/null
+++ b/examples/bundling/tree-shaking/webpack-ac3/src/index.js
@@ -0,0 +1,17 @@
+import React from "react";
+import { render } from "react-dom";
+import { ApolloClient, ApolloProvider, InMemoryCache } from "@apollo/client";
+
+import App from "./App";
+
+const client = new ApolloClient({
+ uri: 'https://countries.trevorblades.com/',
+ cache: new InMemoryCache(),
+});
+
+render(
+ <ApolloProvider client={client}>
+ <App />
+ </ApolloProvider>,
+ document.getElementById("root")
+);
diff --git a/jest.config.js b/jest.config.js
deleted file mode 100644
--- a/jest.config.js
+++ /dev/null
@@ -1,4 +0,0 @@
-module.exports = {
- rootDir: '.',
- projects: ['<rootDir>/packages/*'],
-};
diff --git a/packages/apollo-boost/jest.config.js b/packages/apollo-boost/jest.config.js
deleted file mode 100644
--- a/packages/apollo-boost/jest.config.js
+++ /dev/null
@@ -1,3 +0,0 @@
-module.exports = {
- ...require('../../config/jest.config.settings'),
-};
diff --git a/packages/apollo-boost/rollup.config.js b/packages/apollo-boost/rollup.config.js
deleted file mode 100644
--- a/packages/apollo-boost/rollup.config.js
+++ /dev/null
@@ -1,12 +0,0 @@
-import { rollup } from '../../config/rollup.config';
-
-export default rollup({
- name: 'apollo-boost',
- extraGlobals: {
- 'apollo-cache-inmemory': 'apolloCacheInMemory',
- 'apollo-link': 'apolloLink.core',
- 'apollo-link-http': 'apolloLinkHttp',
- 'apollo-link-error': 'apolloLinkError',
- 'graphql-tag': 'graphqlTag',
- },
-});
diff --git a/packages/apollo-boost/src/index.ts b/packages/apollo-boost/src/index.ts
deleted file mode 100644
--- a/packages/apollo-boost/src/index.ts
+++ /dev/null
@@ -1,202 +0,0 @@
-/* necessary for backward compat */
-export * from 'apollo-client';
-export * from 'apollo-link';
-export * from 'apollo-cache-inmemory';
-
-import { Operation, ApolloLink, Observable } from 'apollo-link';
-import { HttpLink, UriFunction } from 'apollo-link-http';
-import { onError, ErrorLink } from 'apollo-link-error';
-import { ApolloCache } from 'apollo-cache';
-import { InMemoryCache, CacheResolverMap } from 'apollo-cache-inmemory';
-import gql from 'graphql-tag';
-import ApolloClient, {
- Resolvers,
- LocalStateFragmentMatcher,
-} from 'apollo-client';
-import { DocumentNode } from 'graphql';
-import { invariant } from 'ts-invariant';
-
-export { gql, HttpLink };
-
-type ClientStateConfig = {
- cache?: ApolloCache<any>;
- defaults?: Record<string, any>;
- resolvers?: Resolvers | Resolvers[];
- typeDefs?: string | string[] | DocumentNode | DocumentNode[];
- fragmentMatcher?: LocalStateFragmentMatcher;
-};
-
-export interface PresetConfig {
- request?: (operation: Operation) => Promise<void> | void;
- uri?: string | UriFunction;
- credentials?: string;
- headers?: any;
- fetch?: WindowOrWorkerGlobalScope['fetch'];
- fetchOptions?: HttpLink.Options;
- clientState?: ClientStateConfig;
- onError?: ErrorLink.ErrorHandler;
- cacheRedirects?: CacheResolverMap;
- cache?: ApolloCache<any>;
- name?: string;
- version?: string;
- resolvers?: Resolvers | Resolvers[];
- typeDefs?: string | string[] | DocumentNode | DocumentNode[];
- fragmentMatcher?: LocalStateFragmentMatcher;
- assumeImmutableResults?: boolean;
-}
-
-// Yes, these are the exact same as the `PresetConfig` interface. We're
-// defining these again so they can be used to verify that valid config
-// options are being used in the `DefaultClient` constructor, for clients
-// that aren't using Typescript. This duplication is unfortunate, and at
-// some point can likely be adjusted so these items are inferred from
-// the `PresetConfig` interface using a Typescript transform at compilation
-// time. Unfortunately, TS transforms with rollup don't appear to be quite
-// working properly, so this will have to be re-visited at some point.
-// For now, when updating the properties of the `PresetConfig` interface,
-// please also update this constant.
-const PRESET_CONFIG_KEYS = [
- 'request',
- 'uri',
- 'credentials',
- 'headers',
- 'fetch',
- 'fetchOptions',
- 'clientState',
- 'onError',
- 'cacheRedirects',
- 'cache',
- 'name',
- 'version',
- 'resolvers',
- 'typeDefs',
- 'fragmentMatcher',
-];
-
-export default class DefaultClient<TCache> extends ApolloClient<TCache> {
- constructor(config: PresetConfig = {}) {
- if (config) {
- const diff = Object.keys(config).filter(
- key => PRESET_CONFIG_KEYS.indexOf(key) === -1,
- );
-
- if (diff.length > 0) {
- invariant.warn(
- 'ApolloBoost was initialized with unsupported options: ' +
- `${diff.join(' ')}`,
- );
- }
- }
-
- const {
- request,
- uri,
- credentials,
- headers,
- fetch,
- fetchOptions,
- clientState,
- cacheRedirects,
- onError: errorCallback,
- name,
- version,
- resolvers,
- typeDefs,
- fragmentMatcher,
- } = config;
-
- let { cache } = config;
-
- invariant(
- !cache || !cacheRedirects,
- 'Incompatible cache configuration. When not providing `cache`, ' +
- 'configure the provided instance with `cacheRedirects` instead.',
- );
-
- if (!cache) {
- cache = cacheRedirects
- ? new InMemoryCache({ cacheRedirects })
- : new InMemoryCache();
- }
-
- const errorLink = errorCallback
- ? onError(errorCallback)
- : onError(({ graphQLErrors, networkError }) => {
- if (graphQLErrors) {
- graphQLErrors.forEach(({ message, locations, path }) =>
- // tslint:disable-next-line
- invariant.warn(
- `[GraphQL error]: Message: ${message}, Location: ` +
- `${locations}, Path: ${path}`,
- ),
- );
- }
- if (networkError) {
- // tslint:disable-next-line
- invariant.warn(`[Network error]: ${networkError}`);
- }
- });
-
- const requestHandler = request
- ? new ApolloLink(
- (operation, forward) =>
- new Observable(observer => {
- let handle: any;
- Promise.resolve(operation)
- .then(oper => request(oper))
- .then(() => {
- handle = forward(operation).subscribe({
- next: observer.next.bind(observer),
- error: observer.error.bind(observer),
- complete: observer.complete.bind(observer),
- });
- })
- .catch(observer.error.bind(observer));
-
- return () => {
- if (handle) {
- handle.unsubscribe();
- }
- };
- }),
- )
- : false;
-
- const httpLink = new HttpLink({
- uri: uri || '/graphql',
- fetch,
- fetchOptions: fetchOptions || {},
- credentials: credentials || 'same-origin',
- headers: headers || {},
- });
-
- const link = ApolloLink.from([errorLink, requestHandler, httpLink].filter(
- x => !!x,
- ) as ApolloLink[]);
-
- let activeResolvers = resolvers;
- let activeTypeDefs = typeDefs;
- let activeFragmentMatcher = fragmentMatcher;
- if (clientState) {
- if (clientState.defaults) {
- cache.writeData({
- data: clientState.defaults,
- });
- }
- activeResolvers = clientState.resolvers;
- activeTypeDefs = clientState.typeDefs;
- activeFragmentMatcher = clientState.fragmentMatcher;
- }
-
- // super hacky, we will fix the types eventually
- super({
- cache,
- link,
- name,
- version,
- resolvers: activeResolvers,
- typeDefs: activeTypeDefs,
- fragmentMatcher: activeFragmentMatcher,
- } as any);
- }
-}
diff --git a/packages/apollo-cache-inmemory/jest.config.js b/packages/apollo-cache-inmemory/jest.config.js
deleted file mode 100644
--- a/packages/apollo-cache-inmemory/jest.config.js
+++ /dev/null
@@ -1,3 +0,0 @@
-module.exports = {
- ...require('../../config/jest.config.settings'),
-};
diff --git a/packages/apollo-cache-inmemory/rollup.config.js b/packages/apollo-cache-inmemory/rollup.config.js
deleted file mode 100644
--- a/packages/apollo-cache-inmemory/rollup.config.js
+++ /dev/null
@@ -1,10 +0,0 @@
-import { rollup } from '../../config/rollup.config';
-
-export default rollup({
- name: 'apollo-cache-inmemory',
- extraGlobals: {
- 'graphql/language/printer': 'print',
- optimism: 'optimism',
- 'graphql/language/visitor': 'visitor',
- },
-});
diff --git a/packages/apollo-cache-inmemory/src/depTrackingCache.ts b/packages/apollo-cache-inmemory/src/depTrackingCache.ts
deleted file mode 100644
--- a/packages/apollo-cache-inmemory/src/depTrackingCache.ts
+++ /dev/null
@@ -1,70 +0,0 @@
-import { NormalizedCache, NormalizedCacheObject, StoreObject } from './types';
-import { wrap, OptimisticWrapperFunction } from 'optimism';
-
-const hasOwn = Object.prototype.hasOwnProperty;
-
-export class DepTrackingCache implements NormalizedCache {
- // Wrapper function produced by the optimism library, used to depend on
- // dataId strings, for easy invalidation of specific IDs.
- private depend: OptimisticWrapperFunction<[string], StoreObject | undefined>;
-
- constructor(private data: NormalizedCacheObject = Object.create(null)) {
- this.depend = wrap((dataId: string) => this.data[dataId], {
- disposable: true,
- makeCacheKey(dataId: string) {
- return dataId;
- },
- });
- }
-
- public toObject(): NormalizedCacheObject {
- return this.data;
- }
-
- public get(dataId: string): StoreObject {
- this.depend(dataId);
- return this.data[dataId]!;
- }
-
- public set(dataId: string, value?: StoreObject) {
- const oldValue = this.data[dataId];
- if (value !== oldValue) {
- this.data[dataId] = value;
- this.depend.dirty(dataId);
- }
- }
-
- public delete(dataId: string): void {
- if (hasOwn.call(this.data, dataId)) {
- delete this.data[dataId];
- this.depend.dirty(dataId);
- }
- }
-
- public clear(): void {
- this.replace(null);
- }
-
- public replace(newData: NormalizedCacheObject | null): void {
- if (newData) {
- Object.keys(newData).forEach(dataId => {
- this.set(dataId, newData[dataId]);
- });
- Object.keys(this.data).forEach(dataId => {
- if (!hasOwn.call(newData, dataId)) {
- this.delete(dataId);
- }
- });
- } else {
- Object.keys(this.data).forEach(dataId => {
- this.delete(dataId);
- });
- }
- }
-}
-
-export function defaultNormalizedCacheFactory(
- seed?: NormalizedCacheObject,
-): NormalizedCache {
- return new DepTrackingCache(seed);
-}
diff --git a/packages/apollo-cache-inmemory/src/fragmentMatcher.ts b/packages/apollo-cache-inmemory/src/fragmentMatcher.ts
deleted file mode 100644
--- a/packages/apollo-cache-inmemory/src/fragmentMatcher.ts
+++ /dev/null
@@ -1,180 +0,0 @@
-import { isTest, IdValue } from 'apollo-utilities';
-import { invariant } from 'ts-invariant';
-
-import {
- ReadStoreContext,
- FragmentMatcherInterface,
- PossibleTypesMap,
- IntrospectionResultData,
-} from './types';
-
-let haveWarned = false;
-
-function shouldWarn() {
- const answer = !haveWarned;
- /* istanbul ignore if */
- if (!isTest()) {
- haveWarned = true;
- }
- return answer;
-}
-
-/**
- * This fragment matcher is very basic and unable to match union or interface type conditions
- */
-export class HeuristicFragmentMatcher implements FragmentMatcherInterface {
- constructor() {
- // do nothing
- }
-
- public ensureReady() {
- return Promise.resolve();
- }
-
- public canBypassInit() {
- return true; // we don't need to initialize this fragment matcher.
- }
-
- public match(
- idValue: IdValue,
- typeCondition: string,
- context: ReadStoreContext,
- ): boolean | 'heuristic' {
- const obj = context.store.get(idValue.id);
- const isRootQuery = idValue.id === 'ROOT_QUERY';
-
- if (!obj) {
- // https://github.com/apollographql/apollo-client/pull/3507
- return isRootQuery;
- }
-
- const { __typename = isRootQuery && 'Query' } = obj;
-
- if (!__typename) {
- if (shouldWarn()) {
- invariant.warn(`You're using fragments in your queries, but either don't have the addTypename:
- true option set in Apollo Client, or you are trying to write a fragment to the store without the __typename.
- Please turn on the addTypename option and include __typename when writing fragments so that Apollo Client
- can accurately match fragments.`);
- invariant.warn(
- 'Could not find __typename on Fragment ',
- typeCondition,
- obj,
- );
- invariant.warn(
- `DEPRECATION WARNING: using fragments without __typename is unsupported behavior ` +
- `and will be removed in future versions of Apollo client. You should fix this and set addTypename to true now.`,
- );
- }
-
- return 'heuristic';
- }
-
- if (__typename === typeCondition) {
- return true;
- }
-
- // At this point we don't know if this fragment should match or not. It's
- // either:
- //
- // 1. (GOOD) A fragment on a matching interface or union.
- // 2. (BAD) A fragment on a non-matching concrete type or interface or union.
- //
- // If it's 2, we don't want it to match. If it's 1, we want it to match. We
- // can't tell the difference, so we warn the user, but still try to match
- // it (for backwards compatibility reasons). This unfortunately means that
- // using the `HeuristicFragmentMatcher` with unions and interfaces is
- // very unreliable. This will be addressed in a future major version of
- // Apollo Client, but for now the recommendation is to use the
- // `IntrospectionFragmentMatcher` when working with unions/interfaces.
-
- if (shouldWarn()) {
- invariant.error(
- 'You are using the simple (heuristic) fragment matcher, but your ' +
- 'queries contain union or interface types. Apollo Client will not be ' +
- 'able to accurately map fragments. To make this error go away, use ' +
- 'the `IntrospectionFragmentMatcher` as described in the docs: ' +
- 'https://www.apollographql.com/docs/react/advanced/fragments.html#fragment-matcher',
- );
- }
-
- return 'heuristic';
- }
-}
-
-export class IntrospectionFragmentMatcher implements FragmentMatcherInterface {
- private isReady: boolean;
- private possibleTypesMap: PossibleTypesMap;
-
- constructor(options?: {
- introspectionQueryResultData?: IntrospectionResultData;
- }) {
- if (options && options.introspectionQueryResultData) {
- this.possibleTypesMap = this.parseIntrospectionResult(
- options.introspectionQueryResultData,
- );
- this.isReady = true;
- } else {
- this.isReady = false;
- }
-
- this.match = this.match.bind(this);
- }
-
- public match(
- idValue: IdValue,
- typeCondition: string,
- context: ReadStoreContext,
- ) {
- invariant(
- this.isReady,
- 'FragmentMatcher.match() was called before FragmentMatcher.init()',
- );
-
- const obj = context.store.get(idValue.id);
- const isRootQuery = idValue.id === 'ROOT_QUERY';
-
- if (!obj) {
- // https://github.com/apollographql/apollo-client/pull/4620
- return isRootQuery;
- }
-
- const { __typename = isRootQuery && 'Query' } = obj;
-
- invariant(
- __typename,
- `Cannot match fragment because __typename property is missing: ${JSON.stringify(
- obj,
- )}`,
- );
-
- if (__typename === typeCondition) {
- return true;
- }
-
- const implementingTypes = this.possibleTypesMap[typeCondition];
- if (
- __typename &&
- implementingTypes &&
- implementingTypes.indexOf(__typename) > -1
- ) {
- return true;
- }
-
- return false;
- }
-
- private parseIntrospectionResult(
- introspectionResultData: IntrospectionResultData,
- ): PossibleTypesMap {
- const typeMap: PossibleTypesMap = {};
- introspectionResultData.__schema.types.forEach(type => {
- if (type.kind === 'UNION' || type.kind === 'INTERFACE') {
- typeMap[type.name] = type.possibleTypes.map(
- implementingType => implementingType.name,
- );
- }
- });
- return typeMap;
- }
-}
diff --git a/packages/apollo-cache-inmemory/src/fragmentMatcherIntrospectionQuery.ts b/packages/apollo-cache-inmemory/src/fragmentMatcherIntrospectionQuery.ts
deleted file mode 100644
--- a/packages/apollo-cache-inmemory/src/fragmentMatcherIntrospectionQuery.ts
+++ /dev/null
@@ -1,97 +0,0 @@
-const query: any = {
- kind: 'Document',
- definitions: [
- {
- kind: 'OperationDefinition',
- operation: 'query',
- name: null,
- variableDefinitions: null,
- directives: [],
- selectionSet: {
- kind: 'SelectionSet',
- selections: [
- {
- kind: 'Field',
- alias: null,
- name: {
- kind: 'Name',
- value: '__schema',
- },
- arguments: [],
- directives: [],
- selectionSet: {
- kind: 'SelectionSet',
- selections: [
- {
- kind: 'Field',
- alias: null,
- name: {
- kind: 'Name',
- value: 'types',
- },
- arguments: [],
- directives: [],
- selectionSet: {
- kind: 'SelectionSet',
- selections: [
- {
- kind: 'Field',
- alias: null,
- name: {
- kind: 'Name',
- value: 'kind',
- },
- arguments: [],
- directives: [],
- selectionSet: null,
- },
- {
- kind: 'Field',
- alias: null,
- name: {
- kind: 'Name',
- value: 'name',
- },
- arguments: [],
- directives: [],
- selectionSet: null,
- },
- {
- kind: 'Field',
- alias: null,
- name: {
- kind: 'Name',
- value: 'possibleTypes',
- },
- arguments: [],
- directives: [],
- selectionSet: {
- kind: 'SelectionSet',
- selections: [
- {
- kind: 'Field',
- alias: null,
- name: {
- kind: 'Name',
- value: 'name',
- },
- arguments: [],
- directives: [],
- selectionSet: null,
- },
- ],
- },
- },
- ],
- },
- },
- ],
- },
- },
- ],
- },
- },
- ],
-};
-
-export default query;
diff --git a/packages/apollo-cache-inmemory/src/inMemoryCache.ts b/packages/apollo-cache-inmemory/src/inMemoryCache.ts
deleted file mode 100644
--- a/packages/apollo-cache-inmemory/src/inMemoryCache.ts
+++ /dev/null
@@ -1,355 +0,0 @@
-// Make builtins like Map and Set safe to use with non-extensible objects.
-import './fixPolyfills';
-
-import { DocumentNode } from 'graphql';
-
-import { Cache, ApolloCache, Transaction } from 'apollo-cache';
-
-import { addTypenameToDocument, canUseWeakMap } from 'apollo-utilities';
-
-import { wrap } from 'optimism';
-
-import { invariant, InvariantError } from 'ts-invariant';
-
-import { HeuristicFragmentMatcher } from './fragmentMatcher';
-import {
- ApolloReducerConfig,
- NormalizedCache,
- NormalizedCacheObject,
-} from './types';
-
-import { StoreReader } from './readFromStore';
-import { StoreWriter } from './writeToStore';
-import { DepTrackingCache } from './depTrackingCache';
-import { KeyTrie } from 'optimism';
-import { ObjectCache } from './objectCache';
-
-export interface InMemoryCacheConfig extends ApolloReducerConfig {
- resultCaching?: boolean;
- freezeResults?: boolean;
-}
-
-const defaultConfig: InMemoryCacheConfig = {
- fragmentMatcher: new HeuristicFragmentMatcher(),
- dataIdFromObject: defaultDataIdFromObject,
- addTypename: true,
- resultCaching: true,
- freezeResults: false,
-};
-
-export function defaultDataIdFromObject(result: any): string | null {
- if (result.__typename) {
- if (result.id !== undefined) {
- return `${result.__typename}:${result.id}`;
- }
- if (result._id !== undefined) {
- return `${result.__typename}:${result._id}`;
- }
- }
- return null;
-}
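The id scheme above in action; each result follows directly from the branches of `defaultDataIdFromObject`:

```ts
defaultDataIdFromObject({ __typename: 'Author', id: 42 });    // 'Author:42'
defaultDataIdFromObject({ __typename: 'Author', _id: 'a1' }); // 'Author:a1'
defaultDataIdFromObject({ id: 42 });                          // null: no __typename
defaultDataIdFromObject({ __typename: 'Author' });            // null: neither id nor _id
```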
-
-const hasOwn = Object.prototype.hasOwnProperty;
-
-export class OptimisticCacheLayer extends ObjectCache {
- constructor(
- public readonly optimisticId: string,
- // OptimisticCacheLayer objects always wrap some other parent cache, so
- // this.parent should never be null.
- public readonly parent: NormalizedCache,
- public readonly transaction: Transaction<NormalizedCacheObject>,
- ) {
- super(Object.create(null));
- }
-
- public toObject(): NormalizedCacheObject {
- return {
- ...this.parent.toObject(),
- ...this.data,
- };
- }
-
- // All the other accessor methods of ObjectCache work without knowing about
- // this.parent, but the get method needs to be overridden to implement the
- // fallback this.parent.get(dataId) behavior.
- public get(dataId: string) {
- return hasOwn.call(this.data, dataId)
- ? this.data[dataId]
- : this.parent.get(dataId);
- }
-}
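A minimal sketch of the layering semantics, assuming an `ObjectCache` base and a no-op transaction (both values are hypothetical, not from this patch):

```ts
const base = new ObjectCache({ 'Author:1': { __typename: 'Author', name: 'Ada' } });
const layer = new OptimisticCacheLayer('opt-1', base, () => {});

layer.set('Author:1', { __typename: 'Author', name: 'Ada (optimistic)' });
layer.get('Author:1'); // the optimistic value written into this layer
layer.get('Author:2'); // undefined, after falling through to base.get
layer.toObject();      // parent snapshot spread first, then this layer's data
```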
-
-export class InMemoryCache extends ApolloCache<NormalizedCacheObject> {
- private data: NormalizedCache;
- private optimisticData: NormalizedCache;
-
- protected config: InMemoryCacheConfig;
- private watches = new Set<Cache.WatchOptions>();
- private addTypename: boolean;
- private typenameDocumentCache = new Map<DocumentNode, DocumentNode>();
- private storeReader: StoreReader;
- private storeWriter: StoreWriter;
- private cacheKeyRoot = new KeyTrie<object>(canUseWeakMap);
-
- // Set this while in a transaction to prevent broadcasts...
- // don't forget to turn it back on!
- private silenceBroadcast: boolean = false;
-
- constructor(config: InMemoryCacheConfig = {}) {
- super();
- this.config = { ...defaultConfig, ...config };
-
- // backwards compat
- if ((this.config as any).customResolvers) {
- invariant.warn(
- 'customResolvers have been renamed to cacheRedirects. Please update your config as we will be deprecating customResolvers in the next major version.',
- );
- this.config.cacheRedirects = (this.config as any).customResolvers;
- }
-
- if ((this.config as any).cacheResolvers) {
- invariant.warn(
- 'cacheResolvers have been renamed to cacheRedirects. Please update your config as we will be deprecating cacheResolvers in the next major version.',
- );
- this.config.cacheRedirects = (this.config as any).cacheResolvers;
- }
-
- this.addTypename = !!this.config.addTypename;
-
- // Passing { resultCaching: false } in the InMemoryCache constructor options
- // will completely disable dependency tracking, which will improve memory
- // usage but worsen the performance of repeated reads.
- this.data = this.config.resultCaching
- ? new DepTrackingCache()
- : new ObjectCache();
-
- // When no optimistic writes are currently active, cache.optimisticData ===
- // cache.data, so there are no additional layers on top of the actual data.
- // When an optimistic update happens, this.optimisticData will become a
- // linked list of OptimisticCacheLayer objects that terminates with the
- // original this.data cache object.
- this.optimisticData = this.data;
-
- this.storeWriter = new StoreWriter();
- this.storeReader = new StoreReader({
- cacheKeyRoot: this.cacheKeyRoot,
- freezeResults: config.freezeResults,
- });
-
- const cache = this;
- const { maybeBroadcastWatch } = cache;
- this.maybeBroadcastWatch = wrap((c: Cache.WatchOptions) => {
- return maybeBroadcastWatch.call(this, c);
- }, {
- makeCacheKey(c: Cache.WatchOptions) {
- if (c.optimistic) {
- // If we're reading optimistic data, it doesn't matter if this.data
- // is a DepTrackingCache, since it will be ignored.
- return;
- }
-
- if (c.previousResult) {
- // If a previousResult was provided, assume the caller would prefer
- // to compare the previous data to the new data to determine whether
- // to broadcast, so we should disable caching by returning here, to
- // give maybeBroadcastWatch a chance to do that comparison.
- return;
- }
-
- if (cache.data instanceof DepTrackingCache) {
- // Return a cache key (thus enabling caching) only if we're currently
- // using a data store that can track cache dependencies.
- return cache.cacheKeyRoot.lookup(
- c.query,
- JSON.stringify(c.variables),
- );
- }
- }
- });
- }
-
- public restore(data: NormalizedCacheObject): this {
- if (data) this.data.replace(data);
- return this;
- }
-
- public extract(optimistic: boolean = false): NormalizedCacheObject {
- return (optimistic ? this.optimisticData : this.data).toObject();
- }
-
- public read<T>(options: Cache.ReadOptions): T | null {
- if (typeof options.rootId === 'string' &&
- typeof this.data.get(options.rootId) === 'undefined') {
- return null;
- }
-
- const { fragmentMatcher } = this.config;
- const fragmentMatcherFunction = fragmentMatcher && fragmentMatcher.match;
-
- return this.storeReader.readQueryFromStore({
- store: options.optimistic ? this.optimisticData : this.data,
- query: this.transformDocument(options.query),
- variables: options.variables,
- rootId: options.rootId,
- fragmentMatcherFunction,
- previousResult: options.previousResult,
- config: this.config,
- }) || null;
- }
-
- public write(write: Cache.WriteOptions): void {
- const { fragmentMatcher } = this.config;
- const fragmentMatcherFunction = fragmentMatcher && fragmentMatcher.match;
-
- this.storeWriter.writeResultToStore({
- dataId: write.dataId,
- result: write.result,
- variables: write.variables,
- document: this.transformDocument(write.query),
- store: this.data,
- dataIdFromObject: this.config.dataIdFromObject,
- fragmentMatcherFunction,
- });
-
- this.broadcastWatches();
- }
-
- public diff<T>(query: Cache.DiffOptions): Cache.DiffResult<T> {
- const { fragmentMatcher } = this.config;
- const fragmentMatcherFunction = fragmentMatcher && fragmentMatcher.match;
-
- return this.storeReader.diffQueryAgainstStore({
- store: query.optimistic ? this.optimisticData : this.data,
- query: this.transformDocument(query.query),
- variables: query.variables,
- returnPartialData: query.returnPartialData,
- previousResult: query.previousResult,
- fragmentMatcherFunction,
- config: this.config,
- });
- }
-
- public watch(watch: Cache.WatchOptions): () => void {
- this.watches.add(watch);
-
- return () => {
- this.watches.delete(watch);
- };
- }
-
- public evict(query: Cache.EvictOptions): Cache.EvictionResult {
- throw new InvariantError(`eviction is not implemented on InMemory Cache`);
- }
-
- public reset(): Promise<void> {
- this.data.clear();
- this.broadcastWatches();
-
- return Promise.resolve();
- }
-
- public removeOptimistic(idToRemove: string) {
- const toReapply: OptimisticCacheLayer[] = [];
- let removedCount = 0;
- let layer = this.optimisticData;
-
- while (layer instanceof OptimisticCacheLayer) {
- if (layer.optimisticId === idToRemove) {
- ++removedCount;
- } else {
- toReapply.push(layer);
- }
- layer = layer.parent;
- }
-
- if (removedCount > 0) {
- // Reset this.optimisticData to the first non-OptimisticCacheLayer object,
- // which is almost certainly this.data.
- this.optimisticData = layer;
-
- // Reapply the layers whose optimistic IDs do not match the removed ID.
- while (toReapply.length > 0) {
- const layer = toReapply.pop()!;
- this.performTransaction(layer.transaction, layer.optimisticId);
- }
-
- this.broadcastWatches();
- }
- }
-
- public performTransaction(
- transaction: Transaction<NormalizedCacheObject>,
- // This parameter is not part of the performTransaction signature inherited
- // from the ApolloCache abstract class, but it's useful because it saves us
- // from duplicating this implementation in recordOptimisticTransaction.
- optimisticId?: string,
- ) {
- const { data, silenceBroadcast } = this;
- this.silenceBroadcast = true;
-
- if (typeof optimisticId === 'string') {
- // Add a new optimistic layer and temporarily make this.data refer to
- // that layer for the duration of the transaction.
- this.data = this.optimisticData = new OptimisticCacheLayer(
- // Note that there can be multiple layers with the same optimisticId.
- // When removeOptimistic(id) is called for that id, all matching layers
- // will be removed, and the remaining layers will be reapplied.
- optimisticId,
- this.optimisticData,
- transaction,
- );
- }
-
- try {
- transaction(this);
- } finally {
- this.silenceBroadcast = silenceBroadcast;
- this.data = data;
- }
-
- // This broadcast does nothing if this.silenceBroadcast is true.
- this.broadcastWatches();
- }
-
- public recordOptimisticTransaction(
- transaction: Transaction<NormalizedCacheObject>,
- id: string,
- ) {
- return this.performTransaction(transaction, id);
- }
-
- public transformDocument(document: DocumentNode): DocumentNode {
- if (this.addTypename) {
- let result = this.typenameDocumentCache.get(document);
- if (!result) {
- result = addTypenameToDocument(document);
- this.typenameDocumentCache.set(document, result);
- // If someone calls transformDocument and then mistakenly passes the
- // result back into an API that also calls transformDocument, make sure
- // we don't keep creating new query documents.
- this.typenameDocumentCache.set(result, result);
- }
- return result;
- }
- return document;
- }
-
- protected broadcastWatches() {
- if (!this.silenceBroadcast) {
- this.watches.forEach(c => this.maybeBroadcastWatch(c));
- }
- }
-
- // This method is wrapped in the constructor so that it will be called only
- // if the data that would be broadcast has changed.
- private maybeBroadcastWatch(c: Cache.WatchOptions) {
- c.callback(
- this.diff({
- query: c.query,
- variables: c.variables,
- previousResult: c.previousResult && c.previousResult(),
- optimistic: c.optimistic,
- }),
- );
- }
-}
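A small usage sketch of the identity caching in `transformDocument` above, with gql from graphql-tag as used by the benchmarks later in this patch:

```ts
import gql from 'graphql-tag';

const cache = new InMemoryCache(); // addTypename defaults to true
const doc = gql`{ author { firstName } }`;

const transformed = cache.transformDocument(doc); // __typename fields added
// Repeat calls hit typenameDocumentCache, so the same object comes back:
cache.transformDocument(doc) === transformed;         // true
// The result is also mapped to itself, so double-transforming is harmless:
cache.transformDocument(transformed) === transformed; // true
```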
diff --git a/packages/apollo-cache-inmemory/src/index.ts b/packages/apollo-cache-inmemory/src/index.ts
deleted file mode 100644
--- a/packages/apollo-cache-inmemory/src/index.ts
+++ /dev/null
@@ -1,11 +0,0 @@
-export {
- InMemoryCache,
- InMemoryCacheConfig,
- defaultDataIdFromObject,
-} from './inMemoryCache';
-
-export * from './readFromStore';
-export * from './writeToStore';
-export * from './fragmentMatcher';
-export * from './objectCache';
-export * from './types';
diff --git a/packages/apollo-cache-inmemory/src/mapCache.ts b/packages/apollo-cache-inmemory/src/mapCache.ts
deleted file mode 100644
--- a/packages/apollo-cache-inmemory/src/mapCache.ts
+++ /dev/null
@@ -1,50 +0,0 @@
-import { NormalizedCache, NormalizedCacheObject, StoreObject } from './types';
-
-/**
- * A Map-based implementation of the NormalizedCache.
- * Note that you need a polyfill for Object.entries for this to work.
- */
-export class MapCache implements NormalizedCache {
- private cache: Map<string, StoreObject | undefined>;
-
- constructor(data: NormalizedCacheObject = {}) {
- this.cache = new Map(Object.entries(data));
- }
-
- public get(dataId: string): StoreObject {
- return this.cache.get(`${dataId}`)!;
- }
-
- public set(dataId: string, value: StoreObject): void {
- this.cache.set(`${dataId}`, value);
- }
-
- public delete(dataId: string): void {
- this.cache.delete(`${dataId}`);
- }
-
- public clear(): void {
- return this.cache.clear();
- }
-
- public toObject(): NormalizedCacheObject {
- const obj: NormalizedCacheObject = {};
-    this.cache.forEach((value, dataId) => {
-      obj[dataId] = value;
-    });
- return obj;
- }
-
- public replace(newData: NormalizedCacheObject): void {
- this.cache.clear();
- Object.entries(newData).forEach(([dataId, value]) =>
- this.cache.set(dataId, value),
- );
- }
-}
-
-export function mapNormalizedCacheFactory(
- seed?: NormalizedCacheObject,
-): NormalizedCache {
- return new MapCache(seed);
-}
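A short usage sketch of `MapCache` as defined above:

```ts
const cache = new MapCache({ 'Author:1': { __typename: 'Author', name: 'Ada' } });
cache.set('Author:2', { __typename: 'Author', name: 'Grace' });
cache.get('Author:2'); // { __typename: 'Author', name: 'Grace' }
cache.toObject();      // plain-object snapshot containing both entries
```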
diff --git a/packages/apollo-cache-inmemory/src/objectCache.ts b/packages/apollo-cache-inmemory/src/objectCache.ts
deleted file mode 100644
--- a/packages/apollo-cache-inmemory/src/objectCache.ts
+++ /dev/null
@@ -1,35 +0,0 @@
-import { NormalizedCache, NormalizedCacheObject, StoreObject } from './types';
-
-export class ObjectCache implements NormalizedCache {
- constructor(protected data: NormalizedCacheObject = Object.create(null)) {}
-
- public toObject() {
- return this.data;
- }
-
- public get(dataId: string) {
- return this.data[dataId]!;
- }
-
- public set(dataId: string, value: StoreObject) {
- this.data[dataId] = value;
- }
-
- public delete(dataId: string) {
- this.data[dataId] = void 0;
- }
-
- public clear() {
- this.data = Object.create(null);
- }
-
- public replace(newData: NormalizedCacheObject) {
- this.data = newData || Object.create(null);
- }
-}
-
-export function defaultNormalizedCacheFactory(
- seed?: NormalizedCacheObject,
-): NormalizedCache {
- return new ObjectCache(seed);
-}
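One behavior of `ObjectCache` worth calling out: `delete` assigns `undefined` rather than removing the key, as the sketch below shows:

```ts
const cache = new ObjectCache({ 'Author:1': { __typename: 'Author' } });
cache.delete('Author:1');
cache.get('Author:1');          // undefined
'Author:1' in cache.toObject(); // true: the key survives with an undefined value
```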
diff --git a/packages/apollo-cache-inmemory/src/readFromStore.ts b/packages/apollo-cache-inmemory/src/readFromStore.ts
deleted file mode 100644
--- a/packages/apollo-cache-inmemory/src/readFromStore.ts
+++ /dev/null
@@ -1,638 +0,0 @@
-import {
- argumentsObjectFromField,
- assign,
- canUseWeakMap,
- createFragmentMap,
- DirectiveInfo,
- FragmentMap,
- getDefaultValues,
- getDirectiveInfoFromField,
- getFragmentDefinitions,
- getMainDefinition,
- getQueryDefinition,
- getStoreKeyName,
- IdValue,
- isEqual,
- isField,
- isIdValue,
- isInlineFragment,
- isJsonValue,
- maybeDeepFreeze,
- mergeDeepArray,
- resultKeyNameFromField,
- shouldInclude,
- StoreValue,
- toIdValue,
-} from 'apollo-utilities';
-
-import { Cache } from 'apollo-cache';
-
-import {
- ReadStoreContext,
- DiffQueryAgainstStoreOptions,
- ReadQueryOptions,
- StoreObject,
-} from './types';
-
-import {
- DocumentNode,
- FieldNode,
- FragmentDefinitionNode,
- InlineFragmentNode,
- SelectionSetNode,
-} from 'graphql';
-
-import { wrap, KeyTrie } from 'optimism';
-import { DepTrackingCache } from './depTrackingCache';
-import { invariant, InvariantError } from 'ts-invariant';
-
-export type VariableMap = { [name: string]: any };
-
-export type FragmentMatcher = (
- rootValue: any,
- typeCondition: string,
- context: ReadStoreContext,
-) => boolean | 'heuristic';
-
-type ExecContext = {
- query: DocumentNode;
- fragmentMap: FragmentMap;
- contextValue: ReadStoreContext;
- variableValues: VariableMap;
- fragmentMatcher: FragmentMatcher;
-};
-
-type ExecInfo = {
- resultKey: string;
- directives: DirectiveInfo;
-};
-
-export type ExecResultMissingField = {
- object: StoreObject;
- fieldName: string;
- tolerable: boolean;
-};
-
-export type ExecResult<R = any> = {
- result: R;
- // Empty array if no missing fields encountered while computing result.
- missing?: ExecResultMissingField[];
-};
-
-type ExecStoreQueryOptions = {
- query: DocumentNode;
- rootValue: IdValue;
- contextValue: ReadStoreContext;
- variableValues: VariableMap;
- // Default matcher always matches all fragments
- fragmentMatcher?: FragmentMatcher;
-};
-
-type ExecSelectionSetOptions = {
- selectionSet: SelectionSetNode;
- rootValue: any;
- execContext: ExecContext;
-};
-
-type ExecSubSelectedArrayOptions = {
- field: FieldNode;
- array: any[];
- execContext: ExecContext;
-};
-
-export interface StoreReaderConfig {
- cacheKeyRoot?: KeyTrie<object>;
- freezeResults?: boolean;
-}
-
-export class StoreReader {
- private freezeResults: boolean;
-
- constructor({
- cacheKeyRoot = new KeyTrie<object>(canUseWeakMap),
- freezeResults = false,
- }: StoreReaderConfig = {}) {
- const {
- executeStoreQuery,
- executeSelectionSet,
- executeSubSelectedArray,
- } = this;
-
- this.freezeResults = freezeResults;
-
- this.executeStoreQuery = wrap((options: ExecStoreQueryOptions) => {
- return executeStoreQuery.call(this, options);
- }, {
- makeCacheKey({
- query,
- rootValue,
- contextValue,
- variableValues,
- fragmentMatcher,
- }: ExecStoreQueryOptions) {
- // The result of executeStoreQuery can be safely cached only if the
- // underlying store is capable of tracking dependencies and invalidating
- // the cache when relevant data have changed.
- if (contextValue.store instanceof DepTrackingCache) {
- return cacheKeyRoot.lookup(
- contextValue.store,
- query,
- fragmentMatcher,
- JSON.stringify(variableValues),
- rootValue.id,
- );
- }
- }
- });
-
- this.executeSelectionSet = wrap((options: ExecSelectionSetOptions) => {
- return executeSelectionSet.call(this, options);
- }, {
- makeCacheKey({
- selectionSet,
- rootValue,
- execContext,
- }: ExecSelectionSetOptions) {
- if (execContext.contextValue.store instanceof DepTrackingCache) {
- return cacheKeyRoot.lookup(
- execContext.contextValue.store,
- selectionSet,
- execContext.fragmentMatcher,
- JSON.stringify(execContext.variableValues),
- rootValue.id,
- );
- }
- }
- });
-
- this.executeSubSelectedArray = wrap((options: ExecSubSelectedArrayOptions) => {
- return executeSubSelectedArray.call(this, options);
- }, {
- makeCacheKey({ field, array, execContext }) {
- if (execContext.contextValue.store instanceof DepTrackingCache) {
- return cacheKeyRoot.lookup(
- execContext.contextValue.store,
- field,
- array,
- JSON.stringify(execContext.variableValues),
- );
- }
- }
- });
- }
-
- /**
- * Resolves the result of a query solely from the store (i.e. never hits the server).
- *
- * @param {Store} store The {@link NormalizedCache} used by Apollo for the `data` portion of the
- * store.
- *
- * @param {DocumentNode} query The query document to resolve from the data available in the store.
- *
- * @param {Object} [variables] A map from the name of a variable to its value. These variables can
- * be referenced by the query document.
- *
- * @param {any} previousResult The previous result returned by this function for the same query.
- * If nothing in the store changed since that previous result then values from the previous result
- * will be returned to preserve referential equality.
- */
- public readQueryFromStore<QueryType>(
- options: ReadQueryOptions,
- ): QueryType | undefined {
- return this.diffQueryAgainstStore<QueryType>({
- ...options,
- returnPartialData: false,
- }).result;
- }
-
- /**
- * Given a store and a query, return as much of the result as possible and
- * identify if any data was missing from the store.
- * @param {DocumentNode} query A parsed GraphQL query document
- * @param {Store} store The Apollo Client store object
- * @param {any} previousResult The previous result returned by this function for the same query
- * @return {result: Object, complete: [boolean]}
- */
- public diffQueryAgainstStore<T>({
- store,
- query,
- variables,
- previousResult,
- returnPartialData = true,
- rootId = 'ROOT_QUERY',
- fragmentMatcherFunction,
- config,
- }: DiffQueryAgainstStoreOptions): Cache.DiffResult<T> {
- // Throw the right validation error by trying to find a query in the document
- const queryDefinition = getQueryDefinition(query);
-
- variables = assign({}, getDefaultValues(queryDefinition), variables);
-
- const context: ReadStoreContext = {
- // Global settings
- store,
- dataIdFromObject: config && config.dataIdFromObject,
- cacheRedirects: (config && config.cacheRedirects) || {},
- };
-
- const execResult = this.executeStoreQuery({
- query,
- rootValue: {
- type: 'id',
- id: rootId,
- generated: true,
- typename: 'Query',
- },
- contextValue: context,
- variableValues: variables,
- fragmentMatcher: fragmentMatcherFunction,
- });
-
- const hasMissingFields =
- execResult.missing && execResult.missing.length > 0;
-
-    if (hasMissingFields && !returnPartialData) {
- execResult.missing!.forEach(info => {
- if (info.tolerable) return;
- throw new InvariantError(
- `Can't find field ${info.fieldName} on object ${JSON.stringify(
- info.object,
- null,
- 2,
- )}.`,
- );
- });
- }
-
- if (previousResult) {
- if (isEqual(previousResult, execResult.result)) {
- execResult.result = previousResult;
- }
- }
-
- return {
- result: execResult.result,
- complete: !hasMissingFields,
- };
- }
-
- /**
- * Based on graphql function from graphql-js:
- *
- * graphql(
- * schema: GraphQLSchema,
- * requestString: string,
- * rootValue?: ?any,
- * contextValue?: ?any,
- * variableValues?: ?{[key: string]: any},
- * operationName?: ?string
- * ): Promise<GraphQLResult>
- *
-   * As of graphql-anywhere 4.0 the default export is synchronous,
-   * but an async alternative is exported alongside it. In the 5.0
-   * version the async variant will once again be the only export.
- *
- */
- private executeStoreQuery({
- query,
- rootValue,
- contextValue,
- variableValues,
- // Default matcher always matches all fragments
- fragmentMatcher = defaultFragmentMatcher,
- }: ExecStoreQueryOptions): ExecResult {
- const mainDefinition = getMainDefinition(query);
- const fragments = getFragmentDefinitions(query);
- const fragmentMap = createFragmentMap(fragments);
- const execContext: ExecContext = {
- query,
- fragmentMap,
- contextValue,
- variableValues,
- fragmentMatcher,
- };
-
- return this.executeSelectionSet({
- selectionSet: mainDefinition.selectionSet,
- rootValue,
- execContext,
- });
- }
-
- private executeSelectionSet({
- selectionSet,
- rootValue,
- execContext,
- }: ExecSelectionSetOptions): ExecResult {
- const { fragmentMap, contextValue, variableValues: variables } = execContext;
- const finalResult: ExecResult = { result: null };
-
- const objectsToMerge: { [key: string]: any }[] = [];
-
- const object: StoreObject = contextValue.store.get(rootValue.id);
-
- const typename =
- (object && object.__typename) ||
- (rootValue.id === 'ROOT_QUERY' && 'Query') ||
- void 0;
-
- function handleMissing<T>(result: ExecResult<T>): T {
- if (result.missing) {
- finalResult.missing = finalResult.missing || [];
- finalResult.missing.push(...result.missing);
- }
- return result.result;
- }
-
- selectionSet.selections.forEach(selection => {
- if (!shouldInclude(selection, variables)) {
- // Skip this entirely
- return;
- }
-
- if (isField(selection)) {
- const fieldResult = handleMissing(
- this.executeField(object, typename, selection, execContext),
- );
-
- if (typeof fieldResult !== 'undefined') {
- objectsToMerge.push({
- [resultKeyNameFromField(selection)]: fieldResult,
- });
- }
-
- } else {
- let fragment: InlineFragmentNode | FragmentDefinitionNode;
-
- if (isInlineFragment(selection)) {
- fragment = selection;
- } else {
- // This is a named fragment
- fragment = fragmentMap[selection.name.value];
-
- if (!fragment) {
- throw new InvariantError(`No fragment named ${selection.name.value}`);
- }
- }
-
- const typeCondition =
- fragment.typeCondition && fragment.typeCondition.name.value;
-
- const match =
- !typeCondition ||
- execContext.fragmentMatcher(rootValue, typeCondition, contextValue);
-
- if (match) {
- let fragmentExecResult = this.executeSelectionSet({
- selectionSet: fragment.selectionSet,
- rootValue,
- execContext,
- });
-
- if (match === 'heuristic' && fragmentExecResult.missing) {
- fragmentExecResult = {
- ...fragmentExecResult,
- missing: fragmentExecResult.missing.map(info => {
- return { ...info, tolerable: true };
- }),
- };
- }
-
- objectsToMerge.push(handleMissing(fragmentExecResult));
- }
- }
- });
-
- // Perform a single merge at the end so that we can avoid making more
- // defensive shallow copies than necessary.
- finalResult.result = mergeDeepArray(objectsToMerge);
-
- if (this.freezeResults && process.env.NODE_ENV !== 'production') {
- Object.freeze(finalResult.result);
- }
-
- return finalResult;
- }
-
- private executeField(
- object: StoreObject,
- typename: string | void,
- field: FieldNode,
- execContext: ExecContext,
- ): ExecResult {
- const { variableValues: variables, contextValue } = execContext;
- const fieldName = field.name.value;
- const args = argumentsObjectFromField(field, variables);
-
- const info: ExecInfo = {
- resultKey: resultKeyNameFromField(field),
- directives: getDirectiveInfoFromField(field, variables),
- };
-
- const readStoreResult = readStoreResolver(
- object,
- typename,
- fieldName,
- args,
- contextValue,
- info,
- );
-
- if (Array.isArray(readStoreResult.result)) {
- return this.combineExecResults(
- readStoreResult,
- this.executeSubSelectedArray({
- field,
- array: readStoreResult.result,
- execContext,
- }),
- );
- }
-
- // Handle all scalar types here
- if (!field.selectionSet) {
- assertSelectionSetForIdValue(field, readStoreResult.result);
- if (this.freezeResults && process.env.NODE_ENV !== 'production') {
- maybeDeepFreeze(readStoreResult);
- }
- return readStoreResult;
- }
-
- // From here down, the field has a selection set, which means it's trying to
- // query a GraphQLObjectType
- if (readStoreResult.result == null) {
- // Basically any field in a GraphQL response can be null, or missing
- return readStoreResult;
- }
-
- // Returned value is an object, and the query has a sub-selection. Recurse.
- return this.combineExecResults(
- readStoreResult,
- this.executeSelectionSet({
- selectionSet: field.selectionSet,
- rootValue: readStoreResult.result,
- execContext,
- }),
- );
- }
-
- private combineExecResults<T>(
- ...execResults: ExecResult<T>[]
- ): ExecResult<T> {
- let missing: ExecResultMissingField[] | undefined;
- execResults.forEach(execResult => {
- if (execResult.missing) {
- missing = missing || [];
- missing.push(...execResult.missing);
- }
- });
- return {
- result: execResults.pop()!.result,
- missing,
- };
- }
-
- private executeSubSelectedArray({
- field,
- array,
- execContext,
- }: ExecSubSelectedArrayOptions): ExecResult {
- let missing: ExecResultMissingField[] | undefined;
-
- function handleMissing<T>(childResult: ExecResult<T>): T {
- if (childResult.missing) {
- missing = missing || [];
- missing.push(...childResult.missing);
- }
-
- return childResult.result;
- }
-
- array = array.map(item => {
- // null value in array
- if (item === null) {
- return null;
- }
-
- // This is a nested array, recurse
- if (Array.isArray(item)) {
- return handleMissing(this.executeSubSelectedArray({
- field,
- array: item,
- execContext,
- }));
- }
-
- // This is an object, run the selection set on it
- if (field.selectionSet) {
- return handleMissing(this.executeSelectionSet({
- selectionSet: field.selectionSet,
- rootValue: item,
- execContext,
- }));
- }
-
- assertSelectionSetForIdValue(field, item);
-
- return item;
- });
-
- if (this.freezeResults && process.env.NODE_ENV !== 'production') {
- Object.freeze(array);
- }
-
- return { result: array, missing };
- }
-}
-
-function assertSelectionSetForIdValue(
- field: FieldNode,
- value: any,
-) {
- if (!field.selectionSet && isIdValue(value)) {
- throw new InvariantError(
- `Missing selection set for object of type ${
- value.typename
- } returned for query field ${field.name.value}`
- );
- }
-}
-
-function defaultFragmentMatcher() {
- return true;
-}
-
-export function assertIdValue(idValue: IdValue) {
- invariant(isIdValue(idValue), `\
-Encountered a sub-selection on the query, but the store doesn't have \
-an object reference. This should never happen during normal use unless you have custom code \
-that is directly manipulating the store; please file an issue.`);
-}
-
-function readStoreResolver(
- object: StoreObject,
- typename: string | void,
- fieldName: string,
- args: any,
- context: ReadStoreContext,
- { resultKey, directives }: ExecInfo,
-): ExecResult<StoreValue> {
- let storeKeyName = fieldName;
- if (args || directives) {
- // We happen to know here that getStoreKeyName returns its first
- // argument unmodified if there are no args or directives, so we can
- // avoid calling the function at all in that case, as a small but
- // important optimization to this frequently executed code.
- storeKeyName = getStoreKeyName(storeKeyName, args, directives);
- }
-
- let fieldValue: StoreValue | void = void 0;
-
- if (object) {
- fieldValue = object[storeKeyName];
-
- if (
- typeof fieldValue === 'undefined' &&
- context.cacheRedirects &&
- typeof typename === 'string'
- ) {
- // Look for the type in the custom resolver map
- const type = context.cacheRedirects[typename];
- if (type) {
- // Look for the field in the custom resolver map
- const resolver = type[fieldName];
- if (resolver) {
- fieldValue = resolver(object, args, {
- getCacheKey(storeObj: StoreObject) {
- const id = context.dataIdFromObject!(storeObj);
- return id && toIdValue({
- id,
- typename: storeObj.__typename,
- });
- },
- });
- }
- }
- }
- }
-
- if (typeof fieldValue === 'undefined') {
- return {
- result: fieldValue,
- missing: [{
- object,
- fieldName: storeKeyName,
- tolerable: false,
- }],
- };
- }
-
- if (isJsonValue(fieldValue)) {
- fieldValue = fieldValue.json;
- }
-
- return {
- result: fieldValue,
- };
-}
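A hedged sketch of how missing fields surface from `diffQueryAgainstStore` above; `reader` and `store` are assumed to be a `StoreReader` and a populated `NormalizedCache`:

```ts
import gql from 'graphql-tag';

const { result, complete } = reader.diffQueryAgainstStore({
  store,
  query: gql`{ author { firstName lastName } }`,
  // returnPartialData defaults to true, so missing fields are reported via
  // complete: false rather than the InvariantError thrown when it is false.
});

if (!complete) {
  // Some fields were absent from the store; result holds whatever was found.
}
```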
diff --git a/packages/apollo-cache-inmemory/src/writeToStore.ts b/packages/apollo-cache-inmemory/src/writeToStore.ts
deleted file mode 100644
--- a/packages/apollo-cache-inmemory/src/writeToStore.ts
+++ /dev/null
@@ -1,520 +0,0 @@
-import {
- SelectionSetNode,
- FieldNode,
- DocumentNode,
- InlineFragmentNode,
- FragmentDefinitionNode,
-} from 'graphql';
-import { FragmentMatcher } from './readFromStore';
-
-import {
- assign,
- createFragmentMap,
- FragmentMap,
- getDefaultValues,
- getFragmentDefinitions,
- getOperationDefinition,
- IdValue,
- isField,
- isIdValue,
- isInlineFragment,
- isProduction,
- resultKeyNameFromField,
- shouldInclude,
- storeKeyNameFromField,
- StoreValue,
- toIdValue,
- isEqual,
-} from 'apollo-utilities';
-
-import { invariant } from 'ts-invariant';
-
-import { ObjectCache } from './objectCache';
-import { defaultNormalizedCacheFactory } from './depTrackingCache';
-
-import {
- IdGetter,
- NormalizedCache,
- ReadStoreContext,
- StoreObject,
-} from './types';
-
-export class WriteError extends Error {
- public type = 'WriteError';
-}
-
-export function enhanceErrorWithDocument(error: Error, document: DocumentNode) {
- // XXX A bit hacky maybe ...
- const enhancedError = new WriteError(
- `Error writing result to store for query:\n ${JSON.stringify(document)}`,
- );
- enhancedError.message += '\n' + error.message;
- enhancedError.stack = error.stack;
- return enhancedError;
-}
-
-export type WriteContext = {
- readonly store: NormalizedCache;
- readonly processedData?: { [x: string]: FieldNode[] };
- readonly variables?: any;
- readonly dataIdFromObject?: IdGetter;
- readonly fragmentMap?: FragmentMap;
- readonly fragmentMatcherFunction?: FragmentMatcher;
-};
-
-export class StoreWriter {
- /**
- * Writes the result of a query to the store.
- *
- * @param result The result object returned for the query document.
- *
- * @param query The query document whose result we are writing to the store.
- *
- * @param store The {@link NormalizedCache} used by Apollo for the `data` portion of the store.
- *
- * @param variables A map from the name of a variable to its value. These variables can be
- * referenced by the query document.
- *
- * @param dataIdFromObject A function that returns an object identifier given a particular result
- * object. See the store documentation for details and an example of this function.
- *
- * @param fragmentMatcherFunction A function to use for matching fragment conditions in GraphQL documents
- */
- public writeQueryToStore({
- query,
- result,
- store = defaultNormalizedCacheFactory(),
- variables,
- dataIdFromObject,
- fragmentMatcherFunction,
- }: {
- query: DocumentNode;
- result: Object;
- store?: NormalizedCache;
- variables?: Object;
- dataIdFromObject?: IdGetter;
- fragmentMatcherFunction?: FragmentMatcher;
- }): NormalizedCache {
- return this.writeResultToStore({
- dataId: 'ROOT_QUERY',
- result,
- document: query,
- store,
- variables,
- dataIdFromObject,
- fragmentMatcherFunction,
- });
- }
-
- public writeResultToStore({
- dataId,
- result,
- document,
- store = defaultNormalizedCacheFactory(),
- variables,
- dataIdFromObject,
- fragmentMatcherFunction,
- }: {
- dataId: string;
- result: any;
- document: DocumentNode;
- store?: NormalizedCache;
- variables?: Object;
- dataIdFromObject?: IdGetter;
- fragmentMatcherFunction?: FragmentMatcher;
- }): NormalizedCache {
- // XXX TODO REFACTOR: this is a temporary workaround until query normalization is made to work with documents.
- const operationDefinition = getOperationDefinition(document)!;
-
- try {
- return this.writeSelectionSetToStore({
- result,
- dataId,
- selectionSet: operationDefinition.selectionSet,
- context: {
- store,
- processedData: {},
- variables: assign(
- {},
- getDefaultValues(operationDefinition),
- variables,
- ),
- dataIdFromObject,
- fragmentMap: createFragmentMap(getFragmentDefinitions(document)),
- fragmentMatcherFunction,
- },
- });
- } catch (e) {
- throw enhanceErrorWithDocument(e, document);
- }
- }
-
- public writeSelectionSetToStore({
- result,
- dataId,
- selectionSet,
- context,
- }: {
- dataId: string;
- result: any;
- selectionSet: SelectionSetNode;
- context: WriteContext;
- }): NormalizedCache {
- const { variables, store, fragmentMap } = context;
-
- selectionSet.selections.forEach(selection => {
- if (!shouldInclude(selection, variables)) {
- return;
- }
-
- if (isField(selection)) {
- const resultFieldKey: string = resultKeyNameFromField(selection);
- const value: any = result[resultFieldKey];
-
- if (typeof value !== 'undefined') {
- this.writeFieldToStore({
- dataId,
- value,
- field: selection,
- context,
- });
- } else {
-          let isDeferred = false;
- let isClient = false;
- if (selection.directives && selection.directives.length) {
-            // If this is a deferred field we don't need to throw / warn.
-            isDeferred = selection.directives.some(
- directive => directive.name && directive.name.value === 'defer',
- );
-
- // When using the @client directive, it might be desirable in
- // some cases to want to write a selection set to the store,
- // without having all of the selection set values available.
- // This is because the @client field values might have already
- // been written to the cache separately (e.g. via Apollo
- // Cache's `writeData` capabilities). Because of this, we'll
- // skip the missing field warning for fields with @client
- // directives.
- isClient = selection.directives.some(
- directive => directive.name && directive.name.value === 'client',
- );
- }
-
-          if (!isDeferred && !isClient && context.fragmentMatcherFunction) {
- // XXX We'd like to throw an error, but for backwards compatibility's sake
- // we just print a warning for the time being.
- //throw new WriteError(`Missing field ${resultFieldKey} in ${JSON.stringify(result, null, 2).substring(0, 100)}`);
- invariant.warn(
- `Missing field ${resultFieldKey} in ${JSON.stringify(
- result,
- null,
- 2,
- ).substring(0, 100)}`,
- );
- }
- }
- } else {
- // This is not a field, so it must be a fragment, either inline or named
- let fragment: InlineFragmentNode | FragmentDefinitionNode;
-
- if (isInlineFragment(selection)) {
- fragment = selection;
- } else {
- // Named fragment
- fragment = (fragmentMap || {})[selection.name.value];
- invariant(fragment, `No fragment named ${selection.name.value}.`);
- }
-
- let matches = true;
- if (context.fragmentMatcherFunction && fragment.typeCondition) {
- // TODO we need to rewrite the fragment matchers for this to work properly and efficiently
- // Right now we have to pretend that we're passing in an idValue and that there's a store
- // on the context.
- const id = dataId || 'self';
- const idValue = toIdValue({ id, typename: undefined });
- const fakeContext: ReadStoreContext = {
- // NOTE: fakeContext always uses ObjectCache
- // since this is only to ensure the return value of 'matches'
- store: new ObjectCache({ [id]: result }),
- cacheRedirects: {},
- };
- const match = context.fragmentMatcherFunction(
- idValue,
- fragment.typeCondition.name.value,
- fakeContext,
- );
- if (!isProduction() && match === 'heuristic') {
- invariant.error('WARNING: heuristic fragment matching going on!');
- }
- matches = !!match;
- }
-
- if (matches) {
- this.writeSelectionSetToStore({
- result,
- selectionSet: fragment.selectionSet,
- dataId,
- context,
- });
- }
- }
- });
-
- return store;
- }
-
- private writeFieldToStore({
- field,
- value,
- dataId,
- context,
- }: {
- field: FieldNode;
- value: any;
- dataId: string;
- context: WriteContext;
- }) {
- const { variables, dataIdFromObject, store } = context;
-
- let storeValue: StoreValue;
- let storeObject: StoreObject;
-
- const storeFieldName: string = storeKeyNameFromField(field, variables);
-
- // If this is a scalar value...
- if (!field.selectionSet || value === null) {
- storeValue =
- value != null && typeof value === 'object'
- ? // If the scalar value is a JSON blob, we have to "escape" it so it can’t pretend to be
- // an id.
- { type: 'json', json: value }
- : // Otherwise, just store the scalar directly in the store.
- value;
- } else if (Array.isArray(value)) {
- const generatedId = `${dataId}.${storeFieldName}`;
-
- storeValue = this.processArrayValue(
- value,
- generatedId,
- field.selectionSet,
- context,
- );
- } else {
- // It's an object
- let valueDataId = `${dataId}.${storeFieldName}`;
- let generated = true;
-
- // We only prepend the '$' if the valueDataId isn't already a generated
- // id.
- if (!isGeneratedId(valueDataId)) {
- valueDataId = '$' + valueDataId;
- }
-
- if (dataIdFromObject) {
- const semanticId = dataIdFromObject(value);
-
-          // We throw an error if the first character of the id is '$'. This is
-          // because we use that character to designate an Apollo-generated id
-          // and we use the distinction between user-designated and application-provided
-          // ids when managing overwrites.
- invariant(
- !semanticId || !isGeneratedId(semanticId),
- 'IDs returned by dataIdFromObject cannot begin with the "$" character.',
- );
-
- if (
- semanticId ||
- (typeof semanticId === 'number' && semanticId === 0)
- ) {
- valueDataId = semanticId;
- generated = false;
- }
- }
-
- if (!isDataProcessed(valueDataId, field, context.processedData)) {
- this.writeSelectionSetToStore({
- dataId: valueDataId,
- result: value,
- selectionSet: field.selectionSet,
- context,
- });
- }
-
- // We take the id and escape it (i.e. wrap it with an enclosing object).
- // This allows us to distinguish IDs from normal scalars.
- const typename = value.__typename;
- storeValue = toIdValue({ id: valueDataId, typename }, generated);
-
- // check if there was a generated id at the location where we're
- // about to place this new id. If there was, we have to merge the
- // data from that id with the data we're about to write in the store.
- storeObject = store.get(dataId);
- const escapedId =
- storeObject && (storeObject[storeFieldName] as IdValue | undefined);
- if (escapedId !== storeValue && isIdValue(escapedId)) {
- const hadTypename = escapedId.typename !== undefined;
- const hasTypename = typename !== undefined;
- const typenameChanged =
- hadTypename && hasTypename && escapedId.typename !== typename;
-
- // If there is already a real id in the store and the current id we
- // are dealing with is generated, we throw an error.
- // One exception we allow is when the typename has changed, which occurs
- // when schema defines a union, both with and without an ID in the same place.
- // checks if we "lost" the read id
- invariant(
- !generated || escapedId.generated || typenameChanged,
- `Store error: the application attempted to write an object with no provided id but the store already contains an id of ${
- escapedId.id
- } for this object. The selectionSet that was trying to be written is:\n${
- JSON.stringify(field)
- }`,
- );
-
- // checks if we "lost" the typename
- invariant(
- !hadTypename || hasTypename,
- `Store error: the application attempted to write an object with no provided typename but the store already contains an object with typename of ${
- escapedId.typename
- } for the object of id ${escapedId.id}. The selectionSet that was trying to be written is:\n${
- JSON.stringify(field)
- }`,
- );
-
- if (escapedId.generated) {
- // We should only merge if it's an object of the same type,
- // otherwise we should delete the generated object
- if (typenameChanged) {
- // Only delete the generated object when the old object was
- // inlined, and the new object is not. This is indicated by
- // the old id being generated, and the new id being real.
- if (!generated) {
- store.delete(escapedId.id);
- }
- } else {
- mergeWithGenerated(escapedId.id, (storeValue as IdValue).id, store);
- }
- }
- }
- }
-
- storeObject = store.get(dataId);
- if (!storeObject || !isEqual(storeValue, storeObject[storeFieldName])) {
- store.set(dataId, {
- ...storeObject,
- [storeFieldName]: storeValue,
- });
- }
- }
-
- private processArrayValue(
- value: any[],
- generatedId: string,
- selectionSet: SelectionSetNode,
- context: WriteContext,
- ): any[] {
- return value.map((item: any, index: any) => {
- if (item === null) {
- return null;
- }
-
- let itemDataId = `${generatedId}.${index}`;
-
- if (Array.isArray(item)) {
- return this.processArrayValue(item, itemDataId, selectionSet, context);
- }
-
- let generated = true;
-
- if (context.dataIdFromObject) {
- const semanticId = context.dataIdFromObject(item);
-
- if (semanticId) {
- itemDataId = semanticId;
- generated = false;
- }
- }
-
- if (!isDataProcessed(itemDataId, selectionSet, context.processedData)) {
- this.writeSelectionSetToStore({
- dataId: itemDataId,
- result: item,
- selectionSet,
- context,
- });
- }
-
- return toIdValue(
- { id: itemDataId, typename: item.__typename },
- generated,
- );
- });
- }
-}
-
-// Checks if the id given is an id that was generated by Apollo
-// rather than by dataIdFromObject.
-function isGeneratedId(id: string): boolean {
- return id[0] === '$';
-}
-
-function mergeWithGenerated(
- generatedKey: string,
- realKey: string,
- cache: NormalizedCache,
-): boolean {
- if (generatedKey === realKey) {
- return false;
- }
-
- const generated = cache.get(generatedKey);
- const real = cache.get(realKey);
- let madeChanges = false;
-
- Object.keys(generated).forEach(key => {
- const value = generated[key];
- const realValue = real[key];
-
- if (
- isIdValue(value) &&
- isGeneratedId(value.id) &&
- isIdValue(realValue) &&
- !isEqual(value, realValue) &&
- mergeWithGenerated(value.id, realValue.id, cache)
- ) {
- madeChanges = true;
- }
- });
-
- cache.delete(generatedKey);
- const newRealValue = { ...generated, ...real };
-
- if (isEqual(newRealValue, real)) {
- return madeChanges;
- }
-
- cache.set(realKey, newRealValue);
- return true;
-}
-
-function isDataProcessed(
- dataId: string,
- field: FieldNode | SelectionSetNode,
- processedData?: { [x: string]: (FieldNode | SelectionSetNode)[] },
-): boolean {
- if (!processedData) {
- return false;
- }
-
- if (processedData[dataId]) {
- if (processedData[dataId].indexOf(field) >= 0) {
- return true;
- } else {
- processedData[dataId].push(field);
- }
- } else {
- processedData[dataId] = [field];
- }
-
- return false;
-}
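To make the id bookkeeping above concrete, here is how ids are chosen when `dataIdFromObject` yields no semantic id; the paths follow the string templates in `writeFieldToStore` and `processArrayValue`:

```ts
// Writing { author: { name: 'Ada' } } under dataId 'ROOT_QUERY':
//   valueDataId = 'ROOT_QUERY.author' -> '$ROOT_QUERY.author' ('$' marks it generated)
// Writing { authors: [{ ... }, { ... }] } under 'ROOT_QUERY':
//   array id  = 'ROOT_QUERY.authors'
//   item ids  = 'ROOT_QUERY.authors.0', 'ROOT_QUERY.authors.1' (items get no '$' prefix)
isGeneratedId('$ROOT_QUERY.author'); // true
isGeneratedId('Author:1');           // false
```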
diff --git a/packages/apollo-cache/jest.config.js b/packages/apollo-cache/jest.config.js
deleted file mode 100644
--- a/packages/apollo-cache/jest.config.js
+++ /dev/null
@@ -1,3 +0,0 @@
-module.exports = {
- ...require('../../config/jest.config.settings'),
-};
diff --git a/packages/apollo-cache/rollup.config.js b/packages/apollo-cache/rollup.config.js
deleted file mode 100644
--- a/packages/apollo-cache/rollup.config.js
+++ /dev/null
@@ -1,5 +0,0 @@
-import { rollup } from '../../config/rollup.config';
-
-export default rollup({
- name: 'apollo-cache',
-});
diff --git a/packages/apollo-cache/src/index.ts b/packages/apollo-cache/src/index.ts
deleted file mode 100644
--- a/packages/apollo-cache/src/index.ts
+++ /dev/null
@@ -1,2 +0,0 @@
-export * from './cache';
-export * from './types';
diff --git a/packages/apollo-cache/src/types/index.ts b/packages/apollo-cache/src/types/index.ts
deleted file mode 100644
--- a/packages/apollo-cache/src/types/index.ts
+++ /dev/null
@@ -1,2 +0,0 @@
-export * from './DataProxy';
-export * from './Cache';
diff --git a/packages/apollo-client/analyze/src/index.js b/packages/apollo-client/analyze/src/index.js
deleted file mode 100644
--- a/packages/apollo-client/analyze/src/index.js
+++ /dev/null
@@ -1 +0,0 @@
-import ApolloClient from '../../lib/src';
diff --git a/packages/apollo-client/analyze/webpack.config.js b/packages/apollo-client/analyze/webpack.config.js
deleted file mode 100644
--- a/packages/apollo-client/analyze/webpack.config.js
+++ /dev/null
@@ -1,13 +0,0 @@
-const path = require('path');
-const BundleAnalyzerPlugin = require('webpack-bundle-analyzer')
- .BundleAnalyzerPlugin;
-
-module.exports = {
- context: __dirname,
- entry: './src/index.js',
- output: {
- path: path.resolve(__dirname, 'dist'),
- filename: 'bundle.js',
- },
- plugins: [new BundleAnalyzerPlugin()],
-};
diff --git a/packages/apollo-client/benchmark/github-reporter.ts b/packages/apollo-client/benchmark/github-reporter.ts
deleted file mode 100644
--- a/packages/apollo-client/benchmark/github-reporter.ts
+++ /dev/null
@@ -1,114 +0,0 @@
-import GithubAPI from '@octokit/rest';
-import { bsuite, groupPromises, log } from './util';
-import { thresholds } from './thresholds';
-
-export function collectAndReportBenchmarks(uploadToGithub: boolean) {
- const github = eval('new require("@octokit/rest")()') as GithubAPI;
- const commitSHA =
- process.env.TRAVIS_PULL_REQUEST_SHA || process.env.TRAVIS_COMMIT || '';
-
- if (uploadToGithub) {
- github.authenticate({
- type: 'oauth',
- token: process.env.DANGER_GITHUB_API_TOKEN || '',
- });
-
- github.repos.createStatus({
- owner: 'apollographql',
- repo: 'apollo-client',
- sha: commitSHA,
- context: 'Benchmark',
- description: 'Evaluation is in progress!',
- state: 'pending',
- });
- }
-
- Promise.all(groupPromises)
- .then(() => {
- log('Running benchmarks.');
- return new Promise<{
- [name: string]: { mean: number; moe: number };
- }>(resolve => {
- const retMap: { [name: string]: { mean: number; moe: number } } = {};
-
- bsuite
- .on('error', (error: any) => {
- log('Error: ', error);
- })
- .on('cycle', (event: any) => {
- retMap[event.target.name] = {
- mean: event.target.stats.mean * 1000,
- moe: event.target.stats.moe * 1000,
- };
- log('Mean time in ms: ', event.target.stats.mean * 1000);
- log(String(event.target));
- log('');
- })
- .on('complete', (_: any) => {
- resolve(retMap);
- })
- .run({ async: false });
- });
- })
- .then(res => {
- let message = '';
- let pass = false;
- Object.keys(res).forEach(element => {
-        if (element !== 'baseline') {
- if (!thresholds[element]) {
- console.error(`Threshold not defined for "${element}"`);
- if (message === '') {
- message = `Threshold not defined for "${element}"`;
- pass = false;
- }
- } else {
- const normalizedMean = res[element].mean / res['baseline'].mean;
- if (normalizedMean > thresholds[element]) {
- const perfDropMessage = `Performance drop detected for benchmark: "${element}", ${
- res[element].mean
- } / ${res['baseline'].mean} = ${normalizedMean} > ${
- thresholds[element]
- }`;
- console.error(perfDropMessage);
- if (message === '') {
- message = `Performance drop detected for benchmark: "${element}"`;
- pass = false;
- }
- } else {
- console.log(
- `No performance drop detected for benchmark: "${element}", ${
- res[element].mean
- } / ${res['baseline'].mean} = ${normalizedMean} <= ${
- thresholds[element]
- }`,
- );
- }
- }
- }
- });
-
- if (message === '') {
- message = 'All benchmarks are under the defined thresholds!';
- pass = true;
- }
-
- console.log('Reporting benchmarks to GitHub status...');
-
- if (uploadToGithub) {
- return github.repos
- .createStatus({
- owner: 'apollographql',
- repo: 'apollo-client',
- sha: commitSHA,
- context: 'Benchmark',
- description: message,
- state: pass ? 'success' : 'error',
- })
- .then(() => {
- console.log('Published benchmark results to GitHub status');
- });
- } else {
- return;
- }
- });
-}
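A worked instance of the threshold check above, using a threshold from `benchmark/thresholds.ts` later in this diff (the measured means are hypothetical):

```ts
const baselineMean = 0.062;                      // ms, hypothetical measurement
const benchMean = 0.08;                          // ms, hypothetical measurement
const threshold = 0.06 / 0.062;                  // ≈ 0.97, 'diff query against store with 5 items'
const normalizedMean = benchMean / baselineMean; // ≈ 1.29
normalizedMean > threshold;                      // true -> "Performance drop detected ..."
```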
diff --git a/packages/apollo-client/benchmark/index.ts b/packages/apollo-client/benchmark/index.ts
deleted file mode 100644
--- a/packages/apollo-client/benchmark/index.ts
+++ /dev/null
@@ -1,428 +0,0 @@
-// This file implements some of the basic benchmarks around
-// Apollo Client.
-
-import gql from 'graphql-tag';
-
-import {
- group,
- benchmark,
- afterEach,
- DescriptionObject,
- dataIdFromObject,
-} from './util';
-
-import { ApolloClient, ApolloQueryResult } from '../src/index';
-
-import { times, cloneDeep } from 'lodash';
-
-import { InMemoryCache } from 'apollo-cache-inmemory';
-
-import { Operation, ApolloLink, FetchResult, Observable } from 'apollo-link';
-
-import { print } from 'graphql/language/printer';
-
-import { collectAndReportBenchmarks } from './github-reporter';
-
-interface MockedResponse {
- request: Operation;
- result?: FetchResult;
- error?: Error;
- delay?: number;
-}
-
-function mockSingleLink(...mockedResponses: MockedResponse[]): ApolloLink {
- return new MockLink(mockedResponses);
-}
-
-function requestToKey(request: Operation): string {
- const queryString = request.query && print(request.query);
-
- return JSON.stringify({
- variables: request.variables || {},
- query: queryString,
- });
-}
-
-class MockLink extends ApolloLink {
- private mockedResponsesByKey: { [key: string]: MockedResponse[] } = {};
-
- constructor(mockedResponses: MockedResponse[]) {
- super();
- mockedResponses.forEach(mockedResponse => {
- this.addMockedResponse(mockedResponse);
- });
- }
-
- public addMockedResponse(mockedResponse: MockedResponse) {
- const key = requestToKey(mockedResponse.request);
- let mockedResponses = this.mockedResponsesByKey[key];
- if (!mockedResponses) {
- mockedResponses = [];
- this.mockedResponsesByKey[key] = mockedResponses;
- }
- mockedResponses.push(mockedResponse);
- }
-
- public request(operation: Operation) {
- const key = requestToKey(operation);
- const responses = this.mockedResponsesByKey[key];
- if (!responses || responses.length === 0) {
- throw new Error(
- `No more mocked responses for the query: ${print(
- operation.query,
- )}, variables: ${JSON.stringify(operation.variables)}`,
- );
- }
-
- const { result, error, delay } = responses.shift()!;
- if (!result && !error) {
- throw new Error(
- `Mocked response should contain either result or error: ${key}`,
- );
- }
-
- return new Observable<FetchResult>(observer => {
- let timer = setTimeout(() => {
- if (error) {
- observer.error(error);
- } else {
- if (result) observer.next(result);
- observer.complete();
- }
- }, delay ? delay : 0);
-
- return () => {
- clearTimeout(timer);
- };
- });
- }
-}
-
-const simpleQuery = gql`
- query {
- author {
- firstName
- lastName
- }
- }
-`;
-
-const simpleResult = {
- data: {
- author: {
- firstName: 'John',
- lastName: 'Smith',
- },
- },
-};
-
-const getClientInstance = () => {
- const link = mockSingleLink({
- request: { query: simpleQuery } as Operation,
- result: simpleResult,
- });
-
- return new ApolloClient({
- link,
- cache: new InMemoryCache({ addTypename: false }),
- });
-};
-
-const createReservations = (count: number) => {
- const reservations: {
- name: string;
- id: string;
- }[] = [];
- times(count, reservationIndex => {
- reservations.push({
- name: 'Fake Reservation',
- id: reservationIndex.toString(),
- });
- });
- return reservations;
-};
-
-group(end => {
- benchmark('baseline', done => {
- let arr = Array.from({ length: 100 }, () => Math.random());
- arr.sort();
- done();
- });
- end();
-});
-
-group(end => {
- const link = mockSingleLink({
- request: { query: simpleQuery } as Operation,
- result: simpleResult,
- });
-
- const cache = new InMemoryCache();
-
- benchmark('constructing an instance', done => {
- new ApolloClient({ link, cache });
- done();
- });
- end();
-});
-
-group(end => {
- benchmark('fetching a query result from mocked server', done => {
- const client = getClientInstance();
- client.query({ query: simpleQuery }).then(_ => {
- done();
- });
- });
-
- end();
-});
-
-group(end => {
- benchmark('write data and receive update from the cache', done => {
- const client = getClientInstance();
- const observable = client.watchQuery({
- query: simpleQuery,
- fetchPolicy: 'cache-only',
- });
- observable.subscribe({
- next(res: ApolloQueryResult<Object>) {
- if (Object.keys(res.data).length > 0) {
- done();
- }
- },
- error(_: Error) {
- console.warn('Error occurred in observable.');
- },
- });
- client.query({ query: simpleQuery });
- });
-
- end();
-});
-
-group(end => {
-  // This benchmark checks that the time taken to deliver updates is
-  // linear in the number of subscribers (it should be). When plotting
-  // the results from this benchmark, the `meanTimes` structure can be used.
- const meanTimes: { [subscriberCount: string]: number } = {};
-
- times(4, countR => {
- const count = 5 * Math.pow(4, countR);
- benchmark(
- {
- name: `write data and deliver update to ${count} subscribers`,
- count,
- },
- done => {
- const promises: Promise<void>[] = [];
- const client = getClientInstance();
-
- times(count, () => {
- promises.push(
- new Promise<void>((resolve, _) => {
- client
- .watchQuery({
- query: simpleQuery,
- fetchPolicy: 'cache-only',
- })
- .subscribe({
- next(res: ApolloQueryResult<Object>) {
- if (Object.keys(res.data).length > 0) {
- resolve();
- }
- },
- });
- }),
- );
- });
-
- client.query({ query: simpleQuery });
- Promise.all(promises).then(() => {
- done();
- });
- },
- );
-
- afterEach((description: DescriptionObject, event: any) => {
- const iterCount = description['count'] as number;
- meanTimes[iterCount.toString()] = event.target.stats.mean * 1000;
- });
- });
-
- end();
-});
-
-times(4, (countR: number) => {
- const count = 5 * Math.pow(4, countR);
- const query = gql`
- query($id: String) {
- author(id: $id) {
- name
- id
- __typename
- }
- }
- `;
- const originalResult = {
- data: {
- author: {
- name: 'John Smith',
- id: 1,
- __typename: 'Author',
- },
- },
- };
-
- group(end => {
- const cache = new InMemoryCache({
- dataIdFromObject: (obj: any) => {
- if (obj.id && obj.__typename) {
- return obj.__typename + obj.id;
- }
- return null;
- },
- });
-
- // insert a bunch of stuff into the cache
- times(count, index => {
- const result = cloneDeep(originalResult);
- result.data.author.id = index;
-
- return cache.writeQuery({
- query,
- variables: { id: index },
- data: result.data as any,
- });
- });
-
- benchmark(
- {
- name: `read single item from cache with ${count} items in cache`,
- count,
- },
- done => {
- const randomIndex = Math.floor(Math.random() * count);
- cache.readQuery({
- query,
- variables: { id: randomIndex },
- });
- done();
- },
- );
-
- end();
- });
-});
-
-// Measure the amount of time it takes to read a bunch of
-// objects from the cache.
-times(4, index => {
- group(end => {
- const cache = new InMemoryCache({
- dataIdFromObject,
- addTypename: false,
- });
-
- const query = gql`
- query($id: String) {
- house(id: $id) {
- reservations {
- name
- id
- }
- }
- }
- `;
- const houseId = '12';
- const reservationCount = 5 * Math.pow(4, index);
- const reservations = createReservations(reservationCount);
-
- const variables = { id: houseId };
-
- cache.writeQuery({
- query,
- variables,
- data: {
- house: {
- reservations,
- },
- },
- });
-
- benchmark(
- `read result with ${reservationCount} items associated with the result`,
- done => {
- cache.readQuery({
- query,
- variables,
- });
- done();
- },
- );
-
- end();
- });
-});
-
-// Measure only the amount of time it takes to diff a query against the store
-//
-// This test allows us to differentiate between the fixed cost of .query() and the fixed cost
-// of actually reading from the store.
-times(4, index => {
- group(end => {
- const reservationCount = 5 * Math.pow(4, index);
-
- // Prime the cache.
- const query = gql`
- query($id: String) {
- house(id: $id) {
- reservations {
- name
- id
- }
- }
- }
- `;
- const variables = { id: '7' };
- const reservations = createReservations(reservationCount);
- const result = {
- house: { reservations },
- };
-
- const cache = new InMemoryCache({
- dataIdFromObject,
- addTypename: false,
- });
-
- cache.write({
- dataId: 'ROOT_QUERY',
- query,
- variables,
- result,
- });
-
- // We only keep track of the results so that V8 doesn't decide to just throw
- // away our cache read code.
- let results: any = null;
- benchmark(
- `diff query against store with ${reservationCount} items`,
- done => {
- results = cache.diff({
- query,
- variables,
- optimistic: false,
- });
- done();
- },
- );
-
- end();
- });
-});
-
-if (process.env.DANGER_GITHUB_API_TOKEN) {
- collectAndReportBenchmarks(true);
-} else {
- collectAndReportBenchmarks(false);
-}
diff --git a/packages/apollo-client/benchmark/thresholds.ts b/packages/apollo-client/benchmark/thresholds.ts
deleted file mode 100644
--- a/packages/apollo-client/benchmark/thresholds.ts
+++ /dev/null
@@ -1,38 +0,0 @@
-export const thresholds: { [name: string]: number } = {
- 'constructing an instance': 0.1,
-
- 'fetching a query result from mocked server': 14,
-
- 'write data and receive update from the cache': 14,
- 'write data and deliver update to 5 subscribers': 20,
- 'write data and deliver update to 10 subscribers': 25,
- 'write data and deliver update to 20 subscribers': 40,
- 'write data and deliver update to 40 subscribers': 62,
- 'write data and deliver update to 80 subscribers': 100,
- 'write data and deliver update to 160 subscribers': 165,
- 'write data and deliver update to 320 subscribers': 430,
-
- 'read single item from cache with 5 items in cache': 0.65 / 0.062,
- 'read single item from cache with 10 items in cache': 0.65 / 0.062,
- 'read single item from cache with 20 items in cache': 0.65 / 0.062,
- 'read single item from cache with 40 items in cache': 0.65 / 0.062,
- 'read single item from cache with 80 items in cache': 0.65 / 0.062,
- 'read single item from cache with 160 items in cache': 0.65 / 0.062,
- 'read single item from cache with 320 items in cache': 0.65 / 0.062,
-
- 'read result with 5 items associated with the result': 0.8 / 0.062,
- 'read result with 10 items associated with the result': 0.8 / 0.062,
- 'read result with 20 items associated with the result': 0.8 / 0.062,
- 'read result with 40 items associated with the result': 1 / 0.062,
- 'read result with 80 items associated with the result': 1.3 / 0.062,
- 'read result with 160 items associated with the result': 2.1 / 0.062,
- 'read result with 320 items associated with the result': 3.6 / 0.062,
-
- 'diff query against store with 5 items': 0.06 / 0.062,
- 'diff query against store with 10 items': 0.07 / 0.062,
- 'diff query against store with 20 items': 0.07 / 0.062,
- 'diff query against store with 40 items': 0.11 / 0.062,
- 'diff query against store with 80 items': 0.2 / 0.062,
- 'diff query against store with 160 items': 0.36 / 0.062,
- 'diff query against store with 320 items': 0.75 / 0.062,
-};
diff --git a/packages/apollo-client/benchmark/util.ts b/packages/apollo-client/benchmark/util.ts
deleted file mode 100644
--- a/packages/apollo-client/benchmark/util.ts
+++ /dev/null
@@ -1,173 +0,0 @@
-import Benchmark from 'benchmark';
-
-// This file implements utilities around benchmark.js that make it
-// easier to use for our benchmarking needs.
-
-// Specifically, it provides `group` and `benchmark`, examples of which
-// can be seen within the benchmarks. The functions allow you to manage scope and async
-// code more easily than benchmark.js typically allows.
-//
-// `group` is meant to provide a way to execute code that sets up the scope variables for your
-// benchmark. It is only run once before the benchmark, not on every call of the code to
-// be benchmarked. The `benchmark` function is similar to the `it` function within mocha;
-// it allows you to define a particular block of code to be benchmarked.
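-//
-// A minimal usage sketch (hypothetical, for illustration only):
-//
-//   group(end => {
-//     const data = { answer: 42 };        // setup runs once per group
-//     benchmark('JSON round-trip', done => {
-//       JSON.parse(JSON.stringify(data)); // the code being measured
-//       done();
-//     });
-//     end();
-//   });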
-
-Benchmark.options.minSamples = 150;
-export const bsuite = new Benchmark.Suite();
-export type DoneFunction = () => void;
-
-export interface DescriptionObject {
- name: string;
- [other: string]: any;
-}
-
-export type Nullable<T> = T | undefined;
-export type Description = DescriptionObject | string;
-export type CycleFunction = (doneFn: DoneFunction) => void;
-export type BenchmarkFunction = (
- description: Description,
- cycleFn: CycleFunction,
-) => void;
-export type GroupFunction = (done: DoneFunction) => void;
-export type AfterEachCallbackFunction = (
- descr: Description,
- event: any,
-) => void;
-export type AfterEachFunction = (
- afterEachFnArg: AfterEachCallbackFunction,
-) => void;
-export type AfterAllCallbackFunction = () => void;
-export type AfterAllFunction = (afterAllFn: AfterAllCallbackFunction) => void;
-
-export let benchmark: BenchmarkFunction;
-export let afterEach: AfterEachFunction;
-export let afterAll: AfterAllFunction;
-
-// Used to log from within benchmarks without tripping tslint's no-console rule.
-export function log(logString: string, ...args: any[]) {
- // tslint:disable-next-line
- console.log(logString, ...args);
-}
-
-// A reasonable implementation of dataIdFromObject that we use within
-// the benchmarks.
-export const dataIdFromObject = (object: any) => {
- if (object.__typename && object.id) {
- return object.__typename + '__' + object.id;
- }
- return null;
-};
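-
-// For example (illustrative values):
-//   dataIdFromObject({ __typename: 'Author', id: 7 }); // => 'Author__7'
-//   dataIdFromObject({ name: 'untyped' });             // => null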
-
-interface Scope {
- benchmark?: BenchmarkFunction;
- afterEach?: AfterEachFunction;
- afterAll?: AfterAllFunction;
-}
-
-// Internal function that returns the current exposed functions
-// benchmark, setup, etc.
-function currentScope() {
- return {
- benchmark,
- afterEach,
- afterAll,
- };
-}
-
-// Internal function that lets us set benchmark, setup, afterEach, etc.
-// in a reasonable fashion.
-function setScope(scope: Scope) {
- benchmark = scope.benchmark as BenchmarkFunction;
- afterEach = scope.afterEach as AfterEachFunction;
- afterAll = scope.afterAll as AfterAllFunction;
-}
-
-export const groupPromises: Promise<void>[] = [];
-
-export const group = (groupFn: GroupFunction) => {
- const oldScope = currentScope();
-  const scope: Scope = {};
-
- let afterEachFn: Nullable<AfterEachCallbackFunction> = undefined;
- scope.afterEach = (afterEachFnArg: AfterEachCallbackFunction) => {
- afterEachFn = afterEachFnArg;
- };
-
- let afterAllFn: Nullable<AfterAllCallbackFunction> = undefined;
- scope.afterAll = (afterAllFnArg: AfterAllCallbackFunction) => {
- afterAllFn = afterAllFnArg;
- };
-
- const benchmarkPromises: Promise<void>[] = [];
-
- scope.benchmark = (
- description: string | Description,
- benchmarkFn: CycleFunction,
- ) => {
- const name =
- (description as DescriptionObject).name || (description as string);
- log('Adding benchmark: ', name);
-
- // const scopes: Object[] = [];
- let cycleCount = 0;
- benchmarkPromises.push(
- new Promise<void>((resolve, _) => {
- bsuite.add(name, {
- defer: true,
- fn: (deferred: any) => {
- const done = () => {
- cycleCount++;
- deferred.resolve();
- };
-
- benchmarkFn(done);
- },
-
- onComplete: (event: any) => {
- if (afterEachFn) {
- afterEachFn(description, event);
- }
- resolve();
- },
- });
- }),
- );
- };
-
- groupPromises.push(
- new Promise<void>((resolve, _) => {
- const groupDone = () => {
- Promise.all(benchmarkPromises).then(() => {
- if (afterAllFn) {
- afterAllFn();
- }
- });
- resolve();
- };
-
- setScope(scope);
- groupFn(groupDone);
- setScope(oldScope);
- }),
- );
-};
-
-export function runBenchmarks() {
- Promise.all(groupPromises).then(() => {
- log('Running benchmarks.');
- bsuite
- .on('error', (error: any) => {
- log('Error: ', error);
- })
- .on('cycle', (event: any) => {
- log('Mean time in ms: ', event.target.stats.mean * 1000);
- log(String(event.target));
- log('');
- })
- .run({ async: false });
- });
-}
diff --git a/packages/apollo-client/jest.config.js b/packages/apollo-client/jest.config.js
deleted file mode 100644
--- a/packages/apollo-client/jest.config.js
+++ /dev/null
@@ -1,5 +0,0 @@
-module.exports = {
- ...require('../../config/jest.config.settings'),
- // must be inside the src dir
- setupFiles: ['<rootDir>/src/config/jest/setup.ts'],
-};
diff --git a/packages/apollo-client/rollup.config.js b/packages/apollo-client/rollup.config.js
deleted file mode 100644
--- a/packages/apollo-client/rollup.config.js
+++ /dev/null
@@ -1,8 +0,0 @@
-import { rollup } from '../../config/rollup.config';
-
-export default rollup({
- name: 'apollo-client',
- extraGlobals: {
- 'symbol-observable': '$$observable',
- },
-});
diff --git a/packages/apollo-client/src/__mocks__/mockLinks.ts b/packages/apollo-client/src/__mocks__/mockLinks.ts
deleted file mode 100644
--- a/packages/apollo-client/src/__mocks__/mockLinks.ts
+++ /dev/null
@@ -1,158 +0,0 @@
-import {
- Operation,
- ApolloLink,
- FetchResult,
- Observable,
- GraphQLRequest,
-} from 'apollo-link';
-
-import { print } from 'graphql/language/printer';
-
-interface MockApolloLink extends ApolloLink {
- operation?: Operation;
-}
-
-// Pass in multiple mocked responses, so that you can test flows that end up
-// making multiple queries to the server
-export function mockSingleLink(
- ...mockedResponses: MockedResponse[]
-): MockApolloLink {
- return new MockLink(mockedResponses);
-}
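-
-// A usage sketch (hypothetical `query` and `data` placeholders): each mocked
-// response is consumed in order by matching requests.
-//
-//   const link = mockSingleLink(
-//     { request: { query }, result: { data } },
-//     { request: { query }, error: new Error('second call fails') },
-//   );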
-
-export function mockObservableLink(): MockSubscriptionLink {
- return new MockSubscriptionLink();
-}
-
-export interface MockedResponse {
- request: GraphQLRequest;
- result?: FetchResult;
- error?: Error;
- delay?: number;
-}
-
-export interface MockedSubscriptionResult {
- result?: FetchResult;
- error?: Error;
- delay?: number;
-}
-
-export interface MockedSubscription {
- request: Operation;
-}
-
-export class MockLink extends ApolloLink {
- public operation: Operation;
- private mockedResponsesByKey: { [key: string]: MockedResponse[] } = {};
-
- constructor(mockedResponses: MockedResponse[]) {
- super();
- mockedResponses.forEach(mockedResponse => {
- this.addMockedResponse(mockedResponse);
- });
- }
-
- public addMockedResponse(mockedResponse: MockedResponse) {
- const key = requestToKey(mockedResponse.request);
- let mockedResponses = this.mockedResponsesByKey[key];
- if (!mockedResponses) {
- mockedResponses = [];
- this.mockedResponsesByKey[key] = mockedResponses;
- }
- mockedResponses.push(mockedResponse);
- }
-
- public request(operation: Operation) {
- this.operation = operation;
- const key = requestToKey(operation);
- const responses = this.mockedResponsesByKey[key];
- if (!responses || responses.length === 0) {
- throw new Error(
- `No more mocked responses for the query: ${print(
- operation.query,
- )}, variables: ${JSON.stringify(operation.variables)}`,
- );
- }
-
- const { result, error, delay } = responses.shift()!;
- if (!result && !error) {
- throw new Error(
- `Mocked response should contain either result or error: ${key}`,
- );
- }
-
- return new Observable<FetchResult>(observer => {
- let timer = setTimeout(
- () => {
- if (error) {
- observer.error(error);
- } else {
- if (result) observer.next(result);
- observer.complete();
- }
- },
- delay ? delay : 0,
- );
-
- return () => {
- clearTimeout(timer);
- };
- });
- }
-}
-
-export class MockSubscriptionLink extends ApolloLink {
- // private observer: Observer<any>;
- private observer: any;
- public unsubscribers: any[] = [];
- public setups: any[] = [];
-
- constructor() {
- super();
- }
-
- public request() {
- return new Observable<FetchResult>(observer => {
- this.setups.forEach(x => x());
- this.observer = observer;
- return {
- unsubscribe: () => {
- this.unsubscribers.forEach(x => x());
- },
- closed: false,
- };
- });
- }
-
- public simulateResult(result: MockedSubscriptionResult) {
- setTimeout(() => {
- const { observer } = this;
- if (!observer) throw new Error('subscription torn down');
- if (result.result && observer.next) observer.next(result.result);
- if (result.error && observer.error) observer.error(result.error);
- }, result.delay || 0);
- }
-
- public simulateComplete() {
- const { observer } = this;
- if (!observer) throw new Error('subscription torn down');
- if (observer.complete) observer.complete();
- }
-
- public onSetup(listener: any): void {
- this.setups = this.setups.concat([listener]);
- }
-
- public onUnsubscribe(listener: any): void {
- this.unsubscribers = this.unsubscribers.concat([listener]);
- }
-}
-
-function requestToKey(request: GraphQLRequest): string {
- const queryString = request.query && print(request.query);
-
- return JSON.stringify({
- variables: request.variables || {},
- query: queryString,
- });
-}
diff --git a/packages/apollo-client/src/__mocks__/mockQueryManager.ts b/packages/apollo-client/src/__mocks__/mockQueryManager.ts
deleted file mode 100644
--- a/packages/apollo-client/src/__mocks__/mockQueryManager.ts
+++ /dev/null
@@ -1,15 +0,0 @@
-import { QueryManager } from '../core/QueryManager';
-
-import { mockSingleLink, MockedResponse } from './mockLinks';
-
-import { DataStore } from '../data/store';
-import { InMemoryCache } from 'apollo-cache-inmemory';
-
-// Helper method for the tests that construct a query manager out of a
-// a list of mocked responses for a mocked network interface.
-export default (...mockedResponses: MockedResponse[]) => {
- return new QueryManager({
- link: mockSingleLink(...mockedResponses),
- store: new DataStore(new InMemoryCache({ addTypename: false })),
- });
-};
diff --git a/packages/apollo-client/src/data/store.ts b/packages/apollo-client/src/data/store.ts
deleted file mode 100644
--- a/packages/apollo-client/src/data/store.ts
+++ /dev/null
@@ -1,205 +0,0 @@
-import { ExecutionResult, DocumentNode } from 'graphql';
-import { ApolloCache, Cache, DataProxy } from 'apollo-cache';
-
-import { QueryStoreValue } from '../data/queries';
-import {
- getOperationName,
- tryFunctionOrLogError,
- graphQLResultHasError,
-} from 'apollo-utilities';
-import { MutationQueryReducer } from '../core/types';
-
-export type QueryWithUpdater = {
- updater: MutationQueryReducer<Object>;
- query: QueryStoreValue;
-};
-
-export interface DataWrite {
- rootId: string;
- result: any;
- document: DocumentNode;
- operationName: string | null;
- variables: Object;
-}
-
-export class DataStore<TSerialized> {
- private cache: ApolloCache<TSerialized>;
-
- constructor(initialCache: ApolloCache<TSerialized>) {
- this.cache = initialCache;
- }
-
- public getCache(): ApolloCache<TSerialized> {
- return this.cache;
- }
-
- public markQueryResult(
- result: ExecutionResult,
- document: DocumentNode,
- variables: any,
- fetchMoreForQueryId: string | undefined,
- ignoreErrors: boolean = false,
- ) {
- let writeWithErrors = !graphQLResultHasError(result);
- if (ignoreErrors && graphQLResultHasError(result) && result.data) {
- writeWithErrors = true;
- }
- if (!fetchMoreForQueryId && writeWithErrors) {
- this.cache.write({
- result: result.data,
- dataId: 'ROOT_QUERY',
- query: document,
- variables: variables,
- });
- }
- }
-
- public markSubscriptionResult(
- result: ExecutionResult,
- document: DocumentNode,
- variables: any,
- ) {
- // the subscription interface should handle not sending us results we no longer subscribe to.
- // XXX I don't think we ever send in an object with errors, but we might in the future...
- if (!graphQLResultHasError(result)) {
- this.cache.write({
- result: result.data,
- dataId: 'ROOT_SUBSCRIPTION',
- query: document,
- variables: variables,
- });
- }
- }
-
- public markMutationInit(mutation: {
- mutationId: string;
- document: DocumentNode;
- variables: any;
- updateQueries: { [queryId: string]: QueryWithUpdater };
- update: ((proxy: DataProxy, mutationResult: Object) => void) | undefined;
- optimisticResponse: Object | Function | undefined;
- }) {
- if (mutation.optimisticResponse) {
- let optimistic: Object;
- if (typeof mutation.optimisticResponse === 'function') {
- optimistic = mutation.optimisticResponse(mutation.variables);
- } else {
- optimistic = mutation.optimisticResponse;
- }
-
- this.cache.recordOptimisticTransaction(c => {
- const orig = this.cache;
- this.cache = c;
-
- try {
- this.markMutationResult({
- mutationId: mutation.mutationId,
- result: { data: optimistic },
- document: mutation.document,
- variables: mutation.variables,
- updateQueries: mutation.updateQueries,
- update: mutation.update,
- });
- } finally {
- this.cache = orig;
- }
- }, mutation.mutationId);
- }
- }
-
- public markMutationResult(mutation: {
- mutationId: string;
- result: ExecutionResult;
- document: DocumentNode;
- variables: any;
- updateQueries: { [queryId: string]: QueryWithUpdater };
- update: ((proxy: DataProxy, mutationResult: Object) => void) | undefined;
- }) {
- // Incorporate the result from this mutation into the store
- if (!graphQLResultHasError(mutation.result)) {
- const cacheWrites: Cache.WriteOptions[] = [{
- result: mutation.result.data,
- dataId: 'ROOT_MUTATION',
- query: mutation.document,
- variables: mutation.variables,
- }];
-
- const { updateQueries } = mutation;
- if (updateQueries) {
- Object.keys(updateQueries).forEach(id => {
- const { query, updater } = updateQueries[id];
-
- // Read the current query result from the store.
- const { result: currentQueryResult, complete } = this.cache.diff({
- query: query.document,
- variables: query.variables,
- returnPartialData: true,
- optimistic: false,
- });
-
- if (complete) {
- // Run our reducer using the current query result and the mutation result.
- const nextQueryResult = tryFunctionOrLogError(() =>
- updater(currentQueryResult, {
- mutationResult: mutation.result,
- queryName: getOperationName(query.document) || undefined,
- queryVariables: query.variables,
- }),
- );
-
- // Write the modified result back into the store if we got a new result.
- if (nextQueryResult) {
- cacheWrites.push({
- result: nextQueryResult,
- dataId: 'ROOT_QUERY',
- query: query.document,
- variables: query.variables,
- });
- }
- }
- });
- }
-
- this.cache.performTransaction(c => {
- cacheWrites.forEach(write => c.write(write));
-
- // If the mutation has some writes associated with it then we need to
- // apply those writes to the store by running this reducer again with a
- // write action.
- const { update } = mutation;
- if (update) {
- tryFunctionOrLogError(() => update(c, mutation.result));
- }
- });
- }
- }
-
- public markMutationComplete({
- mutationId,
- optimisticResponse,
- }: {
- mutationId: string;
- optimisticResponse?: any;
- }) {
- if (optimisticResponse) {
- this.cache.removeOptimistic(mutationId);
- }
- }
-
- public markUpdateQueryResult(
- document: DocumentNode,
- variables: any,
- newResult: any,
- ) {
- this.cache.write({
- result: newResult,
- dataId: 'ROOT_QUERY',
- variables,
- query: document,
- });
- }
-
- public reset(): Promise<void> {
- return this.cache.reset();
- }
-}
diff --git a/packages/apollo-client/src/index.ts b/packages/apollo-client/src/index.ts
deleted file mode 100644
--- a/packages/apollo-client/src/index.ts
+++ /dev/null
@@ -1,38 +0,0 @@
-export {
- ObservableQuery,
- FetchMoreOptions,
- UpdateQueryOptions,
- ApolloCurrentResult,
- ApolloCurrentQueryResult,
-} from './core/ObservableQuery';
-export {
- QueryBaseOptions,
- QueryOptions,
- WatchQueryOptions,
- MutationOptions,
- SubscriptionOptions,
- FetchPolicy,
- WatchQueryFetchPolicy,
- ErrorPolicy,
- FetchMoreQueryOptions,
- SubscribeToMoreOptions,
- MutationUpdaterFn,
-} from './core/watchQueryOptions';
-export { NetworkStatus } from './core/networkStatus';
-export * from './core/types';
-export {
- Resolver,
- FragmentMatcher as LocalStateFragmentMatcher,
-} from './core/LocalState';
-
-export { isApolloError, ApolloError } from './errors/ApolloError';
-
-import ApolloClient, {
- ApolloClientOptions,
- DefaultOptions,
-} from './ApolloClient';
-export { ApolloClientOptions, DefaultOptions };
-
-// Export the client as both default and named.
-export { ApolloClient };
-export default ApolloClient;
diff --git a/packages/apollo-client/src/util/Observable.ts b/packages/apollo-client/src/util/Observable.ts
deleted file mode 100644
--- a/packages/apollo-client/src/util/Observable.ts
+++ /dev/null
@@ -1,19 +0,0 @@
-// This simplified polyfill attempts to follow the ECMAScript Observable proposal.
-// See https://github.com/zenparsing/es-observable
-import { Observable as LinkObservable } from 'apollo-link';
-
-export type Subscription = ZenObservable.Subscription;
-export type Observer<T> = ZenObservable.Observer<T>;
-
-import $$observable from 'symbol-observable';
-
-// rxjs interop
-export class Observable<T> extends LinkObservable<T> {
- public [$$observable]() {
- return this;
- }
-
- public ['@@observable' as any]() {
- return this;
- }
-}
diff --git a/packages/apollo-client/src/util/capitalizeFirstLetter.ts b/packages/apollo-client/src/util/capitalizeFirstLetter.ts
deleted file mode 100644
--- a/packages/apollo-client/src/util/capitalizeFirstLetter.ts
+++ /dev/null
@@ -1,3 +0,0 @@
-export function capitalizeFirstLetter(str: string) {
- return str.charAt(0).toUpperCase() + str.slice(1);
-}
diff --git a/packages/apollo-utilities/jest.config.js b/packages/apollo-utilities/jest.config.js
deleted file mode 100644
--- a/packages/apollo-utilities/jest.config.js
+++ /dev/null
@@ -1,3 +0,0 @@
-module.exports = {
- ...require('../../config/jest.config.settings'),
-};
diff --git a/packages/apollo-utilities/rollup.config.js b/packages/apollo-utilities/rollup.config.js
deleted file mode 100644
--- a/packages/apollo-utilities/rollup.config.js
+++ /dev/null
@@ -1,9 +0,0 @@
-import { rollup } from '../../config/rollup.config';
-
-export default rollup({
- name: 'apollo-utilities',
- extraGlobals: {
- 'fast-json-stable-stringify': 'stringify',
- '@wry/equality': 'wryEquality',
- },
-});
diff --git a/packages/apollo-utilities/src/declarations.d.ts b/packages/apollo-utilities/src/declarations.d.ts
deleted file mode 100644
--- a/packages/apollo-utilities/src/declarations.d.ts
+++ /dev/null
@@ -1 +0,0 @@
-declare module 'fast-json-stable-stringify';
diff --git a/packages/apollo-utilities/src/index.ts b/packages/apollo-utilities/src/index.ts
deleted file mode 100644
--- a/packages/apollo-utilities/src/index.ts
+++ /dev/null
@@ -1,16 +0,0 @@
-export * from './directives';
-export * from './fragments';
-export * from './getFromAST';
-export * from './transform';
-export * from './storeUtils';
-export * from './util/assign';
-export * from './util/canUse';
-export * from './util/cloneDeep';
-export * from './util/environment';
-export * from './util/errorHandling';
-export * from './util/isEqual';
-export * from './util/maybeDeepFreeze';
-export * from './util/mergeDeep';
-export * from './util/warnOnce';
-export * from './util/stripSymbols';
diff --git a/packages/apollo-utilities/src/util/isEqual.ts b/packages/apollo-utilities/src/util/isEqual.ts
deleted file mode 100644
--- a/packages/apollo-utilities/src/util/isEqual.ts
+++ /dev/null
@@ -1 +0,0 @@
-export { equal as isEqual } from '@wry/equality';
diff --git a/packages/apollo-utilities/src/util/maybeDeepFreeze.ts b/packages/apollo-utilities/src/util/maybeDeepFreeze.ts
deleted file mode 100644
--- a/packages/apollo-utilities/src/util/maybeDeepFreeze.ts
+++ /dev/null
@@ -1,33 +0,0 @@
-import { isDevelopment, isTest } from './environment';
-
-// Taken (mostly) from https://github.com/substack/deep-freeze to avoid
-// import hassles with rollup.
-function deepFreeze(o: any) {
- Object.freeze(o);
-
- Object.getOwnPropertyNames(o).forEach(function(prop) {
- if (
- o[prop] !== null &&
- (typeof o[prop] === 'object' || typeof o[prop] === 'function') &&
- !Object.isFrozen(o[prop])
- ) {
- deepFreeze(o[prop]);
- }
- });
-
- return o;
-}
-
-export function maybeDeepFreeze(obj: any) {
- if (isDevelopment() || isTest()) {
- // Polyfilled Symbols potentially cause infinite / very deep recursion while deep freezing
- // which is known to crash IE11 (https://github.com/apollographql/apollo-client/issues/3043).
- const symbolIsPolyfilled =
- typeof Symbol === 'function' && typeof Symbol('') === 'string';
-
- if (!symbolIsPolyfilled) {
- return deepFreeze(obj);
- }
- }
- return obj;
-}
diff --git a/packages/apollo-utilities/src/util/mergeDeep.ts b/packages/apollo-utilities/src/util/mergeDeep.ts
deleted file mode 100644
--- a/packages/apollo-utilities/src/util/mergeDeep.ts
+++ /dev/null
@@ -1,115 +0,0 @@
-const { hasOwnProperty } = Object.prototype;
-
-// These mergeDeep and mergeDeepArray utilities merge any number of objects
-// together, sharing as much memory as possible with the source objects, while
-// remaining careful to avoid modifying any source objects.
-
-// Logically, the return type of mergeDeep should be the intersection of
-// all the argument types. The binary call signature is by far the most
-// common, but we support 0- through 5-ary as well. After that, the
-// resulting type is just the inferred array element type. Note to nerds:
-// there is a more clever way of doing this that converts the tuple type
-// first to a union type (easy enough: T[number]) and then converts the
-// union to an intersection type using distributive conditional type
-// inference, but that approach has several fatal flaws (boolean becomes
-// true & false, and the inferred type ends up as unknown in many cases),
-// in addition to being nearly impossible to explain/understand.
-export type TupleToIntersection<T extends any[]> =
- T extends [infer A] ? A :
- T extends [infer A, infer B] ? A & B :
- T extends [infer A, infer B, infer C] ? A & B & C :
- T extends [infer A, infer B, infer C, infer D] ? A & B & C & D :
- T extends [infer A, infer B, infer C, infer D, infer E] ? A & B & C & D & E :
- T extends (infer U)[] ? U : any;
-
-export function mergeDeep<T extends any[]>(
- ...sources: T
-): TupleToIntersection<T> {
- return mergeDeepArray(sources);
-}
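-
-// A small illustration (hypothetical values): the merge is recursive, and
-// branches without key collisions share memory with the source objects.
-//
-//   mergeDeep({ a: { b: 1 } }, { a: { c: 2 }, d: 3 });
-//   // => { a: { b: 1, c: 2 }, d: 3 }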
-
-// In almost any situation where you could succeed in getting the
-// TypeScript compiler to infer a tuple type for the sources array, you
-// could just use mergeDeep instead of mergeDeepArray, so instead of
-// trying to convert T[] to an intersection type we just infer the array
-// element type, which works perfectly when the sources array has a
-// consistent element type.
-export function mergeDeepArray<T>(sources: T[]): T {
- let target = sources[0] || {} as T;
- const count = sources.length;
- if (count > 1) {
- const pastCopies: any[] = [];
- target = shallowCopyForMerge(target, pastCopies);
- for (let i = 1; i < count; ++i) {
- target = mergeHelper(target, sources[i], pastCopies);
- }
- }
- return target;
-}
-
-function isObject(obj: any): obj is Record<string | number, any> {
- return obj !== null && typeof obj === 'object';
-}
-
-function mergeHelper(
- target: any,
- source: any,
- pastCopies: any[],
-) {
- if (isObject(source) && isObject(target)) {
- // In case the target has been frozen, make an extensible copy so that
- // we can merge properties into the copy.
- if (Object.isExtensible && !Object.isExtensible(target)) {
- target = shallowCopyForMerge(target, pastCopies);
- }
-
- Object.keys(source).forEach(sourceKey => {
- const sourceValue = source[sourceKey];
- if (hasOwnProperty.call(target, sourceKey)) {
- const targetValue = target[sourceKey];
- if (sourceValue !== targetValue) {
- // When there is a key collision, we need to make a shallow copy of
- // target[sourceKey] so the merge does not modify any source objects.
- // To avoid making unnecessary copies, we use a simple array to track
- // past copies, since it's safe to modify copies created earlier in
- // the merge. We use an array for pastCopies instead of a Map or Set,
- // since the number of copies should be relatively small, and some
- // Map/Set polyfills modify their keys.
- target[sourceKey] = mergeHelper(
- shallowCopyForMerge(targetValue, pastCopies),
- sourceValue,
- pastCopies,
- );
- }
- } else {
- // If there is no collision, the target can safely share memory with
- // the source, and the recursion can terminate here.
- target[sourceKey] = sourceValue;
- }
- });
-
- return target;
- }
-
- // If source (or target) is not an object, let source replace target.
- return source;
-}
-
-function shallowCopyForMerge<T>(value: T, pastCopies: any[]): T {
- if (
- value !== null &&
- typeof value === 'object' &&
- pastCopies.indexOf(value) < 0
- ) {
- if (Array.isArray(value)) {
- value = (value as any).slice(0);
- } else {
- value = {
- __proto__: Object.getPrototypeOf(value),
- ...value,
- };
- }
- pastCopies.push(value);
- }
- return value;
-}
diff --git a/packages/apollo-utilities/src/util/warnOnce.ts b/packages/apollo-utilities/src/util/warnOnce.ts
deleted file mode 100644
--- a/packages/apollo-utilities/src/util/warnOnce.ts
+++ /dev/null
@@ -1,24 +0,0 @@
-import { isProduction, isTest } from './environment';
-
-const haveWarned = Object.create({});
-
-/**
- * Print a warning only once in development.
- * In production no warnings are printed.
- * In test all warnings are printed.
- *
- * @param msg The warning message
- * @param type warn or error (will call console.warn or console.error)
- */
-export function warnOnceInDevelopment(msg: string, type = 'warn') {
- if (!isProduction() && !haveWarned[msg]) {
- if (!isTest()) {
- haveWarned[msg] = true;
- }
- if (type === 'error') {
- console.error(msg);
- } else {
- console.warn(msg);
- }
- }
-}
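-
-// Usage sketch (hypothetical message):
-//   warnOnceInDevelopment('someOption is deprecated'); // printed once in dev
-//   warnOnceInDevelopment('someOption is deprecated'); // silent outside tests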
diff --git a/packages/graphql-anywhere/jest.config.js b/packages/graphql-anywhere/jest.config.js
deleted file mode 100644
--- a/packages/graphql-anywhere/jest.config.js
+++ /dev/null
@@ -1,3 +0,0 @@
-module.exports = {
- ...require('../../config/jest.config.settings'),
-};
diff --git a/packages/graphql-anywhere/rollup.config.js b/packages/graphql-anywhere/rollup.config.js
deleted file mode 100644
--- a/packages/graphql-anywhere/rollup.config.js
+++ /dev/null
@@ -1,5 +0,0 @@
-import { rollup } from '../../config/rollup.config';
-
-export default [
- ...rollup({ name: 'graphql-anywhere' }),
-];
diff --git a/packages/graphql-anywhere/src/async.ts b/packages/graphql-anywhere/src/async.ts
deleted file mode 100644
--- a/packages/graphql-anywhere/src/async.ts
+++ /dev/null
@@ -1,202 +0,0 @@
-import {
- DocumentNode,
- SelectionSetNode,
- FieldNode,
- FragmentDefinitionNode,
- InlineFragmentNode,
-} from 'graphql';
-
-import {
- getMainDefinition,
- getFragmentDefinitions,
- createFragmentMap,
- shouldInclude,
- getDirectiveInfoFromField,
- isField,
- isInlineFragment,
- resultKeyNameFromField,
- argumentsObjectFromField,
-} from 'apollo-utilities';
-
-import {
- merge,
- Resolver,
- VariableMap,
- ExecContext,
- ExecInfo,
- ExecOptions,
-} from './graphql';
-
-/* Based on graphql function from graphql-js:
- *
- * graphql(
- * schema: GraphQLSchema,
- * requestString: string,
- * rootValue?: ?any,
- * contextValue?: ?any,
- * variableValues?: ?{[key: string]: any},
- * operationName?: ?string
- * ): Promise<GraphQLResult>
- *
- * The default export of graphql-anywhere is sync as of 4.0,
- * but below is an exported alternative that is async.
- * In the 5.0 version, this will be the only export again
- * and it will be async
- */
-export function graphql(
- resolver: Resolver,
- document: DocumentNode,
- rootValue?: any,
- contextValue?: any,
- variableValues?: VariableMap,
- execOptions: ExecOptions = {},
-): Promise<null | Object> {
- const mainDefinition = getMainDefinition(document);
-
- const fragments = getFragmentDefinitions(document);
- const fragmentMap = createFragmentMap(fragments);
-
- const resultMapper = execOptions.resultMapper;
-
- // Default matcher always matches all fragments
- const fragmentMatcher = execOptions.fragmentMatcher || (() => true);
-
- const execContext: ExecContext = {
- fragmentMap,
- contextValue,
- variableValues,
- resultMapper,
- resolver,
- fragmentMatcher,
- };
-
- return executeSelectionSet(
- mainDefinition.selectionSet,
- rootValue,
- execContext,
- );
-}
-
-async function executeSelectionSet(
- selectionSet: SelectionSetNode,
- rootValue: any,
- execContext: ExecContext,
-) {
- const { fragmentMap, contextValue, variableValues: variables } = execContext;
-
- const result = {};
-
- const execute = async selection => {
- if (!shouldInclude(selection, variables)) {
- // Skip this entirely
- return;
- }
-
- if (isField(selection)) {
- const fieldResult = await executeField(selection, rootValue, execContext);
-
- const resultFieldKey = resultKeyNameFromField(selection);
-
- if (fieldResult !== undefined) {
- if (result[resultFieldKey] === undefined) {
- result[resultFieldKey] = fieldResult;
- } else {
- merge(result[resultFieldKey], fieldResult);
- }
- }
-
- return;
- }
-
- let fragment: InlineFragmentNode | FragmentDefinitionNode;
-
- if (isInlineFragment(selection)) {
- fragment = selection;
- } else {
- // This is a named fragment
- fragment = fragmentMap[selection.name.value];
-
- if (!fragment) {
- throw new Error(`No fragment named ${selection.name.value}`);
- }
- }
-
- const typeCondition = fragment.typeCondition.name.value;
-
- if (execContext.fragmentMatcher(rootValue, typeCondition, contextValue)) {
- const fragmentResult = await executeSelectionSet(
- fragment.selectionSet,
- rootValue,
- execContext,
- );
-
- merge(result, fragmentResult);
- }
- };
-
- await Promise.all(selectionSet.selections.map(execute));
-
- if (execContext.resultMapper) {
- return execContext.resultMapper(result, rootValue);
- }
-
- return result;
-}
-
-async function executeField(
- field: FieldNode,
- rootValue: any,
- execContext: ExecContext,
-): Promise<null | Object> {
- const { variableValues: variables, contextValue, resolver } = execContext;
-
- const fieldName = field.name.value;
- const args = argumentsObjectFromField(field, variables);
-
- const info: ExecInfo = {
- isLeaf: !field.selectionSet,
- resultKey: resultKeyNameFromField(field),
- directives: getDirectiveInfoFromField(field, variables),
- field,
- };
-
- const result = await resolver(fieldName, rootValue, args, contextValue, info);
-
- // Handle all scalar types here
- if (!field.selectionSet) {
- return result;
- }
-
- // From here down, the field has a selection set, which means it's trying to
- // query a GraphQLObjectType
- if (result == null) {
- // Basically any field in a GraphQL response can be null, or missing
- return result;
- }
-
- if (Array.isArray(result)) {
- return executeSubSelectedArray(field, result, execContext);
- }
-
- // Returned value is an object, and the query has a sub-selection. Recurse.
- return executeSelectionSet(field.selectionSet, result, execContext);
-}
-
-function executeSubSelectedArray(field, result, execContext) {
- return Promise.all(
- result.map(item => {
- // null value in array
- if (item === null) {
- return null;
- }
-
- // This is a nested array, recurse
- if (Array.isArray(item)) {
- return executeSubSelectedArray(field, item, execContext);
- }
-
- // This is an object, run the selection set on it
- return executeSelectionSet(field.selectionSet, item, execContext);
- }),
- );
-}
diff --git a/packages/graphql-anywhere/src/graphql.ts b/packages/graphql-anywhere/src/graphql.ts
deleted file mode 100644
--- a/packages/graphql-anywhere/src/graphql.ts
+++ /dev/null
@@ -1,245 +0,0 @@
-import {
- DocumentNode,
- SelectionSetNode,
- FieldNode,
- FragmentDefinitionNode,
- InlineFragmentNode,
-} from 'graphql';
-
-import {
- getMainDefinition,
- getFragmentDefinitions,
- createFragmentMap,
- FragmentMap,
- DirectiveInfo,
- shouldInclude,
- getDirectiveInfoFromField,
- isField,
- isInlineFragment,
- resultKeyNameFromField,
- argumentsObjectFromField,
-} from 'apollo-utilities';
-
-export type Resolver = (
- fieldName: string,
- rootValue: any,
- args: any,
- context: any,
- info: ExecInfo,
-) => any;
-
-export type VariableMap = { [name: string]: any };
-
-export type ResultMapper = (
- values: { [fieldName: string]: any },
- rootValue: any,
-) => any;
-export type FragmentMatcher = (
- rootValue: any,
- typeCondition: string,
- context: any,
-) => boolean;
-
-export type ExecContext = {
- fragmentMap: FragmentMap;
- contextValue: any;
- variableValues: VariableMap;
- resultMapper: ResultMapper;
- resolver: Resolver;
- fragmentMatcher: FragmentMatcher;
-};
-
-export type ExecInfo = {
- isLeaf: boolean;
- resultKey: string;
- directives: DirectiveInfo;
- field: FieldNode;
-};
-
-export type ExecOptions = {
- resultMapper?: ResultMapper;
- fragmentMatcher?: FragmentMatcher;
-};
-
-/* Based on graphql function from graphql-js:
- *
- * graphql(
- * schema: GraphQLSchema,
- * requestString: string,
- * rootValue?: ?any,
- * contextValue?: ?any,
- * variableValues?: ?{[key: string]: any},
- * operationName?: ?string
- * ): Promise<GraphQLResult>
- *
- * The default export of graphql-anywhere is sync as of 4.0,
- * but below is an exported alternative that is async.
- * In the 5.0 version, this will be the only export again
- * and it will be async
- */
-export function graphql(
- resolver: Resolver,
- document: DocumentNode,
- rootValue?: any,
- contextValue?: any,
- variableValues: VariableMap = {},
- execOptions: ExecOptions = {},
-) {
- const mainDefinition = getMainDefinition(document);
-
- const fragments = getFragmentDefinitions(document);
- const fragmentMap = createFragmentMap(fragments);
-
- const resultMapper = execOptions.resultMapper;
-
- // Default matcher always matches all fragments
- const fragmentMatcher = execOptions.fragmentMatcher || (() => true);
-
- const execContext: ExecContext = {
- fragmentMap,
- contextValue,
- variableValues,
- resultMapper,
- resolver,
- fragmentMatcher,
- };
-
- return executeSelectionSet(
- mainDefinition.selectionSet,
- rootValue,
- execContext,
- );
-}
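-
-// A usage sketch (hypothetical document; assumes a `gql` tag is in scope):
-// resolving a query against a plain object by reading `info.resultKey`.
-//
-//   const resolver: Resolver = (fieldName, root, args, context, info) =>
-//     root[info.resultKey];
-//   graphql(resolver, gql`{ a { b } }`, { a: { b: 1 } });
-//   // => { a: { b: 1 } }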
-
-function executeSelectionSet(
- selectionSet: SelectionSetNode,
- rootValue: any,
- execContext: ExecContext,
-) {
- const { fragmentMap, contextValue, variableValues: variables } = execContext;
-
- const result = {};
-
- selectionSet.selections.forEach(selection => {
- if (variables && !shouldInclude(selection, variables)) {
- // Skip selection sets which we're able to determine should not be run
- return;
- }
-
- if (isField(selection)) {
- const fieldResult = executeField(selection, rootValue, execContext);
-
- const resultFieldKey = resultKeyNameFromField(selection);
-
- if (fieldResult !== undefined) {
- if (result[resultFieldKey] === undefined) {
- result[resultFieldKey] = fieldResult;
- } else {
- merge(result[resultFieldKey], fieldResult);
- }
- }
- } else {
- let fragment: InlineFragmentNode | FragmentDefinitionNode;
-
- if (isInlineFragment(selection)) {
- fragment = selection;
- } else {
- // This is a named fragment
- fragment = fragmentMap[selection.name.value];
-
- if (!fragment) {
- throw new Error(`No fragment named ${selection.name.value}`);
- }
- }
-
- const typeCondition = fragment.typeCondition.name.value;
-
- if (execContext.fragmentMatcher(rootValue, typeCondition, contextValue)) {
- const fragmentResult = executeSelectionSet(
- fragment.selectionSet,
- rootValue,
- execContext,
- );
-
- merge(result, fragmentResult);
- }
- }
- });
-
- if (execContext.resultMapper) {
- return execContext.resultMapper(result, rootValue);
- }
-
- return result;
-}
-
-function executeField(
- field: FieldNode,
- rootValue: any,
- execContext: ExecContext,
-): any {
- const { variableValues: variables, contextValue, resolver } = execContext;
-
- const fieldName = field.name.value;
- const args = argumentsObjectFromField(field, variables);
-
- const info: ExecInfo = {
- isLeaf: !field.selectionSet,
- resultKey: resultKeyNameFromField(field),
- directives: getDirectiveInfoFromField(field, variables),
- field,
- };
-
- const result = resolver(fieldName, rootValue, args, contextValue, info);
-
- // Handle all scalar types here
- if (!field.selectionSet) {
- return result;
- }
-
- // From here down, the field has a selection set, which means it's trying to
- // query a GraphQLObjectType
- if (result == null) {
- // Basically any field in a GraphQL response can be null, or missing
- return result;
- }
-
- if (Array.isArray(result)) {
- return executeSubSelectedArray(field, result, execContext);
- }
-
- // Returned value is an object, and the query has a sub-selection. Recurse.
- return executeSelectionSet(field.selectionSet, result, execContext);
-}
-
-function executeSubSelectedArray(field, result, execContext) {
- return result.map(item => {
- // null value in array
- if (item === null) {
- return null;
- }
-
- // This is a nested array, recurse
- if (Array.isArray(item)) {
- return executeSubSelectedArray(field, item, execContext);
- }
-
- // This is an object, run the selection set on it
- return executeSelectionSet(field.selectionSet, item, execContext);
- });
-}
-
-const hasOwn = Object.prototype.hasOwnProperty;
-
-export function merge(dest, src) {
- if (src !== null && typeof src === 'object') {
- Object.keys(src).forEach(key => {
- const srcVal = src[key];
- if (!hasOwn.call(dest, key)) {
- dest[key] = srcVal;
- } else {
- merge(dest[key], srcVal);
- }
- });
- }
-}
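-
-// For example (illustrative values): merge mutates `dest` in place and keeps
-// existing `dest` values when scalar keys collide.
-//
-//   const dest = { a: { b: 1 } };
-//   merge(dest, { a: { c: 2 }, d: 3 });
-//   // dest is now { a: { b: 1, c: 2 }, d: 3 }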
diff --git a/packages/graphql-anywhere/src/index.ts b/packages/graphql-anywhere/src/index.ts
deleted file mode 100644
--- a/packages/graphql-anywhere/src/index.ts
+++ /dev/null
@@ -1,14 +0,0 @@
-export { filter, check, propType } from './utilities';
-
-export {
- Resolver,
- VariableMap,
- ResultMapper,
- FragmentMatcher,
- ExecContext,
- ExecInfo,
- ExecOptions,
-} from './graphql';
-
-import { graphql } from './graphql';
-export default graphql;
diff --git a/packages/graphql-anywhere/src/utilities.ts b/packages/graphql-anywhere/src/utilities.ts
deleted file mode 100644
--- a/packages/graphql-anywhere/src/utilities.ts
+++ /dev/null
@@ -1,143 +0,0 @@
-import { DocumentNode, DirectiveNode } from 'graphql';
-
-import { getInclusionDirectives } from 'apollo-utilities';
-
-import { graphql, VariableMap, ExecInfo, ExecContext } from './graphql';
-
-import { invariant } from 'ts-invariant';
-
-const { hasOwnProperty } = Object.prototype;
-
-export function filter<FD = any, D extends FD = any>(
- doc: DocumentNode,
- data: D,
- variableValues: VariableMap = {},
-): FD {
- if (data === null) return data;
-
- const resolver = (
- fieldName: string,
- root: any,
- args: Object,
- context: ExecContext,
- info: ExecInfo,
- ) => {
- return root[info.resultKey];
- };
-
- return Array.isArray(data)
- ? data.map(dataObj => graphql(resolver, doc, dataObj, null, variableValues))
- : graphql(resolver, doc, data, null, variableValues);
-}
-
-// TODO: we should probably make check call propType and then throw,
-// rather than the other way round, to avoid constructing stack traces
-// for things like oneOf uses in React. At this stage I doubt many people
-// are using this like that, but in the future, who knows?
-export function check(
- doc: DocumentNode,
- data: any,
- variables: VariableMap = {},
-): void {
- const resolver = (
- fieldName: string,
- root: any,
- args: any,
- context: any,
- info: any,
- ) => {
- // When variables is null, fields with @include/skip directives that
- // reference variables are considered optional.
- invariant(
- hasOwnProperty.call(root, info.resultKey) ||
- (!variables && hasVariableInclusions(info.field.directives)),
- `${info.resultKey} missing on ${JSON.stringify(root)}`,
- );
- return root[info.resultKey];
- };
-
- graphql(resolver, doc, data, {}, variables, {
- fragmentMatcher: () => false,
- });
-}
-
-function hasVariableInclusions(
- directives: ReadonlyArray<DirectiveNode>,
-): boolean {
- return getInclusionDirectives(directives).some(
- ({ ifArgument }) =>
- ifArgument.value && ifArgument.value.kind === 'Variable',
- );
-}
-
-// Lifted/adapted from
-// https://github.com/facebook/react/blob/master/src/isomorphic/classic/types/ReactPropTypes.js
-const ANONYMOUS = '<<anonymous>>';
-function PropTypeError(message) {
- this.message = message;
- this.stack = '';
-}
-// Make `instanceof Error` still work for returned errors.
-PropTypeError.prototype = Error.prototype;
-
-const reactPropTypeLocationNames = {
- prop: 'prop',
- context: 'context',
- childContext: 'child context',
-};
-
-function createChainableTypeChecker(validate) {
- function checkType(
- isRequired,
- props,
- propName,
- componentName,
- location,
- propFullName,
- ) {
- componentName = componentName || ANONYMOUS;
- propFullName = propFullName || propName;
- if (props[propName] == null) {
- const locationName = reactPropTypeLocationNames[location];
- if (isRequired) {
- if (props[propName] === null) {
- return new PropTypeError(
- `The ${locationName} \`${propFullName}\` is marked as required ` +
- `in \`${componentName}\`, but its value is \`null\`.`,
- );
- }
- return new PropTypeError(
- `The ${locationName} \`${propFullName}\` is marked as required in ` +
- `\`${componentName}\`, but its value is \`undefined\`.`,
- );
- }
- return null;
- } else {
- return validate(props, propName, componentName, location, propFullName);
- }
- }
-
- const chainedCheckType = checkType.bind(null, false);
- chainedCheckType.isRequired = checkType.bind(null, true);
-
- return chainedCheckType;
-}
-
-export function propType(
- doc: DocumentNode,
- mapPropsToVariables = props => null,
-) {
- return createChainableTypeChecker((props, propName) => {
- const prop = props[propName];
- try {
- if (!prop.loading) {
- check(doc, prop, mapPropsToVariables(props));
- }
- return null;
- } catch (e) {
- // Need a much better error.
- // Also we aren't checking for extra fields
- return e;
- }
- });
-}
diff --git a/packages/apollo-client/src/ApolloClient.ts b/src/ApolloClient.ts
similarity index 93%
rename from packages/apollo-client/src/ApolloClient.ts
rename to src/ApolloClient.ts
--- a/packages/apollo-client/src/ApolloClient.ts
+++ b/src/ApolloClient.ts
@@ -1,14 +1,11 @@
-import {
- ApolloLink,
- FetchResult,
- GraphQLRequest,
- execute,
-} from 'apollo-link';
import { ExecutionResult, DocumentNode } from 'graphql';
-import { ApolloCache, DataProxy } from 'apollo-cache';
-
import { invariant, InvariantError } from 'ts-invariant';
+import { ApolloLink } from './link/core/ApolloLink';
+import { FetchResult, GraphQLRequest } from './link/core/types';
+import { execute } from './link/core/execute';
+import { ApolloCache } from './cache/core/cache';
+import { DataProxy } from './cache/core/types/DataProxy';
import { QueryManager } from './core/QueryManager';
import {
ApolloQueryResult,
@@ -17,8 +14,7 @@ import {
} from './core/types';
import { ObservableQuery } from './core/ObservableQuery';
import { LocalState, FragmentMatcher } from './core/LocalState';
-import { Observable } from './util/Observable';
-
+import { Observable } from './utilities/observables/Observable';
import {
QueryOptions,
WatchQueryOptions,
@@ -26,10 +22,9 @@ import {
MutationOptions,
WatchQueryFetchPolicy,
} from './core/watchQueryOptions';
-
-import { DataStore } from './data/store';
-
import { version } from './version';
+import { HttpLink } from './link/http/HttpLink';
+import { UriFunction } from './link/http/selectHttpOptionsAndBody';
export interface DefaultOptions {
watchQuery?: Partial<WatchQueryOptions>;
@@ -40,6 +35,9 @@ export interface DefaultOptions {
let hasSuggestedDevtools = false;
export type ApolloClientOptions<TCacheShape> = {
+ uri?: string | UriFunction;
+ credentials?: string;
+ headers?: Record<string, string>;
link?: ApolloLink;
cache: ApolloCache<TCacheShape>;
ssrForceFetchDelay?: number;
@@ -61,17 +59,16 @@ export type ApolloClientOptions<TCacheShape> = {
* receive results from the server and cache the results in a store. It also delivers updates
* to GraphQL queries through {@link Observable} instances.
*/
-export default class ApolloClient<TCacheShape> implements DataProxy {
+export class ApolloClient<TCacheShape> implements DataProxy {
public link: ApolloLink;
- public store: DataStore<TCacheShape>;
public cache: ApolloCache<TCacheShape>;
- public readonly queryManager: QueryManager<TCacheShape>;
public disableNetworkFetches: boolean;
public version: string;
public queryDeduplication: boolean;
public defaultOptions: DefaultOptions = {};
public readonly typeDefs: ApolloClientOptions<TCacheShape>['typeDefs'];
+ private queryManager: QueryManager<TCacheShape>;
private devToolsHookCb: Function;
private resetStoreCallbacks: Array<() => Promise<any>> = [];
private clearStoreCallbacks: Array<() => Promise<any>> = [];
@@ -80,6 +77,8 @@ export default class ApolloClient<TCacheShape> implements DataProxy {
/**
* Constructs an instance of {@link ApolloClient}.
*
+ * @param uri The GraphQL endpoint that Apollo Client will connect to. If
+ * `link` is configured, this option is ignored.
* @param link The {@link ApolloLink} over which GraphQL documents will be resolved into a response.
*
* @param cache The initial cache to use in the data store.
@@ -113,6 +112,9 @@ export default class ApolloClient<TCacheShape> implements DataProxy {
*/
constructor(options: ApolloClientOptions<TCacheShape>) {
const {
+ uri,
+ credentials,
+ headers,
cache,
ssrMode = false,
ssrForceFetchDelay = 0,
@@ -129,24 +131,25 @@ export default class ApolloClient<TCacheShape> implements DataProxy {
let { link } = options;
- // If a link hasn't been defined, but local state resolvers have been set,
- // setup a default empty link.
- if (!link && resolvers) {
- link = ApolloLink.empty();
+ if (!link) {
+ if (uri) {
+ link = new HttpLink({ uri, credentials, headers });
+ } else if (resolvers) {
+ link = ApolloLink.empty();
+ }
}
if (!link || !cache) {
throw new InvariantError(
- "In order to initialize Apollo Client, you must specify 'link' and 'cache' properties in the options object.\n" +
- "These options are part of the upgrade requirements when migrating from Apollo Client 1.x to Apollo Client 2.x.\n" +
- "For more information, please visit: https://www.apollographql.com/docs/tutorial/client.html#apollo-client-setup"
+ "To initialize Apollo Client, you must specify 'uri' or 'link' and " +
+ "'cache' properties in the options object. \n" +
+ "For more information, please visit: " +
+ "https://www.apollographql.com/docs/react/"
);
}
- // remove apollo-client supported directives
this.link = link;
this.cache = cache;
- this.store = new DataStore(cache);
this.disableNetworkFetches = ssrMode || ssrForceFetchDelay > 0;
this.queryDeduplication = queryDeduplication;
this.defaultOptions = defaultOptions || {};
@@ -221,8 +224,8 @@ export default class ApolloClient<TCacheShape> implements DataProxy {
});
this.queryManager = new QueryManager({
+ cache: this.cache,
link: this.link,
- store: this.store,
queryDeduplication,
ssrMode,
clientAwareness: {
@@ -453,17 +456,6 @@ export default class ApolloClient<TCacheShape> implements DataProxy {
return execute(this.link, payload);
}
- /**
- * This initializes the query manager that tracks queries and the cache
- */
- public initQueryManager(): QueryManager<TCacheShape> {
- invariant.warn(
- 'Calling the initQueryManager method is no longer necessary, ' +
- 'and it will be removed from ApolloClient in version 3.0.',
- );
- return this.queryManager;
- }
-
/**
* Resets your entire store by clearing out your cache and then re-executing
* all of your active queries. This makes it so that you may guarantee that
diff --git a/packages/apollo-cache/src/cache.ts b/src/cache/core/cache.ts
similarity index 90%
rename from packages/apollo-cache/src/cache.ts
rename to src/cache/core/cache.ts
--- a/packages/apollo-cache/src/cache.ts
+++ b/src/cache/core/cache.ts
@@ -1,7 +1,8 @@
import { DocumentNode } from 'graphql';
-import { getFragmentQueryDocument } from 'apollo-utilities';
-import { DataProxy, Cache } from './types';
+import { getFragmentQueryDocument } from '../../utilities/graphql/fragments';
+import { DataProxy } from './types/DataProxy';
+import { Cache } from './types/Cache';
import { justTypenameQuery, queryFromPojo, fragmentFromPojo } from './utils';
export type Transaction<T> = (c: ApolloCache<T>) => void;
@@ -17,11 +18,13 @@ export abstract class ApolloCache<TSerialized> implements DataProxy {
): void;
public abstract diff<T>(query: Cache.DiffOptions): Cache.DiffResult<T>;
public abstract watch(watch: Cache.WatchOptions): () => void;
- public abstract evict<TVariables = any>(
- query: Cache.EvictOptions<TVariables>,
- ): Cache.EvictionResult;
public abstract reset(): Promise<void>;
+ // If called with only one argument, removes the entire entity
+ // identified by dataId. If called with a fieldName as well, removes all
+ // fields of the identified entity whose store names match fieldName.
+ public abstract evict(dataId: string, fieldName?: string): boolean;
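+  //
+  // For example (hypothetical IDs):
+  //   cache.evict('Author:1');         // remove the whole entity
+  //   cache.evict('Author:1', 'name'); // remove only its name field(s)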
+
+  // initializer / offline / ssr API
/**
* Replaces existing state in the cache (if any) with the values expressed by
diff --git a/packages/apollo-cache/src/types/Cache.ts b/src/cache/core/types/Cache.ts
similarity index 83%
rename from packages/apollo-cache/src/types/Cache.ts
rename to src/cache/core/types/Cache.ts
--- a/packages/apollo-cache/src/types/Cache.ts
+++ b/src/cache/core/types/Cache.ts
@@ -2,9 +2,6 @@ import { DataProxy } from './DataProxy';
export namespace Cache {
export type WatchCallback = (newData: any) => void;
- export interface EvictionResult {
- success: Boolean;
- }
export interface ReadOptions<TVariables = any>
extends DataProxy.Query<TVariables> {
@@ -27,11 +24,6 @@ export namespace Cache {
callback: WatchCallback;
}
- export interface EvictOptions<TVariables = any>
- extends DataProxy.Query<TVariables> {
- rootId?: string;
- }
-
export import DiffResult = DataProxy.DiffResult;
export import WriteQueryOptions = DataProxy.WriteQueryOptions;
export import WriteFragmentOptions = DataProxy.WriteFragmentOptions;
diff --git a/packages/apollo-cache/src/types/DataProxy.ts b/src/cache/core/types/DataProxy.ts
similarity index 100%
rename from packages/apollo-cache/src/types/DataProxy.ts
rename to src/cache/core/types/DataProxy.ts
diff --git a/packages/apollo-cache/src/utils.ts b/src/cache/core/utils.ts
similarity index 100%
rename from packages/apollo-cache/src/utils.ts
rename to src/cache/core/utils.ts
diff --git a/src/cache/index.ts b/src/cache/index.ts
new file mode 100644
--- /dev/null
+++ b/src/cache/index.ts
@@ -0,0 +1,10 @@
+export { Transaction, ApolloCache } from './core/cache';
+export { Cache } from './core/types/Cache';
+export { DataProxy } from './core/types/DataProxy';
+
+export {
+ InMemoryCache,
+ InMemoryCacheConfig,
+} from './inmemory/inMemoryCache';
+export { defaultDataIdFromObject } from './inmemory/policies';
+export * from './inmemory/types';
diff --git a/src/cache/inmemory/entityStore.ts b/src/cache/inmemory/entityStore.ts
new file mode 100644
--- /dev/null
+++ b/src/cache/inmemory/entityStore.ts
@@ -0,0 +1,506 @@
+import { dep, OptimisticDependencyFunction, KeyTrie } from 'optimism';
+import { invariant } from 'ts-invariant';
+import { equal } from '@wry/equality';
+
+import { isReference, StoreValue } from '../../utilities/graphql/storeUtils';
+import {
+ DeepMerger,
+ ReconcilerFunction,
+} from '../../utilities/common/mergeDeep';
+import { canUseWeakMap } from '../../utilities/common/canUse';
+import { NormalizedCache, NormalizedCacheObject, StoreObject } from './types';
+import {
+ getTypenameFromStoreObject,
+ fieldNameFromStoreName,
+} from './helpers';
+
+const hasOwn = Object.prototype.hasOwnProperty;
+
+export abstract class EntityStore implements NormalizedCache {
+ protected data: NormalizedCacheObject = Object.create(null);
+
+ public readonly group: CacheGroup;
+
+ public abstract addLayer(
+ layerId: string,
+ replay: (layer: EntityStore) => any,
+ ): EntityStore;
+
+ public abstract removeLayer(layerId: string): EntityStore;
+
+ // Although the EntityStore class is abstract, it contains concrete
+ // implementations of the various NormalizedCache interface methods that
+ // are inherited by the Root and Layer subclasses.
+
+ public toObject(): NormalizedCacheObject {
+ return { ...this.data };
+ }
+
+ public has(dataId: string): boolean {
+ return this.get(dataId) !== void 0;
+ }
+
+ public get(dataId: string): StoreObject {
+ this.group.depend(dataId);
+ return this.data[dataId];
+ }
+
+ public getFieldValue(dataId: string, storeFieldName: string): StoreValue {
+ this.group.depend(dataId, storeFieldName);
+ const storeObject = this.data[dataId];
+ return storeObject && storeObject[storeFieldName];
+ }
+
+ public merge(dataId: string, incoming: StoreObject): void {
+ const existing = this.get(dataId);
+ const merged = new DeepMerger(storeObjectReconciler)
+ .merge(existing, incoming, this);
+ if (merged !== existing) {
+ this.data[dataId] = merged;
+ delete this.refs[dataId];
+ if (this.group.caching) {
+ // First, invalidate any dependents that called get rather than
+ // getFieldValue.
+ this.group.dirty(dataId);
+ // Now invalidate dependents who called getFieldValue for any
+ // fields that are changing as a result of this merge.
+ Object.keys(incoming).forEach(storeFieldName => {
+ if (!existing || incoming[storeFieldName] !== existing[storeFieldName]) {
+ this.group.dirty(dataId, storeFieldName);
+ }
+ });
+ }
+ }
+ }
+
+ // If called with only one argument, removes the entire entity
+ // identified by dataId. If called with a fieldName as well, removes all
+ // fields of that entity whose names match fieldName, according to the
+ // fieldNameFromStoreName helper function.
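+  //
+  // For example (hypothetical store contents): delete('ROOT_QUERY', 'author')
+  // removes both an 'author' field and an 'author({"id":1})' field, since
+  // both normalize to the field name 'author'.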
+ public delete(dataId: string, fieldName?: string) {
+ const storeObject = this.get(dataId);
+
+ if (storeObject) {
+ // In case someone passes in a storeFieldName (field.name.value +
+ // arguments key), normalize it down to just the field name.
+ fieldName = fieldName && fieldNameFromStoreName(fieldName);
+
+ const storeNamesToDelete: string[] = [];
+ Object.keys(storeObject).forEach(storeFieldName => {
+ // If the field value has already been set to undefined, we do not
+ // need to delete it again.
+ if (storeObject[storeFieldName] !== void 0 &&
+ // If no fieldName provided, delete all fields from storeObject.
+ // If provided, delete all fields matching fieldName.
+ (!fieldName || fieldName === fieldNameFromStoreName(storeFieldName))) {
+ storeNamesToDelete.push(storeFieldName);
+ }
+ });
+
+ if (storeNamesToDelete.length) {
+ // If we only have to worry about the Root layer of the store,
+ // then we can safely delete fields within entities, or whole
+ // entities by ID. If this instanceof EntityStore.Layer, however,
+ // then we need to set the "deleted" values to undefined instead
+ // of actually deleting them, so the deletion does not un-shadow
+ // values inherited from lower layers of the store.
+ const canDelete = this instanceof EntityStore.Root;
+ const remove = (obj: Record<string, any>, key: string) => {
+ if (canDelete) {
+ delete obj[key];
+ } else {
+ obj[key] = void 0;
+ }
+ };
+
+ // Note that we do not delete the this.rootIds[dataId] retainment
+ // count for this ID, since an object with the same ID could appear in
+ // the store again, and should not have to be retained again.
+ // delete this.rootIds[dataId];
+ delete this.refs[dataId];
+
+ const fieldsToDirty: Record<string, true> = Object.create(null);
+
+ if (fieldName) {
+ // If we have a fieldName and it matches more than zero fields,
+ // then we need to make a copy of this.data[dataId] without the
+ // fields that are getting deleted.
+ const cleaned = this.data[dataId] = { ...storeObject };
+ storeNamesToDelete.forEach(storeFieldName => {
+ remove(cleaned, storeFieldName);
+ });
+ // Although it would be logically correct to dirty each
+ // storeFieldName in the loop above, we know that they all have
+ // the same name, according to fieldNameFromStoreName.
+ fieldsToDirty[fieldName] = true;
+ } else {
+ // If no fieldName was provided, then we delete the whole entity
+ // from the cache.
+ remove(this.data, dataId);
+ storeNamesToDelete.forEach(storeFieldName => {
+ const fieldName = fieldNameFromStoreName(storeFieldName);
+ fieldsToDirty[fieldName] = true;
+ });
+ }
+
+ if (this.group.caching) {
+ this.group.dirty(dataId);
+ Object.keys(fieldsToDirty).forEach(fieldName => {
+ this.group.dirty(dataId, fieldName);
+ });
+ }
+ }
+ }
+ }
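+
+  // For example (hypothetical IDs and fields): delete("ROOT_QUERY", "books")
+  // removes both the plain "books" field and any argument-specific entries
+  // like 'books({"genre":"sf"})', while delete("Book:1") removes the whole
+  // Book:1 entity.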
+
+ public clear(): void {
+ this.replace(null);
+ }
+
+ public replace(newData: NormalizedCacheObject | null): void {
+ Object.keys(this.data).forEach(dataId => {
+ if (!(newData && hasOwn.call(newData, dataId))) {
+ this.delete(dataId);
+ }
+ });
+ if (newData) {
+ Object.keys(newData).forEach(dataId => {
+ this.merge(dataId, newData[dataId]);
+ });
+ }
+ }
+
+ // Maps root entity IDs to the number of times they have been retained, minus
+ // the number of times they have been released. Retained entities keep other
+ // entities they reference (even indirectly) from being garbage collected.
+ private rootIds: {
+ [rootId: string]: number;
+ } = Object.create(null);
+
+ public retain(rootId: string): number {
+ return this.rootIds[rootId] = (this.rootIds[rootId] || 0) + 1;
+ }
+
+ public release(rootId: string): number {
+ if (this.rootIds[rootId] > 0) {
+ const count = --this.rootIds[rootId];
+ if (!count) delete this.rootIds[rootId];
+ return count;
+ }
+ return 0;
+ }
+
+ // This method will be overridden in the Layer class to merge root IDs for all
+ // layers (including the root).
+ public getRootIdSet() {
+ return new Set(Object.keys(this.rootIds));
+ }
+
+ // The goal of garbage collection is to remove IDs from the Root layer of the
+ // store that are no longer reachable starting from any IDs that have been
+ // explicitly retained (see retain and release, above). Returns an array of
+ // dataId strings that were removed from the store.
+ public gc() {
+ const ids = this.getRootIdSet();
+ const snapshot = this.toObject();
+ ids.forEach(id => {
+ if (hasOwn.call(snapshot, id)) {
+ // Because we are iterating over an ECMAScript Set, the IDs we add here
+ // will be visited in later iterations of the forEach loop only if they
+ // were not previously contained by the Set.
+ Object.keys(this.findChildRefIds(id)).forEach(ids.add, ids);
+ // By removing IDs from the snapshot object here, we protect them from
+ // getting removed from the root store layer below.
+ delete snapshot[id];
+ }
+ });
+ const idsToRemove = Object.keys(snapshot);
+ if (idsToRemove.length) {
+ let root: EntityStore = this;
+ while (root instanceof Layer) root = root.parent;
+ idsToRemove.forEach(id => root.delete(id));
+ }
+ return idsToRemove;
+ }
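+
+  // A minimal usage sketch (the entity ID "Book:1" is hypothetical):
+  //
+  //   store.retain("Book:1"); // Book:1 now survives gc, along with every
+  //                           // entity it references, directly or indirectly
+  //   store.gc();             // removes only unreachable, unretained IDs
+  //   store.release("Book:1");
+  //   store.gc();             // Book:1 can now be collected, unless it is
+  //                           // still reachable from another retained ID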
+
+ // Lazily tracks { __ref: <dataId> } strings contained by this.data[dataId].
+ private refs: {
+ [dataId: string]: Record<string, true>;
+ } = Object.create(null);
+
+ public findChildRefIds(dataId: string): Record<string, true> {
+ if (!hasOwn.call(this.refs, dataId)) {
+ const found = this.refs[dataId] = Object.create(null);
+ const workSet = new Set([this.data[dataId]]);
+ // Within the store, only arrays and objects can contain child entity
+ // references, so we can prune the traversal using this predicate:
+ const canTraverse = (obj: any) => obj !== null && typeof obj === 'object';
+ workSet.forEach(obj => {
+ if (isReference(obj)) {
+ found[obj.__ref] = true;
+ } else if (canTraverse(obj)) {
+ Object.values(obj)
+ // No need to add primitive values to the workSet, since they cannot
+ // contain reference objects.
+ .filter(canTraverse)
+ .forEach(workSet.add, workSet);
+ }
+ });
+ }
+ return this.refs[dataId];
+ }
+
+ // Used to compute cache keys specific to this.group.
+ public makeCacheKey(...args: any[]) {
+ return this.group.keyMaker.lookupArray(args);
+ }
+}
+
+// A single CacheGroup represents a set of one or more EntityStore objects,
+// typically the Root store in a CacheGroup by itself, and all active Layer
+// stores in a group together. A single EntityStore object belongs to only
+// one CacheGroup, store.group. The CacheGroup is responsible for tracking
+// dependencies, so store.group is helpful for generating unique keys for
+// cached results that need to be invalidated when/if those dependencies
+// change. If we used the EntityStore objects themselves as cache keys (that
+// is, store rather than store.group), the cache would become unnecessarily
+// fragmented by all the different Layer objects. Instead, the CacheGroup
+// approach allows all optimistic Layer objects in the same linked list to
+// belong to one CacheGroup, with the non-optimistic Root object belonging
+// to another CacheGroup, allowing resultCaching dependencies to be tracked
+// separately for optimistic and non-optimistic entity data.
+class CacheGroup {
+ private d: OptimisticDependencyFunction<string> | null = null;
+
+ constructor(public readonly caching: boolean) {
+ this.d = caching ? dep<string>() : null;
+ }
+
+ public depend(dataId: string, storeFieldName?: string) {
+ if (this.d) {
+ this.d(makeDepKey(dataId, storeFieldName));
+ }
+ }
+
+ public dirty(dataId: string, storeFieldName?: string) {
+ if (this.d) {
+ this.d.dirty(
+ typeof storeFieldName === "string"
+ ? makeDepKey(dataId, storeFieldName)
+ : makeDepKey(dataId),
+ );
+ }
+ }
+
+ // Used by the EntityStore#makeCacheKey method to compute cache keys
+ // specific to this CacheGroup.
+ public readonly keyMaker = new KeyTrie<object>(canUseWeakMap);
+}
+
+function makeDepKey(dataId: string, storeFieldName?: string) {
+ const parts = [dataId];
+ if (typeof storeFieldName === "string") {
+ parts.push(fieldNameFromStoreName(storeFieldName));
+ }
+ return JSON.stringify(parts);
+}
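+
+// For example (hypothetical ID and store field name):
+//
+//   makeDepKey("ROOT_QUERY", 'books({"genre":"sf"})') === '["ROOT_QUERY","books"]'
+//
+// Argument strings are stripped by fieldNameFromStoreName, so every
+// storeFieldName for the same field shares a single dependency key.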
+
+export namespace EntityStore {
+ // Refer to this class as EntityStore.Root outside this namespace.
+ export class Root extends EntityStore {
+ // Although each Root instance gets its own unique CacheGroup object,
+ // any Layer instances created by calling addLayer need to share a
+ // single distinct CacheGroup object. Since this shared object must
+ // outlast the Layer instances themselves, it needs to be created and
+ // owned by the Root instance.
+ private sharedLayerGroup: CacheGroup = null;
+
+ constructor({
+ resultCaching = true,
+ seed,
+ }: {
+ resultCaching?: boolean;
+ seed?: NormalizedCacheObject;
+ }) {
+ super();
+ (this.group as any) = new CacheGroup(resultCaching);
+ this.sharedLayerGroup = new CacheGroup(resultCaching);
+ if (seed) this.replace(seed);
+ }
+
+ public addLayer(
+ layerId: string,
+ replay: (layer: EntityStore) => any,
+ ): EntityStore {
+ // The replay function will be called in the Layer constructor.
+ return new Layer(layerId, this, replay, this.sharedLayerGroup);
+ }
+
+ public removeLayer(layerId: string): Root {
+ // Never remove the root layer.
+ return this;
+ }
+ }
+}
+
+// Not exported, since all Layer instances are created by the addLayer method
+// of the EntityStore.Root class.
+class Layer extends EntityStore {
+ constructor(
+ public readonly id: string,
+ public readonly parent: Layer | EntityStore.Root,
+ public readonly replay: (layer: EntityStore) => any,
+ public readonly group: CacheGroup,
+ ) {
+ super();
+ replay(this);
+ }
+
+ public addLayer(
+ layerId: string,
+ replay: (layer: EntityStore) => any,
+ ): EntityStore {
+ return new Layer(layerId, this, replay, this.group);
+ }
+
+ public removeLayer(layerId: string): EntityStore {
+ // Remove all instances of the given id, not just the first one.
+ const parent = this.parent.removeLayer(layerId);
+
+ if (layerId === this.id) {
+ // Dirty every ID we're removing.
+ // TODO Some of these IDs could escape dirtying if value unchanged.
+ if (this.group.caching) {
+ Object.keys(this.data).forEach(dataId => this.delete(dataId));
+ }
+ return parent;
+ }
+
+ // No changes are necessary if the parent chain remains identical.
+ if (parent === this.parent) return this;
+
+ // Recreate this layer on top of the new parent.
+ return parent.addLayer(this.id, this.replay);
+ }
+
+ public toObject(): NormalizedCacheObject {
+ return {
+ ...this.parent.toObject(),
+ ...this.data,
+ };
+ }
+
+ public get(dataId: string): StoreObject {
+ if (hasOwn.call(this.data, dataId)) {
+ return super.get(dataId);
+ }
+
+    // If this layer has its own CacheGroup and it's not the one
+    // this.parent is using, we need to depend on the given dataId via
+    // this.group before delegating to the parent. This check saves us
+    // from recording the dependency in every optimistic layer we examine,
+    // but ensures we record it in the last optimistic layer before we
+    // reach the root layer.
+
+ if (this.group.caching && this.group !== this.parent.group) {
+ this.group.depend(dataId);
+ }
+
+ return this.parent.get(dataId);
+ }
+
+ public getFieldValue(dataId: string, storeFieldName: string): StoreValue {
+ if (hasOwn.call(this.data, dataId)) {
+ const storeObject = this.data[dataId];
+ if (storeObject && hasOwn.call(storeObject, storeFieldName)) {
+ return super.getFieldValue(dataId, storeFieldName);
+ }
+ }
+
+ if (this.group.caching && this.group !== this.parent.group) {
+ this.group.depend(dataId, storeFieldName);
+ }
+
+ return this.parent.getFieldValue(dataId, storeFieldName);
+ }
+
+ // Return a Set<string> of all the ID strings that have been retained by this
+ // Layer *and* any layers/roots beneath it.
+ public getRootIdSet(): Set<string> {
+ const ids = this.parent.getRootIdSet();
+ super.getRootIdSet().forEach(ids.add, ids);
+ return ids;
+ }
+
+ public findChildRefIds(dataId: string): Record<string, true> {
+ const fromParent = this.parent.findChildRefIds(dataId);
+ return hasOwn.call(this.data, dataId) ? {
+ ...fromParent,
+ ...super.findChildRefIds(dataId),
+ } : fromParent;
+ }
+}
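+
+// A rough sketch of how layers stack and unwind (layer IDs hypothetical):
+//
+//   let store: EntityStore = new EntityStore.Root({});
+//   store = store.addLayer("optimistic-1", layer => { /* replay writes */ });
+//   store = store.addLayer("optimistic-2", layer => { /* replay writes */ });
+//   store = store.removeLayer("optimistic-1");
+//
+// Removing "optimistic-1" returns a store where "optimistic-2" has been
+// recreated directly on top of the Root, by calling its replay function again.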
+
+const storeObjectReconciler: ReconcilerFunction<[EntityStore]> = function (
+ existingObject,
+ incomingObject,
+ property,
+  // This parameter comes from the additional argument we pass to the
+  // DeepMerger's merge method in EntityStore#merge, above.
+ store,
+) {
+ // In the future, reconciliation logic may depend on the type of the parent
+ // StoreObject, not just the values of the given property.
+ const existing = existingObject[property];
+ const incoming = incomingObject[property];
+
+ if (
+ existing !== incoming &&
+ // The DeepMerger class has various helpful utilities that we might as
+ // well reuse here.
+ this.isObject(existing) &&
+ this.isObject(incoming)
+ ) {
+ const eType = getTypenameFromStoreObject(store, existing);
+ const iType = getTypenameFromStoreObject(store, incoming);
+ // If both objects have a typename and the typename is different, let the
+ // incoming object win. The typename can change when a different subtype
+ // of a union or interface is written to the store.
+ if (
+ typeof eType === 'string' &&
+ typeof iType === 'string' &&
+ eType !== iType
+ ) {
+ return incoming;
+ }
+
+ invariant(
+ !isReference(existing) || isReference(incoming),
+ `Store error: the application attempted to write an object with no provided id but the store already contains an id of ${existing.__ref} for this object.`,
+ );
+
+ // It's worth checking deep equality here (even though blindly
+ // returning incoming would be logically correct) because preserving
+ // the referential identity of existing data can prevent needless
+ // rereading and rerendering.
+ if (equal(existing, incoming)) {
+ return existing;
+ }
+ }
+
+ // In all other cases, incoming replaces existing without any effort to
+ // merge them deeply, since custom merge functions have already been
+  // applied to the incoming data by Policies#applyMerges.
+ return incoming;
+}
+
+export function supportsResultCaching(store: any): store is EntityStore {
+  // When result caching is disabled, store.group.caching will be false.
+ return !!(store instanceof EntityStore && store.group.caching);
+}
+
+export function defaultNormalizedCacheFactory(
+ seed?: NormalizedCacheObject,
+): NormalizedCache {
+ return new EntityStore.Root({ resultCaching: true, seed });
+}
diff --git a/packages/apollo-cache-inmemory/src/fixPolyfills.ts b/src/cache/inmemory/fixPolyfills.ts
similarity index 100%
rename from packages/apollo-cache-inmemory/src/fixPolyfills.ts
rename to src/cache/inmemory/fixPolyfills.ts
diff --git a/src/cache/inmemory/helpers.ts b/src/cache/inmemory/helpers.ts
new file mode 100644
--- /dev/null
+++ b/src/cache/inmemory/helpers.ts
@@ -0,0 +1,75 @@
+import { FieldNode } from 'graphql';
+import { NormalizedCache, StoreObject } from './types';
+import { Reference, isReference, StoreValue, isField } from '../../utilities/graphql/storeUtils';
+import { DeepMerger, ReconcilerFunction } from '../../utilities/common/mergeDeep';
+
+export function getTypenameFromStoreObject(
+ store: NormalizedCache,
+ objectOrReference: StoreObject | Reference,
+): string | undefined {
+ return isReference(objectOrReference)
+ ? store.getFieldValue(objectOrReference.__ref, "__typename") as string
+ : objectOrReference && objectOrReference.__typename;
+}
+
+const FieldNamePattern = /^[_A-Za-z0-9]+/;
+export function fieldNameFromStoreName(storeFieldName: string) {
+ const match = storeFieldName.match(FieldNamePattern);
+ return match && match[0];
+}
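+
+// For example (hypothetical store field name):
+//   fieldNameFromStoreName('books({"genre":"sf"})') === "books"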
+
+// Invoking merge functions needs to happen after processSelectionSet has
+// finished, but requires information that is more readily available
+// during processSelectionSet, so processSelectionSet embeds special
+// objects of the following shape within its result tree, which then must
+// be removed by calling Policies#applyMerges.
+export interface FieldValueToBeMerged {
+ __field: FieldNode;
+ __typename: string;
+ __value: StoreValue;
+}
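+
+// For a hypothetical Book type with a custom merge function for its title
+// field, such a placeholder might look like:
+//   { __field: <FieldNode for title>, __typename: "Book", __value: "1984" }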
+
+export function isFieldValueToBeMerged(
+ value: any,
+): value is FieldValueToBeMerged {
+ const field = value && value.__field;
+ return field && isField(field);
+}
+
+export function makeProcessedFieldsMerger() {
+ // A DeepMerger that merges arrays and objects structurally, but otherwise
+ // prefers incoming scalar values over existing values. Provides special
+ // treatment for FieldValueToBeMerged objects. Used to accumulate fields
+ // when processing a single selection set.
+ return new DeepMerger(reconcileProcessedFields);
+}
+
+const reconcileProcessedFields: ReconcilerFunction<[]> = function (
+ existingObject,
+ incomingObject,
+ property,
+) {
+ const existing = existingObject[property];
+ const incoming = incomingObject[property];
+
+ if (isFieldValueToBeMerged(existing)) {
+ existing.__value = this.merge(
+ existing.__value,
+ isFieldValueToBeMerged(incoming)
+ // TODO Check compatibility of __field and __typename properties?
+ ? incoming.__value
+ : incoming,
+ );
+ return existing;
+ }
+
+ if (isFieldValueToBeMerged(incoming)) {
+ incoming.__value = this.merge(
+ existing,
+ incoming.__value,
+ );
+ return incoming;
+ }
+
+ return this.merge(existing, incoming);
+}
diff --git a/src/cache/inmemory/inMemoryCache.ts b/src/cache/inmemory/inMemoryCache.ts
new file mode 100644
--- /dev/null
+++ b/src/cache/inmemory/inMemoryCache.ts
@@ -0,0 +1,307 @@
+// Make builtins like Map and Set safe to use with non-extensible objects.
+import './fixPolyfills';
+
+import { DocumentNode } from 'graphql';
+import { wrap } from 'optimism';
+
+import { ApolloCache, Transaction } from '../core/cache';
+import { Cache } from '../core/types/Cache';
+import { addTypenameToDocument } from '../../utilities/graphql/transform';
+import {
+ ApolloReducerConfig,
+ NormalizedCacheObject,
+ StoreObject,
+} from './types';
+import { StoreReader } from './readFromStore';
+import { StoreWriter } from './writeToStore';
+import { EntityStore, supportsResultCaching } from './entityStore';
+import {
+ defaultDataIdFromObject,
+ PossibleTypesMap,
+ Policies,
+ TypePolicies,
+} from './policies';
+
+export interface InMemoryCacheConfig extends ApolloReducerConfig {
+ resultCaching?: boolean;
+ possibleTypes?: PossibleTypesMap;
+ typePolicies?: TypePolicies;
+}
+
+const defaultConfig: InMemoryCacheConfig = {
+ dataIdFromObject: defaultDataIdFromObject,
+ addTypename: true,
+ resultCaching: true,
+ typePolicies: {},
+};
+
+export class InMemoryCache extends ApolloCache<NormalizedCacheObject> {
+ private data: EntityStore;
+ private optimisticData: EntityStore;
+
+ protected config: InMemoryCacheConfig;
+ private watches = new Set<Cache.WatchOptions>();
+ private addTypename: boolean;
+ private policies: Policies;
+
+ private typenameDocumentCache = new Map<DocumentNode, DocumentNode>();
+ private storeReader: StoreReader;
+ private storeWriter: StoreWriter;
+
+ // Set this while in a transaction to prevent broadcasts...
+ // don't forget to turn it back on!
+ private silenceBroadcast: boolean = false;
+
+ constructor(config: InMemoryCacheConfig = {}) {
+ super();
+ this.config = { ...defaultConfig, ...config };
+ this.addTypename = !!this.config.addTypename;
+
+ this.policies = new Policies({
+ dataIdFromObject: this.config.dataIdFromObject,
+ possibleTypes: this.config.possibleTypes,
+ typePolicies: this.config.typePolicies,
+ });
+
+ // Passing { resultCaching: false } in the InMemoryCache constructor options
+ // will completely disable dependency tracking, which will improve memory
+ // usage but worsen the performance of repeated reads.
+ this.data = new EntityStore.Root({
+ resultCaching: this.config.resultCaching,
+ });
+
+ // When no optimistic writes are currently active, cache.optimisticData ===
+ // cache.data, so there are no additional layers on top of the actual data.
+ // When an optimistic update happens, this.optimisticData will become a
+    // linked list of Layer objects that terminates with the
+ // original this.data cache object.
+ this.optimisticData = this.data;
+
+ this.storeWriter = new StoreWriter({
+ policies: this.policies,
+ });
+
+ this.storeReader = new StoreReader({
+ addTypename: this.addTypename,
+ policies: this.policies,
+ });
+
+ const cache = this;
+ const { maybeBroadcastWatch } = cache;
+ this.maybeBroadcastWatch = wrap((c: Cache.WatchOptions) => {
+ return maybeBroadcastWatch.call(this, c);
+ }, {
+ makeCacheKey(c: Cache.WatchOptions) {
+ // Return a cache key (thus enabling result caching) only if we're
+ // currently using a data store that can track cache dependencies.
+ const store = c.optimistic ? cache.optimisticData : cache.data;
+ if (supportsResultCaching(store)) {
+ const { optimistic, rootId, variables } = c;
+ return store.makeCacheKey(
+ c.query,
+ // Different watches can have the same query, optimistic
+ // status, rootId, and variables, but if their callbacks are
+ // different, the (identical) result needs to be delivered to
+ // each distinct callback. The easiest way to achieve that
+ // separation is to include c.callback in the cache key for
+ // maybeBroadcastWatch calls. See issue #5733.
+ c.callback,
+ JSON.stringify({ optimistic, rootId, variables }),
+ );
+ }
+ }
+ });
+ }
+
+ public restore(data: NormalizedCacheObject): this {
+ if (data) this.data.replace(data);
+ return this;
+ }
+
+ public extract(optimistic: boolean = false): NormalizedCacheObject {
+ return (optimistic ? this.optimisticData : this.data).toObject();
+ }
+
+ public read<T>(options: Cache.ReadOptions): T | null {
+ if (typeof options.rootId === 'string' &&
+ !this.data.has(options.rootId)) {
+ return null;
+ }
+
+ return this.storeReader.readQueryFromStore({
+ store: options.optimistic ? this.optimisticData : this.data,
+ query: options.query,
+ variables: options.variables,
+ rootId: options.rootId,
+ config: this.config,
+ }) || null;
+ }
+
+ public write(options: Cache.WriteOptions): void {
+ this.storeWriter.writeQueryToStore({
+ store: this.data,
+ query: options.query,
+ result: options.result,
+ dataId: options.dataId,
+ variables: options.variables,
+ });
+
+ this.broadcastWatches();
+ }
+
+ public diff<T>(options: Cache.DiffOptions): Cache.DiffResult<T> {
+ return this.storeReader.diffQueryAgainstStore({
+ store: options.optimistic ? this.optimisticData : this.data,
+ query: options.query,
+ variables: options.variables,
+ returnPartialData: options.returnPartialData,
+ config: this.config,
+ });
+ }
+
+ public watch(watch: Cache.WatchOptions): () => void {
+ this.watches.add(watch);
+
+ return () => {
+ this.watches.delete(watch);
+ };
+ }
+
+ // Request garbage collection of unreachable normalized entities.
+ public gc() {
+ return this.optimisticData.gc();
+ }
+
+ // Call this method to ensure the given root ID remains in the cache after
+ // garbage collection, along with its transitive child entities. Note that
+ // the cache automatically retains all directly written entities. By default,
+ // the retainment persists after optimistic updates are removed. Pass true
+ // for the optimistic argument if you would prefer for the retainment to be
+ // discarded when the top-most optimistic layer is removed. Returns the
+ // resulting (non-negative) retainment count.
+ public retain(rootId: string, optimistic?: boolean): number {
+ return (optimistic ? this.optimisticData : this.data).retain(rootId);
+ }
+
+ // Call this method to undo the effect of the retain method, above. Once the
+ // retainment count falls to zero, the given ID will no longer be preserved
+ // during garbage collection, though it may still be preserved by other safe
+ // entities that refer to it. Returns the resulting (non-negative) retainment
+ // count, in case that's useful.
+ public release(rootId: string, optimistic?: boolean): number {
+ return (optimistic ? this.optimisticData : this.data).release(rootId);
+ }
+
+ // Returns the canonical ID for a given StoreObject, obeying typePolicies
+ // and keyFields (and dataIdFromObject, if you still use that). At minimum,
+ // the object must contain a __typename and any primary key fields required
+ // to identify entities of that type. If you pass a query result object, be
+ // sure that none of the primary key fields have been renamed by aliasing.
+ public identify(object: StoreObject): string | null {
+ return this.policies.identify(object);
+ }
+
+ public evict(dataId: string, fieldName?: string): boolean {
+ if (this.optimisticData.has(dataId)) {
+ // Note that this deletion does not trigger a garbage collection, which
+ // is convenient in cases where you want to evict multiple entities before
+ // performing a single garbage collection.
+ this.optimisticData.delete(dataId, fieldName);
+ this.broadcastWatches();
+ return !this.optimisticData.has(dataId);
+ }
+ return false;
+ }
+
+ public reset(): Promise<void> {
+ this.data.clear();
+ this.optimisticData = this.data;
+ this.broadcastWatches();
+ return Promise.resolve();
+ }
+
+ public removeOptimistic(idToRemove: string) {
+ const newOptimisticData = this.optimisticData.removeLayer(idToRemove);
+ if (newOptimisticData !== this.optimisticData) {
+ this.optimisticData = newOptimisticData;
+ this.broadcastWatches();
+ }
+ }
+
+ public performTransaction(
+ transaction: (proxy: InMemoryCache) => any,
+ // This parameter is not part of the performTransaction signature inherited
+ // from the ApolloCache abstract class, but it's useful because it saves us
+ // from duplicating this implementation in recordOptimisticTransaction.
+ optimisticId?: string,
+ ) {
+ const perform = (layer?: EntityStore) => {
+ const proxy: InMemoryCache = Object.create(this);
+ proxy.silenceBroadcast = true;
+ if (layer) {
+ // The proxy object is just like this except that silenceBroadcast
+ // is set to true, and proxy.data and proxy.optimisticData both
+ // point to the same layer.
+ proxy.data = proxy.optimisticData = layer;
+ }
+ // Because the proxy object can simply be forgotten, we do not need
+ // to wrap this call with a try-finally block.
+ return transaction(proxy);
+ };
+
+ if (typeof optimisticId === 'string') {
+ // Note that there can be multiple layers with the same optimisticId.
+ // When removeOptimistic(id) is called for that id, all matching layers
+ // will be removed, and the remaining layers will be reapplied.
+ this.optimisticData = this.optimisticData.addLayer(optimisticId, perform);
+ } else {
+ // If we don't have an optimisticId, perform the transaction anyway. Note
+ // that this.optimisticData.addLayer calls perform, too.
+ perform();
+ }
+
+ // This broadcast does nothing if this.silenceBroadcast is true.
+ this.broadcastWatches();
+ }
+
+ public recordOptimisticTransaction(
+ transaction: Transaction<NormalizedCacheObject>,
+ id: string,
+ ) {
+ return this.performTransaction(transaction, id);
+ }
+
+ public transformDocument(document: DocumentNode): DocumentNode {
+ if (this.addTypename) {
+ let result = this.typenameDocumentCache.get(document);
+ if (!result) {
+ result = addTypenameToDocument(document);
+ this.typenameDocumentCache.set(document, result);
+ // If someone calls transformDocument and then mistakenly passes the
+ // result back into an API that also calls transformDocument, make sure
+ // we don't keep creating new query documents.
+ this.typenameDocumentCache.set(result, result);
+ }
+ return result;
+ }
+ return document;
+ }
+
+ protected broadcastWatches() {
+ if (!this.silenceBroadcast) {
+ this.watches.forEach(c => this.maybeBroadcastWatch(c));
+ }
+ }
+
+ // This method is wrapped in the constructor so that it will be called only
+ // if the data that would be broadcast has changed.
+ private maybeBroadcastWatch(c: Cache.WatchOptions) {
+ c.callback(
+ this.diff({
+ query: c.query,
+ variables: c.variables,
+ optimistic: c.optimistic,
+ }),
+ );
+ }
+}
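+
+// A minimal usage sketch (the Book type and its key fields are hypothetical):
+//
+//   const cache = new InMemoryCache({
+//     typePolicies: {
+//       Book: { keyFields: ["isbn"] },
+//     },
+//   });
+//   cache.identify({ __typename: "Book", isbn: "9981" });
+//   // => 'Book:{"isbn":"9981"}'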
diff --git a/src/cache/inmemory/policies.ts b/src/cache/inmemory/policies.ts
new file mode 100644
--- /dev/null
+++ b/src/cache/inmemory/policies.ts
@@ -0,0 +1,811 @@
+import {
+ InlineFragmentNode,
+ FragmentDefinitionNode,
+ SelectionSetNode,
+ FieldNode,
+} from "graphql";
+
+import { dep, KeyTrie } from 'optimism';
+import invariant from 'ts-invariant';
+
+import {
+ FragmentMap,
+ getFragmentFromSelection,
+} from '../../utilities/graphql/fragments';
+
+import {
+ isField,
+ getTypenameFromResult,
+ valueToObjectRepresentation,
+ storeKeyNameFromField,
+ StoreValue,
+ argumentsObjectFromField,
+ Reference,
+ makeReference,
+ isReference,
+} from '../../utilities/graphql/storeUtils';
+
+import { maybeDeepFreeze } from '../../utilities/common/maybeDeepFreeze';
+import { canUseWeakMap } from '../../utilities/common/canUse';
+
+import {
+ IdGetter,
+ StoreObject,
+ NormalizedCache,
+} from "./types";
+
+import {
+ fieldNameFromStoreName,
+ FieldValueToBeMerged,
+ isFieldValueToBeMerged,
+} from './helpers';
+
+const hasOwn = Object.prototype.hasOwnProperty;
+
+export type TypePolicies = {
+ [__typename: string]: TypePolicy;
+}
+
+// TypeScript 3.7 will allow recursive type aliases, so this should work:
+// type KeySpecifier = (string | KeySpecifier)[]
+type KeySpecifier = (string | any[])[];
+
+type KeyFieldsFunction = (
+ object: Readonly<StoreObject>,
+ context: {
+ typename: string;
+ selectionSet?: SelectionSetNode;
+ fragmentMap?: FragmentMap;
+ policies: Policies;
+ },
+) => ReturnType<IdGetter>;
+
+type TypePolicy = {
+ // Allows defining the primary key fields for this type, either using an
+ // array of field names or a function that returns an arbitrary string.
+ keyFields?: KeySpecifier | KeyFieldsFunction | false;
+
+ // In the rare event that your schema happens to use a different
+  // __typename for the root Query, Mutation, and/or Subscription types,
+  // you can express your deviant preferences by enabling one of these options.
+ queryType?: true,
+ mutationType?: true,
+ subscriptionType?: true,
+
+ fields?: {
+ [fieldName: string]:
+ | FieldPolicy<StoreValue>
+ | FieldReadFunction<StoreValue>;
+ }
+};
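+
+// For example, a hypothetical Book type keyed by its ISBN, with a field
+// policy that always prefers the most recently written title:
+//
+//   const bookPolicy: TypePolicy = {
+//     keyFields: ["isbn"],
+//     fields: {
+//       title: {
+//         merge(_existing, incoming) { return incoming; },
+//       },
+//     },
+//   };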
+
+type KeyArgsFunction = (
+ field: FieldNode,
+ context: {
+ typename: string;
+ variables: Record<string, any>;
+ policies: Policies;
+ },
+) => ReturnType<IdGetter>;
+
+export type FieldPolicy<TValue> = {
+ keyArgs?: KeySpecifier | KeyArgsFunction | false;
+ read?: FieldReadFunction<TValue>;
+ merge?: FieldMergeFunction<TValue>;
+};
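+
+// A sketch of a FieldPolicy for a hypothetical paginated list field:
+//
+//   const feedPolicy: FieldPolicy<any[]> = {
+//     keyArgs: false, // one list per field, regardless of offset/limit args
+//     merge(existing = [], incoming) {
+//       return [...existing, ...incoming];
+//     },
+//   };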
+
+export type FieldValueGetter =
+ ReturnType<Policies["makeFieldValueGetter"]>;
+
+type StorageType = Record<string, any>;
+
+interface FieldFunctionOptions {
+ args: Record<string, any> | null;
+
+ // The name of the field, equal to options.field.name.value when
+ // options.field is available. Useful if you reuse the same function for
+ // multiple fields, and you need to know which field you're currently
+ // processing. Always a string, even when options.field is null.
+ fieldName: string;
+
+ // The FieldNode object used to read this field. Useful if you need to
+ // know about other attributes of the field, such as its directives. This
+ // option will be null when a string was passed to options.readField.
+ field: FieldNode | null;
+
+ variables?: Record<string, any>;
+
+ // In rare advanced use cases, a read or merge function may wish to
+ // consult the current Policies object, for example to call
+ // getStoreFieldName manually.
+ policies: Policies;
+
+ // Utilities for dealing with { __ref } objects.
+ isReference: typeof isReference;
+ toReference: Policies["toReference"];
+
+ // Helper function for reading other fields within the current object.
+ // If a foreign object or reference is provided, the field will be read
+ // from that object instead of the current object, so this function can
+ // be used (together with isReference) to examine the cache outside the
+ // current object. If a FieldNode is passed instead of a string, and
+ // that FieldNode has arguments, the same options.variables will be used
+ // to compute the argument values. Note that this function will invoke
+ // custom read functions for other fields, if defined. Always returns
+ // immutable data (enforced with Object.freeze in development).
+ readField<T = StoreValue>(
+ nameOrField: string | FieldNode,
+ foreignObjOrRef?: StoreObject | Reference,
+ ): Readonly<T>;
+
+ // A handy place to put field-specific data that you want to survive
+ // across multiple read function calls. Useful for field-level caching,
+ // if your read function does any expensive work.
+ storage: StorageType;
+
+ // Call this function to invalidate any cached queries that previously
+ // consumed this field. If you use options.storage to cache the result
+ // of an expensive read function, updating options.storage and then
+ // calling options.invalidate() can be a good way to deliver the new
+ // result asynchronously.
+ invalidate(): void;
+}
+
+type FieldReadFunction<TExisting, TResult = TExisting> = (
+ // When reading a field, one often needs to know about any existing
+ // value stored for that field. If the field is read before any value
+ // has been written to the cache, this existing parameter will be
+ // undefined, which makes it easy to use a default parameter expression
+ // to supply the initial value. This parameter is positional (rather
+ // than one of the named options) because that makes it possible for the
+ // developer to annotate it with a type, without also having to provide
+ // a whole new type for the options object.
+ existing: Readonly<TExisting> | undefined,
+ options: FieldFunctionOptions,
+) => TResult;
+
+type FieldMergeFunction<TExisting> = (
+ existing: Readonly<TExisting> | undefined,
+ // The incoming parameter needs to be positional as well, for the same
+ // reasons discussed in FieldReadFunction above.
+ incoming: Readonly<StoreValue>,
+ options: FieldFunctionOptions,
+) => TExisting;
+
+export function defaultDataIdFromObject(object: StoreObject) {
+ const { __typename, id, _id } = object;
+ if (typeof __typename === "string") {
+ if (typeof id !== "undefined") return `${__typename}:${id}`;
+ if (typeof _id !== "undefined") return `${__typename}:${_id}`;
+ }
+ return null;
+}
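+
+// For example: defaultDataIdFromObject({ __typename: "Book", id: 1 })
+// returns "Book:1", and an object without a __typename yields null.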
+
+const nullKeyFieldsFn: KeyFieldsFunction = () => null;
+const simpleKeyArgsFn: KeyArgsFunction = field => field.name.value;
+
+export type PossibleTypesMap = {
+ [supertype: string]: string[];
+};
+
+export class Policies {
+ private typePolicies: {
+ [__typename: string]: {
+ keyFn?: KeyFieldsFunction;
+ subtypes?: Set<string>;
+ fields?: {
+ [fieldName: string]: {
+ keyFn?: KeyArgsFunction;
+ read?: FieldReadFunction<StoreValue>;
+ merge?: FieldMergeFunction<StoreValue>;
+ };
+ };
+ };
+ } = Object.create(null);
+
+ public readonly rootTypenamesById: Readonly<Record<string, string>> = {
+ __proto__: null, // Equivalent to Object.create(null)
+ ROOT_QUERY: "Query",
+ ROOT_MUTATION: "Mutation",
+ ROOT_SUBSCRIPTION: "Subscription",
+ };
+
+ public readonly usingPossibleTypes = false;
+
+ constructor(private config: {
+ dataIdFromObject?: KeyFieldsFunction;
+ possibleTypes?: PossibleTypesMap;
+ typePolicies?: TypePolicies;
+ } = {}) {
+ this.config = {
+ dataIdFromObject: defaultDataIdFromObject,
+ ...config,
+ };
+
+ if (config.possibleTypes) {
+ this.addPossibleTypes(config.possibleTypes);
+ }
+
+ if (config.typePolicies) {
+ this.addTypePolicies(config.typePolicies);
+ }
+ }
+
+ // Bound function that returns a Reference using this.identify.
+ // Provided to read/merge functions as part of their options.
+ public toReference = (
+ object: StoreObject,
+ selectionSet?: SelectionSetNode,
+ fragmentMap?: FragmentMap,
+ ) => {
+ const id = this.identify(object, selectionSet, fragmentMap);
+ return id && makeReference(id);
+ }
+
+ public identify(
+ object: StoreObject,
+ selectionSet?: SelectionSetNode,
+ fragmentMap?: FragmentMap,
+ ): string | null {
+ // TODO Consider subtypes?
+ // TODO Use an AliasMap here?
+ const typename = selectionSet && fragmentMap
+ ? getTypenameFromResult(object, selectionSet, fragmentMap)
+ : object.__typename;
+
+ const context = {
+ typename,
+ selectionSet,
+ fragmentMap,
+ policies: this,
+ };
+
+ let id: string | null;
+
+ const policy = this.getTypePolicy(typename, false);
+ const keyFn = policy && policy.keyFn;
+ if (keyFn) {
+ id = keyFn(object, context);
+ } else {
+ id = this.config.dataIdFromObject
+ ? this.config.dataIdFromObject(object, context)
+ : null;
+ }
+
+ return id && String(id);
+ }
+
+ public addTypePolicies(typePolicies: TypePolicies) {
+ Object.keys(typePolicies).forEach(typename => {
+ const existing = this.getTypePolicy(typename, true);
+ const incoming = typePolicies[typename];
+ const { keyFields, fields } = incoming;
+
+ if (incoming.queryType) this.setRootTypename("Query", typename);
+ if (incoming.mutationType) this.setRootTypename("Mutation", typename);
+ if (incoming.subscriptionType) this.setRootTypename("Subscription", typename);
+
+ existing.keyFn =
+ // Pass false to disable normalization for this typename.
+ keyFields === false ? nullKeyFieldsFn :
+ // Pass an array of strings to use those fields to compute a
+ // composite ID for objects of this typename.
+ Array.isArray(keyFields) ? keyFieldsFnFromSpecifier(keyFields) :
+ // Pass a function to take full control over identification.
+ typeof keyFields === "function" ? keyFields : void 0;
+
+ if (fields) {
+ Object.keys(fields).forEach(fieldName => {
+ const existing = this.getFieldPolicy(typename, fieldName, true);
+ const incoming = fields[fieldName];
+
+ if (typeof incoming === "function") {
+ existing.read = incoming;
+ } else {
+ const { keyArgs, read, merge } = incoming;
+
+ existing.keyFn =
+ // Pass false to disable argument-based differentiation of
+ // field identities.
+ keyArgs === false ? simpleKeyArgsFn :
+ // Pass an array of strings to use named arguments to
+ // compute a composite identity for the field.
+ Array.isArray(keyArgs) ? keyArgsFnFromSpecifier(keyArgs) :
+ // Pass a function to take full control over field identity.
+ typeof keyArgs === "function" ? keyArgs :
+ // Leave existing.keyFn unchanged if all above cases fail.
+ existing.keyFn;
+
+ if (typeof read === "function") existing.read = read;
+ if (typeof merge === "function") existing.merge = merge;
+ }
+
+ if (existing.read || existing.merge) {
+ // If we have a read or merge function, assume keyArgs:false
+ // unless existing.keyFn has already been explicitly set.
+ existing.keyFn = existing.keyFn || simpleKeyArgsFn;
+ }
+ });
+ }
+ });
+ }
+
+ private setRootTypename(
+ which: "Query" | "Mutation" | "Subscription",
+ typename: string,
+ ) {
+ const rootId = "ROOT_" + which.toUpperCase();
+ const old = this.rootTypenamesById[rootId];
+ if (typename !== old) {
+ invariant(old === which, `Cannot change root ${which} __typename more than once`);
+ (this.rootTypenamesById as any)[rootId] = typename;
+ }
+ }
+
+ public addPossibleTypes(possibleTypes: PossibleTypesMap) {
+ (this.usingPossibleTypes as boolean) = true;
+ Object.keys(possibleTypes).forEach(supertype => {
+ const subtypeSet = this.getSubtypeSet(supertype, true);
+ possibleTypes[supertype].forEach(subtypeSet.add, subtypeSet);
+ });
+ }
+
+ private getTypePolicy(
+ typename: string,
+ createIfMissing: boolean,
+ ): Policies["typePolicies"][string] {
+ const { typePolicies } = this;
+ return typePolicies[typename] || (
+ createIfMissing && (typePolicies[typename] = Object.create(null)));
+ }
+
+ private getSubtypeSet(
+ supertype: string,
+ createIfMissing: boolean,
+ ): Set<string> {
+ const policy = this.getTypePolicy(supertype, createIfMissing);
+ if (policy) {
+ return policy.subtypes || (
+ createIfMissing && (policy.subtypes = new Set<string>()));
+ }
+ }
+
+ private getFieldPolicy(
+ typename: string,
+ fieldName: string,
+ createIfMissing: boolean,
+ ): Policies["typePolicies"][string]["fields"][string] {
+ const typePolicy = this.getTypePolicy(typename, createIfMissing);
+ if (typePolicy) {
+ const fieldPolicies = typePolicy.fields || (
+ createIfMissing && (typePolicy.fields = Object.create(null)));
+ if (fieldPolicies) {
+ return fieldPolicies[fieldName] || (
+ createIfMissing && (fieldPolicies[fieldName] = Object.create(null)));
+ }
+ }
+ }
+
+ public fragmentMatches(
+ fragment: InlineFragmentNode | FragmentDefinitionNode,
+ typename: string,
+ ): boolean {
+ if (!fragment.typeCondition) return true;
+
+ // If the fragment has a type condition but the object we're matching
+ // against does not have a __typename, the fragment cannot match.
+ if (!typename) return false;
+
+ const supertype = fragment.typeCondition.name.value;
+ if (typename === supertype) return true;
+
+ if (this.usingPossibleTypes) {
+ const workQueue = [this.getSubtypeSet(supertype, false)];
+ // It's important to keep evaluating workQueue.length each time through
+ // the loop, because the queue can grow while we're iterating over it.
+ for (let i = 0; i < workQueue.length; ++i) {
+ const subtypes = workQueue[i];
+ if (subtypes) {
+ if (subtypes.has(typename)) return true;
+ subtypes.forEach(subtype => {
+ const subsubtypes = this.getSubtypeSet(subtype, false);
+ if (subsubtypes && workQueue.indexOf(subsubtypes) < 0) {
+ workQueue.push(subsubtypes);
+ }
+ });
+ }
+ }
+ }
+
+ return false;
+ }
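+
+  // For example (hypothetical schema), after
+  //   policies.addPossibleTypes({ Media: ["Book", "Movie"] });
+  // a fragment whose type condition is Media matches objects whose
+  // __typename is "Media", "Book", or "Movie", but not "Author".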
+
+ public makeFieldValueGetter(store: NormalizedCache) {
+ const policies = this;
+
+ // Provides a uniform interface for reading field values, whether or not
+ // objectOrReference is a normalized entity.
+ return function getFieldValue<T = StoreValue>(
+ objectOrReference: StoreObject | Reference,
+ storeFieldName: string,
+ ): Readonly<T> {
+ let fieldValue: StoreValue;
+ if (isReference(objectOrReference)) {
+ const dataId = objectOrReference.__ref;
+ fieldValue = store.getFieldValue(dataId, storeFieldName);
+ if (fieldValue === void 0 && storeFieldName === "__typename") {
+ // We can infer the __typename of singleton root objects like
+ // ROOT_QUERY ("Query") and ROOT_MUTATION ("Mutation"), even if
+ // we have never written that information into the cache.
+ return policies.rootTypenamesById[dataId] as any;
+ }
+ } else {
+ fieldValue = objectOrReference && objectOrReference[storeFieldName];
+ }
+ if (process.env.NODE_ENV !== "production") {
+ // Enforce Readonly<T> at runtime, in development.
+ maybeDeepFreeze(fieldValue);
+ }
+ return fieldValue as T;
+ };
+ }
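+
+  // For example (hypothetical store):
+  //   const getFieldValue = policies.makeFieldValueGetter(store);
+  //   getFieldValue(makeReference("ROOT_QUERY"), "__typename") === "Query"
+  // even if nothing has ever been written to ROOT_QUERY.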
+
+ public getStoreFieldName(
+ typename: string | undefined,
+ field: FieldNode,
+ variables: Record<string, any>,
+ ): string {
+ const fieldName = field.name.value;
+ let storeFieldName: string | undefined;
+
+ if (typeof typename === "string") {
+ const policy = this.getFieldPolicy(typename, fieldName, false);
+ const keyFn = policy && policy.keyFn;
+ if (keyFn) {
+ // If the custom keyFn returns a falsy value, fall back to
+ // fieldName instead.
+ storeFieldName = keyFn(field, {
+ typename,
+ variables,
+ policies: this,
+ }) || fieldName;
+ }
+ }
+
+ if (storeFieldName === void 0) {
+ storeFieldName = storeKeyNameFromField(field, variables);
+ }
+
+ // Make sure custom field names start with the actual field.name.value
+ // of the field, so we can always figure out which properties of a
+ // StoreObject correspond to which original field names.
+ return fieldName === fieldNameFromStoreName(storeFieldName)
+ ? storeFieldName
+ : fieldName + ":" + storeFieldName;
+ }
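+
+  // For example, a hypothetical field read as books(genre: "sf") yields the
+  // store field name 'books({"genre":"sf"})' by default, while a field
+  // policy with keyArgs: false collapses it to just "books".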
+
+ private storageTrie = new KeyTrie<StorageType>(true);
+ private fieldDep = dep<StorageType>();
+
+ public readField<V = StoreValue>(
+ objectOrReference: StoreObject | Reference,
+ nameOrField: string | FieldNode,
+ getFieldValue: FieldValueGetter,
+ variables?: Record<string, any>,
+ typename = getFieldValue<string>(objectOrReference, "__typename"),
+ ): Readonly<V> {
+ invariant(
+ objectOrReference,
+ "Must provide an object or Reference when calling Policies#readField",
+ );
+
+ const policies = this;
+ const storeFieldName = typeof nameOrField === "string" ? nameOrField
+ : policies.getStoreFieldName(typename, nameOrField, variables);
+ const fieldName = fieldNameFromStoreName(storeFieldName);
+ const existing = getFieldValue<V>(objectOrReference, storeFieldName);
+ const policy = policies.getFieldPolicy(typename, fieldName, false);
+ const read = policy && policy.read;
+
+ if (read) {
+ const storage = policies.storageTrie.lookup(
+ isReference(objectOrReference)
+ ? objectOrReference.__ref
+ : objectOrReference,
+ storeFieldName,
+ );
+
+ // By depending on the options.storage object when we call
+ // policy.read, we can easily invalidate the result of the read
+ // function when/if the options.invalidate function is called.
+ policies.fieldDep(storage);
+
+ return read(existing, {
+ args: typeof nameOrField === "string" ? null :
+ argumentsObjectFromField(nameOrField, variables),
+ field: typeof nameOrField === "string" ? null : nameOrField,
+ fieldName,
+ variables,
+ policies,
+ isReference,
+ toReference: policies.toReference,
+ storage,
+ // I'm not sure why it's necessary to repeat the parameter types
+ // here, but TypeScript complains if I leave them out.
+ readField<T>(
+ nameOrField: string | FieldNode,
+ foreignObjOrRef?: Reference,
+ ) {
+ return policies.readField<T>(
+ foreignObjOrRef || objectOrReference,
+ nameOrField,
+ getFieldValue,
+ variables,
+ );
+ },
+ invalidate() {
+ policies.fieldDep.dirty(storage);
+ },
+ }) as Readonly<V>;
+ }
+
+ return existing;
+ }
+
+ public hasMergeFunction(
+ typename: string,
+ fieldName: string,
+ ) {
+ const policy = this.getFieldPolicy(typename, fieldName, false);
+ return !!(policy && policy.merge);
+ }
+
+ public applyMerges<T extends StoreValue>(
+ existing: T | Reference,
+ incoming: T | FieldValueToBeMerged,
+ getFieldValue: FieldValueGetter,
+ variables: Record<string, any>,
+ storageKeys?: [string | StoreObject, string],
+ ): T {
+ const policies = this;
+
+ if (isFieldValueToBeMerged(incoming)) {
+ const field = incoming.__field;
+ const fieldName = field.name.value;
+ const policy = policies.getFieldPolicy(
+ incoming.__typename, fieldName, false);
+
+ // The incoming data can have multiple layers of nested objects, so we
+ // need to handle child merges before handling parent merges. This
+ // post-order traversal also ensures that the incoming data passed to
+ // parent merge functions never contains any FieldValueToBeMerged
+ // objects for fields within child objects.
+ const applied = policies.applyMerges(
+ existing,
+ incoming.__value as T,
+ getFieldValue,
+ variables,
+ storageKeys,
+ );
+
+ const merge = policy && policy.merge;
+ if (merge) {
+ if (process.env.NODE_ENV !== "production") {
+ // It may be tempting to modify existing data directly, for example
+ // by pushing more elements onto an existing array, but merge
+ // functions are expected to be pure, so it's important that we
+ // enforce immutability in development.
+ maybeDeepFreeze(existing);
+ }
+
+ // If storage ends up null, that just means no options.storage object
+ // has ever been created for a read function for this field before, so
+ // there's nothing this merge function could do with options.storage
+ // that would help the read function do its work. Most merge functions
+ // will never need to worry about options.storage, but if you're
+ // reading this comment then you probably have good reasons for
+ // wanting to know esoteric details like these, you wizard, you.
+ const storage = storageKeys
+ ? policies.storageTrie.lookupArray(storageKeys)
+ : null;
+
+ return merge(existing, applied, {
+ args: argumentsObjectFromField(field, variables),
+ field,
+ fieldName,
+ variables,
+ policies,
+ isReference,
+ toReference: policies.toReference,
+ readField<T>(
+ nameOrField: string | FieldNode,
+ foreignObjOrRef: StoreObject | Reference,
+ ) {
+ // Unlike options.readField for read functions, we do not fall
+ // back to the current object if no foreignObjOrRef is provided,
+ // because it's not clear what the current object should be for
+ // merge functions: the (possibly undefined) existing object, or
+ // the incoming object? If you think your merge function needs
+ // to read sibling fields in order to produce a new value for
+ // the current field, you might want to rethink your strategy,
+ // because that's a recipe for making merge behavior sensitive
+ // to the order in which fields are written into the cache.
+ // However, readField(name, ref) is useful for merge functions
+ // that need to deduplicate child objects and references.
+ return policies.readField<T>(
+ foreignObjOrRef,
+ nameOrField,
+ getFieldValue,
+ variables,
+ );
+ },
+ storage,
+ invalidate() {
+ if (storage) {
+ policies.fieldDep.dirty(storage);
+ }
+ },
+ }) as T;
+ }
+
+ return applied;
+ }
+
+ if (incoming && typeof incoming === "object") {
+ const e = existing as StoreObject | Reference;
+ const i = incoming as object as StoreObject;
+
+ // If the existing object is a { __ref } object, e.__ref provides a
+ // stable key for looking up the storage object associated with
+ // e.__ref and storeFieldName. Otherwise, storage is enabled only if
+ // existing is actually a non-null object. It's less common for a
+ // merge function to use options.storage, but it's conceivable that a
+ // pair of read and merge functions might want to cooperate in
+ // managing their shared options.storage object.
+ const firstStorageKey = isReference(e)
+ ? e.__ref
+ : typeof e === "object" && e;
+
+ Object.keys(i).forEach(storeFieldName => {
+ i[storeFieldName] = policies.applyMerges(
+ getFieldValue(e, storeFieldName),
+ i[storeFieldName],
+ getFieldValue,
+ variables,
+ // Avoid enabling storage when firstStorageKey is falsy, which
+ // implies no options.storage object has ever been created for a
+ // read function for this field.
+ firstStorageKey && [firstStorageKey, storeFieldName],
+ );
+ });
+ }
+
+ return incoming;
+ }
+}
+
+function keyArgsFnFromSpecifier(
+ specifier: KeySpecifier,
+): KeyArgsFunction {
+ const topLevelArgNames: Record<string, true> = Object.create(null);
+
+ specifier.forEach(name => {
+ if (typeof name === "string") {
+ topLevelArgNames[name] = true;
+ }
+ });
+
+ return (field, context) => {
+ const fieldName = field.name.value;
+
+ if (field.arguments && field.arguments.length > 0) {
+ const args = Object.create(null);
+
+ field.arguments.forEach(arg => {
+ // Avoid converting arguments that were not mentioned in the specifier.
+ if (topLevelArgNames[arg.name.value] === true) {
+ valueToObjectRepresentation(args, arg.name, arg.value, context.variables);
+ }
+ });
+
+ return `${fieldName}:${
+ JSON.stringify(computeKeyObject(args, specifier))
+ }`;
+ }
+
+ return fieldName;
+ };
+}
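+
+// For example (hypothetical field and arguments): with keyArgs: ["genre"],
+// a field written as books(genre: "sf", limit: 10) is stored under
+// 'books:{"genre":"sf"}', so writes with different limit values share
+// one store entry.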
+
+function keyFieldsFnFromSpecifier(
+ specifier: KeySpecifier,
+): KeyFieldsFunction {
+ const trie = new KeyTrie<{
+ aliasMap?: AliasMap;
+ }>(canUseWeakMap);
+
+ return (object, context) => {
+ let aliasMap: AliasMap;
+ if (context.selectionSet && context.fragmentMap) {
+ const info = trie.lookupArray([
+ context.selectionSet,
+ context.fragmentMap,
+ ]);
+ aliasMap = info.aliasMap || (
+ info.aliasMap = makeAliasMap(context.selectionSet, context.fragmentMap)
+ );
+ }
+ return `${context.typename}:${
+ JSON.stringify(computeKeyObject(object, specifier, aliasMap))
+ }`;
+ };
+}
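+
+// For example (hypothetical Book type), keyFields: ["title", "author", ["name"]]
+// identifies a book by its title and its author's name, producing IDs like
+// 'Book:{"title":"1984","author":{"name":"Orwell"}}'.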
+
+type AliasMap = {
+ // Map from store key to corresponding response key. Undefined when there are
+ // no aliased fields in this selection set.
+ aliases?: Record<string, string>;
+  // Map from store key to AliasMap corresponding to a child selection set.
+ // Undefined when there are no child selection sets.
+ subsets?: Record<string, AliasMap>;
+};
+
+function makeAliasMap(
+ selectionSet: SelectionSetNode,
+ fragmentMap: FragmentMap,
+): AliasMap {
+ let map: AliasMap = Object.create(null);
+ // TODO Cache this work, perhaps by storing selectionSet._aliasMap?
+ const workQueue = new Set([selectionSet]);
+ workQueue.forEach(selectionSet => {
+ selectionSet.selections.forEach(selection => {
+ if (isField(selection)) {
+ if (selection.alias) {
+ const responseKey = selection.alias.value;
+ const storeKey = selection.name.value;
+ if (storeKey !== responseKey) {
+ const aliases = map.aliases || (map.aliases = Object.create(null));
+ aliases[storeKey] = responseKey;
+ }
+ }
+ if (selection.selectionSet) {
+ const subsets = map.subsets || (map.subsets = Object.create(null));
+ subsets[selection.name.value] =
+ makeAliasMap(selection.selectionSet, fragmentMap);
+ }
+ } else {
+ const fragment = getFragmentFromSelection(selection, fragmentMap);
+ workQueue.add(fragment.selectionSet);
+ }
+ });
+ });
+ return map;
+}
+
+function computeKeyObject(
+ response: Record<string, any>,
+ specifier: KeySpecifier,
+ aliasMap?: AliasMap,
+): Record<string, any> {
+ const keyObj = Object.create(null);
+ let prevKey: string | undefined;
+ specifier.forEach(s => {
+ if (Array.isArray(s)) {
+ if (typeof prevKey === "string") {
+ const subsets = aliasMap && aliasMap.subsets;
+ const subset = subsets && subsets[prevKey];
+ keyObj[prevKey] = computeKeyObject(response[prevKey], s, subset);
+ }
+ } else {
+ const aliases = aliasMap && aliasMap.aliases;
+ const responseName = aliases && aliases[s] || s;
+ invariant(
+ hasOwn.call(response, responseName),
+ // TODO Make this appropriate for keyArgs as well
+ `Missing field ${responseName} while computing key fields`,
+ );
+ keyObj[prevKey = s] = response[responseName];
+ }
+ });
+ return keyObj;
+}
diff --git a/src/cache/inmemory/readFromStore.ts b/src/cache/inmemory/readFromStore.ts
new file mode 100644
--- /dev/null
+++ b/src/cache/inmemory/readFromStore.ts
@@ -0,0 +1,393 @@
+import {
+ DocumentNode,
+ FieldNode,
+ FragmentDefinitionNode,
+ InlineFragmentNode,
+ SelectionSetNode,
+} from 'graphql';
+import { wrap } from 'optimism';
+import { invariant, InvariantError } from 'ts-invariant';
+
+import {
+ isField,
+ isInlineFragment,
+ resultKeyNameFromField,
+ Reference,
+ isReference,
+ makeReference,
+} from '../../utilities/graphql/storeUtils';
+import { createFragmentMap, FragmentMap } from '../../utilities/graphql/fragments';
+import { shouldInclude } from '../../utilities/graphql/directives';
+import {
+ getDefaultValues,
+ getFragmentDefinitions,
+ getMainDefinition,
+ getQueryDefinition,
+} from '../../utilities/graphql/getFromAST';
+import { maybeDeepFreeze } from '../../utilities/common/maybeDeepFreeze';
+import { mergeDeepArray } from '../../utilities/common/mergeDeep';
+import { Cache } from '../core/types/Cache';
+import {
+ DiffQueryAgainstStoreOptions,
+ ReadQueryOptions,
+ StoreObject,
+ NormalizedCache,
+} from './types';
+import { supportsResultCaching } from './entityStore';
+import { getTypenameFromStoreObject } from './helpers';
+import { Policies, FieldValueGetter } from './policies';
+
+export type VariableMap = { [name: string]: any };
+
+interface ExecContext {
+ query: DocumentNode;
+ store: NormalizedCache;
+ policies: Policies;
+ fragmentMap: FragmentMap;
+ variables: VariableMap;
+ getFieldValue: FieldValueGetter;
+};
+
+export type ExecResultMissingField = {
+ object: StoreObject;
+ fieldName: string;
+};
+
+export type ExecResult<R = any> = {
+ result: R;
+ // Empty array if no missing fields encountered while computing result.
+ missing?: ExecResultMissingField[];
+};
+
+type ExecSelectionSetOptions = {
+ selectionSet: SelectionSetNode;
+ objectOrReference: StoreObject | Reference;
+ context: ExecContext;
+};
+
+type ExecSubSelectedArrayOptions = {
+ field: FieldNode;
+ array: any[];
+ context: ExecContext;
+};
+
+export interface StoreReaderConfig {
+ addTypename?: boolean;
+ policies: Policies;
+}
+
+export class StoreReader {
+ constructor(private config: StoreReaderConfig) {
+ this.config = { addTypename: true, ...config };
+
+ const {
+ executeSelectionSet,
+ executeSubSelectedArray,
+ } = this;
+
+ this.executeSelectionSet = wrap((options: ExecSelectionSetOptions) => {
+ return executeSelectionSet.call(this, options);
+ }, {
+ makeCacheKey({
+ selectionSet,
+ objectOrReference,
+ context,
+ }: ExecSelectionSetOptions) {
+ if (supportsResultCaching(context.store)) {
+ return context.store.makeCacheKey(
+ selectionSet,
+ JSON.stringify(context.variables),
+ isReference(objectOrReference)
+ ? objectOrReference.__ref
+ : objectOrReference,
+ );
+ }
+ }
+ });
+
+ this.executeSubSelectedArray = wrap((options: ExecSubSelectedArrayOptions) => {
+ return executeSubSelectedArray.call(this, options);
+ }, {
+ makeCacheKey({ field, array, context }) {
+ if (supportsResultCaching(context.store)) {
+ return context.store.makeCacheKey(
+ field,
+ array,
+ JSON.stringify(context.variables),
+ );
+ }
+ }
+ });
+ }
+
+ /**
+ * Resolves the result of a query solely from the store (i.e. never hits the server).
+ *
+ * @param {Store} store The {@link NormalizedCache} used by Apollo for the `data` portion of the
+ * store.
+ *
+ * @param {DocumentNode} query The query document to resolve from the data available in the store.
+ *
+ * @param {Object} [variables] A map from the name of a variable to its value. These variables can
+ * be referenced by the query document.
+ */
+ public readQueryFromStore<QueryType>(
+ options: ReadQueryOptions,
+ ): QueryType | undefined {
+ return this.diffQueryAgainstStore<QueryType>({
+ ...options,
+ returnPartialData: false,
+ }).result;
+ }
+
+ /**
+ * Given a store and a query, return as much of the result as possible and
+ * identify if any data was missing from the store.
+ * @param {DocumentNode} query A parsed GraphQL query document
+ * @param {Store} store The Apollo Client store object
+ * @return {result: Object, complete: [boolean]}
+ */
+ public diffQueryAgainstStore<T>({
+ store,
+ query,
+ variables,
+ returnPartialData = true,
+ rootId = 'ROOT_QUERY',
+ config,
+ }: DiffQueryAgainstStoreOptions): Cache.DiffResult<T> {
+ const { policies } = this.config;
+
+ const execResult = this.executeSelectionSet({
+ selectionSet: getMainDefinition(query).selectionSet,
+ objectOrReference: makeReference(rootId),
+ context: {
+ store,
+ query,
+ policies,
+ variables: {
+ ...getDefaultValues(getQueryDefinition(query)),
+ ...variables,
+ },
+ fragmentMap: createFragmentMap(getFragmentDefinitions(query)),
+ getFieldValue: policies.makeFieldValueGetter(store),
+ },
+ });
+
+ const hasMissingFields =
+ execResult.missing && execResult.missing.length > 0;
+
+    if (hasMissingFields && !returnPartialData) {
+ execResult.missing!.forEach(info => {
+ throw new InvariantError(`Can't find field ${
+ info.fieldName
+ } on object ${
+ JSON.stringify(info.object, null, 2)
+ }.`);
+ });
+ }
+
+ return {
+ result: execResult.result,
+ complete: !hasMissingFields,
+ };
+ }
+
+ private executeSelectionSet({
+ selectionSet,
+ objectOrReference,
+ context,
+ }: ExecSelectionSetOptions): ExecResult {
+ const { fragmentMap, variables, policies, getFieldValue } = context;
+ const objectsToMerge: { [key: string]: any }[] = [];
+ const finalResult: ExecResult = { result: null };
+ const typename = getFieldValue<string>(objectOrReference, "__typename");
+
+ if (this.config.addTypename &&
+ typeof typename === "string" &&
+ Object.values(
+ policies.rootTypenamesById
+ ).indexOf(typename) < 0) {
+ // Ensure we always include a default value for the __typename
+ // field, if we have one, and this.config.addTypename is true. Note
+ // that this field can be overridden by other merged objects.
+ objectsToMerge.push({ __typename: typename });
+ }
+
+ function getMissing() {
+ return finalResult.missing || (finalResult.missing = []);
+ }
+
+ function handleMissing<T>(result: ExecResult<T>): T {
+ if (result.missing) getMissing().push(...result.missing);
+ return result.result;
+ }
+
+ selectionSet.selections.forEach(selection => {
+ // Omit fields with directives @skip(if: <truthy value>) or
+ // @include(if: <falsy value>).
+ if (!shouldInclude(selection, variables)) return;
+
+ if (isField(selection)) {
+ let fieldValue = policies.readField(
+ objectOrReference,
+ selection,
+ getFieldValue,
+ variables,
+ typename,
+ );
+
+ if (fieldValue === void 0) {
+ getMissing().push({
+ object: objectOrReference as StoreObject,
+ fieldName: selection.name.value,
+ });
+
+ } else if (Array.isArray(fieldValue)) {
+ fieldValue = handleMissing(this.executeSubSelectedArray({
+ field: selection,
+ array: fieldValue,
+ context,
+ }));
+
+ } else if (!selection.selectionSet) {
+ // If the field does not have a selection set, then we handle it
+ // as a scalar value. However, that value should not contain any
+ // Reference objects, and should be frozen in development, if it
+ // happens to be an object that is mutable.
+ if (process.env.NODE_ENV !== 'production') {
+ assertSelectionSetForIdValue(
+ context.store,
+ selection,
+ fieldValue,
+ );
+ maybeDeepFreeze(fieldValue);
+ }
+
+ } else if (fieldValue != null) {
+ // In this case, because we know the field has a selection set,
+ // it must be trying to query a GraphQLObjectType, which is why
+ // fieldValue must be != null.
+ fieldValue = handleMissing(this.executeSelectionSet({
+ selectionSet: selection.selectionSet,
+ objectOrReference: fieldValue as StoreObject | Reference,
+ context,
+ }));
+ }
+
+ if (fieldValue !== void 0) {
+ objectsToMerge.push({
+ [resultKeyNameFromField(selection)]: fieldValue,
+ });
+ }
+
+ } else {
+ let fragment: InlineFragmentNode | FragmentDefinitionNode;
+
+ if (isInlineFragment(selection)) {
+ fragment = selection;
+ } else {
+ // This is a named fragment
+ invariant(
+ fragment = fragmentMap[selection.name.value],
+ `No fragment named ${selection.name.value}`,
+ );
+ }
+
+ if (policies.fragmentMatches(fragment, typename)) {
+ objectsToMerge.push(handleMissing(
+ this.executeSelectionSet({
+ selectionSet: fragment.selectionSet,
+ objectOrReference,
+ context,
+ })
+ ));
+ }
+ }
+ });
+
+ // Perform a single merge at the end so that we can avoid making more
+ // defensive shallow copies than necessary.
+ finalResult.result = mergeDeepArray(objectsToMerge);
+
+ if (process.env.NODE_ENV !== 'production') {
+ Object.freeze(finalResult.result);
+ }
+
+ return finalResult;
+ }
+
+ private executeSubSelectedArray({
+ field,
+ array,
+ context,
+ }: ExecSubSelectedArrayOptions): ExecResult {
+ let missing: ExecResultMissingField[] | undefined;
+
+ function handleMissing<T>(childResult: ExecResult<T>): T {
+ if (childResult.missing) {
+ missing = missing || [];
+ missing.push(...childResult.missing);
+ }
+
+ return childResult.result;
+ }
+
+ array = array.map(item => {
+ // null value in array
+ if (item === null) {
+ return null;
+ }
+
+ // This is a nested array, recurse
+ if (Array.isArray(item)) {
+ return handleMissing(this.executeSubSelectedArray({
+ field,
+ array: item,
+ context,
+ }));
+ }
+
+ // This is an object, run the selection set on it
+ if (field.selectionSet) {
+ return handleMissing(this.executeSelectionSet({
+ selectionSet: field.selectionSet,
+ objectOrReference: item,
+ context,
+ }));
+ }
+
+ if (process.env.NODE_ENV !== 'production') {
+ assertSelectionSetForIdValue(context.store, field, item);
+ }
+
+ return item;
+ });
+
+ if (process.env.NODE_ENV !== 'production') {
+ Object.freeze(array);
+ }
+
+ return { result: array, missing };
+ }
+}
+
+function assertSelectionSetForIdValue(
+ store: NormalizedCache,
+ field: FieldNode,
+ fieldValue: any,
+) {
+ if (!field.selectionSet) {
+ const workSet = new Set([fieldValue]);
+ workSet.forEach(value => {
+ if (value && typeof value === "object") {
+ invariant(
+ !isReference(value),
+ `Missing selection set for object of type ${
+ getTypenameFromStoreObject(store, value)
+ } returned for query field ${field.name.value}`,
+ );
+ Object.values(value).forEach(workSet.add, workSet);
+ }
+ });
+ }
+}
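For orientation, here is a minimal sketch of the read path this file implements, going through the public InMemoryCache wrapper rather than constructing a StoreReader directly (the hero schema is an assumption):

```ts
import { gql, InMemoryCache } from '@apollo/client';

const cache = new InMemoryCache();
const query = gql`query { hero { __typename id name } }`;

cache.writeQuery({
  query,
  data: { hero: { __typename: 'Droid', id: '2001', name: 'R2-D2' } },
});

// diff() delegates to StoreReader#diffQueryAgainstStore: it returns as much
// data as the store can supply plus a completeness flag, whereas a read with
// returnPartialData: false throws on the first missing field instead.
const { result, complete } = cache.diff({ query, optimistic: true });
console.log(complete, result); // true { hero: { __typename: 'Droid', ... } }
```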
diff --git a/packages/apollo-cache-inmemory/src/types.ts b/src/cache/inmemory/types.ts
similarity index 65%
rename from packages/apollo-cache-inmemory/src/types.ts
rename to src/cache/inmemory/types.ts
--- a/packages/apollo-cache-inmemory/src/types.ts
+++ b/src/cache/inmemory/types.ts
@@ -1,12 +1,14 @@
import { DocumentNode } from 'graphql';
-import { FragmentMatcher } from './readFromStore';
-import { Transaction } from 'apollo-cache';
-import { IdValue, StoreValue } from 'apollo-utilities';
+
+import { Transaction } from '../core/cache';
+import { StoreValue } from '../../utilities/graphql/storeUtils';
export interface IdGetterObj extends Object {
__typename?: string;
id?: string;
+ _id?: string;
}
+
export declare type IdGetter = (
value: IdGetterObj,
) => string | null | undefined;
@@ -16,9 +18,11 @@ export declare type IdGetter = (
* StoreObjects from the cache
*/
export interface NormalizedCache {
+ has(dataId: string): boolean;
get(dataId: string): StoreObject;
- set(dataId: string, value: StoreObject): void;
- delete(dataId: string): void;
+ getFieldValue(dataId: string, storeFieldName: string): StoreValue;
+ merge(dataId: string, incoming: StoreObject): void;
+ delete(dataId: string, fieldName?: string): void;
clear(): void;
// non-Map elements:
@@ -30,6 +34,17 @@ export interface NormalizedCache {
* replace the state of the store
*/
replace(newData: NormalizedCacheObject): void;
+
+ /**
+ * Retain (or release) a given root ID to protect (or expose) it and its
+ * transitive child entities from (or to) garbage collection. The current
+ * retainment count is returned by both methods. Note that releasing a root
+ * ID does not cause that entity to be garbage collected, but merely removes
+ * it from the set of root IDs that will be considered during the next
+ * mark-and-sweep collection.
+ */
+ retain(rootId: string): number;
+ release(rootId: string): number;
}
/**
@@ -42,7 +57,7 @@ export interface NormalizedCacheObject {
export interface StoreObject {
__typename?: string;
- [storeFieldKey: string]: StoreValue;
+ [storeFieldName: string]: StoreValue;
}
export type OptimisticStoreItem = {
@@ -54,7 +69,6 @@ export type OptimisticStoreItem = {
export type ReadQueryOptions = {
store: NormalizedCache;
query: DocumentNode;
- fragmentMatcherFunction?: FragmentMatcher;
variables?: Object;
previousResult?: any;
rootId?: string;
@@ -67,37 +81,7 @@ export type DiffQueryAgainstStoreOptions = ReadQueryOptions & {
export type ApolloReducerConfig = {
dataIdFromObject?: IdGetter;
- fragmentMatcher?: FragmentMatcherInterface;
addTypename?: boolean;
- cacheRedirects?: CacheResolverMap;
-};
-
-export type ReadStoreContext = {
- readonly store: NormalizedCache;
- readonly cacheRedirects: CacheResolverMap;
- readonly dataIdFromObject?: IdGetter;
-};
-
-export interface FragmentMatcherInterface {
- match(
- idValue: IdValue,
- typeCondition: string,
- context: ReadStoreContext,
- ): boolean | 'heuristic';
-}
-
-export type PossibleTypesMap = { [key: string]: string[] };
-
-export type IntrospectionResultData = {
- __schema: {
- types: {
- kind: string;
- name: string;
- possibleTypes: {
- name: string;
- }[];
- }[];
- };
};
export type CacheResolver = (
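The retain/release contract documented above is reference counting over root IDs. A toy illustration of the contract (not code from this patch):

```ts
// Both methods return the current retainment count for the root ID;
// releasing to zero only removes the ID from the GC root set.
class RootIdCounter {
  private rootIds: Record<string, number> = Object.create(null);

  retain(rootId: string): number {
    return (this.rootIds[rootId] = (this.rootIds[rootId] || 0) + 1);
  }

  release(rootId: string): number {
    const count = (this.rootIds[rootId] || 0) - 1;
    if (count <= 0) {
      delete this.rootIds[rootId];
      return 0;
    }
    return (this.rootIds[rootId] = count);
  }
}

const counter = new RootIdCounter();
counter.retain('ROOT_QUERY');                // 1
counter.retain('ROOT_QUERY');                // 2
console.log(counter.release('ROOT_QUERY'));  // 1 -- still protected from GC
```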
diff --git a/src/cache/inmemory/writeToStore.ts b/src/cache/inmemory/writeToStore.ts
new file mode 100644
--- /dev/null
+++ b/src/cache/inmemory/writeToStore.ts
@@ -0,0 +1,299 @@
+import { SelectionSetNode, FieldNode, DocumentNode } from 'graphql';
+import { invariant } from 'ts-invariant';
+
+import {
+ createFragmentMap,
+ FragmentMap,
+ getFragmentFromSelection,
+} from '../../utilities/graphql/fragments';
+
+import {
+ getDefaultValues,
+ getFragmentDefinitions,
+ getOperationDefinition,
+} from '../../utilities/graphql/getFromAST';
+
+import {
+ getTypenameFromResult,
+ makeReference,
+ isField,
+ resultKeyNameFromField,
+ StoreValue,
+} from '../../utilities/graphql/storeUtils';
+
+import { shouldInclude } from '../../utilities/graphql/directives';
+import { cloneDeep } from '../../utilities/common/cloneDeep';
+
+import { Policies, FieldValueGetter } from './policies';
+import { defaultNormalizedCacheFactory } from './entityStore';
+import { NormalizedCache, StoreObject } from './types';
+import { makeProcessedFieldsMerger } from './helpers';
+
+export type WriteContext = {
+ readonly store: NormalizedCache;
+ readonly written: {
+ [dataId: string]: SelectionSetNode[];
+ };
+ readonly variables?: any;
+ readonly fragmentMap?: FragmentMap;
+ getFieldValue: FieldValueGetter;
+ // General-purpose deep-merge function for use during writes.
+ merge<T>(existing: T, incoming: T): T;
+};
+
+export interface StoreWriterConfig {
+ policies: Policies;
+};
+
+export class StoreWriter {
+ private policies: Policies;
+
+ constructor(config: StoreWriterConfig) {
+ this.policies = config.policies;
+ }
+
+ /**
+ * Writes the result of a query to the store.
+ *
+ * @param result The result object returned for the query document.
+ *
+ * @param query The query document whose result we are writing to the store.
+ *
+ * @param store The {@link NormalizedCache} used by Apollo for the `data` portion of the store.
+ *
+ * @param variables A map from the name of a variable to its value. These variables can be
+ * referenced by the query document.
+ */
+ public writeQueryToStore({
+ query,
+ result,
+ dataId = 'ROOT_QUERY',
+ store = defaultNormalizedCacheFactory(),
+ variables,
+ }: {
+ query: DocumentNode;
+ result: Object;
+ dataId?: string;
+ store?: NormalizedCache;
+ variables?: Object;
+ }): NormalizedCache {
+ const operationDefinition = getOperationDefinition(query)!;
+
+ // Any IDs written explicitly to the cache (including ROOT_QUERY, most
+ // frequently) will be retained as reachable root IDs on behalf of their
+ // owner DocumentNode objects, until/unless evicted for all owners.
+ store.retain(dataId);
+
+ const merger = makeProcessedFieldsMerger();
+
+ return this.writeSelectionSetToStore({
+ result: result || Object.create(null),
+ dataId,
+ selectionSet: operationDefinition.selectionSet,
+ context: {
+ store,
+ written: Object.create(null),
+ merge<T>(existing: T, incoming: T) {
+ return merger.merge(existing, incoming) as T;
+ },
+ variables: {
+ ...getDefaultValues(operationDefinition),
+ ...variables,
+ },
+ fragmentMap: createFragmentMap(getFragmentDefinitions(query)),
+ getFieldValue: this.policies.makeFieldValueGetter(store),
+ },
+ });
+ }
+
+ private writeSelectionSetToStore({
+ dataId,
+ result,
+ selectionSet,
+ context,
+ }: {
+ dataId: string;
+ result: Record<string, any>;
+ selectionSet: SelectionSetNode;
+ context: WriteContext;
+ }): NormalizedCache {
+ const { policies } = this;
+ const { store, written } = context;
+
+ // Avoid processing the same entity object using the same selection set
+ // more than once. We use an array instead of a Set since most entity IDs
+ // will be written using only one selection set, so the size of this array
+ // is likely to be very small, meaning indexOf is likely to be faster than
+ // Set.prototype.has.
+ const sets = written[dataId] || (written[dataId] = []);
+ if (sets.indexOf(selectionSet) >= 0) return store;
+ sets.push(selectionSet);
+
+ const entityRef = makeReference(dataId);
+ const typename =
+ // If the result has a __typename, trust that.
+ getTypenameFromResult(result, selectionSet, context.fragmentMap) ||
+ // If the entity identified by dataId has a __typename in the store,
+ // fall back to that.
+ context.getFieldValue<string>(entityRef, "__typename");
+
+ store.merge(
+ dataId,
+ policies.applyMerges(
+ entityRef,
+ this.processSelectionSet({
+ result,
+ selectionSet,
+ context,
+ typename,
+ }),
+ context.getFieldValue,
+ context.variables,
+ ),
+ );
+
+ return store;
+ }
+
+ private processSelectionSet({
+ result,
+ selectionSet,
+ context,
+ typename,
+ }: {
+ result: Record<string, any>;
+ selectionSet: SelectionSetNode;
+ context: WriteContext;
+ typename: string;
+ }): StoreObject {
+ let mergedFields: StoreObject = Object.create(null);
+ if (typeof typename === "string") {
+ mergedFields.__typename = typename;
+ }
+
+ selectionSet.selections.forEach(selection => {
+ if (!shouldInclude(selection, context.variables)) {
+ return;
+ }
+
+ const { policies } = this;
+
+ if (isField(selection)) {
+ const resultFieldKey = resultKeyNameFromField(selection);
+ const value = result[resultFieldKey];
+
+ if (typeof value !== 'undefined') {
+ const storeFieldName = policies.getStoreFieldName(
+ typename,
+ selection,
+ context.variables,
+ );
+
+ const incomingValue =
+ this.processFieldValue(value, selection, context);
+
+ mergedFields = context.merge(mergedFields, {
+ // If a custom merge function is defined for this field, store
+ // a special FieldValueToBeMerged object, so that we can run
+ // the merge function later, after all processSelectionSet
+ // work is finished.
+ [storeFieldName]: policies.hasMergeFunction(
+ typename,
+ selection.name.value,
+ ) ? {
+ __field: selection,
+ __typename: typename,
+ __value: incomingValue,
+ } : incomingValue,
+ });
+
+ } else if (
+ policies.usingPossibleTypes &&
+ !(
+ selection.directives &&
+ selection.directives.some(
+ ({ name }) =>
+ name && (name.value === 'defer' || name.value === 'client'),
+ )
+ )
+ ) {
+ // XXX We'd like to throw an error, but for backwards compatibility's sake
+ // we just print a warning for the time being.
+ //throw new WriteError(`Missing field ${resultFieldKey} in ${JSON.stringify(result, null, 2).substring(0, 100)}`);
+ invariant.warn(
+ `Missing field ${resultFieldKey} in ${JSON.stringify(
+ result,
+ null,
+ 2,
+ ).substring(0, 100)}`,
+ );
+ }
+ } else {
+ // This is not a field, so it must be a fragment, either inline or named
+ const fragment = getFragmentFromSelection(
+ selection,
+ context.fragmentMap,
+ );
+
+ if (policies.fragmentMatches(fragment, typename)) {
+ mergedFields = context.merge(
+ mergedFields,
+ this.processSelectionSet({
+ result,
+ selectionSet: fragment.selectionSet,
+ context,
+ typename,
+ }),
+ );
+ }
+ }
+ });
+
+ return mergedFields;
+ }
+
+ private processFieldValue(
+ value: any,
+ field: FieldNode,
+ context: WriteContext,
+ ): StoreValue {
+ if (!field.selectionSet || value === null) {
+ // In development, we need to clone scalar values so that they can be
+ // safely frozen with maybeDeepFreeze in readFromStore.ts. In production,
+ // it's cheaper to store the scalar values directly in the cache.
+ return process.env.NODE_ENV === 'production' ? value : cloneDeep(value);
+ }
+
+ if (Array.isArray(value)) {
+      return value.map(item => this.processFieldValue(item, field, context));
+ }
+
+ if (value) {
+ const dataId = this.policies.identify(
+ value,
+ // Since value is a result object rather than a normalized StoreObject,
+ // we need to consider aliases when computing its key fields.
+ field.selectionSet,
+ context.fragmentMap,
+ );
+
+ if (typeof dataId === 'string') {
+ this.writeSelectionSetToStore({
+ dataId,
+ result: value,
+ selectionSet: field.selectionSet,
+ context,
+ });
+ return makeReference(dataId);
+ }
+ }
+
+ return this.processSelectionSet({
+ result: value,
+ selectionSet: field.selectionSet,
+ context,
+ typename: getTypenameFromResult(
+ value, field.selectionSet, context.fragmentMap),
+ });
+ }
+}
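Taken together, writeQueryToStore and writeSelectionSetToStore normalize a result tree into flat entities joined by references. A rough sketch of the observable effect through the public InMemoryCache wrapper (the exact extract() output shape is illustrative):

```ts
import { gql, InMemoryCache } from '@apollo/client';

const cache = new InMemoryCache();

cache.writeQuery({
  query: gql`query { currentUser { __typename id name } }`,
  data: { currentUser: { __typename: 'User', id: '42', name: 'Ada' } },
});

console.log(cache.extract());
// Roughly:
// {
//   'User:42': { __typename: 'User', id: '42', name: 'Ada' },
//   ROOT_QUERY: { currentUser: { __ref: 'User:42' } },
// }
```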
diff --git a/packages/apollo-client/src/config/jest/setup.ts b/src/config/jest/setup.ts
similarity index 100%
rename from packages/apollo-client/src/config/jest/setup.ts
rename to src/config/jest/setup.ts
diff --git a/packages/apollo-client/src/core/LocalState.ts b/src/core/LocalState.ts
similarity index 95%
rename from packages/apollo-client/src/core/LocalState.ts
rename to src/core/LocalState.ts
--- a/packages/apollo-client/src/core/LocalState.ts
+++ b/src/core/LocalState.ts
@@ -10,30 +10,28 @@ import {
ASTNode,
} from 'graphql';
import { visit, BREAK } from 'graphql/language/visitor';
+import { invariant } from 'ts-invariant';
-import { ApolloCache } from 'apollo-cache';
+import { ApolloCache } from '../cache/core/cache';
import {
getMainDefinition,
+ getFragmentDefinitions,
+} from '../utilities/graphql/getFromAST';
+import { hasDirectives, shouldInclude } from '../utilities/graphql/directives';
+import { FragmentMap, createFragmentMap } from '../utilities/graphql/fragments';
+import {
buildQueryFromSelectionSet,
- hasDirectives,
removeClientSetsFromDocument,
- mergeDeep,
- mergeDeepArray,
- FragmentMap,
+} from '../utilities/graphql/transform';
+import { mergeDeep, mergeDeepArray } from '../utilities/common/mergeDeep';
+import {
argumentsObjectFromField,
resultKeyNameFromField,
- getFragmentDefinitions,
- createFragmentMap,
- shouldInclude,
isField,
isInlineFragment,
-} from 'apollo-utilities';
-
-import { invariant } from 'ts-invariant';
-
-import ApolloClient from '../ApolloClient';
+} from '../utilities/graphql/storeUtils';
+import { ApolloClient } from '../ApolloClient';
import { Resolvers, OperationVariables } from './types';
-import { capitalizeFirstLetter } from '../util/capitalizeFirstLetter';
export type Resolver = (
rootValue?: any,
@@ -279,7 +277,8 @@ export class LocalState<TCacheShape> {
.operation;
const defaultOperationType = definitionOperation
- ? capitalizeFirstLetter(definitionOperation)
+ ? definitionOperation.charAt(0).toUpperCase() +
+ definitionOperation.slice(1)
: 'Query';
const { cache, client } = this;
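LocalState is what resolves @client fields. A hedged sketch of wiring resolvers through ApolloClient (the isLoggedIn field and localStorage usage are assumptions; ApolloLink.empty() stands in for a network link):

```ts
import { ApolloClient, ApolloLink, InMemoryCache, gql } from '@apollo/client';

const client = new ApolloClient({
  cache: new InMemoryCache(),
  link: ApolloLink.empty(), // no server needed for client-only fields
  // each resolver matches the Resolver type exported from LocalState:
  // (rootValue?, args?, context?, info?) => any
  resolvers: {
    Query: {
      isLoggedIn: () => !!window.localStorage.getItem('token'),
    },
  },
});

client.query({ query: gql`query { isLoggedIn @client }` });
```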
diff --git a/packages/apollo-client/src/core/ObservableQuery.ts b/src/core/ObservableQuery.ts
similarity index 84%
rename from packages/apollo-client/src/core/ObservableQuery.ts
rename to src/core/ObservableQuery.ts
--- a/packages/apollo-client/src/core/ObservableQuery.ts
+++ b/src/core/ObservableQuery.ts
@@ -1,12 +1,11 @@
-import {
- isEqual,
- tryFunctionOrLogError,
- cloneDeep,
- getOperationDefinition,
-} from 'apollo-utilities';
-import { GraphQLError } from 'graphql';
+import { invariant, InvariantError } from 'ts-invariant';
+import { equal } from '@wry/equality';
+
+import { tryFunctionOrLogError } from '../utilities/common/errorHandling';
+import { cloneDeep } from '../utilities/common/cloneDeep';
+import { getOperationDefinition } from '../utilities/graphql/getFromAST';
import { NetworkStatus, isNetworkRequestInFlight } from './networkStatus';
-import { Observable, Observer, Subscription } from '../util/Observable';
+import { Observable, Observer, Subscription } from '../utilities/observables/Observable';
import { ApolloError } from '../errors/ApolloError';
import { QueryManager } from './QueryManager';
import { ApolloQueryResult, FetchType, OperationVariables } from './types';
@@ -16,38 +15,18 @@ import {
SubscribeToMoreOptions,
ErrorPolicy,
} from './watchQueryOptions';
-
import { QueryStoreValue } from '../data/queries';
+import { isNonEmptyArray } from '../utilities/common/arrays';
-import { invariant, InvariantError } from 'ts-invariant';
-import { isNonEmptyArray } from '../util/arrays';
-
-// XXX remove in the next breaking semver change (3.0)
-// Deprecated, use ApolloCurrentQueryResult
-export type ApolloCurrentResult<T> = {
- data: T | {};
- errors?: ReadonlyArray<GraphQLError>;
- loading: boolean;
- networkStatus: NetworkStatus;
- error?: ApolloError;
- partial?: boolean;
-};
-
-export type ApolloCurrentQueryResult<T> = {
- data: T | undefined;
- errors?: ReadonlyArray<GraphQLError>;
- loading: boolean;
- networkStatus: NetworkStatus;
+export type ApolloCurrentQueryResult<T> = ApolloQueryResult<T> & {
error?: ApolloError;
- partial?: boolean;
- stale?: boolean;
};
export interface FetchMoreOptions<
TData = any,
TVariables = OperationVariables
> {
- updateQuery: (
+ updateQuery?: (
previousQueryResult: TData,
options: {
fetchMoreResult?: TData;
@@ -151,56 +130,44 @@ export class ObservableQuery<
});
}
- // XXX remove in the next breaking semver change (3.0)
- // Deprecated, use getCurrentResult()
- public currentResult(): ApolloCurrentResult<TData> {
- const result = this.getCurrentResult() as ApolloCurrentResult<TData>;
- if (result.data === undefined) {
- result.data = {};
- }
- return result;
- }
-
- /**
- * Return the result of the query from the local cache as well as some fetching status
- * `loading` and `networkStatus` allow to know if a request is in flight
- * `partial` lets you know if the result from the local cache is complete or partial
- * @return {data: Object, error: ApolloError, loading: boolean, networkStatus: number, partial: boolean}
- */
public getCurrentResult(): ApolloCurrentQueryResult<TData> {
- if (this.isTornDown) {
- const { lastResult } = this;
- return {
- data: !this.lastError && lastResult && lastResult.data || void 0,
- error: this.lastError,
- loading: false,
- networkStatus: NetworkStatus.error,
- };
- }
-
- const { data, partial } = this.queryManager.getCurrentQueryResult(this);
- const queryStoreValue = this.queryManager.queryStore.get(this.queryId);
- let result: ApolloQueryResult<TData>;
-
+ const { lastResult, lastError } = this;
const { fetchPolicy } = this.options;
-
const isNetworkFetchPolicy =
fetchPolicy === 'network-only' ||
fetchPolicy === 'no-cache';
+ const networkStatus =
+ lastError ? NetworkStatus.error :
+ lastResult ? lastResult.networkStatus :
+ isNetworkFetchPolicy ? NetworkStatus.loading :
+ NetworkStatus.ready;
+
+ const result: ApolloCurrentQueryResult<TData> = {
+ data: !lastError && lastResult && lastResult.data || void 0,
+ error: this.lastError,
+ loading: isNetworkRequestInFlight(networkStatus),
+ networkStatus,
+ stale: lastResult ? lastResult.stale : false,
+ };
+
+ if (this.isTornDown) {
+ return result;
+ }
+
+ const queryStoreValue = this.queryManager.queryStore.get(this.queryId);
if (queryStoreValue) {
const { networkStatus } = queryStoreValue;
if (hasError(queryStoreValue, this.options.errorPolicy)) {
- return {
+ return Object.assign(result, {
data: void 0,
- loading: false,
networkStatus,
error: new ApolloError({
graphQLErrors: queryStoreValue.graphQLErrors,
networkError: queryStoreValue.networkError,
}),
- };
+ });
}
// Variables might have been added dynamically at query time, when
@@ -216,38 +183,19 @@ export class ObservableQuery<
this.variables = this.options.variables;
}
- result = {
- data,
+ Object.assign(result, {
loading: isNetworkRequestInFlight(networkStatus),
networkStatus,
- } as ApolloQueryResult<TData>;
+ });
if (queryStoreValue.graphQLErrors && this.options.errorPolicy === 'all') {
result.errors = queryStoreValue.graphQLErrors;
}
-
- } else {
- // We need to be careful about the loading state we show to the user, to try
- // and be vaguely in line with what the user would have seen from .subscribe()
- // but to still provide useful information synchronously when the query
- // will not end up hitting the server.
- // See more: https://github.com/apollostack/apollo-client/issues/707
-      // Basically: is there a query in flight right now (modulo the next tick)?
- const loading = isNetworkFetchPolicy ||
- (partial && fetchPolicy !== 'cache-only');
-
- result = {
- data,
- loading,
- networkStatus: loading ? NetworkStatus.loading : NetworkStatus.ready,
- } as ApolloQueryResult<TData>;
}
- if (!partial) {
- this.updateLastResult({ ...result, stale: false });
- }
+ this.updateLastResult(result);
- return { ...result, partial };
+ return result;
}
// Compares newResult to the snapshot we took of this.lastResult when it was
@@ -259,7 +207,7 @@ export class ObservableQuery<
newResult &&
snapshot.networkStatus === newResult.networkStatus &&
snapshot.stale === newResult.stale &&
- isEqual(snapshot.data, newResult.data)
+ equal(snapshot.data, newResult.data)
);
}
@@ -312,7 +260,7 @@ export class ObservableQuery<
fetchPolicy = 'network-only';
}
- if (!isEqual(this.variables, variables)) {
+ if (!equal(this.variables, variables)) {
// update observable variables
this.variables = {
...this.variables,
@@ -320,7 +268,7 @@ export class ObservableQuery<
};
}
- if (!isEqual(this.options.variables, this.variables)) {
+ if (!equal(this.options.variables, this.variables)) {
// Update the existing options with new variables
this.options.variables = {
...this.options.variables,
@@ -339,12 +287,6 @@ export class ObservableQuery<
fetchMoreOptions: FetchMoreQueryOptions<TVariables, K> &
FetchMoreOptions<TData, TVariables>,
): Promise<ApolloQueryResult<TData>> {
- // early return if no update Query
- invariant(
- fetchMoreOptions.updateQuery,
- 'updateQuery option is required. This function defines how to update the query data with the new results.',
- );
-
const combinedOptions = {
...(fetchMoreOptions.query ? fetchMoreOptions : {
...this.options,
@@ -368,12 +310,14 @@ export class ObservableQuery<
)
.then(
fetchMoreResult => {
- this.updateQuery((previousResult: any) =>
- fetchMoreOptions.updateQuery(previousResult, {
- fetchMoreResult: fetchMoreResult.data as TData,
+ this.updateQuery((previousResult: any) => {
+ const data = fetchMoreResult.data as TData;
+ const { updateQuery } = fetchMoreOptions;
+ return updateQuery ? updateQuery(previousResult, {
+ fetchMoreResult: data,
variables: combinedOptions.variables as TVariables,
- }),
- );
+ }) : data;
+ });
this.queryManager.stopQuery(qid);
return fetchMoreResult as ApolloQueryResult<TData>;
},
@@ -502,7 +446,7 @@ export class ObservableQuery<
variables = variables || this.variables;
- if (!tryFetch && isEqual(variables, this.variables)) {
+ if (!tryFetch && equal(variables, this.variables)) {
// If we have no observers, then we don't actually want to make a network
// request. As soon as someone observes the query, the request will kick
// off. For now, we just store any changes. (See #1077)
@@ -545,11 +489,13 @@ export class ObservableQuery<
);
if (newResult) {
- queryManager.dataStore.markUpdateQueryResult(
- document,
+ queryManager.cache.write({
+ query: document,
+ result: newResult,
+ dataId: 'ROOT_QUERY',
variables,
- newResult,
- );
+ });
+
queryManager.broadcastQueries();
}
}
@@ -571,6 +517,9 @@ export class ObservableQuery<
this.lastResultSnapshot = this.queryManager.assumeImmutableResults
? newResult
: cloneDeep(newResult);
+ if (!isNonEmptyArray(newResult.errors)) {
+ delete this.lastError;
+ }
return previousResult;
}
@@ -651,7 +600,7 @@ export class ObservableQuery<
previousResult &&
fetchPolicy !== 'cache-only' &&
queryManager.transform(query).serverQuery &&
- !isEqual(previousVariables, variables)
+ !equal(previousVariables, variables)
) {
this.refetch();
} else {
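One consequence of the changes above: fetchMoreOptions.updateQuery is now optional, and the incoming data is used as-is when it is omitted. A hedged sketch against an assumed paginated items field:

```ts
import { ObservableQuery } from '@apollo/client';

interface ItemsResult {
  items: { __typename: string; id: string }[];
}

// `oq` is assumed to watch a paginated query with an $offset variable.
function loadNextPage(oq: ObservableQuery<ItemsResult>, offset: number) {
  return oq.fetchMore({
    variables: { offset },
    // optional now -- when omitted, fetchMoreResult.data is written back
    // unchanged; here we still concatenate pages explicitly
    updateQuery: (prev, { fetchMoreResult }) =>
      fetchMoreResult
        ? { items: [...prev.items, ...fetchMoreResult.items] }
        : prev,
  });
}
```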
diff --git a/packages/apollo-client/src/core/QueryManager.ts b/src/core/QueryManager.ts
similarity index 86%
rename from packages/apollo-client/src/core/QueryManager.ts
rename to src/core/QueryManager.ts
--- a/packages/apollo-client/src/core/QueryManager.ts
+++ b/src/core/QueryManager.ts
@@ -1,25 +1,31 @@
-import { execute, ApolloLink, FetchResult } from 'apollo-link';
import { ExecutionResult, DocumentNode } from 'graphql';
-import { Cache } from 'apollo-cache';
+import { invariant, InvariantError } from 'ts-invariant';
+
+import { ApolloLink } from '../link/core/ApolloLink';
+import { execute } from '../link/core/execute';
+import { FetchResult } from '../link/core/types';
+import { Cache } from '../cache/core/types/Cache';
+import { DataProxy } from '../cache/core/types/DataProxy';
+
import {
getDefaultValues,
getOperationDefinition,
getOperationName,
+} from '../utilities/graphql/getFromAST';
+import {
hasDirectives,
- graphQLResultHasError,
hasClientExports,
- removeConnectionDirectiveFromDocument,
- canUseWeakMap,
-} from 'apollo-utilities';
-
-import { invariant, InvariantError } from 'ts-invariant';
-
+} from '../utilities/graphql/directives';
+import {
+ graphQLResultHasError,
+ tryFunctionOrLogError,
+} from '../utilities/common/errorHandling';
+import { removeConnectionDirectiveFromDocument } from '../utilities/graphql/transform';
+import { canUseWeakMap } from '../utilities/common/canUse';
import { isApolloError, ApolloError } from '../errors/ApolloError';
-import { Observer, Subscription, Observable } from '../util/Observable';
-import { QueryWithUpdater, DataStore } from '../data/store';
+import { Observer, Subscription, Observable } from '../utilities/observables/Observable';
import { MutationStore } from '../data/mutations';
import { QueryStore, QueryStoreValue } from '../data/queries';
-
import {
QueryOptions,
WatchQueryOptions,
@@ -34,10 +40,12 @@ import {
ApolloQueryResult,
FetchType,
OperationVariables,
+ MutationQueryReducer,
} from './types';
import { LocalState } from './LocalState';
-import { asyncMap, multiplex } from '../util/observables';
-import { isNonEmptyArray } from '../util/arrays';
+import { asyncMap, multiplex } from '../utilities/observables/observables';
+import { isNonEmptyArray } from '../utilities/common/arrays';
+import { ApolloCache } from '../cache/core/cache';
const { hasOwnProperty } = Object.prototype;
@@ -55,11 +63,16 @@ export interface QueryInfo {
cancel?: () => void;
}
+type QueryWithUpdater = {
+ updater: MutationQueryReducer<Object>;
+ query: QueryStoreValue;
+};
+
export class QueryManager<TStore> {
+ public cache: ApolloCache<TStore>;
public link: ApolloLink;
public mutationStore: MutationStore = new MutationStore();
public queryStore: QueryStore = new QueryStore();
- public dataStore: DataStore<TStore>;
public readonly assumeImmutableResults: boolean;
private queryDeduplication: boolean;
@@ -85,30 +98,30 @@ export class QueryManager<TStore> {
private fetchQueryRejectFns = new Map<string, Function>();
constructor({
+ cache,
link,
queryDeduplication = false,
- store,
onBroadcast = () => undefined,
ssrMode = false,
clientAwareness = {},
localState,
assumeImmutableResults,
}: {
+ cache: ApolloCache<TStore>;
link: ApolloLink;
queryDeduplication?: boolean;
- store: DataStore<TStore>;
onBroadcast?: () => void;
ssrMode?: boolean;
clientAwareness?: Record<string, string>;
localState?: LocalState<TStore>;
assumeImmutableResults?: boolean;
}) {
+ this.cache = cache;
this.link = link;
this.queryDeduplication = queryDeduplication;
- this.dataStore = store;
this.onBroadcast = onBroadcast;
this.clientAwareness = clientAwareness;
- this.localState = localState || new LocalState({ cache: store.getCache() });
+ this.localState = localState || new LocalState({ cache });
this.ssrMode = ssrMode;
this.assumeImmutableResults = !!assumeImmutableResults;
}
@@ -194,14 +207,22 @@ export class QueryManager<TStore> {
variables,
);
- this.dataStore.markMutationInit({
- mutationId,
- document: mutation,
- variables,
- updateQueries: generateUpdateQueriesInfo(),
- update: updateWithProxyFn,
- optimisticResponse,
- });
+ if (optimisticResponse) {
+ const optimistic = typeof optimisticResponse === 'function'
+ ? optimisticResponse(variables)
+ : optimisticResponse;
+
+ this.cache.recordOptimisticTransaction(cache => {
+ markMutationResult({
+ mutationId: mutationId,
+ result: { data: optimistic },
+ document: mutation,
+ variables: variables,
+ queryUpdatersById: generateUpdateQueriesInfo(),
+ update: updateWithProxyFn,
+ }, cache);
+ }, mutationId);
+ }
this.broadcastQueries();
@@ -231,14 +252,14 @@ export class QueryManager<TStore> {
self.mutationStore.markMutationResult(mutationId);
if (fetchPolicy !== 'no-cache') {
- self.dataStore.markMutationResult({
+ markMutationResult({
mutationId,
result,
document: mutation,
variables,
- updateQueries: generateUpdateQueriesInfo(),
+ queryUpdatersById: generateUpdateQueriesInfo(),
update: updateWithProxyFn,
- });
+ }, self.cache);
}
storeResult = result as FetchResult<T>;
@@ -246,10 +267,9 @@ export class QueryManager<TStore> {
error(err: Error) {
self.mutationStore.markMutationError(mutationId, err);
- self.dataStore.markMutationComplete({
- mutationId,
- optimisticResponse,
- });
+ if (optimisticResponse) {
+ self.cache.removeOptimistic(mutationId);
+ }
self.broadcastQueries();
self.setQuery(mutationId, () => ({ document: null }));
reject(
@@ -264,10 +284,9 @@ export class QueryManager<TStore> {
self.mutationStore.markMutationError(mutationId, error);
}
- self.dataStore.markMutationComplete({
- mutationId,
- optimisticResponse,
- });
+ if (optimisticResponse) {
+ self.cache.removeOptimistic(mutationId);
+ }
self.broadcastQueries();
@@ -366,7 +385,7 @@ export class QueryManager<TStore> {
// Unless we are completely skipping the cache, we want to diff the query
// against the cache before we fetch it from the network interface.
if (!isNetworkOnly) {
- const { complete, result } = this.dataStore.getCache().diff({
+ const { complete, result } = this.cache.diff({
query,
variables,
returnPartialData: true,
@@ -412,9 +431,9 @@ export class QueryManager<TStore> {
fetchMoreForQueryId,
});
- this.broadcastQueries();
-
if (shouldFetch) {
+ this.broadcastQueries();
+
const networkResult = this.fetchRequest<T>({
requestId,
queryId,
@@ -495,13 +514,22 @@ export class QueryManager<TStore> {
newData: { result: result.data, complete: true },
}));
} else {
- this.dataStore.markQueryResult(
- result,
- this.getQuery(queryId).document!,
- variables,
- fetchMoreForQueryId,
- errorPolicy === 'ignore' || errorPolicy === 'all',
- );
+ const document = this.getQuery(queryId).document!;
+ const ignoreErrors = errorPolicy === 'ignore' || errorPolicy === 'all';
+
+ let writeWithErrors = !graphQLResultHasError(result);
+ if (ignoreErrors && graphQLResultHasError(result) && result.data) {
+ writeWithErrors = true;
+ }
+
+ if (!fetchMoreForQueryId && writeWithErrors) {
+ this.cache.write({
+ result: result.data,
+ dataId: 'ROOT_QUERY',
+ query: document,
+ variables: variables,
+ });
+ }
}
}
@@ -607,7 +635,7 @@ export class QueryManager<TStore> {
data = lastResult.data;
isMissing = false;
} else {
- const diffResult = this.dataStore.getCache().diff({
+ const diffResult = this.cache.diff({
query: document as DocumentNode,
variables:
queryStoreValue.previousVariables ||
@@ -626,6 +654,7 @@ export class QueryManager<TStore> {
// result and mark it as stale.
const stale = isMissing && !(
options.returnPartialData ||
+ options.partialRefetch ||
fetchPolicy === 'cache-only'
);
@@ -665,10 +694,9 @@ export class QueryManager<TStore> {
const { transformCache } = this;
if (!transformCache.has(document)) {
- const cache = this.dataStore.getCache();
- const transformed = cache.transformDocument(document);
+ const transformed = this.cache.transformDocument(document);
const forLink = removeConnectionDirectiveFromDocument(
- cache.transformForLink(transformed));
+ this.cache.transformForLink(transformed));
const clientQuery = this.localState.clientQuery(transformed);
const serverQuery = this.localState.serverQuery(forLink);
@@ -738,11 +766,28 @@ export class QueryManager<TStore> {
let transformedOptions = { ...options } as WatchQueryOptions<TVariables>;
- return new ObservableQuery<T, TVariables>({
+ const observable = new ObservableQuery<T, TVariables>({
queryManager: this,
options: transformedOptions,
shouldSubscribe: shouldSubscribe,
});
+
+ this.queryStore.initQuery({
+ queryId: observable.queryId,
+ document: this.transform(options.query).document,
+ variables: options.variables,
+ storePreviousVariables: false,
+ // Even if options.pollInterval is a number, we have not started
+ // polling this query yet (and we have not yet performed the first
+ // fetch), so NetworkStatus.loading (not NetworkStatus.poll or
+ // NetworkStatus.refetch) is the appropriate status for now.
+ isPoll: false,
+ isRefetch: false,
+ metadata: options.metadata,
+ fetchMoreForQueryId: void 0,
+ });
+
+ return observable;
}
public query<T>(options: QueryOptions): Promise<ApolloQueryResult<T>> {
@@ -826,7 +871,7 @@ export class QueryManager<TStore> {
return previousResult;
};
- return this.dataStore.getCache().watch({
+ return this.cache.watch({
query: document as DocumentNode,
variables: options.variables,
optimistic: true,
@@ -873,7 +918,7 @@ export class QueryManager<TStore> {
this.mutationStore.reset();
// begin removing data from the store
- return this.dataStore.reset();
+ return this.cache.reset();
}
public resetStore(): Promise<ApolloQueryResult<any>[]> {
@@ -927,23 +972,6 @@ export class QueryManager<TStore> {
return this.fetchQuery<T>(queryId, options);
}
- public startQuery<T>(
- queryId: string,
- options: WatchQueryOptions,
- listener: QueryListener,
- ) {
- invariant.warn("The QueryManager.startQuery method has been deprecated");
-
- this.addQueryListener(queryId, listener);
-
- this.fetchQuery<T>(queryId, options)
-      // `fetchQuery` returns a Promise. In case of a failure it should be caught or else the
- // console will show an `Uncaught (in promise)` message. Ignore the error for now.
- .catch(() => undefined);
-
- return queryId;
- }
-
public startGraphQLSubscription<T = any>({
query,
fetchPolicy,
@@ -960,11 +988,17 @@ export class QueryManager<TStore> {
false,
).map(result => {
if (!fetchPolicy || fetchPolicy !== 'no-cache') {
- this.dataStore.markSubscriptionResult(
- result,
- query,
- variables,
- );
+ // the subscription interface should handle not sending us results we no longer subscribe to.
+ // XXX I don't think we ever send in an object with errors, but we might in the future...
+ if (!graphQLResultHasError(result)) {
+ this.cache.write({
+ query,
+ result: result.data,
+ dataId: 'ROOT_SUBSCRIPTION',
+ variables: variables,
+ });
+ }
+
this.broadcastQueries();
}
@@ -1018,7 +1052,7 @@ export class QueryManager<TStore> {
this.queries.delete(queryId);
}
- public getCurrentQueryResult<T>(
+ private getCurrentQueryResult<T>(
observableQuery: ObservableQuery<T>,
optimistic: boolean = true,
): {
@@ -1037,7 +1071,7 @@ export class QueryManager<TStore> {
return { data: undefined, partial: false };
}
- const { result, complete } = this.dataStore.getCache().diff<T>({
+ const { result, complete } = this.cache.diff<T>({
query,
variables,
previousResult: lastResult ? lastResult.data : undefined,
@@ -1060,14 +1094,14 @@ export class QueryManager<TStore> {
} {
let observableQuery: ObservableQuery<TData, any>;
if (typeof queryIdOrObservable === 'string') {
- const { observableQuery: foundObserveableQuery } = this.getQuery(
+ const { observableQuery: foundObservableQuery } = this.getQuery(
queryIdOrObservable,
);
invariant(
- foundObserveableQuery,
+ foundObservableQuery,
`ObservableQuery with this id doesn't exist: ${queryIdOrObservable}`
);
- observableQuery = foundObserveableQuery!;
+ observableQuery = foundObservableQuery!;
} else {
observableQuery = queryIdOrObservable;
}
@@ -1253,7 +1287,7 @@ export class QueryManager<TStore> {
resultFromStore = result.data;
} else {
// ensure result is combined with data already in store
- const { result, complete } = this.dataStore.getCache().diff<T>({
+ const { result, complete } = this.cache.diff<T>({
variables,
query: document,
optimistic: false,
@@ -1406,3 +1440,73 @@ export class QueryManager<TStore> {
this.pollingInfoByQueryId.delete(queryId);
}
}
+
+function markMutationResult<TStore>(
+ mutation: {
+ mutationId: string;
+ result: ExecutionResult;
+ document: DocumentNode;
+ variables: any;
+ queryUpdatersById: Record<string, QueryWithUpdater>;
+ update: ((proxy: DataProxy, mutationResult: Object) => void) | undefined;
+ },
+ cache: ApolloCache<TStore>,
+) {
+ // Incorporate the result from this mutation into the store
+ if (!graphQLResultHasError(mutation.result)) {
+ const cacheWrites: Cache.WriteOptions[] = [{
+ result: mutation.result.data,
+ dataId: 'ROOT_MUTATION',
+ query: mutation.document,
+ variables: mutation.variables,
+ }];
+
+ const { queryUpdatersById } = mutation;
+ if (queryUpdatersById) {
+ Object.keys(queryUpdatersById).forEach(id => {
+ const { query, updater } = queryUpdatersById[id];
+
+ // Read the current query result from the store.
+ const { result: currentQueryResult, complete } = cache.diff({
+ query: query.document,
+ variables: query.variables,
+ returnPartialData: true,
+ optimistic: false,
+ });
+
+ if (complete) {
+ // Run our reducer using the current query result and the mutation result.
+ const nextQueryResult = tryFunctionOrLogError(
+ () => updater(currentQueryResult, {
+ mutationResult: mutation.result,
+ queryName: getOperationName(query.document) || undefined,
+ queryVariables: query.variables,
+ }),
+ );
+
+ // Write the modified result back into the store if we got a new result.
+ if (nextQueryResult) {
+ cacheWrites.push({
+ result: nextQueryResult,
+ dataId: 'ROOT_QUERY',
+ query: query.document,
+ variables: query.variables,
+ });
+ }
+ }
+ });
+ }
+
+ cache.performTransaction(c => {
+ cacheWrites.forEach(write => c.write(write));
+
+ // If the mutation has some writes associated with it then we need to
+ // apply those writes to the store by running this reducer again with a
+ // write action.
+ const { update } = mutation;
+ if (update) {
+ tryFunctionOrLogError(() => update(c, mutation.result));
+ }
+ });
+ }
+}
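markMutationResult batches the mutation write, any updateQueries writes, and the user's update callback into one cache.performTransaction. A sketch of the calling side (the client, the Todo schema, and a pre-cached Todos query are all assumptions):

```ts
import { ApolloClient, gql } from '@apollo/client';

const TODOS = gql`query Todos { todos { __typename id text } }`;

function addTodo(client: ApolloClient<any>, text: string) {
  return client.mutate({
    mutation: gql`
      mutation AddTodo($text: String!) {
        addTodo(text: $text) { __typename id text }
      }
    `,
    variables: { text },
    // recorded via recordOptimisticTransaction, then rolled back with
    // removeOptimistic(mutationId) once the real result arrives
    optimisticResponse: {
      addTodo: { __typename: 'Todo', id: 'optimistic', text },
    },
    // runs inside cache.performTransaction, alongside any queued writes;
    // assumes the Todos query is already in the cache
    update(proxy, { data }) {
      const prev = proxy.readQuery<{ todos: any[] }>({ query: TODOS });
      if (prev && data) {
        proxy.writeQuery({
          query: TODOS,
          data: { todos: [...prev.todos, data.addTodo] },
        });
      }
    },
  });
}
```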
diff --git a/src/core/index.ts b/src/core/index.ts
new file mode 100644
--- /dev/null
+++ b/src/core/index.ts
@@ -0,0 +1,77 @@
+/* Core */
+
+export {
+ ApolloClient,
+ ApolloClientOptions,
+ DefaultOptions
+} from '../ApolloClient';
+export {
+ ObservableQuery,
+ FetchMoreOptions,
+ UpdateQueryOptions,
+ ApolloCurrentQueryResult,
+} from '../core/ObservableQuery';
+export {
+ QueryBaseOptions,
+ QueryOptions,
+ WatchQueryOptions,
+ MutationOptions,
+ SubscriptionOptions,
+ FetchPolicy,
+ WatchQueryFetchPolicy,
+ ErrorPolicy,
+ FetchMoreQueryOptions,
+ SubscribeToMoreOptions,
+ MutationUpdaterFn,
+} from '../core/watchQueryOptions';
+export { NetworkStatus } from '../core/networkStatus';
+export * from '../core/types';
+export {
+ Resolver,
+ FragmentMatcher as LocalStateFragmentMatcher,
+} from '../core/LocalState';
+export { isApolloError, ApolloError } from '../errors/ApolloError';
+
+/* Cache */
+
+export * from '../cache';
+
+/* Link */
+
+export { empty } from '../link/core/empty';
+export { from } from '../link/core/from';
+export { split } from '../link/core/split';
+export { concat } from '../link/core/concat';
+export { execute } from '../link/core/execute';
+export { ApolloLink } from '../link/core/ApolloLink';
+export * from '../link/core/types';
+export {
+ parseAndCheckHttpResponse,
+ ServerParseError
+} from '../link/http/parseAndCheckHttpResponse';
+export {
+ serializeFetchParameter,
+ ClientParseError
+} from '../link/http/serializeFetchParameter';
+export {
+ HttpOptions,
+ fallbackHttpConfig,
+ selectHttpOptionsAndBody,
+ UriFunction
+} from '../link/http/selectHttpOptionsAndBody';
+export { checkFetcher } from '../link/http/checkFetcher';
+export { createSignalIfSupported } from '../link/http/createSignalIfSupported';
+export { selectURI } from '../link/http/selectURI';
+export { createHttpLink } from '../link/http/createHttpLink';
+export { HttpLink } from '../link/http/HttpLink';
+export { fromError } from '../link/utils/fromError';
+export { ServerError, throwServerError } from '../link/utils/throwServerError';
+
+/* Utilities */
+
+export { Observable } from '../utilities/observables/Observable';
+export { getMainDefinition } from '../utilities/graphql/getFromAST';
+
+/* Supporting */
+
+export { default as gql } from 'graphql-tag';
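Everything above funnels into a single entry point, so a typical setup no longer needs separate apollo-link-http or apollo-cache-inmemory packages:

```ts
import { ApolloClient, HttpLink, InMemoryCache, gql } from '@apollo/client';

const client = new ApolloClient({
  cache: new InMemoryCache(),
  link: new HttpLink({ uri: 'https://example.com/graphql' }),
});

client
  .query({ query: gql`query Ping { __typename }` })
  .then(result => console.log(result.data));
```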
diff --git a/packages/apollo-client/src/core/networkStatus.ts b/src/core/networkStatus.ts
similarity index 100%
rename from packages/apollo-client/src/core/networkStatus.ts
rename to src/core/networkStatus.ts
diff --git a/packages/apollo-client/src/core/types.ts b/src/core/types.ts
similarity index 91%
rename from packages/apollo-client/src/core/types.ts
rename to src/core/types.ts
--- a/packages/apollo-client/src/core/types.ts
+++ b/src/core/types.ts
@@ -1,6 +1,6 @@
-import { FetchResult } from 'apollo-link';
import { DocumentNode, GraphQLError } from 'graphql';
+import { FetchResult } from '../link/core/types';
import { QueryStoreValue } from '../data/queries';
import { NetworkStatus } from './networkStatus';
import { Resolver } from './LocalState';
@@ -11,7 +11,7 @@ export type QueryListener = (
forceResolvers?: boolean,
) => void;
-export type OperationVariables = { [key: string]: any };
+export type OperationVariables = Record<string, any>;
export type PureQueryOptions = {
query: DocumentNode;
diff --git a/packages/apollo-client/src/core/watchQueryOptions.ts b/src/core/watchQueryOptions.ts
similarity index 96%
rename from packages/apollo-client/src/core/watchQueryOptions.ts
rename to src/core/watchQueryOptions.ts
--- a/packages/apollo-client/src/core/watchQueryOptions.ts
+++ b/src/core/watchQueryOptions.ts
@@ -1,9 +1,8 @@
import { DocumentNode, ExecutionResult } from 'graphql';
-import { FetchResult } from 'apollo-link';
-import { DataProxy } from 'apollo-cache';
+import { FetchResult } from '../link/core/types';
+import { DataProxy } from '../cache/core/types/DataProxy';
import { MutationQueryReducersMap } from './types';
-
import { PureQueryOptions, OperationVariables } from './types';
/**
@@ -104,6 +103,13 @@ export interface ModifiableWatchQueryOptions<TVariables = OperationVariables>
* be fully satisfied by the cache, instead of returning nothing.
*/
returnPartialData?: boolean;
+
+ /**
+ * If `true`, perform a query `refetch` if the query result is marked as
+ * being partial, and the returned data is reset to an empty Object by the
+ * Apollo Client `QueryManager` (due to a cache miss).
+ */
+ partialRefetch?: boolean;
}
/**
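A short sketch of opting in to the new partialRefetch flag (client construction is omitted; the Profile query is an assumption):

```ts
import { ApolloClient, gql } from '@apollo/client';

function watchProfile(client: ApolloClient<any>) {
  return client.watchQuery({
    query: gql`query Profile { me { __typename id name } }`,
    // on a cache miss that yields only partial data, refetch from the
    // network instead of surfacing an empty result object
    partialRefetch: true,
  });
}
```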
diff --git a/packages/apollo-client/src/data/mutations.ts b/src/data/mutations.ts
similarity index 100%
rename from packages/apollo-client/src/data/mutations.ts
rename to src/data/mutations.ts
diff --git a/packages/apollo-client/src/data/queries.ts b/src/data/queries.ts
similarity index 96%
rename from packages/apollo-client/src/data/queries.ts
rename to src/data/queries.ts
--- a/packages/apollo-client/src/data/queries.ts
+++ b/src/data/queries.ts
@@ -1,8 +1,9 @@
import { DocumentNode, GraphQLError, ExecutionResult } from 'graphql';
-import { isEqual } from 'apollo-utilities';
import { invariant } from 'ts-invariant';
+
+import { equal } from '@wry/equality';
import { NetworkStatus } from '../core/networkStatus';
-import { isNonEmptyArray } from '../util/arrays';
+import { isNonEmptyArray } from '../utilities/common/arrays';
export type QueryStoreValue = {
document: DocumentNode;
@@ -43,7 +44,7 @@ export class QueryStore {
invariant(
!previousQuery ||
previousQuery.document === query.document ||
- isEqual(previousQuery.document, query.document),
+ equal(previousQuery.document, query.document),
'Internal Error: may not update existing query string in store',
);
@@ -56,7 +57,7 @@ export class QueryStore {
previousQuery.networkStatus !== NetworkStatus.loading
// if the previous query was still loading, we don't want to remember it at all.
) {
- if (!isEqual(previousQuery.variables, query.variables)) {
+ if (!equal(previousQuery.variables, query.variables)) {
isSetVariables = true;
previousVariables = previousQuery.variables;
}
diff --git a/packages/apollo-client/src/errors/ApolloError.ts b/src/errors/ApolloError.ts
similarity index 97%
rename from packages/apollo-client/src/errors/ApolloError.ts
rename to src/errors/ApolloError.ts
--- a/packages/apollo-client/src/errors/ApolloError.ts
+++ b/src/errors/ApolloError.ts
@@ -1,5 +1,6 @@
import { GraphQLError } from 'graphql';
-import { isNonEmptyArray } from '../util/arrays';
+
+import { isNonEmptyArray } from '../utilities/common/arrays';
export function isApolloError(err: Error): err is ApolloError {
return err.hasOwnProperty('graphQLErrors');
diff --git a/src/index.ts b/src/index.ts
new file mode 100644
--- /dev/null
+++ b/src/index.ts
@@ -0,0 +1,2 @@
+export * from './core';
+export * from './react';
diff --git a/src/link/core/ApolloLink.ts b/src/link/core/ApolloLink.ts
new file mode 100644
--- /dev/null
+++ b/src/link/core/ApolloLink.ts
@@ -0,0 +1,150 @@
+import { InvariantError, invariant } from 'ts-invariant';
+
+import { Observable } from '../../utilities/observables/Observable';
+import {
+ NextLink,
+ Operation,
+ RequestHandler,
+ FetchResult,
+ GraphQLRequest
+} from './types';
+import { validateOperation } from '../utils/validateOperation';
+import { createOperation } from '../utils/createOperation';
+import { transformOperation } from '../utils/transformOperation';
+
+function passthrough(op: Operation, forward: NextLink) {
+ return forward ? forward(op) : Observable.of();
+}
+
+function toLink(handler: RequestHandler | ApolloLink) {
+ return typeof handler === 'function' ? new ApolloLink(handler) : handler;
+}
+
+function isTerminating(link: ApolloLink): boolean {
+ return link.request.length <= 1;
+}
+
+class LinkError extends Error {
+ public link: ApolloLink;
+ constructor(message?: string, link?: ApolloLink) {
+ super(message);
+ this.link = link;
+ }
+}
+
+export class ApolloLink {
+ public static empty(): ApolloLink {
+ return new ApolloLink(() => Observable.of());
+ }
+
+ public static from(links: ApolloLink[]): ApolloLink {
+ if (links.length === 0) return ApolloLink.empty();
+ return links.map(toLink).reduce((x, y) => x.concat(y)) as ApolloLink;
+ }
+
+ public static split(
+ test: (op: Operation) => boolean,
+ left: ApolloLink | RequestHandler,
+ right?: ApolloLink | RequestHandler,
+ ): ApolloLink {
+ const leftLink = toLink(left);
+ const rightLink = toLink(right || new ApolloLink(passthrough));
+
+ if (isTerminating(leftLink) && isTerminating(rightLink)) {
+ return new ApolloLink(operation => {
+ return test(operation)
+ ? leftLink.request(operation) || Observable.of()
+ : rightLink.request(operation) || Observable.of();
+ });
+ } else {
+ return new ApolloLink((operation, forward) => {
+ return test(operation)
+ ? leftLink.request(operation, forward) || Observable.of()
+ : rightLink.request(operation, forward) || Observable.of();
+ });
+ }
+ }
+
+ public static execute(
+ link: ApolloLink,
+ operation: GraphQLRequest,
+ ): Observable<FetchResult> {
+ return (
+ link.request(
+ createOperation(
+ operation.context,
+ transformOperation(validateOperation(operation)),
+ ),
+ ) || Observable.of()
+ );
+ }
+
+ public static concat(
+ first: ApolloLink | RequestHandler,
+ second: ApolloLink | RequestHandler,
+ ) {
+ const firstLink = toLink(first);
+ if (isTerminating(firstLink)) {
+ invariant.warn(
+ new LinkError(
+ `You are calling concat on a terminating link, which will have no effect`,
+ firstLink,
+ ),
+ );
+ return firstLink;
+ }
+ const nextLink = toLink(second);
+
+ if (isTerminating(nextLink)) {
+ return new ApolloLink(
+ operation =>
+ firstLink.request(
+ operation,
+ op => nextLink.request(op) || Observable.of(),
+ ) || Observable.of(),
+ );
+ } else {
+ return new ApolloLink((operation, forward) => {
+ return (
+ firstLink.request(operation, op => {
+ return nextLink.request(op, forward) || Observable.of();
+ }) || Observable.of()
+ );
+ });
+ }
+ }
+
+ constructor(request?: RequestHandler) {
+ if (request) this.request = request;
+ }
+
+ public split(
+ test: (op: Operation) => boolean,
+ left: ApolloLink | RequestHandler,
+ right?: ApolloLink | RequestHandler,
+ ): ApolloLink {
+ return this.concat(
+ ApolloLink.split(test, left, right || new ApolloLink(passthrough))
+ );
+ }
+
+ public concat(next: ApolloLink | RequestHandler): ApolloLink {
+ return ApolloLink.concat(this, next);
+ }
+
+ public request(
+ operation: Operation,
+ forward?: NextLink,
+ ): Observable<FetchResult> | null {
+ throw new InvariantError('request is not implemented');
+ }
+
+ protected onError(reason: any) {
+ throw reason;
+ }
+
+ public setOnError(fn: (reason: any) => any): this {
+ this.onError = fn;
+ return this;
+ }
+}
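A minimal sketch of composing links with the helpers defined above; isTerminating() keys off request.length, so a handler declaring at most one parameter terminates the chain:

```ts
import { ApolloLink, Observable, execute, gql } from '@apollo/client';

const logger = new ApolloLink((operation, forward) => {
  console.log('request:', operation.operationName);
  return forward(operation); // two parameters => non-terminating
});

const mock = new ApolloLink(() =>
  // zero parameters => terminating; never calls forward
  Observable.of({ data: { __typename: 'Query' } }),
);

const link = ApolloLink.from([logger, mock]); // same as logger.concat(mock)

execute(link, { query: gql`query Ping { __typename }` }).subscribe(console.log);
```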
diff --git a/src/link/core/concat.ts b/src/link/core/concat.ts
new file mode 100644
--- /dev/null
+++ b/src/link/core/concat.ts
@@ -0,0 +1,3 @@
+import { ApolloLink } from './ApolloLink';
+
+export const concat = ApolloLink.concat;
diff --git a/src/link/core/empty.ts b/src/link/core/empty.ts
new file mode 100644
--- /dev/null
+++ b/src/link/core/empty.ts
@@ -0,0 +1,3 @@
+import { ApolloLink } from './ApolloLink';
+
+export const empty = ApolloLink.empty;
diff --git a/src/link/core/execute.ts b/src/link/core/execute.ts
new file mode 100644
--- /dev/null
+++ b/src/link/core/execute.ts
@@ -0,0 +1,3 @@
+import { ApolloLink } from './ApolloLink';
+
+export const execute = ApolloLink.execute;
diff --git a/src/link/core/from.ts b/src/link/core/from.ts
new file mode 100644
--- /dev/null
+++ b/src/link/core/from.ts
@@ -0,0 +1,3 @@
+import { ApolloLink } from './ApolloLink';
+
+export const from = ApolloLink.from;
diff --git a/src/link/core/split.ts b/src/link/core/split.ts
new file mode 100644
--- /dev/null
+++ b/src/link/core/split.ts
@@ -0,0 +1,3 @@
+import { ApolloLink } from './ApolloLink';
+
+export const split = ApolloLink.split;
diff --git a/src/link/core/types.ts b/src/link/core/types.ts
new file mode 100644
--- /dev/null
+++ b/src/link/core/types.ts
@@ -0,0 +1,38 @@
+import { DocumentNode } from 'graphql/language/ast';
+import { ExecutionResult } from 'graphql/execution/execute';
+export { ExecutionResult, DocumentNode };
+
+import { Observable } from '../../utilities/observables/Observable';
+
+export interface GraphQLRequest {
+ query: DocumentNode;
+ variables?: Record<string, any>;
+ operationName?: string;
+ context?: Record<string, any>;
+ extensions?: Record<string, any>;
+}
+
+export interface Operation {
+ query: DocumentNode;
+ variables: Record<string, any>;
+ operationName: string;
+ extensions: Record<string, any>;
+ setContext: (context: Record<string, any>) => Record<string, any>;
+ getContext: () => Record<string, any>;
+}
+
+export type FetchResult<
+ TData = { [key: string]: any },
+ C = Record<string, any>,
+ E = Record<string, any>
+> = ExecutionResult<TData> & {
+ extensions?: E;
+ context?: C;
+};
+
+export type NextLink = (operation: Operation) => Observable<FetchResult>;
+
+export type RequestHandler = (
+ operation: Operation,
+ forward: NextLink,
+) => Observable<FetchResult> | null;
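A RequestHandler is just a function with this signature; toLink() in ApolloLink.ts above wraps it into a link when needed. For example:

```ts
import { ApolloLink, RequestHandler } from '@apollo/client';

// stamps each operation's context before handing off to the next link;
// declaring `forward` makes this a non-terminating handler
const withStartTime: RequestHandler = (operation, forward) => {
  operation.setContext({ startedAt: Date.now() });
  return forward(operation);
};

const timingLink = new ApolloLink(withStartTime);
```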
diff --git a/src/link/http/HttpLink.ts b/src/link/http/HttpLink.ts
new file mode 100644
--- /dev/null
+++ b/src/link/http/HttpLink.ts
@@ -0,0 +1,11 @@
+import { ApolloLink } from '../core/ApolloLink';
+import { RequestHandler } from '../core/types';
+import { HttpOptions } from './selectHttpOptionsAndBody';
+import { createHttpLink } from './createHttpLink';
+
+export class HttpLink extends ApolloLink {
+ public requester: RequestHandler;
+ constructor(public options: HttpOptions = {}) {
+ super(createHttpLink(options).request);
+ }
+}
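HttpLink usage stays a one-liner; the options bag is simply forwarded to createHttpLink:

```ts
import { HttpLink } from '@apollo/client';

const httpLink = new HttpLink({
  uri: 'https://example.com/graphql',
  useGETForQueries: true, // queries go out as GET; mutations remain POST
  headers: { 'x-app-version': '1.0.0' },
});
```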
diff --git a/src/link/http/checkFetcher.ts b/src/link/http/checkFetcher.ts
new file mode 100644
--- /dev/null
+++ b/src/link/http/checkFetcher.ts
@@ -0,0 +1,20 @@
+import { InvariantError } from 'ts-invariant';
+
+export const checkFetcher = (fetcher: WindowOrWorkerGlobalScope['fetch']) => {
+ if (!fetcher && typeof fetch === 'undefined') {
+ let library: string = 'unfetch';
+ if (typeof window === 'undefined') library = 'node-fetch';
+ throw new InvariantError(
+ '"fetch" has not been found globally and no fetcher has been ' +
+ 'configured. To fix this, install a fetch package ' +
+ `(like https://www.npmjs.com/package/${library}), instantiate the ` +
+ 'fetcher, and pass it into your `HttpLink` constructor. For example:' +
+ '\n\n' +
+ `import fetch from '${library}';\n` +
+ "import { ApolloClient, HttpLink } from '@apollo/client';\n" +
+ 'const client = new ApolloClient({\n' +
+ " link: new HttpLink({ uri: '/graphq', fetch })\n" +
+ '});'
+ );
+ }
+};
diff --git a/src/link/http/createHttpLink.ts b/src/link/http/createHttpLink.ts
new file mode 100644
--- /dev/null
+++ b/src/link/http/createHttpLink.ts
@@ -0,0 +1,181 @@
+import { DefinitionNode } from 'graphql';
+
+import { Observable } from '../../utilities/observables/Observable';
+import { serializeFetchParameter } from './serializeFetchParameter';
+import { selectURI } from './selectURI';
+import { parseAndCheckHttpResponse } from './parseAndCheckHttpResponse';
+import { checkFetcher } from './checkFetcher';
+import {
+ selectHttpOptionsAndBody,
+ fallbackHttpConfig,
+ HttpOptions
+} from './selectHttpOptionsAndBody';
+import { createSignalIfSupported } from './createSignalIfSupported';
+import { rewriteURIForGET } from './rewriteURIForGET';
+import { ApolloLink } from '../core/ApolloLink';
+import { fromError } from '../utils/fromError';
+
+export const createHttpLink = (linkOptions: HttpOptions = {}) => {
+ let {
+ uri = '/graphql',
+ // use default global fetch if nothing passed in
+ fetch: fetcher,
+ includeExtensions,
+ useGETForQueries,
+ ...requestOptions
+ } = linkOptions;
+
+ // dev warnings to ensure fetch is present
+ checkFetcher(fetcher);
+
+  // fetcher is set here rather than in the destructuring to ensure fetch is
+  // declared before referencing it; referencing it in the destructuring
+  // default would cause a ReferenceError
+ if (!fetcher) {
+ fetcher = fetch;
+ }
+
+ const linkConfig = {
+ http: { includeExtensions },
+ options: requestOptions.fetchOptions,
+ credentials: requestOptions.credentials,
+ headers: requestOptions.headers,
+ };
+
+ return new ApolloLink(operation => {
+ let chosenURI = selectURI(operation, uri);
+
+ const context = operation.getContext();
+
+ // `apollographql-client-*` headers are automatically set if a
+ // `clientAwareness` object is found in the context. These headers are
+ // set first, followed by the rest of the headers pulled from
+ // `context.headers`. If desired, `apollographql-client-*` headers set by
+ // the `clientAwareness` object can be overridden by
+ // `apollographql-client-*` headers set in `context.headers`.
+ const clientAwarenessHeaders: {
+ 'apollographql-client-name'?: string;
+ 'apollographql-client-version'?: string;
+ } = {};
+
+ if (context.clientAwareness) {
+ const { name, version } = context.clientAwareness;
+ if (name) {
+ clientAwarenessHeaders['apollographql-client-name'] = name;
+ }
+ if (version) {
+ clientAwarenessHeaders['apollographql-client-version'] = version;
+ }
+ }
+
+ const contextHeaders = { ...clientAwarenessHeaders, ...context.headers };
+
+ const contextConfig = {
+ http: context.http,
+ options: context.fetchOptions,
+ credentials: context.credentials,
+ headers: contextHeaders,
+ };
+
+    // uses fallback, link, and then context configs to build the options
+ const { options, body } = selectHttpOptionsAndBody(
+ operation,
+ fallbackHttpConfig,
+ linkConfig,
+ contextConfig,
+ );
+
+ let controller: any;
+ if (!(options as any).signal) {
+ const { controller: _controller, signal } = createSignalIfSupported();
+ controller = _controller;
+ if (controller) (options as any).signal = signal;
+ }
+
+ // If requested, set method to GET if there are no mutations.
+ const definitionIsMutation = (d: DefinitionNode) => {
+ return d.kind === 'OperationDefinition' && d.operation === 'mutation';
+ };
+ if (
+ useGETForQueries &&
+ !operation.query.definitions.some(definitionIsMutation)
+ ) {
+ options.method = 'GET';
+ }
+
+ if (options.method === 'GET') {
+ const { newURI, parseError } = rewriteURIForGET(chosenURI, body);
+ if (parseError) {
+ return fromError(parseError);
+ }
+ chosenURI = newURI;
+ } else {
+ try {
+ (options as any).body = serializeFetchParameter(body, 'Payload');
+ } catch (parseError) {
+ return fromError(parseError);
+ }
+ }
+
+ return new Observable(observer => {
+ fetcher(chosenURI, options)
+ .then(response => {
+ operation.setContext({ response });
+ return response;
+ })
+ .then(parseAndCheckHttpResponse(operation))
+ .then(result => {
+          // we have data and can send it back up the link chain
+ observer.next(result);
+ observer.complete();
+ return result;
+ })
+ .catch(err => {
+ // fetch was cancelled so it's already been cleaned up in the unsubscribe
+ if (err.name === 'AbortError') return;
+          // if it is a network error, BUT there is graphql result info,
+          // fire the next observer before calling error
+          // this gives apollo-client (and react-apollo) the `graphQLErrors`
+          // and `networkError` to pass to the UI
+          // this should only happen if we *also* have data as part of the
+          // response key, per the spec
+ if (err.result && err.result.errors && err.result.data) {
+ // if we don't call next, the UI can only show networkError because AC didn't
+ // get any graphqlErrors
+ // this is graphql execution result info (i.e errors and possibly data)
+ // this is because there is no formal spec how errors should translate to
+ // http status codes. So an auth error (401) could have both data
+ // from a public field, errors from a private field, and a status of 401
+ // {
+ // user { // this will have errors
+ // firstName
+ // }
+ // products { // this is public so will have data
+ // cost
+ // }
+ // }
+ //
+ // the result of above *could* look like this:
+ // {
+ // data: { products: [{ cost: "$10" }] },
+ // errors: [{
+ // message: 'your session has timed out',
+ // path: []
+ // }]
+ // }
+ // status code of above would be a 401
+ // in the UI you want to show data where you can, errors as data where you can
+ // and use correct http status codes
+ observer.next(err.result);
+ }
+ observer.error(err);
+ });
+
+ return () => {
+ // XXX support canceling this request
+ // https://developers.google.com/web/updates/2017/09/abortable-fetch
+ if (controller) controller.abort();
+ };
+ });
+ });
+};
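
A sketch of the header layering described above: the `clientAwareness` headers (populated by ApolloClient's `name`/`version` constructor options, an assumption carried over from the published API) are applied first, so the same header set via `context.headers` takes precedence. The query and values are illustrative:

```ts
import gql from 'graphql-tag';
import { ApolloClient, InMemoryCache, HttpLink } from '@apollo/client';

const client = new ApolloClient({
  cache: new InMemoryCache(),
  link: new HttpLink({ uri: '/graphql' }),
  // These populate context.clientAwareness for every operation.
  name: 'web-storefront',
  version: '1.2.0',
});

client.query({
  query: gql`query Ping { __typename }`,
  context: {
    // Spread after the clientAwareness headers, so this value wins.
    headers: { 'apollographql-client-name': 'web-storefront-beta' },
  },
});
```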
diff --git a/src/link/http/createSignalIfSupported.ts b/src/link/http/createSignalIfSupported.ts
new file mode 100644
--- /dev/null
+++ b/src/link/http/createSignalIfSupported.ts
@@ -0,0 +1,8 @@
+export const createSignalIfSupported = () => {
+ if (typeof AbortController === 'undefined')
+ return { controller: false, signal: false };
+
+ const controller = new AbortController();
+ const signal = controller.signal;
+ return { controller, signal };
+};
diff --git a/src/link/http/parseAndCheckHttpResponse.ts b/src/link/http/parseAndCheckHttpResponse.ts
new file mode 100644
--- /dev/null
+++ b/src/link/http/parseAndCheckHttpResponse.ts
@@ -0,0 +1,57 @@
+import { Operation } from '../core/types';
+import { throwServerError } from '../utils/throwServerError';
+
+const { hasOwnProperty } = Object.prototype;
+
+export type ServerParseError = Error & {
+ response: Response;
+ statusCode: number;
+ bodyText: string;
+};
+
+export function parseAndCheckHttpResponse(
+ operations: Operation | Operation[],
+) {
+ return (response: Response) => response
+ .text()
+ .then(bodyText => {
+ try {
+ return JSON.parse(bodyText);
+ } catch (err) {
+ const parseError = err as ServerParseError;
+ parseError.name = 'ServerParseError';
+ parseError.response = response;
+ parseError.statusCode = response.status;
+ parseError.bodyText = bodyText;
+ throw parseError;
+ }
+ })
+ .then((result: any) => {
+ if (response.status >= 300) {
+ // Network error
+ throwServerError(
+ response,
+ result,
+ `Response not successful: Received status code ${response.status}`,
+ );
+ }
+
+ if (
+ !Array.isArray(result) &&
+ !hasOwnProperty.call(result, 'data') &&
+ !hasOwnProperty.call(result, 'errors')
+ ) {
+ // Data error
+ throwServerError(
+ response,
+ result,
+ `Server response was missing for query '${
+ Array.isArray(operations)
+ ? operations.map(op => op.operationName)
+ : operations.operationName
+ }'.`,
+ );
+ }
+ return result;
+ });
+}
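
A sketch of telling apart the two failure shapes this function can throw; the helper name is hypothetical, and the import paths assume a module sitting alongside the files in this diff:

```ts
import { ServerError } from '../utils/throwServerError';
import { ServerParseError } from './parseAndCheckHttpResponse';

// Hypothetical helper: summarize an error produced by the response parser.
function describeHttpError(err: Error): string {
  if (err.name === 'ServerParseError') {
    const e = err as ServerParseError;
    return `HTTP ${e.statusCode}: body was not JSON: ${e.bodyText.slice(0, 80)}`;
  }
  if (err.name === 'ServerError') {
    const e = err as ServerError;
    return `HTTP ${e.statusCode}: ${JSON.stringify(e.result)}`;
  }
  return err.message;
}
```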
diff --git a/src/link/http/rewriteURIForGET.ts b/src/link/http/rewriteURIForGET.ts
new file mode 100644
--- /dev/null
+++ b/src/link/http/rewriteURIForGET.ts
@@ -0,0 +1,62 @@
+import { serializeFetchParameter } from './serializeFetchParameter';
+import { Body } from './selectHttpOptionsAndBody';
+
+// For GET operations, returns the given URI rewritten with parameters, or a
+// parse error.
+export function rewriteURIForGET(chosenURI: string, body: Body) {
+ // Implement the standard HTTP GET serialization, plus 'extensions'. Note
+ // the extra level of JSON serialization!
+ const queryParams: string[] = [];
+ const addQueryParam = (key: string, value: string) => {
+ queryParams.push(`${key}=${encodeURIComponent(value)}`);
+ };
+
+ if ('query' in body) {
+ addQueryParam('query', body.query);
+ }
+ if (body.operationName) {
+ addQueryParam('operationName', body.operationName);
+ }
+ if (body.variables) {
+ let serializedVariables;
+ try {
+ serializedVariables = serializeFetchParameter(
+ body.variables,
+ 'Variables map',
+ );
+ } catch (parseError) {
+ return { parseError };
+ }
+ addQueryParam('variables', serializedVariables);
+ }
+ if (body.extensions) {
+ let serializedExtensions;
+ try {
+ serializedExtensions = serializeFetchParameter(
+ body.extensions,
+ 'Extensions map',
+ );
+ } catch (parseError) {
+ return { parseError };
+ }
+ addQueryParam('extensions', serializedExtensions);
+ }
+
+ // Reconstruct the URI with added query params.
+ // XXX This assumes that the URI is well-formed and that it doesn't
+ // already contain any of these query params. We could instead use the
+ // URL API and take a polyfill (whatwg-url@6) for older browsers that
+ // don't support URLSearchParams. Note that some browsers (and
+ // versions of whatwg-url) support URL but not URLSearchParams!
+ let fragment = '',
+ preFragment = chosenURI;
+ const fragmentStart = chosenURI.indexOf('#');
+ if (fragmentStart !== -1) {
+ fragment = chosenURI.substr(fragmentStart);
+ preFragment = chosenURI.substr(0, fragmentStart);
+ }
+ const queryParamsPrefix = preFragment.indexOf('?') === -1 ? '?' : '&';
+ const newURI =
+ preFragment + queryParamsPrefix + queryParams.join('&') + fragment;
+ return { newURI };
+}
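
A concrete sketch of the serialization, including the extra JSON encoding of `variables` noted in the comment above and the fragment splicing at the end; the URI and operation are illustrative:

```ts
import { rewriteURIForGET } from './rewriteURIForGET';

const { newURI } = rewriteURIForGET('/graphql#top', {
  query: '{ user { id } }',
  operationName: 'GetUser',
  variables: { id: 1 },
});

// newURI === '/graphql'
//   + '?query=%7B%20user%20%7B%20id%20%7D%20%7D'
//   + '&operationName=GetUser'
//   + '&variables=%7B%22id%22%3A1%7D'  // JSON-stringified, then URI-encoded
//   + '#top'                           // fragment re-attached after the params
```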
diff --git a/src/link/http/selectHttpOptionsAndBody.ts b/src/link/http/selectHttpOptionsAndBody.ts
new file mode 100644
--- /dev/null
+++ b/src/link/http/selectHttpOptionsAndBody.ts
@@ -0,0 +1,138 @@
+import { print } from 'graphql/language/printer';
+
+import { Operation } from '../core/types';
+
+export interface UriFunction {
+ (operation: Operation): string;
+}
+
+export interface Body {
+ query?: string;
+ operationName?: string;
+ variables?: Record<string, any>;
+ extensions?: Record<string, any>;
+}
+
+export interface HttpOptions {
+ /**
+ * The URI to use when fetching operations.
+ *
+ * Defaults to '/graphql'.
+ */
+ uri?: string | UriFunction;
+
+ /**
+ * Passes the extensions field to your graphql server.
+ *
+ * Defaults to false.
+ */
+ includeExtensions?: boolean;
+
+ /**
+ * A `fetch`-compatible API to use when making requests.
+ */
+ fetch?: WindowOrWorkerGlobalScope['fetch'];
+
+ /**
+ * An object representing values to be sent as headers on the request.
+ */
+ headers?: any;
+
+ /**
+ * The credentials policy you want to use for the fetch call.
+ */
+ credentials?: string;
+
+ /**
+ * Any overrides of the fetch options argument to pass to the fetch call.
+ */
+ fetchOptions?: any;
+
+ /**
+ * If set to true, use the HTTP GET method for query operations. Mutations
+ * will still use the method specified in fetchOptions.method (which defaults
+ * to POST).
+ */
+ useGETForQueries?: boolean;
+}
+
+export interface HttpQueryOptions {
+ includeQuery?: boolean;
+ includeExtensions?: boolean;
+}
+
+export interface HttpConfig {
+ http?: HttpQueryOptions;
+ options?: any;
+ headers?: any;
+ credentials?: any;
+}
+
+const defaultHttpOptions: HttpQueryOptions = {
+ includeQuery: true,
+ includeExtensions: false,
+};
+
+const defaultHeaders = {
+ // headers are case insensitive (https://stackoverflow.com/a/5259004)
+ accept: '*/*',
+ 'content-type': 'application/json',
+};
+
+const defaultOptions = {
+ method: 'POST',
+};
+
+export const fallbackHttpConfig = {
+ http: defaultHttpOptions,
+ headers: defaultHeaders,
+ options: defaultOptions,
+};
+
+export const selectHttpOptionsAndBody = (
+ operation: Operation,
+ fallbackConfig: HttpConfig,
+ ...configs: Array<HttpConfig>
+) => {
+ let options: HttpConfig & Record<string, any> = {
+ ...fallbackConfig.options,
+ headers: fallbackConfig.headers,
+ credentials: fallbackConfig.credentials,
+ };
+ let http: HttpQueryOptions = fallbackConfig.http;
+
+ /*
+ * use the rest of the configs to populate the options
+ * configs later in the list will overwrite earlier fields
+ */
+ configs.forEach(config => {
+ options = {
+ ...options,
+ ...config.options,
+ headers: {
+ ...options.headers,
+ ...config.headers,
+ },
+ };
+ if (config.credentials) options.credentials = config.credentials;
+
+ http = {
+ ...http,
+ ...config.http,
+ };
+ });
+
+  // The body depends on the http options
+ const { operationName, extensions, variables, query } = operation;
+ const body: Body = { operationName, variables };
+
+ if (http.includeExtensions) (body as any).extensions = extensions;
+
+  // allow the query to be omitted (e.g. for persisted queries)
+ if (http.includeQuery) (body as any).query = print(query);
+
+ return {
+ options,
+ body,
+ };
+};
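
A sketch of the precedence rules: later configs overwrite earlier fields, while `headers` merge shallowly. The `operation` is assumed to exist (see createOperation later in this diff):

```ts
import { Operation } from '../core/types';
import {
  selectHttpOptionsAndBody,
  fallbackHttpConfig,
} from './selectHttpOptionsAndBody';

declare const operation: Operation; // assumed for the sketch

const { options, body } = selectHttpOptionsAndBody(
  operation,
  fallbackHttpConfig,                            // method POST, JSON headers
  { headers: { authorization: 'Bearer link' } }, // link-level config
  { headers: { authorization: 'Bearer ctx' } },  // context-level config
);

// options.method === 'POST'                      (kept from the fallback)
// options.headers.authorization === 'Bearer ctx' (last config wins)
// body.query is included because the fallback sets http.includeQuery: true
```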
diff --git a/src/link/http/selectURI.ts b/src/link/http/selectURI.ts
new file mode 100644
--- /dev/null
+++ b/src/link/http/selectURI.ts
@@ -0,0 +1,17 @@
+import { Operation } from '../core/types';
+
+export const selectURI = (
+ operation: Operation,
+ fallbackURI?: string | ((operation: Operation) => string),
+) => {
+ const context = operation.getContext();
+ const contextURI = context.uri;
+
+ if (contextURI) {
+ return contextURI;
+ } else if (typeof fallbackURI === 'function') {
+ return fallbackURI(operation);
+ } else {
+ return (fallbackURI as string) || '/graphql';
+ }
+};
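
The lookup order, sketched: a `uri` placed on the operation's context wins, otherwise a function fallback is invoked with the operation, otherwise the string fallback (or '/graphql') is used:

```ts
import { Operation } from '../core/types';
import { selectURI } from './selectURI';

declare const operation: Operation; // assumed; its context sets no `uri`

selectURI(operation, op => `/graphql?op=${op.operationName}`);
// => '/graphql?op=<operationName>', since context.uri is unset
```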
diff --git a/src/link/http/serializeFetchParameter.ts b/src/link/http/serializeFetchParameter.ts
new file mode 100644
--- /dev/null
+++ b/src/link/http/serializeFetchParameter.ts
@@ -0,0 +1,19 @@
+import { InvariantError } from 'ts-invariant';
+
+export type ClientParseError = InvariantError & {
+ parseError: Error;
+};
+
+export const serializeFetchParameter = (p: any, label: string) => {
+ let serialized;
+ try {
+ serialized = JSON.stringify(p);
+ } catch (e) {
+ const parseError = new InvariantError(
+ `Network request failed. ${label} is not serializable: ${e.message}`,
+ ) as ClientParseError;
+ parseError.parseError = e;
+ throw parseError;
+ }
+ return serialized;
+};
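
A sketch of the failure path: a circular `variables` object makes `JSON.stringify` throw, and the failure resurfaces as a `ClientParseError` (the inner message text varies by JS engine):

```ts
import {
  serializeFetchParameter,
  ClientParseError,
} from './serializeFetchParameter';

const circular: Record<string, any> = {};
circular.self = circular;

try {
  serializeFetchParameter(circular, 'Variables map');
} catch (e) {
  const err = e as ClientParseError;
  // err.message: 'Network request failed. Variables map is not serializable: ...'
  // err.parseError: the original TypeError thrown by JSON.stringify
  console.error(err.message, err.parseError);
}
```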
diff --git a/src/link/utils/createOperation.ts b/src/link/utils/createOperation.ts
new file mode 100644
--- /dev/null
+++ b/src/link/utils/createOperation.ts
@@ -0,0 +1,28 @@
+import { GraphQLRequest, Operation } from '../core/types';
+
+export function createOperation(
+ starting: any,
+ operation: GraphQLRequest,
+): Operation {
+ let context = { ...starting };
+ const setContext = (next: any) => {
+ if (typeof next === 'function') {
+ context = { ...context, ...next(context) };
+ } else {
+ context = { ...context, ...next };
+ }
+ };
+ const getContext = () => ({ ...context });
+
+ Object.defineProperty(operation, 'setContext', {
+ enumerable: false,
+ value: setContext,
+ });
+
+ Object.defineProperty(operation, 'getContext', {
+ enumerable: false,
+ value: getContext,
+ });
+
+ return operation as Operation;
+}
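
A sketch combining the utilities in this directory (validateOperation and transformOperation appear later in the diff): validate the raw request, fill in defaults, then attach the non-enumerable context accessors. The starting context is illustrative:

```ts
import gql from 'graphql-tag';
import { validateOperation } from './validateOperation';
import { transformOperation } from './transformOperation';
import { createOperation } from './createOperation';

const request = validateOperation({
  query: gql`query Ping { __typename }`,
});

const operation = createOperation(
  { headers: { 'x-trace-id': 'abc' } }, // illustrative starting context
  transformOperation(request),
);

// The function form of setContext receives the previous context and
// shallow-merges whatever it returns.
operation.setContext(prev => ({
  headers: { ...prev.headers, 'x-user': 'u1' },
}));

operation.getContext();
// => { headers: { 'x-trace-id': 'abc', 'x-user': 'u1' } }
```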
diff --git a/src/link/utils/fromError.ts b/src/link/utils/fromError.ts
new file mode 100644
--- /dev/null
+++ b/src/link/utils/fromError.ts
@@ -0,0 +1,7 @@
+import { Observable } from '../../utilities/observables/Observable';
+
+export function fromError<T>(errorValue: any): Observable<T> {
+ return new Observable<T>(observer => {
+ observer.error(errorValue);
+ });
+}
diff --git a/src/link/utils/fromPromise.ts b/src/link/utils/fromPromise.ts
new file mode 100644
--- /dev/null
+++ b/src/link/utils/fromPromise.ts
@@ -0,0 +1,12 @@
+import { Observable } from '../../utilities/observables/Observable';
+
+export function fromPromise<T>(promise: Promise<T>): Observable<T> {
+ return new Observable<T>(observer => {
+ promise
+ .then((value: T) => {
+ observer.next(value);
+ observer.complete();
+ })
+ .catch(observer.error.bind(observer));
+ });
+}
diff --git a/src/link/utils/throwServerError.ts b/src/link/utils/throwServerError.ts
new file mode 100644
--- /dev/null
+++ b/src/link/utils/throwServerError.ts
@@ -0,0 +1,18 @@
+export type ServerError = Error & {
+ response: Response;
+ result: Record<string, any>;
+ statusCode: number;
+};
+
+export const throwServerError = (
+ response: Response,
+ result: any,
+ message: string
+) => {
+ const error = new Error(message) as ServerError;
+ error.name = 'ServerError';
+ error.response = response;
+ error.statusCode = response.status;
+ error.result = result;
+ throw error;
+};
diff --git a/src/link/utils/toPromise.ts b/src/link/utils/toPromise.ts
new file mode 100644
--- /dev/null
+++ b/src/link/utils/toPromise.ts
@@ -0,0 +1,22 @@
+import { invariant } from 'ts-invariant';
+
+import { Observable } from '../../utilities/observables/Observable';
+
+export function toPromise<R>(observable: Observable<R>): Promise<R> {
+ let completed = false;
+ return new Promise<R>((resolve, reject) => {
+ observable.subscribe({
+ next: data => {
+ if (completed) {
+ invariant.warn(
+ `Promise Wrapper does not support multiple results from Observable`,
+ );
+ } else {
+ completed = true;
+ resolve(data);
+ }
+ },
+ error: reject,
+ });
+ });
+}
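
A sketch of bridging a link chain to async/await with the helper above (fromPromise, just before it, goes the other way). The terminating `link` is assumed, e.g. an HttpLink:

```ts
import gql from 'graphql-tag';
import { ApolloLink } from '../core/ApolloLink';
import { execute } from '../core/execute';
import { toPromise } from './toPromise';

declare const link: ApolloLink; // assumed terminating link

async function fetchOnce() {
  // Resolves with the first emitted result; warns if more results arrive.
  const result = await toPromise(
    execute(link, { query: gql`query Ping { __typename }` }),
  );
  return result.data;
}
```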
diff --git a/src/link/utils/transformOperation.ts b/src/link/utils/transformOperation.ts
new file mode 100644
--- /dev/null
+++ b/src/link/utils/transformOperation.ts
@@ -0,0 +1,21 @@
+import { GraphQLRequest, Operation } from '../core/types';
+import { getOperationName } from '../../utilities/graphql/getFromAST';
+
+export function transformOperation(operation: GraphQLRequest): GraphQLRequest {
+ const transformedOperation: GraphQLRequest = {
+ variables: operation.variables || {},
+ extensions: operation.extensions || {},
+ operationName: operation.operationName,
+ query: operation.query,
+ };
+
+ // Best guess at an operation name
+ if (!transformedOperation.operationName) {
+ transformedOperation.operationName =
+ typeof transformedOperation.query !== 'string'
+ ? getOperationName(transformedOperation.query)
+ : '';
+ }
+
+ return transformedOperation as Operation;
+}
diff --git a/src/link/utils/validateOperation.ts b/src/link/utils/validateOperation.ts
new file mode 100644
--- /dev/null
+++ b/src/link/utils/validateOperation.ts
@@ -0,0 +1,20 @@
+import { InvariantError } from 'ts-invariant';
+
+import { GraphQLRequest } from '../core/types';
+
+export function validateOperation(operation: GraphQLRequest): GraphQLRequest {
+ const OPERATION_FIELDS = [
+ 'query',
+ 'operationName',
+ 'variables',
+ 'extensions',
+ 'context',
+ ];
+ for (let key of Object.keys(operation)) {
+ if (OPERATION_FIELDS.indexOf(key) < 0) {
+ throw new InvariantError(`illegal argument: ${key}`);
+ }
+ }
+
+ return operation;
+}
diff --git a/src/react/context/ApolloContext.ts b/src/react/context/ApolloContext.ts
new file mode 100644
--- /dev/null
+++ b/src/react/context/ApolloContext.ts
@@ -0,0 +1,34 @@
+import { ApolloClient } from '../../ApolloClient';
+import { requireReactLazily } from '../react';
+
+export interface ApolloContextValue {
+ client?: ApolloClient<object>;
+ renderPromises?: Record<any, any>;
+}
+
+// To make sure Apollo Client doesn't create more than one React context
+// (which can lead to problems like having an Apollo Client instance added
+// in one context, then attempting to retrieve it from another different
+// context), a single Apollo context is created and tracked in global state.
+// Since the created context is React specific, we've decided to attach it to
+// the `React` object for sharing.
+
+const contextSymbol = Symbol.for('__APOLLO_CONTEXT__');
+
+export function resetApolloContext() {
+ const React = requireReactLazily();
+ Object.defineProperty(React, contextSymbol, {
+ value: React.createContext<ApolloContextValue>({}),
+ enumerable: false,
+ configurable: true,
+ writable: false,
+ });
+}
+
+export function getApolloContext() {
+ const React = requireReactLazily();
+ if (!(React as any)[contextSymbol]) {
+ resetApolloContext();
+ }
+ return (React as any)[contextSymbol] as React.Context<ApolloContextValue>;
+}
diff --git a/src/react/data/MutationData.ts b/src/react/data/MutationData.ts
new file mode 100644
--- /dev/null
+++ b/src/react/data/MutationData.ts
@@ -0,0 +1,184 @@
+import { equal } from '@wry/equality';
+
+import { DocumentType } from '../parser/parser';
+import { ApolloError } from '../../errors/ApolloError';
+import {
+ MutationDataOptions,
+ MutationTuple,
+ MutationFunctionOptions,
+ MutationResult
+} from '../types/types';
+import { OperationData } from './OperationData';
+import { OperationVariables } from '../../core/types';
+import { FetchResult } from '../../link/core/types';
+
+export class MutationData<
+ TData = any,
+ TVariables = OperationVariables
+> extends OperationData {
+ private mostRecentMutationId: number;
+ private result: MutationResult<TData>;
+ private previousResult?: MutationResult<TData>;
+ private setResult: (result: MutationResult<TData>) => any;
+
+ constructor({
+ options,
+ context,
+ result,
+ setResult
+ }: {
+ options: MutationDataOptions<TData, TVariables>;
+ context: any;
+ result: MutationResult<TData>;
+ setResult: (result: MutationResult<TData>) => any;
+ }) {
+ super(options, context);
+ this.verifyDocumentType(options.mutation, DocumentType.Mutation);
+ this.result = result;
+ this.setResult = setResult;
+ this.mostRecentMutationId = 0;
+ }
+
+ public execute(result: MutationResult<TData>) {
+ this.isMounted = true;
+ this.verifyDocumentType(this.getOptions().mutation, DocumentType.Mutation);
+ result.client = this.refreshClient().client;
+ return [this.runMutation, result] as MutationTuple<TData, TVariables>;
+ }
+
+ public afterExecute() {
+ this.isMounted = true;
+ return this.unmount.bind(this);
+ }
+
+ public cleanup() {
+ // No cleanup required.
+ }
+
+ private runMutation = (
+ mutationFunctionOptions: MutationFunctionOptions<
+ TData,
+ TVariables
+ > = {} as MutationFunctionOptions<TData, TVariables>
+ ) => {
+ this.onMutationStart();
+ const mutationId = this.generateNewMutationId();
+
+ return this.mutate(mutationFunctionOptions)
+ .then((response: FetchResult<TData>) => {
+ this.onMutationCompleted(response, mutationId);
+ return response;
+ })
+ .catch((error: ApolloError) => {
+ this.onMutationError(error, mutationId);
+ if (!this.getOptions().onError) throw error;
+ });
+ };
+
+ private mutate(
+ mutationFunctionOptions: MutationFunctionOptions<TData, TVariables>
+ ) {
+ const {
+ mutation,
+ variables,
+ optimisticResponse,
+ update,
+ context: mutationContext = {},
+ awaitRefetchQueries = false,
+ fetchPolicy
+ } = this.getOptions();
+ const mutateOptions = { ...mutationFunctionOptions };
+
+ const mutateVariables = Object.assign(
+ {},
+ variables,
+ mutateOptions.variables
+ );
+ delete mutateOptions.variables;
+
+ return this.refreshClient().client.mutate({
+ mutation,
+ optimisticResponse,
+ refetchQueries:
+ mutateOptions.refetchQueries || this.getOptions().refetchQueries,
+ awaitRefetchQueries,
+ update,
+ context: mutationContext,
+ fetchPolicy,
+ variables: mutateVariables,
+ ...mutateOptions
+ });
+ }
+
+ private onMutationStart() {
+ if (!this.result.loading && !this.getOptions().ignoreResults) {
+ this.updateResult({
+ loading: true,
+ error: undefined,
+ data: undefined,
+ called: true
+ });
+ }
+ }
+
+ private onMutationCompleted(
+ response: FetchResult<TData>,
+ mutationId: number
+ ) {
+ const { onCompleted, ignoreResults } = this.getOptions();
+
+ const { data, errors } = response;
+ const error =
+ errors && errors.length > 0
+ ? new ApolloError({ graphQLErrors: errors })
+ : undefined;
+
+    const callOnCompleted = () =>
+      onCompleted ? onCompleted(data as TData) : null;
+
+ if (this.isMostRecentMutation(mutationId) && !ignoreResults) {
+ this.updateResult({
+ called: true,
+ loading: false,
+ data,
+ error
+ });
+ }
+    callOnCompleted();
+ }
+
+ private onMutationError(error: ApolloError, mutationId: number) {
+ const { onError } = this.getOptions();
+
+ if (this.isMostRecentMutation(mutationId)) {
+ this.updateResult({
+ loading: false,
+ error,
+ data: undefined,
+ called: true
+ });
+ }
+
+ if (onError) {
+ onError(error);
+ }
+ }
+
+ private generateNewMutationId(): number {
+ return ++this.mostRecentMutationId;
+ }
+
+ private isMostRecentMutation(mutationId: number) {
+ return this.mostRecentMutationId === mutationId;
+ }
+
+ private updateResult(result: MutationResult<TData>) {
+ if (
+ this.isMounted &&
+ (!this.previousResult || !equal(this.previousResult, result))
+ ) {
+ this.setResult(result);
+ this.previousResult = result;
+ }
+ }
+}
diff --git a/src/react/data/OperationData.ts b/src/react/data/OperationData.ts
new file mode 100644
--- /dev/null
+++ b/src/react/data/OperationData.ts
@@ -0,0 +1,80 @@
+import { DocumentNode } from 'graphql';
+import { equal } from '@wry/equality';
+import { invariant } from 'ts-invariant';
+
+import { ApolloClient } from '../../ApolloClient';
+import { DocumentType, parser, operationName } from '../parser/parser';
+import { CommonOptions } from '../types/types';
+
+export abstract class OperationData<TOptions = any> {
+ public isMounted: boolean = false;
+ public previousOptions: CommonOptions<TOptions> = {} as CommonOptions<
+ TOptions
+ >;
+ public context: any = {};
+ public client: ApolloClient<object> | undefined;
+
+ private options: CommonOptions<TOptions> = {} as CommonOptions<TOptions>;
+
+ constructor(options?: CommonOptions<TOptions>, context?: any) {
+ this.options = options || ({} as CommonOptions<TOptions>);
+ this.context = context || {};
+ }
+
+ public getOptions(): CommonOptions<TOptions> {
+ return this.options;
+ }
+
+ public setOptions(
+ newOptions: CommonOptions<TOptions>,
+ storePrevious: boolean = false
+ ) {
+ if (storePrevious && !equal(this.options, newOptions)) {
+ this.previousOptions = this.options;
+ }
+ this.options = newOptions;
+ }
+
+ public abstract execute(...args: any): any;
+ public abstract afterExecute(...args: any): void | (() => void);
+ public abstract cleanup(): void;
+
+ protected unmount() {
+ this.isMounted = false;
+ }
+
+ protected refreshClient() {
+ const client =
+ (this.options && this.options.client) ||
+ (this.context && this.context.client);
+
+ invariant(
+ !!client,
+ 'Could not find "client" in the context or passed in as an option. ' +
+ 'Wrap the root component in an <ApolloProvider>, or pass an ' +
+ 'ApolloClient instance in via options.'
+ );
+
+ let isNew = false;
+ if (client !== this.client) {
+ isNew = true;
+ this.client = client;
+ this.cleanup();
+ }
+ return {
+ client: this.client as ApolloClient<object>,
+ isNew
+ };
+ }
+
+ protected verifyDocumentType(document: DocumentNode, type: DocumentType) {
+ const operation = parser(document);
+ const requiredOperationName = operationName(type);
+ const usedOperationName = operationName(operation.type);
+ invariant(
+ operation.type === type,
+ `Running a ${requiredOperationName} requires a graphql ` +
+ `${requiredOperationName}, but a ${usedOperationName} was used instead.`
+ );
+ }
+}
diff --git a/src/react/data/QueryData.ts b/src/react/data/QueryData.ts
new file mode 100644
--- /dev/null
+++ b/src/react/data/QueryData.ts
@@ -0,0 +1,507 @@
+import { equal } from '@wry/equality';
+
+import { ApolloError } from '../../errors/ApolloError';
+import { NetworkStatus } from '../../core/networkStatus';
+import {
+ FetchMoreQueryOptions,
+ SubscribeToMoreOptions
+} from '../../core/watchQueryOptions';
+import {
+ FetchMoreOptions,
+ UpdateQueryOptions
+} from '../../core/ObservableQuery';
+import { DocumentType } from '../parser/parser';
+import {
+ QueryResult,
+ QueryPreviousData,
+ QueryDataOptions,
+ QueryCurrentObservable,
+ QueryTuple,
+ QueryLazyOptions,
+ ObservableQueryFields
+} from '../types/types';
+import { OperationData } from './OperationData';
+
+export class QueryData<TData, TVariables> extends OperationData {
+ private previousData: QueryPreviousData<TData, TVariables> = {};
+ private currentObservable: QueryCurrentObservable<TData, TVariables> = {};
+ private forceUpdate: any;
+
+ private runLazy: boolean = false;
+ private lazyOptions?: QueryLazyOptions<TVariables>;
+
+ constructor({
+ options,
+ context,
+ forceUpdate
+ }: {
+ options: QueryDataOptions<TData, TVariables>;
+ context: any;
+ forceUpdate: any;
+ }) {
+ super(options, context);
+ this.forceUpdate = forceUpdate;
+ }
+
+ public execute(): QueryResult<TData, TVariables> {
+ this.refreshClient();
+
+ const { skip, query } = this.getOptions();
+ if (skip || query !== this.previousData.query) {
+ this.removeQuerySubscription();
+ this.previousData.query = query;
+ }
+
+ this.updateObservableQuery();
+
+ if (this.isMounted) this.startQuerySubscription();
+
+ return this.getExecuteSsrResult() || this.getExecuteResult();
+ }
+
+ public executeLazy(): QueryTuple<TData, TVariables> {
+ return !this.runLazy
+ ? [
+ this.runLazyQuery,
+ {
+ loading: false,
+ networkStatus: NetworkStatus.ready,
+ called: false,
+ data: undefined
+ } as QueryResult<TData, TVariables>
+ ]
+ : [this.runLazyQuery, this.execute()];
+ }
+
+ // For server-side rendering
+ public fetchData(): Promise<void> | boolean {
+ const options = this.getOptions();
+ if (options.skip || options.ssr === false) return false;
+ return new Promise(resolve => this.startQuerySubscription(resolve));
+ }
+
+ public afterExecute({
+ queryResult,
+ lazy = false,
+ }: {
+ queryResult: QueryResult<TData, TVariables>;
+ lazy?: boolean;
+ }) {
+ this.isMounted = true;
+
+ if (!lazy || this.runLazy) {
+ this.handleErrorOrCompleted(queryResult);
+
+ // When the component is done rendering stored query errors, we'll
+ // remove those errors from the `ObservableQuery` query store, so they
+ // aren't re-displayed on subsequent (potentially error free)
+ // requests/responses.
+ setTimeout(() => {
+ this.currentObservable.query &&
+ this.currentObservable.query.resetQueryStoreErrors();
+ });
+ }
+
+ this.previousOptions = this.getOptions();
+ return this.unmount.bind(this);
+ }
+
+ public cleanup() {
+ this.removeQuerySubscription();
+ delete this.currentObservable.query;
+ delete this.previousData.result;
+ }
+
+ public getOptions() {
+ const options = super.getOptions();
+
+ if (this.lazyOptions) {
+ options.variables = {
+ ...options.variables,
+ ...this.lazyOptions.variables
+ };
+ options.context = {
+ ...options.context,
+ ...this.lazyOptions.context
+ };
+ }
+
+ // skip is not supported when using lazy query execution.
+ if (this.runLazy) {
+ delete options.skip;
+ }
+
+ return options;
+ }
+
+ private runLazyQuery = (options?: QueryLazyOptions<TVariables>) => {
+ this.cleanup();
+
+ this.runLazy = true;
+ this.lazyOptions = options;
+ this.forceUpdate();
+ };
+
+ private getExecuteResult(): QueryResult<TData, TVariables> {
+ const result = this.getQueryResult();
+ this.startQuerySubscription();
+ return result;
+ };
+
+ private getExecuteSsrResult() {
+ const treeRenderingInitiated = this.context && this.context.renderPromises;
+ const ssrDisabled = this.getOptions().ssr === false;
+ const fetchDisabled = this.refreshClient().client.disableNetworkFetches;
+
+ const ssrLoading = {
+ loading: true,
+ networkStatus: NetworkStatus.loading,
+ called: true,
+ data: undefined
+ } as QueryResult<TData, TVariables>;
+
+ // If SSR has been explicitly disabled, and this function has been called
+ // on the server side, return the default loading state.
+ if (ssrDisabled && (treeRenderingInitiated || fetchDisabled)) {
+ return ssrLoading;
+ }
+
+ let result;
+ if (treeRenderingInitiated) {
+ result =
+ this.context.renderPromises!.addQueryPromise(
+ this,
+ this.getQueryResult
+ ) || ssrLoading;
+ }
+
+ return result;
+ }
+
+ private prepareObservableQueryOptions() {
+ const options = this.getOptions();
+ this.verifyDocumentType(options.query, DocumentType.Query);
+ const displayName = options.displayName || 'Query';
+
+ // Set the fetchPolicy to cache-first for network-only and cache-and-network
+ // fetches for server side renders.
+ if (
+ this.context &&
+ this.context.renderPromises &&
+ (options.fetchPolicy === 'network-only' ||
+ options.fetchPolicy === 'cache-and-network')
+ ) {
+ options.fetchPolicy = 'cache-first';
+ }
+
+ return {
+ ...options,
+ displayName,
+ context: options.context,
+ metadata: { reactComponent: { displayName } }
+ };
+ }
+
+ private initializeObservableQuery() {
+ // See if there is an existing observable that was used to fetch the same
+ // data and if so, use it instead since it will contain the proper queryId
+ // to fetch the result set. This is used during SSR.
+ if (this.context && this.context.renderPromises) {
+ this.currentObservable.query = this.context.renderPromises.getSSRObservable(
+ this.getOptions()
+ );
+ }
+
+ if (!this.currentObservable.query) {
+ const observableQueryOptions = this.prepareObservableQueryOptions();
+
+ this.previousData.observableQueryOptions = {
+ ...observableQueryOptions,
+ children: null
+ };
+ this.currentObservable.query = this.refreshClient().client.watchQuery({
+ ...observableQueryOptions
+ });
+
+ if (this.context && this.context.renderPromises) {
+ this.context.renderPromises.registerSSRObservable(
+ this.currentObservable.query,
+ observableQueryOptions
+ );
+ }
+ }
+ }
+
+ private updateObservableQuery() {
+ // If we skipped initially, we may not have yet created the observable
+ if (!this.currentObservable.query) {
+ this.initializeObservableQuery();
+ return;
+ }
+
+ const newObservableQueryOptions = {
+ ...this.prepareObservableQueryOptions(),
+ children: null
+ };
+
+ if (
+ !equal(
+ newObservableQueryOptions,
+ this.previousData.observableQueryOptions
+ )
+ ) {
+ this.previousData.observableQueryOptions = newObservableQueryOptions;
+ this.currentObservable
+ .query!.setOptions(newObservableQueryOptions)
+ // The error will be passed to the child container, so we don't
+ // need to log it here. We could conceivably log something if
+ // an option was set. OTOH we don't log errors w/ the original
+ // query. See https://github.com/apollostack/react-apollo/issues/404
+ .catch(() => {});
+ }
+ }
+
+ // Setup a subscription to watch for Apollo Client `ObservableQuery` changes.
+ // When new data is received, and it doesn't match the data that was used
+ // during the last `QueryData.execute` call (and ultimately the last query
+ // component render), trigger the `onNewData` callback. If not specified,
+ // `onNewData` will trigger the `forceUpdate` function, which leads to a
+ // query component re-render.
+ private startQuerySubscription(onNewData: () => void = this.forceUpdate) {
+ if (this.currentObservable.subscription || this.getOptions().skip) return;
+
+ const obsQuery = this.currentObservable.query!;
+ this.currentObservable.subscription = obsQuery.subscribe({
+ next: ({ loading, networkStatus, data }) => {
+ const previousResult = this.previousData.result;
+
+ // Make sure we're not attempting to re-render similar results
+ if (
+ previousResult &&
+ previousResult.loading === loading &&
+ previousResult.networkStatus === networkStatus &&
+ equal(previousResult.data, data)
+ ) {
+ return;
+ }
+
+ // If we skipped previously, `previousResult.data` is set to undefined.
+ // When this subscription is run after skipping, Apollo Client sends
+ // the last query result data alongside the `loading` true state. This
+ // means the previous skipped `data` of undefined and the incoming
+ // data won't match, which would normally mean we want to trigger a
+ // render to show the new data. In this case however we're already
+ // showing the loading state, and want to avoid triggering an
+ // additional and unnecessary render showing the same loading state.
+ if (this.previousOptions.skip) {
+ return;
+ }
+
+ onNewData();
+ },
+ error: error => {
+ this.resubscribeToQuery();
+ if (!error.hasOwnProperty('graphQLErrors')) throw error;
+
+ const previousResult = this.previousData.result;
+ if (
+ (previousResult && previousResult.loading) ||
+ !equal(error, this.previousData.error)
+ ) {
+ this.previousData.error = error;
+ onNewData();
+ }
+ }
+ });
+ }
+
+ private resubscribeToQuery() {
+ this.removeQuerySubscription();
+
+ // Unfortunately, if `lastError` is set in the current
+ // `observableQuery` when the subscription is re-created,
+ // the subscription will immediately receive the error, which will
+ // cause it to terminate again. To avoid this, we first clear
+ // the last error/result from the `observableQuery` before re-starting
+ // the subscription, and restore it afterwards (so the subscription
+ // has a chance to stay open).
+ const lastError = this.currentObservable.query!.getLastError();
+ const lastResult = this.currentObservable.query!.getLastResult();
+ this.currentObservable.query!.resetLastResults();
+ this.startQuerySubscription();
+ Object.assign(this.currentObservable.query!, {
+ lastError,
+ lastResult
+ });
+ }
+
+ private getQueryResult = (): QueryResult<TData, TVariables> => {
+ let result: any = this.observableQueryFields();
+ const options = this.getOptions();
+
+ // When skipping a query (ie. we're not querying for data but still want
+ // to render children), make sure the `data` is cleared out and
+ // `loading` is set to `false` (since we aren't loading anything).
+ if (options.skip) {
+ result = {
+ ...result,
+ data: undefined,
+ error: undefined,
+ loading: false,
+ called: true
+ };
+ } else {
+ // Fetch the current result (if any) from the store.
+ const currentResult = this.currentObservable.query!.getCurrentResult();
+ const { loading, networkStatus, errors } = currentResult;
+ let { error, data } = currentResult;
+
+ // Until a set naming convention for networkError and graphQLErrors is
+ // decided upon, we map errors (graphQLErrors) to the error options.
+ if (errors && errors.length > 0) {
+ error = new ApolloError({ graphQLErrors: errors });
+ }
+
+ result = {
+ ...result,
+ loading,
+ networkStatus,
+ error,
+ called: true
+ };
+
+ if (loading) {
+ const previousData =
+ this.previousData.result && this.previousData.result.data;
+ result.data =
+ previousData && data
+ ? {
+ ...previousData,
+ ...data
+ }
+ : previousData || data;
+ } else if (error) {
+ Object.assign(result, {
+ data: (this.currentObservable.query!.getLastResult() || ({} as any))
+ .data
+ });
+ } else {
+ const { fetchPolicy } = this.currentObservable.query!.options;
+ const { partialRefetch } = options;
+ if (
+ partialRefetch &&
+ (!data || Object.keys(data).length === 0) &&
+ fetchPolicy !== 'cache-only'
+ ) {
+ // When a `Query` component is mounted, and a mutation is executed
+ // that returns the same ID as the mounted `Query`, but has less
+ // fields in its result, Apollo Client's `QueryManager` returns the
+ // data as `undefined` since a hit can't be found in the cache.
+ // This can lead to application errors when the UI elements rendered by
+ // the original `Query` component are expecting certain data values to
+ // exist, and they're all of a sudden stripped away. To help avoid
+ // this we'll attempt to refetch the `Query` data.
+ Object.assign(result, {
+ loading: true,
+ networkStatus: NetworkStatus.loading
+ });
+ result.refetch();
+ return result;
+ }
+
+ result.data = data;
+ }
+ }
+
+ result.client = this.client;
+ // Store options as this.previousOptions.
+ this.setOptions(options, true);
+ this.previousData.loading =
+ this.previousData.result && this.previousData.result.loading || false;
+ return this.previousData.result = result;
+ }
+
+ private handleErrorOrCompleted({
+ data,
+ loading,
+ error,
+ }: QueryResult<TData, TVariables>) {
+ if (!loading) {
+ const { query, variables, onCompleted, onError } = this.getOptions();
+
+ // No changes, so we won't call onError/onCompleted.
+ if (
+ this.previousOptions &&
+ !this.previousData.loading &&
+ equal(this.previousOptions.query, query) &&
+ equal(this.previousOptions.variables, variables)
+ ) {
+ return;
+ }
+
+ if (onCompleted && !error) {
+ onCompleted(data);
+ } else if (onError && error) {
+ onError(error);
+ }
+ }
+ }
+
+ private removeQuerySubscription() {
+ if (this.currentObservable.subscription) {
+ this.currentObservable.subscription.unsubscribe();
+ delete this.currentObservable.subscription;
+ }
+ }
+
+ private obsRefetch = (variables?: TVariables) =>
+ this.currentObservable.query!.refetch(variables);
+
+ private obsFetchMore = <K extends keyof TVariables>(
+ fetchMoreOptions: FetchMoreQueryOptions<TVariables, K> &
+ FetchMoreOptions<TData, TVariables>
+ ) => this.currentObservable.query!.fetchMore(fetchMoreOptions);
+
+ private obsUpdateQuery = <TVars = TVariables>(
+ mapFn: (
+ previousQueryResult: TData,
+ options: UpdateQueryOptions<TVars>
+ ) => TData
+ ) => this.currentObservable.query!.updateQuery(mapFn);
+
+ private obsStartPolling = (pollInterval: number) => {
+ this.currentObservable &&
+ this.currentObservable.query! &&
+ this.currentObservable.query!.startPolling(pollInterval);
+ };
+
+ private obsStopPolling = () => {
+ this.currentObservable &&
+ this.currentObservable.query! &&
+ this.currentObservable.query!.stopPolling();
+ };
+
+ private obsSubscribeToMore = <
+ TSubscriptionData = TData,
+ TSubscriptionVariables = TVariables
+ >(
+ options: SubscribeToMoreOptions<
+ TData,
+ TSubscriptionVariables,
+ TSubscriptionData
+ >
+ ) => this.currentObservable.query!.subscribeToMore(options);
+
+ private observableQueryFields() {
+ const observable = this.currentObservable.query!;
+ return {
+ variables: observable.variables,
+ refetch: this.obsRefetch,
+ fetchMore: this.obsFetchMore,
+ updateQuery: this.obsUpdateQuery,
+ startPolling: this.obsStartPolling,
+ stopPolling: this.obsStopPolling,
+ subscribeToMore: this.obsSubscribeToMore
+ } as ObservableQueryFields<TData, TVariables>;
+ }
+}
diff --git a/src/react/data/SubscriptionData.ts b/src/react/data/SubscriptionData.ts
new file mode 100644
--- /dev/null
+++ b/src/react/data/SubscriptionData.ts
@@ -0,0 +1,150 @@
+import { equal } from '@wry/equality';
+
+import { OperationData } from './OperationData';
+import {
+ SubscriptionCurrentObservable,
+ SubscriptionDataOptions,
+ SubscriptionResult
+} from '../types/types';
+
+export class SubscriptionData<
+ TData = any,
+ TVariables = any
+> extends OperationData<SubscriptionDataOptions<TData, TVariables>> {
+ private setResult: any;
+ private currentObservable: SubscriptionCurrentObservable = {};
+
+ constructor({
+ options,
+ context,
+ setResult
+ }: {
+ options: SubscriptionDataOptions<TData, TVariables>;
+ context: any;
+ setResult: any;
+ }) {
+ super(options, context);
+ this.setResult = setResult;
+ this.initialize(options);
+ }
+
+ public execute(result: SubscriptionResult<TData>) {
+ if (this.getOptions().skip === true) {
+ this.cleanup();
+ return {
+ loading: false,
+ error: undefined,
+ data: undefined,
+ variables: this.getOptions().variables
+ };
+ }
+
+ let currentResult = result;
+ if (this.refreshClient().isNew) {
+ currentResult = this.getLoadingResult();
+ }
+
+ let { shouldResubscribe } = this.getOptions();
+ if (typeof shouldResubscribe === 'function') {
+ shouldResubscribe = !!shouldResubscribe(this.getOptions());
+ }
+
+ if (
+ shouldResubscribe !== false &&
+ this.previousOptions &&
+ Object.keys(this.previousOptions).length > 0 &&
+ (this.previousOptions.subscription !== this.getOptions().subscription ||
+ !equal(this.previousOptions.variables, this.getOptions().variables) ||
+ this.previousOptions.skip !== this.getOptions().skip)
+ ) {
+ this.cleanup();
+ currentResult = this.getLoadingResult();
+ }
+
+ this.initialize(this.getOptions());
+ this.startSubscription();
+
+ this.previousOptions = this.getOptions();
+ return { ...currentResult, variables: this.getOptions().variables };
+ }
+
+ public afterExecute() {
+ this.isMounted = true;
+ }
+
+ public cleanup() {
+ this.endSubscription();
+ delete this.currentObservable.query;
+ }
+
+ private initialize(options: SubscriptionDataOptions<TData, TVariables>) {
+ if (this.currentObservable.query || this.getOptions().skip === true) return;
+ this.currentObservable.query = this.refreshClient().client.subscribe({
+ query: options.subscription,
+ variables: options.variables,
+ fetchPolicy: options.fetchPolicy
+ });
+ }
+
+ private startSubscription() {
+ if (this.currentObservable.subscription) return;
+ this.currentObservable.subscription = this.currentObservable.query!.subscribe(
+ {
+ next: this.updateCurrentData.bind(this),
+ error: this.updateError.bind(this),
+ complete: this.completeSubscription.bind(this)
+ }
+ );
+ }
+
+ private getLoadingResult() {
+ return {
+ loading: true,
+ error: undefined,
+ data: undefined
+ } as SubscriptionResult<TData>;
+ }
+
+ private updateResult(result: SubscriptionResult) {
+ if (this.isMounted) {
+ this.setResult(result);
+ }
+ }
+
+ private updateCurrentData(result: SubscriptionResult<TData>) {
+ const { onSubscriptionData } = this.getOptions();
+
+ this.updateResult({
+ data: result.data,
+ loading: false,
+ error: undefined
+ });
+
+ if (onSubscriptionData) {
+ onSubscriptionData({
+ client: this.refreshClient().client,
+ subscriptionData: result
+ });
+ }
+ }
+
+ private updateError(error: any) {
+ this.updateResult({
+ error,
+ loading: false
+ });
+ }
+
+ private completeSubscription() {
+ const { onSubscriptionComplete } = this.getOptions();
+ if (onSubscriptionComplete) onSubscriptionComplete();
+ this.endSubscription();
+ }
+
+ private endSubscription() {
+ if (this.currentObservable.subscription) {
+ this.currentObservable.subscription.unsubscribe();
+ delete this.currentObservable.subscription;
+ }
+ }
+}
diff --git a/src/react/hooks/useApolloClient.ts b/src/react/hooks/useApolloClient.ts
new file mode 100644
--- /dev/null
+++ b/src/react/hooks/useApolloClient.ts
@@ -0,0 +1,16 @@
+import { invariant } from 'ts-invariant';
+
+import { ApolloClient } from '../../ApolloClient';
+import { getApolloContext } from '../context/ApolloContext';
+import { requireReactLazily } from '../react';
+
+export function useApolloClient(): ApolloClient<object> {
+ const React = requireReactLazily();
+ const { client } = React.useContext(getApolloContext());
+ invariant(
+ client,
+ 'No Apollo Client instance can be found. Please ensure that you ' +
+ 'have called `ApolloProvider` higher up in your tree.'
+ );
+ return client!;
+}
diff --git a/src/react/hooks/useLazyQuery.ts b/src/react/hooks/useLazyQuery.ts
new file mode 100644
--- /dev/null
+++ b/src/react/hooks/useLazyQuery.ts
@@ -0,0 +1,15 @@
+import { DocumentNode } from 'graphql';
+
+import { LazyQueryHookOptions, QueryTuple } from '../types/types';
+import { useBaseQuery } from './utils/useBaseQuery';
+import { OperationVariables } from '../../core/types';
+
+export function useLazyQuery<TData = any, TVariables = OperationVariables>(
+ query: DocumentNode,
+ options?: LazyQueryHookOptions<TData, TVariables>
+) {
+ return useBaseQuery<TData, TVariables>(query, options, true) as QueryTuple<
+ TData,
+ TVariables
+ >;
+}
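
A usage sketch of the tuple this hook returns: nothing executes until the first element is called. The schema and component are illustrative (JSX implies a .tsx module):

```tsx
import React from 'react';
import gql from 'graphql-tag';
import { useLazyQuery } from '@apollo/client';

const GET_GREETING = gql`
  query GetGreeting($language: String!) {
    greeting(language: $language)
  }
`;

function Greeting() {
  const [loadGreeting, { called, loading, data }] = useLazyQuery(GET_GREETING);
  if (!called) {
    return (
      <button onClick={() => loadGreeting({ variables: { language: 'en' } })}>
        Load greeting
      </button>
    );
  }
  return <p>{loading ? 'Loading…' : data && data.greeting}</p>;
}
```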
diff --git a/src/react/hooks/useMutation.ts b/src/react/hooks/useMutation.ts
new file mode 100644
--- /dev/null
+++ b/src/react/hooks/useMutation.ts
@@ -0,0 +1,38 @@
+import { DocumentNode } from 'graphql';
+
+import { MutationHookOptions, MutationTuple } from '../types/types';
+import { MutationData } from '../data/MutationData';
+import { OperationVariables } from '../../core/types';
+import { getApolloContext } from '../context/ApolloContext';
+import { requireReactLazily } from '../react';
+
+export function useMutation<TData = any, TVariables = OperationVariables>(
+ mutation: DocumentNode,
+ options?: MutationHookOptions<TData, TVariables>
+): MutationTuple<TData, TVariables> {
+ const { useContext, useState, useRef, useEffect } = requireReactLazily();
+ const context = useContext(getApolloContext());
+ const [result, setResult] = useState({ called: false, loading: false });
+ const updatedOptions = options ? { ...options, mutation } : { mutation };
+
+ const mutationDataRef = useRef<MutationData<TData, TVariables>>();
+ function getMutationDataRef() {
+ if (!mutationDataRef.current) {
+ mutationDataRef.current = new MutationData<TData, TVariables>({
+ options: updatedOptions,
+ context,
+ result,
+ setResult
+ });
+ }
+ return mutationDataRef.current;
+ }
+
+ const mutationData = getMutationDataRef();
+ mutationData.setOptions(updatedOptions);
+ mutationData.context = context;
+
+ useEffect(() => mutationData.afterExecute());
+
+ return mutationData.execute(result);
+}
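
A usage sketch; the mutation and fields are illustrative. Note from runMutation above: when `onError` is provided, the returned promise resolves instead of rethrowing:

```tsx
import React from 'react';
import gql from 'graphql-tag';
import { useMutation } from '@apollo/client';

const ADD_TODO = gql`
  mutation AddTodo($text: String!) {
    addTodo(text: $text) {
      id
      text
    }
  }
`;

function AddTodo() {
  const [addTodo, { loading, error }] = useMutation(ADD_TODO, {
    // With onError set, runMutation resolves rather than throwing.
    onError: () => {},
  });
  if (loading) return <p>Submitting…</p>;
  if (error) return <p>Error: {error.message}</p>;
  return (
    <button onClick={() => addTodo({ variables: { text: 'buy milk' } })}>
      Add
    </button>
  );
}
```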
diff --git a/src/react/hooks/useQuery.ts b/src/react/hooks/useQuery.ts
new file mode 100644
--- /dev/null
+++ b/src/react/hooks/useQuery.ts
@@ -0,0 +1,15 @@
+import { DocumentNode } from 'graphql';
+
+import { QueryHookOptions, QueryResult } from '../types/types';
+import { useBaseQuery } from './utils/useBaseQuery';
+import { OperationVariables } from '../../core/types';
+
+export function useQuery<TData = any, TVariables = OperationVariables>(
+ query: DocumentNode,
+ options?: QueryHookOptions<TData, TVariables>
+) {
+ return useBaseQuery<TData, TVariables>(query, options, false) as QueryResult<
+ TData,
+ TVariables
+ >;
+}
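
A usage sketch; schema fields are illustrative:

```tsx
import React from 'react';
import gql from 'graphql-tag';
import { useQuery } from '@apollo/client';

const GET_DOGS = gql`
  query GetDogs {
    dogs {
      id
      breed
    }
  }
`;

function Dogs() {
  const { loading, error, data } = useQuery(GET_DOGS);
  if (loading) return <p>Loading…</p>;
  if (error) return <p>Error: {error.message}</p>;
  return (
    <ul>
      {data.dogs.map((dog: { id: string; breed: string }) => (
        <li key={dog.id}>{dog.breed}</li>
      ))}
    </ul>
  );
}
```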
diff --git a/src/react/hooks/useSubscription.ts b/src/react/hooks/useSubscription.ts
new file mode 100644
--- /dev/null
+++ b/src/react/hooks/useSubscription.ts
@@ -0,0 +1,45 @@
+import { DocumentNode } from 'graphql';
+
+import { SubscriptionHookOptions } from '../types/types';
+import { SubscriptionData } from '../data/SubscriptionData';
+import { OperationVariables } from '../../core/types';
+import { getApolloContext } from '../context/ApolloContext';
+import { requireReactLazily } from '../react';
+
+export function useSubscription<TData = any, TVariables = OperationVariables>(
+ subscription: DocumentNode,
+ options?: SubscriptionHookOptions<TData, TVariables>
+) {
+ const React = requireReactLazily();
+ const { useContext, useState, useRef, useEffect } = React;
+ const context = useContext(getApolloContext());
+ const updatedOptions = options
+ ? { ...options, subscription }
+ : { subscription };
+ const [result, setResult] = useState({
+ loading: !updatedOptions.skip,
+ error: undefined,
+ data: undefined
+ });
+
+ const subscriptionDataRef = useRef<SubscriptionData<TData, TVariables>>();
+ function getSubscriptionDataRef() {
+ if (!subscriptionDataRef.current) {
+ subscriptionDataRef.current = new SubscriptionData<TData, TVariables>({
+ options: updatedOptions,
+ context,
+ setResult
+ });
+ }
+ return subscriptionDataRef.current;
+ }
+
+ const subscriptionData = getSubscriptionDataRef();
+ subscriptionData.setOptions(updatedOptions, true);
+ subscriptionData.context = context;
+
+ useEffect(() => subscriptionData.afterExecute());
+ useEffect(() => subscriptionData.cleanup.bind(subscriptionData), []);
+
+ return subscriptionData.execute(result);
+}
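
A usage sketch; the subscription document is illustrative, and `onSubscriptionData` is one of the options handled by SubscriptionData above:

```tsx
import React from 'react';
import gql from 'graphql-tag';
import { useSubscription } from '@apollo/client';

const ON_COMMENT = gql`
  subscription OnCommentAdded($postId: ID!) {
    commentAdded(postId: $postId) {
      id
      content
    }
  }
`;

function LatestComment({ postId }: { postId: string }) {
  const { data } = useSubscription(ON_COMMENT, {
    variables: { postId },
    onSubscriptionData: ({ subscriptionData }) =>
      console.log('new comment', subscriptionData.data),
  });
  return <p>{data ? data.commentAdded.content : 'Waiting…'}</p>;
}
```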
diff --git a/src/react/hooks/utils/useBaseQuery.ts b/src/react/hooks/utils/useBaseQuery.ts
new file mode 100644
--- /dev/null
+++ b/src/react/hooks/utils/useBaseQuery.ts
@@ -0,0 +1,74 @@
+import { DocumentNode } from 'graphql';
+
+import {
+ QueryHookOptions,
+ QueryDataOptions,
+ QueryTuple,
+ QueryResult,
+} from '../../types/types';
+import { QueryData } from '../../data/QueryData';
+import { useDeepMemo } from './useDeepMemo';
+import { OperationVariables } from '../../../core/types';
+import { getApolloContext } from '../../context/ApolloContext';
+import { requireReactLazily } from '../../react';
+
+export function useBaseQuery<TData = any, TVariables = OperationVariables>(
+ query: DocumentNode,
+ options?: QueryHookOptions<TData, TVariables>,
+ lazy = false
+) {
+ const React = requireReactLazily();
+ const { useContext, useEffect, useReducer, useRef } = React;
+ const context = useContext(getApolloContext());
+ const [tick, forceUpdate] = useReducer(x => x + 1, 0);
+ const updatedOptions = options ? { ...options, query } : { query };
+
+ const queryDataRef = useRef<QueryData<TData, TVariables>>();
+
+ if (!queryDataRef.current) {
+ queryDataRef.current = new QueryData<TData, TVariables>({
+ options: updatedOptions as QueryDataOptions<TData, TVariables>,
+ context,
+ forceUpdate
+ });
+ }
+
+ const queryData = queryDataRef.current;
+ queryData.setOptions(updatedOptions);
+ queryData.context = context;
+
+ // `onError` and `onCompleted` callback functions will not always have a
+ // stable identity, so we'll exclude them from the memoization key to
+  // prevent `afterExecute` from being triggered unnecessarily.
+ const memo = {
+ options: {
+ ...updatedOptions,
+ onError: undefined,
+ onCompleted: undefined
+ } as QueryHookOptions<TData, TVariables>,
+ context,
+ tick
+ };
+
+ const result = useDeepMemo(
+ () => (lazy ? queryData.executeLazy() : queryData.execute()),
+ memo
+ );
+
+ const queryResult = lazy
+ ? (result as QueryTuple<TData, TVariables>)[1]
+ : (result as QueryResult<TData, TVariables>);
+
+ useEffect(() => queryData.afterExecute({ queryResult, lazy }), [
+ queryResult.loading,
+ queryResult.networkStatus,
+ queryResult.error,
+ queryResult.data
+ ]);
+
+ useEffect(() => {
+ return () => queryData.cleanup();
+ }, []);
+
+ return result;
+}
diff --git a/src/react/hooks/utils/useDeepMemo.ts b/src/react/hooks/utils/useDeepMemo.ts
new file mode 100644
--- /dev/null
+++ b/src/react/hooks/utils/useDeepMemo.ts
@@ -0,0 +1,25 @@
+import { equal } from '@wry/equality';
+
+import { requireReactLazily } from '../../react';
+
+/**
+ * Memoize a result using deep equality. This hook has two advantages over
+ * React.useMemo: it uses deep equality to compare memo keys, and it guarantees
+ * that the memo function will only be called if the keys are unequal.
+ * React.useMemo cannot be relied on to do this, since it is only a performance
+ * optimization (see https://reactjs.org/docs/hooks-reference.html#usememo).
+ */
+export function useDeepMemo<TKey, TValue>(
+ memoFn: () => TValue,
+ key: TKey
+): TValue {
+ const React = requireReactLazily();
+ const { useRef } = React;
+ const ref = useRef<{ key: TKey; value: TValue }>();
+
+ if (!ref.current || !equal(key, ref.current.key)) {
+ ref.current = { key, value: memoFn() };
+ }
+
+ return ref.current.value;
+}
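
A sketch of the guarantee in the doc comment: the factory re-runs only when the key fails a deep-equality check, so an object key that callers re-create every render stays stable here (names are illustrative):

```ts
import { useDeepMemo } from './useDeepMemo';

// Illustrative custom hook: callers typically pass a freshly-created
// `filters` object each render; deep equality keeps the result cached
// until the contents actually change.
function useSearchParams(filters: { tags: string[]; limit: number }) {
  return useDeepMemo(
    () =>
      new URLSearchParams({
        tags: filters.tags.join(','),
        limit: String(filters.limit),
      }).toString(),
    filters,
  );
}
```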
diff --git a/src/react/index.ts b/src/react/index.ts
new file mode 100644
--- /dev/null
+++ b/src/react/index.ts
@@ -0,0 +1,21 @@
+export { ApolloProvider } from './context/ApolloProvider';
+export { ApolloConsumer } from './context/ApolloConsumer';
+export {
+ getApolloContext,
+ resetApolloContext,
+ ApolloContextValue
+} from './context/ApolloContext';
+export { useQuery } from './hooks/useQuery';
+export { useLazyQuery } from './hooks/useLazyQuery';
+export { useMutation } from './hooks/useMutation';
+export { useSubscription } from './hooks/useSubscription';
+export { useApolloClient } from './hooks/useApolloClient';
+export { RenderPromises } from './ssr/RenderPromises';
+export {
+ DocumentType,
+ IDocumentDefinition,
+ operationName,
+ parser
+} from './parser/parser';
+
+export * from './types/types';
diff --git a/src/react/parser/parser.ts b/src/react/parser/parser.ts
new file mode 100644
--- /dev/null
+++ b/src/react/parser/parser.ts
@@ -0,0 +1,115 @@
+import {
+ DocumentNode,
+ DefinitionNode,
+ VariableDefinitionNode,
+ OperationDefinitionNode
+} from 'graphql';
+import { invariant } from 'ts-invariant';
+
+export enum DocumentType {
+ Query,
+ Mutation,
+ Subscription
+}
+
+export interface IDocumentDefinition {
+ type: DocumentType;
+ name: string;
+ variables: ReadonlyArray<VariableDefinitionNode>;
+}
+
+const cache = new Map();
+
+export function operationName(type: DocumentType) {
+ let name;
+ switch (type) {
+ case DocumentType.Query:
+ name = 'Query';
+ break;
+ case DocumentType.Mutation:
+ name = 'Mutation';
+ break;
+ case DocumentType.Subscription:
+ name = 'Subscription';
+ break;
+ }
+ return name;
+}
+
+// This parser is mostly used to safety-check incoming documents.
+export function parser(document: DocumentNode): IDocumentDefinition {
+ const cached = cache.get(document);
+ if (cached) return cached;
+
+ let variables, type, name;
+
+ invariant(
+ !!document && !!document.kind,
+ `Argument of ${document} passed to parser was not a valid GraphQL ` +
+ `DocumentNode. You may need to use 'graphql-tag' or another method ` +
+ `to convert your operation into a document`
+ );
+
+ const fragments = document.definitions.filter(
+ (x: DefinitionNode) => x.kind === 'FragmentDefinition'
+ );
+
+ const queries = document.definitions.filter(
+ (x: DefinitionNode) =>
+ x.kind === 'OperationDefinition' && x.operation === 'query'
+ );
+
+ const mutations = document.definitions.filter(
+ (x: DefinitionNode) =>
+ x.kind === 'OperationDefinition' && x.operation === 'mutation'
+ );
+
+ const subscriptions = document.definitions.filter(
+ (x: DefinitionNode) =>
+ x.kind === 'OperationDefinition' && x.operation === 'subscription'
+ );
+
+ invariant(
+ !fragments.length ||
+ (queries.length || mutations.length || subscriptions.length),
+ `Passing only a fragment to 'graphql' is not yet supported. ` +
+ `You must include a query, subscription or mutation as well`
+ );
+
+ invariant(
+ queries.length + mutations.length + subscriptions.length <= 1,
+ `react-apollo only supports a query, subscription, or a mutation per HOC. ` +
+ `${document} had ${queries.length} queries, ${subscriptions.length} ` +
+ `subscriptions and ${mutations.length} mutations. ` +
+ `You can use 'compose' to join multiple operation types to a component`
+ );
+
+ type = queries.length ? DocumentType.Query : DocumentType.Mutation;
+ if (!queries.length && !mutations.length) type = DocumentType.Subscription;
+
+ const definitions = queries.length
+ ? queries
+ : mutations.length
+ ? mutations
+ : subscriptions;
+
+ invariant(
+ definitions.length === 1,
+ `react-apollo only supports one definition per HOC. ${document} had ` +
+ `${definitions.length} definitions. ` +
+ `You can use 'compose' to join multiple operation types to a component`
+ );
+
+ const definition = definitions[0] as OperationDefinitionNode;
+ variables = definition.variableDefinitions || [];
+
+ if (definition.name && definition.name.kind === 'Name') {
+ name = definition.name.value;
+ } else {
+ name = 'data'; // fallback to using data if no name
+ }
+
+ const payload = { name, type, variables };
+ cache.set(document, payload);
+ return payload;
+}
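+
+// Parsed-shape sketch (assumes a graphql-tag style document; names are
+// illustrative):
+//
+//   const def = parser(gql`query GetDog($name: String) { dog(name: $name) { id } }`);
+//   // def.type === DocumentType.Query, def.name === 'GetDog',
+//   // def.variables holds the $name variable definition.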
diff --git a/src/react/react.ts b/src/react/react.ts
new file mode 100644
--- /dev/null
+++ b/src/react/react.ts
@@ -0,0 +1,9 @@
+let React: typeof import('react');
+
+// Apollo Client can be used without React, which means we want to make sure
+// `react` is only imported/required if actually needed. To help with this
+// the `react` module is lazy loaded using `requireReactLazily` when used by
+// Apollo Client's React integration layer.
+export function requireReactLazily(): typeof import('react') {
+ return React || (React = require('react'));
+}
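+
+// Callers resolve hooks at render time instead of via a top-level import,
+// e.g. (sketch): const { useContext, useRef } = requireReactLazily();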
diff --git a/src/react/ssr/RenderPromises.ts b/src/react/ssr/RenderPromises.ts
new file mode 100644
--- /dev/null
+++ b/src/react/ssr/RenderPromises.ts
@@ -0,0 +1,98 @@
+import { DocumentNode } from 'graphql';
+
+import { ObservableQuery } from '../../core/ObservableQuery';
+import { QueryDataOptions } from '../types/types';
+import { QueryData } from '../data/QueryData';
+
+type QueryInfo = {
+ seen: boolean;
+ observable: ObservableQuery<any, any> | null;
+};
+
+function makeDefaultQueryInfo(): QueryInfo {
+ return {
+ seen: false,
+ observable: null
+ };
+}
+
+export class RenderPromises {
+  // Map from Query component options objects to pending fetchData promises.
+ private queryPromises = new Map<QueryDataOptions<any, any>, Promise<any>>();
+
+ // Two-layered map from (query document, stringified variables) to QueryInfo
+ // objects. These QueryInfo objects are intended to survive through the whole
+ // getMarkupFromTree process, whereas specific Query instances do not survive
+ // beyond a single call to renderToStaticMarkup.
+ private queryInfoTrie = new Map<DocumentNode, Map<string, QueryInfo>>();
+
+  // Registers the server-side rendered observable.
+ public registerSSRObservable<TData, TVariables>(
+ observable: ObservableQuery<any, TVariables>,
+ props: QueryDataOptions<TData, TVariables>
+ ) {
+ this.lookupQueryInfo(props).observable = observable;
+ }
+
+  // Gets the cached observable that matches the SSR Query instance's query and variables.
+ public getSSRObservable<TData, TVariables>(
+ props: QueryDataOptions<TData, TVariables>
+ ) {
+ return this.lookupQueryInfo(props).observable;
+ }
+
+ public addQueryPromise<TData, TVariables>(
+ queryInstance: QueryData<TData, TVariables>,
+ finish: () => React.ReactNode
+ ): React.ReactNode {
+ const info = this.lookupQueryInfo(queryInstance.getOptions());
+ if (!info.seen) {
+ this.queryPromises.set(
+ queryInstance.getOptions(),
+ new Promise(resolve => {
+ resolve(queryInstance.fetchData());
+ })
+ );
+ // Render null to abandon this subtree for this rendering, so that we
+ // can wait for the data to arrive.
+ return null;
+ }
+ return finish();
+ }
+
+ public hasPromises() {
+ return this.queryPromises.size > 0;
+ }
+
+ public consumeAndAwaitPromises() {
+ const promises: Promise<any>[] = [];
+ this.queryPromises.forEach((promise, queryInstance) => {
+ // Make sure we never try to call fetchData for this query document and
+ // these variables again. Since the queryInstance objects change with
+ // every rendering, deduplicating them by query and variables is the
+ // best we can do. If a different Query component happens to have the
+ // same query document and variables, it will be immediately rendered
+ // by calling finish() in addQueryPromise, which could result in the
+ // rendering of an unwanted loading state, but that's not nearly as bad
+ // as getting stuck in an infinite rendering loop because we kept calling
+ // queryInstance.fetchData for the same Query component indefinitely.
+ this.lookupQueryInfo(queryInstance).seen = true;
+ promises.push(promise);
+ });
+ this.queryPromises.clear();
+ return Promise.all(promises);
+ }
+
+ private lookupQueryInfo<TData, TVariables>(
+ props: QueryDataOptions<TData, TVariables>
+ ): QueryInfo {
+ const { queryInfoTrie } = this;
+ const { query, variables } = props;
+ const varMap = queryInfoTrie.get(query) || new Map<string, QueryInfo>();
+ if (!queryInfoTrie.has(query)) queryInfoTrie.set(query, varMap);
+ const variablesString = JSON.stringify(variables);
+ const info = varMap.get(variablesString) || makeDefaultQueryInfo();
+ if (!varMap.has(variablesString)) varMap.set(variablesString, info);
+ return info;
+ }
+}
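+
+// Lookup sketch for the two-layered trie above (values are illustrative, not
+// a real API call):
+//
+//   queryInfoTrie.get(queryDocument)   // Map keyed by stringified variables
+//     .get('{"name":"Buck"}')          // => { seen: boolean, observable: ... }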
diff --git a/src/react/types/types.ts b/src/react/types/types.ts
new file mode 100644
--- /dev/null
+++ b/src/react/types/types.ts
@@ -0,0 +1,249 @@
+import { ReactNode } from 'react';
+import { DocumentNode } from 'graphql';
+
+import { Observable } from '../../utilities/observables/Observable';
+import { FetchResult } from '../../link/core/types';
+import { ApolloClient } from '../../ApolloClient';
+import {
+ ApolloQueryResult,
+ PureQueryOptions,
+ OperationVariables
+} from '../../core/types';
+import { ApolloError } from '../../errors/ApolloError';
+import {
+ FetchPolicy,
+ WatchQueryFetchPolicy,
+ ErrorPolicy,
+ FetchMoreQueryOptions,
+ MutationUpdaterFn,
+} from '../../core/watchQueryOptions';
+import { FetchMoreOptions, ObservableQuery } from '../../core/ObservableQuery';
+import { NetworkStatus } from '../../core/networkStatus';
+
+/* Common types */
+
+export type Context = Record<string, any>;
+
+export type CommonOptions<TOptions> = TOptions & {
+ client?: ApolloClient<object>;
+};
+
+/* Query types */
+
+export interface BaseQueryOptions<TVariables = OperationVariables> {
+ ssr?: boolean;
+ variables?: TVariables;
+ fetchPolicy?: WatchQueryFetchPolicy;
+ errorPolicy?: ErrorPolicy;
+ pollInterval?: number;
+ client?: ApolloClient<any>;
+ notifyOnNetworkStatusChange?: boolean;
+ context?: Context;
+ partialRefetch?: boolean;
+ returnPartialData?: boolean;
+}
+
+export interface QueryFunctionOptions<
+ TData = any,
+ TVariables = OperationVariables
+> extends BaseQueryOptions<TVariables> {
+ displayName?: string;
+ skip?: boolean;
+ onCompleted?: (data: TData) => void;
+ onError?: (error: ApolloError) => void;
+}
+
+export type ObservableQueryFields<TData, TVariables> = Pick<
+ ObservableQuery<TData, TVariables>,
+ | 'startPolling'
+ | 'stopPolling'
+ | 'subscribeToMore'
+ | 'updateQuery'
+ | 'refetch'
+ | 'variables'
+> & {
+ fetchMore: (<K extends keyof TVariables>(
+ fetchMoreOptions: FetchMoreQueryOptions<TVariables, K> &
+ FetchMoreOptions<TData, TVariables>
+ ) => Promise<ApolloQueryResult<TData>>) &
+ (<TData2, TVariables2, K extends keyof TVariables2>(
+ fetchMoreOptions: { query?: DocumentNode } & FetchMoreQueryOptions<
+ TVariables2,
+ K
+ > &
+ FetchMoreOptions<TData2, TVariables2>
+ ) => Promise<ApolloQueryResult<TData2>>);
+};
+
+export interface QueryResult<TData = any, TVariables = OperationVariables>
+ extends ObservableQueryFields<TData, TVariables> {
+ client: ApolloClient<any>;
+ data: TData | undefined;
+ error?: ApolloError;
+ loading: boolean;
+ networkStatus: NetworkStatus;
+ called: boolean;
+}
+
+export interface QueryDataOptions<TData = any, TVariables = OperationVariables>
+ extends QueryFunctionOptions<TData, TVariables> {
+ children?: (result: QueryResult<TData, TVariables>) => ReactNode;
+ query: DocumentNode;
+}
+
+export interface QueryHookOptions<TData = any, TVariables = OperationVariables>
+ extends QueryFunctionOptions<TData, TVariables> {
+ query?: DocumentNode;
+}
+
+export interface LazyQueryHookOptions<
+ TData = any,
+ TVariables = OperationVariables
+> extends Omit<QueryFunctionOptions<TData, TVariables>, 'skip'> {
+ query?: DocumentNode;
+}
+
+export interface QueryPreviousData<TData, TVariables> {
+ client?: ApolloClient<object>;
+ query?: DocumentNode;
+ observableQueryOptions?: {};
+ result?: ApolloQueryResult<TData> | null;
+ loading?: boolean;
+ options?: QueryDataOptions<TData, TVariables>;
+ error?: ApolloError;
+}
+
+export interface QueryCurrentObservable<TData, TVariables> {
+ query?: ObservableQuery<TData, TVariables> | null;
+ subscription?: ZenObservable.Subscription;
+}
+
+export interface QueryLazyOptions<TVariables> {
+ variables?: TVariables;
+ context?: Context;
+}
+
+export type QueryTuple<TData, TVariables> = [
+ (options?: QueryLazyOptions<TVariables>) => void,
+ QueryResult<TData, TVariables>
+];
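+
+// Shape sketch (assumed hook usage, mirroring useLazyQuery's return value;
+// GET_DOG_QUERY is a placeholder document):
+//
+//   const [getDog, { loading, data }] = useLazyQuery(GET_DOG_QUERY);
+//   // getDog({ variables: { name: 'Buck' } }) triggers the query lazily.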
+
+/* Mutation types */
+
+export type RefetchQueriesFunction = (
+ ...args: any[]
+) => Array<string | PureQueryOptions>;
+
+export interface BaseMutationOptions<
+ TData = any,
+ TVariables = OperationVariables
+> {
+ variables?: TVariables;
+ optimisticResponse?: TData | ((vars: TVariables) => TData);
+ refetchQueries?: Array<string | PureQueryOptions> | RefetchQueriesFunction;
+ awaitRefetchQueries?: boolean;
+ errorPolicy?: ErrorPolicy;
+ update?: MutationUpdaterFn<TData>;
+ client?: ApolloClient<object>;
+ notifyOnNetworkStatusChange?: boolean;
+ context?: Context;
+ onCompleted?: (data: TData) => void;
+ onError?: (error: ApolloError) => void;
+ fetchPolicy?: WatchQueryFetchPolicy;
+ ignoreResults?: boolean;
+}
+
+export interface MutationFunctionOptions<
+ TData = any,
+ TVariables = OperationVariables
+> {
+ variables?: TVariables;
+ optimisticResponse?: TData | ((vars: TVariables | {}) => TData);
+ refetchQueries?: Array<string | PureQueryOptions> | RefetchQueriesFunction;
+ awaitRefetchQueries?: boolean;
+ update?: MutationUpdaterFn<TData>;
+ context?: Context;
+ fetchPolicy?: WatchQueryFetchPolicy;
+}
+
+export interface MutationResult<TData = any> {
+ data?: TData;
+ error?: ApolloError;
+ loading: boolean;
+ called: boolean;
+ client?: ApolloClient<object>;
+}
+
+export declare type MutationFunction<
+ TData = any,
+ TVariables = OperationVariables
+> = (
+ options?: MutationFunctionOptions<TData, TVariables>
+) => Promise<FetchResult<TData>>;
+
+export interface MutationHookOptions<
+ TData = any,
+ TVariables = OperationVariables
+> extends BaseMutationOptions<TData, TVariables> {
+ mutation?: DocumentNode;
+}
+
+export interface MutationDataOptions<TData = any, TVariables = OperationVariables>
+ extends BaseMutationOptions<TData, TVariables> {
+ mutation: DocumentNode;
+}
+
+export type MutationTuple<TData, TVariables> = [
+ (
+ options?: MutationFunctionOptions<TData, TVariables>
+ ) => Promise<FetchResult<TData>>,
+ MutationResult<TData>
+];
+
+/* Subscription types */
+
+export interface OnSubscriptionDataOptions<TData = any> {
+ client: ApolloClient<object>;
+ subscriptionData: SubscriptionResult<TData>;
+}
+
+export interface BaseSubscriptionOptions<
+ TData = any,
+ TVariables = OperationVariables
+> {
+ variables?: TVariables;
+ fetchPolicy?: FetchPolicy;
+ shouldResubscribe?:
+ | boolean
+ | ((options: BaseSubscriptionOptions<TData, TVariables>) => boolean);
+ client?: ApolloClient<object>;
+ skip?: boolean;
+ onSubscriptionData?: (options: OnSubscriptionDataOptions<TData>) => any;
+ onSubscriptionComplete?: () => void;
+}
+
+export interface SubscriptionResult<TData = any> {
+ loading: boolean;
+ data?: TData;
+ error?: ApolloError;
+}
+
+export interface SubscriptionHookOptions<
+ TData = any,
+ TVariables = OperationVariables
+> extends BaseSubscriptionOptions<TData, TVariables> {
+ subscription?: DocumentNode;
+}
+
+export interface SubscriptionDataOptions<
+ TData = any,
+ TVariables = OperationVariables
+> extends BaseSubscriptionOptions<TData, TVariables> {
+ subscription: DocumentNode;
+ children?: null | ((result: SubscriptionResult<TData>) => JSX.Element | null);
+}
+
+export interface SubscriptionCurrentObservable {
+ query?: Observable<any>;
+ subscription?: ZenObservable.Subscription;
+}
diff --git a/src/utilities/codemods/mockLinkRejection.ts b/src/utilities/codemods/mockLinkRejection.ts
new file mode 100644
--- /dev/null
+++ b/src/utilities/codemods/mockLinkRejection.ts
@@ -0,0 +1,40 @@
+import * as recast from "recast";
+const n = recast.types.namedTypes;
+const b = recast.types.builders;
+
+export default function (fileInfo: any, api: any) {
+ const ast = recast.parse(fileInfo.source, {
+ parser: require("recast/parsers/typescript"),
+ });
+
+ // Transform mockSingleLink(reject, ...) to
+ // mockSingleLink(...).setOnError(reject):
+
+ const transformed = recast.visit(ast, {
+ visitCallExpression(path) {
+ this.traverse(path);
+ const node = path.node;
+ if (n.Identifier.check(node.callee) &&
+ node.callee.name === "mockSingleLink") {
+ const firstArg = node.arguments[0];
+ if ((n.Identifier.check(firstArg) &&
+ firstArg.name === "reject") ||
+ n.Function.check(firstArg)) {
+ path.get("arguments").shift();
+ path.replace(
+ b.callExpression(
+ b.memberExpression(
+ node,
+ b.identifier("setOnError"),
+ false,
+ ),
+ [firstArg],
+ ),
+ );
+ }
+ }
+ },
+ });
+
+ return recast.print(transformed).code;
+}
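+
+// This file follows the jscodeshift transform signature (fileInfo, api), so a
+// plausible invocation (assumption; adjust the target path) would be:
+//
+//   jscodeshift -t src/utilities/codemods/mockLinkRejection.ts src/__tests__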
diff --git a/packages/apollo-client/src/util/arrays.ts b/src/utilities/common/arrays.ts
similarity index 100%
rename from packages/apollo-client/src/util/arrays.ts
rename to src/utilities/common/arrays.ts
diff --git a/packages/apollo-utilities/src/util/assign.ts b/src/utilities/common/assign.ts
similarity index 100%
rename from packages/apollo-utilities/src/util/assign.ts
rename to src/utilities/common/assign.ts
diff --git a/packages/apollo-utilities/src/util/canUse.ts b/src/utilities/common/canUse.ts
similarity index 100%
rename from packages/apollo-utilities/src/util/canUse.ts
rename to src/utilities/common/canUse.ts
diff --git a/packages/apollo-utilities/src/util/cloneDeep.ts b/src/utilities/common/cloneDeep.ts
similarity index 85%
rename from packages/apollo-utilities/src/util/cloneDeep.ts
rename to src/utilities/common/cloneDeep.ts
--- a/packages/apollo-utilities/src/util/cloneDeep.ts
+++ b/src/utilities/common/cloneDeep.ts
@@ -4,12 +4,13 @@ const { toString } = Object.prototype;
* Deeply clones a value to create a new instance.
*/
export function cloneDeep<T>(value: T): T {
- return cloneDeepHelper(value, new Map());
+ return cloneDeepHelper(value);
}
-function cloneDeepHelper<T>(val: T, seen: Map<any, any>): T {
+function cloneDeepHelper<T>(val: T, seen?: Map<any, any>): T {
switch (toString.call(val)) {
case "[object Array]": {
+ seen = seen || new Map;
if (seen.has(val)) return seen.get(val);
const copy: T & any[] = (val as any).slice(0);
seen.set(val, copy);
@@ -20,6 +21,7 @@ function cloneDeepHelper<T>(val: T, seen: Map<any, any>): T {
}
case "[object Object]": {
+ seen = seen || new Map;
if (seen.has(val)) return seen.get(val);
// High fidelity polyfills of Object.create and Object.getPrototypeOf are
// possible in all JS environments, so we will assume they exist/work.
diff --git a/packages/apollo-utilities/src/util/environment.ts b/src/utilities/common/environment.ts
similarity index 83%
rename from packages/apollo-utilities/src/util/environment.ts
rename to src/utilities/common/environment.ts
--- a/packages/apollo-utilities/src/util/environment.ts
+++ b/src/utilities/common/environment.ts
@@ -11,10 +11,6 @@ export function isEnv(env: string): boolean {
return getEnv() === env;
}
-export function isProduction(): boolean {
- return isEnv('production') === true;
-}
-
export function isDevelopment(): boolean {
return isEnv('development') === true;
}
diff --git a/packages/apollo-utilities/src/util/errorHandling.ts b/src/utilities/common/errorHandling.ts
similarity index 100%
rename from packages/apollo-utilities/src/util/errorHandling.ts
rename to src/utilities/common/errorHandling.ts
diff --git a/packages/apollo-utilities/src/util/filterInPlace.ts b/src/utilities/common/filterInPlace.ts
similarity index 100%
rename from packages/apollo-utilities/src/util/filterInPlace.ts
rename to src/utilities/common/filterInPlace.ts
diff --git a/src/utilities/common/maybeDeepFreeze.ts b/src/utilities/common/maybeDeepFreeze.ts
new file mode 100644
--- /dev/null
+++ b/src/utilities/common/maybeDeepFreeze.ts
@@ -0,0 +1,25 @@
+import { isDevelopment, isTest } from './environment';
+
+function isObject(value: any) {
+ return value !== null && typeof value === "object";
+}
+
+function deepFreeze(value: any) {
+ const workSet = new Set([value]);
+ workSet.forEach(obj => {
+ if (isObject(obj)) {
+ if (!Object.isFrozen(obj)) Object.freeze(obj);
+ Object.getOwnPropertyNames(obj).forEach(name => {
+ if (isObject(obj[name])) workSet.add(obj[name]);
+ });
+ }
+ });
+ return value;
+}
+
+export function maybeDeepFreeze(obj: any) {
+ if (isDevelopment() || isTest()) {
+ deepFreeze(obj);
+ }
+ return obj;
+}
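+
+// Behavior sketch: in development and test environments the returned object is
+// recursively frozen, so accidental mutations throw in strict mode.
+//
+//   const result = maybeDeepFreeze({ user: { id: 1 } });
+//   // result.user.id = 2; // TypeError in dev/test (strict mode); no-op in prod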
diff --git a/src/utilities/common/mergeDeep.ts b/src/utilities/common/mergeDeep.ts
new file mode 100644
--- /dev/null
+++ b/src/utilities/common/mergeDeep.ts
@@ -0,0 +1,122 @@
+const { hasOwnProperty } = Object.prototype;
+
+// These mergeDeep and mergeDeepArray utilities merge any number of objects
+// together, sharing as much memory as possible with the source objects, while
+// remaining careful to avoid modifying any source objects.
+
+// Logically, the return type of mergeDeep should be the intersection of
+// all the argument types. The binary call signature is by far the most
+// common, but we support 0- through 5-ary as well. After that, the
+// resulting type is just the inferred array element type. Note to nerds:
+// there is a more clever way of doing this that converts the tuple type
+// first to a union type (easy enough: T[number]) and then converts the
+// union to an intersection type using distributive conditional type
+// inference, but that approach has several fatal flaws (boolean becomes
+// true & false, and the inferred type ends up as unknown in many cases),
+// in addition to being nearly impossible to explain/understand.
+export type TupleToIntersection<T extends any[]> =
+ T extends [infer A] ? A :
+ T extends [infer A, infer B] ? A & B :
+ T extends [infer A, infer B, infer C] ? A & B & C :
+ T extends [infer A, infer B, infer C, infer D] ? A & B & C & D :
+ T extends [infer A, infer B, infer C, infer D, infer E] ? A & B & C & D & E :
+ T extends (infer U)[] ? U : any;
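+
+// Type-level example: TupleToIntersection<[{ a: number }, { b: string }]>
+// resolves to { a: number } & { b: string }.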
+
+export function mergeDeep<T extends any[]>(
+ ...sources: T
+): TupleToIntersection<T> {
+ return mergeDeepArray(sources);
+}
+
+// In almost any situation where you could succeed in getting the
+// TypeScript compiler to infer a tuple type for the sources array, you
+// could just use mergeDeep instead of mergeDeepArray, so instead of
+// trying to convert T[] to an intersection type we just infer the array
+// element type, which works perfectly when the sources array has a
+// consistent element type.
+export function mergeDeepArray<T>(sources: T[]): T {
+ let target = sources[0] || ({} as T);
+ const count = sources.length;
+ if (count > 1) {
+ const merger = new DeepMerger();
+ for (let i = 1; i < count; ++i) {
+ target = merger.merge(target, sources[i]);
+ }
+ }
+ return target;
+}
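+
+// Merge sketch: nested objects combine key-by-key, and source objects are
+// never mutated (copies are made only where a collision forces one).
+//
+//   mergeDeep({ a: { x: 1 } }, { a: { y: 2 } }); // => { a: { x: 1, y: 2 } }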
+
+function isObject(obj: any): obj is Record<string | number, any> {
+ return obj !== null && typeof obj === 'object';
+}
+
+export type ReconcilerFunction<TContextArgs extends any[]> = (
+ this: DeepMerger<TContextArgs>,
+ target: Record<string | number, any>,
+ source: Record<string | number, any>,
+ property: string | number,
+ ...context: TContextArgs
+) => any;
+
+const defaultReconciler: ReconcilerFunction<any[]> =
+ function (target, source, property) {
+ return this.merge(target[property], source[property]);
+ };
+
+export class DeepMerger<TContextArgs extends any[]> {
+ private pastCopies: any[] = [];
+
+ constructor(
+ private reconciler: ReconcilerFunction<TContextArgs> = defaultReconciler,
+ ) {}
+
+ public merge(target: any, source: any, ...context: TContextArgs): any {
+ if (isObject(source) && isObject(target)) {
+ Object.keys(source).forEach(sourceKey => {
+ if (hasOwnProperty.call(target, sourceKey)) {
+ const targetValue = target[sourceKey];
+ if (source[sourceKey] !== targetValue) {
+ const result = this.reconciler(target, source, sourceKey, ...context);
+ // A well-implemented reconciler may return targetValue to indicate
+ // the merge changed nothing about the structure of the target.
+ if (result !== targetValue) {
+ target = this.shallowCopyForMerge(target);
+ target[sourceKey] = result;
+ }
+ }
+ } else {
+ // If there is no collision, the target can safely share memory with
+ // the source, and the recursion can terminate here.
+ target = this.shallowCopyForMerge(target);
+ target[sourceKey] = source[sourceKey];
+ }
+ });
+
+ return target;
+ }
+
+ // If source (or target) is not an object, let source replace target.
+ return source;
+ }
+
+ public isObject = isObject;
+
+ public shallowCopyForMerge<T>(value: T): T {
+ if (
+ value !== null &&
+ typeof value === 'object' &&
+ this.pastCopies.indexOf(value) < 0
+ ) {
+ if (Array.isArray(value)) {
+ value = (value as any).slice(0);
+ } else {
+ value = {
+ __proto__: Object.getPrototypeOf(value),
+ ...value,
+ };
+ }
+ this.pastCopies.push(value);
+ }
+ return value;
+ }
+}
diff --git a/packages/apollo-utilities/src/directives.ts b/src/utilities/graphql/directives.ts
similarity index 85%
rename from packages/apollo-utilities/src/directives.ts
rename to src/utilities/graphql/directives.ts
--- a/packages/apollo-utilities/src/directives.ts
+++ b/src/utilities/graphql/directives.ts
@@ -1,7 +1,6 @@
// Provides the methods that allow QueryManager to handle the `skip` and
// `include` directives within GraphQL.
import {
- FieldNode,
SelectionNode,
VariableNode,
BooleanValueNode,
@@ -15,29 +14,10 @@ import { visit } from 'graphql/language/visitor';
import { invariant } from 'ts-invariant';
-import { argumentsObjectFromField } from './storeUtils';
-
export type DirectiveInfo = {
[fieldName: string]: { [argName: string]: any };
};
-export function getDirectiveInfoFromField(
- field: FieldNode,
- variables: Object,
-): DirectiveInfo {
- if (field.directives && field.directives.length) {
- const directiveObj: DirectiveInfo = {};
- field.directives.forEach((directive: DirectiveNode) => {
- directiveObj[directive.name.value] = argumentsObjectFromField(
- directive,
- variables,
- );
- });
- return directiveObj;
- }
- return null;
-}
-
export function shouldInclude(
selection: SelectionNode,
variables: { [name: string]: any } = {},
diff --git a/packages/apollo-utilities/src/fragments.ts b/src/utilities/graphql/fragments.ts
similarity index 70%
rename from packages/apollo-utilities/src/fragments.ts
rename to src/utilities/graphql/fragments.ts
--- a/packages/apollo-utilities/src/fragments.ts
+++ b/src/utilities/graphql/fragments.ts
@@ -1,4 +1,9 @@
-import { DocumentNode, FragmentDefinitionNode } from 'graphql';
+import {
+ DocumentNode,
+ FragmentDefinitionNode,
+ InlineFragmentNode,
+ SelectionNode
+} from 'graphql';
import { invariant, InvariantError } from 'ts-invariant';
/**
@@ -90,3 +95,39 @@ export function getFragmentQueryDocument(
return query;
}
+
+/**
+ * This is an interface that describes a map from fragment names to fragment definitions.
+ */
+export interface FragmentMap {
+ [fragmentName: string]: FragmentDefinitionNode;
+}
+
+// Utility function that takes a list of fragment definitions and makes a hash out of them
+// that maps the name of the fragment to the fragment definition.
+export function createFragmentMap(
+ fragments: FragmentDefinitionNode[] = [],
+): FragmentMap {
+ const symTable: FragmentMap = {};
+ fragments.forEach(fragment => {
+ symTable[fragment.name.value] = fragment;
+ });
+ return symTable;
+}
+
+export function getFragmentFromSelection(
+ selection: SelectionNode,
+ fragmentMap: FragmentMap,
+): InlineFragmentNode | FragmentDefinitionNode | null {
+ switch (selection.kind) {
+ case 'InlineFragment':
+ return selection;
+ case 'FragmentSpread': {
+ const fragment = fragmentMap && fragmentMap[selection.name.value];
+ invariant(fragment, `No fragment named ${selection.name.value}.`);
+ return fragment;
+ }
+ default:
+ return null;
+ }
+}
diff --git a/packages/apollo-utilities/src/getFromAST.ts b/src/utilities/graphql/getFromAST.ts
similarity index 70%
rename from packages/apollo-utilities/src/getFromAST.ts
rename to src/utilities/graphql/getFromAST.ts
--- a/packages/apollo-utilities/src/getFromAST.ts
+++ b/src/utilities/graphql/getFromAST.ts
@@ -7,25 +7,9 @@ import {
import { invariant, InvariantError } from 'ts-invariant';
-import { assign } from './util/assign';
+import { assign } from '../common/assign';
-import { valueToObjectRepresentation, JsonValue } from './storeUtils';
-
-export function getMutationDefinition(
- doc: DocumentNode,
-): OperationDefinitionNode {
- checkDocument(doc);
-
- let mutationDef: OperationDefinitionNode | null = doc.definitions.filter(
- definition =>
- definition.kind === 'OperationDefinition' &&
- definition.operation === 'mutation',
- )[0] as OperationDefinitionNode;
-
- invariant(mutationDef, 'Must contain a mutation definition.');
-
- return mutationDef;
-}
+import { valueToObjectRepresentation } from './storeUtils';
// Checks the document for errors and throws an exception if there is an error.
export function checkDocument(doc: DocumentNode) {
@@ -65,14 +49,6 @@ export function getOperationDefinition(
)[0] as OperationDefinitionNode;
}
-export function getOperationDefinitionOrDie(
- document: DocumentNode,
-): OperationDefinitionNode {
- const def = getOperationDefinition(document);
- invariant(def, `GraphQL document is missing an operation`);
- return def;
-}
-
export function getOperationName(doc: DocumentNode): string | null {
return (
doc.definitions
@@ -167,29 +143,9 @@ export function getMainDefinition(
);
}
-/**
- * This is an interface that describes a map from fragment names to fragment definitions.
- */
-export interface FragmentMap {
- [fragmentName: string]: FragmentDefinitionNode;
-}
-
-// Utility function that takes a list of fragment definitions and makes a hash out of them
-// that maps the name of the fragment to the fragment definition.
-export function createFragmentMap(
- fragments: FragmentDefinitionNode[] = [],
-): FragmentMap {
- const symTable: FragmentMap = {};
- fragments.forEach(fragment => {
- symTable[fragment.name.value] = fragment;
- });
-
- return symTable;
-}
-
export function getDefaultValues(
definition: OperationDefinitionNode | undefined,
-): { [key: string]: JsonValue } {
+): Record<string, any> {
if (
definition &&
definition.variableDefinitions &&
@@ -198,8 +154,8 @@ export function getDefaultValues(
const defaultValues = definition.variableDefinitions
.filter(({ defaultValue }) => defaultValue)
.map(
- ({ variable, defaultValue }): { [key: string]: JsonValue } => {
- const defaultValueObj: { [key: string]: JsonValue } = {};
+ ({ variable, defaultValue }): Record<string, any> => {
+ const defaultValueObj: Record<string, any> = {};
valueToObjectRepresentation(
defaultValueObj,
variable.name,
@@ -215,19 +171,3 @@ export function getDefaultValues(
return {};
}
-
-/**
- * Returns the names of all variables declared by the operation.
- */
-export function variablesInOperation(
- operation: OperationDefinitionNode,
-): Set<string> {
- const names = new Set<string>();
- if (operation.variableDefinitions) {
- for (const definition of operation.variableDefinitions) {
- names.add(definition.variable.name.value);
- }
- }
-
- return names;
-}
diff --git a/packages/apollo-utilities/src/storeUtils.ts b/src/utilities/graphql/storeUtils.ts
similarity index 73%
rename from packages/apollo-utilities/src/storeUtils.ts
rename to src/utilities/graphql/storeUtils.ts
--- a/packages/apollo-utilities/src/storeUtils.ts
+++ b/src/utilities/graphql/storeUtils.ts
@@ -14,49 +14,36 @@ import {
ValueNode,
SelectionNode,
NameNode,
+ SelectionSetNode,
} from 'graphql';
import stringify from 'fast-json-stable-stringify';
import { InvariantError } from 'ts-invariant';
+import { FragmentMap, getFragmentFromSelection } from './fragments';
-export interface IdValue {
- type: 'id';
- id: string;
- generated: boolean;
- typename: string | undefined;
+export interface Reference {
+ readonly __ref: string;
}
-export interface JsonValue {
- type: 'json';
- json: any;
+export function makeReference(id: string): Reference {
+ return { __ref: String(id) };
}
-export type ListValue = Array<null | IdValue>;
+export function isReference(obj: any): obj is Reference {
+ return obj && typeof obj === 'object' && typeof obj.__ref === 'string';
+}
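+
+// Example: makeReference('User:1') yields { __ref: 'User:1' }, and
+// isReference(value) narrows that value to the Reference type.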
export type StoreValue =
| number
| string
| string[]
- | IdValue
- | ListValue
- | JsonValue
+ | Reference
+ | Reference[]
| null
| undefined
| void
| Object;
-export type ScalarValue = StringValueNode | BooleanValueNode | EnumValueNode;
-
-export function isScalarValue(value: ValueNode): value is ScalarValue {
- return ['StringValue', 'BooleanValue', 'EnumValue'].indexOf(value.kind) > -1;
-}
-
-export type NumberValue = IntValueNode | FloatValueNode;
-
-export function isNumberValue(value: ValueNode): value is NumberValue {
- return ['IntValue', 'FloatValue'].indexOf(value.kind) > -1;
-}
-
function isStringValue(value: ValueNode): value is StringValueNode {
return value.kind === 'StringValue';
}
@@ -261,6 +248,33 @@ export function resultKeyNameFromField(field: FieldNode): string {
return field.alias ? field.alias.value : field.name.value;
}
+export function getTypenameFromResult(
+ result: Record<string, any>,
+ selectionSet: SelectionSetNode,
+ fragmentMap: FragmentMap,
+): string | undefined {
+ if (typeof result.__typename === 'string') {
+ return result.__typename;
+ }
+
+ for (const selection of selectionSet.selections) {
+ if (isField(selection)) {
+ if (selection.name.value === '__typename') {
+ return result[resultKeyNameFromField(selection)];
+ }
+ } else {
+ const typename = getTypenameFromResult(
+ result,
+ getFragmentFromSelection(selection, fragmentMap).selectionSet,
+ fragmentMap,
+ );
+ if (typeof typename === 'string') {
+ return typename;
+ }
+ }
+ }
+}
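+
+// Sketch: for result { __typename: 'Dog' } (or a field aliasing __typename)
+// this returns 'Dog'; fragment selections are searched recursively.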
+
export function isField(selection: SelectionNode): selection is FieldNode {
return selection.kind === 'Field';
}
@@ -271,70 +285,5 @@ export function isInlineFragment(
return selection.kind === 'InlineFragment';
}
-export function isIdValue(idObject: StoreValue): idObject is IdValue {
- return idObject &&
- (idObject as IdValue | JsonValue).type === 'id' &&
- typeof (idObject as IdValue).generated === 'boolean';
-}
-
-export type IdConfig = {
- id: string;
- typename: string | undefined;
-};
-
-export function toIdValue(
- idConfig: string | IdConfig,
- generated = false,
-): IdValue {
- return {
- type: 'id',
- generated,
- ...(typeof idConfig === 'string'
- ? { id: idConfig, typename: undefined }
- : idConfig),
- };
-}
-
-export function isJsonValue(jsonObject: StoreValue): jsonObject is JsonValue {
- return (
- jsonObject != null &&
- typeof jsonObject === 'object' &&
- (jsonObject as IdValue | JsonValue).type === 'json'
- );
-}
-
-function defaultValueFromVariable(node: VariableNode) {
- throw new InvariantError(`Variable nodes are not supported by valueFromNode`);
-}
-
export type VariableValue = (node: VariableNode) => any;
-/**
- * Evaluate a ValueNode and yield its value in its natural JS form.
- */
-export function valueFromNode(
- node: ValueNode,
- onVariable: VariableValue = defaultValueFromVariable,
-): any {
- switch (node.kind) {
- case 'Variable':
- return onVariable(node);
- case 'NullValue':
- return null;
- case 'IntValue':
- return parseInt(node.value, 10);
- case 'FloatValue':
- return parseFloat(node.value);
- case 'ListValue':
- return node.values.map(v => valueFromNode(v, onVariable));
- case 'ObjectValue': {
- const value: { [key: string]: any } = {};
- for (const field of node.fields) {
- value[field.name.value] = valueFromNode(field.value, onVariable);
- }
- return value;
- }
- default:
- return node.value;
- }
-}
diff --git a/packages/apollo-utilities/src/transform.ts b/src/utilities/graphql/transform.ts
similarity index 92%
rename from packages/apollo-utilities/src/transform.ts
rename to src/utilities/graphql/transform.ts
--- a/packages/apollo-utilities/src/transform.ts
+++ b/src/utilities/graphql/transform.ts
@@ -12,19 +12,21 @@ import {
VariableNode,
} from 'graphql';
import { visit } from 'graphql/language/visitor';
+import { invariant } from 'ts-invariant';
import {
checkDocument,
getOperationDefinition,
getFragmentDefinition,
getFragmentDefinitions,
- createFragmentMap,
- FragmentMap,
getMainDefinition,
} from './getFromAST';
-import { filterInPlace } from './util/filterInPlace';
-import { invariant } from 'ts-invariant';
+import { filterInPlace } from '../common/filterInPlace';
import { isField, isInlineFragment } from './storeUtils';
+import {
+ createFragmentMap,
+ FragmentMap,
+} from './fragments';
export type RemoveNodeConfig<N> = {
name?: string;
@@ -323,48 +325,6 @@ function hasDirectivesInSelection(
);
}
-export function getDirectivesFromDocument(
- directives: GetDirectiveConfig[],
- doc: DocumentNode,
-): DocumentNode {
- checkDocument(doc);
-
- let parentPath: string;
-
- return nullIfDocIsEmpty(
- visit(doc, {
- SelectionSet: {
- enter(node, _key, _parent, path) {
- const currentPath = path.join('-');
-
- if (
- !parentPath ||
- currentPath === parentPath ||
- !currentPath.startsWith(parentPath)
- ) {
- if (node.selections) {
- const selectionsWithDirectives = node.selections.filter(
- selection => hasDirectivesInSelection(directives, selection),
- );
-
- if (hasDirectivesInSelectionSet(directives, node, false)) {
- parentPath = currentPath;
- }
-
- return {
- ...node,
- selections: selectionsWithDirectives,
- };
- } else {
- return null;
- }
- }
- },
- },
- }),
- );
-}
-
function getArgumentMatcher(config: RemoveArgumentsConfig[]) {
return function argumentMatcher(argument: ArgumentNode) {
return config.some(
diff --git a/src/utilities/index.ts b/src/utilities/index.ts
new file mode 100644
--- /dev/null
+++ b/src/utilities/index.ts
@@ -0,0 +1,63 @@
+export {
+ DirectiveInfo,
+ InclusionDirectives,
+ shouldInclude,
+ hasDirectives,
+ hasClientExports,
+ getDirectiveNames,
+ getInclusionDirectives,
+} from './graphql/directives';
+
+export {
+ FragmentMap,
+ createFragmentMap,
+ getFragmentQueryDocument,
+ getFragmentFromSelection,
+} from './graphql/fragments';
+
+export {
+ checkDocument,
+ getOperationDefinition,
+ getOperationName,
+ getFragmentDefinitions,
+ getQueryDefinition,
+ getFragmentDefinition,
+ getMainDefinition,
+ getDefaultValues,
+} from './graphql/getFromAST';
+
+export {
+ Reference,
+ StoreValue,
+ Directives,
+ VariableValue,
+ makeReference,
+ isReference,
+ isField,
+ isInlineFragment,
+ valueToObjectRepresentation,
+ storeKeyNameFromField,
+ argumentsObjectFromField,
+ resultKeyNameFromField,
+ getStoreKeyName,
+ getTypenameFromResult,
+} from './graphql/storeUtils';
+
+export {
+ RemoveNodeConfig,
+ GetNodeConfig,
+ RemoveDirectiveConfig,
+ GetDirectiveConfig,
+ RemoveArgumentsConfig,
+ GetFragmentSpreadConfig,
+ RemoveFragmentSpreadConfig,
+ RemoveFragmentDefinitionConfig,
+ RemoveVariableDefinitionConfig,
+ addTypenameToDocument,
+ buildQueryFromSelectionSet,
+ removeDirectivesFromDocument,
+ removeConnectionDirectiveFromDocument,
+ removeArgumentsFromDocument,
+ removeFragmentSpreadFromDocument,
+ removeClientSetsFromDocument,
+} from './graphql/transform';
diff --git a/src/utilities/observables/Observable.ts b/src/utilities/observables/Observable.ts
new file mode 100644
--- /dev/null
+++ b/src/utilities/observables/Observable.ts
@@ -0,0 +1,21 @@
+import Observable from 'zen-observable';
+
+// This simplified polyfill attempts to follow the ECMAScript Observable
+// proposal (https://github.com/zenparsing/es-observable)
+import 'symbol-observable';
+
+export type Subscription = ZenObservable.Subscription;
+export type Observer<T> = ZenObservable.Observer<T>;
+
+// Use global module augmentation to add RxJS interop functionality. By
+// using this approach (instead of subclassing `Observable` and adding an
+// ['@@observable']() method), we ensure the exported `Observable` retains all
+// existing type declarations from `@types/zen-observable` (which is important
+// for projects like `apollo-link`).
+declare global {
+ interface Observable<T> {
+ ['@@observable'](): Observable<T>;
+ }
+}
+(Observable.prototype as any)['@@observable'] = function () { return this; };
+export { Observable };
diff --git a/packages/apollo-client/src/util/observables.ts b/src/utilities/observables/observables.ts
similarity index 100%
rename from packages/apollo-client/src/util/observables.ts
rename to src/utilities/observables/observables.ts
diff --git a/packages/apollo-client/src/version.ts b/src/version.ts
similarity index 100%
rename from packages/apollo-client/src/version.ts
rename to src/version.ts
| diff --git a/docs/source/development-testing/testing.mdx b/docs/source/development-testing/testing.mdx
--- a/docs/source/development-testing/testing.mdx
+++ b/docs/source/development-testing/testing.mdx
@@ -1,21 +1,21 @@
---
title: Testing React components
-description: Have peace of mind when using React Apollo in production
+description: Peace of mind when using Apollo Client with React in production
---
import { MultiCodeBlock } from 'gatsby-theme-apollo-docs';
-Running tests against code meant for production has long been a best practice. It provides additional security for the code that's already written, and prevents accidental regressions in the future. Components utilizing React Apollo, the React implementation of Apollo Client, are no exception.
+Running tests against code meant for production has long been a best practice. It provides additional security for the code that's already written, and prevents accidental regressions in the future. Components utilizing React with Apollo Client are no exception.
-Although React Apollo has a lot going on under the hood, the library provides multiple tools for testing that simplify those abstractions, and allows complete focus on the component logic. These testing utilities have long been used to test the React Apollo library itself, so they will be supported long-term.
+Although Apollo Client's React integration has a lot going on under the hood, the library provides multiple tools for testing that simplify those abstractions and allow complete focus on the component logic. These testing utilities have long been used to test React functionality with Apollo Client itself, so they will be supported long-term.
## An introduction
-The React Apollo library relies on [React's context](https://reactjs.org/docs/context.html) to pass the `ApolloClient` instance through the React component tree. In addition, React Apollo makes network requests in order to fetch data. This behavior affects how tests should be written for components that use React Apollo.
+Apollo Client's React integration relies on [React's context](https://reactjs.org/docs/context.html) to pass the `ApolloClient` instance through the React component tree. In addition, Apollo Client makes network requests in order to fetch data. This behavior affects how tests should be written for components that use Apollo Client with React.
-This guide will explain step-by-step how to test React Apollo code. The following examples use the [Jest](https://facebook.github.io/jest/docs/en/tutorial-react.html) testing framework, but most concepts should be reusable with other libraries. These examples aim to use as simple of a toolset as possible, so React's [test renderer](https://reactjs.org/docs/test-renderer.html) will be used in place of React-specific tools like [Enzyme](https://github.com/airbnb/enzyme) and [react-testing-library](https://github.com/kentcdodds/react-testing-library).
+This guide will explain step-by-step how to test Apollo Client React code. The following examples use the [Jest](https://facebook.github.io/jest/docs/en/tutorial-react.html) testing framework, but most concepts should be reusable with other libraries. These examples aim to use as simple a toolset as possible, so React's [test renderer](https://reactjs.org/docs/test-renderer.html) will be used in place of React-specific tools like [Enzyme](https://github.com/airbnb/enzyme) and [react-testing-library](https://github.com/kentcdodds/react-testing-library).
-> **Note:** As of React Apollo 3, all testing utilities can now be found in their own `@apollo/react-testing` package.
+> **Note:** As of Apollo Client 3, all testing utilities can now be imported from `@apollo/client/testing`.
Consider the component below, which makes a basic query, and displays its results:
@@ -24,8 +24,7 @@ Consider the component below, which makes a basic query, and displays its result
```jsx
import React from 'react';
-import gql from 'graphql-tag';
-import { useQuery } from '@apollo/react-hooks';
+import { gql, useQuery } from '@apollo/client';
// Make sure the query is also exported -- not just the component
export const GET_DOG_QUERY = gql`
@@ -59,8 +58,8 @@ export function Dog({ name }) {
```jsx
import React from 'react';
-import gql from 'graphql-tag';
-import { Query } from 'react-apollo';
+import { gql } from "@apollo/client";
+import { Query } from '@apollo/react-components';
// Make sure the query is also exported -- not just the component
export const GET_DOG_QUERY = gql`
@@ -122,7 +121,7 @@ it('renders without error', () => {
## `MockedProvider`
-The `@apollo/react-testing` package exports a `MockedProvider` component which simplifies the testing of React components by mocking calls to the GraphQL endpoint. This allows the tests to be run in isolation and provides consistent results on every run by removing the dependence on remote data.
+The `@apollo/client` package exports a `MockedProvider` component which simplifies the testing of React components by mocking calls to the GraphQL endpoint. This allows the tests to be run in isolation and provides consistent results on every run by removing the dependence on remote data.
By using this `MockedProvider` component, it's possible to specify the exact results that should be returned for a certain query using the `mocks` prop.
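+
+For reference, each entry in `mocks` pairs a `request` (the query document plus any variables) with the `result` to return for it. A minimal sketch, reusing the `GET_DOG_QUERY` exported above:
+
+```jsx
+const mocks = [
+  {
+    request: { query: GET_DOG_QUERY, variables: { name: 'Buck' } },
+    result: { data: { dog: { id: '1', name: 'Buck', breed: 'bulldog' } } },
+  },
+];
+```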
@@ -131,7 +130,7 @@ Here's an example of a test for the above `Dog` component using `MockedProvider`
```jsx
// dog.test.js
-import { MockedProvider } from '@apollo/react-testing';
+import { MockedProvider } from '@apollo/client/testing';
// The component AND the query need to be exported
import { GET_DOG_QUERY, Dog } from './dog';
diff --git a/packages/apollo-boost/src/__tests__/__snapshots__/config.ts.snap b/packages/apollo-boost/src/__tests__/__snapshots__/config.ts.snap
deleted file mode 100644
--- a/packages/apollo-boost/src/__tests__/__snapshots__/config.ts.snap
+++ /dev/null
@@ -1,9 +0,0 @@
-// Jest Snapshot v1, https://goo.gl/fbAQLP
-
-exports[`config warns about unsupported parameter 1`] = `
-Array [
- Array [
- "ApolloBoost was initialized with unsupported options: link",
- ],
-]
-`;
diff --git a/packages/apollo-boost/src/__tests__/config.ts b/packages/apollo-boost/src/__tests__/config.ts
deleted file mode 100644
--- a/packages/apollo-boost/src/__tests__/config.ts
+++ /dev/null
@@ -1,225 +0,0 @@
-import ApolloClient, { gql, InMemoryCache } from '../';
-import { stripSymbols } from 'apollo-utilities';
-import fetchMock from 'fetch-mock';
-
-global.fetch = jest.fn(() =>
- Promise.resolve({ json: () => Promise.resolve({}) }),
-);
-
-const sleep = ms => new Promise(res => setTimeout(res, ms));
-
-describe('config', () => {
- const query = gql`
- {
- foo @client
- }
- `;
-
- const remoteQuery = gql`
- {
- foo
- }
- `;
-
- const resolvers = {
- Query: {
- foo: () => 'woo',
- },
- };
-
- it('warns about unsupported parameter', () => {
- jest.spyOn(global.console, 'warn');
-
- const client = new ApolloClient({
- link: [],
- });
-
- expect(global.console.warn.mock.calls).toMatchSnapshot();
- });
-
- it('allows you to pass in a custom fetcher', () => {
- const customFetcher = jest.fn(() =>
- Promise.resolve({
- text: () => Promise.resolve('{"data": {"foo": "bar" }}'),
- }),
- );
-
- const client = new ApolloClient({
- fetch: customFetcher,
- });
-
- client.query({ query }).then(({ data }) => {
- expect(customFetcher).toHaveBeenCalledTimes(1);
- expect(stripSymbols(data)).toEqual({ foo: 'bar' });
- });
- });
-
- it('allows you to pass in a request handler', () => {
- const customFetcher = jest.fn(() =>
- Promise.resolve({
- text: () => Promise.resolve('{"data": {"foo": "woo" }}'),
- }),
- );
-
- let requestCalled;
-
- const client = new ApolloClient({
- request: () => {
- requestCalled = true;
- },
- fetch: customFetcher,
- });
-
- return client
- .query({ query: remoteQuery, fetchPolicy: 'network-only' })
- .then(({ data }) => {
- expect(stripSymbols(data)).toEqual({ foo: 'woo' });
- expect(requestCalled).toEqual(true);
- });
- });
-
- it('allows you to pass in an async request handler', () => {
- const customFetcher = jest.fn(() =>
- Promise.resolve({
- text: () => Promise.resolve('{"data": {"foo": "woo" }}'),
- }),
- );
-
- let requestCalled;
-
- const client = new ApolloClient({
- request: () => {
- Promise.resolve().then(() => {
- requestCalled = true;
- });
- },
- fetch: customFetcher,
- });
-
- return client
- .query({ query: remoteQuery, fetchPolicy: 'network-only' })
- .then(({ data }) => {
- expect(stripSymbols(data)).toEqual({ foo: 'woo' });
- expect(requestCalled).toEqual(true);
- });
- });
-
- it('throws if passed cache and cacheRedirects', () => {
- const cache = new InMemoryCache();
- const cacheRedirects = { Query: { foo: () => 'woo' } };
-
- expect(_ => {
- const client = new ApolloClient({
- cache,
- cacheRedirects,
- });
- }).toThrow('Incompatible cache configuration');
- });
-
- it('allows you to pass in cache', () => {
- const cache = new InMemoryCache();
-
- const client = new ApolloClient({
- cache,
- });
-
- expect(client.cache).toBe(cache);
- });
-
- it('allows you to pass in cacheRedirects', () => {
- const cacheRedirects = { Query: { foo: () => 'woo' } };
-
- const client = new ApolloClient({
- cacheRedirects,
- });
-
- expect(client.cache.config.cacheRedirects).toEqual(cacheRedirects);
- });
-
- it('allows you to pass in name and version', () => {
- const name = 'client-name';
- const version = 'client-version';
-
- const client = new ApolloClient({
- name,
- version,
- });
-
- const { clientAwareness } = client.queryManager as any;
- expect(clientAwareness.name).toEqual(name);
- expect(clientAwareness.version).toEqual(version);
- });
-
- const makePromise = res =>
- new Promise((resolve, reject) => setTimeout(() => resolve(res)));
- const data = { data: { hello: 'world' } };
-
- describe('credentials', () => {
- beforeEach(() => {
- fetchMock.restore();
- fetchMock.post('/graphql', makePromise(data));
- });
-
- afterEach(() => {
- fetchMock.restore();
- });
-
- it('should set `credentials` to `same-origin` by default', () => {
- const client = new ApolloClient({});
- client.query({ query: remoteQuery, errorPolicy: 'ignore' });
- const [uri, options] = fetchMock.lastCall();
- expect(options.credentials).toEqual('same-origin');
- });
-
- it('should set `credentials` to `config.credentials` if supplied', () => {
- const client = new ApolloClient({
- credentials: 'some-new-value',
- });
- client.query({ query: remoteQuery, errorPolicy: 'ignore' });
- const [uri, options] = fetchMock.lastCall();
- expect(options.credentials).toEqual('some-new-value');
- });
- });
-
- describe('headers', () => {
- beforeEach(() => {
- fetchMock.restore();
- fetchMock.post('/graphql', makePromise(data));
- });
-
- afterEach(() => {
- fetchMock.restore();
- });
-
- it(
- 'should leave existing `headers` in place if no new headers are ' +
- 'provided',
- () => {
- const client = new ApolloClient({});
- client.query({ query: remoteQuery, errorPolicy: 'ignore' });
- const [uri, options] = fetchMock.lastCall();
- expect(options.headers).toEqual({
- accept: '*/*',
- 'content-type': 'application/json',
- });
- },
- );
-
- it('should add new `config.headers` to existing headers', () => {
- const client = new ApolloClient({
- headers: {
- 'new-header1': 'value1',
- 'new-header2': 'value2',
- },
- });
- client.query({ query: remoteQuery, errorPolicy: 'ignore' });
- const [uri, options] = fetchMock.lastCall();
- expect(options.headers).toEqual({
- accept: '*/*',
- 'content-type': 'application/json',
- 'new-header1': 'value1',
- 'new-header2': 'value2',
- });
- });
- });
-});
diff --git a/packages/apollo-boost/src/__tests__/smoke.ts b/packages/apollo-boost/src/__tests__/smoke.ts
deleted file mode 100644
--- a/packages/apollo-boost/src/__tests__/smoke.ts
+++ /dev/null
@@ -1,17 +0,0 @@
-import ApolloClient, { gql, HttpLink, InMemoryCache } from '../';
-
-global.fetch = jest.fn(() =>
- Promise.resolve({ json: () => Promise.resolve({}) }),
-);
-it('should have the required exports', () => {
- expect(ApolloClient).toBeDefined();
- expect(gql).toBeDefined();
- expect(HttpLink).toBeDefined();
- expect(InMemoryCache).toBeDefined();
-});
-
-it('should make a client with defaults', () => {
- const client = new ApolloClient();
- expect(client.link).toBeDefined();
- expect(client.store.cache).toBeDefined();
-});
diff --git a/packages/apollo-cache-inmemory/src/__tests__/__snapshots__/cache.ts.snap b/packages/apollo-cache-inmemory/src/__tests__/__snapshots__/cache.ts.snap
deleted file mode 100644
--- a/packages/apollo-cache-inmemory/src/__tests__/__snapshots__/cache.ts.snap
+++ /dev/null
@@ -1,367 +0,0 @@
-// Jest Snapshot v1, https://goo.gl/fbAQLP
-
-exports[`Cache writeFragment will write some deeply nested data into the store at any id (1/3) 1`] = `
-Object {
- "bar": Object {
- "i": 7,
- },
- "foo": Object {
- "e": 4,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": undefined,
- },
- },
-}
-`;
-
-exports[`Cache writeFragment will write some deeply nested data into the store at any id (1/3) 2`] = `
-Object {
- "bar": Object {
- "i": 7,
- "j": 8,
- "k": 9,
- },
- "foo": Object {
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": undefined,
- },
- },
-}
-`;
-
-exports[`Cache writeFragment will write some deeply nested data into the store at any id (1/3) 3`] = `
-Object {
- "bar": Object {
- "i": 10,
- "j": 8,
- "k": 9,
- },
- "foo": Object {
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": undefined,
- },
- },
-}
-`;
-
-exports[`Cache writeFragment will write some deeply nested data into the store at any id (1/3) 4`] = `
-Object {
- "bar": Object {
- "i": 10,
- "j": 11,
- "k": 12,
- },
- "foo": Object {
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": undefined,
- },
- },
-}
-`;
-
-exports[`Cache writeFragment will write some deeply nested data into the store at any id (1/3) 5`] = `
-Object {
- "bar": Object {
- "i": 7,
- "j": 8,
- "k": 9,
- },
- "foo": Object {
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": "Bar",
- },
- },
-}
-`;
-
-exports[`Cache writeFragment will write some deeply nested data into the store at any id (1/3) 6`] = `
-Object {
- "bar": Object {
- "i": 10,
- "j": 11,
- "k": 12,
- },
- "foo": Object {
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": "Bar",
- },
- },
-}
-`;
-
-exports[`Cache writeFragment will write some deeply nested data into the store at any id (2/3) 1`] = `
-Object {
- "bar": Object {
- "i": 7,
- },
- "foo": Object {
- "e": 4,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": undefined,
- },
- },
-}
-`;
-
-exports[`Cache writeFragment will write some deeply nested data into the store at any id (2/3) 2`] = `
-Object {
- "bar": Object {
- "i": 7,
- "j": 8,
- "k": 9,
- },
- "foo": Object {
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": undefined,
- },
- },
-}
-`;
-
-exports[`Cache writeFragment will write some deeply nested data into the store at any id (2/3) 3`] = `
-Object {
- "bar": Object {
- "i": 10,
- "j": 8,
- "k": 9,
- },
- "foo": Object {
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": undefined,
- },
- },
-}
-`;
-
-exports[`Cache writeFragment will write some deeply nested data into the store at any id (2/3) 4`] = `
-Object {
- "bar": Object {
- "i": 10,
- "j": 11,
- "k": 12,
- },
- "foo": Object {
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": undefined,
- },
- },
-}
-`;
-
-exports[`Cache writeFragment will write some deeply nested data into the store at any id (2/3) 5`] = `
-Object {
- "bar": Object {
- "i": 7,
- "j": 8,
- "k": 9,
- },
- "foo": Object {
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": "Bar",
- },
- },
-}
-`;
-
-exports[`Cache writeFragment will write some deeply nested data into the store at any id (2/3) 6`] = `
-Object {
- "bar": Object {
- "i": 10,
- "j": 11,
- "k": 12,
- },
- "foo": Object {
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": "Bar",
- },
- },
-}
-`;
-
-exports[`Cache writeFragment will write some deeply nested data into the store at any id (3/3) 1`] = `
-Object {
- "bar": Object {
- "i": 7,
- },
- "foo": Object {
- "e": 4,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": undefined,
- },
- },
-}
-`;
-
-exports[`Cache writeFragment will write some deeply nested data into the store at any id (3/3) 2`] = `
-Object {
- "bar": Object {
- "i": 7,
- "j": 8,
- "k": 9,
- },
- "foo": Object {
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": undefined,
- },
- },
-}
-`;
-
-exports[`Cache writeFragment will write some deeply nested data into the store at any id (3/3) 3`] = `
-Object {
- "bar": Object {
- "i": 10,
- "j": 8,
- "k": 9,
- },
- "foo": Object {
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": undefined,
- },
- },
-}
-`;
-
-exports[`Cache writeFragment will write some deeply nested data into the store at any id (3/3) 4`] = `
-Object {
- "bar": Object {
- "i": 10,
- "j": 11,
- "k": 12,
- },
- "foo": Object {
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": undefined,
- },
- },
-}
-`;
-
-exports[`Cache writeFragment will write some deeply nested data into the store at any id (3/3) 5`] = `
-Object {
- "bar": Object {
- "i": 7,
- "j": 8,
- "k": 9,
- },
- "foo": Object {
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": "Bar",
- },
- },
-}
-`;
-
-exports[`Cache writeFragment will write some deeply nested data into the store at any id (3/3) 6`] = `
-Object {
- "bar": Object {
- "i": 10,
- "j": 11,
- "k": 12,
- },
- "foo": Object {
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": "Bar",
- },
- },
-}
-`;
diff --git a/packages/apollo-cache-inmemory/src/__tests__/__snapshots__/mapCache.ts.snap b/packages/apollo-cache-inmemory/src/__tests__/__snapshots__/mapCache.ts.snap
deleted file mode 100644
--- a/packages/apollo-cache-inmemory/src/__tests__/__snapshots__/mapCache.ts.snap
+++ /dev/null
@@ -1,374 +0,0 @@
-// Jest Snapshot v1, https://goo.gl/fbAQLP
-
-exports[`MapCache Cache writeFragment will write some deeply nested data into the store at any id (1/3) 1`] = `
-Object {
- "bar": Object {
- "i": 7,
- },
- "foo": Object {
- "e": 4,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": undefined,
- },
- },
-}
-`;
-
-exports[`MapCache Cache writeFragment will write some deeply nested data into the store at any id (1/3) 2`] = `
-Object {
- "bar": Object {
- "i": 7,
- "j": 8,
- "k": 9,
- },
- "foo": Object {
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": undefined,
- },
- },
-}
-`;
-
-exports[`MapCache Cache writeFragment will write some deeply nested data into the store at any id (1/3) 3`] = `
-Object {
- "bar": Object {
- "i": 10,
- "j": 8,
- "k": 9,
- },
- "foo": Object {
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": undefined,
- },
- },
-}
-`;
-
-exports[`MapCache Cache writeFragment will write some deeply nested data into the store at any id (1/3) 4`] = `
-Object {
- "bar": Object {
- "i": 10,
- "j": 11,
- "k": 12,
- },
- "foo": Object {
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": undefined,
- },
- },
-}
-`;
-
-exports[`MapCache Cache writeFragment will write some deeply nested data into the store at any id (1/3) 5`] = `
-Object {
- "bar": Object {
- "i": 7,
- "j": 8,
- "k": 9,
- },
- "foo": Object {
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": "Bar",
- },
- },
-}
-`;
-
-exports[`MapCache Cache writeFragment will write some deeply nested data into the store at any id (1/3) 6`] = `
-Object {
- "bar": Object {
- "i": 10,
- "j": 11,
- "k": 12,
- },
- "foo": Object {
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": "Bar",
- },
- },
-}
-`;
-
-exports[`MapCache Cache writeFragment will write some deeply nested data into the store at any id (2/3) 1`] = `
-Object {
- "bar": Object {
- "i": 7,
- },
- "foo": Object {
- "e": 4,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": undefined,
- },
- },
-}
-`;
-
-exports[`MapCache Cache writeFragment will write some deeply nested data into the store at any id (2/3) 2`] = `
-Object {
- "bar": Object {
- "i": 7,
- "j": 8,
- "k": 9,
- },
- "foo": Object {
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": undefined,
- },
- },
-}
-`;
-
-exports[`MapCache Cache writeFragment will write some deeply nested data into the store at any id (2/3) 3`] = `
-Object {
- "bar": Object {
- "i": 10,
- "j": 8,
- "k": 9,
- },
- "foo": Object {
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": undefined,
- },
- },
-}
-`;
-
-exports[`MapCache Cache writeFragment will write some deeply nested data into the store at any id (2/3) 4`] = `
-Object {
- "bar": Object {
- "i": 10,
- "j": 11,
- "k": 12,
- },
- "foo": Object {
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": undefined,
- },
- },
-}
-`;
-
-exports[`MapCache Cache writeFragment will write some deeply nested data into the store at any id (2/3) 5`] = `
-Object {
- "bar": Object {
- "i": 7,
- "j": 8,
- "k": 9,
- },
- "foo": Object {
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": "Bar",
- },
- },
-}
-`;
-
-exports[`MapCache Cache writeFragment will write some deeply nested data into the store at any id (2/3) 6`] = `
-Object {
- "bar": Object {
- "i": 10,
- "j": 11,
- "k": 12,
- },
- "foo": Object {
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": "Bar",
- },
- },
-}
-`;
-
-exports[`MapCache Cache writeFragment will write some deeply nested data into the store at any id (3/3) 1`] = `
-Object {
- "bar": Object {
- "i": 7,
- },
- "foo": Object {
- "e": 4,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": undefined,
- },
- },
-}
-`;
-
-exports[`MapCache Cache writeFragment will write some deeply nested data into the store at any id (3/3) 2`] = `
-Object {
- "bar": Object {
- "i": 7,
- "j": 8,
- "k": 9,
- },
- "foo": Object {
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": undefined,
- },
- },
-}
-`;
-
-exports[`MapCache Cache writeFragment will write some deeply nested data into the store at any id (3/3) 3`] = `
-Object {
- "bar": Object {
- "i": 10,
- "j": 8,
- "k": 9,
- },
- "foo": Object {
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": undefined,
- },
- },
-}
-`;
-
-exports[`MapCache Cache writeFragment will write some deeply nested data into the store at any id (3/3) 4`] = `
-Object {
- "bar": Object {
- "i": 10,
- "j": 11,
- "k": 12,
- },
- "foo": Object {
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": undefined,
- },
- },
-}
-`;
-
-exports[`MapCache Cache writeFragment will write some deeply nested data into the store at any id (3/3) 5`] = `
-Object {
- "bar": Object {
- "i": 7,
- "j": 8,
- "k": 9,
- },
- "foo": Object {
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": "Bar",
- },
- },
-}
-`;
-
-exports[`MapCache Cache writeFragment will write some deeply nested data into the store at any id (3/3) 6`] = `
-Object {
- "bar": Object {
- "i": 10,
- "j": 11,
- "k": 12,
- },
- "foo": Object {
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": "Bar",
- },
- },
-}
-`;
-
-exports[`MapCache writing to the store throws when trying to write an object without id that was previously queried with id 1`] = `
-"Error writing result to store for query:
- {\\"kind\\":\\"Document\\",\\"definitions\\":[{\\"kind\\":\\"OperationDefinition\\",\\"operation\\":\\"query\\",\\"name\\":{\\"kind\\":\\"Name\\",\\"value\\":\\"Failure\\"},\\"variableDefinitions\\":[],\\"directives\\":[],\\"selectionSet\\":{\\"kind\\":\\"SelectionSet\\",\\"selections\\":[{\\"kind\\":\\"Field\\",\\"name\\":{\\"kind\\":\\"Name\\",\\"value\\":\\"item\\"},\\"arguments\\":[],\\"directives\\":[],\\"selectionSet\\":{\\"kind\\":\\"SelectionSet\\",\\"selections\\":[{\\"kind\\":\\"Field\\",\\"name\\":{\\"kind\\":\\"Name\\",\\"value\\":\\"stringField\\"},\\"arguments\\":[],\\"directives\\":[]}]}}]}}],\\"loc\\":{\\"start\\":0,\\"end\\":106}}
-Store error: the application attempted to write an object with no provided id but the store already contains an id of abcd for this object. The selectionSet that was trying to be written is:
-{\\"kind\\":\\"Field\\",\\"name\\":{\\"kind\\":\\"Name\\",\\"value\\":\\"item\\"},\\"arguments\\":[],\\"directives\\":[],\\"selectionSet\\":{\\"kind\\":\\"SelectionSet\\",\\"selections\\":[{\\"kind\\":\\"Field\\",\\"name\\":{\\"kind\\":\\"Name\\",\\"value\\":\\"stringField\\"},\\"arguments\\":[],\\"directives\\":[]}]}}"
-`;
diff --git a/packages/apollo-cache-inmemory/src/__tests__/__snapshots__/roundtrip.ts.snap b/packages/apollo-cache-inmemory/src/__tests__/__snapshots__/roundtrip.ts.snap
deleted file mode 100644
--- a/packages/apollo-cache-inmemory/src/__tests__/__snapshots__/roundtrip.ts.snap
+++ /dev/null
@@ -1,10 +0,0 @@
-// Jest Snapshot v1, https://goo.gl/fbAQLP
-
-exports[
- `writing to the store throws when trying to write an object without id that was previously queried with id 1`
-] = `
-"Error writing result to store for query:
- {\\"kind\\":\\"Document\\",\\"definitions\\":[{\\"kind\\":\\"OperationDefinition\\",\\"operation\\":\\"query\\",\\"name\\":{\\"kind\\":\\"Name\\",\\"value\\":\\"Failure\\"},\\"variableDefinitions\\":[],\\"directives\\":[],\\"selectionSet\\":{\\"kind\\":\\"SelectionSet\\",\\"selections\\":[{\\"kind\\":\\"Field\\",\\"name\\":{\\"kind\\":\\"Name\\",\\"value\\":\\"item\\"},\\"arguments\\":[],\\"directives\\":[],\\"selectionSet\\":{\\"kind\\":\\"SelectionSet\\",\\"selections\\":[{\\"kind\\":\\"Field\\",\\"name\\":{\\"kind\\":\\"Name\\",\\"value\\":\\"stringField\\"},\\"arguments\\":[],\\"directives\\":[]}]}}]}}],\\"loc\\":{\\"start\\":0,\\"end\\":106}}
-Store error: the application attempted to write an object with no provided id but the store already contains an id of abcd for this object. The selectionSet that was trying to be written is:
-{\\"kind\\":\\"Field\\",\\"name\\":{\\"kind\\":\\"Name\\",\\"value\\":\\"item\\"},\\"arguments\\":[],\\"directives\\":[],\\"selectionSet\\":{\\"kind\\":\\"SelectionSet\\",\\"selections\\":[{\\"kind\\":\\"Field\\",\\"name\\":{\\"kind\\":\\"Name\\",\\"value\\":\\"stringField\\"},\\"arguments\\":[],\\"directives\\":[]}]}}"
-`;
diff --git a/packages/apollo-cache-inmemory/src/__tests__/__snapshots__/writeToStore.ts.snap b/packages/apollo-cache-inmemory/src/__tests__/__snapshots__/writeToStore.ts.snap
deleted file mode 100644
--- a/packages/apollo-cache-inmemory/src/__tests__/__snapshots__/writeToStore.ts.snap
+++ /dev/null
@@ -1,10 +0,0 @@
-// Jest Snapshot v1, https://goo.gl/fbAQLP
-
-exports[
- `writing to the store throws when trying to write an object without id that was previously queried with id 1`
-] = `
-"Error writing result to store for query:
- {\\"kind\\":\\"Document\\",\\"definitions\\":[{\\"kind\\":\\"OperationDefinition\\",\\"operation\\":\\"query\\",\\"name\\":{\\"kind\\":\\"Name\\",\\"value\\":\\"Failure\\"},\\"variableDefinitions\\":[],\\"directives\\":[],\\"selectionSet\\":{\\"kind\\":\\"SelectionSet\\",\\"selections\\":[{\\"kind\\":\\"Field\\",\\"name\\":{\\"kind\\":\\"Name\\",\\"value\\":\\"item\\"},\\"arguments\\":[],\\"directives\\":[],\\"selectionSet\\":{\\"kind\\":\\"SelectionSet\\",\\"selections\\":[{\\"kind\\":\\"Field\\",\\"name\\":{\\"kind\\":\\"Name\\",\\"value\\":\\"stringField\\"},\\"arguments\\":[],\\"directives\\":[]}]}}]}}],\\"loc\\":{\\"start\\":0,\\"end\\":106}}
-Store error: the application attempted to write an object with no provided id but the store already contains an id of abcd for this object. The selectionSet that was trying to be written is:
-{\\"kind\\":\\"Field\\",\\"name\\":{\\"kind\\":\\"Name\\",\\"value\\":\\"item\\"},\\"arguments\\":[],\\"directives\\":[],\\"selectionSet\\":{\\"kind\\":\\"SelectionSet\\",\\"selections\\":[{\\"kind\\":\\"Field\\",\\"name\\":{\\"kind\\":\\"Name\\",\\"value\\":\\"stringField\\"},\\"arguments\\":[],\\"directives\\":[]}]}}"
-`;
diff --git a/packages/apollo-cache-inmemory/src/__tests__/fragmentMatcher.ts b/packages/apollo-cache-inmemory/src/__tests__/fragmentMatcher.ts
deleted file mode 100644
--- a/packages/apollo-cache-inmemory/src/__tests__/fragmentMatcher.ts
+++ /dev/null
@@ -1,101 +0,0 @@
-import { IntrospectionFragmentMatcher } from '../fragmentMatcher';
-import { defaultNormalizedCacheFactory } from '../objectCache';
-import { ReadStoreContext } from '../types';
-import { InMemoryCache } from '../inMemoryCache';
-import gql from 'graphql-tag';
-
-describe('FragmentMatcher', () => {
- it('can match against the root Query', () => {
- const cache = new InMemoryCache({
- addTypename: true,
- });
-
- const query = gql`
- query AllPeople {
- people {
- id
- name
- }
- ...PeopleTypes
- }
- fragment PeopleTypes on Query {
- __type(name: "Person") {
- name
- kind
- }
- }
- `;
-
- const data = {
- people: [
- {
- __typename: 'Person',
- id: 123,
- name: 'Ben',
- },
- ],
- __type: {
- __typename: '__Type',
- name: 'Person',
- kind: 'OBJECT',
- },
- };
-
- cache.writeQuery({ query, data });
- expect(cache.readQuery({ query })).toEqual(data);
- });
-});
-
-describe('IntrospectionFragmentMatcher', () => {
- it('will throw an error if match is called if it is not ready', () => {
- const ifm = new IntrospectionFragmentMatcher();
- expect(() => (ifm.match as any)()).toThrowError(/called before/);
- });
-
- it('can be seeded with an introspection query result', () => {
- const ifm = new IntrospectionFragmentMatcher({
- introspectionQueryResultData: {
- __schema: {
- types: [
- {
- kind: 'UNION',
- name: 'Item',
- possibleTypes: [
- {
- name: 'ItemA',
- },
- {
- name: 'ItemB',
- },
- ],
- },
- ],
- },
- },
- });
-
- const store = defaultNormalizedCacheFactory({
- a: {
- __typename: 'ItemB',
- },
- });
-
- const idValue = {
- type: 'id',
- id: 'a',
- generated: false,
- };
-
- const readStoreContext = {
- store,
- returnPartialData: false,
- hasMissingField: false,
- cacheRedirects: {},
- } as ReadStoreContext;
-
- expect(ifm.match(idValue as any, 'Item', readStoreContext)).toBe(true);
- expect(ifm.match(idValue as any, 'NotAnItem', readStoreContext)).toBe(
- false,
- );
- });
-});
diff --git a/packages/apollo-cache-inmemory/src/__tests__/mapCache.ts b/packages/apollo-cache-inmemory/src/__tests__/mapCache.ts
deleted file mode 100644
--- a/packages/apollo-cache-inmemory/src/__tests__/mapCache.ts
+++ /dev/null
@@ -1,20 +0,0 @@
-jest.mock('../objectCache', () => {
- const { MapCache, mapNormalizedCacheFactory } = require('../mapCache');
- return {
- ObjectCache: MapCache,
- defaultNormalizedCacheFactory: mapNormalizedCacheFactory,
- };
-});
-
-describe('MapCache', () => {
- // simply re-runs all the tests
- // with the alternative implementation of the cache
- require('./objectCache');
- require('./cache');
- require('./diffAgainstStore');
- require('./fragmentMatcher');
- require('./readFromStore');
- require('./diffAgainstStore');
- require('./roundtrip');
- require('./writeToStore');
-});
diff --git a/packages/apollo-cache-inmemory/src/__tests__/objectCache.ts b/packages/apollo-cache-inmemory/src/__tests__/objectCache.ts
deleted file mode 100644
--- a/packages/apollo-cache-inmemory/src/__tests__/objectCache.ts
+++ /dev/null
@@ -1,36 +0,0 @@
-import { ObjectCache } from '../objectCache';
-import { NormalizedCacheObject } from '../types';
-
-describe('ObjectCache', () => {
- it('should create an empty cache', () => {
- const cache = new ObjectCache();
- expect(cache.toObject()).toEqual({});
- });
-
- it('should create a cache based on an Object', () => {
- const contents: NormalizedCacheObject = { a: {} };
- const cache = new ObjectCache(contents);
- expect(cache.toObject()).toEqual(contents);
- });
-
- it(`should .get() an object from the store by dataId`, () => {
- const contents: NormalizedCacheObject = { a: {} };
- const cache = new ObjectCache(contents);
- expect(cache.get('a')).toBe(contents.a);
- });
-
- it(`should .set() an object from the store by dataId`, () => {
- const obj = {};
- const cache = new ObjectCache();
- cache.set('a', obj);
- expect(cache.get('a')).toBe(obj);
- });
-
- it(`should .clear() the store`, () => {
- const obj = {};
- const cache = new ObjectCache();
- cache.set('a', obj);
- cache.clear();
- expect(cache.get('a')).toBeUndefined();
- });
-});
diff --git a/packages/apollo-client/scripts/test_and_lint.sh b/packages/apollo-client/scripts/test_and_lint.sh
deleted file mode 100644
--- a/packages/apollo-client/scripts/test_and_lint.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-sleep 5
-
-run_command="npm run testonly"
-
-if [[ $# -gt 0 ]]; then
- run_command+=' -- --grep "'
- run_command+=$@
- run_command+='"'
-fi
-
-lint_command="npm run lint"
-
-command="$run_command"
-command+=" && "
-command+="$lint_command"
-
-nodemon --watch lib --exec "$command" --delay 0.5
diff --git a/packages/apollo-client/src/__tests__/__snapshots__/ApolloClient.ts.snap b/packages/apollo-client/src/__tests__/__snapshots__/ApolloClient.ts.snap
deleted file mode 100644
--- a/packages/apollo-client/src/__tests__/__snapshots__/ApolloClient.ts.snap
+++ /dev/null
@@ -1,511 +0,0 @@
-// Jest Snapshot v1, https://goo.gl/fbAQLP
-
-exports[`ApolloClient constructor will throw an error if cache is not passed in 1`] = `
-"In order to initialize Apollo Client, you must specify 'link' and 'cache' properties in the options object.
-These options are part of the upgrade requirements when migrating from Apollo Client 1.x to Apollo Client 2.x.
-For more information, please visit: https://www.apollographql.com/docs/tutorial/client.html#apollo-client-setup"
-`;
-
-exports[`ApolloClient constructor will throw an error if link is not passed in 1`] = `
-"In order to initialize Apollo Client, you must specify 'link' and 'cache' properties in the options object.
-These options are part of the upgrade requirements when migrating from Apollo Client 1.x to Apollo Client 2.x.
-For more information, please visit: https://www.apollographql.com/docs/tutorial/client.html#apollo-client-setup"
-`;
-
-exports[`ApolloClient write then read will not use a default id getter if either _id or id is present when __typename is not also present 1`] = `
-Object {
- "$ROOT_QUERY.bar": Object {
- "foo": Object {
- "generated": true,
- "id": "$ROOT_QUERY.bar.foo",
- "type": "id",
- "typename": undefined,
- },
- "i": 10,
- "j": 11,
- },
- "$ROOT_QUERY.bar.foo": Object {
- "_id": "barfoo",
- "k": 12,
- "l": 13,
- },
- "$ROOT_QUERY.foo": Object {
- "bar": Object {
- "generated": false,
- "id": "bar:foobar",
- "type": "id",
- "typename": "bar",
- },
- "c": 3,
- "d": 4,
- },
- "ROOT_QUERY": Object {
- "a": 1,
- "b": 2,
- "bar": Object {
- "generated": true,
- "id": "$ROOT_QUERY.bar",
- "type": "id",
- "typename": undefined,
- },
- "foo": Object {
- "generated": true,
- "id": "$ROOT_QUERY.foo",
- "type": "id",
- "typename": undefined,
- },
- "g": 8,
- "h": 9,
- },
- "bar:foobar": Object {
- "e": 5,
- "f": 6,
- "id": "foobar",
- },
-}
-`;
-
-exports[`ApolloClient write then read will not use a default id getter if id and _id are not present 1`] = `
-Object {
- "$ROOT_QUERY.bar": Object {
- "__typename": "bar",
- "foo": Object {
- "generated": true,
- "id": "$ROOT_QUERY.bar.foo",
- "type": "id",
- "typename": "foo",
- },
- "i": 10,
- "j": 11,
- },
- "$ROOT_QUERY.bar.foo": Object {
- "__typename": "foo",
- "k": 12,
- "l": 13,
- },
- "$ROOT_QUERY.foo": Object {
- "__typename": "foo",
- "bar": Object {
- "generated": true,
- "id": "$ROOT_QUERY.foo.bar",
- "type": "id",
- "typename": "bar",
- },
- "c": 3,
- "d": 4,
- },
- "$ROOT_QUERY.foo.bar": Object {
- "__typename": "bar",
- "e": 5,
- "f": 6,
- },
- "ROOT_QUERY": Object {
- "a": 1,
- "b": 2,
- "bar": Object {
- "generated": true,
- "id": "$ROOT_QUERY.bar",
- "type": "id",
- "typename": "bar",
- },
- "foo": Object {
- "generated": true,
- "id": "$ROOT_QUERY.foo",
- "type": "id",
- "typename": "foo",
- },
- "g": 8,
- "h": 9,
- },
-}
-`;
-
-exports[`ApolloClient write then read will use a default id getter if __typename and _id are present 1`] = `
-Object {
- "$ROOT_QUERY.foo": Object {
- "__typename": "foo",
- "bar": Object {
- "generated": false,
- "id": "bar:foobar",
- "type": "id",
- "typename": "bar",
- },
- "c": 3,
- "d": 4,
- },
- "ROOT_QUERY": Object {
- "a": 1,
- "b": 2,
- "foo": Object {
- "generated": true,
- "id": "$ROOT_QUERY.foo",
- "type": "id",
- "typename": "foo",
- },
- },
- "bar:foobar": Object {
- "__typename": "bar",
- "_id": "foobar",
- "e": 5,
- "f": 6,
- },
-}
-`;
-
-exports[`ApolloClient write then read will use a default id getter if __typename and id are present 1`] = `
-Object {
- "$ROOT_QUERY.foo": Object {
- "__typename": "foo",
- "bar": Object {
- "generated": false,
- "id": "bar:foobar",
- "type": "id",
- "typename": "bar",
- },
- "c": 3,
- "d": 4,
- },
- "ROOT_QUERY": Object {
- "a": 1,
- "b": 2,
- "foo": Object {
- "generated": true,
- "id": "$ROOT_QUERY.foo",
- "type": "id",
- "typename": "foo",
- },
- },
- "bar:foobar": Object {
- "__typename": "bar",
- "e": 5,
- "f": 6,
- "id": "foobar",
- },
-}
-`;
-
-exports[`ApolloClient write then read will use a default id getter if one is not specified and __typename is present along with either _id or id 1`] = `
-Object {
- "$ROOT_QUERY.bar": Object {
- "__typename": "bar",
- "foo": Object {
- "generated": false,
- "id": "foo:barfoo",
- "type": "id",
- "typename": "foo",
- },
- "i": 10,
- "j": 11,
- },
- "$ROOT_QUERY.foo": Object {
- "__typename": "foo",
- "bar": Object {
- "generated": false,
- "id": "bar:foobar",
- "type": "id",
- "typename": "bar",
- },
- "c": 3,
- "d": 4,
- },
- "ROOT_QUERY": Object {
- "a": 1,
- "b": 2,
- "bar": Object {
- "generated": true,
- "id": "$ROOT_QUERY.bar",
- "type": "id",
- "typename": "bar",
- },
- "foo": Object {
- "generated": true,
- "id": "$ROOT_QUERY.foo",
- "type": "id",
- "typename": "foo",
- },
- "g": 8,
- "h": 9,
- },
- "bar:foobar": Object {
- "__typename": "bar",
- "e": 5,
- "f": 6,
- "id": "foobar",
- },
- "foo:barfoo": Object {
- "__typename": "foo",
- "_id": "barfoo",
- "k": 12,
- "l": 13,
- },
-}
-`;
-
-exports[`ApolloClient write then read will write data locally which will then be read back 1`] = `
-Object {
- "$foo.bar": Object {
- "__typename": "Bar",
- "d": 8,
- "e": 9,
- "f": 6,
- },
- "foo": Object {
- "__typename": "Foo",
- "a": 7,
- "b": 2,
- "bar": Object {
- "generated": true,
- "id": "$foo.bar",
- "type": "id",
- "typename": "Bar",
- },
- "c": 3,
- },
-}
-`;
-
-exports[`ApolloClient write then read will write data to a specific id 1`] = `
-Object {
- "$ROOT_QUERY.foo": Object {
- "__typename": "foo",
- "bar": Object {
- "generated": false,
- "id": "foobar",
- "type": "id",
- "typename": "bar",
- },
- "c": 3,
- "d": 4,
- },
- "ROOT_QUERY": Object {
- "a": 1,
- "b": 2,
- "foo": Object {
- "generated": true,
- "id": "$ROOT_QUERY.foo",
- "type": "id",
- "typename": "foo",
- },
- },
- "foobar": Object {
- "__typename": "bar",
- "e": 5,
- "f": 6,
- "key": "foobar",
- },
-}
-`;
-
-exports[`ApolloClient writeFragment will write some deeply nested data into the store at any id 1`] = `
-Object {
- "bar": Object {
- "__typename": "Bar",
- "i": 7,
- },
- "foo": Object {
- "__typename": "Foo",
- "e": 4,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": "Bar",
- },
- },
-}
-`;
-
-exports[`ApolloClient writeFragment will write some deeply nested data into the store at any id 2`] = `
-Object {
- "bar": Object {
- "__typename": "Bar",
- "i": 7,
- "j": 8,
- "k": 9,
- },
- "foo": Object {
- "__typename": "Foo",
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": "Bar",
- },
- },
-}
-`;
-
-exports[`ApolloClient writeFragment will write some deeply nested data into the store at any id 3`] = `
-Object {
- "bar": Object {
- "__typename": "Bar",
- "i": 10,
- "j": 8,
- "k": 9,
- },
- "foo": Object {
- "__typename": "Foo",
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": "Bar",
- },
- },
-}
-`;
-
-exports[`ApolloClient writeFragment will write some deeply nested data into the store at any id 4`] = `
-Object {
- "bar": Object {
- "__typename": "Bar",
- "i": 10,
- "j": 11,
- "k": 12,
- },
- "foo": Object {
- "__typename": "Foo",
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": "Bar",
- },
- },
-}
-`;
-
-exports[`ApolloClient writeFragment will write some deeply nested data into the store at any id 5`] = `
-Object {
- "bar": Object {
- "__typename": "Bar",
- "i": 7,
- "j": 8,
- "k": 9,
- },
- "foo": Object {
- "__typename": "Foo",
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": "Bar",
- },
- },
-}
-`;
-
-exports[`ApolloClient writeFragment will write some deeply nested data into the store at any id 6`] = `
-Object {
- "bar": Object {
- "__typename": "Bar",
- "i": 10,
- "j": 11,
- "k": 12,
- },
- "foo": Object {
- "__typename": "Foo",
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": false,
- "id": "bar",
- "type": "id",
- "typename": "Bar",
- },
- },
-}
-`;
-
-exports[`ApolloClient writeQuery will write some deeply nested data to the store 1`] = `
-Object {
- "$ROOT_QUERY.d": Object {
- "__typename": "D",
- "e": 4,
- },
- "ROOT_QUERY": Object {
- "a": 1,
- "d": Object {
- "generated": true,
- "id": "$ROOT_QUERY.d",
- "type": "id",
- "typename": "D",
- },
- },
-}
-`;
-
-exports[`ApolloClient writeQuery will write some deeply nested data to the store 2`] = `
-Object {
- "$ROOT_QUERY.d": Object {
- "__typename": "D",
- "e": 4,
- "h": Object {
- "generated": true,
- "id": "$ROOT_QUERY.d.h",
- "type": "id",
- "typename": "H",
- },
- },
- "$ROOT_QUERY.d.h": Object {
- "__typename": "H",
- "i": 7,
- },
- "ROOT_QUERY": Object {
- "a": 1,
- "d": Object {
- "generated": true,
- "id": "$ROOT_QUERY.d",
- "type": "id",
- "typename": "D",
- },
- },
-}
-`;
-
-exports[`ApolloClient writeQuery will write some deeply nested data to the store 3`] = `
-Object {
- "$ROOT_QUERY.d": Object {
- "__typename": "D",
- "e": 4,
- "f": 5,
- "g": 6,
- "h": Object {
- "generated": true,
- "id": "$ROOT_QUERY.d.h",
- "type": "id",
- "typename": "H",
- },
- },
- "$ROOT_QUERY.d.h": Object {
- "__typename": "H",
- "i": 7,
- "j": 8,
- "k": 9,
- },
- "ROOT_QUERY": Object {
- "a": 1,
- "b": 2,
- "c": 3,
- "d": Object {
- "generated": true,
- "id": "$ROOT_QUERY.d",
- "type": "id",
- "typename": "D",
- },
- },
-}
-`;
diff --git a/packages/apollo-client/src/__tests__/__snapshots__/client.ts.snap b/packages/apollo-client/src/__tests__/__snapshots__/client.ts.snap
deleted file mode 100644
--- a/packages/apollo-client/src/__tests__/__snapshots__/client.ts.snap
+++ /dev/null
@@ -1,43 +0,0 @@
-// Jest Snapshot v1, https://goo.gl/fbAQLP
-
-exports[
- `@connect should run a query with the connection directive and filter arguments and write the result to the correct store key 1`
-] = `
-Object {
- "ROOT_QUERY": Object {
- "abc({\\"order\\":\\"popularity\\"})": Array [
- Object {
- "generated": true,
- "id": "ROOT_QUERY.abc({\\"order\\":\\"popularity\\"}).0",
- "type": "id",
- "typename": "Book",
- },
- ],
- },
- "ROOT_QUERY.abc({\\"order\\":\\"popularity\\"}).0": Object {
- "__typename": "Book",
- "name": "abcd",
- },
-}
-`;
-
-exports[
- `@connect should run a query with the connection directive and write the result to the store key defined in the directive 1`
-] = `
-Object {
- "ROOT_QUERY": Object {
- "abc": Array [
- Object {
- "generated": true,
- "id": "ROOT_QUERY.abc.0",
- "type": "id",
- "typename": "Book",
- },
- ],
- },
- "ROOT_QUERY.abc.0": Object {
- "__typename": "Book",
- "name": "abcd",
- },
-}
-`;
diff --git a/packages/apollo-client/tsconfig.test.json b/packages/apollo-client/tsconfig.test.json
deleted file mode 100644
--- a/packages/apollo-client/tsconfig.test.json
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "extends": "./tsconfig",
- "compilerOptions": {
- "module": "commonjs",
- },
- "exclude": [
- "src/__tests__/ApolloClient.ts",
- "src/__tests__/client.ts",
- "src/__tests__/fetchMore.ts",
- "src/__tests__/graphqlSubscriptions.ts",
- "src/__tests__/mutationResults.ts",
- "src/__tests__/optimistic.ts",
- "src/core/__tests__/ObservableQuery.ts",
- "src/core/__tests__/QueryManager/index.ts",
- "src/core/__tests__/QueryManager/links.ts",
- "src/core/__tests__/QueryManager/live.ts",
- "src/core/__tests__/QueryManager/multiple-results.ts",
- "src/core/__tests__/QueryManager/recycler.ts",
- "src/core/__tests__/fetchPolicies.ts",
- "src/data/__tests__/queries.ts",
- "src/errors/__tests__/ApolloError.ts",
- ]
-}
diff --git a/packages/apollo-utilities/src/util/__tests__/isEqual.ts b/packages/apollo-utilities/src/util/__tests__/isEqual.ts
deleted file mode 100644
--- a/packages/apollo-utilities/src/util/__tests__/isEqual.ts
+++ /dev/null
@@ -1,174 +0,0 @@
-import { isEqual } from '../isEqual';
-
-describe('isEqual', () => {
- it('should return true for equal primitive values', () => {
- expect(isEqual(undefined, undefined)).toBe(true);
- expect(isEqual(null, null)).toBe(true);
- expect(isEqual(true, true)).toBe(true);
- expect(isEqual(false, false)).toBe(true);
- expect(isEqual(-1, -1)).toBe(true);
- expect(isEqual(+1, +1)).toBe(true);
- expect(isEqual(42, 42)).toBe(true);
- expect(isEqual(0, 0)).toBe(true);
- expect(isEqual(0.5, 0.5)).toBe(true);
- expect(isEqual('hello', 'hello')).toBe(true);
- expect(isEqual('world', 'world')).toBe(true);
- });
-
- it('should return false for not equal primitive values', () => {
- expect(!isEqual(undefined, null)).toBe(true);
- expect(!isEqual(null, undefined)).toBe(true);
- expect(!isEqual(true, false)).toBe(true);
- expect(!isEqual(false, true)).toBe(true);
- expect(!isEqual(-1, +1)).toBe(true);
- expect(!isEqual(+1, -1)).toBe(true);
- expect(!isEqual(42, 42.00000000000001)).toBe(true);
- expect(!isEqual(0, 0.5)).toBe(true);
- expect(!isEqual('hello', 'world')).toBe(true);
- expect(!isEqual('world', 'hello')).toBe(true);
- });
-
- it('should return false when comparing primitives with objects', () => {
- expect(!isEqual({}, null)).toBe(true);
- expect(!isEqual(null, {})).toBe(true);
- expect(!isEqual({}, true)).toBe(true);
- expect(!isEqual(true, {})).toBe(true);
- expect(!isEqual({}, 42)).toBe(true);
- expect(!isEqual(42, {})).toBe(true);
- expect(!isEqual({}, 'hello')).toBe(true);
- expect(!isEqual('hello', {})).toBe(true);
- });
-
- it('should correctly compare shallow objects', () => {
- expect(isEqual({}, {})).toBe(true);
- expect(isEqual({ a: 1, b: 2, c: 3 }, { a: 1, b: 2, c: 3 })).toBe(true);
- expect(!isEqual({ a: 1, b: 2, c: 3 }, { a: 3, b: 2, c: 1 })).toBe(true);
- expect(!isEqual({ a: 1, b: 2, c: 3 }, { a: 1, b: 2 })).toBe(true);
- expect(!isEqual({ a: 1, b: 2 }, { a: 1, b: 2, c: 3 })).toBe(true);
- });
-
- it('should correctly compare deep objects', () => {
- expect(isEqual({ x: {} }, { x: {} })).toBe(true);
- expect(
- isEqual({ x: { a: 1, b: 2, c: 3 } }, { x: { a: 1, b: 2, c: 3 } }),
- ).toBe(true);
- expect(
- !isEqual({ x: { a: 1, b: 2, c: 3 } }, { x: { a: 3, b: 2, c: 1 } }),
- ).toBe(true);
- expect(!isEqual({ x: { a: 1, b: 2, c: 3 } }, { x: { a: 1, b: 2 } })).toBe(
- true,
- );
- expect(!isEqual({ x: { a: 1, b: 2 } }, { x: { a: 1, b: 2, c: 3 } })).toBe(
- true,
- );
- });
-
- it('should correctly compare deep objects without object prototype ', () => {
- // Solves https://github.com/apollographql/apollo-client/issues/2132
- const objNoProto = Object.create(null);
- objNoProto.a = { b: 2, c: [3, 4] };
- objNoProto.e = Object.create(null);
- objNoProto.e.f = 5;
- expect(isEqual(objNoProto, { a: { b: 2, c: [3, 4] }, e: { f: 5 } })).toBe(
- true,
- );
- expect(!isEqual(objNoProto, { a: { b: 2, c: [3, 4] }, e: { f: 6 } })).toBe(
- true,
- );
- expect(!isEqual(objNoProto, { a: { b: 2, c: [3, 4] }, e: null })).toBe(
- true,
- );
- expect(!isEqual(objNoProto, { a: { b: 2, c: [3] }, e: { f: 5 } })).toBe(
- true,
- );
- expect(!isEqual(objNoProto, null)).toBe(true);
- });
-
- it('should correctly handle modified prototypes', () => {
- Array.prototype.foo = null;
- expect(isEqual([1, 2, 3], [1, 2, 3])).toBe(true);
- expect(!isEqual([1, 2, 3], [1, 2, 4])).toBe(true);
- delete Array.prototype.foo;
- });
-
- describe('comparing objects with circular refs', () => {
- // copied with slight modification from lodash test suite
- it('should compare objects with circular references', () => {
-      let object1: any = {},
-        object2: any = {};
-
- object1.a = object1;
- object2.a = object2;
-
- expect(isEqual(object1, object2)).toBe(true);
-
- object1.b = 0;
- object2.b = Object(0);
-
- expect(isEqual(object1, object2)).toBe(true);
-
- object1.c = Object(1);
- object2.c = Object(2);
-
- expect(isEqual(object1, object2)).toBe(false);
-
- object1 = { a: 1, b: 2, c: 3 };
- object1.b = object1;
- object2 = { a: 1, b: { a: 1, b: 2, c: 3 }, c: 3 };
-
- expect(isEqual(object1, object2)).toBe(false);
- });
-
- it('should have transitive equivalence for circular references of objects', () => {
- const object1 = {},
- object2 = { a: object1 },
- object3 = { a: object2 };
-
- object1.a = object1;
-
- expect(isEqual(object1, object2)).toBe(true);
- expect(isEqual(object2, object3)).toBe(true);
- expect(isEqual(object1, object3)).toBe(true);
- });
-
- it('should compare objects with multiple circular references', () => {
- const array1 = [{}],
- array2 = [{}];
-
- (array1[0].a = array1).push(array1);
- (array2[0].a = array2).push(array2);
-
- expect(isEqual(array1, array2)).toBe(true);
-
- array1[0].b = 0;
- array2[0].b = Object(0);
-
- expect(isEqual(array1, array2)).toBe(true);
-
- array1[0].c = Object(1);
- array2[0].c = Object(2);
-
- expect(isEqual(array1, array2)).toBe(false);
- });
-
- it('should compare objects with complex circular references', () => {
- const object1 = {
- foo: { b: { c: { d: {} } } },
- bar: { a: 2 },
- };
-
- const object2 = {
- foo: { b: { c: { d: {} } } },
- bar: { a: 2 },
- };
-
- object1.foo.b.c.d = object1;
- object1.bar.b = object1.foo.b;
-
- object2.foo.b.c.d = object2;
- object2.bar.b = object2.foo.b;
-
- expect(isEqual(object1, object2)).toBe(true);
- });
- });
-});
diff --git a/packages/apollo-utilities/src/util/__tests__/mergeDeep.ts b/packages/apollo-utilities/src/util/__tests__/mergeDeep.ts
deleted file mode 100644
--- a/packages/apollo-utilities/src/util/__tests__/mergeDeep.ts
+++ /dev/null
@@ -1,139 +0,0 @@
-import { mergeDeep, mergeDeepArray } from '../mergeDeep';
-
-describe('mergeDeep', function() {
- it('should return an object if first argument falsy', function() {
- expect(mergeDeep()).toEqual({});
- expect(mergeDeep(null)).toEqual({});
- expect(mergeDeep(null, { foo: 42 })).toEqual({ foo: 42 });
- });
-
- it('should preserve identity for single arguments', function() {
- const arg = Object.create(null);
- expect(mergeDeep(arg)).toBe(arg);
- });
-
- it('should preserve identity when merging non-conflicting objects', function() {
- const a = { a: { name: 'ay' } };
- const b = { b: { name: 'bee' } };
- const c = mergeDeep(a, b);
- expect(c.a).toBe(a.a);
- expect(c.b).toBe(b.b);
- expect(c).toEqual({
- a: { name: 'ay' },
- b: { name: 'bee' },
- });
- });
-
- it('should shallow-copy conflicting fields', function() {
- const a = { conflict: { fromA: [1, 2, 3] } };
- const b = { conflict: { fromB: [4, 5] } };
- const c = mergeDeep(a, b);
- expect(c.conflict).not.toBe(a.conflict);
- expect(c.conflict).not.toBe(b.conflict);
- expect(c.conflict.fromA).toBe(a.conflict.fromA);
- expect(c.conflict.fromB).toBe(b.conflict.fromB);
- expect(c).toEqual({
- conflict: {
- fromA: [1, 2, 3],
- fromB: [4, 5],
- },
- });
- });
-
- it('should resolve conflicts among more than two objects', function() {
- const sources = [];
-
- for (let i = 0; i < 100; ++i) {
- sources.push({
- ['unique' + i]: { value: i },
- conflict: {
- ['from' + i]: { value: i },
- nested: {
- ['nested' + i]: { value: i },
- },
- },
- });
- }
-
- const merged = mergeDeep(...sources);
-
- sources.forEach((source, i) => {
- expect(merged['unique' + i].value).toBe(i);
- expect(source['unique' + i]).toBe(merged['unique' + i]);
-
- expect(merged.conflict).not.toBe(source.conflict);
- expect(merged.conflict['from' + i].value).toBe(i);
- expect(merged.conflict['from' + i]).toBe(source.conflict['from' + i]);
-
- expect(merged.conflict.nested).not.toBe(source.conflict.nested);
- expect(merged.conflict.nested['nested' + i].value).toBe(i);
- expect(merged.conflict.nested['nested' + i]).toBe(
- source.conflict.nested['nested' + i],
- );
- });
- });
-
- it('can merge array elements', function() {
- const a = [{ a: 1 }, { a: 'ay' }, 'a'];
- const b = [{ b: 2 }, { b: 'bee' }, 'b'];
- const c = [{ c: 3 }, { c: 'cee' }, 'c'];
- const d = { 1: { d: 'dee' } };
-
- expect(mergeDeep(a, b, c, d)).toEqual([
- { a: 1, b: 2, c: 3 },
- { a: 'ay', b: 'bee', c: 'cee', d: 'dee' },
- 'c',
- ]);
- });
-
- it('lets the last conflicting value win', function() {
- expect(mergeDeep('a', 'b', 'c')).toBe('c');
-
- expect(
- mergeDeep(
- { a: 'a', conflict: 1 },
- { b: 'b', conflict: 2 },
- { c: 'c', conflict: 3 },
- ),
- ).toEqual({
- a: 'a',
- b: 'b',
- c: 'c',
- conflict: 3,
- });
-
- expect(mergeDeep(
- ['a', ['b', 'c'], 'd'],
- [/*empty*/, ['B'], 'D'],
- )).toEqual(
- ['a', ['B', 'c'], 'D'],
- );
-
- expect(mergeDeep(
- ['a', ['b', 'c'], 'd'],
- ['A', [/*empty*/, 'C']],
- )).toEqual(
- ['A', ['b', 'C'], 'd'],
- );
- });
-
- it('mergeDeep returns the intersection of its argument types', function() {
- const abc = mergeDeep({ str: "hi", a: 1 }, { a: 3, b: 2 }, { b: 1, c: 2 });
- // The point of this test is that the following lines type-check without
- // resorting to any `any` loopholes:
- expect(abc.str.slice(0)).toBe("hi");
- expect(abc.a * 2).toBe(6);
- expect(abc.b - 0).toBe(1);
- expect(abc.c / 2).toBe(1);
- });
-
- it('mergeDeepArray returns the supertype of its argument types', function() {
- class F {
- check() { return "ok" };
- }
- const fs: F[] = [new F, new F, new F];
- // Although mergeDeepArray doesn't have the same tuple type awareness as
- // mergeDeep, it does infer that F should be the return type here:
- expect(mergeDeepArray(fs).check()).toBe("ok");
- });
-});
diff --git a/packages/apollo-utilities/src/util/__tests__/warnOnce.ts b/packages/apollo-utilities/src/util/__tests__/warnOnce.ts
deleted file mode 100644
--- a/packages/apollo-utilities/src/util/__tests__/warnOnce.ts
+++ /dev/null
@@ -1,61 +0,0 @@
-import { warnOnceInDevelopment } from '../warnOnce';
-
-let lastWarning: string | null;
-let keepEnv: string | undefined;
-let numCalls = 0;
-let oldConsoleWarn: any;
-
-describe('warnOnce', () => {
- beforeEach(() => {
- keepEnv = process.env.NODE_ENV;
- numCalls = 0;
- lastWarning = null;
- oldConsoleWarn = console.warn;
- console.warn = (msg: any) => {
- numCalls++;
- lastWarning = msg;
- };
- });
- afterEach(() => {
- process.env.NODE_ENV = keepEnv;
- console.warn = oldConsoleWarn;
- });
- it('actually warns', () => {
- process.env.NODE_ENV = 'development';
- warnOnceInDevelopment('hi');
- expect(lastWarning).toBe('hi');
- expect(numCalls).toEqual(1);
- });
-
- it('does not warn twice', () => {
- process.env.NODE_ENV = 'development';
- warnOnceInDevelopment('ho');
- warnOnceInDevelopment('ho');
- expect(lastWarning).toEqual('ho');
- expect(numCalls).toEqual(1);
- });
-
- it('warns two different things once each', () => {
- process.env.NODE_ENV = 'development';
- warnOnceInDevelopment('slow');
- expect(lastWarning).toEqual('slow');
- warnOnceInDevelopment('mo');
- expect(lastWarning).toEqual('mo');
- expect(numCalls).toEqual(2);
- });
-
- it('does not warn in production', () => {
- process.env.NODE_ENV = 'production';
- warnOnceInDevelopment('lo');
- warnOnceInDevelopment('lo');
- expect(numCalls).toEqual(0);
- });
-
- it('warns many times in test', () => {
- process.env.NODE_ENV = 'test';
- warnOnceInDevelopment('yo');
- warnOnceInDevelopment('yo');
- expect(lastWarning).toEqual('yo');
- expect(numCalls).toEqual(2);
- });
-});
diff --git a/packages/graphql-anywhere/src/__tests__/__snapshots__/utilities.ts.snap b/packages/graphql-anywhere/src/__tests__/__snapshots__/utilities.ts.snap
deleted file mode 100644
--- a/packages/graphql-anywhere/src/__tests__/__snapshots__/utilities.ts.snap
+++ /dev/null
@@ -1,13 +0,0 @@
-// Jest Snapshot v1, https://goo.gl/fbAQLP
-
-exports[`utilities with a single query can check propTypes for fragments 1`] = `
-Array [
- "Warning: Failed prop type: alias missing on {}",
-]
-`;
-
-exports[`utilities with a single query can check propTypes for fragments with variables 1`] = `
-Array [
- "Warning: Failed prop type: height missing on {\\"alias\\":\\"Bob\\",\\"avatar\\":{\\"square\\":\\"abc\\"}}",
-]
-`;
diff --git a/packages/graphql-anywhere/src/__tests__/directives.ts b/packages/graphql-anywhere/src/__tests__/directives.ts
deleted file mode 100644
--- a/packages/graphql-anywhere/src/__tests__/directives.ts
+++ /dev/null
@@ -1,51 +0,0 @@
-import gql from 'graphql-tag';
-
-import graphql from '../';
-
-describe('directives', () => {
- it('skips a field that has the skip directive', () => {
- const resolver = () => {
- throw new Error('should not be called');
- };
-
- const query = gql`
- {
- a @skip(if: true)
- }
- `;
-
- const result = graphql(resolver, query);
-
- expect(result).toEqual({});
- });
-
- it('includes info about arbitrary directives', () => {
- const resolver = (fieldName, root, args, context, info) => {
- const { doSomethingDifferent } = info.directives;
- let data = root[info.resultKey];
- if (doSomethingDifferent) {
- if (doSomethingDifferent.but === 'notTooCrazy') {
- return data;
- }
- return undefined;
- }
- return data;
- };
-
- const input = {
- a: 'something',
- b: 'hidden',
- };
-
- const query = gql`
- {
- a @doSomethingDifferent(but: notTooCrazy)
- b @doSomethingDifferent(but: nope)
- }
- `;
-
- const result = graphql(resolver, query, input);
-
- expect(result).toEqual({ a: 'something' });
- });
-});
diff --git a/packages/graphql-anywhere/src/__tests__/index.ts b/packages/graphql-anywhere/src/__tests__/index.ts
deleted file mode 100644
--- a/packages/graphql-anywhere/src/__tests__/index.ts
+++ /dev/null
@@ -1,854 +0,0 @@
-import gql from 'graphql-tag';
-
-import { Resolver, ExecInfo } from '..';
-
-const execute = (graphql, r) => () => {
- it('does basic things', async () => {
- const resolver = (_, root) => r(root + 'fake');
-
- const query = gql`
- {
- a {
- b
- ...frag
- }
- }
-
- fragment frag on X {
- c
- }
- `;
-
- const result = await graphql(resolver, query, '', null, null);
-
- expect(result).toEqual({
- a: {
- b: 'fakefake',
-
- c: 'fakefake',
- },
- });
- });
-
- it('works with enum args', async () => {
- const resolver = (fieldName, root, args) => r(args.value);
-
- const query = gql`
- {
- a(value: ENUM_VALUE)
- }
- `;
-
- const result = await graphql(resolver, query);
-
- expect(result).toEqual({ a: 'ENUM_VALUE' });
- });
-
- it('works with null args', async () => {
- const resolver = (fieldName, root, args) => r(args.value);
-
- const query = gql`
- {
- a(value: null)
- }
- `;
-
- const result = await graphql(resolver, query);
-
- expect(result).toEqual({ a: null });
- });
-
- it('traverses arrays returned from the resolver', async () => {
- const resolver = () => r([1, 2]);
-
- const query = gql`
- {
- a {
- b
- }
- }
- `;
-
- const result = await graphql(resolver, query);
-
- expect(result).toEqual({ a: [{ b: [1, 2] }, { b: [1, 2] }] });
- });
-
- it('can traverse an object', async () => {
- const obj = {
- a: {
- b: 'fun',
-
- c: ['also fun', 'also fun 2'],
-
- d: 'not fun',
- },
- };
-
- const resolver = (fieldName, root) => r(root[fieldName]);
-
- const query = gql`
- {
- a {
- b
- c
- }
- }
- `;
-
- const result = await graphql(resolver, query, obj, null, null);
-
- expect(result).toEqual({
- a: {
- b: 'fun',
-
- c: ['also fun', 'also fun 2'],
- },
- });
- });
-
- it('can traverse nested arrays', async () => {
- const obj = { a: [{ b: [[{ c: 1 }, { c: 2 }], [{ c: 3 }, { c: 4 }]] }] };
-
- const resolver = (fieldName, root) => r(root[fieldName]);
-
- const query = gql`
- {
- a {
- b {
- c
- }
- }
- }
- `;
-
- const result = await graphql(resolver, query, obj, null, null);
-
- expect(result).toEqual({
- a: [{ b: [[{ c: 1 }, { c: 2 }], [{ c: 3 }, { c: 4 }]] }],
- });
- });
-
- it('can use arguments, both inline and variables', async () => {
- const resolver = (fieldName, _, args) => r(args);
-
- const query = gql`
- {
- inline(int: 5, float: 3.14, string: "string")
- variables(int: $int, float: $float, string: $string)
- }
- `;
-
- const variables = {
- int: 6,
-
- float: 6.28,
-
- string: 'varString',
- };
-
- const result = await graphql(resolver, query, null, null, variables);
-
- expect(result).toEqual({
- inline: {
- int: 5,
-
- float: 3.14,
-
- string: 'string',
- },
-
- variables: {
- int: 6,
-
- float: 6.28,
-
- string: 'varString',
- },
- });
- });
-
- it('will tolerate missing variables', async () => {
- const resolver = (fieldName, _, args) => r(args);
-
- const query = gql`
- {
- variables(int: $int, float: $float, string: $string, missing: $missing)
- }
- `;
-
- const variables = {
- int: 6,
-
- float: 6.28,
-
- string: 'varString',
- };
-
- const result = await graphql(resolver, query, null, null, variables);
-
- expect(result).toEqual({
- variables: {
- int: 6,
-
- float: 6.28,
-
- string: 'varString',
-
- missing: undefined,
- },
- });
- });
-
- it('can use skip and include', async () => {
- const resolver = fieldName => r(fieldName);
-
- const query = gql`
- {
- a {
- b @skip(if: true)
- c @include(if: true)
- d @skip(if: false)
- e @include(if: false)
- }
- }
- `;
-
- const result = await graphql(resolver, query, null, null);
-
- expect(result).toEqual({
- a: {
- c: 'c',
-
- d: 'd',
- },
- });
- });
-
- it('can use inline and named fragments', async () => {
- const resolver = fieldName => r(fieldName);
-
- const query = gql`
- {
- a {
- ... on Type {
- b
- c
- }
- ...deFrag
- }
- }
-
- fragment deFrag on Type {
- d
- e
- }
- `;
-
- const result = await graphql(resolver, query, null, null, null);
-
- expect(result).toEqual({
- a: {
- b: 'b',
-
- c: 'c',
-
- d: 'd',
-
- e: 'e',
- },
- });
- });
-
- it('can resolve deeply nested fragments', async () => {
- const resolver = (fieldName, root) => {
- return r(root[fieldName]);
- };
-
- const query = gql`
- {
- stringField
- numberField
- nullField
- ... on Item {
- nestedObj {
- stringField
- nullField
- deepNestedObj {
- stringField
- nullField
- }
- }
- }
- ... on Item {
- nestedObj {
- numberField
- nullField
- deepNestedObj {
- numberField
- nullField
- }
- }
- }
- ... on Item {
- nullObject
- }
- nestedObj {
- inlinedObjectStringField
- }
- }
- `;
-
- const result: any = {
- id: 'abcd',
-
- stringField: 'This is a string!',
-
- numberField: 5,
-
- nullField: null,
-
- nestedObj: {
- id: 'abcde',
-
- stringField: 'This is a string too!',
-
- numberField: 6,
-
- nullField: null,
-
- deepNestedObj: {
- stringField: 'This is a deep string',
-
- numberField: 7,
-
- nullField: null,
- },
-
- inlinedObjectStringField: 'This is a string of an inlined object',
- },
-
- nullObject: null,
- };
-
- const queryResult = await graphql(resolver, query, result);
-
- // The result of the query shouldn't contain __data_id fields
-
- expect(queryResult).toEqual({
- stringField: 'This is a string!',
-
- numberField: 5,
-
- nullField: null,
-
- nestedObj: {
- stringField: 'This is a string too!',
-
- numberField: 6,
-
- nullField: null,
-
- deepNestedObj: {
- stringField: 'This is a deep string',
-
- numberField: 7,
-
- nullField: null,
- },
-
- inlinedObjectStringField: 'This is a string of an inlined object',
- },
-
- nullObject: null,
- });
- });
-
- it('can resolve deeply nested fragments with arrays', async () => {
- const resolver = (fieldName, root) => {
- return r(root[fieldName]);
- };
-
- const query = gql`
- {
- ... on Item {
- array {
- id
- field1
- }
- }
- ... on Item {
- array {
- id
- field2
- }
- }
- ... on Item {
- array {
- id
- field3
- }
- }
- }
- `;
-
- const result: any = {
- array: [
- {
- id: 'abcde',
-
- field1: 1,
-
- field2: 2,
-
- field3: 3,
- },
- ],
- };
-
- const queryResult = await graphql(resolver, query, result);
-
- expect(queryResult).toEqual({
- array: [
- {
- id: 'abcde',
-
- field1: 1,
-
- field2: 2,
-
- field3: 3,
- },
- ],
- });
- });
-
- it('passes info including isLeaf, resultKey, directives, and field', async () => {
- const leafMap: { [s: string]: ExecInfo } = {};
-
- const resolver: Resolver = (fieldName, root, args, context, info) => {
- leafMap[fieldName] = info;
-
- return r('continue');
- };
-
- const query = gql`
- {
- alias: a {
- b
- hasDirective @skip(if: false) @otherDirective(arg: $x)
- }
- }
- `;
-
- await graphql(resolver, query, null, null, { x: 'argument' });
-
- expect(leafMap).toEqual({
- a: {
- directives: null,
-
- isLeaf: false,
-
- resultKey: 'alias',
-
- field: expect.any(Object),
- },
-
- b: {
- directives: null,
-
- isLeaf: true,
-
- resultKey: 'b',
-
- field: expect.any(Object),
- },
-
- hasDirective: {
- directives: {
- skip: { if: false },
-
- otherDirective: { arg: 'argument' },
- },
-
- isLeaf: true,
-
- resultKey: 'hasDirective',
-
- field: expect.any(Object),
- },
- });
- });
-
- it('can filter GraphQL results', async () => {
- const data = {
- alias: 'Bob',
-
- name: 'Wrong',
-
- height: 1.89,
-
- avatar: {
- square: 'abc',
-
- circle: 'def',
-
- triangle: 'qwe',
- },
- };
-
- const fragment = gql`
- fragment PersonDetails on Person {
- alias: name
- height(unit: METERS)
- avatar {
- square
- ... on Avatar {
- circle
- }
- }
- }
- `;
-
- const resolver: Resolver = (fieldName, root, args, context, info) => {
- return r(root[info.resultKey]);
- };
-
- const filtered = await graphql(resolver, fragment, data);
-
- expect(filtered).toEqual({
- alias: 'Bob',
-
- height: 1.89,
-
- avatar: {
- square: 'abc',
-
- circle: 'def',
- },
- });
- });
-
- it('can handle mutations', async () => {
- const resolver = (fieldName, root, args) => {
- let value;
-
- if (fieldName === 'operateOnNumbers') {
- value = args;
- } else if (fieldName === 'add') {
- value = root.a + root.b;
- } else if (fieldName === 'subtract') {
- value = root.a - root.b;
- } else if (fieldName === 'multiply') {
- value = root.a * root.b;
- } else if (fieldName === 'divide') {
- value = root.a / root.b;
- }
-
- return r(value);
- };
-
- const query = gql`
- mutation {
- operateOnNumbers(a: 10, b: 2) {
- add
- subtract
- multiply
- divide
- }
- }
- `;
-
- const result = await graphql(resolver, query, '', null, null);
-
- expect(result).toEqual({
- operateOnNumbers: {
- add: 12,
-
- subtract: 8,
-
- multiply: 20,
-
- divide: 5,
- },
- });
- });
-
- it('does not error on subscriptions', async () => {
- const data = {
- user: {
- id: 1,
-
- name: 'Some User',
-
- height: 1.89,
- },
- };
-
- const resolver = (fieldName, root) => r(root[fieldName]);
-
- const query = gql`
- subscription {
- user {
- id
- name
- height
- }
- }
- `;
-
- const result = await graphql(resolver, query, data);
-
- expect(result).toEqual({
- user: {
- id: 1,
-
- name: 'Some User',
-
- height: 1.89,
- },
- });
- });
-
- it('can handle documents with multiple fragments', async () => {
- const data = {
- user: {
- id: 1,
-
- name: 'Some User',
-
- height: 1.89,
- },
- };
-
- const resolver = (fieldName, root) => r(root[fieldName]);
-
- const query = gql`
- fragment A on User {
- name
- }
-
- fragment B on User {
- height
- }
-
- query {
- user {
- id
- ...A
- ...B
- }
- }
- `;
-
- const result = await graphql(resolver, query, data);
-
- expect(result).toEqual({
- user: {
- id: 1,
-
- name: 'Some User',
-
- height: 1.89,
- },
- });
- });
-
- describe('examples', () => {
- it('readme example', async () => {
- // I don't need all this stuff!
-
- const gitHubAPIResponse = {
- url: 'https://api.github.com/repos/octocat/Hello-World/issues/1347',
-
- title: 'Found a bug',
-
- body: "I'm having a problem with this.",
-
- user: {
- login: 'octocat',
-
- avatar_url: 'https://github.com/images/error/octocat_happy.gif',
-
- url: 'https://api.github.com/users/octocat',
- },
-
- labels: [
- {
- url: 'https://api.github.com/repos/octocat/Hello-World/labels/bug',
-
- name: 'bug',
-
- color: 'f29513',
- },
- ],
- };
-
- // Write a query that gets just the fields we want
-
- const query = gql`
- {
- title
- user {
- login
- }
- labels {
- name
- }
- }
- `;
-
- // Define a resolver that just returns a property
-
- const resolver = (fieldName, root) => root[fieldName];
-
- // Filter the data!
-
- const result = await graphql(resolver, query, gitHubAPIResponse);
-
- expect(result).toEqual({
- title: 'Found a bug',
-
- user: { login: 'octocat' },
-
- labels: [{ name: 'bug' }],
- });
- });
-
- it('readme example 2', async () => {
- // Write a query where the fields are types, but we alias them
-
- const query = gql`
- {
- author {
- name: string
- age: int
- address {
- state: string
- }
- }
- }
- `;
-
- // Define a resolver that uses the field name to determine the type
-
- // Note that we get the actual name, not the alias, but the alias
-
- // is used to determine the location in the response
-
- const resolver = fieldName =>
- ({ string: 'This is a string', int: 5 }[fieldName] || 'continue');
-
- // Generate the object!
-
- const result = await graphql(resolver, query);
-
- expect(result).toEqual({
- author: {
- name: 'This is a string',
-
- age: 5,
-
- address: { state: 'This is a string' },
- },
- });
- });
-
- it('read from Redux normalized store', async () => {
- const data = {
- result: [1, 2],
-
- entities: {
- articles: {
- 1: { id: 1, title: 'Some Article', author: 1 },
-
- 2: { id: 2, title: 'Other Article', author: 1 },
- },
-
- users: { 1: { id: 1, name: 'Dan' } },
- },
- };
-
- const query = gql`
- {
- result {
- title
- author {
- name
- }
- }
- }
- `;
-
- const schema = { articles: { author: 'users' } };
-
- // This resolver is a bit more complex than others, since it has to
-
- // correctly handle the root object, values by ID, and scalar leafs.
-
- const resolver = (fieldName, rootValue, args, context): any => {
- if (!rootValue) {
- return context.result.map(id => {
- return {
- ...context.entities.articles[id],
-
- __typename: 'articles',
- };
- });
- }
-
- const typename = rootValue.__typename;
-
- // If this field is a reference according to the schema
-
- if (typename && schema[typename] && schema[typename][fieldName]) {
- // Get the target type, and get it from entities by ID
-
- const targetType: string = schema[typename][fieldName];
-
- return {
- ...context.entities[targetType][rootValue[fieldName]],
-
- __typename: targetType,
- };
- }
-
- // This field is just a scalar
-
- return rootValue[fieldName];
- };
-
- const result = await graphql(
- resolver,
- query,
- null,
- data, // pass data as context since we have to access it all the time
- );
-
- // This is the non-normalized data, with only the fields we asked for in our query!
-
- expect(result).toEqual({
- result: [
- {
- title: 'Some Article',
-
- author: { name: 'Dan' },
- },
-
- {
- title: 'Other Article',
-
- author: { name: 'Dan' },
- },
- ],
- });
- });
- });
-};
-
-describe('basic operations done sync', execute(require('../').default, x => x));
-
-describe(
- 'basic operations done async',
- execute(require('../async').graphql, x => Promise.resolve(x)),
-);
diff --git a/packages/graphql-anywhere/src/__tests__/mapper.ts b/packages/graphql-anywhere/src/__tests__/mapper.ts
deleted file mode 100644
--- a/packages/graphql-anywhere/src/__tests__/mapper.ts
+++ /dev/null
@@ -1,92 +0,0 @@
-import graphql from '../';
-import gql from 'graphql-tag';
-
-import { cloneElement, createElement } from 'react';
-import { renderToStaticMarkup } from 'react-dom/server';
-
-describe('result mapper', () => {
- it('can deal with promises', () => {
- const resolver = (_, root) => {
- return new Promise(res => {
- setTimeout(() => {
- Promise.resolve(root).then(val => res(val + 'fake'));
- }, 10);
- });
- };
-
- function promiseForObject(object): Promise<{ [key: string]: any }> {
- const keys = Object.keys(object);
- const valuesAndPromises = keys.map(name => object[name]);
-
- return Promise.all(valuesAndPromises).then(values =>
- values.reduce((resolvedObject, value, i) => {
- resolvedObject[keys[i]] = value;
- return resolvedObject;
- }, Object.create(null)),
- );
- }
-
- const query = gql`
- {
- a {
- b
- c
- }
- }
- `;
-
- const result = graphql(resolver, query, '', null, null, {
- resultMapper: promiseForObject,
- });
-
- return result.then(value => {
- expect(value).toEqual({
- a: {
- b: 'fakefake',
- c: 'fakefake',
- },
- });
- });
- });
-
- it('can construct React elements', () => {
- const resolver = (fieldName, root, args) => {
- if (fieldName === 'text') {
- return args.value;
- }
-
- return createElement(fieldName, args);
- };
-
- const reactMapper = (childObj, root) => {
- const reactChildren = Object.keys(childObj).map(key => childObj[key]);
-
- if (root) {
- return cloneElement(root, root.props, ...reactChildren);
- }
-
- return reactChildren[0];
- };
-
- function gqlToReact(document): any {
- return graphql(resolver, document, '', null, null, {
- resultMapper: reactMapper,
- });
- }
-
- const query = gql`
- {
- div {
- s1: span(id: "my-id") {
- text(value: "This is text")
- }
- s2: span
- }
- }
- `;
-
- expect(renderToStaticMarkup(gqlToReact(query))).toBe(
- '<div><span id="my-id">This is text</span><span></span></div>',
- );
- });
-});
diff --git a/packages/graphql-anywhere/src/__tests__/matcher.ts b/packages/graphql-anywhere/src/__tests__/matcher.ts
deleted file mode 100644
--- a/packages/graphql-anywhere/src/__tests__/matcher.ts
+++ /dev/null
@@ -1,60 +0,0 @@
-import gql from 'graphql-tag';
-
-import graphql, { FragmentMatcher } from '../';
-
-describe('fragment matcher', () => {
- it('does basic things', () => {
- const resolver = fieldName => fieldName;
-
- const query = gql`
- {
- a {
- b
- ...yesFrag
- ...noFrag
- ... on Yes {
- e
- }
- ... on No {
- f
- }
- }
- }
-
- fragment yesFrag on Yes {
- c
- }
-
- fragment noFrag on No {
- d
- }
- `;
-
- const fragmentMatcher: FragmentMatcher = (_, typeCondition) =>
- typeCondition === 'Yes';
-
- const resultWithMatcher = graphql(resolver, query, '', null, null, {
- fragmentMatcher,
- });
-
- expect(resultWithMatcher).toEqual({
- a: {
- b: 'b',
- c: 'c',
- e: 'e',
- },
- });
-
- const resultNoMatcher = graphql(resolver, query, '', null, null);
-
- expect(resultNoMatcher).toEqual({
- a: {
- b: 'b',
- c: 'c',
- d: 'd',
- e: 'e',
- f: 'f',
- },
- });
- });
-});
diff --git a/packages/graphql-anywhere/src/__tests__/utilities.ts b/packages/graphql-anywhere/src/__tests__/utilities.ts
deleted file mode 100644
--- a/packages/graphql-anywhere/src/__tests__/utilities.ts
+++ /dev/null
@@ -1,561 +0,0 @@
-import gql, { disableFragmentWarnings } from 'graphql-tag';
-import { checkPropTypes } from 'prop-types';
-
-// Turn off warnings for repeated fragment names
-disableFragmentWarnings();
-
-import { filter, check, propType } from '../utilities';
-
-describe('utilities', () => {
- describe('with a single query', () => {
- const doc = gql`
- {
- alias: name
- height(unit: METERS)
- avatar {
- square
- }
- }
- `;
- const fragment = gql`
- fragment foo on Foo {
- alias: name
- height(unit: METERS)
- avatar {
- square
- }
- }
- `;
- const fragmentWithAVariable = gql`
- fragment foo on Foo {
- alias: name
- height(unit: METERS)
- avatar @include(if: $foo) {
- square
- }
- }
- `;
- const data = {
- alias: 'Bob',
- name: 'Wrong',
- height: 1.89,
- avatar: {
- square: 'abc',
- circle: 'def',
- triangle: 'qwe',
- },
- };
- const filteredData = {
- alias: 'Bob',
- height: 1.89,
- avatar: {
- square: 'abc',
- },
- };
- const arrayData = [
- {
- alias: 'Bob',
- name: 'Wrong',
- height: 1.89,
- avatar: {
- square: 'abc',
- circle: 'def',
- triangle: 'qwe',
- },
- },
- {
- alias: 'Tom',
- name: 'Right',
- height: 1.77,
- avatar: {
- square: 'jkl',
- circle: 'bnm',
- triangle: 'uio',
- },
- },
- ];
- const filteredArrayData = [
- {
- alias: 'Bob',
- height: 1.89,
- avatar: {
- square: 'abc',
- },
- },
- {
- alias: 'Tom',
- height: 1.77,
- avatar: {
- square: 'jkl',
- },
- },
- ];
-
- beforeEach(() => {
- checkPropTypes.resetWarningCache();
- jest.spyOn(global.console, 'error').mockImplementation(() => {});
- });
-
- afterEach(() => {
- global.console.error.mockRestore();
- });
-
- it('can filter data', () => {
- expect(filter(doc, data)).toEqual(filteredData);
- });
-
- it('can filter an array of data', () => {
- expect(filter(doc, arrayData)).toEqual(filteredArrayData);
- });
-
- it('can short circuit when data is null', () => {
- expect(filter(doc, null)).toEqual(null);
- });
-
- it('can filter data for fragments', () => {
- expect(filter(fragment, data)).toEqual(filteredData);
- });
-
- it('can filter data for fragments with variables', () => {
- expect(filter(fragmentWithAVariable, data, { foo: true })).toEqual(
- filteredData,
- );
- });
-
- it('can generate propTypes for fragments', () => {
- expect(propType(fragment)).toEqual(expect.any(Function));
- });
-
- it('can check propTypes for fragments', () => {
- const propTypes = {
- foo: propType(fragment),
- };
- checkPropTypes(propTypes, filteredData, 'prop', 'MyComponent');
- expect(global.console.error).not.toHaveBeenCalled();
- checkPropTypes(propTypes, { foo: {} }, 'prop', 'MyComponent');
- expect(global.console.error.mock.calls[0]).toMatchSnapshot();
- });
-
- it('can generate propTypes for fragments with variables', () => {
- expect(propType(fragmentWithAVariable)).toEqual(expect.any(Function));
- });
-
- it('can check propTypes for fragments with variables', () => {
- const mapPropsToVariables = () => null;
- const propTypes = {
- foo: propType(fragmentWithAVariable, mapPropsToVariables),
- };
- checkPropTypes(propTypes, { foo: filteredData }, 'prop', 'MyComponent');
- expect(global.console.error).not.toHaveBeenCalled();
- const badProps = { foo: { ...filteredData } };
- delete badProps.foo.height;
- checkPropTypes(propTypes, badProps, 'prop', 'MyComponent');
- expect(global.console.error).toHaveBeenCalled();
- expect(global.console.error.mock.calls[0]).toMatchSnapshot();
- });
-
- it('makes variable inclusion props optional, when no variables are passed', () => {
- const propTypes = {
- foo: propType(fragmentWithAVariable),
- };
- const propsWithoutAvatar = { foo: { ...filteredData } };
- delete propsWithoutAvatar.foo.avatar;
- checkPropTypes(propTypes, propsWithoutAvatar, 'prop', 'MyComponent');
- expect(global.console.error).not.toHaveBeenCalled();
- });
-
- it('can check matching data', () => {
- check(doc, filteredData);
- });
-
- it('can check matching data for fragments with variables', () => {
- check(doc, filteredData, { foo: true });
- });
-
- it('throws when checking non-matching data for fragments with variables', () => {
- const badFilteredData = { ...filteredData };
- delete badFilteredData.avatar;
- expect(() => {
- check(doc, badFilteredData);
- }).toThrow();
- });
-
- // This doesn't throw but potentially it should?
- it('can check overspecified data', () => {
- check(doc, data);
- });
-
- it('throws when checking underspecified data', () => {
- expect(() => {
- check(doc, {
- name: 'Wrong',
- });
- }).toThrow();
-
- expect(() => {
- check(doc, {
- alias: 'Bob',
- height: 1.89,
- });
- }).toThrow();
- });
- });
-
- describe('with a single fragment', () => {
- const doc = gql`
- fragment PersonDetails on Person {
- alias: name
- height(unit: METERS)
- avatar {
- square
- }
- }
- `;
- const data = {
- alias: 'Bob',
- name: 'Wrong',
- height: 1.89,
- avatar: {
- square: 'abc',
- circle: 'def',
- triangle: 'qwe',
- },
- };
- const filteredData = {
- alias: 'Bob',
- height: 1.89,
- avatar: {
- square: 'abc',
- },
- };
- const arrayData = [
- {
- alias: 'Bob',
- name: 'Wrong',
- height: 1.89,
- avatar: {
- square: 'abc',
- circle: 'def',
- triangle: 'qwe',
- },
- },
- {
- alias: 'Tom',
- name: 'Right',
- height: 1.77,
- avatar: {
- square: 'jkl',
- circle: 'bnm',
- triangle: 'uio',
- },
- },
- ];
- const filteredArrayData = [
- {
- alias: 'Bob',
- height: 1.89,
- avatar: {
- square: 'abc',
- },
- },
- {
- alias: 'Tom',
- height: 1.77,
- avatar: {
- square: 'jkl',
- },
- },
- ];
-
- it('can filter data', () => {
- expect(filter(doc, data)).toEqual(filteredData);
- });
-
- it('can filter an array of data', () => {
- expect(filter(doc, arrayData)).toEqual(filteredArrayData);
- });
-
- it('can check matching data', () => {
- check(doc, filteredData);
- });
-
- // This doesn't throw but potentially it should?
- it('can check overspecified data', () => {
- check(doc, data);
- });
-
- it('throws when checking underspecified data', () => {
- expect(() => {
- check(doc, {
- name: 'Wrong',
- });
- }).toThrow();
-
- expect(() => {
- check(doc, {
- alias: 'Bob',
- height: 1.89,
- });
- }).toThrow();
- });
- });
-
- describe('with a single fragment', () => {
- const doc = gql`
- fragment PersonDetails on Person {
- alias: name
- height(unit: METERS)
- avatar {
- square
- }
- }
- `;
- const data = {
- alias: 'Bob',
- name: 'Wrong',
- height: 1.89,
- avatar: {
- square: 'abc',
- circle: 'def',
- triangle: 'qwe',
- },
- };
- const filteredData = {
- alias: 'Bob',
- height: 1.89,
- avatar: {
- square: 'abc',
- },
- };
- const arrayData = [
- {
- alias: 'Bob',
- name: 'Wrong',
- height: 1.89,
- avatar: {
- square: 'abc',
- circle: 'def',
- triangle: 'qwe',
- },
- },
- {
- alias: 'Tom',
- name: 'Right',
- height: 1.77,
- avatar: {
- square: 'jkl',
- circle: 'bnm',
- triangle: 'uio',
- },
- },
- ];
- const filteredArrayData = [
- {
- alias: 'Bob',
- height: 1.89,
- avatar: {
- square: 'abc',
- },
- },
- {
- alias: 'Tom',
- height: 1.77,
- avatar: {
- square: 'jkl',
- },
- },
- ];
-
- it('can filter data', () => {
- expect(filter(doc, data)).toEqual(filteredData);
- });
-
- it('can filter an array of data', () => {
- expect(filter(doc, arrayData)).toEqual(filteredArrayData);
- });
-
- it('can check matching data', () => {
- check(doc, filteredData);
- });
-
- // This doesn't throw but potentially it should?
- it('can check overspecified data', () => {
- check(doc, data);
- });
-
- it('throws when checking underspecified data', () => {
- expect(() => {
- check(doc, {
- name: 'Wrong',
- });
- }).toThrow();
-
- expect(() => {
- check(doc, {
- alias: 'Bob',
- height: 1.89,
- });
- }).toThrow();
- });
- });
-
- describe('with nested fragments', () => {
- const doc = gql`
- fragment PersonDetails on Person {
- alias: name
- height(unit: METERS)
- avatar {
- square
- ... on Avatar {
- circle
- }
- }
- }
- `;
- const data = {
- alias: 'Bob',
- name: 'Wrong',
- height: 1.89,
- avatar: {
- square: 'abc',
- circle: 'def',
- triangle: 'qwe',
- },
- };
- const filteredData = {
- alias: 'Bob',
- height: 1.89,
- avatar: {
- square: 'abc',
- circle: 'def',
- },
- };
- const arrayData = [
- {
- alias: 'Bob',
- name: 'Wrong',
- height: 1.89,
- avatar: {
- square: 'abc',
- circle: 'def',
- triangle: 'qwe',
- },
- },
- {
- alias: 'Tom',
- name: 'Right',
- height: 1.77,
- avatar: {
- square: 'jkl',
- circle: 'bnm',
- triangle: 'uio',
- },
- },
- ];
- const filteredArrayData = [
- {
- alias: 'Bob',
- height: 1.89,
- avatar: {
- square: 'abc',
- circle: 'def',
- },
- },
- {
- alias: 'Tom',
- height: 1.77,
- avatar: {
- square: 'jkl',
- circle: 'bnm',
- },
- },
- ];
-
- it('can filter data', () => {
- expect(filter(doc, data)).toEqual(filteredData);
- });
-
- it('can filter an array of data', () => {
- expect(filter(doc, arrayData)).toEqual(filteredArrayData);
- });
-
- it('can check matching data', () => {
- check(doc, filteredData);
- });
-
- // This doesn't throw but potentially it should?
- it('can check overspecified data', () => {
- check(doc, data);
- });
-
- it('throws when checking underspecified data', () => {
- expect(() => {
- check(doc, {
- name: 'Wrong',
- });
- }).toThrow();
-
- expect(() => {
- check(doc, [
- {
- name: 'Wrong',
- },
- ]);
- }).toThrow();
-
- expect(() => {
- check(doc, {
- alias: 'Bob',
- height: 1.89,
- });
- }).toThrow();
-
- expect(() => {
- check(doc, {
- alias: 'Bob',
- height: 1.89,
- avatar: {
- // missing the correct field
- triangle: 'qwe',
- },
- });
- }).toThrow();
- });
-
- describe('if the nested fragment has not matched', () => {
- it('can filter data', () => {
- const filtered = filter(doc, {
- alias: 'Bob',
- name: 'Wrong',
- height: 1.89,
- avatar: {
- square: 'abc',
- // there is no circle field here, but we can't know if that's not
- // because avatar is not an Avatar
- triangle: 'qwe',
- },
- });
-
- expect(filtered).toEqual({
- alias: 'Bob',
- height: 1.89,
- avatar: {
- square: 'abc',
- },
- });
- });
-
- it('does not throw when checking', () => {
- check(doc, {
- alias: 'Wrong',
- height: 1.89,
- avatar: {
- square: 'abc',
- // there is no circle field here, but we can't know if that's not
- // because avatar is not an Avatar
- },
- });
- });
- });
- });
-});
diff --git a/packages/apollo-client/src/__tests__/ApolloClient.ts b/src/__tests__/ApolloClient.ts
similarity index 94%
rename from packages/apollo-client/src/__tests__/ApolloClient.ts
rename to src/__tests__/ApolloClient.ts
--- a/packages/apollo-client/src/__tests__/ApolloClient.ts
+++ b/src/__tests__/ApolloClient.ts
@@ -1,16 +1,30 @@
import gql from 'graphql-tag';
-import { ApolloLink, Observable } from 'apollo-link';
-import { InMemoryCache } from 'apollo-cache-inmemory';
-import { stripSymbols } from 'apollo-utilities';
-import { withWarning } from '../util/wrap';
-import ApolloClient from '../';
+
+import { Observable } from '../utilities/observables/Observable';
+import { makeReference } from '../utilities/graphql/storeUtils';
+import { ApolloLink } from '../link/core/ApolloLink';
+import { HttpLink } from '../link/http/HttpLink';
+import { InMemoryCache } from '../cache/inmemory/inMemoryCache';
+import { stripSymbols } from '../utilities/testing/stripSymbols';
+import { withWarning } from '../utilities/testing/wrap';
+import { ApolloClient } from '../';
import { DefaultOptions } from '../ApolloClient';
import { FetchPolicy, QueryOptions } from '../core/watchQueryOptions';
-import { DataProxy } from 'apollo-cache';
describe('ApolloClient', () => {
describe('constructor', () => {
- it('will throw an error if link is not passed in', () => {
+ let oldFetch: any;
+
+ beforeEach(() => {
+ oldFetch = window.fetch;
+ window.fetch = () => null;
+ });
+
+ afterEach(() => {
+ window.fetch = oldFetch;
+ });
+
+ it('will throw an error if `uri` or `link` is not passed in', () => {
expect(() => {
new ApolloClient({ cache: new InMemoryCache() } as any);
}).toThrowErrorMatchingSnapshot();
@@ -21,6 +35,28 @@ describe('ApolloClient', () => {
new ApolloClient({ link: ApolloLink.empty() } as any);
}).toThrowErrorMatchingSnapshot();
});
+
+ it('should create an `HttpLink` instance if `uri` is provided', () => {
+ const uri = 'http://localhost:4000';
+ const client = new ApolloClient({
+ cache: new InMemoryCache(),
+ uri,
+ });
+
+ expect(client.link).toBeDefined();
+ expect((client.link as HttpLink).options.uri).toEqual(uri);
+ });
+
+ it('should accept `link` over `uri` if both are provided', () => {
+ const uri1 = 'http://localhost:3000';
+ const uri2 = 'http://localhost:4000';
+ const client = new ApolloClient({
+ cache: new InMemoryCache(),
+ uri: uri1,
+ link: new HttpLink({ uri: uri2 })
+ });
+ expect((client.link as HttpLink).options.uri).toEqual(uri2);
+ });
});
describe('readQuery', () => {
@@ -82,22 +118,14 @@ describe('ApolloClient', () => {
a: 1,
b: 2,
c: 3,
- d: {
- type: 'id',
- id: 'foo',
- generated: false,
- },
+ d: makeReference('foo'),
},
foo: {
__typename: 'Foo',
e: 4,
f: 5,
g: 6,
- h: {
- type: 'id',
- id: 'bar',
- generated: false,
- },
+ h: makeReference('bar'),
},
bar: {
__typename: 'Bar',
@@ -339,22 +367,14 @@ describe('ApolloClient', () => {
a: 1,
b: 2,
c: 3,
- d: {
- type: 'id',
- id: 'foo',
- generated: false,
- },
+ d: makeReference('foo'),
},
foo: {
__typename: 'Foo',
e: 4,
f: 5,
g: 6,
- h: {
- type: 'id',
- id: 'bar',
- generated: false,
- },
+ h: makeReference('bar'),
},
bar: {
__typename: 'Bar',
@@ -599,6 +619,7 @@ describe('ApolloClient', () => {
expect((client.cache as InMemoryCache).extract()).toEqual({
ROOT_QUERY: {
+ __typename: "Query",
a: 1,
},
});
@@ -615,6 +636,7 @@ describe('ApolloClient', () => {
expect((client.cache as InMemoryCache).extract()).toEqual({
ROOT_QUERY: {
+ __typename: "Query",
a: 1,
b: 2,
c: 3,
@@ -634,6 +656,7 @@ describe('ApolloClient', () => {
expect((client.cache as InMemoryCache).extract()).toEqual({
ROOT_QUERY: {
+ __typename: "Query",
a: 4,
b: 5,
c: 6,
@@ -742,6 +765,7 @@ describe('ApolloClient', () => {
expect((client.cache as InMemoryCache).extract()).toEqual({
ROOT_QUERY: {
+ __typename: "Query",
'field({"literal":true,"value":42})': 1,
'field({"literal":false,"value":42})': 2,
},
@@ -785,6 +809,7 @@ describe('ApolloClient', () => {
expect((client.cache as InMemoryCache).extract()).toEqual({
ROOT_QUERY: {
+ __typename: "Query",
'field({"literal":true,"value":42})': 2,
'field({"literal":false,"value":-1})': 1,
},
@@ -794,7 +819,10 @@ describe('ApolloClient', () => {
it('should warn when the data provided does not match the query shape', () => {
const client = new ApolloClient({
link: ApolloLink.empty(),
- cache: new InMemoryCache(),
+ cache: new InMemoryCache({
+ // Passing an empty map enables the warning:
+ possibleTypes: {},
+ }),
});
return withWarning(() => {
@@ -1073,7 +1101,10 @@ describe('ApolloClient', () => {
it('should warn when the data provided does not match the fragment shape', () => {
const client = new ApolloClient({
link: ApolloLink.empty(),
- cache: new InMemoryCache(),
+ cache: new InMemoryCache({
+ // Passing an empty map enables the warning:
+ possibleTypes: {},
+ }),
});
return withWarning(() => {
@@ -1160,7 +1191,7 @@ describe('ApolloClient', () => {
++count;
if (count === 1) {
expect(stripSymbols(nextResult.data)).toEqual(data);
- expect(stripSymbols(observable.currentResult().data)).toEqual(
+ expect(stripSymbols(observable.getCurrentResult().data)).toEqual(
data,
);
@@ -1212,7 +1243,7 @@ describe('ApolloClient', () => {
count++;
if (count === 1) {
expect(stripSymbols(nextResult.data)).toEqual(data);
- expect(stripSymbols(observable.currentResult().data)).toEqual(
+ expect(stripSymbols(observable.getCurrentResult().data)).toEqual(
data,
);
@@ -1284,7 +1315,7 @@ describe('ApolloClient', () => {
count++;
if (count === 1) {
expect(stripSymbols(result.data)).toEqual(data);
- expect(stripSymbols(observable.currentResult().data)).toEqual(
+ expect(stripSymbols(observable.getCurrentResult().data)).toEqual(
data,
);
const bestFriends = result.data.people.friends.filter(
@@ -1335,12 +1366,10 @@ describe('ApolloClient', () => {
count++;
if (count === 1) {
expect(stripSymbols(result.data)).toEqual(data);
- expect(stripSymbols(observable.currentResult().data)).toEqual(
+ expect(stripSymbols(observable.getCurrentResult().data)).toEqual(
data,
);
const friends = result.data.people.friends;
- friends[0].type = 'okayest';
- friends[1].type = 'okayest';
// this should re call next
client.writeFragment({
@@ -1349,11 +1378,15 @@ describe('ApolloClient', () => {
fragment bestFriends on Person {
friends {
id
+ type
}
}
`,
data: {
- friends,
+ friends: [
+ { ...friends[0], type: 'okayest' },
+ { ...friends[1], type: 'okayest' },
+ ],
__typename: 'Person',
},
});
@@ -1521,6 +1554,7 @@ describe('ApolloClient', () => {
query,
data: {
obj: {
+ __typename: 'Obj',
field: {
field2: 1,
__typename: 'Field',
@@ -1531,7 +1565,7 @@ describe('ApolloClient', () => {
});
client.writeData({
- id: '$ROOT_QUERY.obj',
+ id: 'Obj:uniqueId',
data: {
field: {
field2: 2,
@@ -1544,7 +1578,7 @@ describe('ApolloClient', () => {
.query({ query })
.then(({ data }: any) => {
console.warn = originalWarn;
- expect(data.obj.__typename).toEqual('__ClientData');
+ expect(data.obj.__typename).toEqual('Obj');
expect(data.obj.field.__typename).toEqual('Field');
})
.catch(e => console.log(e));
@@ -1555,19 +1589,21 @@ describe('ApolloClient', () => {
it('will write data locally which will then be read back', () => {
const client = new ApolloClient({
link: ApolloLink.empty(),
- cache: new InMemoryCache().restore({
+ cache: new InMemoryCache({
+ dataIdFromObject(object) {
+ if (typeof object.__typename === 'string') {
+ return object.__typename.toLowerCase();
+ }
+ },
+ }).restore({
foo: {
__typename: 'Foo',
a: 1,
b: 2,
c: 3,
- bar: {
- type: 'id',
- id: '$foo.bar',
- generated: true,
- },
+ bar: makeReference('bar'),
},
- '$foo.bar': {
+ bar: {
__typename: 'Bar',
d: 4,
e: 5,
@@ -1677,7 +1713,7 @@ describe('ApolloClient', () => {
});
client.writeFragment({
- id: '$foo.bar',
+ id: 'bar',
fragment: gql`
fragment y on Bar {
e
@@ -1841,49 +1877,30 @@ describe('ApolloClient', () => {
expect((client.cache as InMemoryCache).extract()).toEqual({
ROOT_QUERY: {
+ __typename: "Query",
a: 1,
b: 2,
g: 8,
h: 9,
bar: {
- type: 'id',
- id: '$ROOT_QUERY.bar',
- generated: true,
+ i: 10,
+ j: 11,
+ foo: {
+ _id: 'barfoo',
+ k: 12,
+ l: 13,
+ },
},
foo: {
- type: 'id',
- id: '$ROOT_QUERY.foo',
- generated: true,
- },
- },
- '$ROOT_QUERY.foo': {
- c: 3,
- d: 4,
- bar: {
- type: 'id',
- id: '$ROOT_QUERY.foo.bar',
- generated: true,
- },
- },
- '$ROOT_QUERY.bar': {
- i: 10,
- j: 11,
- foo: {
- type: 'id',
- id: '$ROOT_QUERY.bar.foo',
- generated: true,
+ c: 3,
+ d: 4,
+ bar: {
+ id: 'foobar',
+ e: 5,
+ f: 6,
+ },
},
},
- '$ROOT_QUERY.foo.bar': {
- id: 'foobar',
- e: 5,
- f: 6,
- },
- '$ROOT_QUERY.bar.foo': {
- _id: 'barfoo',
- k: 12,
- l: 13,
- },
});
});
@@ -2057,28 +2074,19 @@ describe('ApolloClient', () => {
expect((client.cache as InMemoryCache).extract()).toEqual({
ROOT_QUERY: {
+ __typename: "Query",
a: 1,
b: 2,
foo: {
- type: 'id',
- id: '$ROOT_QUERY.foo',
- generated: true,
- },
- },
- '$ROOT_QUERY.foo': {
- c: 3,
- d: 4,
- bar: {
- type: 'id',
- id: '$ROOT_QUERY.foo.bar',
- generated: true,
+ c: 3,
+ d: 4,
+ bar: {
+ id: 'foobar',
+ e: 5,
+ f: 6,
+ },
},
},
- '$ROOT_QUERY.foo.bar': {
- id: 'foobar',
- e: 5,
- f: 6,
- },
});
});
@@ -2115,28 +2123,19 @@ describe('ApolloClient', () => {
expect((client.cache as InMemoryCache).extract()).toEqual({
ROOT_QUERY: {
+ __typename: "Query",
a: 1,
b: 2,
foo: {
- type: 'id',
- id: '$ROOT_QUERY.foo',
- generated: true,
- },
- },
- '$ROOT_QUERY.foo': {
- c: 3,
- d: 4,
- bar: {
- type: 'id',
- id: '$ROOT_QUERY.foo.bar',
- generated: true,
+ c: 3,
+ d: 4,
+ bar: {
+ _id: 'foobar',
+ e: 5,
+ f: 6,
+ },
},
},
- '$ROOT_QUERY.foo.bar': {
- _id: 'foobar',
- e: 5,
- f: 6,
- },
});
});
@@ -2374,8 +2373,6 @@ describe('ApolloClient', () => {
defaultOptions,
});
- client.initQueryManager();
-
let queryOptions: QueryOptions = {
query: gql`
{
@@ -2429,6 +2426,7 @@ describe('ApolloClient', () => {
expect((client.cache as any).data.data).toEqual({
ROOT_QUERY: {
+ __typename: "Query",
a: 1,
},
});
diff --git a/src/__tests__/__snapshots__/ApolloClient.ts.snap b/src/__tests__/__snapshots__/ApolloClient.ts.snap
new file mode 100644
--- /dev/null
+++ b/src/__tests__/__snapshots__/ApolloClient.ts.snap
@@ -0,0 +1,377 @@
+// Jest Snapshot v1, https://goo.gl/fbAQLP
+
+exports[`ApolloClient constructor will throw an error if \`uri\` or \`link\` is not passed in 1`] = `
+"To initialize Apollo Client, you must specify 'uri' or 'link' and 'cache' properties in the options object.
+For more information, please visit: https://www.apollographql.com/docs/react/"
+`;
+
+exports[`ApolloClient constructor will throw an error if cache is not passed in 1`] = `
+"To initialize Apollo Client, you must specify 'uri' or 'link' and 'cache' properties in the options object.
+For more information, please visit: https://www.apollographql.com/docs/react/"
+`;
+
+exports[`ApolloClient write then read will not use a default id getter if either _id or id is present when __typename is not also present 1`] = `
+Object {
+ "ROOT_QUERY": Object {
+ "__typename": "Query",
+ "a": 1,
+ "b": 2,
+ "bar": Object {
+ "foo": Object {
+ "_id": "barfoo",
+ "k": 12,
+ "l": 13,
+ },
+ "i": 10,
+ "j": 11,
+ },
+ "foo": Object {
+ "bar": Object {
+ "__ref": "bar:foobar",
+ },
+ "c": 3,
+ "d": 4,
+ },
+ "g": 8,
+ "h": 9,
+ },
+ "bar:foobar": Object {
+ "__typename": "bar",
+ "e": 5,
+ "f": 6,
+ "id": "foobar",
+ },
+}
+`;
+
+exports[`ApolloClient write then read will not use a default id getter if id and _id are not present 1`] = `
+Object {
+ "ROOT_QUERY": Object {
+ "__typename": "Query",
+ "a": 1,
+ "b": 2,
+ "bar": Object {
+ "__typename": "bar",
+ "foo": Object {
+ "__typename": "foo",
+ "k": 12,
+ "l": 13,
+ },
+ "i": 10,
+ "j": 11,
+ },
+ "foo": Object {
+ "__typename": "foo",
+ "bar": Object {
+ "__typename": "bar",
+ "e": 5,
+ "f": 6,
+ },
+ "c": 3,
+ "d": 4,
+ },
+ "g": 8,
+ "h": 9,
+ },
+}
+`;
+
+exports[`ApolloClient write then read will use a default id getter if __typename and _id are present 1`] = `
+Object {
+ "ROOT_QUERY": Object {
+ "__typename": "Query",
+ "a": 1,
+ "b": 2,
+ "foo": Object {
+ "__typename": "foo",
+ "bar": Object {
+ "__ref": "bar:foobar",
+ },
+ "c": 3,
+ "d": 4,
+ },
+ },
+ "bar:foobar": Object {
+ "__typename": "bar",
+ "_id": "foobar",
+ "e": 5,
+ "f": 6,
+ },
+}
+`;
+
+exports[`ApolloClient write then read will use a default id getter if __typename and id are present 1`] = `
+Object {
+ "ROOT_QUERY": Object {
+ "__typename": "Query",
+ "a": 1,
+ "b": 2,
+ "foo": Object {
+ "__typename": "foo",
+ "bar": Object {
+ "__ref": "bar:foobar",
+ },
+ "c": 3,
+ "d": 4,
+ },
+ },
+ "bar:foobar": Object {
+ "__typename": "bar",
+ "e": 5,
+ "f": 6,
+ "id": "foobar",
+ },
+}
+`;
+
+exports[`ApolloClient write then read will use a default id getter if one is not specified and __typename is present along with either _id or id 1`] = `
+Object {
+ "ROOT_QUERY": Object {
+ "__typename": "Query",
+ "a": 1,
+ "b": 2,
+ "bar": Object {
+ "__typename": "bar",
+ "foo": Object {
+ "__ref": "foo:barfoo",
+ },
+ "i": 10,
+ "j": 11,
+ },
+ "foo": Object {
+ "__typename": "foo",
+ "bar": Object {
+ "__ref": "bar:foobar",
+ },
+ "c": 3,
+ "d": 4,
+ },
+ "g": 8,
+ "h": 9,
+ },
+ "bar:foobar": Object {
+ "__typename": "bar",
+ "e": 5,
+ "f": 6,
+ "id": "foobar",
+ },
+ "foo:barfoo": Object {
+ "__typename": "foo",
+ "_id": "barfoo",
+ "k": 12,
+ "l": 13,
+ },
+}
+`;
+
+exports[`ApolloClient write then read will write data locally which will then be read back 1`] = `
+Object {
+ "bar": Object {
+ "__typename": "Bar",
+ "d": 8,
+ "e": 9,
+ "f": 6,
+ },
+ "foo": Object {
+ "__typename": "Foo",
+ "a": 7,
+ "b": 2,
+ "bar": Object {
+ "__ref": "bar",
+ },
+ "c": 3,
+ },
+}
+`;
+
+exports[`ApolloClient write then read will write data to a specific id 1`] = `
+Object {
+ "ROOT_QUERY": Object {
+ "__typename": "Query",
+ "a": 1,
+ "b": 2,
+ "foo": Object {
+ "__typename": "foo",
+ "bar": Object {
+ "__ref": "foobar",
+ },
+ "c": 3,
+ "d": 4,
+ },
+ },
+ "foobar": Object {
+ "__typename": "bar",
+ "e": 5,
+ "f": 6,
+ "key": "foobar",
+ },
+}
+`;
+
+exports[`ApolloClient writeFragment will write some deeply nested data into the store at any id 1`] = `
+Object {
+ "bar": Object {
+ "__typename": "Bar",
+ "i": 7,
+ },
+ "foo": Object {
+ "__typename": "Foo",
+ "e": 4,
+ "h": Object {
+ "__ref": "bar",
+ },
+ },
+}
+`;
+
+exports[`ApolloClient writeFragment will write some deeply nested data into the store at any id 2`] = `
+Object {
+ "bar": Object {
+ "__typename": "Bar",
+ "i": 7,
+ "j": 8,
+ "k": 9,
+ },
+ "foo": Object {
+ "__typename": "Foo",
+ "e": 4,
+ "f": 5,
+ "g": 6,
+ "h": Object {
+ "__ref": "bar",
+ },
+ },
+}
+`;
+
+exports[`ApolloClient writeFragment will write some deeply nested data into the store at any id 3`] = `
+Object {
+ "bar": Object {
+ "__typename": "Bar",
+ "i": 10,
+ "j": 8,
+ "k": 9,
+ },
+ "foo": Object {
+ "__typename": "Foo",
+ "e": 4,
+ "f": 5,
+ "g": 6,
+ "h": Object {
+ "__ref": "bar",
+ },
+ },
+}
+`;
+
+exports[`ApolloClient writeFragment will write some deeply nested data into the store at any id 4`] = `
+Object {
+ "bar": Object {
+ "__typename": "Bar",
+ "i": 10,
+ "j": 11,
+ "k": 12,
+ },
+ "foo": Object {
+ "__typename": "Foo",
+ "e": 4,
+ "f": 5,
+ "g": 6,
+ "h": Object {
+ "__ref": "bar",
+ },
+ },
+}
+`;
+
+exports[`ApolloClient writeFragment will write some deeply nested data into the store at any id 5`] = `
+Object {
+ "bar": Object {
+ "__typename": "Bar",
+ "i": 7,
+ "j": 8,
+ "k": 9,
+ },
+ "foo": Object {
+ "__typename": "Foo",
+ "e": 4,
+ "f": 5,
+ "g": 6,
+ "h": Object {
+ "__ref": "bar",
+ },
+ },
+}
+`;
+
+exports[`ApolloClient writeFragment will write some deeply nested data into the store at any id 6`] = `
+Object {
+ "bar": Object {
+ "__typename": "Bar",
+ "i": 10,
+ "j": 11,
+ "k": 12,
+ },
+ "foo": Object {
+ "__typename": "Foo",
+ "e": 4,
+ "f": 5,
+ "g": 6,
+ "h": Object {
+ "__ref": "bar",
+ },
+ },
+}
+`;
+
+exports[`ApolloClient writeQuery will write some deeply nested data to the store 1`] = `
+Object {
+ "ROOT_QUERY": Object {
+ "__typename": "Query",
+ "a": 1,
+ "d": Object {
+ "__typename": "D",
+ "e": 4,
+ },
+ },
+}
+`;
+
+exports[`ApolloClient writeQuery will write some deeply nested data to the store 2`] = `
+Object {
+ "ROOT_QUERY": Object {
+ "__typename": "Query",
+ "a": 1,
+ "d": Object {
+ "__typename": "D",
+ "h": Object {
+ "__typename": "H",
+ "i": 7,
+ },
+ },
+ },
+}
+`;
+
+exports[`ApolloClient writeQuery will write some deeply nested data to the store 3`] = `
+Object {
+ "ROOT_QUERY": Object {
+ "__typename": "Query",
+ "a": 1,
+ "b": 2,
+ "c": 3,
+ "d": Object {
+ "__typename": "D",
+ "e": 4,
+ "f": 5,
+ "g": 6,
+ "h": Object {
+ "__typename": "H",
+ "i": 7,
+ "j": 8,
+ "k": 9,
+ },
+ },
+ },
+}
+`;
diff --git a/src/__tests__/__snapshots__/client.ts.snap b/src/__tests__/__snapshots__/client.ts.snap
new file mode 100644
--- /dev/null
+++ b/src/__tests__/__snapshots__/client.ts.snap
@@ -0,0 +1,43 @@
+// Jest Snapshot v1, https://goo.gl/fbAQLP
+
+exports[`@connection should run a query with the @connection directive and write the result to the store key defined in the directive 1`] = `
+Object {
+ "ROOT_QUERY": Object {
+ "__typename": "Query",
+ "books:abc": Array [
+ Object {
+ "__typename": "Book",
+ "name": "abcd",
+ },
+ ],
+ },
+}
+`;
+
+exports[`@connection should run a query with the connection directive and filter arguments and write the result to the correct store key 1`] = `
+Object {
+ "ROOT_QUERY": Object {
+ "__typename": "Query",
+ "books:abc({\\"order\\":\\"popularity\\"})": Array [
+ Object {
+ "__typename": "Book",
+ "name": "abcd",
+ },
+ ],
+ },
+}
+`;
+
+exports[`@connection should support cache field policies that filter key arguments 1`] = `
+Object {
+ "ROOT_QUERY": Object {
+ "__typename": "Query",
+ "books:{\\"order\\":\\"popularity\\"}": Array [
+ Object {
+ "__typename": "Book",
+ "name": "abcd",
+ },
+ ],
+ },
+}
+`;
diff --git a/packages/apollo-client/src/__tests__/__snapshots__/graphqlSubscriptions.ts.snap b/src/__tests__/__snapshots__/graphqlSubscriptions.ts.snap
similarity index 100%
rename from packages/apollo-client/src/__tests__/__snapshots__/graphqlSubscriptions.ts.snap
rename to src/__tests__/__snapshots__/graphqlSubscriptions.ts.snap
diff --git a/packages/apollo-client/src/__tests__/client.ts b/src/__tests__/client.ts
similarity index 79%
rename from packages/apollo-client/src/__tests__/client.ts
rename to src/__tests__/client.ts
--- a/packages/apollo-client/src/__tests__/client.ts
+++ b/src/__tests__/client.ts
@@ -1,30 +1,24 @@
import { cloneDeep, assign } from 'lodash';
import { GraphQLError, ExecutionResult, DocumentNode } from 'graphql';
import gql from 'graphql-tag';
-import { ApolloLink, Observable } from 'apollo-link';
-import {
- InMemoryCache,
- IntrospectionFragmentMatcher,
- FragmentMatcherInterface,
-} from 'apollo-cache-inmemory';
-import { stripSymbols } from 'apollo-utilities';
-
-import { QueryManager } from '../core/QueryManager';
-import { WatchQueryOptions, FetchPolicy } from '../core/watchQueryOptions';
+import { Observable } from '../utilities/observables/Observable';
+import { ApolloLink } from '../link/core/ApolloLink';
+import { InMemoryCache } from '../cache/inmemory/inMemoryCache';
+import { PossibleTypesMap } from '../cache/inmemory/types';
+import { stripSymbols } from '../utilities/testing/stripSymbols';
+import { WatchQueryOptions, FetchPolicy } from '../core/watchQueryOptions';
import { ApolloError } from '../errors/ApolloError';
-
-import ApolloClient, { printAST } from '..';
-
-import subscribeAndCount from '../util/subscribeAndCount';
-import { withWarning } from '../util/wrap';
-
-import { mockSingleLink } from '../__mocks__/mockLinks';
+import { ApolloClient } from '..';
+import subscribeAndCount from '../utilities/testing/subscribeAndCount';
+import { withWarning } from '../utilities/testing/wrap';
+import { itAsync } from '../utilities/testing/itAsync';
+import { mockSingleLink } from '../utilities/testing/mocking/mockLink';
describe('client', () => {
it('can be loaded via require', () => {
/* tslint:disable */
- const ApolloClientRequire = require('../').default;
+ const ApolloClientRequire = require('../').ApolloClient;
/* tslint:enable */
const client = new ApolloClientRequire({
@@ -85,7 +79,7 @@ describe('client', () => {
);
});
- it('should allow for a single query to take place', () => {
+ itAsync('should allow for a single query to take place', (resolve, reject) => {
const query = gql`
query people {
allPeople(first: 1) {
@@ -110,10 +104,10 @@ describe('client', () => {
},
};
- return clientRoundtrip(query, { data });
+ return clientRoundtrip(resolve, reject, query, { data });
});
- it('should allow a single query with an apollo-link enabled network interface', done => {
+ itAsync('should allow a single query with an apollo-link enabled network interface', (resolve, reject) => {
const query = gql`
query people {
allPeople(first: 1) {
@@ -149,11 +143,11 @@ describe('client', () => {
client.query({ query, variables }).then(actualResult => {
expect(stripSymbols(actualResult.data)).toEqual(data);
- done();
+ resolve();
});
});
- it('should allow for a single query with complex default variables to take place', () => {
+ itAsync('should allow for a single query with complex default variables to take place', (resolve, reject) => {
const query = gql`
query stuff(
$test: Input = { key1: ["value", "value2"], key2: { key3: 4 } }
@@ -186,7 +180,7 @@ describe('client', () => {
const link = mockSingleLink({
request: { query, variables },
result: { data: result },
- });
+ }).setOnError(reject);
const client = new ApolloClient({
link,
@@ -201,10 +195,13 @@ describe('client', () => {
expect(stripSymbols(actualResult.data)).toEqual(result);
});
- return Promise.all([basic, withDefault]);
+ return Promise.all([
+ basic,
+ withDefault,
+ ]).then(resolve, reject);
});
- it('should allow for a single query with default values that get overridden with variables', () => {
+ itAsync('should allow for a single query with default values that get overridden with variables', (resolve, reject) => {
const query = gql`
query people($first: Int = 1) {
allPeople(first: $first) {
@@ -241,16 +238,13 @@ describe('client', () => {
},
};
- const link = mockSingleLink(
- {
- request: { query, variables },
- result: { data: result },
- },
- {
- request: { query, variables: override },
- result: { data: overriddenResult },
- },
- );
+ const link = mockSingleLink({
+ request: { query, variables },
+ result: { data: result },
+ }, {
+ request: { query, variables: override },
+ result: { data: overriddenResult },
+ }).setOnError(reject);
const client = new ApolloClient({
link,
@@ -273,10 +267,14 @@ describe('client', () => {
);
});
- return Promise.all([basic, withDefault, withOverride]);
+ return Promise.all([
+ basic,
+ withDefault,
+ withOverride,
+ ]).then(resolve, reject);
});
- it('should allow fragments on root query', () => {
+ itAsync('should allow fragments on root query', (resolve, reject) => {
const query = gql`
query {
...QueryFragment
@@ -300,10 +298,10 @@ describe('client', () => {
__typename: 'Query',
};
- return clientRoundtrip(query, { data }, null);
+ return clientRoundtrip(resolve, reject, query, { data }, null);
});
- it('should allow fragments on root query with ifm', () => {
+ itAsync('should allow fragments on root query with ifm', (resolve, reject) => {
const query = gql`
query {
...QueryFragment
@@ -327,28 +325,12 @@ describe('client', () => {
__typename: 'Query',
};
- const ifm = new IntrospectionFragmentMatcher({
- introspectionQueryResultData: {
- __schema: {
- types: [
- {
- kind: 'UNION',
- name: 'Query',
- possibleTypes: [
- {
- name: 'Record',
- },
- ],
- },
- ],
- },
- },
+ return clientRoundtrip(resolve, reject, query, { data }, null, {
+ Query: ['Record'],
});
-
- return clientRoundtrip(query, { data }, null, ifm);
});
- it('should merge fragments on root query', () => {
+ itAsync('should merge fragments on root query', (resolve, reject) => {
// The fragment should be used after the selected fields for the query.
// Otherwise, the results aren't merged.
// see: https://github.com/apollographql/apollo-client/issues/1479
@@ -378,28 +360,12 @@ describe('client', () => {
__typename: 'Query',
};
- const ifm = new IntrospectionFragmentMatcher({
- introspectionQueryResultData: {
- __schema: {
- types: [
- {
- kind: 'UNION',
- name: 'Query',
- possibleTypes: [
- {
- name: 'Record',
- },
- ],
- },
- ],
- },
- },
+ return clientRoundtrip(resolve, reject, query, { data }, null, {
+ Query: ['Record'],
});
-
- return clientRoundtrip(query, { data }, null, ifm);
});
- it('store can be rehydrated from the server', () => {
+ itAsync('store can be rehydrated from the server', (resolve, reject) => {
const query = gql`
query people {
allPeople(first: 1) {
@@ -423,27 +389,17 @@ describe('client', () => {
const link = mockSingleLink({
request: { query },
result: { data },
- });
+ }).setOnError(reject);
const initialState: any = {
data: {
- 'ROOT_QUERY.allPeople({"first":"1"}).people.0': {
- name: 'Luke Skywalker',
- },
- 'ROOT_QUERY.allPeople({"first":1})': {
- people: [
- {
- type: 'id',
- generated: true,
- id: 'ROOT_QUERY.allPeople({"first":"1"}).people.0',
- },
- ],
- },
ROOT_QUERY: {
'allPeople({"first":1})': {
- type: 'id',
- id: 'ROOT_QUERY.allPeople({"first":1})',
- generated: true,
+ people: [
+ {
+ name: 'Luke Skywalker',
+ },
+ ],
},
},
optimistic: [],
@@ -464,10 +420,10 @@ describe('client', () => {
expect(finalState.data).toEqual(
(client.cache as InMemoryCache).extract(),
);
- });
+ }).then(resolve, reject);
});
- it('store can be rehydrated from the server using the shadow method', () => {
+ itAsync('store can be rehydrated from the server using the shadow method', (resolve, reject) => {
const query = gql`
query people {
allPeople(first: 1) {
@@ -491,27 +447,17 @@ describe('client', () => {
const link = mockSingleLink({
request: { query },
result: { data },
- });
+ }).setOnError(reject);
const initialState: any = {
data: {
- 'ROOT_QUERY.allPeople({"first":"1"}).people.0': {
- name: 'Luke Skywalker',
- },
- 'ROOT_QUERY.allPeople({"first":1})': {
- people: [
- {
- type: 'id',
- generated: true,
- id: 'ROOT_QUERY.allPeople({"first":"1"}).people.0',
- },
- ],
- },
ROOT_QUERY: {
'allPeople({"first":1})': {
- type: 'id',
- id: 'ROOT_QUERY.allPeople({"first":1})',
- generated: true,
+ people: [
+ {
+ name: 'Luke Skywalker',
+ },
+ ],
},
},
optimistic: [],
@@ -530,10 +476,10 @@ describe('client', () => {
return client.query({ query }).then(result => {
expect(stripSymbols(result.data)).toEqual(data);
expect(finalState.data).toEqual(client.extract());
- });
+ }).then(resolve, reject);
});
- it('stores shadow of restore returns the same result as accessing the method directly on the cache', () => {
+ itAsync('store\'s shadow of restore returns the same result as accessing the method directly on the cache', (resolve, reject) => {
const query = gql`
query people {
allPeople(first: 1) {
@@ -557,7 +503,7 @@ describe('client', () => {
const link = mockSingleLink({
request: { query },
result: { data },
- });
+ }).setOnError(reject);
const initialState: any = {
data: {
@@ -594,9 +540,11 @@ describe('client', () => {
expect(client.restore(initialState.data)).toEqual(
client.cache.restore(initialState.data),
);
+
+ resolve();
});
- it('should return errors correctly for a single query', () => {
+ itAsync('should return errors correctly for a single query', (resolve, reject) => {
const query = gql`
query people {
allPeople(first: 1) {
@@ -617,7 +565,7 @@ describe('client', () => {
const link = mockSingleLink({
request: { query },
result: { errors },
- });
+ }).setOnError(reject);
const client = new ApolloClient({
link,
@@ -626,10 +574,10 @@ describe('client', () => {
return client.query({ query }).catch((error: ApolloError) => {
expect(error.graphQLErrors).toEqual(errors);
- });
+ }).then(resolve, reject);
});
- it('should return GraphQL errors correctly for a single query with an apollo-link enabled network interface', done => {
+ itAsync('should return GraphQL errors correctly for a single query with an apollo-link enabled network interface', (resolve, reject) => {
const query = gql`
query people {
allPeople(first: 1) {
@@ -672,11 +620,11 @@ describe('client', () => {
client.query({ query }).catch((error: ApolloError) => {
expect(error.graphQLErrors).toEqual(errors);
- done();
+ resolve();
});
});
- xit('should pass a network error correctly on a query using an observable network interface with a warning', done => {
+ itAsync.skip('should pass a network error correctly on a query using an observable network interface with a warning', (resolve, reject) => {
withWarning(() => {
const query = gql`
query people {
@@ -706,12 +654,12 @@ describe('client', () => {
client.query({ query }).catch((error: ApolloError) => {
expect(error.networkError).toBeDefined();
expect(error.networkError!.message).toEqual(networkError.message);
- done();
+ resolve();
});
}, /deprecated/);
});
- it('should pass a network error correctly on a query with apollo-link network interface', done => {
+ itAsync('should pass a network error correctly on a query with apollo-link network interface', (resolve, reject) => {
const query = gql`
query people {
allPeople(first: 1) {
@@ -740,7 +688,7 @@ describe('client', () => {
client.query({ query }).catch((error: ApolloError) => {
expect(error.networkError).toBeDefined();
expect(error.networkError!.message).toEqual(networkError.message);
- done();
+ resolve();
});
});
@@ -777,7 +725,7 @@ describe('client', () => {
});
});
- xit('should surface errors in observer.next as uncaught', done => {
+ itAsync.skip('should surface errors in observer.next as uncaught', (resolve, reject) => {
const expectedError = new Error('this error should not reach the store');
const listeners = process.listeners('uncaughtException');
const oldHandler = listeners[listeners.length - 1];
@@ -787,9 +735,9 @@ describe('client', () => {
if (typeof oldHandler === 'function')
process.addListener('uncaughtException', oldHandler);
if (e === expectedError) {
- done();
+ resolve();
} else {
- done.fail(e);
+ reject(e);
}
};
process.removeListener('uncaughtException', oldHandler);
@@ -818,7 +766,7 @@ describe('client', () => {
const link = mockSingleLink({
request: { query },
result: { data },
- });
+ }).setOnError(reject);
const client = new ApolloClient({
link,
@@ -834,7 +782,7 @@ describe('client', () => {
});
});
- xit('should surfaces errors in observer.error as uncaught', done => {
+ itAsync.skip('should surface errors in observer.error as uncaught', (resolve, reject) => {
const expectedError = new Error('this error should not reach the store');
const listeners = process.listeners('uncaughtException');
const oldHandler = listeners[listeners.length - 1];
@@ -842,9 +790,9 @@ describe('client', () => {
process.removeListener('uncaughtException', handleUncaught);
process.addListener('uncaughtException', oldHandler);
if (e === expectedError) {
- done();
+ resolve();
} else {
- done.fail(e);
+ reject(e);
}
};
process.removeListener('uncaughtException', oldHandler);
@@ -863,7 +811,7 @@ describe('client', () => {
const link = mockSingleLink({
request: { query },
result: {},
- });
+ }).setOnError(reject);
const client = new ApolloClient({
link,
@@ -873,7 +821,7 @@ describe('client', () => {
const handle = client.watchQuery({ query });
handle.subscribe({
next() {
- done.fail(new Error('did not expect next to be called'));
+ reject(new Error('did not expect next to be called'));
},
error() {
throw expectedError;
@@ -881,7 +829,7 @@ describe('client', () => {
});
});
- it('should allow for subscribing to a request', done => {
+ itAsync('should allow for subscribing to a request', (resolve, reject) => {
const query = gql`
query people {
allPeople(first: 1) {
@@ -905,7 +853,7 @@ describe('client', () => {
const link = mockSingleLink({
request: { query },
result: { data },
- });
+ }).setOnError(reject);
const client = new ApolloClient({
link,
@@ -917,12 +865,12 @@ describe('client', () => {
handle.subscribe({
next(result) {
expect(stripSymbols(result.data)).toEqual(data);
- done();
+ resolve();
},
});
});
- it('should be able to transform queries', () => {
+ itAsync('should be able to transform queries', (resolve, reject) => {
const query = gql`
query {
author {
@@ -955,16 +903,13 @@ describe('client', () => {
},
};
- const link = mockSingleLink(
- {
- request: { query },
- result: { data: result },
- },
- {
- request: { query: transformedQuery },
- result: { data: transformedResult },
- },
- );
+ const link = mockSingleLink({
+ request: { query },
+ result: { data: result },
+ }, {
+ request: { query: transformedQuery },
+ result: { data: transformedResult },
+ }, false).setOnError(reject);
const client = new ApolloClient({
link,
@@ -973,10 +918,10 @@ describe('client', () => {
return client.query({ query }).then(actualResult => {
expect(stripSymbols(actualResult.data)).toEqual(transformedResult);
- });
+ }).then(resolve, reject);
});
- it('should be able to transform queries on network-only fetches', () => {
+ itAsync('should be able to transform queries on network-only fetches', (resolve, reject) => {
const query = gql`
query {
author {
@@ -1007,16 +952,13 @@ describe('client', () => {
__typename: 'Author',
},
};
- const link = mockSingleLink(
- {
- request: { query },
- result: { data: result },
- },
- {
- request: { query: transformedQuery },
- result: { data: transformedResult },
- },
- );
+ const link = mockSingleLink({
+ request: { query },
+ result: { data: result },
+ }, {
+ request: { query: transformedQuery },
+ result: { data: transformedResult },
+ }, false).setOnError(reject);
const client = new ApolloClient({
link,
@@ -1027,10 +969,11 @@ describe('client', () => {
.query({ fetchPolicy: 'network-only', query })
.then(actualResult => {
expect(stripSymbols(actualResult.data)).toEqual(transformedResult);
- });
+ })
+ .then(resolve, reject);
});
- it('should handle named fragments on mutations', () => {
+ itAsync('should handle named fragments on mutations', (resolve, reject) => {
const mutation = gql`
mutation {
starAuthor(id: 12) {
@@ -1058,7 +1001,7 @@ describe('client', () => {
const link = mockSingleLink({
request: { query: mutation },
result: { data: result },
- });
+ }).setOnError(reject);
const client = new ApolloClient({
link,
cache: new InMemoryCache({ addTypename: false }),
@@ -1066,10 +1009,10 @@ describe('client', () => {
return client.mutate({ mutation }).then(actualResult => {
expect(stripSymbols(actualResult.data)).toEqual(result);
- });
+ }).then(resolve, reject);
});
- it('should be able to handle named fragments on network-only queries', () => {
+ itAsync('should be able to handle named fragments on network-only queries', (resolve, reject) => {
const query = gql`
fragment authorDetails on Author {
firstName
@@ -1094,7 +1037,7 @@ describe('client', () => {
const link = mockSingleLink({
request: { query },
result: { data: result },
- });
+ }).setOnError(reject);
const client = new ApolloClient({
link,
@@ -1105,10 +1048,11 @@ describe('client', () => {
.query({ fetchPolicy: 'network-only', query })
.then(actualResult => {
expect(stripSymbols(actualResult.data)).toEqual(result);
- });
+ })
+ .then(resolve, reject);
});
- it('should be able to handle named fragments with multiple fragments', () => {
+ itAsync('should be able to handle named fragments with multiple fragments', (resolve, reject) => {
const query = gql`
query {
author {
@@ -1139,7 +1083,7 @@ describe('client', () => {
const link = mockSingleLink({
request: { query },
result: { data: result },
- });
+ }).setOnError(reject);
const client = new ApolloClient({
link,
cache: new InMemoryCache({ addTypename: false }),
@@ -1147,10 +1091,10 @@ describe('client', () => {
return client.query({ query }).then(actualResult => {
expect(stripSymbols(actualResult.data)).toEqual(result);
- });
+ }).then(resolve, reject);
});
- it('should be able to handle named fragments', () => {
+ itAsync('should be able to handle named fragments', (resolve, reject) => {
const query = gql`
query {
author {
@@ -1175,7 +1119,7 @@ describe('client', () => {
const link = mockSingleLink({
request: { query },
result: { data: result },
- });
+ }).setOnError(reject);
const client = new ApolloClient({
link,
cache: new InMemoryCache({ addTypename: false }),
@@ -1183,10 +1127,10 @@ describe('client', () => {
return client.query({ query }).then(actualResult => {
expect(stripSymbols(actualResult.data)).toEqual(result);
- });
+ }).then(resolve, reject);
});
- it('should be able to handle inlined fragments on an Interface type', () => {
+ itAsync('should be able to handle inlined fragments on an Interface type', (resolve, reject) => {
const query = gql`
query items {
items {
@@ -1218,49 +1162,24 @@ describe('client', () => {
],
};
- const fancyFragmentMatcher = (
- idValue: any, // TODO types, please.
- typeCondition: string,
- context: any,
- ): boolean => {
- const obj = context.store.get(idValue.id);
-
- if (!obj) {
- return false;
- }
-
- const implementingTypesMap: { [key: string]: string[] } = {
- Item: ['ColorItem', 'MonochromeItem'],
- };
-
- if (obj.__typename === typeCondition) {
- return true;
- }
-
- const implementingTypes = implementingTypesMap[typeCondition];
- if (implementingTypes && implementingTypes.indexOf(obj.__typename) > -1) {
- return true;
- }
-
- return false;
- };
-
const link = mockSingleLink({
request: { query },
result: { data: result },
- });
+ }).setOnError(reject);
const client = new ApolloClient({
link,
cache: new InMemoryCache({
- fragmentMatcher: { match: fancyFragmentMatcher },
+ possibleTypes: {
+ Item: ['ColorItem', 'MonochromeItem'],
+ },
}),
});
return client.query({ query }).then((actualResult: any) => {
expect(stripSymbols(actualResult.data)).toEqual(result);
- });
+ }).then(resolve, reject);
});
- it('should be able to handle inlined fragments on an Interface type with introspection fragment matcher', () => {
+ itAsync('should be able to handle inlined fragments on an Interface type with introspection fragment matcher', (resolve, reject) => {
const query = gql`
query items {
items {
@@ -1295,40 +1214,23 @@ describe('client', () => {
const link = mockSingleLink({
request: { query },
result: { data: result },
- });
-
- const ifm = new IntrospectionFragmentMatcher({
- introspectionQueryResultData: {
- __schema: {
- types: [
- {
- kind: 'UNION',
- name: 'Item',
- possibleTypes: [
- {
- name: 'ColorItem',
- },
- {
- name: 'MonochromeItem',
- },
- ],
- },
- ],
- },
- },
- });
+ }).setOnError(reject);
const client = new ApolloClient({
link,
- cache: new InMemoryCache({ fragmentMatcher: ifm }),
+ cache: new InMemoryCache({
+ possibleTypes: {
+ Item: ['ColorItem', 'MonochromeItem'],
+ },
+ }),
});
return client.query({ query }).then(actualResult => {
expect(stripSymbols(actualResult.data)).toEqual(result);
- });
+ }).then(resolve, reject);
});
- it('should call updateQueries and update after mutation on query with inlined fragments on an Interface type', done => {
+ itAsync('should call updateQueries and update after mutation on query with inlined fragments on an Interface type', (resolve, reject) => {
const query = gql`
query items {
items {
@@ -1369,41 +1271,21 @@ describe('client', () => {
fortuneCookie: 'The waiter spit in your food',
};
- const link = mockSingleLink(
- {
- request: { query },
- result: { data: result },
- },
- {
- request: { query: mutation },
- result: { data: mutationResult },
- },
- );
-
- const ifm = new IntrospectionFragmentMatcher({
- introspectionQueryResultData: {
- __schema: {
- types: [
- {
- kind: 'UNION',
- name: 'Item',
- possibleTypes: [
- {
- name: 'ColorItem',
- },
- {
- name: 'MonochromeItem',
- },
- ],
- },
- ],
- },
- },
- });
+ const link = mockSingleLink({
+ request: { query },
+ result: { data: result },
+ }, {
+ request: { query: mutation },
+ result: { data: mutationResult },
+ }).setOnError(reject);
const client = new ApolloClient({
link,
- cache: new InMemoryCache({ fragmentMatcher: ifm }),
+ cache: new InMemoryCache({
+ possibleTypes: {
+ Item: ['ColorItem', 'MonochromeItem'],
+ },
+ }),
});
const queryUpdaterSpy = jest.fn();
@@ -1427,14 +1309,14 @@ describe('client', () => {
expect(queryUpdaterSpy).toBeCalled();
expect(updateSpy).toBeCalled();
sub.unsubscribe();
- done();
+ resolve();
})
.catch(err => {
- done.fail(err);
+ reject(err);
});
},
error(err) {
- done.fail(err);
+ reject(err);
},
});
});
@@ -1489,7 +1371,7 @@ describe('client', () => {
});
});
- it('does not deduplicate queries if option is set to false', () => {
+ itAsync('does not deduplicate queries if option is set to false', (resolve, reject) => {
const queryDoc = gql`
query {
author {
@@ -1510,17 +1392,14 @@ describe('client', () => {
// we have two responses for identical queries, but only the first should be requested.
// the second one should never make it through to the network interface.
- const link = mockSingleLink(
- {
- request: { query: queryDoc },
- result: { data },
- delay: 10,
- },
- {
- request: { query: queryDoc },
- result: { data: data2 },
- },
- );
+ const link = mockSingleLink({
+ request: { query: queryDoc },
+ result: { data },
+ delay: 10,
+ }, {
+ request: { query: queryDoc },
+ result: { data: data2 },
+ }).setOnError(reject);
const client = new ApolloClient({
link,
cache: new InMemoryCache({ addTypename: false }),
@@ -1535,10 +1414,10 @@ describe('client', () => {
return Promise.all([q1, q2]).then(([result1, result2]) => {
expect(stripSymbols(result1.data)).toEqual(data);
expect(stripSymbols(result2.data)).toEqual(data2);
- });
+ }).then(resolve, reject);
});
- it('deduplicates queries by default', () => {
+ itAsync('deduplicates queries by default', (resolve, reject) => {
const queryDoc = gql`
query {
author {
@@ -1559,17 +1438,14 @@ describe('client', () => {
// we have two responses for identical queries, but only the first should be requested.
// the second one should never make it through to the network interface.
- const link = mockSingleLink(
- {
- request: { query: queryDoc },
- result: { data },
- delay: 10,
- },
- {
- request: { query: queryDoc },
- result: { data: data2 },
- },
- );
+ const link = mockSingleLink({
+ request: { query: queryDoc },
+ result: { data },
+ delay: 10,
+ }, {
+ request: { query: queryDoc },
+ result: { data: data2 },
+ }).setOnError(reject);
const client = new ApolloClient({
link,
cache: new InMemoryCache({ addTypename: false }),
@@ -1581,10 +1457,10 @@ describe('client', () => {
// if deduplication didn't happen, result.data will equal data2.
return Promise.all([q1, q2]).then(([result1, result2]) => {
expect(result1.data).toEqual(result2.data);
- });
+ }).then(resolve, reject);
});
- it('unsubscribes from deduplicated observables only once', done => {
+ itAsync('unsubscribes from deduplicated observables only once', (resolve, reject) => {
const document: DocumentNode = gql`
query test1($x: String) {
test(x: $x)
@@ -1602,7 +1478,7 @@ describe('client', () => {
observer.complete();
return () => {
unsubscribed = true;
- setTimeout(done, 0);
+ setTimeout(resolve, 0);
};
});
}),
@@ -1680,11 +1556,11 @@ describe('client', () => {
},
};
- it('for internal store', () => {
+ itAsync('for internal store', (resolve, reject) => {
const link = mockSingleLink({
request: { query },
result: { data },
- });
+ }).setOnError(reject);
const client = new ApolloClient({
link,
@@ -1701,7 +1577,7 @@ describe('client', () => {
id: '1',
name: 'Luke Skywalker',
});
- });
+ }).then(resolve, reject);
});
});
@@ -1769,11 +1645,11 @@ describe('client', () => {
checkCacheAndNetworkError(() => client.query({ query }));
});
- it('fetches from cache first, then network', done => {
+ itAsync('fetches from cache first, then network', (resolve, reject) => {
const link = mockSingleLink({
request: { query },
result: { data: networkFetch },
- });
+ }).setOnError(reject);
const client = new ApolloClient({
link,
@@ -1787,21 +1663,21 @@ describe('client', () => {
fetchPolicy: 'cache-and-network',
});
- subscribeAndCount(done, obs, (handleCount, result) => {
+ subscribeAndCount(reject, obs, (handleCount, result) => {
if (handleCount === 1) {
expect(stripSymbols(result.data)).toEqual(initialData);
} else if (handleCount === 2) {
expect(stripSymbols(result.data)).toEqual(networkFetch);
- done();
+ resolve();
}
});
});
- it('does not fail if cache entry is not present', done => {
+ itAsync('does not fail if cache entry is not present', (resolve, reject) => {
const link = mockSingleLink({
request: { query },
result: { data: networkFetch },
- });
+ }).setOnError(reject);
const client = new ApolloClient({
link,
cache: new InMemoryCache({ addTypename: false }),
@@ -1812,20 +1688,20 @@ describe('client', () => {
fetchPolicy: 'cache-and-network',
});
- subscribeAndCount(done, obs, (handleCount, result) => {
+ subscribeAndCount(reject, obs, (handleCount, result) => {
if (handleCount === 1) {
expect(result.data).toBe(undefined);
expect(result.loading).toBe(true);
} else if (handleCount === 2) {
expect(stripSymbols(result.data)).toEqual(networkFetch);
expect(result.loading).toBe(false);
- done();
+ resolve();
}
});
});
- it('fails if network request fails', done => {
- const link = mockSingleLink(); // no queries = no replies.
+ itAsync('fails if network request fails', (resolve, reject) => {
+ const link = mockSingleLink().setOnError(error => { throw error }); // no queries = no replies.
const client = new ApolloClient({
link,
cache: new InMemoryCache({ addTypename: false }),
@@ -1846,16 +1722,16 @@ describe('client', () => {
error: e => {
expect(e.message).toMatch(/No more mocked responses/);
expect(count).toBe(1); // make sure next was called.
- setTimeout(done, 100);
+ setTimeout(resolve, 100);
},
});
});
- it('fetches from cache first, then network and does not have an unhandled error', done => {
+ itAsync('fetches from cache first, then network and does not have an unhandled error', (resolve, reject) => {
const link = mockSingleLink({
request: { query },
result: { errors: [{ message: 'network failure' }] },
- });
+ }).setOnError(reject);
const client = new ApolloClient({
link,
@@ -1870,7 +1746,7 @@ describe('client', () => {
});
let shouldFail = true;
process.once('unhandledRejection', rejection => {
- if (shouldFail) done.fail('promise had an unhandledRejection');
+ if (shouldFail) reject('promise had an unhandledRejection');
});
let count = 0;
obs.subscribe({
@@ -1884,7 +1760,7 @@ describe('client', () => {
expect(count).toBe(1); // make sure next was called.
setTimeout(() => {
shouldFail = false;
- done();
+ resolve();
}, 0);
},
});
@@ -1914,7 +1790,7 @@ describe('client', () => {
);
});
- it('are not watching the store or notifying on updates', done => {
+ itAsync('are not watching the store or notifying on updates', (resolve, reject) => {
const query = gql`
{
test
@@ -1926,14 +1802,14 @@ describe('client', () => {
const link = mockSingleLink({
request: { query },
result: { data },
- });
+ }).setOnError(reject);
const client = new ApolloClient({ link, cache: new InMemoryCache() });
const obs = client.watchQuery({ query, fetchPolicy: 'cache-first' });
let handleCalled = false;
- subscribeAndCount(done, obs, (handleCount, result) => {
+ subscribeAndCount(reject, obs, (handleCount, result) => {
if (handleCount === 1) {
expect(stripSymbols(result.data)).toEqual(data);
obs.setOptions({ fetchPolicy: 'standby' }).then(() => {
@@ -1942,20 +1818,20 @@ describe('client', () => {
});
setTimeout(() => {
if (!handleCalled) {
- done();
+ resolve();
}
}, 20);
}
if (handleCount === 2) {
handleCalled = true;
- done.fail(
+ reject(
new Error('Handle should never be called on standby query'),
);
}
});
});
- it('return the current result when coming out of standby', done => {
+ itAsync('return the current result when coming out of standby', (resolve, reject) => {
const query = gql`
{
test
@@ -1967,14 +1843,14 @@ describe('client', () => {
const link = mockSingleLink({
request: { query },
result: { data },
- });
+ }).setOnError(reject);
const client = new ApolloClient({ link, cache: new InMemoryCache() });
const obs = client.watchQuery({ query, fetchPolicy: 'cache-first' });
let handleCalled = false;
- subscribeAndCount(done, obs, (handleCount, result) => {
+ subscribeAndCount(reject, obs, (handleCount, result) => {
if (handleCount === 1) {
expect(stripSymbols(result.data)).toEqual(data);
obs.setOptions({ fetchPolicy: 'standby' }).then(() => {
@@ -1988,7 +1864,7 @@ describe('client', () => {
if (handleCount === 2) {
handleCalled = true;
expect(stripSymbols(result.data)).toEqual(data2);
- done();
+ resolve();
}
});
});
@@ -2014,44 +1890,36 @@ describe('client', () => {
},
};
- let link: any;
- beforeEach(() => {
- link = mockSingleLink(
- {
- request: { query },
- result: { data: firstFetch },
- },
- {
- request: { query },
- result: { data: secondFetch },
- },
- );
- //
- });
-
- afterAll(() => jest.useRealTimers());
+ function makeLink(reject: (reason: any) => any) {
+ return mockSingleLink({
+ request: { query },
+ result: { data: firstFetch },
+ }, {
+ request: { query },
+ result: { data: secondFetch },
+ }).setOnError(reject);
+ }
- it('forces the query to rerun', () => {
+ itAsync('forces the query to rerun', (resolve, reject) => {
const client = new ApolloClient({
- link,
+ link: makeLink(reject),
cache: new InMemoryCache({ addTypename: false }),
});
// Run a query first to initialize the store
- return (
- client
- .query({ query })
- // then query for real
- .then(() => client.query({ query, fetchPolicy: 'network-only' }))
- .then(result => {
- expect(stripSymbols(result.data)).toEqual({ myNumber: { n: 2 } });
- })
- );
+ return client
+ .query({ query })
+ // then query for real
+ .then(() => client.query({ query, fetchPolicy: 'network-only' }))
+ .then(result => {
+ expect(stripSymbols(result.data)).toEqual({ myNumber: { n: 2 } });
+ })
+ .then(resolve, reject);
});
- it('can be disabled with ssrMode', () => {
+ itAsync('can be disabled with ssrMode', (resolve, reject) => {
const client = new ApolloClient({
- link,
+ link: makeLink(reject),
ssrMode: true,
cache: new InMemoryCache({ addTypename: false }),
});
@@ -2059,56 +1927,48 @@ describe('client', () => {
const options: WatchQueryOptions = { query, fetchPolicy: 'network-only' };
// Run a query first to initialize the store
- return (
- client
- .query({ query })
- // then query for real
- .then(() => client.query(options))
- .then(result => {
- expect(stripSymbols(result.data)).toEqual({ myNumber: { n: 1 } });
-
- // Test that options weren't mutated, issue #339
- expect(options).toEqual({
- query,
- fetchPolicy: 'network-only',
- });
- })
- );
+ return client
+ .query({ query })
+ // then query for real
+ .then(() => client.query(options))
+ .then(result => {
+ expect(stripSymbols(result.data)).toEqual({ myNumber: { n: 1 } });
+ // Test that options weren't mutated, issue #339
+ expect(options).toEqual({
+ query,
+ fetchPolicy: 'network-only',
+ });
+ })
+ .then(resolve, reject);
});
- it('can temporarily be disabled with ssrForceFetchDelay', () => {
- jest.useFakeTimers();
+ itAsync('can temporarily be disabled with ssrForceFetchDelay', (resolve, reject) => {
const client = new ApolloClient({
- link,
+ link: makeLink(reject),
ssrForceFetchDelay: 100,
cache: new InMemoryCache({ addTypename: false }),
});
// Run a query first to initialize the store
- const outerPromise = client
+ return client
.query({ query })
// then query for real
.then(() => {
- const promise = client.query({ query, fetchPolicy: 'network-only' });
- jest.runTimersToTime(0);
- return promise;
+ return client.query({ query, fetchPolicy: 'network-only' });
})
- .then(result => {
+ .then(async result => {
expect(stripSymbols(result.data)).toEqual({ myNumber: { n: 1 } });
- jest.runTimersToTime(100);
- const promise = client.query({ query, fetchPolicy: 'network-only' });
- jest.runTimersToTime(0);
- return promise;
+ await new Promise(resolve => setTimeout(resolve, 100));
+ return client.query({ query, fetchPolicy: 'network-only' });
})
.then(result => {
expect(stripSymbols(result.data)).toEqual({ myNumber: { n: 2 } });
- });
- jest.runTimersToTime(0);
- return outerPromise;
+ })
+ .then(resolve, reject);
});
});
- it('should pass a network error correctly on a mutation', done => {
+ itAsync('should pass a network error correctly on a mutation', (resolve, reject) => {
const mutation = gql`
mutation {
person {
@@ -2129,23 +1989,23 @@ describe('client', () => {
request: { query: mutation },
result: { data },
error: networkError,
- }),
+ }).setOnError(reject),
cache: new InMemoryCache({ addTypename: false }),
});
client
.mutate({ mutation })
.then(_ => {
- done.fail(new Error('Returned a result when it should not have.'));
+ reject(new Error('Returned a result when it should not have.'));
})
.catch((error: ApolloError) => {
expect(error.networkError).toBeDefined();
expect(error.networkError!.message).toBe(networkError.message);
- done();
+ resolve();
});
});
- it('should pass a GraphQL error correctly on a mutation', done => {
+ itAsync('should pass a GraphQL error correctly on a mutation', (resolve, reject) => {
const mutation = gql`
mutation {
newPerson {
@@ -2167,22 +2027,22 @@ describe('client', () => {
link: mockSingleLink({
request: { query: mutation },
result: { data, errors },
- }),
+ }).setOnError(reject),
cache: new InMemoryCache({ addTypename: false }),
});
client
.mutate({ mutation })
.then(_ => {
- done.fail(new Error('Returned a result when it should not have.'));
+ reject(new Error('Returned a result when it should not have.'));
})
.catch((error: ApolloError) => {
expect(error.graphQLErrors).toBeDefined();
expect(error.graphQLErrors.length).toBe(1);
expect(error.graphQLErrors[0].message).toBe(errors[0].message);
- done();
+ resolve();
});
});
- it('should allow errors to be returned from a mutation', done => {
+ itAsync('should allow errors to be returned from a mutation', (resolve, reject) => {
const mutation = gql`
mutation {
newPerson {
@@ -2204,7 +2064,7 @@ describe('client', () => {
link: mockSingleLink({
request: { query: mutation },
result: { data, errors },
- }),
+ }).setOnError(reject),
cache: new InMemoryCache({ addTypename: false }),
});
client
@@ -2214,13 +2074,14 @@ describe('client', () => {
expect(result.errors.length).toBe(1);
expect(result.errors[0].message).toBe(errors[0].message);
expect(result.data).toEqual(data);
- done();
+ resolve();
})
.catch((error: ApolloError) => {
throw error;
});
});
- it('should strip errors on a mutation if ignored', done => {
+
+ itAsync('should strip errors on a mutation if ignored', (resolve, reject) => {
const mutation = gql`
mutation {
newPerson {
@@ -2242,7 +2103,7 @@ describe('client', () => {
link: mockSingleLink({
request: { query: mutation },
result: { data, errors },
- }),
+ }).setOnError(reject),
cache: new InMemoryCache({ addTypename: false }),
});
client
@@ -2250,14 +2111,14 @@ describe('client', () => {
.then(result => {
expect(result.errors).toBeUndefined();
expect(stripSymbols(result.data)).toEqual(data);
- done();
+ resolve();
})
.catch((error: ApolloError) => {
throw error;
});
});
- it('should rollback optimistic after mutation got a GraphQL error', done => {
+ itAsync('should rollback optimistic after mutation got a GraphQL error', (resolve, reject) => {
const mutation = gql`
mutation {
newPerson {
@@ -2281,7 +2142,7 @@ describe('client', () => {
link: mockSingleLink({
request: { query: mutation },
result: { data, errors },
- }),
+ }).setOnError(reject),
cache: new InMemoryCache({ addTypename: false }),
});
const mutatePromise = client.mutate({
@@ -2296,18 +2157,20 @@ describe('client', () => {
},
});
- const { data, optimisticData } = client.cache as any;
- expect(optimisticData).not.toBe(data);
- expect(optimisticData.parent).toBe(data);
+ {
+ const { data, optimisticData } = client.cache as any;
+ expect(optimisticData).not.toBe(data);
+ expect(optimisticData.parent).toBe(data);
+ }
mutatePromise
.then(_ => {
- done.fail(new Error('Returned a result when it should not have.'));
+ reject(new Error('Returned a result when it should not have.'));
})
.catch((_: ApolloError) => {
const { data, optimisticData } = client.cache as any;
expect(optimisticData).toBe(data);
- done();
+ resolve();
});
});
@@ -2549,7 +2412,7 @@ describe('client', () => {
};
// it('with self-made store', () => {
- // const link = mockSingleLink({
+ // const link = mockSingleLink(reject, {
// request: { query: cloneDeep(query) },
// result: { data },
// });
@@ -2571,7 +2434,7 @@ describe('client', () => {
// });
});
- it('should propagate errors from network interface to observers', done => {
+ itAsync('should propagate errors from network interface to observers', (resolve, reject) => {
const link = ApolloLink.from([
() =>
new Observable(x => {
@@ -2598,12 +2461,12 @@ describe('client', () => {
handle.subscribe({
error(error) {
expect(error.message).toBe('Network error: Uh oh!');
- done();
+ resolve();
},
});
});
- it('should be able to refetch after there was a network error', done => {
+ itAsync('should be able to refetch after there was a network error', (resolve, reject) => {
const query: DocumentNode = gql`
query somethingelse {
allPeople(first: 1) {
@@ -2619,8 +2482,8 @@ describe('client', () => {
const link = mockSingleLink(
{ request: { query }, result: { data } },
{ request: { query }, error: new Error('This is an error!') },
- { request: { query }, result: { data: dataTwo } },
- );
+ { request: { query }, result: { data: dataTwo } }
+ ).setOnError(reject);
const client = new ApolloClient({
link,
cache: new InMemoryCache({ addTypename: false }),
@@ -2642,7 +2505,7 @@ describe('client', () => {
switch (count++) {
case 0:
if (!result.data!.allPeople) {
- done.fail('Should have data by this point');
+ reject('Should have data by this point');
break;
}
// First result is loaded, run a refetch to get the second result
@@ -2654,7 +2517,7 @@ describe('client', () => {
);
setTimeout(() => {
observable.refetch().then(() => {
- done.fail('Expected error value on first refetch.');
+ reject('Expected error value on first refetch.');
}, noop);
}, 0);
break;
@@ -2675,19 +2538,19 @@ describe('client', () => {
expect(result.networkStatus).toBe(7);
expect(result.errors).toBeFalsy();
if (!result.data) {
- done.fail('Should have data by this point');
+ reject('Should have data by this point');
break;
}
expect(stripSymbols(result.data.allPeople)).toEqual(
dataTwo.allPeople,
);
- done();
+ resolve();
break;
default:
throw new Error('Unexpected fall through');
}
} catch (e) {
- done.fail(e);
+ reject(e);
}
},
error(error) {
@@ -2710,7 +2573,7 @@ describe('client', () => {
// which should now contain valid data.
setTimeout(() => {
observable.refetch().catch(() => {
- done.fail('Expected good data on second refetch.');
+ reject('Expected good data on second refetch.');
});
}, 0);
},
@@ -2719,7 +2582,7 @@ describe('client', () => {
subscription = observable.subscribe(observerOptions);
});
- it('should throw a GraphQL error', () => {
+ itAsync('should throw a GraphQL error', (resolve, reject) => {
const query = gql`
query {
posts {
@@ -2737,7 +2600,7 @@ describe('client', () => {
const link = mockSingleLink({
request: { query },
result: { errors },
- });
+ }).setOnError(reject);
const client = new ApolloClient({
link,
cache: new InMemoryCache(),
@@ -2747,10 +2610,10 @@ describe('client', () => {
expect(err.message).toBe(
'GraphQL error: Cannot query field "foo" on type "Post".',
);
- });
+ }).then(resolve, reject);
});
- it('should warn if server returns wrong data', () => {
+ itAsync('should warn if server returns wrong data', (resolve, reject) => {
const query = gql`
query {
todos {
@@ -2776,19 +2639,22 @@ describe('client', () => {
const link = mockSingleLink({
request: { query },
result,
- });
+ }).setOnError(reject);
const client = new ApolloClient({
link,
- cache: new InMemoryCache(),
+ cache: new InMemoryCache({
+ // Passing an empty map enables the warning:
+ possibleTypes: {},
+ }),
});
return withWarning(
() => client.query({ query }),
/Missing field description/,
- );
+ ).then(resolve, reject);
});
- it('runs a query with the connection directive and writes it to the store key defined in the directive', () => {
+ itAsync('runs a query with the connection directive and writes it to the store key defined in the directive', (resolve, reject) => {
const query = gql`
{
books(skip: 0, limit: 2) @connection(key: "abc") {
@@ -2818,7 +2684,7 @@ describe('client', () => {
const link = mockSingleLink({
request: { query: transformedQuery },
result: { data: result },
- });
+ }).setOnError(reject);
const client = new ApolloClient({
link,
@@ -2827,13 +2693,13 @@ describe('client', () => {
return client.query({ query }).then(actualResult => {
expect(stripSymbols(actualResult.data)).toEqual(result);
- });
+ }).then(resolve, reject);
});
- it('should remove the connection directive before the link is sent', () => {
+ itAsync('runs query with cache field policy analogous to @connection', (resolve, reject) => {
const query = gql`
{
- books(skip: 0, limit: 2) @connection {
+ books(skip: 0, limit: 2) {
name
}
}
@@ -2860,8 +2726,60 @@ describe('client', () => {
const link = mockSingleLink({
request: { query: transformedQuery },
result: { data: result },
+ }).setOnError(reject);
+
+ const client = new ApolloClient({
+ link,
+ cache: new InMemoryCache({
+ typePolicies: {
+ Query: {
+ fields: {
+ books: {
+ keyArgs: () => "abc",
+ },
+ },
+ },
+ },
+ }),
});
+ return client.query({ query }).then(actualResult => {
+ expect(stripSymbols(actualResult.data)).toEqual(result);
+ }).then(resolve, reject);
+ });
+
+ itAsync('should remove the connection directive before the link is sent', (resolve, reject) => {
+ const query = gql`
+ {
+ books(skip: 0, limit: 2) @connection(key: "books") {
+ name
+ }
+ }
+ `;
+
+ const transformedQuery = gql`
+ {
+ books(skip: 0, limit: 2) {
+ name
+ __typename
+ }
+ }
+ `;
+
+ const result = {
+ books: [
+ {
+ name: 'abcd',
+ __typename: 'Book',
+ },
+ ],
+ };
+
+ const link = mockSingleLink({
+ request: { query: transformedQuery },
+ result: { data: result },
+ }).setOnError(reject);
+
const client = new ApolloClient({
link,
cache: new InMemoryCache(),
@@ -2869,12 +2787,12 @@ describe('client', () => {
return client.query({ query }).then(actualResult => {
expect(stripSymbols(actualResult.data)).toEqual(result);
- });
+ }).then(resolve, reject);
});
});
-describe('@connect', () => {
- it('should run a query with the connection directive and write the result to the store key defined in the directive', () => {
+describe('@connection', () => {
+ itAsync('should run a query with the @connection directive and write the result to the store key defined in the directive', (resolve, reject) => {
const query = gql`
{
books(skip: 0, limit: 2) @connection(key: "abc") {
@@ -2904,7 +2822,7 @@ describe('@connect', () => {
const link = mockSingleLink({
request: { query: transformedQuery },
result: { data: result },
- });
+ }).setOnError(reject);
const client = new ApolloClient({
link,
@@ -2914,10 +2832,10 @@ describe('@connect', () => {
return client.query({ query }).then(actualResult => {
expect(stripSymbols(actualResult.data)).toEqual(result);
expect((client.cache as InMemoryCache).extract()).toMatchSnapshot();
- });
+ }).then(resolve, reject);
});
- it('should run a query with the connection directive and filter arguments and write the result to the correct store key', () => {
+ itAsync('should run a query with the connection directive and filter arguments and write the result to the correct store key', (resolve, reject) => {
const query = gql`
query books($order: string) {
books(skip: 0, limit: 2, order: $order)
@@ -2949,7 +2867,7 @@ describe('@connect', () => {
const link = mockSingleLink({
request: { query: transformedQuery, variables },
result: { data: result },
- });
+ }).setOnError(reject);
const client = new ApolloClient({
link,
@@ -2959,7 +2877,61 @@ describe('@connect', () => {
return client.query({ query, variables }).then(actualResult => {
expect(stripSymbols(actualResult.data)).toEqual(result);
expect((client.cache as InMemoryCache).extract()).toMatchSnapshot();
+ }).then(resolve, reject);
+ });
+
+ itAsync('should support cache field policies that filter key arguments', (resolve, reject) => {
+ const query = gql`
+ query books($order: string) {
+ books(skip: 0, limit: 2, order: $order) {
+ name
+ }
+ }
+ `;
+ const transformedQuery = gql`
+ query books($order: string) {
+ books(skip: 0, limit: 2, order: $order) {
+ name
+ __typename
+ }
+ }
+ `;
+
+ const result = {
+ books: [
+ {
+ name: 'abcd',
+ __typename: 'Book',
+ },
+ ],
+ };
+
+ const variables = { order: 'popularity' };
+
+ const link = mockSingleLink({
+ request: { query: transformedQuery, variables },
+ result: { data: result },
+ }).setOnError(reject);
+
+ const client = new ApolloClient({
+ link,
+ cache: new InMemoryCache({
+ typePolicies: {
+ Query: {
+ fields: {
+ books: {
+ keyArgs: ["order"],
+ }
+ }
+ }
+ }
+ }),
});
+
+ return client.query({ query, variables }).then(actualResult => {
+ expect(stripSymbols(actualResult.data)).toEqual(result);
+ expect((client.cache as InMemoryCache).extract()).toMatchSnapshot();
+ }).then(resolve, reject);
});
describe('default settings', () => {
@@ -2981,11 +2953,12 @@ describe('@connect', () => {
n: 2,
},
};
- it('allows setting default options for watchQuery', done => {
+
+ itAsync('allows setting default options for watchQuery', (resolve, reject) => {
const link = mockSingleLink({
request: { query },
result: { data: networkFetch },
- });
+ }).setOnError(reject);
const client = new ApolloClient({
link,
cache: new InMemoryCache({ addTypename: false }),
@@ -3003,22 +2976,23 @@ describe('@connect', () => {
const obs = client.watchQuery({ query });
- subscribeAndCount(done, obs, (handleCount, result) => {
+ subscribeAndCount(reject, obs, (handleCount, result) => {
const resultData = stripSymbols(result.data);
if (handleCount === 1) {
expect(resultData).toEqual(initialData);
} else if (handleCount === 2) {
expect(resultData).toEqual(networkFetch);
- done();
+ resolve();
}
});
});
- it('allows setting default options for query', () => {
+
+ itAsync('allows setting default options for query', (resolve, reject) => {
const errors = [{ message: 'failure', name: 'failure' }];
const link = mockSingleLink({
request: { query },
result: { errors },
- });
+ }).setOnError(reject);
const client = new ApolloClient({
link,
cache: new InMemoryCache({ addTypename: false }),
@@ -3029,9 +3003,10 @@ describe('@connect', () => {
return client.query({ query }).then(result => {
expect(result.errors).toEqual(errors);
- });
+ }).then(resolve, reject);
});
- it('allows setting default options for mutation', () => {
+
+ itAsync('allows setting default options for mutation', (resolve, reject) => {
const mutation = gql`
mutation upVote($id: ID!) {
upvote(id: $id) {
@@ -3047,7 +3022,7 @@ describe('@connect', () => {
const link = mockSingleLink({
request: { query: mutation, variables: { id: 1 } },
result: { data },
- });
+ }).setOnError(reject);
const client = new ApolloClient({
link,
@@ -3059,31 +3034,32 @@ describe('@connect', () => {
return client.mutate({ mutation }).then(result => {
expect(result.data).toEqual(data);
- });
+ }).then(resolve, reject);
});
});
});
function clientRoundtrip(
+ resolve: (result: any) => any,
+ reject: (reason: any) => any,
query: DocumentNode,
data: ExecutionResult,
variables?: any,
- fragmentMatcher?: FragmentMatcherInterface,
+ possibleTypes?: PossibleTypesMap,
) {
const link = mockSingleLink({
request: { query: cloneDeep(query) },
result: data,
- });
-
- const config = {};
- if (fragmentMatcher) config.fragmentMatcher = fragmentMatcher;
+ }).setOnError(reject);
const client = new ApolloClient({
link,
- cache: new InMemoryCache(config),
+ cache: new InMemoryCache({
+ possibleTypes,
+ }),
});
return client.query({ query, variables }).then(result => {
expect(stripSymbols(result.data)).toEqual(data.data);
- });
+ }).then(resolve, reject);
}
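
Context for the itAsync migration running through these test diffs: a minimal sketch of what the itAsync helper imported from '../utilities/testing/itAsync' could look like. This reconstruction is an assumption for illustration, not the shipped implementation.

// Hypothetical sketch of itAsync (assumes Jest's global `it`).
// It hands the test body explicit resolve/reject callbacks backed by
// a Promise that Jest awaits, replacing the done-callback style.
function itAsync(
  message: string,
  callback: (
    resolve: (result?: any) => void,
    reject: (reason?: any) => void,
  ) => any,
  timeout?: number,
) {
  return it(message, () => new Promise<any>((resolve, reject) => {
    try {
      // Synchronous throws inside the callback should also fail the test.
      callback(resolve, reject);
    } catch (error) {
      reject(error);
    }
  }), timeout);
}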
diff --git a/packages/apollo-client/src/__tests__/fetchMore.ts b/src/__tests__/fetchMore.ts
similarity index 82%
rename from packages/apollo-client/src/__tests__/fetchMore.ts
rename to src/__tests__/fetchMore.ts
--- a/packages/apollo-client/src/__tests__/fetchMore.ts
+++ b/src/__tests__/fetchMore.ts
@@ -1,9 +1,10 @@
-import { InMemoryCache } from 'apollo-cache-inmemory';
import { assign, cloneDeep } from 'lodash';
import gql from 'graphql-tag';
-import { mockSingleLink } from '../__mocks__/mockLinks';
-import ApolloClient, { NetworkStatus, ObservableQuery } from '../';
+import { mockSingleLink } from '../utilities/testing/mocking/mockLink';
+import { InMemoryCache } from '../cache/inmemory/inMemoryCache';
+import { ApolloClient, NetworkStatus, ObservableQuery } from '../';
+import { itAsync } from '../utilities/testing/itAsync';
describe('updateQuery on a simple query', () => {
const query = gql`
@@ -25,12 +26,12 @@ describe('updateQuery on a simple query', () => {
},
};
- it('triggers new result from updateQuery', () => {
+ itAsync('triggers new result from updateQuery', (resolve, reject) => {
let latestResult: any = null;
const link = mockSingleLink({
request: { query },
result,
- });
+ }).setOnError(reject);
const client = new ApolloClient({
link,
@@ -58,7 +59,8 @@ describe('updateQuery on a simple query', () => {
});
})
.then(() => expect(latestResult.data.entry.value).toBe(2))
- .then(() => sub.unsubscribe());
+ .then(() => sub.unsubscribe())
+ .then(resolve, reject);
});
});
@@ -87,7 +89,7 @@ describe('updateQuery on a query with required and optional variables', () => {
},
};
- it('triggers new result from updateQuery', () => {
+ itAsync('triggers new result from updateQuery', (resolve, reject) => {
let latestResult: any = null;
const link = mockSingleLink({
request: {
@@ -95,7 +97,7 @@ describe('updateQuery on a query with required and optional variables', () => {
variables,
},
result,
- });
+ }).setOnError(reject);
const client = new ApolloClient({
link,
@@ -124,7 +126,8 @@ describe('updateQuery on a query with required and optional variables', () => {
});
})
.then(() => expect(latestResult.data.entry.value).toBe(2))
- .then(() => sub.unsubscribe());
+ .then(() => sub.unsubscribe())
+ .then(resolve, reject);
});
});
@@ -199,17 +202,17 @@ describe('fetchMore on an observable query', () => {
let link: any;
let sub: any;
- function setup(...mockedResponses: any[]) {
- link = mockSingleLink(
- {
- request: {
- query,
- variables,
- },
- result,
+ function setup(
+ reject: (reason: any) => any,
+ ...mockedResponses: any[]
+ ) {
+ link = mockSingleLink({
+ request: {
+ query,
+ variables,
},
- ...mockedResponses,
- );
+ result,
+ }, ...mockedResponses).setOnError(reject);
client = new ApolloClient({
link,
@@ -235,9 +238,9 @@ describe('fetchMore on an observable query', () => {
sub = null;
}
- it('triggers new result with new variables', () => {
+  itAsync('triggers new result with new variables', (resolve, reject) => {
latestResult = null;
- return setup({
+ return setup(reject, {
request: {
query,
variables: variablesMore,
@@ -270,12 +273,13 @@ describe('fetchMore on an observable query', () => {
expect(comments[i - 1].text).toEqual(`comment ${i}`);
}
unsetup();
- });
+ })
+ .then(resolve, reject);
});
- it('basic fetchMore results merging', () => {
+ itAsync('basic fetchMore results merging', (resolve, reject) => {
latestResult = null;
- return setup({
+ return setup(reject, {
request: {
query,
variables: variablesMore,
@@ -304,12 +308,13 @@ describe('fetchMore on an observable query', () => {
expect(comments[i - 1].text).toEqual(`comment ${i}`);
}
unsetup();
- });
+ })
+ .then(resolve, reject);
});
- it('fetching more with a different query', () => {
+ itAsync('fetching more with a different query', (resolve, reject) => {
latestResult = null;
- return setup({
+ return setup(reject, {
request: {
query: query2,
variables: variables2,
@@ -340,18 +345,16 @@ describe('fetchMore on an observable query', () => {
expect(comments[i - 1].text).toEqual(`new comment ${i}`);
}
unsetup();
- });
+ })
+ .then(resolve, reject);
});
- it('will set the `network` status to `fetchMore`', done => {
- link = mockSingleLink(
- { request: { query, variables }, result, delay: 5 },
- {
- request: { query, variables: variablesMore },
- result: resultMore,
- delay: 5,
- },
- );
+ itAsync('will set the `network` status to `fetchMore`', (resolve, reject) => {
+ link = mockSingleLink({ request: { query, variables }, result, delay: 5 }, {
+ request: { query, variables: variablesMore },
+ result: resultMore,
+ delay: 5,
+ }).setOnError(reject);
client = new ApolloClient({
link,
@@ -394,27 +397,24 @@ describe('fetchMore on an observable query', () => {
case 3:
expect(networkStatus).toBe(NetworkStatus.ready);
expect((data as any).entry.comments.length).toBe(20);
- done();
+ resolve();
break;
default:
- done.fail(new Error('`next` called too many times'));
+ reject(new Error('`next` called too many times'));
}
},
- error: error => done.fail(error),
- complete: () => done.fail(new Error('Should not have completed')),
+ error: error => reject(error),
+ complete: () => reject(new Error('Should not have completed')),
});
});
- it('will not get an error from `fetchMore` if thrown', done => {
+ itAsync('will not get an error from `fetchMore` if thrown', (resolve, reject) => {
const fetchMoreError = new Error('Uh, oh!');
- link = mockSingleLink(
- { request: { query, variables }, result, delay: 5 },
- {
- request: { query, variables: variablesMore },
- error: fetchMoreError,
- delay: 5,
- },
- );
+ link = mockSingleLink({ request: { query, variables }, result, delay: 5 }, {
+ request: { query, variables: variablesMore },
+ error: fetchMoreError,
+ delay: 5,
+ }).setOnError(reject);
client = new ApolloClient({
link,
@@ -457,25 +457,25 @@ describe('fetchMore on an observable query', () => {
default:
expect(networkStatus).toBe(NetworkStatus.ready);
expect((data as any).entry.comments.length).toBe(10);
- done();
+ resolve();
break;
}
},
error: () => {
- done.fail(new Error('`error` called when it wasn’t supposed to be.'));
+ reject(new Error('`error` called when it wasn’t supposed to be.'));
},
complete: () => {
- done.fail(
+ reject(
new Error('`complete` called when it wasn’t supposed to be.'),
);
},
});
});
- it('will not leak fetchMore query', () => {
+ itAsync('will not leak fetchMore query', (resolve, reject) => {
latestResult = null;
var beforeQueryCount;
- return setup({
+ return setup(reject, {
request: {
query,
variables: variablesMore,
@@ -504,7 +504,8 @@ describe('fetchMore on an observable query', () => {
).length;
expect(afterQueryCount).toBe(beforeQueryCount);
unsetup();
- });
+ })
+ .then(resolve, reject);
});
});
@@ -568,17 +569,17 @@ describe('fetchMore on an observable query with connection', () => {
let link: any;
let sub: any;
- function setup(...mockedResponses: any[]) {
- link = mockSingleLink(
- {
- request: {
- query: transformedQuery,
- variables,
- },
- result,
+ function setup(
+ reject: (reason: any) => any,
+ ...mockedResponses: any[]
+ ) {
+ link = mockSingleLink({
+ request: {
+ query: transformedQuery,
+ variables,
},
- ...mockedResponses,
- );
+ result,
+ }, ...mockedResponses).setOnError(reject);
client = new ApolloClient({
link,
@@ -604,9 +605,9 @@ describe('fetchMore on an observable query with connection', () => {
sub = null;
}
- it('fetchMore with connection results merging', () => {
+ itAsync('fetchMore with connection results merging', (resolve, reject) => {
latestResult = null;
- return setup({
+ return setup(reject, {
request: {
query: transformedQuery,
variables: variablesMore,
@@ -635,18 +636,16 @@ describe('fetchMore on an observable query with connection', () => {
expect(comments[i - 1].text).toBe(`comment ${i}`);
}
unsetup();
- });
+ })
+ .then(resolve, reject);
});
- it('will set the network status to `fetchMore`', done => {
- link = mockSingleLink(
- { request: { query: transformedQuery, variables }, result, delay: 5 },
- {
- request: { query: transformedQuery, variables: variablesMore },
- result: resultMore,
- delay: 5,
- },
- );
+ itAsync('will set the network status to `fetchMore`', (resolve, reject) => {
+ link = mockSingleLink({ request: { query: transformedQuery, variables }, result, delay: 5 }, {
+ request: { query: transformedQuery, variables: variablesMore },
+ result: resultMore,
+ delay: 5,
+ }).setOnError(reject);
client = new ApolloClient({
link,
@@ -689,27 +688,24 @@ describe('fetchMore on an observable query with connection', () => {
case 3:
expect(networkStatus).toBe(NetworkStatus.ready);
expect((data as any).entry.comments.length).toBe(20);
- done();
+ resolve();
break;
default:
- done.fail(new Error('`next` called too many times'));
+ reject(new Error('`next` called too many times'));
}
},
- error: error => done.fail(error),
- complete: () => done.fail(new Error('Should not have completed')),
+ error: error => reject(error),
+ complete: () => reject(new Error('Should not have completed')),
});
});
- it('will not get an error from `fetchMore` if thrown', done => {
+ itAsync('will not get an error from `fetchMore` if thrown', (resolve, reject) => {
const fetchMoreError = new Error('Uh, oh!');
- link = mockSingleLink(
- { request: { query: transformedQuery, variables }, result, delay: 5 },
- {
- request: { query: transformedQuery, variables: variablesMore },
- error: fetchMoreError,
- delay: 5,
- },
- );
+ link = mockSingleLink({ request: { query: transformedQuery, variables }, result, delay: 5 }, {
+ request: { query: transformedQuery, variables: variablesMore },
+ error: fetchMoreError,
+ delay: 5,
+ }).setOnError(reject);
client = new ApolloClient({
link,
@@ -752,14 +748,14 @@ describe('fetchMore on an observable query with connection', () => {
default:
expect(networkStatus).toBe(NetworkStatus.ready);
expect((data as any).entry.comments.length).toBe(10);
- done();
+ resolve();
}
},
error: () => {
- done.fail(new Error('`error` called when it wasn’t supposed to be.'));
+ reject(new Error('`error` called when it wasn’t supposed to be.'));
},
complete: () => {
- done.fail(
+ reject(
new Error('`complete` called when it wasn’t supposed to be.'),
);
},
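
The `.setOnError(reject)` chains added throughout these tests route unexpected mock-link errors into the enclosing test's reject callback instead of letting them throw on a detached stack. A minimal sketch of such a chainable setter, assuming a MockLink-like class (the class name here is illustrative, not the real mockLink module):

// Illustrative sketch of a chainable error handler on a mock link.
class SketchMockLink {
  // Default behavior: rethrow, so unconfigured links still surface failures.
  public onError: (error: any) => any = error => { throw error; };

  // Returning `this` enables mockSingleLink(...).setOnError(reject) chains.
  public setOnError(fn: (error: any) => any): this {
    this.onError = fn;
    return this;
  }
}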
diff --git a/packages/apollo-client/src/__tests__/graphqlSubscriptions.ts b/src/__tests__/graphqlSubscriptions.ts
similarity index 93%
rename from packages/apollo-client/src/__tests__/graphqlSubscriptions.ts
rename to src/__tests__/graphqlSubscriptions.ts
--- a/packages/apollo-client/src/__tests__/graphqlSubscriptions.ts
+++ b/src/__tests__/graphqlSubscriptions.ts
@@ -1,12 +1,12 @@
import gql from 'graphql-tag';
-import { InMemoryCache } from 'apollo-cache-inmemory';
-
-import { mockObservableLink, MockedSubscription } from '../__mocks__/mockLinks';
-
-import ApolloClient from '../';
+import {
+ mockObservableLink,
+ MockedSubscription
+} from '../utilities/testing/mocking/mockSubscriptionLink';
+import { InMemoryCache } from '../cache/inmemory/inMemoryCache';
+import { ApolloClient } from '../';
import { QueryManager } from '../core/QueryManager';
-import { DataStore } from '../data/store';
describe('GraphQL Subscriptions', () => {
const results = [
@@ -131,7 +131,7 @@ describe('GraphQL Subscriptions', () => {
const link = mockObservableLink(sub1);
const queryManager = new QueryManager({
link,
- store: new DataStore(new InMemoryCache({ addTypename: false })),
+ cache: new InMemoryCache({ addTypename: false }),
});
const obs = queryManager.startGraphQLSubscription(options);
@@ -169,7 +169,7 @@ describe('GraphQL Subscriptions', () => {
let numResults = 0;
const queryManager = new QueryManager({
link,
- store: new DataStore(new InMemoryCache({ addTypename: false })),
+ cache: new InMemoryCache({ addTypename: false }),
});
// tslint:disable-next-line
@@ -214,7 +214,7 @@ describe('GraphQL Subscriptions', () => {
const link = mockObservableLink(sub1);
const queryManager = new QueryManager({
link,
- store: new DataStore(new InMemoryCache({ addTypename: false })),
+ cache: new InMemoryCache({ addTypename: false }),
});
const obs = queryManager.startGraphQLSubscription(options);
diff --git a/packages/apollo-client/src/__tests__/local-state/__snapshots__/general.ts.snap b/src/__tests__/local-state/__snapshots__/general.ts.snap
similarity index 100%
rename from packages/apollo-client/src/__tests__/local-state/__snapshots__/general.ts.snap
rename to src/__tests__/local-state/__snapshots__/general.ts.snap
diff --git a/packages/apollo-client/src/__tests__/local-state/export.ts b/src/__tests__/local-state/export.ts
similarity index 97%
rename from packages/apollo-client/src/__tests__/local-state/export.ts
rename to src/__tests__/local-state/export.ts
--- a/packages/apollo-client/src/__tests__/local-state/export.ts
+++ b/src/__tests__/local-state/export.ts
@@ -1,10 +1,12 @@
import gql from 'graphql-tag';
-
-import ApolloClient from '../..';
-import { InMemoryCache } from 'apollo-cache-inmemory';
-import { ApolloLink, Observable } from 'apollo-link';
import { print } from 'graphql/language/printer';
+import { Observable } from '../../utilities/observables/Observable';
+import { itAsync } from '../../utilities/testing/itAsync';
+import { ApolloLink } from '../../link/core/ApolloLink';
+import { ApolloClient } from '../..';
+import { InMemoryCache } from '../../cache/inmemory/inMemoryCache';
+
describe('@client @export tests', () => {
it(
'should not break @client only queries when the @export directive is ' +
@@ -324,10 +326,10 @@ describe('@client @export tests', () => {
});
});
- it(
+ itAsync(
'should support setting an @client @export variable, loaded from the ' +
'cache, on a virtual field that is combined into a remote query.',
- done => {
+ (resolve, reject) => {
const query = gql`
query postRequiringReview($reviewerId: Int!) {
postRequiringReview {
@@ -360,7 +362,7 @@ describe('@client @export tests', () => {
reviewerDetails,
},
});
- });
+ }).setOnError(reject);
const cache = new InMemoryCache();
const client = new ApolloClient({
@@ -374,6 +376,7 @@ describe('@client @export tests', () => {
postRequiringReview: {
loggedInReviewerId,
__typename: 'Post',
+ id: 10,
},
},
});
@@ -387,8 +390,7 @@ describe('@client @export tests', () => {
},
reviewerDetails,
});
- done();
- });
+ }).then(resolve, reject);
},
);
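
The `id: 10` added to the Post written above makes the object identifiable, so the later network result merges with the seeded cache data instead of replacing it. When no id field is available, a keyFields type policy is the usual alternative; the following is a hedged sketch of such a configuration (assumed for illustration, not taken from this diff):

import { InMemoryCache } from '../../cache/inmemory/inMemoryCache';

const cache = new InMemoryCache({
  typePolicies: {
    Post: {
      // Identify Post objects by a field other than the default `id`.
      keyFields: ['loggedInReviewerId'],
    },
  },
});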
diff --git a/packages/apollo-client/src/__tests__/local-state/general.ts b/src/__tests__/local-state/general.ts
similarity index 90%
rename from packages/apollo-client/src/__tests__/local-state/general.ts
rename to src/__tests__/local-state/general.ts
--- a/packages/apollo-client/src/__tests__/local-state/general.ts
+++ b/src/__tests__/local-state/general.ts
@@ -1,15 +1,15 @@
import gql from 'graphql-tag';
import { DocumentNode, GraphQLError } from 'graphql';
-import { introspectionQuery } from 'graphql/utilities';
+import { getIntrospectionQuery } from 'graphql/utilities';
-import ApolloClient from '../..';
-import { ApolloCache } from 'apollo-cache';
-import {
- InMemoryCache,
- IntrospectionFragmentMatcher,
-} from 'apollo-cache-inmemory';
-import { ApolloLink, Observable, Operation } from 'apollo-link';
-import { hasDirectives } from 'apollo-utilities';
+import { Observable } from '../../utilities/observables/Observable';
+import { ApolloLink } from '../../link/core/ApolloLink';
+import { Operation } from '../../link/core/types';
+import { ApolloClient } from '../..';
+import { ApolloCache } from '../../cache/core/cache';
+import { InMemoryCache } from '../../cache/inmemory/inMemoryCache';
+import { hasDirectives } from '../../utilities/graphql/directives';
+import { itAsync } from '../../utilities/testing/itAsync';
describe('General functionality', () => {
it('should not impact normal non-@client use', () => {
@@ -71,7 +71,7 @@ describe('General functionality', () => {
it('should not interfere with server introspection queries', () => {
const query = gql`
- ${introspectionQuery}
+ ${getIntrospectionQuery()}
`;
const error = new GraphQLError('no introspection result found');
@@ -227,19 +227,9 @@ describe('General functionality', () => {
const client = new ApolloClient({
cache: new InMemoryCache({
- fragmentMatcher: new IntrospectionFragmentMatcher({
- introspectionQueryResultData: {
- __schema: {
- types: [
- {
- kind: 'UnionTypeDefinition',
- name: 'Foo',
- possibleTypes: [{ name: 'Bar' }, { name: 'Baz' }],
- },
- ],
- },
- },
- }),
+ possibleTypes: {
+ Foo: ['Bar', 'Baz'],
+ },
}),
link,
resolvers,
@@ -314,10 +304,10 @@ describe('Cache manipulation', () => {
});
});
- it(
+ itAsync(
'should be able to write to the cache with a local mutation and have ' +
'things rerender automatically',
- done => {
+ (resolve, reject) => {
const query = gql`
{
field @client
@@ -359,7 +349,7 @@ describe('Cache manipulation', () => {
if (count === 2) {
expect({ ...data }).toMatchObject({ field: 1 });
- done();
+ resolve();
}
},
});
@@ -416,7 +406,7 @@ describe('Cache manipulation', () => {
});
});
- it("should read @client fields from cache on refetch (#4741)", function (done) {
+ itAsync("should read @client fields from cache on refetch (#4741)", (resolve, reject) => {
const query = gql`
query FetchInitialData {
serverData {
@@ -480,7 +470,7 @@ describe('Cache manipulation', () => {
],
});
} else {
- done();
+ resolve();
}
},
});
@@ -488,7 +478,7 @@ describe('Cache manipulation', () => {
});
describe('Sample apps', () => {
- it('should support a simple counter app using local state', done => {
+ itAsync('should support a simple counter app using local state', (resolve, reject) => {
const query = gql`
query GetCount {
count @client
@@ -565,7 +555,7 @@ describe('Sample apps', () => {
try {
expect({ ...data }).toMatchObject({ count: 0, lastCount: 1 });
} catch (e) {
- done.fail(e);
+ reject(e);
}
client.mutate({ mutation: increment, variables: { amount: 2 } });
}
@@ -574,7 +564,7 @@ describe('Sample apps', () => {
try {
expect({ ...data }).toMatchObject({ count: 2, lastCount: 1 });
} catch (e) {
- done.fail(e);
+ reject(e);
}
client.mutate({ mutation: decrement, variables: { amount: 1 } });
}
@@ -582,17 +572,17 @@ describe('Sample apps', () => {
try {
expect({ ...data }).toMatchObject({ count: 1, lastCount: 1 });
} catch (e) {
- done.fail(e);
+ reject(e);
}
- done();
+ resolve();
}
},
- error: e => done.fail(e),
- complete: done.fail,
+ error: e => reject(e),
+ complete: reject,
});
});
- it('should support a simple todo app using local state', done => {
+ itAsync('should support a simple todo app using local state', (resolve, reject) => {
const query = gql`
query GetTasks {
todos @client {
@@ -669,7 +659,7 @@ describe('Sample apps', () => {
__typename: 'Todo',
},
]);
- done();
+ resolve();
}
},
});
@@ -677,7 +667,7 @@ describe('Sample apps', () => {
});
describe('Combining client and server state/operations', () => {
- it('should merge remote and local state', done => {
+ itAsync('should merge remote and local state', (resolve, reject) => {
const query = gql`
query list {
list(name: "my list") {
@@ -756,11 +746,11 @@ describe('Combining client and server state/operations', () => {
if (count === 1) {
expect((response.data as any).list.items[0].isSelected).toBe(true);
expect((response.data as any).list.items[1].isSelected).toBe(false);
- done();
+ resolve();
}
count++;
},
- error: done.fail,
+ error: reject,
});
const variables = { id: 1 };
const mutation = gql`
@@ -774,7 +764,7 @@ describe('Combining client and server state/operations', () => {
}, 10);
});
- it('should correctly propagate an error from a client resolver', async done => {
+ itAsync('should correctly propagate an error from a client resolver', async (resolve, reject) => {
const data = {
list: {
__typename: 'List',
@@ -819,7 +809,7 @@ describe('Combining client and server state/operations', () => {
try {
await client.query({ query, variables });
- done.fail('Should have thrown!');
+ reject('Should have thrown!');
} catch (e) {
// Test Passed!
expect(() => {
@@ -829,7 +819,7 @@ describe('Combining client and server state/operations', () => {
try {
await client.mutate({ mutation, variables });
- done.fail('Should have thrown!');
+ reject('Should have thrown!');
} catch (e) {
// Test Passed!
expect(() => {
@@ -837,10 +827,10 @@ describe('Combining client and server state/operations', () => {
}).toThrowErrorMatchingSnapshot();
}
- done();
+ resolve();
});
- it('should handle a simple query with both server and client fields', done => {
+ itAsync('should handle a simple query with both server and client fields', (resolve, reject) => {
const query = gql`
query GetCount {
count @client
@@ -869,12 +859,12 @@ describe('Combining client and server state/operations', () => {
client.watchQuery({ query }).subscribe({
next: ({ data }) => {
expect({ ...data }).toMatchObject({ count: 0, lastCount: 1 });
- done();
+ resolve();
},
});
});
- it('should support nested quering of both server and client fields', done => {
+ itAsync('should support nested querying of both server and client fields', (resolve, reject) => {
const query = gql`
query GetUser {
user {
@@ -888,7 +878,17 @@ describe('Combining client and server state/operations', () => {
const link = new ApolloLink(operation => {
expect(operation.operationName).toBe('GetUser');
return Observable.of({
- data: { user: { lastName: 'Doe', __typename: 'User' } },
+ data: {
+ user: {
+ __typename: 'User',
+ // We need an id (or a keyFields policy) because, if the User
+ // object is not identifiable, the call to cache.writeData
+ // below will simply replace the existing data rather than
+ // merging the new data with the existing data.
+ id: 123,
+ lastName: 'Doe',
+ },
+ },
});
});
@@ -902,13 +902,14 @@ describe('Combining client and server state/operations', () => {
data: {
user: {
__typename: 'User',
+ id: 123,
firstName: 'John',
},
},
});
client.watchQuery({ query }).subscribe({
- next: ({ data }: any) => {
+ next({ data }: any) {
const { user } = data;
try {
expect(user).toMatchObject({
@@ -917,14 +918,14 @@ describe('Combining client and server state/operations', () => {
__typename: 'User',
});
} catch (e) {
- done.fail(e);
+ reject(e);
}
- done();
+ resolve();
},
});
});
- it('should combine both server and client mutations', done => {
+ itAsync('should combine both server and client mutations', (resolve, reject) => {
const query = gql`
query SampleQuery {
count @client
@@ -1022,7 +1023,7 @@ describe('Combining client and server state/operations', () => {
__typename: 'User',
firstName: 'Harry',
});
- done();
+ resolve();
}
},
});
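
The fragment-matcher replacement in this file reduces to a declarative map from supertype to concrete subtypes. For reference, a cache configured the new way, reusing the names from the diff (a minimal sketch):

import { InMemoryCache } from '../../cache/inmemory/inMemoryCache';

const cache = new InMemoryCache({
  // Supertype -> possible concrete subtypes, replacing the old
  // IntrospectionFragmentMatcher and its introspection query result.
  possibleTypes: {
    Foo: ['Bar', 'Baz'],
  },
});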
diff --git a/packages/apollo-client/src/__tests__/local-state/resolvers.ts b/src/__tests__/local-state/resolvers.ts
similarity index 92%
rename from packages/apollo-client/src/__tests__/local-state/resolvers.ts
rename to src/__tests__/local-state/resolvers.ts
--- a/packages/apollo-client/src/__tests__/local-state/resolvers.ts
+++ b/src/__tests__/local-state/resolvers.ts
@@ -1,21 +1,22 @@
import gql from 'graphql-tag';
import { DocumentNode, ExecutionResult } from 'graphql';
import { assign } from 'lodash';
-import { InMemoryCache } from 'apollo-cache-inmemory';
-import { ApolloLink, Observable } from 'apollo-link';
-import ApolloClient from '../..';
-import mockQueryManager from '../../__mocks__/mockQueryManager';
-import { Observer } from '../../util/Observable';
-import wrap from '../../util/wrap';
+import { Observable, Observer } from '../../utilities/observables/Observable';
+import { ApolloLink } from '../../link/core/ApolloLink';
+import { ApolloClient } from '../..';
+import mockQueryManager from '../../utilities/testing/mocking/mockQueryManager';
+import wrap from '../../utilities/testing/wrap';
+import { itAsync } from '../../utilities/testing/itAsync';
import { ApolloQueryResult, Resolvers } from '../../core/types';
import { WatchQueryOptions } from '../../core/watchQueryOptions';
import { LocalState } from '../../core/LocalState';
+import { InMemoryCache } from '../../cache/inmemory/inMemoryCache';
// Helper method that sets up a mockQueryManager and then passes on the
// results to an observer.
const assertWithObserver = ({
- done,
+ reject,
resolvers,
query,
serverQuery,
@@ -26,7 +27,7 @@ const assertWithObserver = ({
delay,
observer,
}: {
- done: jest.DoneCallback;
+ reject: (reason: any) => any;
resolvers?: Resolvers;
query: DocumentNode;
serverQuery?: DocumentNode;
@@ -37,7 +38,7 @@ const assertWithObserver = ({
delay?: number;
observer: Observer<ApolloQueryResult<any>>;
}) => {
- const queryManager = mockQueryManager({
+ const queryManager = mockQueryManager(reject, {
request: { query: serverQuery || query, variables },
result: serverResult,
error,
@@ -53,13 +54,13 @@ const assertWithObserver = ({
queryOptions,
) as WatchQueryOptions;
return queryManager.watchQuery<any>(finalOptions).subscribe({
- next: wrap(done, observer.next!),
+ next: wrap(reject, observer.next!),
error: observer.error,
});
};
describe('Basic resolver capabilities', () => {
- it('should run resolvers for @client queries', done => {
+ itAsync('should run resolvers for @client queries', (resolve, reject) => {
const query = gql`
query Test {
foo @client {
@@ -75,7 +76,7 @@ describe('Basic resolver capabilities', () => {
};
assertWithObserver({
- done,
+ reject,
resolvers,
query,
observer: {
@@ -83,15 +84,15 @@ describe('Basic resolver capabilities', () => {
try {
expect(data).toEqual({ foo: { bar: true } });
} catch (error) {
- done.fail(error);
+ reject(error);
}
- done();
+ resolve();
},
},
});
});
- it('should handle queries with a mix of @client and server fields', done => {
+ itAsync('should handle queries with a mix of @client and server fields', (resolve, reject) => {
const query = gql`
query Mixed {
foo @client {
@@ -118,7 +119,7 @@ describe('Basic resolver capabilities', () => {
};
assertWithObserver({
- done,
+ reject,
resolvers,
query,
serverQuery,
@@ -128,15 +129,15 @@ describe('Basic resolver capabilities', () => {
try {
expect(data).toEqual({ foo: { bar: true }, bar: { baz: true } });
} catch (error) {
- done.fail(error);
+ reject(error);
}
- done();
+ resolve();
},
},
});
});
- it('should handle a mix of @client fields with fragments and server fields', done => {
+ itAsync('should handle a mix of @client fields with fragments and server fields', (resolve, reject) => {
const query = gql`
fragment client on ClientData {
bar
@@ -168,7 +169,7 @@ describe('Basic resolver capabilities', () => {
};
assertWithObserver({
- done,
+ reject,
resolvers,
query,
serverQuery,
@@ -181,15 +182,15 @@ describe('Basic resolver capabilities', () => {
bar: { baz: true },
});
} catch (error) {
- done.fail(error);
+ reject(error);
}
- done();
+ resolve();
},
},
});
});
- it('should have access to query variables when running @client resolvers', done => {
+ itAsync('should have access to query variables when running @client resolvers', (resolve, reject) => {
const query = gql`
query WithVariables($id: ID!) {
foo @client {
@@ -208,7 +209,7 @@ describe('Basic resolver capabilities', () => {
};
assertWithObserver({
- done,
+ reject,
resolvers,
query,
variables: { id: 1 },
@@ -217,15 +218,15 @@ describe('Basic resolver capabilities', () => {
try {
expect(data).toEqual({ foo: { bar: 1 } });
} catch (error) {
- done.fail(error);
+ reject(error);
}
- done();
+ resolve();
},
},
});
});
- it('should pass context to @client resolvers', done => {
+ itAsync('should pass context to @client resolvers', (resolve, reject) => {
const query = gql`
query WithContext {
foo @client {
@@ -244,7 +245,7 @@ describe('Basic resolver capabilities', () => {
};
assertWithObserver({
- done,
+ reject,
resolvers,
query,
queryOptions: { context: { id: 1 } },
@@ -253,18 +254,18 @@ describe('Basic resolver capabilities', () => {
try {
expect(data).toEqual({ foo: { bar: 1 } });
} catch (error) {
- done.fail(error);
+ reject(error);
}
- done();
+ resolve();
},
},
});
});
- it(
+ itAsync(
'should combine local @client resolver results with server results, for ' +
'the same field',
- done => {
+ (resolve, reject) => {
const query = gql`
query author {
author {
@@ -295,7 +296,7 @@ describe('Basic resolver capabilities', () => {
};
assertWithObserver({
- done,
+ reject,
resolvers,
query,
serverQuery,
@@ -324,16 +325,16 @@ describe('Basic resolver capabilities', () => {
},
});
} catch (error) {
- done.fail(error);
+ reject(error);
}
- done();
+ resolve();
},
},
});
},
);
- it('should handle resolvers that work with booleans properly', done => {
+ itAsync('should handle resolvers that work with booleans properly', (resolve, reject) => {
const query = gql`
query CartDetails {
isInCart @client
@@ -358,7 +359,7 @@ describe('Basic resolver capabilities', () => {
expect({ ...data }).toMatchObject({
isInCart: false,
});
- done();
+ resolve();
});
});
@@ -702,7 +703,7 @@ describe('Writing cache data from resolvers', () => {
});
describe('Resolving field aliases', () => {
- it('should run resolvers for missing client queries with aliased field', done => {
+ itAsync('should run resolvers for missing client queries with aliased field', (resolve, reject) => {
// expect.assertions(1);
const query = gql`
query Aliased {
@@ -738,17 +739,17 @@ describe('Resolving field aliases', () => {
baz: { foo: true, __typename: 'Baz' },
});
} catch (e) {
- done.fail(e);
+ reject(e);
return;
}
- done();
- }, done.fail);
+ resolve();
+ }, reject);
});
- it(
+ itAsync(
'should run resolvers for client queries when aliases are in use on ' +
'the @client-tagged node',
- done => {
+ (resolve, reject) => {
const aliasedQuery = gql`
query Test {
fie: foo @client {
@@ -764,7 +765,7 @@ describe('Resolving field aliases', () => {
Query: {
foo: () => ({ bar: true, __typename: 'Foo' }),
fie: () => {
- done.fail(
+ reject(
"Called the resolver using the alias' name, instead of " +
'the correct resolver name.',
);
@@ -775,12 +776,12 @@ describe('Resolving field aliases', () => {
client.query({ query: aliasedQuery }).then(({ data }) => {
expect(data).toEqual({ fie: { bar: true, __typename: 'Foo' } });
- done();
- }, done.fail);
+ resolve();
+ }, reject);
},
);
- it('should respect aliases for *nested fields* on the @client-tagged node', done => {
+ itAsync('should respect aliases for *nested fields* on the @client-tagged node', (resolve, reject) => {
const aliasedQuery = gql`
query Test {
fie: foo @client {
@@ -803,7 +804,7 @@ describe('Resolving field aliases', () => {
Query: {
foo: () => ({ bar: true, __typename: 'Foo' }),
fie: () => {
- done.fail(
+ reject(
"Called the resolver using the alias' name, instead of " +
'the correct resolver name.',
);
@@ -817,8 +818,8 @@ describe('Resolving field aliases', () => {
fie: { fum: true, __typename: 'Foo' },
baz: { foo: true, __typename: 'Baz' },
});
- done();
- }, done.fail);
+ resolve();
+ }, reject);
});
it(
@@ -1059,10 +1060,10 @@ describe('Force local resolvers', () => {
},
);
- it(
+ itAsync(
'should force the running of local resolvers marked with ' +
'`@client(always: true)` when using `ApolloClient.watchQuery`',
- (done) => {
+ (resolve, reject) => {
const query = gql`
query IsUserLoggedIn {
isUserLoggedIn @client(always: true)
@@ -1101,7 +1102,7 @@ describe('Force local resolvers', () => {
// Result is loaded from the cache since the resolver
// isn't being forced.
expect(callCount).toBe(2);
- done();
+ resolve();
}
});
}
@@ -1156,7 +1157,7 @@ describe('Force local resolvers', () => {
});
describe('Async resolvers', () => {
- it('should support async @client resolvers', async (done) => {
+ itAsync('should support async @client resolvers', async (resolve, reject) => {
const query = gql`
query Member {
isLoggedIn @client
@@ -1176,12 +1177,12 @@ describe('Async resolvers', () => {
const { data: { isLoggedIn } } = await client.query({ query });
expect(isLoggedIn).toBe(true);
- return done();
+ return resolve();
});
- it(
+ itAsync(
'should support async @client resolvers mixed with remotely resolved data',
- async (done) => {
+ async (resolve, reject) => {
const query = gql`
query Member {
member {
@@ -1228,8 +1229,8 @@ describe('Async resolvers', () => {
expect(member.name).toBe(testMember.name);
expect(member.isLoggedIn).toBe(testMember.isLoggedIn);
expect(member.sessionCount).toBe(testMember.sessionCount);
- return done();
- }
+ return resolve();
+ },
);
});
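
Usage sketch for the async resolver tests above: local @client resolvers may return promises, which the client awaits before delivering results. A minimal, assumption-level example mirroring the isLoggedIn test (construction details are inferred, not copied from the diff):

import gql from 'graphql-tag';
import { ApolloClient } from '../..';
import { InMemoryCache } from '../../cache/inmemory/inMemoryCache';

const client = new ApolloClient({
  cache: new InMemoryCache(),
  resolvers: {
    Query: {
      // An async @client resolver; the returned promise is awaited.
      isLoggedIn: async () => true,
    },
  },
});

// Querying the @client field resolves isLoggedIn to true.
client.query({
  query: gql`query Member { isLoggedIn @client }`,
}).then(({ data }) => console.log(data.isLoggedIn));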
diff --git a/packages/apollo-client/src/__tests__/local-state/subscriptions.ts b/src/__tests__/local-state/subscriptions.ts
similarity index 88%
rename from packages/apollo-client/src/__tests__/local-state/subscriptions.ts
rename to src/__tests__/local-state/subscriptions.ts
--- a/packages/apollo-client/src/__tests__/local-state/subscriptions.ts
+++ b/src/__tests__/local-state/subscriptions.ts
@@ -1,8 +1,9 @@
import gql from 'graphql-tag';
-import ApolloClient from '../..';
-import { InMemoryCache } from 'apollo-cache-inmemory';
-import { ApolloLink, Observable } from 'apollo-link';
+import { Observable } from '../../utilities/observables/Observable';
+import { ApolloLink } from '../../link/core/ApolloLink';
+import { ApolloClient } from '../..';
+import { InMemoryCache } from '../../cache/inmemory/inMemoryCache';
describe('Basic functionality', () => {
it('should not break subscriptions', done => {
diff --git a/packages/apollo-client/src/__tests__/mutationResults.ts b/src/__tests__/mutationResults.ts
similarity index 85%
rename from packages/apollo-client/src/__tests__/mutationResults.ts
rename to src/__tests__/mutationResults.ts
--- a/packages/apollo-client/src/__tests__/mutationResults.ts
+++ b/src/__tests__/mutationResults.ts
@@ -1,13 +1,13 @@
-import { ApolloLink, Observable } from 'apollo-link';
import { cloneDeep } from 'lodash';
import gql from 'graphql-tag';
-import { InMemoryCache } from 'apollo-cache-inmemory';
-import { mockSingleLink } from '../__mocks__/mockLinks';
-import ApolloClient from '..';
-
-import { Subscription } from '../util/Observable';
-import { withWarning } from '../util/wrap';
+import { Observable, Subscription } from '../utilities/observables/Observable';
+import { ApolloLink } from '../link/core/ApolloLink';
+import { mockSingleLink } from '../utilities/testing/mocking/mockLink';
+import { ApolloClient } from '..';
+import { InMemoryCache } from '../cache/inmemory/inMemoryCache';
+import { withWarning } from '../utilities/testing/wrap';
+import { itAsync } from '../utilities/testing/itAsync';
describe('mutation results', () => {
const query = gql`
@@ -120,14 +120,14 @@ describe('mutation results', () => {
let client: ApolloClient<any>;
let link: any;
- function setupObsHandle(...mockedResponses: any[]) {
- link = mockSingleLink(
- {
- request: { query: queryWithTypename } as any,
- result,
- },
- ...mockedResponses,
- );
+ function setupObsHandle(
+ reject: (reason: any) => any,
+ ...mockedResponses: any[]
+ ) {
+ link = mockSingleLink({
+ request: { query: queryWithTypename } as any,
+ result,
+ }, ...mockedResponses).setOnError(reject);
client = new ApolloClient({
link,
@@ -138,6 +138,8 @@ describe('mutation results', () => {
}
return null;
},
+ // Passing an empty map enables warnings about missing fields:
+ possibleTypes: {},
}),
});
@@ -147,15 +149,16 @@ describe('mutation results', () => {
});
}
- function setupDelayObsHandle(delay: number, ...mockedResponses: any[]) {
- link = mockSingleLink(
- {
- request: { query: queryWithTypename } as any,
- result,
- delay,
- },
- ...mockedResponses,
- );
+ function setupDelayObsHandle(
+ reject: (reason: any) => any,
+ delay: number,
+ ...mockedResponses: any[]
+ ) {
+ link = mockSingleLink({
+ request: { query: queryWithTypename } as any,
+ result,
+ delay,
+ }, ...mockedResponses).setOnError(reject);
client = new ApolloClient({
link,
@@ -166,6 +169,8 @@ describe('mutation results', () => {
}
return null;
},
+ // Passing an empty map enables warnings about missing fields:
+ possibleTypes: {},
}),
});
@@ -175,20 +180,21 @@ describe('mutation results', () => {
});
}
- function setup(...mockedResponses: any[]) {
- const obsHandle = setupObsHandle(...mockedResponses);
+ function setup(
+ reject: (reason: any) => any,
+ ...mockedResponses: any[]
+ ) {
+ const obsHandle = setupObsHandle(reject, ...mockedResponses);
return obsHandle.result();
}
- it('correctly primes cache for tests', () => {
- return setup().then(() =>
- client.query({
- query,
- }),
- );
+ itAsync('correctly primes cache for tests', (resolve, reject) => {
+ return setup(reject).then(
+ () => client.query({ query })
+ ).then(resolve, reject);
});
- it('correctly integrates field changes by default', () => {
+ itAsync('correctly integrates field changes by default', (resolve, reject) => {
const mutation = gql`
mutation setCompleted {
setCompleted(todoId: "3") {
@@ -211,7 +217,7 @@ describe('mutation results', () => {
},
};
- return setup({
+ return setup(reject, {
request: { query: mutation },
result: mutationResult,
})
@@ -223,9 +229,11 @@ describe('mutation results', () => {
})
.then((newResult: any) => {
expect(newResult.data.todoList.todos[0].completed).toBe(true);
- });
+ })
+ .then(resolve, reject);
});
- it('correctly integrates field changes by default with variables', done => {
+
+ itAsync('correctly integrates field changes by default with variables', (resolve, reject) => {
const query = gql`
query getMini($id: ID!) {
mini(id: $id) {
@@ -245,28 +253,25 @@ describe('mutation results', () => {
}
`;
- const link = mockSingleLink(
- {
- request: {
- query,
- variables: { id: 1 },
- } as any,
- delay: 100,
- result: {
- data: { mini: { id: 1, cover: 'image', __typename: 'Mini' } },
- },
+ const link = mockSingleLink({
+ request: {
+ query,
+ variables: { id: 1 },
+ } as any,
+ delay: 100,
+ result: {
+ data: { mini: { id: 1, cover: 'image', __typename: 'Mini' } },
},
- {
- request: {
- query: mutation,
- variables: { signature: '1234' },
- } as any,
- delay: 150,
- result: {
- data: { mini: { id: 1, cover: 'image2', __typename: 'Mini' } },
- },
+ }, {
+ request: {
+ query: mutation,
+ variables: { signature: '1234' },
+ } as any,
+ delay: 150,
+ result: {
+ data: { mini: { id: 1, cover: 'image2', __typename: 'Mini' } },
},
- );
+ }).setOnError(reject);
interface Data {
mini: { id: number; cover: string; __typename: string };
@@ -298,22 +303,22 @@ describe('mutation results', () => {
setTimeout(() => {
if (count === 0)
- done.fail(
+ reject(
new Error('mutate did not re-call observable with next value'),
);
}, 250);
}
if (count === 1) {
expect(result.data.mini.cover).toBe('image2');
- done();
+ resolve();
}
count++;
},
- error: done.fail,
+ error: reject,
});
});
- it("should warn when the result fields don't match the query fields", () => {
+ itAsync("should warn when the result fields don't match the query fields", (resolve, reject) => {
let handle: any;
let subscriptionHandle: Subscription;
let counter = 0;
@@ -365,6 +370,7 @@ describe('mutation results', () => {
return withWarning(() => {
return setup(
+ reject,
{
request: { query: queryTodos },
result: queryTodosResult,
@@ -401,7 +407,8 @@ describe('mutation results', () => {
},
});
})
- .then(() => subscriptionHandle.unsubscribe());
+ .then(() => subscriptionHandle.unsubscribe())
+ .then(resolve, reject);
}, /Missing field description/);
});
@@ -431,9 +438,9 @@ describe('mutation results', () => {
},
};
- it('analogous of ARRAY_INSERT', () => {
+ itAsync('analogous of ARRAY_INSERT', (resolve, reject) => {
let subscriptionHandle: Subscription;
- return setup({
+ return setup(reject, {
request: { query: mutation },
result: mutationResult,
})
@@ -479,10 +486,11 @@ describe('mutation results', () => {
expect(newResult.data.todoList.todos[0].text).toBe(
'This one was created with a mutation.',
);
- });
+ })
+ .then(resolve, reject);
});
- it('does not fail if optional query variables are not supplied', () => {
+ itAsync('does not fail if optional query variables are not supplied', (resolve, reject) => {
let subscriptionHandle: Subscription;
const mutationWithVars = gql`
mutation createTodo($requiredVar: String!, $optionalVar: String) {
@@ -501,7 +509,7 @@ describe('mutation results', () => {
requiredVar: 'x',
// optionalVar: 'y',
};
- return setup({
+ return setup(reject, {
request: {
query: mutationWithVars,
variables,
@@ -554,11 +562,12 @@ describe('mutation results', () => {
expect(newResult.data.todoList.todos[0].text).toBe(
'This one was created with a mutation.',
);
- });
+ })
+ .then(resolve, reject);
});
- it('does not fail if the query did not complete correctly', () => {
- const obsHandle = setupObsHandle({
+ itAsync('does not fail if the query did not complete correctly', (resolve, reject) => {
+ const obsHandle = setupObsHandle(reject, {
request: { query: mutation },
result: mutationResult,
});
@@ -582,11 +591,11 @@ describe('mutation results', () => {
return state;
},
},
- });
+ }).then(resolve, reject);
});
- it('does not fail if the query did not finish loading', () => {
- const obsHandle = setupDelayObsHandle(15, {
+ itAsync('does not fail if the query did not finish loading', (resolve, reject) => {
+ const obsHandle = setupDelayObsHandle(reject, 15, {
request: { query: mutation },
result: mutationResult,
});
@@ -608,11 +617,12 @@ describe('mutation results', () => {
return state;
},
},
- });
+ }).then(resolve, reject);
});
- it('does not make next queries fail if a mutation fails', done => {
+ itAsync('does not make next queries fail if a mutation fails', (resolve, reject) => {
const obsHandle = setupObsHandle(
+ error => { throw error },
{
request: { query: mutation },
result: { errors: [new Error('mock error')] },
@@ -642,7 +652,7 @@ describe('mutation results', () => {
},
})
.then(
- () => done.fail(new Error('Mutation should have failed')),
+ () => reject(new Error('Mutation should have failed')),
() =>
client.mutate({
mutation,
@@ -657,15 +667,15 @@ describe('mutation results', () => {
}),
)
.then(
- () => done.fail(new Error('Mutation should have failed')),
+ () => reject(new Error('Mutation should have failed')),
() => obsHandle.refetch(),
)
- .then(() => done(), done.fail);
+ .then(resolve, reject);
},
});
});
- it('error handling in reducer functions', () => {
+ itAsync('error handling in reducer functions', (resolve, reject) => {
const oldError = console.error;
const errors: any[] = [];
console.error = (msg: string) => {
@@ -673,7 +683,7 @@ describe('mutation results', () => {
};
let subscriptionHandle: Subscription;
- return setup({
+ return setup(reject, {
request: { query: mutation },
result: mutationResult,
})
@@ -703,11 +713,12 @@ describe('mutation results', () => {
expect(errors).toHaveLength(1);
expect(errors[0].message).toBe(`Hello... It's me.`);
console.error = oldError;
- });
+ })
+ .then(resolve, reject);
});
});
- it('does not fail if one of the previous queries did not complete correctly', done => {
+ itAsync('does not fail if one of the previous queries did not complete correctly', (resolve, reject) => {
const variableQuery = gql`
query Echo($message: String) {
echo(message: $message)
@@ -750,20 +761,16 @@ describe('mutation results', () => {
},
};
- link = mockSingleLink(
- {
- request: { query: variableQuery, variables: variables1 } as any,
- result: result1,
- },
- {
- request: { query: variableQuery, variables: variables2 } as any,
- result: result2,
- },
- {
- request: { query: resetMutation } as any,
- result: resetMutationResult,
- },
- );
+ link = mockSingleLink({
+ request: { query: variableQuery, variables: variables1 } as any,
+ result: result1,
+ }, {
+ request: { query: variableQuery, variables: variables2 } as any,
+ result: result2,
+ }, {
+ request: { query: resetMutation } as any,
+ result: resetMutationResult,
+ }).setOnError(reject);
client = new ApolloClient({
link,
@@ -777,7 +784,7 @@ describe('mutation results', () => {
const firstSubs = watchedQuery.subscribe({
next: () => null,
- error: done.fail,
+ error: reject,
});
// Cancel the query right away!
@@ -799,7 +806,7 @@ describe('mutation results', () => {
});
} else if (yieldCount === 2) {
expect(data.echo).toBe('0');
- done();
+ resolve();
}
},
error: () => {
@@ -810,7 +817,7 @@ describe('mutation results', () => {
watchedQuery.refetch(variables2);
});
- it('allows mutations with optional arguments', done => {
+ itAsync('allows mutations with optional arguments', (resolve, reject) => {
let count = 0;
client = new ApolloClient({
@@ -877,18 +884,19 @@ describe('mutation results', () => {
.then(() => {
expect((client.cache as InMemoryCache).extract()).toEqual({
ROOT_MUTATION: {
+ __typename: 'Mutation',
'result({"a":1,"b":2})': 'hello',
'result({"a":1,"c":3})': 'world',
'result({"b":2,"c":3})': 'goodbye',
'result({})': 'moon',
},
});
- done();
+ resolve();
})
- .catch(done.fail);
+ .catch(reject);
});
- it('allows mutations with default values', done => {
+ itAsync('allows mutations with default values', (resolve, reject) => {
let count = 0;
client = new ApolloClient({
@@ -954,17 +962,18 @@ describe('mutation results', () => {
.then(() => {
expect((client.cache as InMemoryCache).extract()).toEqual({
ROOT_MUTATION: {
+ __typename: 'Mutation',
'result({"a":1,"b":"water"})': 'hello',
'result({"a":2,"b":"cheese","c":3})': 'world',
'result({"a":1,"b":"cheese","c":3})': 'goodbye',
},
});
- done();
+ resolve();
})
- .catch(done.fail);
+ .catch(reject);
});
- it('will pass null to the network interface when provided', done => {
+ itAsync('will pass null to the network interface when provided', (resolve, reject) => {
let count = 0;
client = new ApolloClient({
@@ -1031,14 +1040,15 @@ describe('mutation results', () => {
.then(() => {
expect((client.cache as InMemoryCache).extract()).toEqual({
ROOT_MUTATION: {
+ __typename: 'Mutation',
'result({"a":1,"b":2,"c":null})': 'hello',
'result({"a":1,"b":null,"c":3})': 'world',
'result({"a":null,"b":null,"c":null})': 'moon',
},
});
- done();
+ resolve();
})
- .catch(done.fail);
+ .catch(reject);
});
describe('store transaction updater', () => {
@@ -1067,9 +1077,9 @@ describe('mutation results', () => {
},
};
- it('analogous of ARRAY_INSERT', () => {
+ itAsync('analogous of ARRAY_INSERT', (resolve, reject) => {
let subscriptionHandle: Subscription;
- return setup({
+ return setup(reject, {
request: { query: mutation },
result: mutationResult,
})
@@ -1131,10 +1141,11 @@ describe('mutation results', () => {
expect(newResult.data.todoList.todos[0].text).toBe(
'This one was created with a mutation.',
);
- });
+ })
+ .then(resolve, reject);
});
- it('does not fail if optional query variables are not supplied', () => {
+ itAsync('does not fail if optional query variables are not supplied', (resolve, reject) => {
let subscriptionHandle: Subscription;
const mutationWithVars = gql`
mutation createTodo($requiredVar: String!, $optionalVar: String) {
@@ -1153,7 +1164,7 @@ describe('mutation results', () => {
requiredVar: 'x',
// optionalVar: 'y',
};
- return setup({
+ return setup(reject, {
request: {
query: mutationWithVars,
variables,
@@ -1222,11 +1233,13 @@ describe('mutation results', () => {
expect(newResult.data.todoList.todos[0].text).toBe(
'This one was created with a mutation.',
);
- });
+ })
+ .then(resolve, reject);
});
- it('does not make next queries fail if a mutation fails', done => {
+ itAsync('does not make next queries fail if a mutation fails', (resolve, reject) => {
const obsHandle = setupObsHandle(
+ error => { throw error },
{
request: { query: mutation },
result: { errors: [new Error('mock error')] },
@@ -1273,7 +1286,7 @@ describe('mutation results', () => {
},
})
.then(
- () => done.fail(new Error('Mutation should have failed')),
+ () => reject(new Error('Mutation should have failed')),
() =>
client.mutate({
mutation,
@@ -1309,14 +1322,15 @@ describe('mutation results', () => {
}),
)
.then(
- () => done.fail(new Error('Mutation should have failed')),
+ () => reject(new Error('Mutation should have failed')),
() => obsHandle.refetch(),
)
- .then(() => done(), done.fail);
+ .then(resolve, reject);
},
});
});
- it('error handling in reducer functions', () => {
+
+ itAsync('error handling in reducer functions', (resolve, reject) => {
const oldError = console.error;
const errors: any[] = [];
console.error = (msg: string) => {
@@ -1324,7 +1338,7 @@ describe('mutation results', () => {
};
let subscriptionHandle: Subscription;
- return setup({
+ return setup(reject, {
request: { query: mutation },
result: mutationResult,
})
@@ -1352,7 +1366,8 @@ describe('mutation results', () => {
expect(errors).toHaveLength(1);
expect(errors[0].message).toBe(`Hello... It's me.`);
console.error = oldError;
- });
+ })
+ .then(resolve, reject);
});
});
});
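
The file above is also representative of the done-to-itAsync conversion: each test body now receives (resolve, reject) and settles an explicit promise. A sketch of how such a wrapper can be built on top of Jest's it; the shipped helper is whatever src/utilities/testing/itAsync.ts exports, so treat this as illustrative only:

// Wrap Jest's `it` so the body drives an explicit promise; the test
// passes when resolve() is called and fails when reject() is called.
function itAsyncSketch(
  description: string,
  body: (resolve: () => void, reject: (reason?: any) => void) => any,
) {
  it(description, () => new Promise<void>((resolve, reject) => {
    body(resolve, reject);
  }));
}

This also explains why the converted tests end with .then(resolve, reject): the outcome of the chained assertions is forwarded into the wrapper's promise.
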
diff --git a/packages/apollo-client/src/__tests__/optimistic.ts b/src/__tests__/optimistic.ts
similarity index 88%
rename from packages/apollo-client/src/__tests__/optimistic.ts
rename to src/__tests__/optimistic.ts
--- a/packages/apollo-client/src/__tests__/optimistic.ts
+++ b/src/__tests__/optimistic.ts
@@ -1,18 +1,17 @@
import { from } from 'rxjs';
import { take, toArray, map } from 'rxjs/operators';
import { assign, cloneDeep } from 'lodash';
-import { addTypenameToDocument } from 'apollo-utilities';
-import { InMemoryCache } from 'apollo-cache-inmemory';
-
-import { mockSingleLink } from '../__mocks__/mockLinks';
+import gql from 'graphql-tag';
+import { mockSingleLink } from '../utilities/testing/mocking/mockLink';
import { MutationQueryReducersMap } from '../core/types';
-import { Subscription } from '../util/Observable';
-
-import ApolloClient from '../';
-
-import gql from 'graphql-tag';
-import { stripSymbols } from 'apollo-utilities';
+import { Subscription } from '../utilities/observables/Observable';
+import { ApolloClient } from '../';
+import { addTypenameToDocument } from '../utilities/graphql/transform';
+import { makeReference } from '../utilities/graphql/storeUtils';
+import { stripSymbols } from '../utilities/testing/stripSymbols';
+import { itAsync } from '../utilities/testing/itAsync';
+import { InMemoryCache } from '../cache/inmemory/inMemoryCache';
describe('optimistic mutation results', () => {
const query = gql`
@@ -101,14 +100,14 @@ describe('optimistic mutation results', () => {
let client: ApolloClient;
let link: any;
- function setup(...mockedResponses: any[]) {
- link = mockSingleLink(
- {
- request: { query },
- result,
- },
- ...mockedResponses,
- );
+ function setup(
+ reject: (reason: any) => any,
+ ...mockedResponses: any[]
+ ) {
+ link = mockSingleLink({
+ request: { query },
+ result,
+ }, ...mockedResponses).setOnError(reject);
client = new ApolloClient({
link,
@@ -190,9 +189,9 @@ describe('optimistic mutation results', () => {
},
};
- it('handles a single error for a single mutation', async () => {
+ itAsync('handles a single error for a single mutation', async (resolve, reject) => {
expect.assertions(6);
- await setup({
+ await setup(reject, {
request: { query: mutation },
error: new Error('forbidden (test error)'),
});
@@ -217,12 +216,15 @@ describe('optimistic mutation results', () => {
expect((dataInStore['TodoList5'] as any).todos.length).toBe(3);
expect(stripSymbols(dataInStore)).not.toHaveProperty('Todo99');
}
+
+ resolve();
});
- it('handles errors produced by one mutation in a series', async () => {
+ itAsync('handles errors produced by one mutation in a series', async (resolve, reject) => {
expect.assertions(10);
let subscriptionHandle: Subscription;
await setup(
+ reject,
{
request: { query: mutation },
error: new Error('forbidden (test error)'),
@@ -274,19 +276,22 @@ describe('optimistic mutation results', () => {
await Promise.all([promise, promise2]);
subscriptionHandle.unsubscribe();
- const dataInStore = (client.cache as InMemoryCache).extract(true);
- expect((dataInStore['TodoList5'] as any).todos.length).toBe(4);
- expect(stripSymbols(dataInStore)).not.toHaveProperty('Todo99');
- expect(dataInStore).toHaveProperty('Todo66');
- expect((dataInStore['TodoList5'] as any).todos).toContainEqual(
- realIdValue('Todo66', 'Todo'),
- );
- expect((dataInStore['TodoList5'] as any).todos).not.toContainEqual(
- realIdValue('Todo99', 'Todo'),
- );
+ {
+ const dataInStore = (client.cache as InMemoryCache).extract(true);
+ expect((dataInStore['TodoList5'] as any).todos.length).toBe(4);
+ expect(stripSymbols(dataInStore)).not.toHaveProperty('Todo99');
+ expect(dataInStore).toHaveProperty('Todo66');
+ expect((dataInStore['TodoList5'] as any).todos).toContainEqual(
+ makeReference('Todo66'),
+ );
+ expect((dataInStore['TodoList5'] as any).todos).not.toContainEqual(
+ makeReference('Todo99'),
+ );
+ resolve();
+ }
});
- it('can run 2 mutations concurrently and handles all intermediate states well', async () => {
+ itAsync('can run 2 mutations concurrently and handles all intermediate states well', async (resolve, reject) => {
expect.assertions(34);
function checkBothMutationsAreApplied(
expectedText1: any,
@@ -298,16 +303,17 @@ describe('optimistic mutation results', () => {
expect(dataInStore).toHaveProperty('Todo66');
// <any> can be removed once @types/chai adds deepInclude
expect((dataInStore['TodoList5'] as any).todos).toContainEqual(
- realIdValue('Todo66', 'Todo'),
+ makeReference('Todo66'),
);
expect((dataInStore['TodoList5'] as any).todos).toContainEqual(
- realIdValue('Todo99', 'Todo'),
+ makeReference('Todo99'),
);
expect((dataInStore['Todo99'] as any).text).toBe(expectedText1);
expect((dataInStore['Todo66'] as any).text).toBe(expectedText2);
}
let subscriptionHandle: Subscription;
await setup(
+ reject,
{
request: { query: mutation },
result: mutationResult,
@@ -381,6 +387,8 @@ describe('optimistic mutation results', () => {
'This one was created with a mutation.',
'Second mutation.',
);
+
+ resolve();
});
});
@@ -416,10 +424,10 @@ describe('optimistic mutation results', () => {
});
};
- it('handles a single error for a single mutation', async () => {
+ itAsync('handles a single error for a single mutation', async (resolve, reject) => {
expect.assertions(6);
try {
- await setup({
+ await setup(reject, {
request: { query: mutation },
error: new Error('forbidden (test error)'),
});
@@ -445,12 +453,15 @@ describe('optimistic mutation results', () => {
expect((dataInStore['TodoList5'] as any).todos.length).toBe(3);
expect(stripSymbols(dataInStore)).not.toHaveProperty('Todo99');
}
+
+ resolve();
});
- it('handles errors produced by one mutation in a series', async () => {
+ itAsync('handles errors produced by one mutation in a series', async (resolve, reject) => {
expect.assertions(10);
let subscriptionHandle: Subscription;
await setup(
+ reject,
{
request: { query: mutation },
error: new Error('forbidden (test error)'),
@@ -502,19 +513,22 @@ describe('optimistic mutation results', () => {
await Promise.all([promise, promise2]);
subscriptionHandle.unsubscribe();
- const dataInStore = (client.cache as InMemoryCache).extract(true);
- expect((dataInStore['TodoList5'] as any).todos.length).toBe(4);
- expect(stripSymbols(dataInStore)).not.toHaveProperty('Todo99');
- expect(dataInStore).toHaveProperty('Todo66');
- expect((dataInStore['TodoList5'] as any).todos).toContainEqual(
- realIdValue('Todo66', 'Todo'),
- );
- expect((dataInStore['TodoList5'] as any).todos).not.toContainEqual(
- realIdValue('Todo99', 'Todo'),
- );
+ {
+ const dataInStore = (client.cache as InMemoryCache).extract(true);
+ expect((dataInStore['TodoList5'] as any).todos.length).toBe(4);
+ expect(stripSymbols(dataInStore)).not.toHaveProperty('Todo99');
+ expect(dataInStore).toHaveProperty('Todo66');
+ expect((dataInStore['TodoList5'] as any).todos).toContainEqual(
+ makeReference('Todo66'),
+ );
+ expect((dataInStore['TodoList5'] as any).todos).not.toContainEqual(
+ makeReference('Todo99'),
+ );
+ resolve();
+ }
});
- it('can run 2 mutations concurrently and handles all intermediate states well', async () => {
+ itAsync('can run 2 mutations concurrently and handles all intermediate states well', async (resolve, reject) => {
expect.assertions(34);
function checkBothMutationsAreApplied(
expectedText1: any,
@@ -525,16 +539,17 @@ describe('optimistic mutation results', () => {
expect(dataInStore).toHaveProperty('Todo99');
expect(dataInStore).toHaveProperty('Todo66');
expect((dataInStore['TodoList5'] as any).todos).toContainEqual(
- realIdValue('Todo66', 'Todo'),
+ makeReference('Todo66'),
);
expect((dataInStore['TodoList5'] as any).todos).toContainEqual(
- realIdValue('Todo99', 'Todo'),
+ makeReference('Todo99'),
);
expect((dataInStore['Todo99'] as any).text).toBe(expectedText1);
expect((dataInStore['Todo66'] as any).text).toBe(expectedText2);
}
let subscriptionHandle: Subscription;
await setup(
+ reject,
{
request: { query: mutation },
result: mutationResult,
@@ -609,6 +624,8 @@ describe('optimistic mutation results', () => {
'This one was created with a mutation.',
'Second mutation.',
);
+
+ resolve();
});
});
});
@@ -679,12 +696,12 @@ describe('optimistic mutation results', () => {
}
`;
- it(
+ itAsync(
'should read the optimistic response of a mutation when making an ' +
'ApolloClient.readQuery() call, if the `optimistic` param is set to ' +
'true',
- () => {
- return setup({
+ (resolve, reject) => {
+ return setup(reject, {
request: { query: todoListMutation },
result: todoListMutationResult,
}).then(() => {
@@ -692,22 +709,22 @@ describe('optimistic mutation results', () => {
mutation: todoListMutation,
optimisticResponse: todoListOptimisticResponse,
update: (proxy, mResult: any) => {
- const data = client.readQuery({ query: todoListQuery }, true);
+ const data = proxy.readQuery({ query: todoListQuery }, true);
expect(data.todoList.todos[0].text).toEqual(
todoListOptimisticResponse.createTodo.todos[0].text,
);
},
});
- });
+ }).then(resolve, reject);
},
);
- it(
+ itAsync(
'should not read the optimistic response of a mutation when making ' +
'an ApolloClient.readQuery() call, if the `optimistic` param is set ' +
'to false',
- () => {
- return setup({
+ (resolve, reject) => {
+ return setup(reject, {
request: { query: todoListMutation },
result: todoListMutationResult,
}).then(() => {
@@ -716,11 +733,11 @@ describe('optimistic mutation results', () => {
optimisticResponse: todoListOptimisticResponse,
update: (proxy, mResult: any) => {
const incomingText = mResult.data.createTodo.todos[0].text;
- const data = client.readQuery({ query: todoListQuery }, false);
+ const data = proxy.readQuery({ query: todoListQuery }, false);
expect(data.todoList.todos[0].text).toEqual(incomingText);
},
});
- });
+ }).then(resolve, reject);
},
);
@@ -735,12 +752,12 @@ describe('optimistic mutation results', () => {
}
`;
- it(
+ itAsync(
'should read the optimistic response of a mutation when making an ' +
'ApolloClient.readFragment() call, if the `optimistic` param is set ' +
'to true',
- () => {
- return setup({
+ (resolve, reject) => {
+ return setup(reject, {
request: { query: todoListMutation },
result: todoListMutationResult,
}).then(() => {
@@ -748,7 +765,7 @@ describe('optimistic mutation results', () => {
mutation: todoListMutation,
optimisticResponse: todoListOptimisticResponse,
update: (proxy, mResult: any) => {
- const data: any = client.readFragment(
+ const data: any = proxy.readFragment(
{
id: 'TodoList5',
fragment: todoListFragment,
@@ -760,16 +777,16 @@ describe('optimistic mutation results', () => {
);
},
});
- });
+ }).then(resolve, reject);
},
);
- it(
+ itAsync(
'should not read the optimistic response of a mutation when making ' +
'an ApolloClient.readFragment() call, if the `optimistic` param is ' +
'set to false',
- () => {
- return setup({
+ (resolve, reject) => {
+ return setup(reject, {
request: { query: todoListMutation },
result: todoListMutationResult,
}).then(() => {
@@ -778,7 +795,7 @@ describe('optimistic mutation results', () => {
optimisticResponse: todoListOptimisticResponse,
update: (proxy, mResult: any) => {
const incomingText = mResult.data.createTodo.todos[0].text;
- const data: any = client.readFragment(
+ const data: any = proxy.readFragment(
{
id: 'TodoList5',
fragment: todoListFragment,
@@ -788,7 +805,7 @@ describe('optimistic mutation results', () => {
expect(data.todos[0].text).toEqual(incomingText);
},
});
- });
+ }).then(resolve, reject);
},
);
});
@@ -830,10 +847,10 @@ describe('optimistic mutation results', () => {
},
});
- it('will use a passed variable in optimisticResponse', async () => {
+ itAsync('will use a passed variable in optimisticResponse', async (resolve, reject) => {
expect.assertions(6);
let subscriptionHandle: Subscription;
- await setup({
+ await setup(reject, {
request: { query: mutation, variables },
result: mutationResult,
});
@@ -898,6 +915,8 @@ describe('optimistic mutation results', () => {
expect(newResult.data.todoList.todos[0].text).toEqual(
'This one was created with a mutation.',
);
+
+ resolve();
});
});
@@ -966,10 +985,10 @@ describe('optimistic mutation results', () => {
},
};
- it('will insert a single item to the beginning', async () => {
+ itAsync('will insert a single item to the beginning', async (resolve, reject) => {
expect.assertions(7);
let subscriptionHandle: Subscription;
- await setup({
+ await setup(reject, {
request: { query: mutation },
result: mutationResult,
});
@@ -988,13 +1007,19 @@ describe('optimistic mutation results', () => {
mutation,
optimisticResponse,
updateQueries: {
- todoList: (prev, options) => {
+ todoList(prev, options) {
const mResult = options.mutationResult as any;
expect(mResult.data.createTodo.id).toEqual('99');
-
- const state = cloneDeep(prev) as any;
- state.todoList.todos.unshift(mResult.data.createTodo);
- return state;
+ return {
+ ...prev,
+ todoList: {
+ ...prev.todoList,
+ todos: [
+ mResult.data.createTodo,
+ ...prev.todoList.todos,
+ ],
+ },
+ };
},
},
});
@@ -1017,12 +1042,15 @@ describe('optimistic mutation results', () => {
expect(newResult.data.todoList.todos[0].text).toEqual(
'This one was created with a mutation.',
);
+
+ resolve();
});
- it('two array insert like mutations', async () => {
+ itAsync('two array insert like mutations', async (resolve, reject) => {
expect.assertions(9);
let subscriptionHandle: Subscription;
await setup(
+ reject,
{
request: { query: mutation },
result: mutationResult,
@@ -1107,12 +1135,15 @@ describe('optimistic mutation results', () => {
expect(newResult.data.todoList.todos[1].text).toEqual(
'This one was created with a mutation.',
);
+
+ resolve();
});
- it('two mutations, one fails', async () => {
+ itAsync('two mutations, one fails', async (resolve, reject) => {
expect.assertions(10);
let subscriptionHandle: Subscription;
await setup(
+ reject,
{
request: { query: mutation },
error: new Error('forbidden (test error)'),
@@ -1184,36 +1215,35 @@ describe('optimistic mutation results', () => {
await Promise.all([promise, promise2]);
subscriptionHandle.unsubscribe();
- const dataInStore = (client.cache as InMemoryCache).extract(true);
- expect((dataInStore['TodoList5'] as any).todos.length).toEqual(4);
- expect(stripSymbols(dataInStore)).not.toHaveProperty('Todo99');
- expect(dataInStore).toHaveProperty('Todo66');
- expect((dataInStore['TodoList5'] as any).todos).toContainEqual(
- realIdValue('Todo66', 'Todo'),
- );
- expect((dataInStore['TodoList5'] as any).todos).not.toContainEqual(
- realIdValue('Todo99', 'Todo'),
- );
+ {
+ const dataInStore = (client.cache as InMemoryCache).extract(true);
+ expect((dataInStore['TodoList5'] as any).todos.length).toEqual(4);
+ expect(stripSymbols(dataInStore)).not.toHaveProperty('Todo99');
+ expect(dataInStore).toHaveProperty('Todo66');
+ expect((dataInStore['TodoList5'] as any).todos).toContainEqual(
+ makeReference('Todo66'),
+ );
+ expect((dataInStore['TodoList5'] as any).todos).not.toContainEqual(
+ makeReference('Todo99'),
+ );
+ resolve();
+ }
});
- it('will handle dependent updates', async () => {
+ itAsync('will handle dependent updates', async (resolve, reject) => {
expect.assertions(1);
- link = mockSingleLink(
- {
- request: { query },
- result,
- },
- {
- request: { query: mutation },
- result: mutationResult,
- delay: 10,
- },
- {
- request: { query: mutation },
- result: mutationResult2,
- delay: 20,
- },
- );
+ link = mockSingleLink({
+ request: { query },
+ result,
+ }, {
+ request: { query: mutation },
+ result: mutationResult,
+ delay: 10,
+ }, {
+ request: { query: mutation },
+ result: mutationResult2,
+ delay: 20,
+ }).setOnError(reject);
const customOptimisticResponse1 = {
__typename: 'Mutation',
@@ -1308,6 +1338,8 @@ describe('optimistic mutation results', () => {
...defaultTodos,
],
]);
+
+ resolve();
});
});
@@ -1366,10 +1398,10 @@ describe('optimistic mutation results', () => {
},
};
- it('will insert a single item to the beginning', async () => {
+ itAsync('will insert a single item to the beginning', async (resolve, reject) => {
expect.assertions(6);
let subscriptionHandle: Subscription;
- await setup({
+ await setup(reject, {
request: { query: mutation },
delay: 300,
result: mutationResult,
@@ -1386,12 +1418,12 @@ describe('optimistic mutation results', () => {
});
let firstTime = true;
- let before = new Date();
+ let before = Date.now();
const promise = client.mutate({
mutation,
optimisticResponse,
update: (proxy, mResult: any) => {
- const after = new Date();
+ const after = Date.now();
const duration = after - before;
if (firstTime) {
expect(duration < 300).toBe(true);
@@ -1401,13 +1433,18 @@ describe('optimistic mutation results', () => {
}
let data = proxy.readQuery({ query });
- data.todoList.todos = [
- mResult.data.createTodo,
- ...data.todoList.todos,
- ];
proxy.writeQuery({
query,
- data,
+ data: {
+ ...data,
+ todoList: {
+ ...data.todoList,
+ todos: [
+ mResult.data.createTodo,
+ ...data.todoList.todos,
+ ],
+ },
+ },
});
},
});
@@ -1428,12 +1465,15 @@ describe('optimistic mutation results', () => {
'This one was created with a mutation.',
);
});
+
+ resolve();
});
- it('two array insert like mutations', async () => {
+ itAsync('two array insert like mutations', async (resolve, reject) => {
expect.assertions(9);
let subscriptionHandle: Subscription;
await setup(
+ reject,
{
request: { query: mutation },
result: mutationResult,
@@ -1536,12 +1576,15 @@ describe('optimistic mutation results', () => {
expect(newResult.data.todoList.todos[1].text).toBe(
'This one was created with a mutation.',
);
+
+ resolve();
});
- it('two mutations, one fails', async () => {
+ itAsync('two mutations, one fails', async (resolve, reject) => {
expect.assertions(10);
let subscriptionHandle: Subscription;
await setup(
+ reject,
{
request: { query: mutation },
error: new Error('forbidden (test error)'),
@@ -1633,36 +1676,35 @@ describe('optimistic mutation results', () => {
await Promise.all([promise, promise2]);
subscriptionHandle.unsubscribe();
- const dataInStore = (client.cache as InMemoryCache).extract(true);
- expect((dataInStore['TodoList5'] as any).todos.length).toBe(4);
- expect(stripSymbols(dataInStore)).not.toHaveProperty('Todo99');
- expect(dataInStore).toHaveProperty('Todo66');
- expect((dataInStore['TodoList5'] as any).todos).toContainEqual(
- realIdValue('Todo66', 'Todo'),
- );
- expect((dataInStore['TodoList5'] as any).todos).not.toContainEqual(
- realIdValue('Todo99', 'Todo'),
- );
+ {
+ const dataInStore = (client.cache as InMemoryCache).extract(true);
+ expect((dataInStore['TodoList5'] as any).todos.length).toBe(4);
+ expect(stripSymbols(dataInStore)).not.toHaveProperty('Todo99');
+ expect(dataInStore).toHaveProperty('Todo66');
+ expect((dataInStore['TodoList5'] as any).todos).toContainEqual(
+ makeReference('Todo66'),
+ );
+ expect((dataInStore['TodoList5'] as any).todos).not.toContainEqual(
+ makeReference('Todo99'),
+ );
+ resolve();
+ }
});
- it('will handle dependent updates', async () => {
+ itAsync('will handle dependent updates', async (resolve, reject) => {
expect.assertions(1);
- link = mockSingleLink(
- {
- request: { query },
- result,
- },
- {
- request: { query: mutation },
- result: mutationResult,
- delay: 10,
- },
- {
- request: { query: mutation },
- result: mutationResult2,
- delay: 20,
- },
- );
+ link = mockSingleLink({
+ request: { query },
+ result,
+ }, {
+ request: { query: mutation },
+ result: mutationResult,
+ delay: 10,
+ }, {
+ request: { query: mutation },
+ result: mutationResult2,
+ delay: 20,
+ }).setOnError(reject);
const customOptimisticResponse1 = {
__typename: 'Mutation',
@@ -1772,6 +1814,8 @@ describe('optimistic mutation results', () => {
...defaultTodos,
],
]);
+
+ resolve();
});
});
});
@@ -1832,24 +1876,23 @@ describe('optimistic mutation - githunt comments', () => {
let client: ApolloClient;
let link: any;
- function setup(...mockedResponses: any[]) {
- link = mockSingleLink(
- {
- request: {
- query: addTypenameToDocument(query),
- variables,
- },
- result,
+ function setup(
+ reject: (reason: any) => any,
+ ...mockedResponses: any[]
+ ) {
+ link = mockSingleLink({
+ request: {
+ query: addTypenameToDocument(query),
+ variables,
},
- {
- request: {
- query: addTypenameToDocument(queryWithFragment),
- variables,
- },
- result,
+ result,
+ }, {
+ request: {
+ query: addTypenameToDocument(queryWithFragment),
+ variables,
},
- ...mockedResponses,
- );
+ result,
+ }, ...mockedResponses).setOnError(reject);
client = new ApolloClient({
link,
@@ -1925,7 +1968,7 @@ describe('optimistic mutation - githunt comments', () => {
},
};
- it('can post a new comment', async () => {
+ itAsync('can post a new comment', async (resolve, reject) => {
expect.assertions(1);
const mutationVariables = {
repoFullName: 'org/repo',
@@ -1933,7 +1976,7 @@ describe('optimistic mutation - githunt comments', () => {
};
let subscriptionHandle: Subscription;
- await setup({
+ await setup(reject, {
request: {
query: addTypenameToDocument(mutation),
variables: mutationVariables,
@@ -1962,14 +2005,7 @@ describe('optimistic mutation - githunt comments', () => {
subscriptionHandle.unsubscribe();
expect(newResult.data.entry.comments.length).toBe(2);
+
+ resolve();
});
});
-
-function realIdValue(id: string, typename: string) {
- return {
- type: 'id',
- generated: false,
- id,
- typename,
- };
-}
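
The deleted realIdValue helper built the pre-3.0 { type: 'id', generated, id, typename } representation of a store reference; the rewritten assertions compare against the flat Reference shape instead. A sketch of that shape, matching what makeReference from ../utilities/graphql/storeUtils produces in the assertions above:

// The flat normalized-cache reference the new toContainEqual checks expect:
interface ReferenceSketch {
  __ref: string;
}

// makeReference('Todo66') yields:
const todo66: ReferenceSketch = { __ref: 'Todo66' };
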
diff --git a/packages/apollo-client/src/__tests__/subscribeToMore.ts b/src/__tests__/subscribeToMore.ts
similarity index 86%
rename from packages/apollo-client/src/__tests__/subscribeToMore.ts
rename to src/__tests__/subscribeToMore.ts
--- a/packages/apollo-client/src/__tests__/subscribeToMore.ts
+++ b/src/__tests__/subscribeToMore.ts
@@ -1,13 +1,14 @@
import gql from 'graphql-tag';
-import { ApolloLink, Operation } from 'apollo-link';
-import { InMemoryCache } from 'apollo-cache-inmemory';
-import { stripSymbols } from 'apollo-utilities';
-
import { DocumentNode, OperationDefinitionNode } from 'graphql';
-import { mockSingleLink, mockObservableLink } from '../__mocks__/mockLinks';
-
-import ApolloClient from '../';
+import { ApolloLink } from '../link/core/ApolloLink';
+import { Operation } from '../link/core/types';
+import { mockSingleLink } from '../utilities/testing/mocking/mockLink';
+import { mockObservableLink } from '../utilities/testing/mocking/mockSubscriptionLink';
+import { ApolloClient } from '../';
+import { InMemoryCache } from '../cache/inmemory/inMemoryCache';
+import { stripSymbols } from '../utilities/testing/stripSymbols';
+import { itAsync } from '../utilities/testing/itAsync';
const isSub = (operation: Operation) =>
(operation.query as DocumentNode).definitions
@@ -58,10 +59,10 @@ describe('subscribeToMore', () => {
name: string;
}
- it('triggers new result from subscription data', done => {
+ itAsync('triggers new result from subscription data', (resolve, reject) => {
let latestResult: any = null;
const wSLink = mockObservableLink();
- const httpLink = mockSingleLink(req1);
+ const httpLink = mockSingleLink(req1).setOnError(reject);
const link = ApolloLink.split(isSub, wSLink, httpLink);
let counter = 0;
@@ -102,7 +103,7 @@ describe('subscribeToMore', () => {
networkStatus: 7,
stale: false,
});
- done();
+ resolve();
}, 15);
for (let i = 0; i < 2; i++) {
@@ -110,10 +111,10 @@ describe('subscribeToMore', () => {
}
});
- it('calls error callback on error', done => {
+ itAsync('calls error callback on error', (resolve, reject) => {
let latestResult: any = null;
const wSLink = mockObservableLink();
- const httpLink = mockSingleLink(req1);
+ const httpLink = mockSingleLink(req1).setOnError(reject);
const link = ApolloLink.split(isSub, wSLink, httpLink);
@@ -160,7 +161,7 @@ describe('subscribeToMore', () => {
});
expect(counter).toBe(2);
expect(errorCount).toBe(1);
- done();
+ resolve();
}, 15);
for (let i = 0; i < 2; i++) {
@@ -168,11 +169,11 @@ describe('subscribeToMore', () => {
}
});
- it('prints unhandled subscription errors to the console', done => {
+ itAsync('prints unhandled subscription errors to the console', (resolve, reject) => {
let latestResult: any = null;
const wSLink = mockObservableLink();
- const httpLink = mockSingleLink(req1);
+ const httpLink = mockSingleLink(req1).setOnError(reject);
const link = ApolloLink.split(isSub, wSLink, httpLink);
@@ -221,7 +222,7 @@ describe('subscribeToMore', () => {
expect(counter).toBe(1);
expect(errorCount).toBe(1);
console.error = consoleErr;
- done();
+ resolve();
}, 15);
for (let i = 0; i < 2; i++) {
@@ -229,33 +230,23 @@ describe('subscribeToMore', () => {
}
});
- it('should not corrupt the cache (#3062)', async done => {
+ itAsync('should not corrupt the cache (#3062)', async (resolve, reject) => {
let latestResult: any = null;
const wSLink = mockObservableLink();
- const httpLink = mockSingleLink(req4);
+ const httpLink = mockSingleLink(req4).setOnError(reject);
const link = ApolloLink.split(isSub, wSLink, httpLink);
let counter = 0;
const client = new ApolloClient({
cache: new InMemoryCache({ addTypename: false }).restore({
- 'ROOT_QUERY.entry.0': {
- value: 1,
- },
- 'ROOT_QUERY.entry.1': {
- value: 2,
- },
ROOT_QUERY: {
entry: [
{
- type: 'id',
- id: 'ROOT_QUERY.entry.0',
- generated: true,
+ value: 1,
},
{
- type: 'id',
- id: 'ROOT_QUERY.entry.1',
- generated: true,
+ value: 2,
},
],
},
@@ -334,11 +325,11 @@ describe('subscribeToMore', () => {
networkStatus: 7,
stale: false,
});
- done();
+ resolve();
});
// TODO add a test that checks that subscriptions are cancelled when obs is unsubscribed from.
- it('allows specification of custom types for variables and payload (#4246)', done => {
+ itAsync('allows specification of custom types for variables and payload (#4246)', (resolve, reject) => {
interface TypedOperation extends Operation {
variables: {
someNumber: number;
@@ -354,7 +345,7 @@ describe('subscribeToMore', () => {
let latestResult: any = null;
const wSLink = mockObservableLink();
- const httpLink = mockSingleLink(typedReq);
+ const httpLink = mockSingleLink(typedReq).setOnError(reject);
const link = ApolloLink.split(isSub, wSLink, httpLink);
let counter = 0;
@@ -402,7 +393,7 @@ describe('subscribeToMore', () => {
networkStatus: 7,
stale: false,
});
- done();
+ resolve();
}, 15);
for (let i = 0; i < 2; i++) {
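
The #3062 hunk above also illustrates the new normalized-cache layout: objects without their own identity are embedded in the parent entity rather than stored under synthesized ids like 'ROOT_QUERY.entry.0'. A restatement of the restored data from that hunk, runnable on its own (import path taken from the same file):

import { InMemoryCache } from '../cache/inmemory/inMemoryCache';

// Identity-less list items now live inline under ROOT_QUERY.entry:
const cache = new InMemoryCache({ addTypename: false }).restore({
  ROOT_QUERY: {
    entry: [{ value: 1 }, { value: 2 }],
  },
});
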
diff --git a/packages/apollo-cache/src/__tests__/__snapshots__/utils.ts.snap b/src/cache/core/__tests__/__snapshots__/utils.ts.snap
similarity index 100%
rename from packages/apollo-cache/src/__tests__/__snapshots__/utils.ts.snap
rename to src/cache/core/__tests__/__snapshots__/utils.ts.snap
diff --git a/packages/apollo-cache/src/__tests__/cache.ts b/src/cache/core/__tests__/cache.ts
similarity index 100%
rename from packages/apollo-cache/src/__tests__/cache.ts
rename to src/cache/core/__tests__/cache.ts
diff --git a/packages/apollo-cache/src/__tests__/utils.ts b/src/cache/core/__tests__/utils.ts
similarity index 99%
rename from packages/apollo-cache/src/__tests__/utils.ts
rename to src/cache/core/__tests__/utils.ts
--- a/packages/apollo-cache/src/__tests__/utils.ts
+++ b/src/cache/core/__tests__/utils.ts
@@ -1,4 +1,5 @@
import { print } from 'graphql/language/printer';
+
import { queryFromPojo, fragmentFromPojo } from '../utils';
describe('writing data with no query', () => {
diff --git a/src/cache/inmemory/__tests__/__snapshots__/cache.ts.snap b/src/cache/inmemory/__tests__/__snapshots__/cache.ts.snap
new file mode 100644
--- /dev/null
+++ b/src/cache/inmemory/__tests__/__snapshots__/cache.ts.snap
@@ -0,0 +1,229 @@
+// Jest Snapshot v1, https://goo.gl/fbAQLP
+
+exports[`Cache writeFragment will write some deeply nested data into the store at any id (1/2) 1`] = `
+Object {
+ "bar": Object {
+ "i": 7,
+ },
+ "foo": Object {
+ "__typename": "Foo",
+ "e": 4,
+ "h": Object {
+ "__ref": "bar",
+ },
+ },
+}
+`;
+
+exports[`Cache writeFragment will write some deeply nested data into the store at any id (1/2) 2`] = `
+Object {
+ "bar": Object {
+ "i": 7,
+ "j": 8,
+ "k": 9,
+ },
+ "foo": Object {
+ "__typename": "Foo",
+ "e": 4,
+ "f": 5,
+ "g": 6,
+ "h": Object {
+ "__ref": "bar",
+ },
+ },
+}
+`;
+
+exports[`Cache writeFragment will write some deeply nested data into the store at any id (1/2) 3`] = `
+Object {
+ "bar": Object {
+ "__typename": "Bar",
+ "i": 10,
+ "j": 8,
+ "k": 9,
+ },
+ "foo": Object {
+ "__typename": "Foo",
+ "e": 4,
+ "f": 5,
+ "g": 6,
+ "h": Object {
+ "__ref": "bar",
+ },
+ },
+}
+`;
+
+exports[`Cache writeFragment will write some deeply nested data into the store at any id (1/2) 4`] = `
+Object {
+ "bar": Object {
+ "__typename": "Bar",
+ "i": 10,
+ "j": 11,
+ "k": 12,
+ },
+ "foo": Object {
+ "__typename": "Foo",
+ "e": 4,
+ "f": 5,
+ "g": 6,
+ "h": Object {
+ "__ref": "bar",
+ },
+ },
+}
+`;
+
+exports[`Cache writeFragment will write some deeply nested data into the store at any id (1/2) 5`] = `
+Object {
+ "bar": Object {
+ "__typename": "Bar",
+ "i": 7,
+ "j": 8,
+ "k": 9,
+ },
+ "foo": Object {
+ "__typename": "Foo",
+ "e": 4,
+ "f": 5,
+ "g": 6,
+ "h": Object {
+ "__ref": "bar",
+ },
+ },
+}
+`;
+
+exports[`Cache writeFragment will write some deeply nested data into the store at any id (1/2) 6`] = `
+Object {
+ "bar": Object {
+ "__typename": "Bar",
+ "i": 10,
+ "j": 11,
+ "k": 12,
+ },
+ "foo": Object {
+ "__typename": "Foo",
+ "e": 4,
+ "f": 5,
+ "g": 6,
+ "h": Object {
+ "__ref": "bar",
+ },
+ },
+}
+`;
+
+exports[`Cache writeFragment will write some deeply nested data into the store at any id (2/2) 1`] = `
+Object {
+ "bar": Object {
+ "i": 7,
+ },
+ "foo": Object {
+ "__typename": "Foo",
+ "e": 4,
+ "h": Object {
+ "__ref": "bar",
+ },
+ },
+}
+`;
+
+exports[`Cache writeFragment will write some deeply nested data into the store at any id (2/2) 2`] = `
+Object {
+ "bar": Object {
+ "i": 7,
+ "j": 8,
+ "k": 9,
+ },
+ "foo": Object {
+ "__typename": "Foo",
+ "e": 4,
+ "f": 5,
+ "g": 6,
+ "h": Object {
+ "__ref": "bar",
+ },
+ },
+}
+`;
+
+exports[`Cache writeFragment will write some deeply nested data into the store at any id (2/2) 3`] = `
+Object {
+ "bar": Object {
+ "__typename": "Bar",
+ "i": 10,
+ "j": 8,
+ "k": 9,
+ },
+ "foo": Object {
+ "__typename": "Foo",
+ "e": 4,
+ "f": 5,
+ "g": 6,
+ "h": Object {
+ "__ref": "bar",
+ },
+ },
+}
+`;
+
+exports[`Cache writeFragment will write some deeply nested data into the store at any id (2/2) 4`] = `
+Object {
+ "bar": Object {
+ "__typename": "Bar",
+ "i": 10,
+ "j": 11,
+ "k": 12,
+ },
+ "foo": Object {
+ "__typename": "Foo",
+ "e": 4,
+ "f": 5,
+ "g": 6,
+ "h": Object {
+ "__ref": "bar",
+ },
+ },
+}
+`;
+
+exports[`Cache writeFragment will write some deeply nested data into the store at any id (2/2) 5`] = `
+Object {
+ "bar": Object {
+ "__typename": "Bar",
+ "i": 7,
+ "j": 8,
+ "k": 9,
+ },
+ "foo": Object {
+ "__typename": "Foo",
+ "e": 4,
+ "f": 5,
+ "g": 6,
+ "h": Object {
+ "__ref": "bar",
+ },
+ },
+}
+`;
+
+exports[`Cache writeFragment will write some deeply nested data into the store at any id (2/2) 6`] = `
+Object {
+ "bar": Object {
+ "__typename": "Bar",
+ "i": 10,
+ "j": 11,
+ "k": 12,
+ },
+ "foo": Object {
+ "__typename": "Foo",
+ "e": 4,
+ "f": 5,
+ "g": 6,
+ "h": Object {
+ "__ref": "bar",
+ },
+ },
+}
+`;
diff --git a/src/cache/inmemory/__tests__/__snapshots__/roundtrip.ts.snap b/src/cache/inmemory/__tests__/__snapshots__/roundtrip.ts.snap
new file mode 100644
--- /dev/null
+++ b/src/cache/inmemory/__tests__/__snapshots__/roundtrip.ts.snap
@@ -0,0 +1,3 @@
+// Jest Snapshot v1, https://goo.gl/fbAQLP
+
+exports[`writing to the store throws when trying to write an object without id that was previously queried with id 1`] = `"Store error: the application attempted to write an object with no provided id but the store already contains an id of abcd for this object."`;
diff --git a/src/cache/inmemory/__tests__/__snapshots__/writeToStore.ts.snap b/src/cache/inmemory/__tests__/__snapshots__/writeToStore.ts.snap
new file mode 100644
--- /dev/null
+++ b/src/cache/inmemory/__tests__/__snapshots__/writeToStore.ts.snap
@@ -0,0 +1,3 @@
+// Jest Snapshot v1, https://goo.gl/fbAQLP
+
+exports[`writing to the store throws when trying to write an object without id that was previously queried with id 1`] = `"Store error: the application attempted to write an object with no provided id but the store already contains an id of abcd for this object."`;
diff --git a/packages/apollo-cache-inmemory/src/__tests__/cache.ts b/src/cache/inmemory/__tests__/cache.ts
similarity index 87%
rename from packages/apollo-cache-inmemory/src/__tests__/cache.ts
rename to src/cache/inmemory/__tests__/cache.ts
--- a/packages/apollo-cache-inmemory/src/__tests__/cache.ts
+++ b/src/cache/inmemory/__tests__/cache.ts
@@ -1,8 +1,9 @@
import gql, { disableFragmentWarnings } from 'graphql-tag';
-import { stripSymbols } from 'apollo-utilities';
-import { cloneDeep } from 'lodash';
-import { InMemoryCache, InMemoryCacheConfig } from '..';
+import { stripSymbols } from '../../../utilities/testing/stripSymbols';
+import { cloneDeep } from '../../../utilities/common/cloneDeep';
+import { makeReference } from '../../../utilities/graphql/storeUtils';
+import { InMemoryCache, InMemoryCacheConfig } from '../inMemoryCache';
disableFragmentWarnings();
@@ -24,12 +25,6 @@ describe('Cache', () => {
resultCaching: false,
}).restore(cloneDeep(data)),
),
- initialDataForCaches.map(data =>
- new InMemoryCache({
- addTypename: false,
- freezeResults: true,
- }).restore(cloneDeep(data)),
- ),
];
cachesList.forEach((caches, i) => {
@@ -55,11 +50,6 @@ describe('Cache', () => {
...config,
resultCaching: false,
}),
- new InMemoryCache({
- addTypename: false,
- ...config,
- freezeResults: true,
- }),
];
caches.forEach((cache, i) => {
@@ -127,21 +117,13 @@ describe('Cache', () => {
a: 1,
b: 2,
c: 3,
- d: {
- type: 'id',
- id: 'foo',
- generated: false,
- },
+ d: makeReference('foo'),
},
foo: {
e: 4,
f: 5,
g: 6,
- h: {
- type: 'id',
- id: 'bar',
- generated: false,
- },
+ h: makeReference('bar'),
},
bar: {
i: 7,
@@ -391,22 +373,14 @@ describe('Cache', () => {
a: 1,
b: 2,
c: 3,
- d: {
- type: 'id',
- id: 'foo',
- generated: false,
- },
+ d: makeReference('foo'),
},
foo: {
__typename: 'Foo',
e: 4,
f: 5,
g: 6,
- h: {
- type: 'id',
- id: 'bar',
- generated: false,
- },
+ h: makeReference('bar'),
},
bar: {
__typename: 'Bar',
@@ -638,6 +612,7 @@ describe('Cache', () => {
expect((proxy as InMemoryCache).extract()).toEqual({
ROOT_QUERY: {
+ __typename: "Query",
a: 1,
},
});
@@ -654,6 +629,7 @@ describe('Cache', () => {
expect((proxy as InMemoryCache).extract()).toEqual({
ROOT_QUERY: {
+ __typename: "Query",
a: 1,
b: 2,
c: 3,
@@ -673,6 +649,7 @@ describe('Cache', () => {
expect((proxy as InMemoryCache).extract()).toEqual({
ROOT_QUERY: {
+ __typename: "Query",
a: 4,
b: 5,
c: 6,
@@ -698,16 +675,12 @@ describe('Cache', () => {
expect((proxy as InMemoryCache).extract()).toEqual({
ROOT_QUERY: {
+ __typename: "Query",
a: 1,
d: {
- type: 'id',
- id: '$ROOT_QUERY.d',
- generated: true,
+ e: 4,
},
},
- '$ROOT_QUERY.d': {
- e: 4,
- },
});
proxy.writeQuery({
@@ -726,24 +699,16 @@ describe('Cache', () => {
expect((proxy as InMemoryCache).extract()).toEqual({
ROOT_QUERY: {
+ __typename: "Query",
a: 1,
+ // The new value for d overwrites the old value, since there
+ // is no custom merge function defined for Query.d.
d: {
- type: 'id',
- id: '$ROOT_QUERY.d',
- generated: true,
- },
- },
- '$ROOT_QUERY.d': {
- e: 4,
- h: {
- type: 'id',
- id: '$ROOT_QUERY.d.h',
- generated: true,
+ h: {
+ i: 7,
+ },
},
},
- '$ROOT_QUERY.d.h': {
- i: 7,
- },
});
proxy.writeQuery({
@@ -774,30 +739,21 @@ describe('Cache', () => {
expect((proxy as InMemoryCache).extract()).toEqual({
ROOT_QUERY: {
+ __typename: "Query",
a: 1,
b: 2,
c: 3,
d: {
- type: 'id',
- id: '$ROOT_QUERY.d',
- generated: true,
- },
- },
- '$ROOT_QUERY.d': {
- e: 4,
- f: 5,
- g: 6,
- h: {
- type: 'id',
- id: '$ROOT_QUERY.d.h',
- generated: true,
+ e: 4,
+ f: 5,
+ g: 6,
+ h: {
+ i: 7,
+ j: 8,
+ k: 9,
+ },
},
},
- '$ROOT_QUERY.d.h': {
- i: 7,
- j: 8,
- k: 9,
- },
});
},
);
@@ -825,6 +781,7 @@ describe('Cache', () => {
expect((proxy as InMemoryCache).extract()).toEqual({
ROOT_QUERY: {
+ __typename: "Query",
'field({"literal":true,"value":42})': 1,
'field({"literal":false,"value":42})': 2,
},
@@ -855,6 +812,7 @@ describe('Cache', () => {
expect((proxy as InMemoryCache).extract()).toEqual({
ROOT_QUERY: {
+ __typename: "Query",
'field({"literal":true,"value":42})': 1,
'field({"literal":false,"value":null})': 2,
},
@@ -1229,3 +1187,140 @@ describe('Cache', () => {
});
});
});
+
+describe("InMemoryCache#broadcastWatches", function () {
+ it("should keep distinct consumers distinct (issue #5733)", function () {
+ const cache = new InMemoryCache();
+ const query = gql`
+ query {
+ value(arg: $arg) {
+ name
+ }
+ }
+ `;
+
+ const receivedCallbackResults: [string, number, any][] = [];
+
+ let nextWatchId = 1;
+ function watch(arg: number) {
+ const watchId = `id${nextWatchId++}`;
+ cache.watch({
+ query,
+ variables: { arg },
+ optimistic: false,
+ callback(result) {
+ receivedCallbackResults.push([watchId, arg, result]);
+ },
+ });
+ return watchId;
+ }
+
+ const id1 = watch(1);
+ expect(receivedCallbackResults).toEqual([]);
+
+ function write(arg: number, name: string) {
+ cache.writeQuery({
+ query,
+ variables: { arg },
+ data: {
+ value: { name },
+ },
+ });
+ }
+
+ write(1, "one");
+
+ const received1 = [id1, 1, {
+ result: {
+ value: {
+ name: "one",
+ },
+ },
+ complete: true,
+ }];
+
+ expect(receivedCallbackResults).toEqual([
+ received1,
+ ]);
+
+ const id2 = watch(2);
+
+ expect(receivedCallbackResults).toEqual([
+ received1,
+ ]);
+
+ write(2, "two");
+
+ const received2 = [id2, 2, {
+ result: {
+ value: {
+ name: "two",
+ },
+ },
+ complete: true,
+ }];
+
+ expect(receivedCallbackResults).toEqual([
+ received1,
+ // New results:
+ received1,
+ received2,
+ ]);
+
+ const id3 = watch(1);
+ const id4 = watch(1);
+
+ write(1, "one");
+
+ const received3 = [id3, 1, {
+ result: {
+ value: {
+ name: "one",
+ },
+ },
+ complete: true,
+ }];
+
+ const received4 = [id4, 1, {
+ result: {
+ value: {
+ name: "one",
+ },
+ },
+ complete: true,
+ }];
+
+ expect(receivedCallbackResults).toEqual([
+ received1,
+ received1,
+ received2,
+ // New results:
+ received3,
+ received4,
+ ]);
+
+ write(2, "TWO");
+
+ const received2AllCaps = [id2, 2, {
+ result: {
+ value: {
+ name: "TWO",
+ },
+ },
+ complete: true,
+ }];
+
+ expect(receivedCallbackResults).toEqual([
+ received1,
+ received1,
+ received2,
+ received3,
+ received4,
+ // New results:
+ received1,
+ received2AllCaps,
+ received3,
+ received4,
+ ]);
+ });
+});
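
One hunk above notes in a comment that the new value for Query.d overwrites the old one because no custom merge function is defined for that field. For contrast, a hedged sketch of opting into merging via typePolicies (the field name d comes from the test; the policy itself is illustrative, not part of the patch):

import { InMemoryCache } from '../cache/inmemory/inMemoryCache';

// With a merge function, successive writes to Query.d are combined
// instead of the later write replacing the earlier one:
const cache = new InMemoryCache({
  typePolicies: {
    Query: {
      fields: {
        d: {
          merge(existing = {}, incoming) {
            return { ...existing, ...incoming };
          },
        },
      },
    },
  },
});
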
diff --git a/packages/apollo-cache-inmemory/src/__tests__/diffAgainstStore.ts b/src/cache/inmemory/__tests__/diffAgainstStore.ts
similarity index 86%
rename from packages/apollo-cache-inmemory/src/__tests__/diffAgainstStore.ts
rename to src/cache/inmemory/__tests__/diffAgainstStore.ts
--- a/packages/apollo-cache-inmemory/src/__tests__/diffAgainstStore.ts
+++ b/src/cache/inmemory/__tests__/diffAgainstStore.ts
@@ -1,17 +1,16 @@
import gql, { disableFragmentWarnings } from 'graphql-tag';
-import { toIdValue } from 'apollo-utilities';
-import { defaultNormalizedCacheFactory } from '../objectCache';
+import { Reference, makeReference } from '../../../utilities/graphql/storeUtils';
+import { defaultNormalizedCacheFactory } from '../entityStore';
import { StoreReader } from '../readFromStore';
import { StoreWriter } from '../writeToStore';
-import { HeuristicFragmentMatcher } from '../fragmentMatcher';
-import { defaultDataIdFromObject } from '../inMemoryCache';
+import { defaultDataIdFromObject } from '../policies';
import { NormalizedCache } from '../types';
-
-const fragmentMatcherFunction = new HeuristicFragmentMatcher().match;
+import { Policies } from '../policies';
disableFragmentWarnings();
-export function withError(func: Function, regex: RegExp) {
+
+export function withError(func: Function, regex?: RegExp) {
let message: string = null as never;
const { error } = console;
console.error = (m: any) => {
@@ -20,7 +19,9 @@ export function withError(func: Function, regex: RegExp) {
try {
const result = func();
- expect(message).toMatch(regex);
+ if (regex) {
+ expect(message).toMatch(regex);
+ }
return result;
} finally {
console.error = error;
@@ -28,8 +29,11 @@ export function withError(func: Function, regex: RegExp) {
}
describe('diffing queries against the store', () => {
- const reader = new StoreReader();
- const writer = new StoreWriter();
+ const policies = new Policies({
+ dataIdFromObject: defaultDataIdFromObject,
+ });
+ const reader = new StoreReader({ policies });
+ const writer = new StoreWriter({ policies });
it(
'expects named fragments to return complete as true when diffd against ' +
@@ -53,10 +57,6 @@ describe('diffing queries against the store', () => {
}
}
`,
- fragmentMatcherFunction,
- config: {
- dataIdFromObject: defaultDataIdFromObject,
- },
});
expect(queryResult.complete).toEqual(false);
@@ -99,10 +99,6 @@ describe('diffing queries against the store', () => {
}
}
`,
- fragmentMatcherFunction,
- config: {
- dataIdFromObject: defaultDataIdFromObject,
- },
});
expect(queryResult.complete).toEqual(false);
@@ -138,30 +134,33 @@ describe('diffing queries against the store', () => {
});
it('caches root queries both under the ID of the node and the query name', () => {
- const firstQuery = gql`
- {
- people_one(id: "1") {
- __typename
- id
- name
- }
- }
- `;
-
- const result = {
- people_one: {
- __typename: 'Person',
- id: '1',
- name: 'Luke Skywalker',
- },
- };
-
- const getIdField = ({ id }: { id: string }) => id;
+ const writer = new StoreWriter({
+ policies: new Policies({
+ typePolicies: {
+ Person: {
+ keyFields: ["id"],
+ },
+ },
+ }),
+ });
const store = writer.writeQueryToStore({
- result,
- query: firstQuery,
- dataIdFromObject: getIdField,
+ query: gql`
+ {
+ people_one(id: "1") {
+ __typename
+ idAlias: id
+ name
+ }
+ }
+ `,
+ result: {
+ people_one: {
+ __typename: 'Person',
+ idAlias: '1',
+ name: 'Luke Skywalker',
+ },
+ },
});
const secondQuery = gql`
@@ -180,7 +179,11 @@ describe('diffing queries against the store', () => {
});
expect(complete).toBeTruthy();
- expect(store.get('1')).toEqual(result.people_one);
+ expect(store.get('Person:{"id":"1"}')).toEqual({
+ __typename: 'Person',
+ id: '1',
+ name: 'Luke Skywalker',
+ });
});
it('does not swallow errors other than field errors', () => {
@@ -253,11 +256,10 @@ describe('diffing queries against the store', () => {
store,
query: unionQuery,
returnPartialData: false,
- fragmentMatcherFunction,
});
- expect(complete).toBe(false);
- }, /IntrospectionFragmentMatcher/);
+ expect(complete).toBe(true);
+ });
});
it('does not error on a query with fields missing from all but one named fragment', () => {
@@ -304,7 +306,7 @@ describe('diffing queries against the store', () => {
query: unionQuery,
});
- expect(complete).toBe(false);
+ expect(complete).toBe(true);
});
it('throws an error on a query with fields missing from matching named fragments', () => {
@@ -421,6 +423,7 @@ describe('diffing queries against the store', () => {
expect(simpleDiff.result).toEqual({
people_one: {
+ __typename: 'Person',
name: 'Luke Skywalker',
},
});
@@ -432,6 +435,7 @@ describe('diffing queries against the store', () => {
expect(inlineDiff.result).toEqual({
people_one: {
+ __typename: 'Person',
name: 'Luke Skywalker',
},
});
@@ -443,6 +447,7 @@ describe('diffing queries against the store', () => {
expect(namedDiff.result).toEqual({
people_one: {
+ __typename: 'Person',
name: 'Luke Skywalker',
},
});
@@ -491,14 +496,17 @@ describe('diffing queries against the store', () => {
},
};
- function dataIdFromObject({ id }: { id: string }) {
- return id;
- }
+ const policies = new Policies({
+ dataIdFromObject({ id }: { id: string }) {
+ return id;
+ },
+ });
+
+ const writer = new StoreWriter({ policies });
const store = writer.writeQueryToStore({
query,
result: queryResult,
- dataIdFromObject,
});
const { result } = reader.diffQueryAgainstStore({
@@ -507,14 +515,14 @@ describe('diffing queries against the store', () => {
});
expect(result).toEqual(queryResult);
- expect(dataIdFromObject(result.a[0])).toBe('a:1');
- expect(dataIdFromObject(result.a[1])).toBe('a:2');
- expect(dataIdFromObject(result.a[2])).toBe('a:3');
- expect(dataIdFromObject(result.c.e[0])).toBe('e:1');
- expect(dataIdFromObject(result.c.e[1])).toBe('e:2');
- expect(dataIdFromObject(result.c.e[2])).toBe('e:3');
- expect(dataIdFromObject(result.c.e[3])).toBe('e:4');
- expect(dataIdFromObject(result.c.e[4])).toBe('e:5');
+ expect(policies.identify(result.a[0])).toBe('a:1');
+ expect(policies.identify(result.a[1])).toBe('a:2');
+ expect(policies.identify(result.a[2])).toBe('a:3');
+ expect(policies.identify(result.c.e[0])).toBe('e:1');
+ expect(policies.identify(result.c.e[1])).toBe('e:2');
+ expect(policies.identify(result.c.e[2])).toBe('e:3');
+ expect(policies.identify(result.c.e[3])).toBe('e:4');
+ expect(policies.identify(result.c.e[4])).toBe('e:5');
});
describe('referential equality preservation', () => {
@@ -822,10 +830,15 @@ describe('diffing queries against the store', () => {
},
};
+ const writer = new StoreWriter({
+ policies: new Policies({
+ dataIdFromObject: ({ id }: { id: string }) => id,
+ }),
+ });
+
const store = writer.writeQueryToStore({
query,
result: queryResult,
- dataIdFromObject: ({ id }: { id: string }) => id,
});
const previousResult = {
@@ -910,6 +923,7 @@ describe('diffing queries against the store', () => {
expect(result.d).not.toEqual(previousResult.d);
expect(result.d.f).toEqual(previousResult.d.f);
});
+
it('will preserve equality with custom resolvers', () => {
const listQuery = gql`
{
@@ -924,7 +938,7 @@ describe('diffing queries against the store', () => {
const listResult = {
people: [
{
- id: '4',
+ id: 4,
name: 'Luke Skywalker',
__typename: 'Person',
},
@@ -941,32 +955,46 @@ describe('diffing queries against the store', () => {
}
`;
- const dataIdFromObject = (obj: any) => obj.id;
+ const policies = new Policies({
+ typePolicies: {
+ Query: {
+ fields: {
+ person(_, { args, isReference, toReference, readField }) {
+ expect(typeof args.id).toBe('number');
+ const ref = toReference({ __typename: 'Person', id: args.id });
+ expect(isReference(ref)).toBe(true);
+ expect(ref).toEqual({
+ __ref: `Person:${JSON.stringify({ id: args.id })}`,
+ });
+ const found = readField<Reference[]>("people").find(
+ person => person.__ref === ref.__ref);
+ expect(found).toBeTruthy();
+ return found;
+ },
+ },
+ },
+ Person: {
+ keyFields: ["id"],
+ },
+ },
+ });
+
+ const writer = new StoreWriter({ policies });
+ const reader = new StoreReader({ policies });
const store = writer.writeQueryToStore({
query: listQuery,
result: listResult,
- dataIdFromObject,
});
const previousResult = {
person: listResult.people[0],
};
- const cacheRedirects = {
- Query: {
- person: (_: any, args: any) =>
- toIdValue({ id: args['id'], typename: 'Person' }),
- },
- };
-
- const config = { dataIdFromObject, cacheRedirects };
-
const { result } = reader.diffQueryAgainstStore({
store,
query: itemQuery,
previousResult,
- config,
});
expect(result).toEqual(previousResult);
@@ -1091,24 +1119,30 @@ describe('diffing queries against the store', () => {
expect(result).toEqual({
user: {
+ __typename: 'User',
id: 1,
name: 'Ben',
company: {
+ __typename: 'Company',
id: 1,
name: 'Apollo',
users: [
{
+ __typename: 'User',
id: 1,
name: 'Ben',
company: {
+ __typename: 'Company',
id: 1,
name: 'Apollo',
},
},
{
+ __typename: 'User',
id: 2,
name: 'James',
company: {
+ __typename: 'Company',
id: 1,
name: 'Apollo',
},
@@ -1121,7 +1155,11 @@ describe('diffing queries against the store', () => {
// Check first using generated IDs.
check(
- writer.writeQueryToStore({
+ new StoreWriter({
+ policies: new Policies({
+ dataIdFromObject: void 0,
+ })
+ }).writeQueryToStore({
query,
result: {
user: company.users[0],
@@ -1131,8 +1169,11 @@ describe('diffing queries against the store', () => {
// Now check with __typename-specific IDs.
check(
- writer.writeQueryToStore({
- dataIdFromObject: defaultDataIdFromObject,
+ new StoreWriter({
+ policies: new Policies({
+ dataIdFromObject: defaultDataIdFromObject,
+ }),
+ }).writeQueryToStore({
query,
result: {
user: company.users[0],
diff --git a/src/cache/inmemory/__tests__/entityStore.ts b/src/cache/inmemory/__tests__/entityStore.ts
new file mode 100644
--- /dev/null
+++ b/src/cache/inmemory/__tests__/entityStore.ts
@@ -0,0 +1,1208 @@
+import gql from 'graphql-tag';
+import { EntityStore, supportsResultCaching } from '../entityStore';
+import { InMemoryCache } from '../inMemoryCache';
+import { DocumentNode } from 'graphql';
+
+describe('EntityStore', () => {
+ it('should support result caching if so configured', () => {
+ const storeWithResultCaching = new EntityStore.Root({
+ resultCaching: true,
+ });
+
+ const storeWithoutResultCaching = new EntityStore.Root({
+ resultCaching: false,
+ });
+
+    expect(supportsResultCaching({ some: "arbitrary object" })).toBe(false);
+ expect(supportsResultCaching(storeWithResultCaching)).toBe(true);
+ expect(supportsResultCaching(storeWithoutResultCaching)).toBe(false);
+
+ const layerWithCaching = storeWithResultCaching.addLayer("with caching", () => {});
+ expect(supportsResultCaching(layerWithCaching)).toBe(true);
+ const anotherLayer = layerWithCaching.addLayer("another layer", () => {});
+ expect(supportsResultCaching(anotherLayer)).toBe(true);
+ expect(
+ anotherLayer
+ .removeLayer("with caching")
+ .removeLayer("another layer")
+ ).toBe(storeWithResultCaching);
+ expect(supportsResultCaching(storeWithResultCaching)).toBe(true);
+
+ const layerWithoutCaching = storeWithoutResultCaching.addLayer("with caching", () => {});
+ expect(supportsResultCaching(layerWithoutCaching)).toBe(false);
+ expect(layerWithoutCaching.removeLayer("with caching")).toBe(storeWithoutResultCaching);
+ expect(supportsResultCaching(storeWithoutResultCaching)).toBe(false);
+ });
+
+ function newBookAuthorCache() {
+ const cache = new InMemoryCache({
+ resultCaching: true,
+ dataIdFromObject(value: any) {
+ switch (value && value.__typename) {
+ case 'Book':
+ return 'Book:' + value.isbn;
+ case 'Author':
+ return 'Author:' + value.name;
+ }
+ },
+ });
+
+ const query = gql`
+ query {
+ book {
+ title
+ author {
+ name
+ }
+ }
+ }
+ `;
+
+ return {
+ cache,
+ query,
+ };
+ }
+
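+  // A minimal illustrative sketch (not part of the original suite): with
+  // the dataIdFromObject above, normalized IDs come out as
+  //
+  //   dataIdFromObject({ __typename: 'Book', isbn: '1980719802' })
+  //   // => 'Book:1980719802'
+  //   dataIdFromObject({ __typename: 'Author', name: 'George Orwell' })
+  //   // => 'Author:George Orwell'
+  //
+  // and any value without a recognized __typename falls through to
+  // undefined, leaving that object stored inline rather than normalized.
+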
+ it('should reclaim no-longer-reachable, unretained entities', () => {
+ const { cache, query } = newBookAuthorCache();
+
+ cache.writeQuery({
+ query,
+ data: {
+ book: {
+ __typename: 'Book',
+ isbn: '9781451673319',
+ title: 'Fahrenheit 451',
+ author: {
+ __typename: 'Author',
+ name: 'Ray Bradbury',
+ }
+ },
+ },
+ });
+
+ expect(cache.extract()).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ book: {
+ __ref: "Book:9781451673319",
+ },
+ },
+ "Book:9781451673319": {
+ __typename: "Book",
+ title: "Fahrenheit 451",
+ author: {
+ __ref: 'Author:Ray Bradbury',
+ }
+ },
+ "Author:Ray Bradbury": {
+ __typename: "Author",
+ name: "Ray Bradbury",
+ },
+ });
+
+ cache.writeQuery({
+ query,
+ data: {
+ book: {
+ __typename: 'Book',
+ isbn: '0312429215',
+ title: '2666',
+ author: {
+ __typename: 'Author',
+ name: 'Roberto Bolaño',
+ },
+ },
+ },
+ });
+
+ const snapshot = cache.extract();
+
+ expect(snapshot).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ book: {
+ __ref: "Book:0312429215",
+ },
+ },
+ "Book:9781451673319": {
+ __typename: "Book",
+ title: "Fahrenheit 451",
+ author: {
+ __ref: 'Author:Ray Bradbury',
+ }
+ },
+ "Author:Ray Bradbury": {
+ __typename: "Author",
+ name: "Ray Bradbury",
+ },
+ "Book:0312429215": {
+ __typename: "Book",
+ author: {
+ __ref: "Author:Roberto Bolaño",
+ },
+ title: "2666",
+ },
+ "Author:Roberto Bolaño": {
+ __typename: "Author",
+ name: "Roberto Bolaño",
+ },
+ });
+
+ expect(cache.gc().sort()).toEqual([
+ 'Author:Ray Bradbury',
+ 'Book:9781451673319',
+ ]);
+
+ expect(cache.extract()).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ book: {
+ __ref: "Book:0312429215",
+ },
+ },
+ "Book:0312429215": {
+ __typename: "Book",
+ author: {
+ __ref: "Author:Roberto Bolaño",
+ },
+ title: "2666",
+ },
+ "Author:Roberto Bolaño": {
+ __typename: "Author",
+ name: "Roberto Bolaño",
+ },
+ });
+
+ // Nothing left to garbage collect.
+ expect(cache.gc()).toEqual([]);
+
+ // Go back to the pre-GC snapshot.
+ cache.restore(snapshot);
+ expect(cache.extract()).toEqual(snapshot);
+
+    // Reading a fragment does not by itself pin an ID; the explicit
+    // retain() call below is what protects Ray Bradbury from cache.gc().
+ const authorNameFragment = gql`
+ fragment AuthorName on Author {
+ name
+ }
+ `;
+ const ray = cache.readFragment({
+ id: 'Author:Ray Bradbury',
+ fragment: authorNameFragment,
+ });
+
+ expect(cache.retain('Author:Ray Bradbury')).toBe(1);
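+    // retain() returns the new retainment count; retained IDs act as
+    // additional GC roots alongside ROOT_QUERY.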
+
+ expect(ray).toEqual({
+ __typename: 'Author',
+ name: 'Ray Bradbury',
+ });
+
+ expect(cache.gc()).toEqual([
+ // Only Fahrenheit 451 (the book) is reclaimed this time.
+ 'Book:9781451673319',
+ ]);
+
+ expect(cache.extract()).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ book: {
+ __ref: "Book:0312429215",
+ },
+ },
+ "Author:Ray Bradbury": {
+ __typename: "Author",
+ name: "Ray Bradbury",
+ },
+ "Book:0312429215": {
+ __typename: "Book",
+ author: {
+ __ref: "Author:Roberto Bolaño",
+ },
+ title: "2666",
+ },
+ "Author:Roberto Bolaño": {
+ __typename: "Author",
+ name: "Roberto Bolaño",
+ },
+ });
+
+ expect(cache.gc()).toEqual([]);
+
+ expect(cache.release('Author:Ray Bradbury')).toBe(0);
+
+ expect(cache.gc()).toEqual([
+ 'Author:Ray Bradbury',
+ ]);
+
+ expect(cache.gc()).toEqual([]);
+ });
+
+ it('should respect optimistic updates, when active', () => {
+ const { cache, query } = newBookAuthorCache();
+
+ cache.writeQuery({
+ query,
+ data: {
+ book: {
+ __typename: 'Book',
+ isbn: '9781451673319',
+ title: 'Fahrenheit 451',
+ author: {
+ __typename: 'Author',
+ name: 'Ray Bradbury',
+ }
+ },
+ },
+ });
+
+ expect(cache.gc()).toEqual([]);
+
+ // Orphan the F451 / Ray Bradbury data, but avoid collecting garbage yet.
+ cache.writeQuery({
+ query,
+ data: {
+ book: {
+ __typename: 'Book',
+ isbn: '1980719802',
+ title: '1984',
+ author: {
+ __typename: 'Author',
+ name: 'George Orwell',
+ },
+ }
+ }
+ });
+
+ cache.recordOptimisticTransaction(proxy => {
+ proxy.writeFragment({
+ id: 'Author:Ray Bradbury',
+ fragment: gql`
+ fragment AuthorBooks on Author {
+ books {
+ title
+ }
+ }
+ `,
+ data: {
+ books: [
+ {
+ __typename: 'Book',
+ isbn: '9781451673319',
+ },
+ ],
+ },
+ });
+ }, "ray books");
+
+ expect(cache.extract(true)).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ book: {
+ __ref: "Book:1980719802",
+ },
+ },
+ "Author:Ray Bradbury": {
+ __typename: "Author",
+ name: "Ray Bradbury",
+ books: [
+ {
+ __ref: "Book:9781451673319",
+ },
+ ],
+ },
+ "Book:9781451673319": {
+ __typename: "Book",
+ title: "Fahrenheit 451",
+ author: {
+ __ref: "Author:Ray Bradbury",
+ },
+ },
+ "Author:George Orwell": {
+ __typename: "Author",
+ name: "George Orwell",
+ },
+ "Book:1980719802": {
+ __typename: "Book",
+ title: "1984",
+ author: {
+ __ref: "Author:George Orwell",
+ },
+ },
+ });
+
+ // Nothing can be reclaimed while the optimistic update is retaining
+ // Fahrenheit 451.
+ expect(cache.gc()).toEqual([]);
+
+ cache.removeOptimistic("ray books");
+
+ expect(cache.extract(true)).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ book: {
+ __ref: "Book:1980719802",
+ },
+ },
+ "Author:Ray Bradbury": {
+ __typename: "Author",
+ name: "Ray Bradbury",
+ // Note that the optimistic books field has disappeared, as expected.
+ },
+ "Book:9781451673319": {
+ __typename: "Book",
+ title: "Fahrenheit 451",
+ author: {
+ __ref: "Author:Ray Bradbury",
+ },
+ },
+ "Author:George Orwell": {
+ __typename: "Author",
+ name: "George Orwell",
+ },
+ "Book:1980719802": {
+ __typename: "Book",
+ title: "1984",
+ author: {
+ __ref: "Author:George Orwell",
+ },
+ },
+ });
+
+ expect(cache.gc().sort()).toEqual([
+ "Author:Ray Bradbury",
+ "Book:9781451673319",
+ ]);
+
+ expect(cache.extract(true)).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ book: {
+ __ref: "Book:1980719802",
+ },
+ },
+ "Author:George Orwell": {
+ __typename: "Author",
+ name: "George Orwell",
+ },
+ "Book:1980719802": {
+ __typename: "Book",
+ title: "1984",
+ author: {
+ __ref: "Author:George Orwell",
+ },
+ },
+ });
+
+ expect(cache.gc()).toEqual([]);
+ });
+
+ it('should respect retain/release methods', () => {
+ const { query, cache } = newBookAuthorCache();
+
+ const eagerBookData = {
+ __typename: 'Book',
+ isbn: '1603589082',
+ title: 'Eager',
+ subtitle: 'The Surprising, Secret Life of Beavers and Why They Matter',
+ author: {
+ __typename: 'Author',
+ name: 'Ben Goldfarb',
+ },
+ };
+
+ const spinelessBookData = {
+ __typename: 'Book',
+ isbn: '0735211280',
+ title: 'Spineless',
+ subtitle: 'The Science of Jellyfish and the Art of Growing a Backbone',
+ author: {
+ __typename: 'Author',
+ name: 'Juli Berwald',
+ },
+ };
+
+ cache.writeQuery({
+ query,
+ data: {
+ book: spinelessBookData,
+ },
+ });
+
+ expect(cache.extract(true)).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ book: {
+ __ref: "Book:0735211280",
+ },
+ },
+ "Book:0735211280": {
+ __typename: "Book",
+ author: {
+ __ref: "Author:Juli Berwald",
+ },
+ title: "Spineless",
+ },
+ "Author:Juli Berwald": {
+ __typename: "Author",
+ name: "Juli Berwald",
+ },
+ });
+
+ cache.writeQuery({
+ query,
+ data: {
+ book: eagerBookData,
+ },
+ });
+
+ const snapshotWithBothBooksAndAuthors = {
+ ROOT_QUERY: {
+ __typename: "Query",
+ book: {
+ __ref: "Book:1603589082",
+ },
+ },
+ "Book:0735211280": {
+ __typename: "Book",
+ author: {
+ __ref: "Author:Juli Berwald",
+ },
+ title: "Spineless",
+ },
+ "Author:Juli Berwald": {
+ __typename: "Author",
+ name: "Juli Berwald",
+ },
+ "Book:1603589082": {
+ __typename: "Book",
+ author: {
+ __ref: "Author:Ben Goldfarb",
+ },
+ title: "Eager",
+ },
+ "Author:Ben Goldfarb": {
+ __typename: "Author",
+ name: "Ben Goldfarb",
+ },
+ };
+
+ expect(cache.extract(true)).toEqual(snapshotWithBothBooksAndAuthors);
+
+ expect(cache.retain("Book:0735211280")).toBe(1);
+
+ expect(cache.gc()).toEqual([]);
+
+ expect(cache.retain("Author:Juli Berwald")).toBe(1);
+
+ cache.recordOptimisticTransaction(proxy => {
+ proxy.writeFragment({
+ id: "Author:Juli Berwald",
+ fragment: gql`
+ fragment AuthorBooks on Author {
+ books {
+ title
+ }
+ }
+ `,
+ data: {
+ books: [
+ {
+ __typename: 'Book',
+ isbn: '0735211280',
+ },
+ ],
+ },
+ });
+ }, "juli books");
+
+ // Retain the Spineless book on the optimistic layer (for the first time)
+ // but release it on the root layer.
+ expect(cache.retain("Book:0735211280", true)).toBe(1);
+ expect(cache.release("Book:0735211280")).toBe(0);
+
+ // The Spineless book is still protected by the reference from author Juli
+ // Berwald's optimistically-added author.books field.
+ expect(cache.gc()).toEqual([]);
+
+ expect(cache.extract(true)).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ book: {
+ __ref: "Book:1603589082",
+ },
+ },
+ "Book:0735211280": {
+ __typename: "Book",
+ author: {
+ __ref: "Author:Juli Berwald",
+ },
+ title: "Spineless",
+ },
+ "Author:Juli Berwald": {
+ __typename: "Author",
+ name: "Juli Berwald",
+ // Note this extra optimistic field.
+ books: [
+ {
+ __ref: "Book:0735211280",
+ },
+ ],
+ },
+ "Book:1603589082": {
+ __typename: "Book",
+ author: {
+ __ref: "Author:Ben Goldfarb",
+ },
+ title: "Eager",
+ },
+ "Author:Ben Goldfarb": {
+ __typename: "Author",
+ name: "Ben Goldfarb",
+ },
+ });
+
+ // A non-optimistic snapshot will not have the extra books field.
+ expect(cache.extract(false)).toEqual(snapshotWithBothBooksAndAuthors);
+
+ cache.removeOptimistic("juli books");
+
+ // The optimistic books field is gone now that we've removed the optimistic
+ // layer that added it.
+ expect(cache.extract(true)).toEqual(snapshotWithBothBooksAndAuthors);
+
+ // The Spineless book is no longer retained or kept alive by any other root
+ // IDs, so it can finally be collected.
+ expect(cache.gc()).toEqual([
+ "Book:0735211280",
+ ]);
+
+ expect(cache.release("Author:Juli Berwald")).toBe(0);
+
+ // Now that Juli Berwald's author entity is no longer retained, garbage
+ // collection cometh for her. Look out, Juli!
+ expect(cache.gc()).toEqual([
+ "Author:Juli Berwald",
+ ]);
+
+ expect(cache.gc()).toEqual([]);
+ });
+
+ it('allows cache eviction', () => {
+ const { cache, query } = newBookAuthorCache();
+
+ const cuckoosCallingBook = {
+ __typename: "Book",
+ isbn: "031648637X",
+ title: "The Cuckoo's Calling",
+ author: {
+ __typename: "Author",
+ name: "Robert Galbraith",
+ },
+ };
+
+ expect(cache.identify(cuckoosCallingBook)).toBe("Book:031648637X");
+
+ cache.writeQuery({
+ query,
+ data: {
+ book: cuckoosCallingBook,
+ },
+ });
+
+ expect(cache.evict("Author:J.K. Rowling")).toBe(false);
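+    // evict() returns false because nothing has been written under that ID
+    // yet, so there was nothing to remove.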
+
+ const bookAuthorFragment = gql`
+ fragment BookAuthor on Book {
+ author {
+ name
+ }
+ }
+ `;
+
+ const fragmentResult = cache.readFragment({
+ id: cache.identify(cuckoosCallingBook),
+ fragment: bookAuthorFragment,
+ });
+
+ expect(fragmentResult).toEqual({
+ __typename: "Book",
+ author: {
+ __typename: "Author",
+ name: "Robert Galbraith",
+ },
+ });
+
+ cache.recordOptimisticTransaction(proxy => {
+ proxy.writeFragment({
+ id: cache.identify(cuckoosCallingBook),
+ fragment: bookAuthorFragment,
+ data: {
+ ...fragmentResult,
+ author: {
+ __typename: "Author",
+ name: "J.K. Rowling",
+ },
+ },
+ });
+ }, "real name");
+
+ const snapshotWithBothNames = {
+ ROOT_QUERY: {
+ __typename: "Query",
+ book: {
+ __ref: "Book:031648637X",
+ },
+ },
+ "Book:031648637X": {
+ __typename: "Book",
+ author: {
+ __ref: "Author:J.K. Rowling",
+ },
+ title: "The Cuckoo's Calling",
+ },
+ "Author:Robert Galbraith": {
+ __typename: "Author",
+ name: "Robert Galbraith",
+ },
+ "Author:J.K. Rowling": {
+ __typename: "Author",
+ name: "J.K. Rowling",
+ },
+ };
+
+ expect(cache.extract(true)).toEqual(snapshotWithBothNames);
+
+ expect(cache.gc()).toEqual([]);
+
+ expect(cache.retain('Author:Robert Galbraith')).toBe(1);
+
+ expect(cache.gc()).toEqual([]);
+
+ expect(cache.evict("Author:Robert Galbraith")).toBe(true);
+
+ expect(cache.gc()).toEqual([]);
+
+ cache.removeOptimistic("real name");
+
+ expect(cache.extract(true)).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ book: {
+ __ref: "Book:031648637X",
+ },
+ },
+ "Book:031648637X": {
+ __typename: "Book",
+ author: {
+ __ref: "Author:Robert Galbraith",
+ },
+ title: "The Cuckoo's Calling",
+ },
+ "Author:Robert Galbraith": {
+ __typename: "Author",
+ name: "Robert Galbraith",
+ },
+ });
+
+ cache.writeFragment({
+ id: cache.identify(cuckoosCallingBook),
+ fragment: bookAuthorFragment,
+ data: {
+ ...fragmentResult,
+ author: {
+ __typename: "Author",
+ name: "J.K. Rowling",
+ },
+ },
+ });
+
+ expect(cache.extract(true)).toEqual(snapshotWithBothNames);
+
+ expect(cache.retain("Author:Robert Galbraith")).toBe(2);
+
+ expect(cache.gc()).toEqual([]);
+
+ expect(cache.release("Author:Robert Galbraith")).toBe(1);
+ expect(cache.release("Author:Robert Galbraith")).toBe(0);
+
+ expect(cache.gc()).toEqual([
+ "Author:Robert Galbraith",
+ ]);
+
+ // If you're ever tempted to do this, you probably want to use cache.clear()
+ // instead, but evicting the ROOT_QUERY should work at least.
+ expect(cache.evict("ROOT_QUERY")).toBe(true);
+
+ expect(cache.extract(true)).toEqual({
+ "Book:031648637X": {
+ __typename: "Book",
+ author: {
+ __ref: "Author:J.K. Rowling",
+ },
+ title: "The Cuckoo's Calling",
+ },
+ "Author:J.K. Rowling": {
+ __typename: "Author",
+ name: "J.K. Rowling",
+ },
+ });
+
+ const ccId = cache.identify(cuckoosCallingBook);
+ expect(cache.retain(ccId)).toBe(2);
+ expect(cache.release(ccId)).toBe(1);
+ expect(cache.release(ccId)).toBe(0);
+
+ expect(cache.gc().sort()).toEqual([
+ "Author:J.K. Rowling",
+ ccId,
+ ]);
+ });
+
+ it("allows evicting specific fields", () => {
+ const query: DocumentNode = gql`
+ query {
+ authorOfBook(isbn: $isbn) {
+ name
+ hobby
+ }
+ publisherOfBook(isbn: $isbn) {
+ name
+ yearOfFounding
+ }
+ }
+ `;
+
+ const cache = new InMemoryCache({
+ typePolicies: {
+ Query: {
+ fields: {
+ authorOfBook: {
+ keyArgs: ["isbn"],
+ },
+ },
+ },
+ Author: {
+ keyFields: ["name"],
+ },
+ Publisher: {
+ keyFields: ["name"],
+ },
+ },
+ });
+
+ const TedChiangData = {
+ __typename: "Author",
+ name: "Ted Chiang",
+ hobby: "video games",
+ };
+
+ const KnopfData = {
+ __typename: "Publisher",
+ name: "Alfred A. Knopf",
+ yearOfFounding: 1915,
+ };
+
+ cache.writeQuery({
+ query,
+ data: {
+ authorOfBook: TedChiangData,
+ publisherOfBook: KnopfData,
+ },
+ variables: {
+ isbn: "1529014514",
+ },
+ });
+
+ const justTedRootQueryData = {
+ __typename: "Query",
+ 'authorOfBook:{"isbn":"1529014514"}': {
+ __ref: 'Author:{"name":"Ted Chiang"}',
+ },
+ // This storeFieldName format differs slightly from that of
+ // authorOfBook because we did not define keyArgs for the
+ // publisherOfBook field, so the legacy storeKeyNameFromField
+ // function was used instead.
+ 'publisherOfBook({"isbn":"1529014514"})': {
+ __ref: 'Publisher:{"name":"Alfred A. Knopf"}',
+ },
+ };
+
+ expect(cache.extract()).toEqual({
+ ROOT_QUERY: justTedRootQueryData,
+ 'Author:{"name":"Ted Chiang"}': TedChiangData,
+ 'Publisher:{"name":"Alfred A. Knopf"}': KnopfData,
+ });
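+    // Both storeFieldName formats appear side by side above (inferred from
+    // this snapshot, not a documented guarantee):
+    //   authorOfBook:{"isbn":"1529014514"}      <- keyArgs: ["isbn"]
+    //   publisherOfBook({"isbn":"1529014514"})  <- legacy, no keyArgs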
+
+ const JennyOdellData = {
+ __typename: "Author",
+ name: "Jenny Odell",
+ hobby: "birding",
+ };
+
+ const MelvilleData = {
+ __typename: "Publisher",
+ name: "Melville House",
+ yearOfFounding: 2001,
+ };
+
+ cache.writeQuery({
+ query,
+ data: {
+ authorOfBook: JennyOdellData,
+ publisherOfBook: MelvilleData,
+ },
+ variables: {
+ isbn: "1760641790",
+ },
+ });
+
+ const justJennyRootQueryData = {
+ __typename: "Query",
+ 'authorOfBook:{"isbn":"1760641790"}': {
+ __ref: 'Author:{"name":"Jenny Odell"}',
+ },
+ 'publisherOfBook({"isbn":"1760641790"})': {
+ __ref: 'Publisher:{"name":"Melville House"}',
+ },
+ };
+
+ expect(cache.extract()).toEqual({
+ ROOT_QUERY: {
+ ...justTedRootQueryData,
+ ...justJennyRootQueryData,
+ },
+ 'Author:{"name":"Ted Chiang"}': TedChiangData,
+ 'Publisher:{"name":"Alfred A. Knopf"}': KnopfData,
+ 'Author:{"name":"Jenny Odell"}': JennyOdellData,
+ 'Publisher:{"name":"Melville House"}': MelvilleData,
+ });
+
+ const fullTedResult = cache.readQuery<any>({
+ query,
+ variables: {
+ isbn: "1529014514",
+ },
+ });
+
+ expect(fullTedResult).toEqual({
+ authorOfBook: TedChiangData,
+ publisherOfBook: KnopfData,
+ });
+
+ const fullJennyResult = cache.readQuery<any>({
+ query,
+ variables: {
+ isbn: "1760641790",
+ },
+ });
+
+ expect(fullJennyResult).toEqual({
+ authorOfBook: JennyOdellData,
+ publisherOfBook: MelvilleData,
+ });
+
+ cache.evict(
+ cache.identify({
+ __typename: "Publisher",
+ name: "Alfred A. Knopf",
+ }),
+ "yearOfFounding",
+ );
+
+ expect(cache.extract()).toEqual({
+ ROOT_QUERY: {
+ ...justTedRootQueryData,
+ ...justJennyRootQueryData,
+ },
+ 'Author:{"name":"Ted Chiang"}': TedChiangData,
+ 'Publisher:{"name":"Alfred A. Knopf"}': {
+ __typename: "Publisher",
+ name: "Alfred A. Knopf",
+ // yearOfFounding has been removed
+ },
+ 'Author:{"name":"Jenny Odell"}': JennyOdellData,
+ 'Publisher:{"name":"Melville House"}': MelvilleData,
+ });
+
+ // Nothing to garbage collect yet.
+ expect(cache.gc()).toEqual([]);
+
+ cache.evict(cache.identify({
+ __typename: "Publisher",
+ name: "Melville House",
+ }));
+
+ expect(cache.extract()).toEqual({
+ ROOT_QUERY: {
+ ...justTedRootQueryData,
+ ...justJennyRootQueryData,
+ },
+ 'Author:{"name":"Ted Chiang"}': TedChiangData,
+ 'Publisher:{"name":"Alfred A. Knopf"}': {
+ __typename: "Publisher",
+ name: "Alfred A. Knopf",
+ },
+ 'Author:{"name":"Jenny Odell"}': JennyOdellData,
+ // Melville House has been removed
+ });
+
+ cache.evict("ROOT_QUERY", "publisherOfBook");
+
+ function withoutPublisherOfBook(obj: object) {
+ const clean = { ...obj };
+ Object.keys(obj).forEach(key => {
+ if (key.startsWith("publisherOfBook")) {
+ delete clean[key];
+ }
+ });
+ return clean;
+ }
+
+ expect(cache.extract()).toEqual({
+ ROOT_QUERY: {
+ ...withoutPublisherOfBook(justTedRootQueryData),
+ ...withoutPublisherOfBook(justJennyRootQueryData),
+ },
+ 'Author:{"name":"Ted Chiang"}': TedChiangData,
+ 'Publisher:{"name":"Alfred A. Knopf"}': {
+ __typename: "Publisher",
+ name: "Alfred A. Knopf",
+ },
+ 'Author:{"name":"Jenny Odell"}': JennyOdellData,
+ });
+
+ expect(cache.gc()).toEqual([
+ 'Publisher:{"name":"Alfred A. Knopf"}',
+ ]);
+
+ expect(cache.extract()).toEqual({
+ ROOT_QUERY: {
+ ...withoutPublisherOfBook(justTedRootQueryData),
+ ...withoutPublisherOfBook(justJennyRootQueryData),
+ },
+ 'Author:{"name":"Ted Chiang"}': TedChiangData,
+ 'Author:{"name":"Jenny Odell"}': JennyOdellData,
+ });
+
+ const partialTedResult = cache.diff<any>({
+ query,
+ returnPartialData: true,
+ optimistic: false, // required but not important
+ variables: {
+ isbn: "1529014514",
+ },
+ });
+ expect(partialTedResult.complete).toBe(false);
+ expect(partialTedResult.result).toEqual({
+ authorOfBook: TedChiangData,
+ });
+ // The result caching system preserves the referential identity of
+ // unchanged nested result objects.
+ expect(
+ partialTedResult.result.authorOfBook,
+ ).toBe(fullTedResult.authorOfBook);
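+    // (Hence toBe rather than toEqual: the assertion is about object
+    // identity across reads, not merely structural equality.)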
+
+ const partialJennyResult = cache.diff<any>({
+ query,
+ returnPartialData: true,
+ optimistic: true, // required but not important
+ variables: {
+ isbn: "1760641790",
+ },
+ });
+ expect(partialJennyResult.complete).toBe(false);
+ expect(partialJennyResult.result).toEqual({
+ authorOfBook: JennyOdellData,
+ });
+ // The result caching system preserves the referential identity of
+ // unchanged nested result objects.
+ expect(
+ partialJennyResult.result.authorOfBook,
+ ).toBe(fullJennyResult.authorOfBook);
+
+ const tedWithoutHobby = {
+ __typename: "Author",
+ name: "Ted Chiang",
+ };
+
+ cache.evict(
+ cache.identify(tedWithoutHobby),
+ "hobby",
+ );
+
+ expect(cache.diff<any>({
+ query,
+ returnPartialData: true,
+ optimistic: false, // required but not important
+ variables: {
+ isbn: "1529014514",
+ },
+ })).toEqual({
+ complete: false,
+ result: {
+ authorOfBook: tedWithoutHobby,
+ },
+ });
+
+ cache.evict("ROOT_QUERY", "authorOfBook");
+ expect(cache.gc().sort()).toEqual([
+ 'Author:{"name":"Jenny Odell"}',
+ 'Author:{"name":"Ted Chiang"}',
+ ]);
+ expect(cache.extract()).toEqual({
+ ROOT_QUERY: {
+ // Everything else has been removed.
+ __typename: "Query",
+ },
+ });
+ });
+
+ it("supports cache.identify(object)", () => {
+ const queryWithAliases: DocumentNode = gql`
+ query {
+ abcs {
+ first: a
+ second: b
+ ...Rest
+ }
+ }
+ fragment Rest on ABCs {
+ third: c
+ }
+ `;
+
+ const queryWithoutAliases: DocumentNode = gql`
+ query {
+ abcs {
+ a
+ b
+ ...Rest
+ }
+ }
+ fragment Rest on ABCs {
+ c
+ }
+ `;
+
+ const cache = new InMemoryCache({
+ typePolicies: {
+ ABCs: {
+ keyFields: ["b", "a", "c"],
+ },
+ },
+ });
+
+ const ABCs = {
+ __typename: "ABCs",
+ first: "ay",
+ second: "bee",
+ third: "see",
+ };
+
+ cache.writeQuery({
+ query: queryWithAliases,
+ data: {
+ abcs: ABCs,
+ },
+ });
+
+ expect(cache.extract()).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ abcs: {
+ __ref: 'ABCs:{"b":"bee","a":"ay","c":"see"}',
+ },
+ },
+ 'ABCs:{"b":"bee","a":"ay","c":"see"}': {
+ __typename: "ABCs",
+ a: "ay",
+ b: "bee",
+ c: "see",
+ },
+ });
+
+ const resultWithAliases = cache.readQuery({
+ query: queryWithAliases,
+ });
+
+ expect(resultWithAliases).toEqual({ abcs: ABCs });
+
+ const resultWithoutAliases = cache.readQuery({
+ query: queryWithoutAliases,
+ });
+
+ expect(resultWithoutAliases).toEqual({
+ abcs: {
+ __typename: "ABCs",
+ a: "ay",
+ b: "bee",
+ c: "see",
+ },
+ });
+
+ expect(cache.identify({
+ __typename: "ABCs",
+ a: 1,
+ b: 2,
+ c: 3,
+ })).toBe('ABCs:{"b":2,"a":1,"c":3}');
+
+ expect(() => cache.identify(ABCs)).toThrow(
+ "Missing field b while computing key fields",
+ );
+
+ expect(cache.readFragment({
+ id: cache.identify({
+ __typename: "ABCs",
+ a: "ay",
+ b: "bee",
+ c: "see",
+ }),
+ fragment: gql`
+ fragment JustB on ABCs {
+ b
+ }
+ `,
+ })).toEqual({
+ __typename: "ABCs",
+ b: "bee",
+ });
+
+ expect(cache.readQuery({
+ query: queryWithAliases,
+ })).toBe(resultWithAliases);
+
+ expect(cache.readQuery({
+ query: queryWithoutAliases,
+ })).toBe(resultWithoutAliases);
+
+ cache.evict(cache.identify({
+ __typename: "ABCs",
+ a: "ay",
+ b: "bee",
+ c: "see",
+ }));
+
+ expect(cache.extract()).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ abcs: {
+ __ref: 'ABCs:{"b":"bee","a":"ay","c":"see"}',
+ },
+ },
+ });
+
+ expect(() => cache.readQuery({
+ query: queryWithAliases,
+ })).toThrow(/Can't find field a on object/);
+
+ expect(() => cache.readQuery({
+ query: queryWithoutAliases,
+ })).toThrow(/Can't find field a on object/);
+ });
+});
diff --git a/src/cache/inmemory/__tests__/fragmentMatcher.ts b/src/cache/inmemory/__tests__/fragmentMatcher.ts
new file mode 100644
--- /dev/null
+++ b/src/cache/inmemory/__tests__/fragmentMatcher.ts
@@ -0,0 +1,225 @@
+import gql from 'graphql-tag';
+
+import { InMemoryCache } from '../inMemoryCache';
+
+describe('fragment matching', () => {
+ it('can match exact types with or without possibleTypes', () => {
+ const cacheWithoutPossibleTypes = new InMemoryCache({
+ addTypename: true,
+ });
+
+ const cacheWithPossibleTypes = new InMemoryCache({
+ addTypename: true,
+ possibleTypes: {
+ Animal: ['Cat', 'Dog'],
+ },
+ });
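+    // possibleTypes maps a supertype to its concrete subtypes (Animal is
+    // implemented by Cat and Dog). Exact-type fragments like CatDetails
+    // below match on __typename alone, which is why both caches agree here.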
+
+ const query = gql`
+ query AnimalNames {
+ animals {
+ id
+ name
+ ...CatDetails
+ }
+ }
+ fragment CatDetails on Cat {
+ livesLeft
+ killsToday
+ }
+ `;
+
+ const data = {
+ animals: [
+ {
+ __typename: 'Cat',
+ id: 1,
+ name: 'Felix',
+ livesLeft: 8,
+ killsToday: 2,
+ },
+ {
+ __typename: 'Dog',
+ id: 2,
+ name: 'Baxter',
+ },
+ ],
+ };
+
+ cacheWithoutPossibleTypes.writeQuery({ query, data });
+ expect(cacheWithoutPossibleTypes.readQuery({ query })).toEqual(data);
+
+ cacheWithPossibleTypes.writeQuery({ query, data });
+ expect(cacheWithPossibleTypes.readQuery({ query })).toEqual(data);
+ });
+
+ it('can match interface subtypes', () => {
+ const cache = new InMemoryCache({
+ addTypename: true,
+ possibleTypes: {
+ Animal: ['Cat', 'Dog'],
+ },
+ });
+
+ const query = gql`
+ query BestFriend {
+ bestFriend {
+ id
+ ...AnimalName
+ }
+ }
+ fragment AnimalName on Animal {
+ name
+ }
+ `;
+
+ const data = {
+ bestFriend: {
+ __typename: 'Dog',
+ id: 2,
+ name: 'Beckett',
+ },
+ };
+
+ cache.writeQuery({ query, data });
+ expect(cache.readQuery({ query })).toEqual(data);
+ });
+
+ it('can match union member types', () => {
+ const cache = new InMemoryCache({
+ addTypename: true,
+ possibleTypes: {
+ Status: ['PASSING', 'FAILING', 'SKIPPED'],
+ },
+ });
+
+ const query = gql`
+ query {
+ testResults {
+ id
+ output {
+ ... on Status {
+ stdout
+ }
+ ... on FAILING {
+ stderr
+ }
+ }
+ }
+ }
+ `;
+
+ const data = {
+ testResults: [
+ {
+ __typename: 'TestResult',
+ id: 123,
+ output: {
+ __typename: 'PASSING',
+ stdout: 'ok!',
+ },
+ },
+ {
+ __typename: 'TestResult',
+ id: 456,
+ output: {
+ __typename: 'FAILING',
+ stdout: '',
+ stderr: 'oh no',
+ },
+ },
+ ],
+ };
+
+ cache.writeQuery({ query, data });
+ expect(cache.readQuery({ query })).toEqual(data);
+ });
+
+ it('can match indirect subtypes while avoiding cycles', () => {
+ const cache = new InMemoryCache({
+ addTypename: true,
+ possibleTypes: {
+ Animal: ['Animal', 'Bug', 'Mammal'],
+ Bug: ['Ant', 'Spider', 'RolyPoly'],
+ Mammal: ['Dog', 'Cat', 'Human'],
+ Cat: ['Calico', 'Siamese', 'Sphynx', 'Tabby'],
+ },
+ });
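+    // Note the deliberate cycle: Animal lists itself among its own possible
+    // subtypes, so the matcher must track visited types instead of
+    // recursing forever.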
+
+ const query = gql`
+ query {
+ animals {
+ ... on Mammal {
+ hasFur
+ bodyTemperature
+ }
+ ... on Bug {
+ isVenomous
+ }
+ }
+ }
+ `;
+
+ const data = {
+ animals: [
+ {
+ __typename: 'Sphynx',
+ hasFur: false,
+ bodyTemperature: 99,
+ },
+ {
+ __typename: 'Dog',
+ hasFur: true,
+ bodyTemperature: 102,
+ },
+ {
+ __typename: 'Spider',
+ isVenomous: 'maybe',
+ },
+ ],
+ };
+
+ cache.writeQuery({ query, data });
+ expect(cache.readQuery({ query })).toEqual(data);
+ });
+
+ it('can match against the root Query', () => {
+ const cache = new InMemoryCache({
+ addTypename: true,
+ });
+
+ const query = gql`
+ query AllPeople {
+ people {
+ id
+ name
+ }
+ ...PeopleTypes
+ }
+ fragment PeopleTypes on Query {
+ __type(name: "Person") {
+ name
+ kind
+ }
+ }
+ `;
+
+ const data = {
+ people: [
+ {
+ __typename: 'Person',
+ id: 123,
+ name: 'Ben',
+ },
+ ],
+ __type: {
+ __typename: '__Type',
+ name: 'Person',
+ kind: 'OBJECT',
+ },
+ };
+
+ cache.writeQuery({ query, data });
+ expect(cache.readQuery({ query })).toEqual(data);
+ });
+});
diff --git a/src/cache/inmemory/__tests__/optimistic.ts b/src/cache/inmemory/__tests__/optimistic.ts
new file mode 100644
--- /dev/null
+++ b/src/cache/inmemory/__tests__/optimistic.ts
@@ -0,0 +1,448 @@
+import gql from 'graphql-tag';
+
+import { InMemoryCache } from '../inMemoryCache';
+
+describe('optimistic cache layers', () => {
+  it('returns === results for repeated reads', () => {
+ const cache = new InMemoryCache({
+ resultCaching: true,
+ dataIdFromObject(value: any) {
+ switch (value && value.__typename) {
+ case 'Book':
+ return 'Book:' + value.isbn;
+ case 'Author':
+ return 'Author:' + value.name;
+ }
+ },
+ });
+
+ const query = gql`
+ {
+ book {
+ title
+ author {
+ name
+ }
+ }
+ }
+ `;
+
+ function readOptimistic(cache: InMemoryCache) {
+ return cache.readQuery<{ book: any }>({ query }, true);
+ }
+
+ function readRealistic(cache: InMemoryCache) {
+ return cache.readQuery<{ book: any }>({ query }, false);
+ }
+
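+    // The second (positional) argument to readQuery selects the layer:
+    // true reads through any active optimistic layers, false reads only
+    // the underlying root store.
+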
+ cache.writeQuery({
+ query,
+ data: {
+ book: {
+ __typename: 'Book',
+ isbn: '1980719802',
+ title: '1984',
+ author: {
+ __typename: 'Author',
+ name: 'George Orwell',
+ },
+ },
+ },
+ });
+
+ const result1984 = readOptimistic(cache);
+ expect(result1984).toEqual({
+ book: {
+ __typename: 'Book',
+ title: '1984',
+ author: {
+ __typename: 'Author',
+ name: 'George Orwell',
+ },
+ },
+ });
+
+ expect(result1984).toBe(readOptimistic(cache));
+ expect(result1984).toBe(readRealistic(cache));
+
+ let result2666InTransaction: ReturnType<typeof readOptimistic>;
+ cache.performTransaction(proxy => {
+ expect(readOptimistic(cache)).toEqual(result1984);
+
+ proxy.writeQuery({
+ query,
+ data: {
+ book: {
+ __typename: 'Book',
+ isbn: '0312429215',
+ title: '2666',
+ author: {
+ __typename: 'Author',
+ name: 'Roberto Bolaño',
+ },
+ },
+ },
+ });
+
+ result2666InTransaction = readOptimistic(proxy);
+ expect(result2666InTransaction).toEqual({
+ book: {
+ __typename: 'Book',
+ title: '2666',
+ author: {
+ __typename: 'Author',
+ name: 'Roberto Bolaño',
+ },
+ },
+ });
+ }, 'first');
+
+ expect(readOptimistic(cache)).toBe(result2666InTransaction);
+
+ expect(result1984).toBe(readRealistic(cache));
+
+ let resultCatch22: ReturnType<typeof readOptimistic>;
+ cache.performTransaction(proxy => {
+ proxy.writeQuery({
+ query,
+ data: {
+ book: {
+ __typename: 'Book',
+ isbn: '1451626657',
+ title: 'Catch-22',
+ author: {
+ __typename: 'Author',
+ name: 'Joseph Heller',
+ },
+ },
+ },
+ });
+
+ expect((resultCatch22 = readOptimistic(proxy))).toEqual({
+ book: {
+ __typename: 'Book',
+ title: 'Catch-22',
+ author: {
+ __typename: 'Author',
+ name: 'Joseph Heller',
+ },
+ },
+ });
+ }, 'second');
+
+ expect(readOptimistic(cache)).toBe(resultCatch22);
+
+ expect(result1984).toBe(readRealistic(cache));
+
+ cache.removeOptimistic('first');
+
+ expect(readOptimistic(cache)).toBe(resultCatch22);
+
+ // Write a new book to the root Query.book field, which should not affect
+ // the 'second' optimistic layer that is still applied.
+ cache.writeQuery({
+ query,
+ data: {
+ book: {
+ __typename: 'Book',
+ isbn: '9781451673319',
+ title: 'Fahrenheit 451',
+ author: {
+ __typename: 'Author',
+ name: 'Ray Bradbury',
+ },
+ },
+ },
+ });
+
+ expect(readOptimistic(cache)).toBe(resultCatch22);
+
+ const resultF451 = readRealistic(cache);
+ expect(resultF451).toEqual({
+ book: {
+ __typename: 'Book',
+ title: 'Fahrenheit 451',
+ author: {
+ __typename: 'Author',
+ name: 'Ray Bradbury',
+ },
+ },
+ });
+
+ cache.removeOptimistic('second');
+
+ expect(resultF451).toBe(readRealistic(cache));
+ expect(resultF451).toBe(readOptimistic(cache));
+
+ expect(cache.extract(true)).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ book: { __ref: 'Book:9781451673319' },
+ },
+ 'Book:1980719802': {
+ title: '1984',
+ author: { __ref: 'Author:George Orwell' },
+ __typename: 'Book',
+ },
+ 'Book:9781451673319': {
+ title: 'Fahrenheit 451',
+ author: { __ref: 'Author:Ray Bradbury' },
+ __typename: 'Book',
+ },
+ 'Author:George Orwell': {
+ __typename: 'Author',
+ name: 'George Orwell',
+ },
+ 'Author:Ray Bradbury': {
+ __typename: 'Author',
+ name: 'Ray Bradbury',
+ },
+ });
+ });
+
+ it('dirties appropriate IDs when optimistic layers are removed', () => {
+ const cache = new InMemoryCache({
+ resultCaching: true,
+ dataIdFromObject(value: any) {
+ switch (value && value.__typename) {
+ case 'Book':
+ return 'Book:' + value.isbn;
+ case 'Author':
+ return 'Author:' + value.name;
+ }
+ },
+ });
+
+ type Q = {
+ books: any[];
+ };
+
+ const query = gql`
+ {
+ books {
+ title
+ subtitle
+ }
+ }
+ `;
+
+ const eagerBookData = {
+ __typename: 'Book',
+ isbn: '1603589082',
+ title: 'Eager',
+ subtitle: 'The Surprising, Secret Life of Beavers and Why They Matter',
+ author: {
+ __typename: 'Author',
+ name: 'Ben Goldfarb',
+ },
+ };
+
+ const spinelessBookData = {
+ __typename: 'Book',
+ isbn: '0735211280',
+ title: 'Spineless',
+ subtitle: 'The Science of Jellyfish and the Art of Growing a Backbone',
+ author: {
+ __typename: 'Author',
+ name: 'Juli Berwald',
+ },
+ };
+
+ cache.writeQuery({
+ query,
+ data: {
+ books: [eagerBookData, spinelessBookData],
+ },
+ });
+
+ expect(cache.extract(true)).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ books: [{ __ref: 'Book:1603589082' }, { __ref: 'Book:0735211280' }],
+ },
+ 'Book:1603589082': {
+ title: 'Eager',
+ subtitle: eagerBookData.subtitle,
+ __typename: 'Book',
+ },
+ 'Book:0735211280': {
+ title: 'Spineless',
+ subtitle: spinelessBookData.subtitle,
+ __typename: 'Book',
+ },
+ });
+
+ function read() {
+ return cache.readQuery<Q>({ query }, true);
+ }
+
+ const result = read();
+ expect(result).toEqual({
+ books: [
+ {
+ __typename: 'Book',
+ title: 'Eager',
+ subtitle: 'The Surprising, Secret Life of Beavers and Why They Matter',
+ },
+ {
+ __typename: 'Book',
+ title: 'Spineless',
+ subtitle: 'The Science of Jellyfish and the Art of Growing a Backbone',
+ },
+ ],
+ });
+ expect(read()).toBe(result);
+
+ const bookAuthorNameFragment = gql`
+ fragment BookAuthorName on Book {
+ author {
+ name
+ }
+ }
+ `;
+
+ cache.writeFragment({
+ id: 'Book:0735211280',
+ fragment: bookAuthorNameFragment,
+ data: {
+ author: spinelessBookData.author,
+ },
+ });
+
+ // Adding an author doesn't change the structure of the original result,
+ // because the original query did not ask for author information.
+    const resultWithSpinelessAuthor = read();
+    expect(resultWithSpinelessAuthor).toEqual(result);
+    expect(resultWithSpinelessAuthor).toBe(result);
+    expect(resultWithSpinelessAuthor.books[0]).toBe(result.books[0]);
+    expect(resultWithSpinelessAuthor.books[1]).toBe(result.books[1]);
+
+ cache.recordOptimisticTransaction(proxy => {
+ proxy.writeFragment({
+ id: 'Book:1603589082',
+ fragment: bookAuthorNameFragment,
+ data: {
+ author: eagerBookData.author,
+ },
+ });
+ }, 'eager author');
+
+ expect(read()).toEqual(result);
+
+ const queryWithAuthors = gql`
+ {
+ books {
+ title
+ subtitle
+ author {
+ name
+ }
+ }
+ }
+ `;
+
+ function readWithAuthors(optimistic = true) {
+ return cache.readQuery<Q>({
+ query: queryWithAuthors,
+ }, optimistic);
+ }
+
+ function withoutISBN(data: any) {
+ return JSON.parse(JSON.stringify(
+ data,
+ (key, value) => {
+ if (key === 'isbn') return;
+ return value;
+ },
+ ));
+ }
+
+ const resultWithTwoAuthors = readWithAuthors();
+ expect(resultWithTwoAuthors).toEqual({
+ books: [
+ withoutISBN(eagerBookData),
+ withoutISBN(spinelessBookData),
+ ],
+ });
+
+ const buzzBookData = {
+ __typename: 'Book',
+ isbn: '0465052614',
+ title: 'Buzz',
+ subtitle: 'The Nature and Necessity of Bees',
+ author: {
+ __typename: 'Author',
+ name: 'Thor Hanson',
+ },
+ };
+
+ cache.recordOptimisticTransaction(proxy => {
+ proxy.writeQuery({
+ query: queryWithAuthors,
+ data: {
+ books: [
+ eagerBookData,
+ spinelessBookData,
+ buzzBookData,
+ ],
+ },
+ });
+ }, 'buzz book');
+
+ const resultWithBuzz = readWithAuthors();
+
+ expect(resultWithBuzz).toEqual({
+ books: [
+ withoutISBN(eagerBookData),
+ withoutISBN(spinelessBookData),
+ withoutISBN(buzzBookData),
+ ],
+ });
+ expect(resultWithBuzz.books[0]).toEqual(resultWithTwoAuthors.books[0]);
+ expect(resultWithBuzz.books[1]).toEqual(resultWithTwoAuthors.books[1]);
+
+ // Before removing the Buzz optimistic layer from the cache, write the same
+ // data to the root layer of the cache.
+ cache.writeQuery({
+ query: queryWithAuthors,
+ data: {
+ books: [eagerBookData, spinelessBookData, buzzBookData],
+ },
+ });
+
+ expect(readWithAuthors()).toBe(resultWithBuzz);
+
+ function readSpinelessFragment() {
+ return cache.readFragment<{ author: any }>(
+ {
+ id: 'Book:' + spinelessBookData.isbn,
+ fragment: bookAuthorNameFragment,
+ },
+ true,
+ );
+ }
+
+ const spinelessBeforeRemovingBuzz = readSpinelessFragment();
+ cache.removeOptimistic('buzz book');
+ const spinelessAfterRemovingBuzz = readSpinelessFragment();
+ expect(spinelessBeforeRemovingBuzz).toEqual(spinelessAfterRemovingBuzz);
+ expect(spinelessBeforeRemovingBuzz).not.toBe(spinelessAfterRemovingBuzz);
+ expect(spinelessBeforeRemovingBuzz.author).toBe(
+ spinelessAfterRemovingBuzz.author,
+ );
+
+ const resultAfterRemovingBuzzLayer = readWithAuthors();
+ expect(resultAfterRemovingBuzzLayer).toEqual(resultWithBuzz);
+ expect(resultAfterRemovingBuzzLayer).not.toBe(resultWithBuzz);
+ resultWithTwoAuthors.books.forEach((book, i) => {
+ expect(book).toEqual(resultAfterRemovingBuzzLayer.books[i]);
+ expect(book).toBe(resultAfterRemovingBuzzLayer.books[i]);
+ });
+
+ const nonOptimisticResult = readWithAuthors(false);
+ expect(nonOptimisticResult).toEqual(resultWithBuzz);
+ cache.removeOptimistic('eager author');
+ const resultWithoutOptimisticLayers = readWithAuthors();
+ expect(resultWithoutOptimisticLayers).toBe(nonOptimisticResult);
+ });
+});
diff --git a/src/cache/inmemory/__tests__/policies.ts b/src/cache/inmemory/__tests__/policies.ts
new file mode 100644
--- /dev/null
+++ b/src/cache/inmemory/__tests__/policies.ts
@@ -0,0 +1,2525 @@
+import gql from "graphql-tag";
+import { InMemoryCache } from "../inMemoryCache";
+import { StoreValue } from "../../../utilities";
+import { FieldPolicy, Policies } from "../policies";
+import { Reference } from "../../../utilities/graphql/storeUtils";
+
+function reverse(s: string) {
+ return s.split("").reverse().join("");
+}
+
+describe("type policies", function () {
+ const bookQuery = gql`
+ query {
+ book {
+ title
+ author {
+ name
+ }
+ }
+ }
+ `;
+
+ const theInformationBookData = {
+ __typename: "Book",
+ isbn: "1400096235",
+ title: "The Information",
+ subtitle: "A History, a Theory, a Flood",
+ author: {
+ name: "James Gleick"
+ },
+ };
+
+ function checkAuthorName(cache: InMemoryCache) {
+ expect(cache.readQuery({
+ query: gql`
+ query {
+ book {
+ author {
+ name
+ }
+ }
+ }
+ `,
+ })).toEqual({
+ book: {
+ __typename: "Book",
+ author: {
+ name: theInformationBookData.author.name,
+ },
+ },
+ });
+ }
+
+ it("can specify basic keyFields", function () {
+ const cache = new InMemoryCache({
+ typePolicies: {
+ Book: {
+ keyFields: ["isbn"],
+ },
+ },
+ });
+
+ cache.writeQuery({
+ query: bookQuery,
+ data: {
+ book: theInformationBookData,
+ },
+ });
+
+ expect(cache.extract(true)).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ book: {
+ __ref: 'Book:{"isbn":"1400096235"}',
+ },
+ },
+ 'Book:{"isbn":"1400096235"}': {
+ __typename: "Book",
+ title: "The Information",
+ author: {
+ name: "James Gleick"
+ },
+ },
+ });
+
+ checkAuthorName(cache);
+ });
+
+ it("can specify composite keyFields", function () {
+ const cache = new InMemoryCache({
+ typePolicies: {
+ Book: {
+ keyFields: ["title", "author", ["name"]],
+ },
+ },
+ });
+
+ cache.writeQuery({
+ query: bookQuery,
+ data: {
+ book: theInformationBookData,
+ },
+ });
+
+ expect(cache.extract(true)).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ book: {
+ __ref: 'Book:{"title":"The Information","author":{"name":"James Gleick"}}',
+ },
+ },
+ 'Book:{"title":"The Information","author":{"name":"James Gleick"}}': {
+ __typename: "Book",
+ title: "The Information",
+ author: {
+ name: "James Gleick"
+ },
+ },
+ });
+
+ checkAuthorName(cache);
+ });
+
+ it("keeps keyFields in specified order", function () {
+ const cache = new InMemoryCache({
+ typePolicies: {
+ Book: {
+ keyFields: ["author", ["name"], "title"],
+ },
+ },
+ });
+
+ cache.writeQuery({
+ query: bookQuery,
+ data: {
+ book: theInformationBookData,
+ },
+ });
+
+ expect(cache.extract(true)).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ book: {
+ __ref: 'Book:{"author":{"name":"James Gleick"},"title":"The Information"}',
+ },
+ },
+ 'Book:{"author":{"name":"James Gleick"},"title":"The Information"}': {
+ __typename: "Book",
+ title: "The Information",
+ author: {
+ name: "James Gleick"
+ },
+ },
+ });
+
+ checkAuthorName(cache);
+ });
+
+ it("accepts keyFields functions", function () {
+ const cache = new InMemoryCache({
+ typePolicies: {
+ Book: {
+ keyFields(book, context) {
+ expect(context.selectionSet.kind).toBe("SelectionSet");
+ expect(context.fragmentMap).toEqual({});
+ expect(context.policies).toBeInstanceOf(Policies);
+ return context.typename + ":" + book.isbn;
+ },
+ },
+ },
+ });
+
+ cache.writeQuery({
+ query: bookQuery,
+ data: {
+ book: theInformationBookData,
+ },
+ });
+
+ expect(cache.extract(true)).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ book: {
+ __ref: "Book:1400096235",
+ },
+ },
+ "Book:1400096235": {
+ __typename: "Book",
+ title: "The Information",
+ author: {
+ name: "James Gleick"
+ },
+ },
+ });
+
+ checkAuthorName(cache);
+ });
+
+ it("works with fragments that contain aliased key fields", function () {
+ const cache = new InMemoryCache({
+ typePolicies: {
+ Book: {
+ keyFields: ["ISBN", "title"],
+ },
+ },
+ });
+
+ cache.writeQuery({
+ query: gql`
+ query {
+ book {
+ ...BookFragment
+ author {
+ name
+ }
+ }
+ }
+ fragment BookFragment on Book {
+ isbn: ISBN
+ title
+ }
+ `,
+ data: {
+ book: theInformationBookData,
+ },
+ });
+
+ expect(cache.extract(true)).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ book: {
+ __ref: 'Book:{"ISBN":"1400096235","title":"The Information"}',
+ },
+ },
+ 'Book:{"ISBN":"1400096235","title":"The Information"}': {
+ __typename: "Book",
+ ISBN: "1400096235",
+ title: "The Information",
+ author: {
+ name: "James Gleick"
+ },
+ },
+ });
+
+ checkAuthorName(cache);
+ });
+
+ it("complains about missing key fields", function () {
+ const cache = new InMemoryCache({
+ typePolicies: {
+ Book: {
+ keyFields: ["title", "year"],
+ },
+ },
+ });
+
+ const query = gql`
+ query {
+ book {
+ title
+ year
+ }
+ }
+ `;
+
+ cache.writeQuery({
+ query,
+ data: {
+ book: {
+ year: 2011,
+          ...theInformationBookData,
+ },
+ },
+ });
+
+ expect(() => {
+ cache.writeQuery({
+ query,
+ data: {
+ book: theInformationBookData,
+ },
+ });
+ }).toThrow("Missing field year while computing key fields");
+ });
+
+ describe("field policies", function () {
+ it("can filter key arguments", function () {
+ const cache = new InMemoryCache({
+ typePolicies: {
+ Query: {
+ fields: {
+ book: {
+ keyArgs: ["isbn"],
+ },
+ },
+ },
+ },
+ });
+
+ cache.writeQuery({
+ query: gql`
+ query {
+ book(junk: "ignored", isbn: "0465030793") {
+ title
+ }
+ }
+ `,
+ data: {
+ book: {
+ __typename: "Book",
+ isbn: "0465030793",
+ title: "I Am a Strange Loop",
+ },
+ },
+ });
+
+ expect(cache.extract(true)).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ 'book:{"isbn":"0465030793"}': {
+ __typename: "Book",
+ title: "I Am a Strange Loop",
+ },
+ },
+ });
+ });
+
+ it("can filter key arguments in non-Query fields", function () {
+ const cache = new InMemoryCache({
+ typePolicies: {
+ Book: {
+ keyFields: ["isbn"],
+ fields: {
+ author: {
+ keyArgs: ["firstName", "lastName"],
+ },
+ },
+ },
+ Author: {
+ keyFields: ["name"],
+ },
+ },
+ });
+
+ const query = gql`
+ query {
+ book {
+ isbn
+ title
+ author(
+ firstName: "Douglas",
+ middleName: "Richard",
+ lastName: "Hofstadter"
+ ) {
+ name
+ }
+ }
+ }
+ `;
+
+ const data = {
+ book: {
+ __typename: "Book",
+ isbn: "0465030793",
+ title: "I Am a Strange Loop",
+ author: {
+ __typename: "Author",
+ name: "Douglas Hofstadter",
+ },
+ },
+ };
+
+ cache.writeQuery({ query, data });
+
+ expect(cache.extract(true)).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ book: {
+ __ref: 'Book:{"isbn":"0465030793"}',
+ },
+ },
+ 'Book:{"isbn":"0465030793"}': {
+ __typename: "Book",
+ isbn: "0465030793",
+ title: "I Am a Strange Loop",
+ 'author:{"firstName":"Douglas","lastName":"Hofstadter"}': {
+ __ref: 'Author:{"name":"Douglas Hofstadter"}',
+ },
+ },
+ 'Author:{"name":"Douglas Hofstadter"}': {
+ __typename: "Author",
+ name: "Douglas Hofstadter",
+ },
+ });
+
+ const result = cache.readQuery({ query });
+ expect(result).toEqual(data);
+ });
+
+ it("assumes keyArgs:false when read or merge function present", function () {
+ const cache = new InMemoryCache({
+ typePolicies: {
+ TypeA: {
+ fields: {
+ a() {
+ return "a";
+ },
+ },
+ },
+
+ TypeB: {
+ fields: {
+ b: {
+ keyArgs: ["x"],
+ read() {
+ return "b";
+ },
+ },
+ },
+ },
+
+ TypeC: {
+ fields: {
+ c: {
+ keyArgs: false,
+ merge(existing, incoming: string) {
+ return reverse(incoming);
+ },
+ },
+ },
+ },
+
+ TypeD: {
+ fields: {
+ d: {
+ keyArgs(field) {
+ return "d";
+ },
+ read(existing: string) {
+ return existing.toLowerCase();
+ },
+ merge(existing: string, incoming: string) {
+ return incoming.toUpperCase();
+ },
+ },
+ },
+ },
+
+ TypeE: {
+ fields: {
+ e: {
+ read(existing: string) {
+ return existing.slice(1);
+ },
+ merge(existing: string, incoming: string) {
+ return "*" + incoming;
+ },
+ },
+ },
+ },
+
+ TypeF: {
+ fields: {
+ f: {
+ // nothing
+ },
+ },
+ },
+
+ Query: {
+ fields: {
+ types(existing: any[], { args }) {
+ const fromCode = args.from.charCodeAt(0);
+ const toCode = args.to.charCodeAt(0);
+ let e = 0;
+ for (let code = fromCode; code <= toCode; ++code) {
+ const upper = String.fromCharCode(code).toUpperCase();
+ const obj = existing[e++];
+ expect(obj.__typename).toBe("Type" + upper);
+ }
+ return existing;
+ },
+ },
+ },
+ },
+ });
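+    // Restating the rule under test (from the assertions below, not from
+    // API docs): a field policy with a read or merge function but no
+    // keyArgs behaves as keyArgs: false, ignoring arguments in the
+    // storeFieldName, while a policy with neither falls back to the
+    // legacy fieldName({args}) format.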
+
+ const query = gql`
+ query {
+ types(from: "A", to: "F") {
+ ... on TypeA { a }
+ ... on TypeB { b(x: 1, y: 2, z: 3) }
+ ... on TypeC { c(see: "si") }
+ ... on TypeD { d }
+ ... on TypeE { e(eee: "ee") }
+ ... on TypeF { f(g: "h") }
+ }
+ }
+ `;
+
+ cache.writeQuery({
+ query,
+ data: {
+ types: [{
+ __typename: "TypeA",
+ }, {
+ __typename: "TypeB",
+ b: "x1",
+ }, {
+ __typename: "TypeC",
+ c: "naive",
+ }, {
+ __typename: "TypeD",
+ d: "quiet",
+ }, {
+ __typename: "TypeE",
+ e: "asterisk",
+ }, {
+ __typename: "TypeF",
+ f: "effigy",
+ }],
+ },
+ });
+
+ expect(cache.extract()).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ types: [
+ {
+ __typename: "TypeA",
+ },
+ {
+ __typename: "TypeB",
+ 'b:{"x":1}': "x1",
+ },
+ {
+ __typename: "TypeC",
+ c: "evian",
+ },
+ {
+ __typename: "TypeD",
+ d: "QUIET",
+ },
+ {
+ __typename: "TypeE",
+ e: "*asterisk",
+ },
+ {
+ __typename: "TypeF",
+ 'f({"g":"h"})': "effigy",
+ },
+ ],
+ },
+ });
+
+ const result = cache.readQuery({ query });
+ expect(result).toEqual({
+ types: [
+ {
+ __typename: "TypeA",
+ a: "a",
+ }, {
+ __typename: "TypeB",
+ b: "b",
+ }, {
+ __typename: "TypeC",
+ c: "evian",
+ }, {
+ __typename: "TypeD",
+ d: "quiet",
+ }, {
+ __typename: "TypeE",
+ e: "asterisk",
+ }, {
+ __typename: "TypeF",
+ f: "effigy",
+ }
+ ],
+ });
+ });
+
+ it("can use stable storage in read functions", function () {
+ const storageSet = new Set<Record<string, any>>();
+
+ const cache = new InMemoryCache({
+ typePolicies: {
+ Task: {
+ fields: {
+ result(existing, { args, storage }) {
+ storageSet.add(storage);
+ if (storage.result) return storage.result;
+ return storage.result = compute(args);
+ },
+ },
+ },
+ },
+ });
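+    // The storage object handed to read is stable per field per entity, so
+    // the policy above memoizes compute(args): each Task pays for the
+    // computation once, until its storage.result is deleted further down.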
+
+ let computeCount = 0;
+ function compute(args) {
+ return `expensive result ${++computeCount}`;
+ }
+
+ cache.writeQuery({
+ query: gql`
+ query {
+ tasks {
+ id
+ }
+ }
+ `,
+ data: {
+ tasks: [{
+ __typename: "Task",
+ id: 1,
+ }, {
+ __typename: "Task",
+ id: 2,
+ }],
+ },
+ });
+
+ expect(cache.extract()).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ tasks: [
+ { __ref: "Task:1" },
+ { __ref: "Task:2" },
+ ],
+ },
+ "Task:1": {
+ __typename: "Task",
+ id: 1,
+ },
+ "Task:2": {
+ __typename: "Task",
+ id: 2,
+ },
+ });
+
+ const result1 = cache.readQuery({
+ query: gql`
+ query {
+ tasks {
+ result
+ }
+ }
+ `,
+ });
+
+ expect(result1).toEqual({
+ tasks: [{
+ __typename: "Task",
+ result: "expensive result 1",
+ }, {
+ __typename: "Task",
+ result: "expensive result 2",
+ }],
+ });
+
+ const result2 = cache.readQuery({
+ query: gql`
+ query {
+ tasks {
+ id
+ result
+ }
+ }
+ `,
+ });
+
+ expect(result2).toEqual({
+ tasks: [{
+ __typename: "Task",
+ id: 1,
+ result: "expensive result 1",
+ }, {
+ __typename: "Task",
+ id: 2,
+ result: "expensive result 2",
+ }],
+ });
+
+ // Clear the cached results.
+ storageSet.forEach(storage => {
+ delete storage.result;
+ });
+
+ const result3 = cache.readQuery({
+ query: gql`
+ query {
+ tasks {
+ __typename
+ result
+ }
+ }
+ `,
+ });
+
+ expect(result3).toEqual({
+ tasks: [{
+ __typename: "Task",
+ result: "expensive result 3",
+ }, {
+ __typename: "Task",
+ result: "expensive result 4",
+ }],
+ });
+ });
+
+ it("can use read function to implement synthetic/computed keys", function () {
+ const cache = new InMemoryCache({
+ typePolicies: {
+ Person: {
+ keyFields: ["firstName", "lastName"],
+ fields: {
+ fullName(_, { readField }) {
+ const firstName = readField("firstName");
+ const lastName = readField("lastName");
+ return `${firstName} ${lastName}`;
+ },
+ },
+ },
+ },
+ });
+
+ cache.writeQuery({
+ query: gql`
+ query {
+ me {
+ firstName
+ lastName
+ }
+ }
+ `,
+ data: {
+ me: {
+ __typename: "Person",
+ firstName: "Ben",
+ lastName: "Newman",
+ },
+ },
+ });
+
+ const expectedExtraction = {
+ ROOT_QUERY: {
+ __typename: "Query",
+ me: {
+ __ref: 'Person:{"firstName":"Ben","lastName":"Newman"}',
+ },
+ },
+ 'Person:{"firstName":"Ben","lastName":"Newman"}': {
+ __typename: "Person",
+ firstName: "Ben",
+ lastName: "Newman",
+ },
+ };
+
+ expect(cache.extract(true)).toEqual(expectedExtraction);
+
+ const expectedResult = {
+ me: {
+ __typename: "Person",
+ fullName: "Ben Newman",
+ },
+ };
+
+ expect(cache.readQuery({
+ query: gql`
+ query {
+ me {
+ fullName
+ }
+ }
+ `,
+ })).toEqual(expectedResult);
+
+ expect(cache.readQuery({
+ query: gql`
+ query {
+ me {
+ fullName @client
+ }
+ }
+ `,
+ })).toEqual(expectedResult);
+
+ expect(cache.extract(true)).toEqual(expectedExtraction);
+ });
+
+ it("read and merge can cooperate through options.storage", function () {
+ const cache = new InMemoryCache({
+ typePolicies: {
+ Query: {
+ fields: {
+ jobs: {
+ merge(existing: any[] = [], incoming: any[]) {
+ return [...existing, ...incoming];
+ },
+ },
+ },
+ },
+
+ Job: {
+ keyFields: ["name"],
+ fields: {
+ result: {
+ read(_, { storage }) {
+ return storage.result;
+ },
+ merge(_, incoming, { storage, invalidate }) {
+ storage.result = incoming;
+ invalidate();
+ },
+ },
+ },
+ },
+ },
+ });
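+    // Summary of the Job.result policy above (not an API reference): merge
+    // never writes the result into the normalized store; it stashes it in
+    // the shared storage object for read to return, and calls invalidate()
+    // so previously cached reads of this field get recomputed.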
+
+ const query = gql`
+ query {
+ jobs {
+ name
+ result
+ }
+ }
+ `;
+
+ cache.writeQuery({
+ query,
+ data: {
+ jobs: [{
+ __typename: "Job",
+ name: "Job #1",
+ // intentionally omitting the result field
+ }, {
+ __typename: "Job",
+ name: "Job #2",
+ // intentionally omitting the result field
+ }, {
+ __typename: "Job",
+ name: "Job #3",
+ // intentionally omitting the result field
+ }],
+ },
+ });
+
+ const snapshot1 = {
+ ROOT_QUERY: {
+ __typename: "Query",
+ jobs: [
+ { __ref: 'Job:{"name":"Job #1"}' },
+ { __ref: 'Job:{"name":"Job #2"}' },
+ { __ref: 'Job:{"name":"Job #3"}' },
+ ],
+ },
+ 'Job:{"name":"Job #1"}': {
+ __typename: "Job",
+ name: "Job #1",
+ },
+ 'Job:{"name":"Job #2"}': {
+ __typename: "Job",
+ name: "Job #2",
+ },
+ 'Job:{"name":"Job #3"}': {
+ __typename: "Job",
+ name: "Job #3",
+ },
+ };
+
+ expect(cache.extract()).toEqual(snapshot1);
+
+ expect(cache.diff({
+ query,
+ optimistic: false,
+ returnPartialData: true,
+ })).toEqual({
+ result: {
+ jobs: [{
+ __typename: "Job",
+ name: "Job #1",
+ }, {
+ __typename: "Job",
+ name: "Job #2",
+ }, {
+ __typename: "Job",
+ name: "Job #3",
+ }],
+ },
+ complete: false,
+ });
+
+ function setResult(jobNum: number) {
+ cache.writeFragment({
+ id: cache.identify({
+ __typename: "Job",
+ name: `Job #${jobNum}`,
+ }),
+ fragment: gql`
+ fragment JobResult on Job {
+ result
+ }
+ `,
+ data: {
+ __typename: "Job",
+ result: `result for job ${jobNum}`,
+ },
+ });
+ }
+
+ setResult(2);
+
+ // Nothing should have changed in the cache itself as a result of
+ // writing a result for job #2.
+ expect(cache.extract()).toEqual(snapshot1);
+
+ expect(cache.diff({
+ query,
+ optimistic: false,
+ returnPartialData: true,
+ })).toEqual({
+ result: {
+ jobs: [{
+ __typename: "Job",
+ name: "Job #1",
+ }, {
+ __typename: "Job",
+ name: "Job #2",
+ result: "result for job 2",
+ }, {
+ __typename: "Job",
+ name: "Job #3",
+ }],
+ },
+ complete: false,
+ });
+
+ cache.writeQuery({
+ query,
+ data: {
+ jobs: [{
+ __typename: "Job",
+ name: "Job #4",
+ result: "result for job 4",
+ }],
+ },
+ });
+
+ const snapshot2 = {
+ ...snapshot1,
+ ROOT_QUERY: {
+ ...snapshot1.ROOT_QUERY,
+ jobs: [
+ ...snapshot1.ROOT_QUERY.jobs,
+ { __ref: 'Job:{"name":"Job #4"}' },
+ ],
+ },
+ 'Job:{"name":"Job #4"}': {
+ __typename: "Job",
+ name: "Job #4",
+ },
+ };
+
+ expect(cache.extract()).toEqual(snapshot2);
+
+ expect(cache.diff({
+ query,
+ optimistic: false,
+ returnPartialData: true,
+ })).toEqual({
+ result: {
+ jobs: [{
+ __typename: "Job",
+ name: "Job #1",
+ }, {
+ __typename: "Job",
+ name: "Job #2",
+ result: "result for job 2",
+ }, {
+ __typename: "Job",
+ name: "Job #3",
+ }, {
+ __typename: "Job",
+ name: "Job #4",
+ result: "result for job 4",
+ }],
+ },
+ complete: false,
+ });
+
+ setResult(1);
+ setResult(3);
+
+ expect(cache.diff({
+ query,
+ optimistic: false,
+ returnPartialData: true,
+ })).toEqual({
+ result: {
+ jobs: [{
+ __typename: "Job",
+ name: "Job #1",
+ result: "result for job 1",
+ }, {
+ __typename: "Job",
+ name: "Job #2",
+ result: "result for job 2",
+ }, {
+ __typename: "Job",
+ name: "Job #3",
+ result: "result for job 3",
+ }, {
+ __typename: "Job",
+ name: "Job #4",
+ result: "result for job 4",
+ }],
+ },
+ complete: true,
+ });
+
+ expect(cache.readQuery({ query })).toEqual({
+ jobs: [{
+ __typename: "Job",
+ name: "Job #1",
+ result: "result for job 1",
+ }, {
+ __typename: "Job",
+ name: "Job #2",
+ result: "result for job 2",
+ }, {
+ __typename: "Job",
+ name: "Job #3",
+ result: "result for job 3",
+ }, {
+ __typename: "Job",
+ name: "Job #4",
+ result: "result for job 4",
+ }],
+ });
+ });
+
+ it("merge functions can deduplicate items using readField", function () {
+ const cache = new InMemoryCache({
+ typePolicies: {
+ Query: {
+ fields: {
+ books: {
+ merge(existing: any[] = [], incoming: any[], {
+ readField,
+ }) {
+ if (existing) {
+ const merged = existing.slice(0);
+ const existingIsbnSet =
+ new Set(merged.map(book => readField("isbn", book)));
+ incoming.forEach(book => {
+ const isbn = readField("isbn", book);
+ if (!existingIsbnSet.has(isbn)) {
+ existingIsbnSet.add(isbn);
+ merged.push(book);
+ }
+ });
+ return merged;
+ }
+ return incoming;
+ },
+
+ // Returns the books array, sorted by title.
+ read(existing: any[], { readField }) {
+ if (existing) {
+ return existing.slice(0).sort((a, b) => {
+ const aTitle = readField<string>("title", a);
+ const bTitle = readField<string>("title", b);
+ if (aTitle === bTitle) return 0;
+ if (aTitle < bTitle) return -1;
+ return 1;
+ });
+ }
+ return [];
+ },
+ },
+ },
+ },
+
+ Book: {
+ keyFields: ["isbn"],
+ },
+ },
+ });
+
+ const query = gql`
+ query {
+ books {
+ isbn
+ title
+ }
+ }
+ `;
+
+ const programmingRustBook = {
+ __typename: "Book",
+ isbn: "9781491927281",
+ title: "Programming Rust: Fast, Safe Systems Development",
+ };
+
+ const officialRustBook = {
+ __typename: "Book",
+ isbn: "1593278284",
+ title: "The Rust Programming Language",
+ };
+
+ const handsOnConcurrencyBook = {
+ __typename: "Book",
+ isbn: "1788399978",
+ title: "Hands-On Concurrency with Rust",
+ };
+
+ const wasmWithRustBook = {
+ __typename: "Book",
+ isbn: "1680506366",
+ title: "Programming WebAssembly with Rust",
+ };
+
+ function addBooks(...books: (typeof programmingRustBook)[]) {
+ cache.writeQuery({
+ query,
+ data: {
+ books,
+ },
+ });
+ }
+
+ addBooks(officialRustBook);
+
+ expect(cache.extract()).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ books: [
+ { __ref: 'Book:{"isbn":"1593278284"}' },
+ ],
+ },
+ 'Book:{"isbn":"1593278284"}': {
+ __typename: "Book",
+ isbn: "1593278284",
+ title: "The Rust Programming Language",
+ },
+ });
+
+ addBooks(
+ programmingRustBook,
+ officialRustBook,
+ );
+
+ expect(cache.extract()).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ books: [
+ { __ref: 'Book:{"isbn":"1593278284"}' },
+ { __ref: 'Book:{"isbn":"9781491927281"}' },
+ ],
+ },
+ 'Book:{"isbn":"1593278284"}': officialRustBook,
+ 'Book:{"isbn":"9781491927281"}': programmingRustBook,
+ });
+
+ addBooks(
+ wasmWithRustBook,
+ wasmWithRustBook,
+ programmingRustBook,
+ wasmWithRustBook,
+ );
+
+ expect(cache.extract()).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ books: [
+ { __ref: 'Book:{"isbn":"1593278284"}' },
+ { __ref: 'Book:{"isbn":"9781491927281"}' },
+ { __ref: 'Book:{"isbn":"1680506366"}' },
+ ],
+ },
+ 'Book:{"isbn":"1593278284"}': officialRustBook,
+ 'Book:{"isbn":"9781491927281"}': programmingRustBook,
+ 'Book:{"isbn":"1680506366"}': wasmWithRustBook,
+ });
+
+ addBooks(
+ programmingRustBook,
+ officialRustBook,
+ handsOnConcurrencyBook,
+ wasmWithRustBook,
+ );
+
+ expect(cache.extract()).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ books: [
+ { __ref: 'Book:{"isbn":"1593278284"}' },
+ { __ref: 'Book:{"isbn":"9781491927281"}' },
+ { __ref: 'Book:{"isbn":"1680506366"}' },
+ { __ref: 'Book:{"isbn":"1788399978"}' },
+ ],
+ },
+ 'Book:{"isbn":"1593278284"}': officialRustBook,
+ 'Book:{"isbn":"9781491927281"}': programmingRustBook,
+ 'Book:{"isbn":"1680506366"}': wasmWithRustBook,
+ 'Book:{"isbn":"1788399978"}': handsOnConcurrencyBook,
+ });
+
+ expect(cache.readQuery({ query })).toEqual({
+ // Note that these books have been sorted by title, thanks to the
+ // custom read function we defined above.
+ "books": [
+ {
+ "__typename": "Book",
+ "isbn": "1788399978",
+ "title": "Hands-On Concurrency with Rust",
+ },
+ {
+ "__typename": "Book",
+ "isbn": "9781491927281",
+ "title": "Programming Rust: Fast, Safe Systems Development",
+ },
+ {
+ "__typename": "Book",
+ "isbn": "1680506366",
+ "title": "Programming WebAssembly with Rust",
+ },
+ {
+ "__typename": "Book",
+ "isbn": "1593278284",
+ "title": "The Rust Programming Language",
+ },
+ ],
+ });
+ });
+
+ it("readField helper function calls custom read functions", function () {
+ // Rather than writing ownTime data into the cache, we maintain it
+ // externally in this object:
+ const ownTimes = {
+ "parent task": 2,
+ "child task 1": 3,
+ "child task 2": 4,
+ "grandchild task": 5,
+ "independent task": 11,
+ };
+
+ const invalidators: Record<string, () => void> = {};
+
+ const cache = new InMemoryCache({
+ typePolicies: {
+ Agenda: {
+ fields: {
+ taskCount(_, { readField }) {
+ return readField<Reference[]>("tasks").length;
+ },
+
+ tasks: {
+              // Thanks to this read function, the readField("tasks")
+              // call in taskCount above will always return an array,
+              // so taskCount does not need to guard against undefined
+              // tasks data.
+ read(existing = []) {
+ return existing;
+ },
+
+ merge(existing: Reference[], incoming: Reference[]) {
+ const merged = existing ? existing.slice(0) : [];
+ merged.push(...incoming);
+ return merged;
+ },
+ },
+ },
+ },
+
+ Task: {
+ fields: {
+ ownTime(_, { readField, invalidate }) {
+ const desc = readField<string>("description");
+ // Store the invalidate function so that we can call it
+ // after updating the external ownTimes data.
+ invalidators[desc] = invalidate;
+ return ownTimes[desc] || 0;
+ },
+
+ totalTime(_, { readField, toReference }) {
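+            // Recursively sum ownTime over the graph of blockers, tracking
+            // seen refs so shared blockers count only once and cycles
+            // terminate.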
+ function total(
+ blockers: Readonly<Reference[]> = [],
+ seen = new Set<string>(),
+ ) {
+ let time = 0;
+ blockers.forEach(blocker => {
+ if (!seen.has(blocker.__ref)) {
+ seen.add(blocker.__ref);
+ time += readField<number>("ownTime", blocker);
+ time += total(
+ readField<Reference[]>("blockers", blocker),
+ seen,
+ );
+ }
+ });
+ return time;
+ }
+ return total([
+ toReference({
+ __typename: "Task",
+ id: readField("id"),
+ }),
+ ]);
+ },
+
+ blockers: {
+ merge(existing: Reference[] = [], incoming: Reference[]) {
+ const seenIDs = new Set(existing.map(ref => ref.__ref));
+ const merged = existing.slice(0);
+ incoming.forEach(ref => {
+ if (!seenIDs.has(ref.__ref)) {
+ seenIDs.add(ref.__ref);
+ merged.push(ref);
+ }
+ });
+ return merged;
+ },
+ },
+ },
+ },
+ },
+ });
+
+ cache.writeQuery({
+ query: gql`
+ query {
+ agenda {
+ id
+ tasks {
+ id
+ description
+ blockers {
+ id
+ }
+ }
+ }
+ }
+ `,
+ data: {
+ agenda: {
+ __typename: "Agenda",
+ id: 1,
+ tasks: [{
+ __typename: "Task",
+ id: 1,
+ description: "parent task",
+ blockers: [{
+ __typename: "Task",
+ id: 2,
+ }, {
+ __typename: "Task",
+ id: 3,
+ }],
+ }, {
+ __typename: "Task",
+ id: 2,
+ description: "child task 1",
+ blockers: [{
+ __typename: "Task",
+ id: 4,
+ }],
+ }, {
+ __typename: "Task",
+ id: 3,
+ description: "child task 2",
+ blockers: [{
+ __typename: "Task",
+ id: 4,
+ }],
+ }, {
+ __typename: "Task",
+ id: 4,
+ description: "grandchild task",
+ }],
+ },
+ },
+ });
+
+ expect(cache.extract()).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ agenda: { __ref: "Agenda:1" },
+ },
+ "Agenda:1": {
+ __typename: "Agenda",
+ id: 1,
+ tasks: [
+ { __ref: "Task:1" },
+ { __ref: "Task:2" },
+ { __ref: "Task:3" },
+ { __ref: "Task:4" },
+ ],
+ },
+ "Task:1": {
+ __typename: "Task",
+ blockers: [
+ { __ref: "Task:2" },
+ { __ref: "Task:3" },
+ ],
+ description: "parent task",
+ id: 1,
+ },
+ "Task:2": {
+ __typename: "Task",
+ blockers: [
+ { __ref: "Task:4" },
+ ],
+ description: "child task 1",
+ id: 2,
+ },
+ "Task:3": {
+ __typename: "Task",
+ blockers: [
+ { __ref: "Task:4" },
+ ],
+ description: "child task 2",
+ id: 3,
+ },
+ "Task:4": {
+ __typename: "Task",
+ description: "grandchild task",
+ id: 4,
+ },
+ });
+
+ const query = gql`
+ query {
+ agenda {
+ taskCount
+ tasks {
+ description
+ ownTime
+ totalTime
+ }
+ }
+ }
+ `;
+
+ function read() {
+ return cache.readQuery<{ agenda: any }>({ query });
+ }
+
+ const firstResult = read();
+
+ expect(firstResult).toEqual({
+ agenda: {
+ __typename: "Agenda",
+ taskCount: 4,
+ tasks: [{
+ __typename: "Task",
+ description: "parent task",
+ ownTime: 2,
+ totalTime: 2 + 3 + 4 + 5,
+ }, {
+ __typename: "Task",
+ description: "child task 1",
+ ownTime: 3,
+ totalTime: 3 + 5,
+ }, {
+ __typename: "Task",
+ description: "child task 2",
+ ownTime: 4,
+ totalTime: 4 + 5,
+ }, {
+ __typename: "Task",
+ description: "grandchild task",
+ ownTime: 5,
+ totalTime: 5,
+ }],
+ },
+ });
+
+ expect(read()).toBe(firstResult);
+
+ ownTimes["child task 2"] = 6;
+
+ // The query will not be reevaluated until we invalidate the read
+ // function whose result we changed.
+ expect(read()).toBe(firstResult);
+
+ invalidators["child task 2"]();
+
+ const secondResult = read();
+ expect(secondResult).not.toBe(firstResult);
+ expect(secondResult).toEqual({
+ agenda: {
+ __typename: "Agenda",
+ taskCount: 4,
+ tasks: [{
+ __typename: "Task",
+ description: "parent task",
+ ownTime: 2,
+ totalTime: 2 + 3 + 6 + 5,
+ }, {
+ __typename: "Task",
+ description: "child task 1",
+ ownTime: 3,
+ totalTime: 3 + 5,
+ }, {
+ __typename: "Task",
+ description: "child task 2",
+ ownTime: 6,
+ totalTime: 6 + 5,
+ }, {
+ __typename: "Task",
+ description: "grandchild task",
+ ownTime: 5,
+ totalTime: 5,
+ }],
+ },
+ });
+ expect(secondResult.agenda.tasks[0]).not.toBe(firstResult.agenda.tasks[0]);
+ expect(secondResult.agenda.tasks[1]).toBe(firstResult.agenda.tasks[1]);
+ expect(secondResult.agenda.tasks[2]).not.toBe(firstResult.agenda.tasks[2]);
+ expect(secondResult.agenda.tasks[3]).toBe(firstResult.agenda.tasks[3]);
+
+ ownTimes["grandchild task"] = 7;
+ invalidators["grandchild task"]();
+
+ const thirdResult = read();
+ expect(thirdResult).not.toBe(secondResult);
+ expect(thirdResult).toEqual({
+ agenda: {
+ __typename: "Agenda",
+ taskCount: 4,
+ tasks: [{
+ __typename: "Task",
+ description: "parent task",
+ ownTime: 2,
+ totalTime: 2 + 3 + 6 + 7,
+ }, {
+ __typename: "Task",
+ description: "child task 1",
+ ownTime: 3,
+ totalTime: 3 + 7,
+ }, {
+ __typename: "Task",
+ description: "child task 2",
+ ownTime: 6,
+ totalTime: 6 + 7,
+ }, {
+ __typename: "Task",
+ description: "grandchild task",
+ ownTime: 7,
+ totalTime: 7,
+ }],
+ },
+ });
+
+ cache.writeQuery({
+ query: gql`
+ query {
+ agenda {
+ id
+ tasks {
+ id
+ description
+ }
+ }
+ }
+ `,
+ data: {
+ agenda: {
+ __typename: "Agenda",
+ id: 1,
+ tasks: [{
+ __typename: "Task",
+ id: 5,
+ description: "independent task",
+ }],
+ },
+ },
+ });
+
+ expect(cache.extract()).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ agenda: { __ref: "Agenda:1" },
+ },
+ "Agenda:1": {
+ __typename: "Agenda",
+ id: 1,
+ tasks: [
+ { __ref: "Task:1" },
+ { __ref: "Task:2" },
+ { __ref: "Task:3" },
+ { __ref: "Task:4" },
+ { __ref: "Task:5" },
+ ],
+ },
+ "Task:1": {
+ __typename: "Task",
+ blockers: [
+ { __ref: "Task:2" },
+ { __ref: "Task:3" },
+ ],
+ description: "parent task",
+ id: 1,
+ },
+ "Task:2": {
+ __typename: "Task",
+ blockers: [
+ { __ref: "Task:4" },
+ ],
+ description: "child task 1",
+ id: 2,
+ },
+ "Task:3": {
+ __typename: "Task",
+ blockers: [
+ { __ref: "Task:4" },
+ ],
+ description: "child task 2",
+ id: 3,
+ },
+ "Task:4": {
+ __typename: "Task",
+ description: "grandchild task",
+ id: 4,
+ },
+ "Task:5": {
+ __typename: "Task",
+ description: "independent task",
+ id: 5,
+ },
+ });
+
+ const fourthResult = read();
+ expect(fourthResult).not.toBe(thirdResult);
+ expect(fourthResult).toEqual({
+ agenda: {
+ __typename: "Agenda",
+ taskCount: 5,
+ tasks: [{
+ __typename: "Task",
+ description: "parent task",
+ ownTime: 2,
+ totalTime: 2 + 3 + 6 + 7,
+ }, {
+ __typename: "Task",
+ description: "child task 1",
+ ownTime: 3,
+ totalTime: 3 + 7,
+ }, {
+ __typename: "Task",
+ description: "child task 2",
+ ownTime: 6,
+ totalTime: 6 + 7,
+ }, {
+ __typename: "Task",
+ description: "grandchild task",
+ ownTime: 7,
+ totalTime: 7,
+ }, {
+ __typename: "Task",
+ description: "independent task",
+ ownTime: 11,
+ totalTime: 11,
+ }],
+ },
+ });
+
+ function checkFirstFourIdentical(result: ReturnType<typeof read>) {
+ for (let i = 0; i < 4; ++i) {
+ expect(result.agenda.tasks[i]).toBe(thirdResult.agenda.tasks[i]);
+ }
+ }
+ // The four original task results should not have been altered by
+ // the addition of a fifth independent task.
+ checkFirstFourIdentical(fourthResult);
+
+ ++ownTimes["independent task"];
+ invalidators["independent task"]();
+
+ const fifthResult = read();
+ expect(fifthResult).not.toBe(fourthResult);
+ expect(fifthResult).toEqual({
+ agenda: {
+ __typename: "Agenda",
+ taskCount: 5,
+ tasks: [
+ fourthResult.agenda.tasks[0],
+ fourthResult.agenda.tasks[1],
+ fourthResult.agenda.tasks[2],
+ fourthResult.agenda.tasks[3],
+ {
+ __typename: "Task",
+ description: "independent task",
+ ownTime: 12,
+ totalTime: 12,
+ },
+ ],
+ },
+ });
+ checkFirstFourIdentical(fifthResult);
+ });
+
+ it("can return void to indicate missing field", function () {
+ let secretReadAttempted = false;
+
+ const cache = new InMemoryCache({
+ typePolicies: {
+ Person: {
+ fields: {
+ secret() {
+ secretReadAttempted = true;
+ // Return nothing to signal field is missing.
+ },
+ },
+ },
+ },
+ });
+
+ const query = gql`
+ query {
+ me {
+ name
+ }
+ }
+ `;
+
+ cache.writeQuery({
+ query,
+ data: {
+ me: {
+ __typename: "Person",
+ name: "Ben Newman",
+ },
+ },
+ });
+
+ expect(secretReadAttempted).toBe(false);
+
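+    // Because the read function returns undefined, the field is treated
+    // as missing, so this readQuery throws.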
+ expect(() => {
+ cache.readQuery({
+ query: gql`
+ query {
+ me {
+ secret
+ }
+ }
+ `
+ });
+ }).toThrow("Can't find field secret");
+
+ expect(secretReadAttempted).toBe(true);
+ });
+
+ it("can define custom merge functions", function () {
+ const cache = new InMemoryCache({
+ typePolicies: {
+ Person: {
+          // Disables normalization for the Person type, so the todos
+          // field will be nested inside a non-normalized object (with
+          // __typename "Person") directly under the ROOT_QUERY.me field.
+          // This exercises what happens when mergeOverrides becomes
+          // nested (see writeToStore.ts).
+ keyFields: false,
+
+ fields: {
+ todos: {
+ keyArgs: [],
+
+ read(existing: any[], {
+ args,
+ toReference,
+ isReference,
+ policies,
+ }) {
+ expect(!existing || Object.isFrozen(existing)).toBe(true);
+ expect(typeof toReference).toBe("function");
+ expect(policies).toBeInstanceOf(Policies);
+ const slice = existing.slice(
+ args.offset,
+ args.offset + args.limit,
+ );
+ slice.forEach(ref => expect(isReference(ref)).toBe(true));
+ return slice;
+ },
+
+ merge(existing: any[], incoming: any[], {
+ args,
+ toReference,
+ isReference,
+ policies,
+ }) {
+ expect(!existing || Object.isFrozen(existing)).toBe(true);
+ expect(typeof toReference).toBe("function");
+ expect(policies).toBeInstanceOf(Policies);
+ const copy = existing ? existing.slice(0) : [];
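+              // Copy the incoming page into its absolute offsets; positions
+              // not yet written remain holes in the sparse array.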
+ const limit = args.offset + args.limit;
+ for (let i = args.offset; i < limit; ++i) {
+ copy[i] = incoming[i - args.offset];
+ }
+ copy.forEach(todo => expect(isReference(todo)).toBe(true));
+ return copy;
+ }
+ },
+ },
+ },
+
+ Todo: {
+ keyFields: ["id"],
+ },
+ },
+ });
+
+ const query = gql`
+ query {
+ me {
+ todos(offset: $offset, limit: $limit) {
+ text
+ }
+ }
+ }
+ `;
+
+ cache.writeQuery({
+ query,
+ data: {
+ me: {
+ __typename: "Person",
+ id: "ignored",
+ todos: [
+ { __typename: "Todo", id: 1, text: "Write more merge tests" },
+ { __typename: "Todo", id: 2, text: "Write pagination tests" },
+ ],
+ },
+ },
+ variables: {
+ offset: 0,
+ limit: 2,
+ },
+ });
+
+ expect(cache.extract(true)).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ me: {
+ __typename: "Person",
+ "todos:{}": [
+ { __ref: 'Todo:{"id":1}' },
+ { __ref: 'Todo:{"id":2}' },
+ ],
+ },
+ },
+ 'Todo:{"id":1}': {
+ __typename: "Todo",
+ text: "Write more merge tests",
+ },
+ 'Todo:{"id":2}': {
+ __typename: "Todo",
+ text: "Write pagination tests",
+ },
+ });
+
+ cache.writeQuery({
+ query,
+ data: {
+ me: {
+ __typename: "Person",
+ todos: [
+ { __typename: "Todo", id: 5, text: "Submit pull request" },
+ { __typename: "Todo", id: 6, text: "Merge pull request" },
+ ],
+ },
+ },
+ variables: {
+ offset: 4,
+ limit: 2,
+ },
+ });
+
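+    // Writing offsets 4-5 before offsets 2-3 leaves holes (void 0) at
+    // positions 2 and 3 of the stored todos array, as this snapshot shows.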
+ expect(cache.extract(true)).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ me: {
+ __typename: "Person",
+ "todos:{}": [
+ { __ref: 'Todo:{"id":1}' },
+ { __ref: 'Todo:{"id":2}' },
+ void 0,
+ void 0,
+ { __ref: 'Todo:{"id":5}' },
+ { __ref: 'Todo:{"id":6}' },
+ ],
+ },
+ },
+ 'Todo:{"id":1}': {
+ __typename: "Todo",
+ text: "Write more merge tests",
+ },
+ 'Todo:{"id":2}': {
+ __typename: "Todo",
+ text: "Write pagination tests",
+ },
+ 'Todo:{"id":5}': {
+ __typename: "Todo",
+ text: "Submit pull request",
+ },
+ 'Todo:{"id":6}': {
+ __typename: "Todo",
+ text: "Merge pull request",
+ },
+ });
+
+ cache.writeQuery({
+ query,
+ data: {
+ me: {
+ __typename: "Person",
+ todos: [
+ { __typename: "Todo", id: 3, text: "Iron out merge API" },
+ { __typename: "Todo", id: 4, text: "Take a nap" },
+ ],
+ },
+ },
+ variables: {
+ offset: 2,
+ limit: 2,
+ },
+ });
+
+ expect(cache.extract(true)).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ me: {
+ __typename: "Person",
+ "todos:{}": [
+ { __ref: 'Todo:{"id":1}' },
+ { __ref: 'Todo:{"id":2}' },
+ { __ref: 'Todo:{"id":3}' },
+ { __ref: 'Todo:{"id":4}' },
+ { __ref: 'Todo:{"id":5}' },
+ { __ref: 'Todo:{"id":6}' },
+ ],
+ },
+ },
+ 'Todo:{"id":1}': {
+ __typename: "Todo",
+ text: "Write more merge tests",
+ },
+ 'Todo:{"id":2}': {
+ __typename: "Todo",
+ text: "Write pagination tests",
+ },
+ 'Todo:{"id":3}': {
+ __typename: "Todo",
+ text: "Iron out merge API",
+ },
+ 'Todo:{"id":4}': {
+ __typename: "Todo",
+ text: "Take a nap",
+ },
+ 'Todo:{"id":5}': {
+ __typename: "Todo",
+ text: "Submit pull request",
+ },
+ 'Todo:{"id":6}': {
+ __typename: "Todo",
+ text: "Merge pull request",
+ },
+ });
+
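+    // Nothing should be garbage-collected: every Todo is still reachable
+    // through the sparse todos array.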
+ expect(cache.gc()).toEqual([]);
+
+ // The moment of truth!
+ expect(
+ cache.readQuery({
+ query,
+ variables: {
+ offset: 1,
+ limit: 4,
+ },
+ })
+ ).toEqual({
+ me: {
+ __typename: "Person",
+ todos: [
+ { __typename: "Todo", text: "Write pagination tests" },
+ { __typename: "Todo", text: "Iron out merge API" },
+ { __typename: "Todo", text: "Take a nap" },
+ { __typename: "Todo", text: "Submit pull request" },
+ ],
+ },
+ });
+ });
+
+ it("runs nested merge functions as well as ancestors", function () {
+ let eventMergeCount = 0;
+ let attendeeMergeCount = 0;
+
+ const cache = new InMemoryCache({
+ typePolicies: {
+ Event: {
+ fields: {
+ attendees: {
+ merge(existing: any[], incoming: any[]) {
+ ++eventMergeCount;
+ expect(Array.isArray(incoming)).toBe(true);
+ return existing ? existing.concat(incoming) : incoming;
+ },
+ },
+ },
+ },
+
+ Attendee: {
+ fields: {
+ events: {
+ merge(existing: any[], incoming: any[]) {
+ ++attendeeMergeCount;
+ expect(Array.isArray(incoming)).toBe(true);
+ return existing ? existing.concat(incoming) : incoming;
+ },
+ },
+ },
+ },
+ },
+ });
+
+ cache.writeQuery({
+ query: gql`
+ query {
+ eventsToday {
+ name
+ attendees {
+ name
+ events {
+ time
+ }
+ }
+ }
+ }
+ `,
+ data: {
+ eventsToday: [{
+ __typename: "Event",
+ id: 123,
+ name: "One-person party",
+ time: "noonish",
+ attendees: [{
+ __typename: "Attendee",
+ id: 234,
+ name: "Ben Newman",
+ events: [
+ { __typename: "Event", id: 123 },
+ ],
+ }],
+ }],
+ },
+ });
+
+ expect(eventMergeCount).toBe(1);
+ expect(attendeeMergeCount).toBe(1);
+
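+    // Each custom merge function has run exactly once so far: once for
+    // Event.attendees and once for Attendee.events.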
+ expect(cache.extract()).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ eventsToday: [
+ { __ref: "Event:123" },
+ ],
+ },
+ "Event:123": {
+ __typename: "Event",
+ name: "One-person party",
+ attendees: [
+ { __ref: "Attendee:234" },
+ ],
+ },
+ "Attendee:234": {
+ __typename: "Attendee",
+ name: "Ben Newman",
+ events: [
+ { __ref: "Event:123" },
+ ],
+ },
+ });
+
+ cache.writeQuery({
+ query: gql`
+ query {
+ people {
+ name
+ events {
+ time
+ attendees {
+ name
+ }
+ }
+ }
+ }
+ `,
+ data: {
+ people: [{
+ __typename: "Attendee",
+ id: 234,
+ name: "Ben Newman",
+ events: [{
+ __typename: "Event",
+ id: 345,
+ name: "Rooftop dog party",
+ attendees: [{
+ __typename: "Attendee",
+ id: 456,
+ name: "Inspector Beckett",
+ }, {
+ __typename: "Attendee",
+ id: 234,
+ }],
+ }],
+ }],
+ },
+ });
+
+ expect(eventMergeCount).toBe(2);
+ expect(attendeeMergeCount).toBe(2);
+
+ expect(cache.extract()).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ eventsToday: [
+ { __ref: "Event:123" },
+ ],
+ people: [
+ { __ref: "Attendee:234" },
+ ],
+ },
+ "Event:123": {
+ __typename: "Event",
+ name: "One-person party",
+ attendees: [
+ { __ref: "Attendee:234" },
+ ],
+ },
+ "Event:345": {
+ __typename: "Event",
+ attendees: [
+ { __ref: "Attendee:456" },
+ { __ref: "Attendee:234" },
+ ],
+ },
+ "Attendee:234": {
+ __typename: "Attendee",
+ name: "Ben Newman",
+ events: [
+ { __ref: "Event:123" },
+ { __ref: "Event:345" },
+ ],
+ },
+ "Attendee:456": {
+ __typename: "Attendee",
+ name: "Inspector Beckett",
+ },
+ });
+
+ expect(cache.gc()).toEqual([]);
+ });
+ });
+
+ it("runs read and merge functions for unidentified data", function () {
+ const cache = new InMemoryCache({
+ typePolicies: {
+ Book: {
+ keyFields: ["isbn"],
+ },
+
+ Author: {
+ // Passing false for keyFields disables normalization of Author
+ // objects, which should not interfere with the operation of
+ // their read and/or merge functions. However, disabling
+ // normalization means the merge function for the name field
+ // will be called only once, because we never merge fields when
+ // the IDs of the enclosing objects are unknown or unequal.
+ keyFields: false,
+
+ fields: {
+ name: {
+ read(name: string) {
+ return reverse(name).toUpperCase();
+ },
+ merge(oldName, newName: string) {
+ expect(oldName).toBe(void 0);
+ expect(typeof newName).toBe("string");
+ return reverse(newName);
+ },
+ },
+ },
+ },
+ },
+ });
+
+ const query = gql`
+ query {
+ currentlyReading {
+ title
+ authors {
+ name
+ }
+ }
+ }
+ `;
+
+ cache.writeQuery({
+ query,
+ data: {
+ currentlyReading: [{
+ __typename: "Book",
+ isbn: "0525558616",
+ title: "Human Compatible: Artificial Intelligence and the Problem of Control",
+ authors: [{
+ __typename: "Author",
+ name: "Stuart Russell",
+ }],
+ }, {
+ __typename: "Book",
+ isbn: "1541698967",
+ title: "The Book of Why: The New Science of Cause and Effect",
+ authors: [{
+ __typename: "Author",
+ name: "Judea Pearl",
+ }, {
+ __typename: "Author",
+ name: "Dana Mackenzie",
+ }],
+ }],
+ },
+ });
+
+ expect(cache.extract()).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ currentlyReading: [
+ { __ref: 'Book:{"isbn":"0525558616"}' },
+ { __ref: 'Book:{"isbn":"1541698967"}' },
+ ],
+ },
+ 'Book:{"isbn":"0525558616"}': {
+ __typename: "Book",
+ authors: [{
+ __typename: "Author",
+ // Note the successful reversal of the Author names.
+ name: "llessuR trautS",
+ }],
+ title: "Human Compatible: Artificial Intelligence and the Problem of Control",
+ },
+ 'Book:{"isbn":"1541698967"}': {
+ __typename: "Book",
+ authors: [{
+ __typename: "Author",
+ name: "lraeP aeduJ",
+ }, {
+ __typename: "Author",
+ name: "eiznekcaM anaD",
+ }],
+ title: "The Book of Why: The New Science of Cause and Effect",
+ },
+ });
+
+ expect(cache.readQuery({ query })).toEqual({
+ currentlyReading: [{
+ __typename: "Book",
+ title: "Human Compatible: Artificial Intelligence and the Problem of Control",
+ authors: [{
+ __typename: "Author",
+ name: "STUART RUSSELL",
+ }],
+ }, {
+ __typename: "Book",
+ title: "The Book of Why: The New Science of Cause and Effect",
+ authors: [{
+ __typename: "Author",
+ // Note the successful re-reversal and uppercasing, thanks to
+ // the custom read function.
+ name: "JUDEA PEARL",
+ }, {
+ __typename: "Author",
+ name: "DANA MACKENZIE",
+ }],
+ }],
+ });
+ });
+
+ it("can read from foreign references using read helper", function () {
+ const cache = new InMemoryCache({
+ typePolicies: {
+ Author: {
+ keyFields: ["name"],
+
+ fields: {
+ books: {
+ merge(existing: Reference[] = [], incoming: Reference[]) {
+ return [...existing, ...incoming];
+ },
+ },
+
+ // A dynamically computed field that returns the Book
+ // Reference with the earliest year, which requires reading
+ // fields from foreign references.
+ firstBook(_, { isReference, readField }) {
+ let firstBook: Reference;
+ let firstYear: number;
+ const bookRefs = readField<Reference[]>("books") || [];
+ bookRefs.forEach(bookRef => {
+ expect(isReference(bookRef)).toBe(true);
+ const year = readField<number>("year", bookRef);
+ if (firstYear === void 0 || year < firstYear) {
+ firstBook = bookRef;
+ firstYear = year;
+ }
+ });
+ // Return a Book Reference, which can have a nested
+ // selection set applied to it.
+ return firstBook;
+ },
+ },
+ },
+
+ Book: {
+ keyFields: ["isbn"],
+ },
+ },
+ });
+
+    function addBook(bookData: any) {
+ cache.writeQuery({
+ query: gql`
+ query {
+ author {
+ name
+ books {
+ isbn
+ title
+ year
+ }
+ }
+ }
+ `,
+ data: {
+ author: {
+ __typename: "Author",
+ name: "Virginia Woolf",
+ books: [{
+ __typename: "Book",
+ ...bookData,
+ }],
+ },
+ },
+ });
+ }
+
+ addBook({
+ __typename: "Book",
+ isbn: "1853262390",
+ title: "Orlando",
+ year: 1928,
+ });
+
+ addBook({
+ __typename: "Book",
+ isbn: "9353420717",
+ title: "A Room of One's Own",
+ year: 1929,
+ });
+
+ addBook({
+ __typename: "Book",
+ isbn: "0156907399",
+ title: "To the Lighthouse",
+ year: 1927,
+ });
+
+ expect(cache.extract()).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ author: {
+ __ref: 'Author:{"name":"Virginia Woolf"}',
+ },
+ },
+ 'Author:{"name":"Virginia Woolf"}': {
+ __typename: "Author",
+ name: "Virginia Woolf",
+ books: [
+ { __ref: 'Book:{"isbn":"1853262390"}' },
+ { __ref: 'Book:{"isbn":"9353420717"}' },
+ { __ref: 'Book:{"isbn":"0156907399"}' },
+ ],
+ },
+ 'Book:{"isbn":"1853262390"}': {
+ __typename: "Book",
+ isbn: "1853262390",
+ title: "Orlando",
+ year: 1928,
+ },
+ 'Book:{"isbn":"9353420717"}': {
+ __typename: "Book",
+ isbn: "9353420717",
+ title: "A Room of One's Own",
+ year: 1929,
+ },
+ 'Book:{"isbn":"0156907399"}': {
+ __typename: "Book",
+ isbn: "0156907399",
+ title: "To the Lighthouse",
+ year: 1927,
+ },
+ });
+
+ const firstBookQuery = gql`
+ query {
+ author {
+ name
+ firstBook {
+ title
+ year
+ }
+ }
+ }
+ `;
+
+ function readFirstBookResult() {
+ return cache.readQuery<{ author: any }>({
+ query: firstBookQuery,
+ });
+ }
+
+ const firstBookResult = readFirstBookResult();
+ expect(firstBookResult).toEqual({
+ author: {
+ __typename: "Author",
+ name: "Virginia Woolf",
+ firstBook: {
+ __typename: "Book",
+ title: "To the Lighthouse",
+ year: 1927,
+ },
+ },
+ });
+
+ expect(readFirstBookResult()).toBe(firstBookResult);
+
+ // Add an even earlier book.
+ addBook({
+ isbn: "1420959719",
+ title: "The Voyage Out",
+ year: 1915,
+ });
+
+ const secondFirstBookResult = readFirstBookResult();
+ expect(secondFirstBookResult).not.toBe(firstBookResult);
+ expect(secondFirstBookResult).toEqual({
+ author: {
+ __typename: "Author",
+ name: "Virginia Woolf",
+ firstBook: {
+ __typename: "Book",
+ title: "The Voyage Out",
+ year: 1915,
+ },
+ },
+ });
+
+ // Write a new, unrelated field.
+ cache.writeQuery({
+ query: gql`query { author { afraidCount } }`,
+ data: {
+ author: {
+ __typename: "Author",
+ name: "Virginia Woolf",
+ afraidCount: 2,
+ },
+ },
+ });
+
+ // Make sure afraidCount was written.
+ expect(cache.readFragment({
+ id: cache.identify({
+ __typename: "Author",
+ name: "Virginia Woolf",
+ }),
+ fragment: gql`
+ fragment AfraidFragment on Author {
+ name
+ afraidCount
+ }
+ `,
+ })).toEqual({
+ __typename: "Author",
+ name: "Virginia Woolf",
+ afraidCount: 2,
+ });
+
+ // Since we wrote only the afraidCount field, the firstBook result
+ // should be completely unchanged.
+ expect(readFirstBookResult()).toBe(secondFirstBookResult);
+
+ // Add another book, not published first.
+ addBook({
+ isbn: "9780156949606",
+ title: "The Waves",
+ year: 1931,
+ });
+
+ const thirdFirstBookResult = readFirstBookResult();
+
+ // A change in VW's books field triggers rereading of result objects
+ // that previously involved her books field.
+ expect(thirdFirstBookResult).not.toBe(secondFirstBookResult);
+
+ // However, since the new Book was not the earliest published, the
+ // second and third results are structurally the same.
+ expect(thirdFirstBookResult).toEqual(secondFirstBookResult);
+
+ // In fact, the original author.firstBook object has been reused!
+ expect(thirdFirstBookResult.author.firstBook).toBe(
+ secondFirstBookResult.author.firstBook,
+ );
+ });
+});
diff --git a/packages/apollo-cache-inmemory/src/__tests__/readFromStore.ts b/src/cache/inmemory/__tests__/readFromStore.ts
similarity index 80%
rename from packages/apollo-cache-inmemory/src/__tests__/readFromStore.ts
rename to src/cache/inmemory/__tests__/readFromStore.ts
--- a/packages/apollo-cache-inmemory/src/__tests__/readFromStore.ts
+++ b/src/cache/inmemory/__tests__/readFromStore.ts
@@ -1,35 +1,35 @@
import { assign, omit } from 'lodash';
-import { IdValue, JsonValue } from 'apollo-utilities';
import gql from 'graphql-tag';
-import { stripSymbols } from 'apollo-utilities';
-import { StoreObject, HeuristicFragmentMatcher } from '../';
+import { stripSymbols } from '../../../utilities/testing/stripSymbols';
+import { StoreObject } from '../types';
import { StoreReader } from '../readFromStore';
-import { defaultNormalizedCacheFactory } from '../objectCache';
-
-const fragmentMatcherFunction = new HeuristicFragmentMatcher().match;
+import { makeReference } from '../../../utilities/graphql/storeUtils';
+import { defaultNormalizedCacheFactory } from '../entityStore';
import { withError } from './diffAgainstStore';
+import { Policies } from '../policies';
describe('reading from the store', () => {
- const reader = new StoreReader();
+ const reader = new StoreReader({
+ policies: new Policies(),
+ });
it('runs a nested query with proper fragment fields in arrays', () => {
withError(() => {
const store = defaultNormalizedCacheFactory({
ROOT_QUERY: {
__typename: 'Query',
- nestedObj: { type: 'id', id: 'abcde', generated: false },
+ nestedObj: makeReference('abcde'),
} as StoreObject,
abcde: {
id: 'abcde',
innerArray: [
- { type: 'id', generated: true, id: 'abcde.innerArray.0' } as any,
+ {
+ id: 'abcdef',
+ someField: 3,
+ },
],
} as StoreObject,
- 'abcde.innerArray.0': {
- id: 'abcdef',
- someField: 3,
- } as StoreObject,
});
const queryResult = reader.readQueryFromStore({
@@ -62,7 +62,6 @@ describe('reading from the store', () => {
}
}
`,
- fragmentMatcherFunction,
});
expect(stripSymbols(queryResult)).toEqual({
@@ -70,7 +69,7 @@ describe('reading from the store', () => {
innerArray: [{ id: 'abcdef', someField: 3 }],
},
});
- }, /queries contain union or interface types/);
+ });
});
it('rejects malformed queries', () => {
@@ -258,11 +257,7 @@ describe('reading from the store', () => {
const store = defaultNormalizedCacheFactory({
ROOT_QUERY: assign({}, assign({}, omit(result, 'nestedObj')), {
- nestedObj: {
- type: 'id',
- id: 'abcde',
- generated: false,
- },
+ nestedObj: makeReference('abcde'),
} as StoreObject),
abcde: result.nestedObj,
});
@@ -319,19 +314,11 @@ describe('reading from the store', () => {
assign({}, omit(result, 'nestedObj', 'deepNestedObj')),
{
__typename: 'Query',
- nestedObj: {
- type: 'id',
- id: 'abcde',
- generated: false,
- },
+ nestedObj: makeReference('abcde'),
} as StoreObject,
),
abcde: assign({}, result.nestedObj, {
- deepNestedObj: {
- type: 'id',
- id: 'abcdef',
- generated: false,
- },
+ deepNestedObj: makeReference('abcdef'),
}) as StoreObject,
abcdef: result.deepNestedObj as StoreObject,
});
@@ -410,14 +397,7 @@ describe('reading from the store', () => {
};
const store = defaultNormalizedCacheFactory({
- ROOT_QUERY: assign({}, assign({}, omit(result, 'nestedArray')), {
- nestedArray: [
- { type: 'id', generated: true, id: 'abcd.nestedArray.0' } as IdValue,
- { type: 'id', generated: true, id: 'abcd.nestedArray.1' } as IdValue,
- ],
- }) as StoreObject,
- 'abcd.nestedArray.0': result.nestedArray[0],
- 'abcd.nestedArray.1': result.nestedArray[1],
+ ROOT_QUERY: result,
});
const queryResult = reader.readQueryFromStore({
@@ -468,13 +448,7 @@ describe('reading from the store', () => {
};
const store = defaultNormalizedCacheFactory({
- ROOT_QUERY: assign({}, assign({}, omit(result, 'nestedArray')), {
- nestedArray: [
- null,
- { type: 'id', generated: true, id: 'abcd.nestedArray.1' } as IdValue,
- ],
- }) as StoreObject,
- 'abcd.nestedArray.1': result.nestedArray[1],
+ ROOT_QUERY: result,
});
const queryResult = reader.readQueryFromStore({
@@ -524,7 +498,7 @@ describe('reading from the store', () => {
const store = defaultNormalizedCacheFactory({
ROOT_QUERY: assign({}, assign({}, omit(result, 'nestedArray')), {
- nestedArray: [null, { type: 'id', generated: false, id: 'abcde' }],
+ nestedArray: [null, makeReference('abcde')],
}) as StoreObject,
abcde: result.nestedArray[1],
});
@@ -629,12 +603,7 @@ describe('reading from the store', () => {
};
const store = defaultNormalizedCacheFactory({
- ROOT_QUERY: assign({}, assign({}, omit(result, 'simpleArray')), {
- simpleArray: {
- type: 'json',
- json: result.simpleArray,
- } as JsonValue,
- }) as StoreObject,
+ ROOT_QUERY: result,
});
const queryResult = reader.readQueryFromStore({
@@ -666,12 +635,7 @@ describe('reading from the store', () => {
};
const store = defaultNormalizedCacheFactory({
- ROOT_QUERY: assign({}, assign({}, omit(result, 'simpleArray')), {
- simpleArray: {
- type: 'json',
- json: result.simpleArray,
- } as JsonValue,
- }) as StoreObject,
+ ROOT_QUERY: result,
});
const queryResult = reader.readQueryFromStore({
@@ -720,19 +684,11 @@ describe('reading from the store', () => {
assign({}, omit(data, 'nestedObj', 'deepNestedObj')),
{
__typename: 'Query',
- nestedObj: {
- type: 'id',
- id: 'abcde',
- generated: false,
- } as IdValue,
+ nestedObj: makeReference('abcde'),
},
) as StoreObject,
abcde: assign({}, data.nestedObj, {
- deepNestedObj: {
- type: 'id',
- id: 'abcdef',
- generated: false,
- },
+ deepNestedObj: makeReference('abcdef'),
}) as StoreObject,
abcdef: data.deepNestedObj as StoreObject,
});
@@ -784,20 +740,15 @@ describe('reading from the store', () => {
});
});
- it('properly handles the connection directive', () => {
+ it('properly handles the @connection directive', () => {
const store = defaultNormalizedCacheFactory({
ROOT_QUERY: {
- abc: [
+ 'books:abc': [
{
- generated: true,
- id: 'ROOT_QUERY.abc.0',
- type: 'id',
+ name: 'efgh',
},
],
},
- 'ROOT_QUERY.abc.0': {
- name: 'efgh',
- },
});
const queryResult = reader.readQueryFromStore({
@@ -819,4 +770,150 @@ describe('reading from the store', () => {
],
});
});
+
+ it('can use keyArgs function instead of @connection directive', () => {
+ const reader = new StoreReader({
+ policies: new Policies({
+ typePolicies: {
+ Query: {
+ fields: {
+ books: {
+ // Even though we're returning an arbitrary string here,
+ // the InMemoryCache will ensure the actual key begins
+ // with "books".
+ keyArgs: () => "abc",
+ },
+ },
+ },
+ },
+ }),
+ });
+
+ const store = defaultNormalizedCacheFactory({
+ ROOT_QUERY: {
+ "books:abc": [
+ {
+ name: 'efgh',
+ },
+ ],
+ },
+ });
+
+ const queryResult = reader.readQueryFromStore({
+ store,
+ query: gql`
+ {
+ books(skip: 0, limit: 2) {
+ name
+ }
+ }
+ `,
+ });
+
+ expect(stripSymbols(queryResult)).toEqual({
+ books: [
+ {
+ name: 'efgh',
+ },
+ ],
+ });
+ });
+
+ it('refuses to return raw Reference objects', () => {
+ const store = defaultNormalizedCacheFactory({
+ ROOT_QUERY: {
+ author: {
+ __typename: 'Author',
+ name: 'Toni Morrison',
+ books: [
+ {
+ title: 'The Bluest Eye',
+ publisher: makeReference('Publisher1'),
+ },
+ {
+ title: 'Song of Solomon',
+ publisher: makeReference('Publisher2'),
+ },
+ {
+ title: 'Beloved',
+ publisher: makeReference('Publisher2'),
+ },
+ ],
+ },
+ },
+ Publisher1: {
+ __typename: 'Publisher',
+ id: 1,
+ name: 'Holt, Rinehart and Winston',
+ },
+ Publisher2: {
+ __typename: 'Publisher',
+ id: 2,
+ name: 'Alfred A. Knopf, Inc.',
+ },
+ });
+
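+    // Requesting books without a selection set would force the cache to
+    // return raw objects containing Reference values, so it throws.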
+ expect(() => {
+ reader.readQueryFromStore({
+ store,
+ query: gql`
+ {
+ author {
+ name
+ books
+ }
+ }
+ `,
+ });
+ }).toThrow(
+ /Missing selection set for object of type Publisher returned for query field books/,
+ );
+
+ expect(
+ reader.readQueryFromStore({
+ store,
+ query: gql`
+ {
+ author {
+ name
+ books {
+ title
+ publisher {
+ name
+ }
+ }
+ }
+ }
+ `,
+ }),
+ ).toEqual({
+ author: {
+ __typename: 'Author',
+ name: 'Toni Morrison',
+ books: [
+ {
+ title: 'The Bluest Eye',
+ publisher: {
+ __typename: 'Publisher',
+ name: 'Holt, Rinehart and Winston',
+ },
+ },
+ {
+ title: 'Song of Solomon',
+ publisher: {
+ __typename: 'Publisher',
+ name: 'Alfred A. Knopf, Inc.',
+ },
+ },
+ {
+ title: 'Beloved',
+ publisher: {
+ __typename: 'Publisher',
+ name: 'Alfred A. Knopf, Inc.',
+ },
+ },
+ ],
+ },
+ });
+ });
});
diff --git a/packages/apollo-cache-inmemory/src/__tests__/recordingCache.ts b/src/cache/inmemory/__tests__/recordingCache.ts
similarity index 53%
rename from packages/apollo-cache-inmemory/src/__tests__/recordingCache.ts
rename to src/cache/inmemory/__tests__/recordingCache.ts
--- a/packages/apollo-cache-inmemory/src/__tests__/recordingCache.ts
+++ b/src/cache/inmemory/__tests__/recordingCache.ts
@@ -1,10 +1,9 @@
-import { OptimisticCacheLayer } from '../inMemoryCache';
-import { ObjectCache } from '../objectCache';
import { NormalizedCacheObject } from '../types';
+import { EntityStore } from '../entityStore';
-describe('OptimisticCacheLayer', () => {
- function makeLayer(root: ObjectCache) {
- return new OptimisticCacheLayer('whatever', root, () => {});
+describe('Optimistic EntityStore layering', () => {
+ function makeLayer(root: EntityStore) {
+ return root.addLayer('whatever', () => {});
}
describe('returns correct values during recording', () => {
@@ -17,31 +16,31 @@ describe('OptimisticCacheLayer', () => {
Human: { __typename: 'Human', name: 'John' },
};
- const underlyingCache = new ObjectCache(data);
+ const underlyingStore = new EntityStore.Root({ seed: data });
- let cache = makeLayer(underlyingCache);
+ let store = makeLayer(underlyingStore);
beforeEach(() => {
- cache = makeLayer(underlyingCache);
+ store = makeLayer(underlyingStore);
});
it('should passthrough values if not defined in recording', () => {
- expect(cache.get('Human')).toBe(data.Human);
- expect(cache.get('Animal')).toBe(data.Animal);
+ expect(store.get('Human')).toBe(data.Human);
+ expect(store.get('Animal')).toBe(data.Animal);
});
it('should return values defined during recording', () => {
- cache.set('Human', dataToRecord.Human);
- expect(cache.get('Human')).toBe(dataToRecord.Human);
- expect(underlyingCache.get('Human')).toBe(data.Human);
+ store.merge('Human', dataToRecord.Human);
+ expect(store.get('Human')).toEqual(dataToRecord.Human);
+ expect(underlyingStore.get('Human')).toBe(data.Human);
});
it('should return undefined for values deleted during recording', () => {
- expect(cache.get('Animal')).toBe(data.Animal);
+ expect(store.get('Animal')).toBe(data.Animal);
// delete should be registered in the recording:
- cache.delete('Animal');
- expect(cache.get('Animal')).toBeUndefined();
- expect(cache.toObject()).toHaveProperty('Animal');
- expect(underlyingCache.get('Animal')).toBe(data.Animal);
+ store.delete('Animal');
+ expect(store.get('Animal')).toBeUndefined();
+ expect(store.toObject()).toHaveProperty('Animal');
+ expect(underlyingStore.get('Animal')).toBe(data.Animal);
});
});
@@ -55,15 +54,15 @@ describe('OptimisticCacheLayer', () => {
Human: { __typename: 'Human', name: 'John' },
};
- const underlyingCache = new ObjectCache(data);
- let cache = makeLayer(underlyingCache);
+ const underlyingStore = new EntityStore.Root({ seed: data });
+ let store = makeLayer(underlyingStore);
let recording: NormalizedCacheObject;
beforeEach(() => {
- cache = makeLayer(underlyingCache);
- cache.set('Human', dataToRecord.Human);
- cache.delete('Animal');
- recording = cache.toObject();
+ store = makeLayer(underlyingStore);
+ store.merge('Human', dataToRecord.Human);
+ store.delete('Animal');
+ recording = store.toObject();
});
it('should contain the property indicating deletion', () => {
@@ -78,7 +77,7 @@ describe('OptimisticCacheLayer', () => {
});
it('should keep the original data unaffected', () => {
- expect(underlyingCache.toObject()).toEqual(data);
+ expect(underlyingStore.toObject()).toEqual(data);
});
});
});
diff --git a/packages/apollo-cache-inmemory/src/__tests__/roundtrip.ts b/src/cache/inmemory/__tests__/roundtrip.ts
similarity index 87%
rename from packages/apollo-cache-inmemory/src/__tests__/roundtrip.ts
rename to src/cache/inmemory/__tests__/roundtrip.ts
--- a/packages/apollo-cache-inmemory/src/__tests__/roundtrip.ts
+++ b/src/cache/inmemory/__tests__/roundtrip.ts
@@ -3,12 +3,10 @@ import gql from 'graphql-tag';
import { withError } from './diffAgainstStore';
import { withWarning } from './writeToStore';
-
-import { DepTrackingCache } from '../depTrackingCache';
-
-import { HeuristicFragmentMatcher, StoreReader, StoreWriter } from '../';
-
-const fragmentMatcherFunction = new HeuristicFragmentMatcher().match;
+import { EntityStore } from '../entityStore';
+import { StoreReader } from '../readFromStore';
+import { StoreWriter } from '../writeToStore';
+import { Policies } from '../policies';
function assertDeeplyFrozen(value: any, stack: any[] = []) {
if (value !== null && typeof value === 'object' && stack.indexOf(value) < 0) {
@@ -23,9 +21,14 @@ function assertDeeplyFrozen(value: any, stack: any[] = []) {
}
function storeRoundtrip(query: DocumentNode, result: any, variables = {}) {
- const reader = new StoreReader();
- const immutableReader = new StoreReader({ freezeResults: true });
- const writer = new StoreWriter();
+ const policies = new Policies({
+ possibleTypes: {
+ Character: ["Jedi", "Droid"],
+ },
+ });
+
+ const reader = new StoreReader({ policies });
+ const writer = new StoreWriter({ policies });
const store = writer.writeQueryToStore({
result,
@@ -37,7 +40,6 @@ function storeRoundtrip(query: DocumentNode, result: any, variables = {}) {
store,
query,
variables,
- fragmentMatcherFunction,
};
const reconstructedResult = reader.readQueryFromStore<any>(readOptions);
@@ -45,12 +47,12 @@ function storeRoundtrip(query: DocumentNode, result: any, variables = {}) {
// Make sure the result is identical if we haven't written anything new
// to the store. https://github.com/apollographql/apollo-client/pull/3394
- expect(store).toBeInstanceOf(DepTrackingCache);
+ expect(store).toBeInstanceOf(EntityStore);
expect(reader.readQueryFromStore(readOptions)).toBe(reconstructedResult);
- const immutableResult = immutableReader.readQueryFromStore(readOptions);
+ const immutableResult = reader.readQueryFromStore(readOptions);
expect(immutableResult).toEqual(reconstructedResult);
- expect(immutableReader.readQueryFromStore(readOptions)).toBe(immutableResult);
+ expect(reader.readQueryFromStore(readOptions)).toBe(immutableResult);
if (process.env.NODE_ENV !== 'production') {
try {
// Note: this illegal assignment will only throw in strict mode, but that's
@@ -237,8 +239,8 @@ describe('roundtrip', () => {
},
);
- // Just because we read from the store using { freezeResults: true }, the
- // original data should not be frozen.
+ // Reading immutable results from the store does not mean the original
+ // data should get frozen.
expect(Object.isExtensible(updateClub)).toBe(true);
expect(Object.isFrozen(updateClub)).toBe(false);
});
@@ -306,42 +308,10 @@ describe('roundtrip', () => {
);
});
- it('should resolve on union types with inline fragments without typenames with warning', () => {
- return withWarning(() => {
- storeRoundtrip(
- gql`
- query {
- all_people {
- name
- ... on Jedi {
- side
- }
- ... on Droid {
- model
- }
- }
- }
- `,
- {
- all_people: [
- {
- name: 'Luke Skywalker',
- side: 'bright',
- },
- {
- name: 'R2D2',
- model: 'astromech',
- },
- ],
- },
- );
- }, /using fragments/);
- });
-
  // XXX this test is weird because it assumes the server returned an incorrect result.
// However, the user may have written this result with client.writeQuery.
it('should throw an error on two of the same inline fragment types', () => {
- return expect(() => {
+ return withWarning(() => expect(() => {
storeRoundtrip(
gql`
query {
@@ -367,7 +337,7 @@ describe('roundtrip', () => {
],
},
);
- }).toThrowError(/Can\'t find field rank on object/);
+ }).toThrowError(/Can\'t find field rank on object/));
});
it('should resolve fields it can on interface with non matching inline fragments', () => {
@@ -398,7 +368,7 @@ describe('roundtrip', () => {
],
},
);
- }, /IntrospectionFragmentMatcher/);
+ });
});
it('should resolve on union types with spread fragments', () => {
@@ -437,7 +407,7 @@ describe('roundtrip', () => {
],
},
);
- }, /IntrospectionFragmentMatcher/);
+ });
});
it('should work with a fragment on the actual interface or union', () => {
@@ -472,15 +442,16 @@ describe('roundtrip', () => {
__typename: 'Droid',
name: 'R2D2',
model: 'astromech',
+ side: 'bright',
},
],
},
);
- }, /IntrospectionFragmentMatcher/);
+ });
});
it('should throw on error on two of the same spread fragment types', () => {
- expect(() =>
+ withWarning(() => expect(() => {
storeRoundtrip(
gql`
fragment jediSide on Jedi {
@@ -509,8 +480,8 @@ describe('roundtrip', () => {
},
],
},
- ),
- ).toThrowError(/Can\'t find field rank on object/);
+ );
+ }).toThrowError(/Can\'t find field rank on object/));
});
it('should resolve on @include and @skip with inline fragments', () => {
diff --git a/packages/apollo-cache-inmemory/src/__tests__/writeToStore.ts b/src/cache/inmemory/__tests__/writeToStore.ts
similarity index 74%
rename from packages/apollo-cache-inmemory/src/__tests__/writeToStore.ts
rename to src/cache/inmemory/__tests__/writeToStore.ts
--- a/packages/apollo-cache-inmemory/src/__tests__/writeToStore.ts
+++ b/src/cache/inmemory/__tests__/writeToStore.ts
@@ -1,38 +1,35 @@
-import { cloneDeep, assign, omit } from 'lodash';
-
+import { assign, omit } from 'lodash';
import {
SelectionNode,
FieldNode,
DefinitionNode,
OperationDefinitionNode,
ASTNode,
+ DocumentNode,
} from 'graphql';
-
import gql from 'graphql-tag';
+
import {
storeKeyNameFromField,
- IdValue,
- addTypenameToDocument,
-} from 'apollo-utilities';
-
+ makeReference,
+} from '../../../utilities/graphql/storeUtils';
+import { addTypenameToDocument } from '../../../utilities/graphql/transform';
+import { cloneDeep } from '../../../utilities/common/cloneDeep';
import { StoreWriter } from '../writeToStore';
+import { defaultNormalizedCacheFactory } from '../entityStore';
+import { InMemoryCache } from '../inMemoryCache';
+import { Policies } from '../policies';
-import { defaultNormalizedCacheFactory } from '../objectCache';
-
-import {
- HeuristicFragmentMatcher,
- IntrospectionFragmentMatcher,
- StoreObject,
-} from '../';
-
-export function withWarning(func: Function, regex: RegExp) {
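+// The regex is optional so callers can silence an expected warning without
+// asserting its message. Hypothetical usage (doWrite is a placeholder for
+// any code expected to warn):
+//   withWarning(() => doWrite());              // just swallow the warning
+//   withWarning(() => doWrite(), /data loss/); // also assert its message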
+export function withWarning(func: Function, regex?: RegExp) {
let message: string = null as never;
const oldWarn = console.warn;
console.warn = (m: string) => (message = m);
return Promise.resolve(func()).then(val => {
- expect(message).toMatch(regex);
+ if (regex) {
+ expect(message).toMatch(regex);
+ }
console.warn = oldWarn;
return val;
});
@@ -41,7 +38,15 @@ export function withWarning(func: Function, regex: RegExp) {
const getIdField = ({ id }: { id: string }) => id;
describe('writing to the store', () => {
- const writer = new StoreWriter();
+ const policies = new Policies({
+ dataIdFromObject(object: any) {
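+      // For example, { __typename: 'Person', id: '1' } yields 'Person__1';
+      // objects lacking __typename or id return undefined and are left
+      // unnormalized (no keyFields policies are configured here).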
+ if (object.__typename && object.id) {
+ return object.__typename + '__' + object.id;
+ }
+ },
+ });
+
+ const writer = new StoreWriter({ policies });
it('properly normalizes a trivial item', () => {
const query = gql`
@@ -68,7 +73,10 @@ describe('writing to the store', () => {
})
.toObject(),
).toEqual({
- ROOT_QUERY: result,
+ ROOT_QUERY: {
+ __typename: 'Query',
+ ...result,
+ },
});
});
@@ -96,6 +104,7 @@ describe('writing to the store', () => {
expect(normalized.toObject()).toEqual({
ROOT_QUERY: {
+ __typename: "Query",
id: 'abcd',
stringField: 'This is a string!',
numberField: 5,
@@ -130,6 +139,7 @@ describe('writing to the store', () => {
expect(normalized.toObject()).toEqual({
ROOT_QUERY: {
+ __typename: "Query",
id: 'abcd',
'stringField({"arg":1})': 'The arg was 1!',
'stringField({"arg":2})': 'The arg was 2!',
@@ -170,6 +180,7 @@ describe('writing to the store', () => {
expect(normalized.toObject()).toEqual({
ROOT_QUERY: {
+ __typename: "Query",
id: 'abcd',
nullField: null,
'numberField({"floatArg":3.14,"intArg":5})': 5,
@@ -212,6 +223,7 @@ describe('writing to the store', () => {
expect(normalized.toObject()).toEqual({
ROOT_QUERY: {
+ __typename: "Query",
id: 'abcd',
nullField: null,
'numberField({"floatArg":3.14,"intArg":5})': 5,
@@ -244,6 +256,7 @@ describe('writing to the store', () => {
expect(normalized.toObject()).toEqual({
ROOT_QUERY: {
+ __typename: "Query",
id: 'abcd',
firstName: 'James',
'lastName@upperCase': 'BOND',
@@ -281,22 +294,25 @@ describe('writing to the store', () => {
},
};
+ const writer = new StoreWriter({
+ policies: new Policies({
+ dataIdFromObject: getIdField,
+ }),
+ });
+
expect(
writer
.writeQueryToStore({
query,
result: cloneDeep(result),
- dataIdFromObject: getIdField,
})
.toObject(),
).toEqual({
- ROOT_QUERY: assign({}, assign({}, omit(result, 'nestedObj')), {
- nestedObj: {
- type: 'id',
- id: result.nestedObj.id,
- generated: false,
- },
- }),
+ ROOT_QUERY: {
+ __typename: 'Query',
+ ...result,
+ nestedObj: makeReference(result.nestedObj.id),
+ },
[result.nestedObj.id]: result.nestedObj,
});
});
@@ -336,14 +352,10 @@ describe('writing to the store', () => {
})
.toObject(),
).toEqual({
- ROOT_QUERY: assign({}, assign({}, omit(result, 'nestedObj')), {
- nestedObj: {
- type: 'id',
- id: `$ROOT_QUERY.nestedObj`,
- generated: true,
- },
- }),
- [`$ROOT_QUERY.nestedObj`]: result.nestedObj,
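+      // With no usable id, the nested object no longer gets a generated
+      // key; it is stored inline under ROOT_QUERY instead.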
+ ROOT_QUERY: {
+ __typename: 'Query',
+ ...result,
+ },
});
});
@@ -382,14 +394,10 @@ describe('writing to the store', () => {
})
.toObject(),
).toEqual({
- ROOT_QUERY: assign({}, assign({}, omit(result, 'nestedObj')), {
- 'nestedObj({"arg":"val"})': {
- type: 'id',
- id: `$ROOT_QUERY.nestedObj({"arg":"val"})`,
- generated: true,
- },
+ ROOT_QUERY: assign(omit(result, 'nestedObj'), {
+ __typename: "Query",
+ 'nestedObj({"arg":"val"})': result.nestedObj,
}),
- [`$ROOT_QUERY.nestedObj({"arg":"val"})`]: result.nestedObj,
});
});
@@ -430,21 +438,25 @@ describe('writing to the store', () => {
],
};
+ const writer = new StoreWriter({
+ policies: new Policies({
+ dataIdFromObject: getIdField,
+ }),
+ });
+
expect(
writer
.writeQueryToStore({
query,
result: cloneDeep(result),
- dataIdFromObject: getIdField,
})
.toObject(),
).toEqual({
ROOT_QUERY: assign({}, assign({}, omit(result, 'nestedArray')), {
- nestedArray: result.nestedArray.map((obj: any) => ({
- type: 'id',
- id: obj.id,
- generated: false,
- })),
+ __typename: "Query",
+ nestedArray: result.nestedArray.map(
+ (obj: any) => makeReference(obj.id),
+ ),
}),
[result.nestedArray[0].id]: result.nestedArray[0],
[result.nestedArray[1].id]: result.nestedArray[1],
@@ -483,20 +495,23 @@ describe('writing to the store', () => {
],
};
+ const writer = new StoreWriter({
+ policies: new Policies({
+ dataIdFromObject: getIdField,
+ }),
+ });
+
expect(
writer
.writeQueryToStore({
query,
result: cloneDeep(result),
- dataIdFromObject: getIdField,
})
.toObject(),
).toEqual({
ROOT_QUERY: assign({}, assign({}, omit(result, 'nestedArray')), {
- nestedArray: [
- { type: 'id', id: result.nestedArray[0].id, generated: false },
- null,
- ],
+ __typename: "Query",
+ nestedArray: [makeReference(result.nestedArray[0].id), null],
}),
[result.nestedArray[0].id]: result.nestedArray[0],
});
@@ -542,14 +557,10 @@ describe('writing to the store', () => {
});
expect(normalized.toObject()).toEqual({
- ROOT_QUERY: assign({}, assign({}, omit(result, 'nestedArray')), {
- nestedArray: [
- { type: 'id', generated: true, id: `ROOT_QUERY.nestedArray.0` },
- { type: 'id', generated: true, id: `ROOT_QUERY.nestedArray.1` },
- ],
- }),
- [`ROOT_QUERY.nestedArray.0`]: result.nestedArray[0],
- [`ROOT_QUERY.nestedArray.1`]: result.nestedArray[1],
+ ROOT_QUERY: {
+ __typename: 'Query',
+ ...result,
+ },
});
});
@@ -589,13 +600,10 @@ describe('writing to the store', () => {
});
expect(normalized.toObject()).toEqual({
- ROOT_QUERY: assign({}, assign({}, omit(result, 'nestedArray')), {
- nestedArray: [
- null,
- { type: 'id', generated: true, id: `ROOT_QUERY.nestedArray.1` },
- ],
- }),
- [`ROOT_QUERY.nestedArray.1`]: result.nestedArray[1],
+ ROOT_QUERY: {
+ __typename: 'Query',
+ ...result,
+ },
});
});
@@ -618,23 +626,22 @@ describe('writing to the store', () => {
simpleArray: ['one', 'two', 'three'],
};
+ const writer = new StoreWriter({
+ policies: new Policies({
+ dataIdFromObject: getIdField,
+ }),
+ });
+
const normalized = writer.writeQueryToStore({
query,
result: cloneDeep(result),
- dataIdFromObject: getIdField,
});
expect(normalized.toObject()).toEqual({
- ROOT_QUERY: assign({}, assign({}, omit(result, 'simpleArray')), {
- simpleArray: {
- type: 'json',
- json: [
- result.simpleArray[0],
- result.simpleArray[1],
- result.simpleArray[2],
- ],
- },
- }),
+ ROOT_QUERY: {
+ __typename: 'Query',
+ ...result,
+ },
});
});
@@ -663,16 +670,10 @@ describe('writing to the store', () => {
});
expect(normalized.toObject()).toEqual({
- ROOT_QUERY: assign({}, assign({}, omit(result, 'simpleArray')), {
- simpleArray: {
- type: 'json',
- json: [
- result.simpleArray[0],
- result.simpleArray[1],
- result.simpleArray[2],
- ],
- },
- }),
+ ROOT_QUERY: {
+ __typename: 'Query',
+ ...result,
+ },
});
});
@@ -703,25 +704,23 @@ describe('writing to the store', () => {
},
};
+ const writer = new StoreWriter({
+ policies: new Policies({
+ dataIdFromObject: getIdField,
+ }),
+ });
+
const normalized = writer.writeQueryToStore({
query,
result: cloneDeep(result),
- dataIdFromObject: getIdField,
});
expect(normalized.toObject()).toEqual({
ROOT_QUERY: {
+ __typename: "Query",
id: 'a',
- object1: {
- type: 'id',
- id: 'aa',
- generated: false,
- },
- object2: {
- type: 'id',
- id: 'aa',
- generated: false,
- },
+ object1: makeReference('aa'),
+ object2: makeReference('aa'),
},
aa: {
id: 'aa',
@@ -778,47 +777,33 @@ describe('writing to the store', () => {
],
};
+ const writer = new StoreWriter({
+ policies: new Policies({
+ dataIdFromObject: getIdField,
+ }),
+ });
+
const normalized = writer.writeQueryToStore({
query,
result: cloneDeep(result),
- dataIdFromObject: getIdField,
});
expect(normalized.toObject()).toEqual({
ROOT_QUERY: {
+ __typename: "Query",
id: 'a',
- array1: [
- {
- type: 'id',
- id: 'aa',
- generated: false,
- },
- ],
- array2: [
- {
- type: 'id',
- id: 'ab',
- generated: false,
- },
- ],
+ array1: [makeReference('aa')],
+ array2: [makeReference('ab')],
},
aa: {
id: 'aa',
stringField: 'string',
- obj: {
- type: 'id',
- id: 'aaa',
- generated: false,
- },
+ obj: makeReference('aaa'),
},
ab: {
id: 'ab',
stringField: 'string2',
- obj: {
- type: 'id',
- id: 'aaa',
- generated: false,
- },
+ obj: makeReference('aaa'),
},
aaa: {
id: 'aaa',
@@ -868,45 +853,32 @@ describe('writing to the store', () => {
],
};
+ const writer = new StoreWriter({
+ policies: new Policies({
+ dataIdFromObject: getIdField,
+ }),
+ });
+
const normalized = writer.writeQueryToStore({
query,
result: cloneDeep(result),
- dataIdFromObject: getIdField,
});
expect(normalized.toObject()).toEqual({
ROOT_QUERY: {
+ __typename: "Query",
id: 'a',
- array1: [
- {
- type: 'id',
- id: 'aa',
- generated: false,
- },
- {
- type: 'id',
- id: 'ab',
- generated: false,
- },
- ],
+ array1: [makeReference('aa'), makeReference('ab')],
},
aa: {
id: 'aa',
stringField: 'string',
- obj: {
- type: 'id',
- id: 'aaa',
- generated: false,
- },
+ obj: makeReference('aaa'),
},
ab: {
id: 'ab',
stringField: 'string2',
- obj: {
- type: 'id',
- id: 'aaa',
- generated: false,
- },
+ obj: makeReference('aaa'),
},
aaa: {
id: 'aaa',
@@ -931,10 +903,15 @@ describe('writing to the store', () => {
nullField: null,
};
+ const writer = new StoreWriter({
+ policies: new Policies({
+ dataIdFromObject: getIdField,
+ }),
+ });
+
const store = writer.writeQueryToStore({
query,
result: cloneDeep(result),
- dataIdFromObject: getIdField,
});
const query2 = gql`
@@ -955,11 +932,14 @@ describe('writing to the store', () => {
store,
query: query2,
result: result2,
- dataIdFromObject: getIdField,
});
expect(store2.toObject()).toEqual({
- ROOT_QUERY: assign({}, result, result2),
+ ROOT_QUERY: {
+ __typename: 'Query',
+ ...result,
+ ...result2,
+ },
});
});
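// Successive writes into the same normalized store shallow-merge entity by
// entity, which is why ROOT_QUERY above carries fields from both results.
// A minimal sketch of the pattern with the same writer API (names reuse the
// surrounding test's variables):

const mergedStore = writer.writeQueryToStore({ query, result: cloneDeep(result) });
writer.writeQueryToStore({ store: mergedStore, query: query2, result: result2 });
// mergedStore.toObject().ROOT_QUERY now contains fields from both writes.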
@@ -995,9 +975,11 @@ describe('writing to the store', () => {
})
.toObject(),
).toEqual({
- ROOT_QUERY: assign({}, assign({}, omit(result, 'nestedObj')), {
+ ROOT_QUERY: {
+ __typename: 'Query',
+ ...result,
nestedObj: null,
- }),
+ },
});
});
@@ -1027,16 +1009,12 @@ describe('writing to the store', () => {
.toObject(),
).toEqual({
ROOT_QUERY: {
+ __typename: "Query",
'people_one({"id":"5"})': {
- type: 'id',
- id: '$ROOT_QUERY.people_one({"id":"5"})',
- generated: true,
+ id: 'abcd',
+ stringField: 'This is a string!',
},
},
- '$ROOT_QUERY.people_one({"id":"5"})': {
- id: 'abcd',
- stringField: 'This is a string!',
- },
});
});
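// Results the cache cannot identify are now embedded directly under their
// parent field (see the inlined people_one({"id":"5"}) entry above) instead
// of receiving a synthetic "$ROOT_QUERY..." id. A sketch of the
// identification hook, following the __typename + id convention these tests use:

const examplePolicies = new Policies({
  dataIdFromObject(object: any) {
    return object.__typename && object.id
      ? `${object.__typename}__${object.id}`
      : undefined; // undefined => embed the object in its parent
  },
});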
@@ -1187,32 +1165,29 @@ describe('writing to the store', () => {
mutation.definitions.map((def: OperationDefinitionNode) => {
if (isOperationDefinition(def)) {
+ const writer = new StoreWriter({
+ policies: new Policies({
+ dataIdFromObject() {
+ return '5';
+ },
+ }),
+ });
+
expect(
- writer
- .writeSelectionSetToStore({
- dataId: '5',
- selectionSet: def.selectionSet,
- result: cloneDeep(result),
- context: {
- store: defaultNormalizedCacheFactory(),
- variables,
- dataIdFromObject: () => '5',
- },
- })
- .toObject(),
+ writer.writeQueryToStore({
+ query: {
+ kind: 'Document',
+ definitions: [def],
+ } as DocumentNode,
+ dataId: '5',
+ result,
+ variables,
+ }).toObject(),
).toEqual({
'5': {
id: 'id',
- 'some_mutation({"input":{"arr":[1,{"a":"b"}],"bo":true,"id":"5","nil":null,"num":5.5,"obj":{"a":"b"}}})': {
- generated: false,
- id: '5',
- type: 'id',
- },
- 'some_mutation_with_variables({"input":{"arr":[1,{"a":"b"}],"bo":true,"id":"5","nil":null,"num":5.5,"obj":{"a":"b"}}})': {
- generated: false,
- id: '5',
- type: 'id',
- },
+ 'some_mutation({"input":{"arr":[1,{"a":"b"}],"bo":true,"id":"5","nil":null,"num":5.5,"obj":{"a":"b"}}})': makeReference('5'),
+ 'some_mutation_with_variables({"input":{"arr":[1,{"a":"b"}],"bo":true,"id":"5","nil":null,"num":5.5,"obj":{"a":"b"}}})': makeReference('5'),
},
});
} else {
@@ -1221,58 +1196,7 @@ describe('writing to the store', () => {
});
});
- it('should write to store if `dataIdFromObject` returns an ID of 0', () => {
- const query = gql`
- query {
- author {
- firstName
- id
- __typename
- }
- }
- `;
- const data = {
- author: {
- id: 0,
- __typename: 'Author',
- firstName: 'John',
- },
- };
- const expStore = defaultNormalizedCacheFactory({
- ROOT_QUERY: {
- author: {
- id: 0 as any,
- typename: 'Author',
- type: 'id',
- generated: false,
- },
- },
- 0: {
- id: data.author.id,
- __typename: data.author.__typename,
- firstName: data.author.firstName,
- },
- });
-
- expect(
- writer
- .writeQueryToStore({
- result: data,
- query,
- dataIdFromObject: () => (0 as any) as string,
- })
- .toObject(),
- ).toEqual(expStore.toObject());
- });
-
describe('type escaping', () => {
- const dataIdFromObject = (object: any) => {
- if (object.__typename && object.id) {
- return object.__typename + '__' + object.id;
- }
- return undefined;
- };
-
it('should correctly escape generated ids', () => {
const query = gql`
query {
@@ -1290,13 +1214,9 @@ describe('writing to the store', () => {
};
const expStore = defaultNormalizedCacheFactory({
ROOT_QUERY: {
- author: {
- type: 'id' as any,
- id: '$ROOT_QUERY.author',
- generated: true,
- },
+ __typename: 'Query',
+ ...data,
},
- '$ROOT_QUERY.author': data.author,
});
expect(
writer
@@ -1327,14 +1247,10 @@ describe('writing to the store', () => {
};
const expStore = defaultNormalizedCacheFactory({
ROOT_QUERY: {
- author: {
- type: 'id',
- id: dataIdFromObject(data.author)!,
- generated: false,
- typename: 'Author',
- },
+ __typename: 'Query',
+ author: policies.toReference(data.author),
},
- [dataIdFromObject(data.author)!]: {
+ [policies.identify(data.author)!]: {
firstName: data.author.firstName,
id: data.author.id,
__typename: data.author.__typename,
@@ -1345,13 +1261,12 @@ describe('writing to the store', () => {
.writeQueryToStore({
result: data,
query,
- dataIdFromObject,
})
.toObject(),
).toEqual(expStore.toObject());
});
- it('should correctly escape json blobs', () => {
+ it('should not need to escape json blobs', () => {
const query = gql`
query {
author {
@@ -1372,20 +1287,13 @@ describe('writing to the store', () => {
};
const expStore = defaultNormalizedCacheFactory({
ROOT_QUERY: {
- author: {
- type: 'id',
- id: dataIdFromObject(data.author)!,
- generated: false,
- typename: 'Author',
- },
+ __typename: 'Query',
+ author: policies.toReference(data.author),
},
- [dataIdFromObject(data.author)!]: {
+ [policies.identify(data.author)!]: {
__typename: data.author.__typename,
id: data.author.id,
- info: {
- type: 'json',
- json: data.author.info,
- },
+ info: data.author.info,
},
});
expect(
@@ -1393,14 +1301,13 @@ describe('writing to the store', () => {
.writeQueryToStore({
result: data,
query,
- dataIdFromObject,
})
.toObject(),
).toEqual(expStore.toObject());
});
});
- it('should merge objects when overwriting a generated id with a real id', () => {
+  it('should not merge unidentified data when replacing it with an ID reference', () => {
const dataWithoutId = {
author: {
firstName: 'John',
@@ -1416,12 +1323,7 @@ describe('writing to the store', () => {
__typename: 'Author',
},
};
- const dataIdFromObject = (object: any) => {
- if (object.__typename && object.id) {
- return object.__typename + '__' + object.id;
- }
- return undefined;
- };
+
const queryWithoutId = gql`
query {
author {
@@ -1440,50 +1342,40 @@ describe('writing to the store', () => {
}
}
`;
- const expStoreWithoutId = defaultNormalizedCacheFactory({
- '$ROOT_QUERY.author': {
- firstName: 'John',
- lastName: 'Smith',
- __typename: 'Author',
- },
+ const expectedStoreWithoutId = defaultNormalizedCacheFactory({
ROOT_QUERY: {
+ __typename: 'Query',
author: {
- type: 'id',
- id: '$ROOT_QUERY.author',
- generated: true,
- typename: 'Author',
+ firstName: 'John',
+ lastName: 'Smith',
+ __typename: 'Author',
},
},
});
- const expStoreWithId = defaultNormalizedCacheFactory({
+ const expectedStoreWithId = defaultNormalizedCacheFactory({
Author__129: {
firstName: 'John',
- lastName: 'Smith',
id: '129',
__typename: 'Author',
},
ROOT_QUERY: {
- author: {
- type: 'id',
- id: 'Author__129',
- generated: false,
- typename: 'Author',
- },
+ __typename: 'Query',
+ author: makeReference('Author__129'),
},
});
+
const storeWithoutId = writer.writeQueryToStore({
result: dataWithoutId,
query: queryWithoutId,
- dataIdFromObject,
});
- expect(storeWithoutId.toObject()).toEqual(expStoreWithoutId.toObject());
+ expect(storeWithoutId.toObject()).toEqual(expectedStoreWithoutId.toObject());
+
const storeWithId = writer.writeQueryToStore({
result: dataWithId,
query: queryWithId,
store: storeWithoutId,
- dataIdFromObject,
});
- expect(storeWithId.toObject()).toEqual(expStoreWithId.toObject());
+ expect(storeWithId.toObject()).toEqual(expectedStoreWithId.toObject());
});
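// Note that the embedded object's fields are not merged into the identified
// entity: lastName is absent from Author__129 above. A hedged sketch of
// opting back into merging with a field merge function (the typePolicies API
// exercised later in this file); mergeObjects combines both representations:

const mergingCache = new InMemoryCache({
  typePolicies: {
    Query: {
      fields: {
        author: {
          merge(existing: any, incoming: any, { mergeObjects }) {
            return mergeObjects(existing, incoming);
          },
        },
      },
    },
  },
});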
it('should allow a union of objects of a different type, when overwriting a generated id with a real id', () => {
@@ -1501,12 +1393,6 @@ describe('writing to the store', () => {
__typename: 'Author',
},
};
- const dataIdFromObject = (object: any) => {
- if (object.__typename && object.id) {
- return object.__typename + '__' + object.id;
- }
- return undefined;
- };
const query = gql`
query {
author {
@@ -1524,16 +1410,11 @@ describe('writing to the store', () => {
}
`;
const expStoreWithPlaceholder = defaultNormalizedCacheFactory({
- '$ROOT_QUERY.author': {
- hello: 'Foo',
- __typename: 'Placeholder',
- },
ROOT_QUERY: {
+ __typename: 'Query',
author: {
- type: 'id',
- id: '$ROOT_QUERY.author',
- generated: true,
- typename: 'Placeholder',
+ hello: 'Foo',
+ __typename: 'Placeholder',
},
},
});
@@ -1545,12 +1426,8 @@ describe('writing to the store', () => {
__typename: 'Author',
},
ROOT_QUERY: {
- author: {
- type: 'id',
- id: 'Author__129',
- generated: false,
- typename: 'Author',
- },
+ __typename: 'Query',
+ author: makeReference('Author__129'),
},
});
@@ -1558,7 +1435,6 @@ describe('writing to the store', () => {
const store = writer.writeQueryToStore({
result: dataWithPlaceholder,
query,
- dataIdFromObject,
});
expect(store.toObject()).toEqual(expStoreWithPlaceholder.toObject());
@@ -1567,7 +1443,6 @@ describe('writing to the store', () => {
result: dataWithAuthor,
query,
store,
- dataIdFromObject,
});
expect(store.toObject()).toEqual(expStoreWithAuthor.toObject());
@@ -1576,7 +1451,6 @@ describe('writing to the store', () => {
result: dataWithPlaceholder,
query,
store,
- dataIdFromObject,
});
// Author__129 will remain in the store,
// but will not be referenced by any of the fields,
@@ -1658,19 +1532,21 @@ describe('writing to the store', () => {
],
};
- const newStore = writer.writeResultToStore({
- dataId: 'ROOT_QUERY',
+ const writer = new StoreWriter({
+ policies: new Policies({
+ dataIdFromObject: getIdField,
+ }),
+ });
+
+ const newStore = writer.writeQueryToStore({
+ query,
result,
- document: query,
- dataIdFromObject: getIdField,
});
expect(newStore.get('1')).toEqual(result.todos[0]);
});
- it('should warn when it receives the wrong data with non-union fragments (using an heuristic matcher)', () => {
- const fragmentMatcherFunction = new HeuristicFragmentMatcher().match;
-
+ it('should warn when it receives the wrong data with non-union fragments', () => {
const result = {
todos: [
{
@@ -1680,37 +1556,24 @@ describe('writing to the store', () => {
],
};
+ const writer = new StoreWriter({
+ policies: new Policies({
+ dataIdFromObject: getIdField,
+ possibleTypes: {},
+ }),
+ });
+
return withWarning(() => {
- const newStore = writer.writeResultToStore({
- dataId: 'ROOT_QUERY',
+ const newStore = writer.writeQueryToStore({
+ query,
result,
- document: query,
- dataIdFromObject: getIdField,
- fragmentMatcherFunction,
});
expect(newStore.get('1')).toEqual(result.todos[0]);
}, /Missing field description/);
});
- it('should warn when it receives the wrong data inside a fragment (using an introspection matcher)', () => {
- const fragmentMatcherFunction = new IntrospectionFragmentMatcher({
- introspectionQueryResultData: {
- __schema: {
- types: [
- {
- kind: 'UNION',
- name: 'Todo',
- possibleTypes: [
- { name: 'ShoppingCartItem' },
- { name: 'TaskItem' },
- ],
- },
- ],
- },
- },
- }).match;
-
+ it('should warn when it receives the wrong data inside a fragment', () => {
const queryWithInterface = gql`
query {
todos {
@@ -1745,22 +1608,26 @@ describe('writing to the store', () => {
],
};
+ const writer = new StoreWriter({
+ policies: new Policies({
+ dataIdFromObject: getIdField,
+ possibleTypes: {
+ Todo: ["ShoppingCartItem", "TaskItem"],
+ },
+ }),
+ });
+
return withWarning(() => {
- const newStore = writer.writeResultToStore({
- dataId: 'ROOT_QUERY',
+ const newStore = writer.writeQueryToStore({
+ query: queryWithInterface,
result,
- document: queryWithInterface,
- dataIdFromObject: getIdField,
- fragmentMatcherFunction,
});
expect(newStore.get('1')).toEqual(result.todos[0]);
}, /Missing field price/);
});
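// Union/interface fragment matching is configured with a plain possibleTypes
// map (supertype -> subtypes), replacing the IntrospectionFragmentMatcher
// setup removed above. A minimal sketch:

const fragmentPolicies = new Policies({
  possibleTypes: {
    Todo: ['ShoppingCartItem', 'TaskItem'],
  },
});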
- it('should warn if a result is missing __typename when required (using an heuristic matcher)', () => {
- const fragmentMatcherFunction = new HeuristicFragmentMatcher().match;
-
+ it('should warn if a result is missing __typename when required', () => {
const result: any = {
todos: [
{
@@ -1771,13 +1638,17 @@ describe('writing to the store', () => {
],
};
+ const writer = new StoreWriter({
+ policies: new Policies({
+ dataIdFromObject: getIdField,
+ possibleTypes: {},
+ }),
+ });
+
return withWarning(() => {
- const newStore = writer.writeResultToStore({
- dataId: 'ROOT_QUERY',
+ const newStore = writer.writeQueryToStore({
+ query: addTypenameToDocument(query),
result,
- document: addTypenameToDocument(query),
- dataIdFromObject: getIdField,
- fragmentMatcherFunction,
});
expect(newStore.get('1')).toEqual(result.todos[0]);
@@ -1789,14 +1660,21 @@ describe('writing to the store', () => {
todos: null,
};
- const newStore = writer.writeResultToStore({
- dataId: 'ROOT_QUERY',
+ const writer = new StoreWriter({
+ policies: new Policies({
+ dataIdFromObject: getIdField,
+ }),
+ });
+
+ const newStore = writer.writeQueryToStore({
+ query,
result,
- document: query,
- dataIdFromObject: getIdField,
});
- expect(newStore.get('ROOT_QUERY')).toEqual({ todos: null });
+ expect(newStore.get('ROOT_QUERY')).toEqual({
+ __typename: 'Query',
+ todos: null,
+ });
});
  it('should not warn if a field is deferred', () => {
let originalWarn = console.warn;
@@ -1811,16 +1689,18 @@ describe('writing to the store', () => {
id: 1,
};
- const fragmentMatcherFunction = new HeuristicFragmentMatcher().match;
- const newStore = writer.writeResultToStore({
- dataId: 'ROOT_QUERY',
+ const writer = new StoreWriter({
+ policies: new Policies({
+ dataIdFromObject: getIdField,
+ }),
+ });
+
+ const newStore = writer.writeQueryToStore({
+ query: defered,
result,
- document: defered,
- dataIdFromObject: getIdField,
- fragmentMatcherFunction,
});
- expect(newStore.get('ROOT_QUERY')).toEqual({ id: 1 });
+ expect(newStore.get('ROOT_QUERY')).toEqual({ __typename: 'Query', id: 1 });
expect(console.warn).not.toBeCalled();
console.warn = originalWarn;
});
@@ -1828,25 +1708,21 @@ describe('writing to the store', () => {
it('throws when trying to write an object without id that was previously queried with id', () => {
const store = defaultNormalizedCacheFactory({
- ROOT_QUERY: assign(
- {},
- {
- __typename: 'Query',
- item: {
- type: 'id',
- id: 'abcd',
- generated: false,
- } as IdValue,
- },
- ) as StoreObject,
- abcd: assign(
- {},
- {
- id: 'abcd',
- __typename: 'Item',
- stringField: 'This is a string!',
- },
- ) as StoreObject,
+ ROOT_QUERY: {
+ __typename: 'Query',
+ item: makeReference('abcd'),
+ },
+ abcd: {
+ id: 'abcd',
+ __typename: 'Item',
+ stringField: 'This is a string!',
+ },
+ });
+
+ const writer = new StoreWriter({
+ policies: new Policies({
+ dataIdFromObject: getIdField,
+ }),
});
expect(() => {
@@ -1865,33 +1741,30 @@ describe('writing to the store', () => {
}
}
`,
- dataIdFromObject: getIdField,
});
}).toThrowErrorMatchingSnapshot();
expect(() => {
- writer.writeResultToStore({
+ writer.writeQueryToStore({
store,
- result: {
- item: {
- __typename: 'Item',
- stringField: 'This is still a string!',
- },
- },
- dataId: 'ROOT_QUERY',
- document: gql`
+ query: gql`
query {
item {
stringField
}
}
`,
- dataIdFromObject: getIdField,
+ result: {
+ item: {
+ __typename: 'Item',
+ stringField: 'This is still a string!',
+ },
+ },
});
- }).toThrowError(/stringField(.|\n)*abcd/g);
+ }).toThrowError(/contains an id of abcd/g);
});
- it('properly handles the connection directive', () => {
+ it('properly handles the @connection directive', () => {
const store = defaultNormalizedCacheFactory();
writer.writeQueryToStore({
@@ -1932,16 +1805,87 @@ describe('writing to the store', () => {
expect(store.toObject()).toEqual({
ROOT_QUERY: {
- abc: [
+ __typename: "Query",
+ 'books:abc': [
{
- generated: true,
- id: 'ROOT_QUERY.abc.0',
- type: 'id',
+ name: 'efgh',
},
],
},
- 'ROOT_QUERY.abc.0': {
- name: 'efgh',
+ });
+ });
+
+ it('can use keyArgs function instead of @connection directive', () => {
+ const store = defaultNormalizedCacheFactory();
+ const writer = new StoreWriter({
+ policies: new Policies({
+ typePolicies: {
+ Query: {
+ fields: {
+ books: {
+ keyArgs: () => "abc",
+ },
+ },
+ },
+ },
+ }),
+ });
+
+ writer.writeQueryToStore({
+ query: gql`
+ {
+ books(skip: 0, limit: 2) {
+ name
+ }
+ }
+ `,
+ result: {
+ books: [
+ {
+ name: 'abcd',
+ },
+ ],
+ },
+ store,
+ });
+
+ expect(store.toObject()).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ "books:abc": [
+ {
+ name: 'abcd',
+ },
+ ],
+ },
+ });
+
+ writer.writeQueryToStore({
+ query: gql`
+ {
+ books(skip: 2, limit: 4) {
+ name
+ }
+ }
+ `,
+ result: {
+ books: [
+ {
+ name: 'efgh',
+ },
+ ],
+ },
+ store,
+ });
+
+ expect(store.toObject()).toEqual({
+ ROOT_QUERY: {
+ __typename: "Query",
+ "books:abc": [
+ {
+ name: 'efgh',
+ },
+ ],
},
});
});
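// Both @connection(key: "abc") and a keyArgs function collapse the field to
// the single store key "books:abc", so writes with different skip/limit
// arguments target the same entry, and the second write above overwrites the
// first. keyArgs also accepts a list of argument names to fold into the key;
// the "category" argument below is purely illustrative:

const keyArgsByName = new Policies({
  typePolicies: {
    Query: {
      fields: {
        books: {
          // distinct category values get distinct entries; skip/limit
          // still do not participate in the field's identity
          keyArgs: ['category'],
        },
      },
    },
  },
});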
@@ -1976,25 +1920,18 @@ describe('writing to the store', () => {
});
expect(store.toObject()).toEqual({
- '$ROOT_QUERY.animals.0.species': { name: 'cat' },
ROOT_QUERY: {
+ __typename: "Query",
animals: [
{
- generated: true,
- id: 'ROOT_QUERY.animals.0',
- type: 'id',
- typename: 'Animal',
+ __typename: 'Animal',
+ species: {
+ __typename: 'Cat',
+ name: 'cat',
+ },
},
],
},
- 'ROOT_QUERY.animals.0': {
- species: {
- generated: true,
- id: '$ROOT_QUERY.animals.0.species',
- type: 'id',
- typename: 'Cat',
- },
- },
});
writer.writeQueryToStore({
@@ -2014,38 +1951,24 @@ describe('writing to the store', () => {
});
expect(store.toObject()).toEqual({
- '$ROOT_QUERY.animals.0.species': { name: 'dog' },
ROOT_QUERY: {
+ __typename: "Query",
animals: [
{
- generated: true,
- id: 'ROOT_QUERY.animals.0',
- type: 'id',
- typename: 'Animal',
+ __typename: 'Animal',
+ species: {
+ __typename: 'Dog',
+ name: 'dog',
+ },
},
],
},
- 'ROOT_QUERY.animals.0': {
- species: {
- generated: true,
- id: '$ROOT_QUERY.animals.0.species',
- type: 'id',
- typename: 'Dog',
- },
- },
});
});
it('should not keep reference when type of mixed inlined field changes to non-inlined field', () => {
const store = defaultNormalizedCacheFactory();
- const dataIdFromObject = (object: any) => {
- if (object.__typename && object.id) {
- return object.__typename + '__' + object.id;
- }
- return undefined;
- };
-
const query = gql`
query {
animals {
@@ -2070,30 +1993,22 @@ describe('writing to the store', () => {
},
],
},
- dataIdFromObject,
store,
});
expect(store.toObject()).toEqual({
- '$ROOT_QUERY.animals.0.species': { name: 'cat' },
ROOT_QUERY: {
+ __typename: "Query",
animals: [
{
- generated: true,
- id: 'ROOT_QUERY.animals.0',
- type: 'id',
- typename: 'Animal',
+ __typename: 'Animal',
+ species: {
+ __typename: 'Cat',
+ name: 'cat',
+ },
},
],
},
- 'ROOT_QUERY.animals.0': {
- species: {
- generated: true,
- id: '$ROOT_QUERY.animals.0.species',
- type: 'id',
- typename: 'Cat',
- },
- },
});
writer.writeQueryToStore({
@@ -2110,34 +2025,60 @@ describe('writing to the store', () => {
},
],
},
- dataIdFromObject,
store,
});
expect(store.toObject()).toEqual({
- '$ROOT_QUERY.animals.0.species': undefined,
'Dog__dog-species': {
id: 'dog-species',
+ __typename: 'Dog',
name: 'dog',
},
ROOT_QUERY: {
+ __typename: "Query",
animals: [
{
- generated: true,
- id: 'ROOT_QUERY.animals.0',
- type: 'id',
- typename: 'Animal',
+ __typename: 'Animal',
+ species: makeReference('Dog__dog-species'),
},
],
},
- 'ROOT_QUERY.animals.0': {
- species: {
- generated: false,
- id: 'Dog__dog-species',
- type: 'id',
- typename: 'Dog',
- },
+ });
+ });
+
+ it('should not deep-freeze scalar objects', () => {
+ const query = gql`
+ query {
+ scalarFieldWithObjectValue
+ }
+ `;
+
+ const scalarObject = {
+ a: 1,
+ b: [2, 3],
+ c: {
+ d: 4,
+ e: 5,
+ },
+ };
+
+ const cache = new InMemoryCache();
+
+ cache.writeQuery({
+ query,
+ data: {
+ scalarFieldWithObjectValue: scalarObject,
},
});
+
+ expect(Object.isFrozen(scalarObject)).toBe(false);
+ expect(Object.isFrozen(scalarObject.b)).toBe(false);
+ expect(Object.isFrozen(scalarObject.c)).toBe(false);
+
+ const result = cache.readQuery<any>({ query });
+ expect(result.scalarFieldWithObjectValue).not.toBe(scalarObject);
+ expect(Object.isFrozen(result.scalarFieldWithObjectValue)).toBe(true);
+ expect(Object.isFrozen(result.scalarFieldWithObjectValue.b)).toBe(true);
+ expect(Object.isFrozen(result.scalarFieldWithObjectValue.c)).toBe(true);
});
});
diff --git a/packages/apollo-client/src/core/__tests__/LocalState.ts b/src/core/__tests__/LocalState.ts
similarity index 93%
rename from packages/apollo-client/src/core/__tests__/LocalState.ts
rename to src/core/__tests__/LocalState.ts
--- a/packages/apollo-client/src/core/__tests__/LocalState.ts
+++ b/src/core/__tests__/LocalState.ts
@@ -1,6 +1,4 @@
-import ApolloClient from '../..';
-import { InMemoryCache } from 'apollo-cache-inmemory';
-import gql from 'graphql-tag';
+import { ApolloClient, InMemoryCache, gql } from '../..';
/**
* Creates an apollo-client instance with a local query resolver named 'localQuery'.
diff --git a/packages/apollo-client/src/core/__tests__/ObservableQuery.ts b/src/core/__tests__/ObservableQuery.ts
similarity index 79%
rename from packages/apollo-client/src/core/__tests__/ObservableQuery.ts
rename to src/core/__tests__/ObservableQuery.ts
--- a/packages/apollo-client/src/core/__tests__/ObservableQuery.ts
+++ b/src/core/__tests__/ObservableQuery.ts
@@ -1,24 +1,22 @@
import gql from 'graphql-tag';
-import { ApolloLink, Observable } from 'apollo-link';
-import {
- InMemoryCache,
- IntrospectionFragmentMatcher,
-} from 'apollo-cache-inmemory';
import { GraphQLError } from 'graphql';
-import mockQueryManager from '../../__mocks__/mockQueryManager';
-import mockWatchQuery from '../../__mocks__/mockWatchQuery';
-import { mockSingleLink } from '../../__mocks__/mockLinks';
+import { Observable } from '../../utilities/observables/Observable';
+import { ApolloLink } from '../../link/core/ApolloLink';
+import { InMemoryCache } from '../../cache/inmemory/inMemoryCache';
+import mockQueryManager from '../../utilities/testing/mocking/mockQueryManager';
+import mockWatchQuery from '../../utilities/testing/mocking/mockWatchQuery';
+import { mockSingleLink } from '../../utilities/testing/mocking/mockLink';
import { ObservableQuery } from '../ObservableQuery';
import { NetworkStatus } from '../networkStatus';
import { QueryManager } from '../QueryManager';
-import { DataStore } from '../../data/store';
-import ApolloClient from '../../';
+import { ApolloClient } from '../../';
-import wrap from '../../util/wrap';
-import subscribeAndCount from '../../util/subscribeAndCount';
-import { stripSymbols } from 'apollo-utilities';
+import wrap from '../../utilities/testing/wrap';
+import subscribeAndCount from '../../utilities/testing/subscribeAndCount';
+import { stripSymbols } from '../../utilities/testing/stripSymbols';
+import { itAsync } from '../../utilities/testing/itAsync';
import { ApolloError } from '../../errors/ApolloError';
describe('ObservableQuery', () => {
@@ -48,26 +46,21 @@ describe('ObservableQuery', () => {
message: 'is offline.',
};
- const createQueryManager = ({ link }: { link?: ApolloLink }) => {
+ const createQueryManager = ({ link }: { link: ApolloLink }) => {
return new QueryManager({
- link: link || mockSingleLink(),
+ link,
assumeImmutableResults: true,
- store: new DataStore(
- new InMemoryCache({
- addTypename: false,
- freezeResults: true,
- }),
- ),
+ cache: new InMemoryCache({
+ addTypename: false,
+ }),
});
};
describe('setOptions', () => {
describe('to change pollInterval', () => {
- beforeEach(() => jest.useFakeTimers());
- afterEach(() => jest.useRealTimers());
-
- it('starts polling if goes from 0 -> something', done => {
+    itAsync('starts polling if it goes from 0 -> something', (resolve, reject) => {
const manager = mockQueryManager(
+ reject,
{
request: { query, variables },
result: { data: dataOne },
@@ -87,23 +80,22 @@ describe('ObservableQuery', () => {
variables,
notifyOnNetworkStatusChange: false,
});
- subscribeAndCount(done, observable, (handleCount, result) => {
+
+ subscribeAndCount(reject, observable, (handleCount, result) => {
if (handleCount === 1) {
expect(stripSymbols(result.data)).toEqual(dataOne);
observable.setOptions({ pollInterval: 10 });
- // 10 for the poll and an extra 1 for network requests
- jest.runTimersToTime(11);
} else if (handleCount === 2) {
expect(stripSymbols(result.data)).toEqual(dataTwo);
- done();
+ observable.stopPolling();
+ resolve();
}
});
-
- jest.runTimersToTime(1);
});
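// These tests migrate from Jest's done callback (plus fake timers) to an
// itAsync(resolve, reject) helper. A minimal sketch of such a helper; the
// real one lives in src/utilities/testing/itAsync.ts and may differ:

function itAsyncSketch(
  message: string,
  callback: (resolve: () => void, reject: (reason?: any) => void) => void,
) {
  it(message, () => new Promise<void>(
    (resolve, reject) => callback(() => resolve(), reject),
  ));
}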
- it('stops polling if goes from something -> 0', done => {
+    itAsync('stops polling if it goes from something -> 0', (resolve, reject) => {
const manager = mockQueryManager(
+ reject,
{
request: { query, variables },
result: { data: dataOne },
@@ -123,24 +115,21 @@ describe('ObservableQuery', () => {
variables,
pollInterval: 10,
});
- subscribeAndCount(done, observable, (handleCount, result) => {
+
+ subscribeAndCount(reject, observable, (handleCount, result) => {
if (handleCount === 1) {
expect(stripSymbols(result.data)).toEqual(dataOne);
observable.setOptions({ pollInterval: 0 });
-
- jest.runTimersToTime(5);
- done();
+ setTimeout(resolve, 5);
} else if (handleCount === 2) {
- done.fail(new Error('Should not get more than one result'));
+ reject(new Error('Should not get more than one result'));
}
});
-
- // trigger the first subscription callback
- jest.runTimersToTime(1);
});
- it('can change from x>0 to y>0', done => {
+ itAsync('can change from x>0 to y>0', (resolve, reject) => {
const manager = mockQueryManager(
+ reject,
{
request: { query, variables },
result: { data: dataOne },
@@ -161,34 +150,21 @@ describe('ObservableQuery', () => {
pollInterval: 100,
notifyOnNetworkStatusChange: false,
});
- subscribeAndCount(done, observable, (handleCount, result) => {
+
+ subscribeAndCount(reject, observable, (handleCount, result) => {
if (handleCount === 1) {
expect(stripSymbols(result.data)).toEqual(dataOne);
-
- // It's confusing but we need to ensure we let the scheduler
- // come back from fetching before we mess with it.
- setImmediate(() => {
- observable.setOptions({ pollInterval: 10 });
-
- // Again, the scheduler needs to complete setting up the poll
- // before the timer goes off
- setImmediate(() => {
- // just enough to trigger a second data
- jest.runTimersToTime(11);
- });
- });
+ observable.setOptions({ pollInterval: 10 });
} else if (handleCount === 2) {
expect(stripSymbols(result.data)).toEqual(dataTwo);
- done();
+ observable.stopPolling();
+ resolve();
}
});
-
- // trigger the first subscription callback
- jest.runTimersToTime(0);
});
});
- it('does not break refetch', done => {
+ itAsync('does not break refetch', (resolve, reject) => {
// This query and variables are copied from react-apollo
const queryWithVars = gql`
query people($first: Int) {
@@ -207,6 +183,7 @@ describe('ObservableQuery', () => {
const variables2 = { first: 1 };
const observable: ObservableQuery<any> = mockWatchQuery(
+ reject,
{
request: {
query: queryWithVars,
@@ -223,7 +200,7 @@ describe('ObservableQuery', () => {
},
);
- subscribeAndCount(done, observable, (handleCount, result) => {
+ subscribeAndCount(reject, observable, (handleCount, result) => {
if (handleCount === 1) {
expect(stripSymbols(result.data)).toEqual(data);
return observable.refetch(variables2);
@@ -232,12 +209,12 @@ describe('ObservableQuery', () => {
expect(result.loading).toBe(true);
} else if (handleCount === 3) {
expect(stripSymbols(result.data)).toEqual(data2);
- done();
+ resolve();
}
});
});
- it('rerenders when refetch is called', done => {
+ itAsync('rerenders when refetch is called', (resolve, reject) => {
// This query and variables are copied from react-apollo
const query = gql`
query people($first: Int) {
@@ -255,6 +232,7 @@ describe('ObservableQuery', () => {
const data2 = { allPeople: { people: [{ name: 'Leia Skywalker' }] } };
const observable: ObservableQuery<any> = mockWatchQuery(
+ reject,
{
request: {
query,
@@ -271,18 +249,18 @@ describe('ObservableQuery', () => {
},
);
- subscribeAndCount(done, observable, (handleCount, result) => {
+ subscribeAndCount(reject, observable, (handleCount, result) => {
if (handleCount === 1) {
expect(stripSymbols(result.data)).toEqual(data);
return observable.refetch();
} else if (handleCount === 2) {
expect(stripSymbols(result.data)).toEqual(data2);
- done();
+ resolve();
}
});
});
- it('rerenders with new variables then shows correct data for previous variables', done => {
+ itAsync('rerenders with new variables then shows correct data for previous variables', (resolve, reject) => {
// This query and variables are copied from react-apollo
const query = gql`
query people($first: Int) {
@@ -301,6 +279,7 @@ describe('ObservableQuery', () => {
const variables2 = { first: 1 };
const observable: ObservableQuery<any> = mockWatchQuery(
+ reject,
{
request: {
query,
@@ -317,7 +296,7 @@ describe('ObservableQuery', () => {
},
);
- subscribeAndCount(done, observable, async (handleCount, result) => {
+ subscribeAndCount(reject, observable, async (handleCount, result) => {
if (handleCount === 1) {
expect(stripSymbols(result.data)).toEqual(data);
await observable.setOptions({ variables: variables2 });
@@ -327,20 +306,18 @@ describe('ObservableQuery', () => {
} else if (handleCount === 3) {
expect(stripSymbols(result.data)).toEqual(data2);
// go back to first set of variables
- await observable.setOptions({ variables });
- const current = observable.getCurrentResult();
+ const current = await observable.setOptions({ variables });
expect(stripSymbols(current.data)).toEqual(data);
- const secondCurrent = observable.getCurrentResult();
- expect(current.data).toEqual(secondCurrent.data);
- done();
+ resolve();
}
});
});
// TODO: Something isn't quite right with this test. It's failing but not
// for the right reasons.
- it.skip('if query is refetched, and an error is returned, no other observer callbacks will be called', done => {
+ itAsync.skip('if query is refetched, and an error is returned, no other observer callbacks will be called', (resolve, reject) => {
const observable: ObservableQuery<any> = mockWatchQuery(
+ reject,
{
request: { query, variables },
result: { data: dataOne },
@@ -370,13 +347,14 @@ describe('ObservableQuery', () => {
handleCount++;
expect(handleCount).toBe(2);
observable.refetch();
- setTimeout(done, 25);
+ setTimeout(resolve, 25);
},
});
});
- it('does a network request if fetchPolicy becomes networkOnly', done => {
+ itAsync('does a network request if fetchPolicy becomes networkOnly', (resolve, reject) => {
const observable: ObservableQuery<any> = mockWatchQuery(
+ reject,
{
request: { query, variables },
result: { data: dataOne },
@@ -387,18 +365,18 @@ describe('ObservableQuery', () => {
},
);
- subscribeAndCount(done, observable, (handleCount, result) => {
+ subscribeAndCount(reject, observable, (handleCount, result) => {
if (handleCount === 1) {
expect(stripSymbols(result.data)).toEqual(dataOne);
return observable.setOptions({ fetchPolicy: 'network-only' });
} else if (handleCount === 2) {
expect(stripSymbols(result.data)).toEqual(dataTwo);
- done();
+ resolve();
}
});
});
- it('does a network request if fetchPolicy is cache-only then store is reset then fetchPolicy becomes not cache-only', done => {
+ itAsync('does a network request if fetchPolicy is cache-only then store is reset then fetchPolicy becomes not cache-only', (resolve, reject) => {
let queryManager: QueryManager;
let observable: ObservableQuery<any>;
const testQuery = gql`
@@ -431,7 +409,7 @@ describe('ObservableQuery', () => {
// fetch first data from server
observable = queryManager.watchQuery({ query: testQuery });
- subscribeAndCount(done, observable, async (handleCount, result) => {
+ subscribeAndCount(reject, observable, async (handleCount, result) => {
try {
if (handleCount === 1) {
expect(stripSymbols(result.data)).toEqual(data);
@@ -446,15 +424,15 @@ describe('ObservableQuery', () => {
} else if (handleCount === 3) {
expect(stripSymbols(result.data)).toEqual(data);
expect(timesFired).toBe(2);
- done();
+ resolve();
}
} catch (e) {
- done.fail(e);
+ reject(e);
}
});
});
- it('does a network request if fetchPolicy changes from cache-only', done => {
+ itAsync('does a network request if fetchPolicy changes from cache-only', (resolve, reject) => {
let queryManager: QueryManager;
let observable: ObservableQuery<any>;
const testQuery = gql`
@@ -490,20 +468,20 @@ describe('ObservableQuery', () => {
notifyOnNetworkStatusChange: false,
});
- subscribeAndCount(done, observable, async (handleCount, result) => {
- if (handleCount === 2) {
+ subscribeAndCount(reject, observable, async (handleCount, result) => {
+ if (handleCount === 1) {
expect(stripSymbols(result.data)).toEqual({});
expect(timesFired).toBe(0);
await observable.setOptions({ fetchPolicy: 'cache-first' });
- } else if (handleCount === 3) {
+ } else if (handleCount === 2) {
expect(stripSymbols(result.data)).toEqual(data);
expect(timesFired).toBe(1);
- done();
+ resolve();
}
});
});
- it('can set queries to standby and will not fetch when doing so', done => {
+ itAsync('can set queries to standby and will not fetch when doing so', (resolve, reject) => {
let queryManager: QueryManager;
let observable: ObservableQuery<any>;
const testQuery = gql`
@@ -539,21 +517,21 @@ describe('ObservableQuery', () => {
notifyOnNetworkStatusChange: false,
});
- subscribeAndCount(done, observable, async (handleCount, result) => {
+ subscribeAndCount(reject, observable, async (handleCount, result) => {
if (handleCount === 1) {
expect(stripSymbols(result.data)).toEqual(data);
expect(timesFired).toBe(1);
await observable.setOptions({ fetchPolicy: 'standby' });
// make sure the query didn't get fired again.
expect(timesFired).toBe(1);
- done();
+ resolve();
} else if (handleCount === 2) {
throw new Error('Handle should not be triggered on standby query');
}
});
});
- it('will not fetch when setting a cache-only query to standby', done => {
+ itAsync('will not fetch when setting a cache-only query to standby', (resolve, reject) => {
let queryManager: QueryManager;
let observable: ObservableQuery<any>;
const testQuery = gql`
@@ -591,22 +569,24 @@ describe('ObservableQuery', () => {
notifyOnNetworkStatusChange: false,
});
- subscribeAndCount(done, observable, async (handleCount, result) => {
+ subscribeAndCount(reject, observable, async (handleCount, result) => {
if (handleCount === 1) {
expect(stripSymbols(result.data)).toEqual(data);
expect(timesFired).toBe(1);
await observable.setOptions({ fetchPolicy: 'standby' });
// make sure the query didn't get fired again.
expect(timesFired).toBe(1);
- done();
+ resolve();
} else if (handleCount === 2) {
throw new Error('Handle should not be triggered on standby query');
}
});
});
});
- it('returns a promise which eventually returns data', done => {
+
+ itAsync('returns a promise which eventually returns data', (resolve, reject) => {
const observable: ObservableQuery<any> = mockWatchQuery(
+ reject,
{
request: { query, variables },
result: { data: dataOne },
@@ -617,7 +597,7 @@ describe('ObservableQuery', () => {
},
);
- subscribeAndCount(done, observable, handleCount => {
+ subscribeAndCount(reject, observable, handleCount => {
if (handleCount !== 1) {
return;
}
@@ -626,12 +606,14 @@ describe('ObservableQuery', () => {
.then(res => {
// returns dataOne from cache
expect(stripSymbols(res.data)).toEqual(dataOne);
- done();
+ resolve();
});
});
});
- it('can bypass looking up results if passed to options', done => {
+
+ itAsync('can bypass looking up results if passed to options', (resolve, reject) => {
const observable: ObservableQuery<any> = mockWatchQuery(
+ reject,
{
request: { query, variables },
result: { data: dataOne },
@@ -643,13 +625,13 @@ describe('ObservableQuery', () => {
);
let errored = false;
- subscribeAndCount(done, observable, handleCount => {
+ subscribeAndCount(reject, observable, handleCount => {
if (handleCount === 1) {
observable
.setOptions({ fetchResults: false, fetchPolicy: 'standby' })
.then(res => {
expect(res).toBeUndefined();
- setTimeout(() => !errored && done(), 5);
+ setTimeout(() => !errored && resolve(), 5);
});
} else if (handleCount > 1) {
errored = true;
@@ -660,8 +642,9 @@ describe('ObservableQuery', () => {
});
describe('setVariables', () => {
- it('reruns query if the variables change', done => {
+ itAsync('reruns query if the variables change', (resolve, reject) => {
const observable: ObservableQuery<any> = mockWatchQuery(
+ reject,
{
request: { query, variables },
result: { data: dataOne },
@@ -672,7 +655,7 @@ describe('ObservableQuery', () => {
},
);
- subscribeAndCount(done, observable, (handleCount, result) => {
+ subscribeAndCount(reject, observable, (handleCount, result) => {
if (handleCount === 1) {
expect(stripSymbols(result.data)).toEqual(dataOne);
return observable.setVariables(differentVariables);
@@ -682,13 +665,14 @@ describe('ObservableQuery', () => {
} else if (handleCount === 3) {
expect(result.loading).toBe(false);
expect(stripSymbols(result.data)).toEqual(dataTwo);
- done();
+ resolve();
}
});
});
- it('does invalidate the currentResult data if the variables change', done => {
+ itAsync('does invalidate the currentResult data if the variables change', (resolve, reject) => {
const observable: ObservableQuery<any> = mockWatchQuery(
+ reject,
{
request: { query, variables },
result: { data: dataOne },
@@ -700,7 +684,7 @@ describe('ObservableQuery', () => {
},
);
- subscribeAndCount(done, observable, async (handleCount, result) => {
+ subscribeAndCount(reject, observable, async (handleCount, result) => {
if (handleCount === 1) {
expect(stripSymbols(result.data)).toEqual(dataOne);
expect(stripSymbols(observable.getCurrentResult().data)).toEqual(
@@ -717,11 +701,12 @@ describe('ObservableQuery', () => {
dataTwo,
);
expect(observable.getCurrentResult().loading).toBe(false);
- done();
+ resolve();
}
});
});
- it('does invalidate the currentResult data if the variables change', done => {
+
+ itAsync('does invalidate the currentResult data if the variables change', (resolve, reject) => {
// Standard data for all these tests
const query = gql`
query UsersQuery($page: Int) {
@@ -756,6 +741,7 @@ describe('ObservableQuery', () => {
};
const observable: ObservableQuery<any> = mockWatchQuery(
+ reject,
{
request: { query, variables },
result: { data: dataOne },
@@ -767,7 +753,7 @@ describe('ObservableQuery', () => {
},
);
- subscribeAndCount(done, observable, async (handleCount, result) => {
+ subscribeAndCount(reject, observable, async (handleCount, result) => {
if (handleCount === 1) {
expect(stripSymbols(result.data)).toEqual(dataOne);
expect(stripSymbols(observable.getCurrentResult().data)).toEqual(
@@ -784,13 +770,14 @@ describe('ObservableQuery', () => {
dataTwo,
);
expect(observable.getCurrentResult().loading).toBe(false);
- done();
+ resolve();
}
});
});
- it('does not invalidate the currentResult errors if the variables change', done => {
+ itAsync('does not invalidate the currentResult errors if the variables change', (resolve, reject) => {
const queryManager = mockQueryManager(
+ reject,
{
request: { query, variables },
result: { errors: [error] },
@@ -807,7 +794,7 @@ describe('ObservableQuery', () => {
errorPolicy: 'all',
});
- subscribeAndCount(done, observable, (handleCount, result) => {
+ subscribeAndCount(reject, observable, async (handleCount, result) => {
if (handleCount === 1) {
expect(result.errors).toEqual([error]);
expect(observable.getCurrentResult().errors).toEqual([error]);
@@ -821,20 +808,20 @@ describe('ObservableQuery', () => {
dataTwo,
);
expect(observable.getCurrentResult().loading).toBe(false);
- done();
+ resolve();
}
});
});
- it('does not perform a query when unsubscribed if variables change', () => {
+ itAsync('does not perform a query when unsubscribed if variables change', (resolve, reject) => {
// Note: no responses, will throw if a query is made
- const queryManager = mockQueryManager();
+ const queryManager = mockQueryManager(reject);
const observable = queryManager.watchQuery({ query, variables });
-
- return observable.setVariables(differentVariables);
+ return observable.setVariables(differentVariables)
+ .then(resolve, reject);
});
- it('sets networkStatus to `setVariables` when fetching', done => {
+ itAsync('sets networkStatus to `setVariables` when fetching', (resolve, reject) => {
const mockedResponses = [
{
request: { query, variables },
@@ -846,7 +833,7 @@ describe('ObservableQuery', () => {
},
];
- const queryManager = mockQueryManager(...mockedResponses);
+ const queryManager = mockQueryManager(reject, ...mockedResponses);
const firstRequest = mockedResponses[0].request;
const observable = queryManager.watchQuery({
query: firstRequest.query,
@@ -854,7 +841,7 @@ describe('ObservableQuery', () => {
notifyOnNetworkStatusChange: true,
});
- subscribeAndCount(done, observable, (handleCount, result) => {
+ subscribeAndCount(reject, observable, (handleCount, result) => {
if (handleCount === 1) {
expect(stripSymbols(result.data)).toEqual(dataOne);
expect(result.networkStatus).toBe(NetworkStatus.ready);
@@ -867,12 +854,12 @@ describe('ObservableQuery', () => {
expect(result.loading).toBe(false);
expect(result.networkStatus).toBe(NetworkStatus.ready);
expect(stripSymbols(result.data)).toEqual(dataTwo);
- done();
+ resolve();
}
});
});
- it('sets networkStatus to `setVariables` when calling refetch with new variables', done => {
+ itAsync('sets networkStatus to `setVariables` when calling refetch with new variables', (resolve, reject) => {
const mockedResponses = [
{
request: { query, variables },
@@ -884,7 +871,7 @@ describe('ObservableQuery', () => {
},
];
- const queryManager = mockQueryManager(...mockedResponses);
+ const queryManager = mockQueryManager(reject, ...mockedResponses);
const firstRequest = mockedResponses[0].request;
const observable = queryManager.watchQuery({
query: firstRequest.query,
@@ -892,7 +879,7 @@ describe('ObservableQuery', () => {
notifyOnNetworkStatusChange: true,
});
- subscribeAndCount(done, observable, (handleCount, result) => {
+ subscribeAndCount(reject, observable, (handleCount, result) => {
if (handleCount === 1) {
expect(stripSymbols(result.data)).toEqual(dataOne);
expect(result.networkStatus).toBe(NetworkStatus.ready);
@@ -905,13 +892,14 @@ describe('ObservableQuery', () => {
expect(result.loading).toBe(false);
expect(result.networkStatus).toBe(NetworkStatus.ready);
expect(stripSymbols(result.data)).toEqual(dataTwo);
- done();
+ resolve();
}
});
});
- it('reruns observer callback if the variables change but data does not', done => {
+ itAsync('reruns observer callback if the variables change but data does not', (resolve, reject) => {
const observable: ObservableQuery<any> = mockWatchQuery(
+ reject,
{
request: { query, variables },
result: { data: dataOne },
@@ -922,7 +910,7 @@ describe('ObservableQuery', () => {
},
);
- subscribeAndCount(done, observable, (handleCount, result) => {
+ subscribeAndCount(reject, observable, (handleCount, result) => {
if (handleCount === 1) {
expect(stripSymbols(result.data)).toEqual(dataOne);
observable.setVariables(differentVariables);
@@ -931,13 +919,14 @@ describe('ObservableQuery', () => {
expect(stripSymbols(result.data)).toEqual(dataOne);
} else if (handleCount === 3) {
expect(stripSymbols(result.data)).toEqual(dataOne);
- done();
+ resolve();
}
});
});
- it('does not rerun observer callback if the variables change but new data is in store', done => {
+ itAsync('does not rerun observer callback if the variables change but new data is in store', (resolve, reject) => {
const manager = mockQueryManager(
+ reject,
{
request: { query, variables },
result: { data: dataOne },
@@ -956,13 +945,13 @@ describe('ObservableQuery', () => {
});
let errored = false;
- subscribeAndCount(done, observable, (handleCount, result) => {
+ subscribeAndCount(reject, observable, (handleCount, result) => {
if (handleCount === 1) {
expect(stripSymbols(result.data)).toEqual(dataOne);
observable.setVariables(differentVariables);
// Nothing should happen, so we'll wait a moment to check that
- setTimeout(() => !errored && done(), 10);
+ setTimeout(() => !errored && resolve(), 10);
} else if (handleCount === 2) {
throw new Error('Observable callback should not fire twice');
}
@@ -970,8 +959,9 @@ describe('ObservableQuery', () => {
});
});
- it('does not rerun query if variables do not change', done => {
+ itAsync('does not rerun query if variables do not change', (resolve, reject) => {
const observable: ObservableQuery<any> = mockWatchQuery(
+ reject,
{
request: { query, variables },
result: { data: dataOne },
@@ -983,13 +973,13 @@ describe('ObservableQuery', () => {
);
let errored = false;
- subscribeAndCount(done, observable, (handleCount, result) => {
+ subscribeAndCount(reject, observable, (handleCount, result) => {
if (handleCount === 1) {
expect(stripSymbols(result.data)).toEqual(dataOne);
observable.setVariables(variables);
// Nothing should happen, so we'll wait a moment to check that
- setTimeout(() => !errored && done(), 10);
+ setTimeout(() => !errored && resolve(), 10);
} else if (handleCount === 2) {
errored = true;
throw new Error('Observable callback should not fire twice');
@@ -997,8 +987,9 @@ describe('ObservableQuery', () => {
});
});
- it('does not rerun query if set to not refetch', done => {
+ itAsync('does not rerun query if set to not refetch', (resolve, reject) => {
const observable: ObservableQuery<any> = mockWatchQuery(
+ reject,
{
request: { query, variables },
result: { data: dataOne },
@@ -1010,13 +1001,13 @@ describe('ObservableQuery', () => {
);
let errored = false;
- subscribeAndCount(done, observable, (handleCount, result) => {
+ subscribeAndCount(reject, observable, (handleCount, result) => {
if (handleCount === 1) {
expect(stripSymbols(result.data)).toEqual(dataOne);
observable.setVariables(variables, true, false);
// Nothing should happen, so we'll wait a moment to check that
- setTimeout(() => !errored && done(), 10);
+ setTimeout(() => !errored && resolve(), 10);
} else if (handleCount === 2) {
errored = true;
throw new Error('Observable callback should not fire twice');
@@ -1024,11 +1015,12 @@ describe('ObservableQuery', () => {
});
});
- it('handles variables changing while a query is in-flight', done => {
+ itAsync('handles variables changing while a query is in-flight', (resolve, reject) => {
// The expected behavior is that the original variables are forgotten
// and the query stays in loading state until the result for the new variables
// has returned.
const observable: ObservableQuery<any> = mockWatchQuery(
+ reject,
{
request: { query, variables },
result: { data: dataOne },
@@ -1043,19 +1035,19 @@ describe('ObservableQuery', () => {
setTimeout(() => observable.setVariables(differentVariables), 10);
- subscribeAndCount(done, observable, (handleCount, result) => {
+ subscribeAndCount(reject, observable, (handleCount, result) => {
if (handleCount === 1) {
expect(result.networkStatus).toBe(NetworkStatus.ready);
expect(result.loading).toBe(false);
expect(stripSymbols(result.data)).toEqual(dataTwo);
- done();
+ resolve();
}
});
});
});
describe('refetch', () => {
- it('calls fetchRequest with fetchPolicy `network-only` when using a non-networked fetch policy', done => {
+ itAsync('calls fetchRequest with fetchPolicy `network-only` when using a non-networked fetch policy', (resolve, reject) => {
const mockedResponses = [
{
request: { query, variables },
@@ -1067,7 +1059,7 @@ describe('ObservableQuery', () => {
},
];
- const queryManager = mockQueryManager(...mockedResponses);
+ const queryManager = mockQueryManager(reject, ...mockedResponses);
const firstRequest = mockedResponses[0].request;
const observable = queryManager.watchQuery({
query: firstRequest.query,
@@ -1080,22 +1072,21 @@ describe('ObservableQuery', () => {
origFetchQuery.apply(queryManager, arguments),
);
- subscribeAndCount(done, observable, (handleCount, result) => {
+ subscribeAndCount(reject, observable, (handleCount, result) => {
if (handleCount === 1) {
observable.refetch(differentVariables);
} else if (handleCount === 3) {
expect(queryManager.fetchQuery.mock.calls[1][1].fetchPolicy).toEqual(
'network-only',
);
- done();
+ resolve();
}
});
});
- it(
- 'calls fetchRequest with fetchPolicy `no-cache` when using `no-cache` ' +
- 'fetch policy',
- done => {
+ itAsync(
+ 'calls fetchRequest with fetchPolicy `no-cache` when using `no-cache` fetch policy',
+ (resolve, reject) => {
const mockedResponses = [
{
request: { query, variables },
@@ -1107,7 +1098,7 @@ describe('ObservableQuery', () => {
},
];
- const queryManager = mockQueryManager(...mockedResponses);
+ const queryManager = mockQueryManager(reject, ...mockedResponses);
const firstRequest = mockedResponses[0].request;
const observable = queryManager.watchQuery({
query: firstRequest.query,
@@ -1120,20 +1111,20 @@ describe('ObservableQuery', () => {
origFetchQuery.apply(queryManager, arguments),
);
- subscribeAndCount(done, observable, (handleCount, result) => {
+ subscribeAndCount(reject, observable, (handleCount, result) => {
if (handleCount === 1) {
observable.refetch(differentVariables);
} else if (handleCount === 2) {
expect(
queryManager.fetchQuery.mock.calls[1][1].fetchPolicy,
).toEqual('no-cache');
- done();
+ resolve();
}
});
- },
+ }
);
- it('calls ObservableQuery.next even after hitting cache', done => {
+ itAsync('calls ObservableQuery.next even after hitting cache', (resolve, reject) => {
// This query and variables are copied from react-apollo
const queryWithVars = gql`
query people($first: Int) {
@@ -1152,6 +1143,7 @@ describe('ObservableQuery', () => {
const variables2 = { first: 1 };
const observable: ObservableQuery<any> = mockWatchQuery(
+ reject,
{
request: {
query: queryWithVars,
@@ -1177,7 +1169,7 @@ describe('ObservableQuery', () => {
observable.setOptions({ fetchPolicy: 'cache-and-network' });
- subscribeAndCount(done, observable, (handleCount, result) => {
+ subscribeAndCount(reject, observable, (handleCount, result) => {
if (handleCount === 1) {
expect(result.data).toBeUndefined();
expect(result.loading).toBe(true);
@@ -1198,12 +1190,12 @@ describe('ObservableQuery', () => {
} else if (handleCount === 6) {
expect(stripSymbols(result.data)).toEqual(data);
expect(result.loading).toBe(false);
- done();
+ resolve();
}
});
});
- it('cache-and-network refetch should run @client(always: true) resolvers when network request fails', done => {
+ itAsync('cache-and-network refetch should run @client(always: true) resolvers when network request fails', (resolve, reject) => {
const query = gql`
query MixedQuery {
counter @client(always: true)
@@ -1320,9 +1312,9 @@ describe('ObservableQuery', () => {
stale: false,
});
- done();
+ resolve();
} else if (handleCount > 5) {
- done.fail(new Error('should not get here'));
+ reject(new Error('should not get here'));
}
},
});
@@ -1330,7 +1322,7 @@ describe('ObservableQuery', () => {
});
describe('currentResult', () => {
- it('returns the same value as observableQuery.next got', done => {
+ itAsync('returns the same value as observableQuery.next got', (resolve, reject) => {
const queryWithFragment = gql`
fragment CatInfo on Cat {
isTabby
@@ -1397,33 +1389,20 @@ describe('ObservableQuery', () => {
pets: petData.slice(0, 3),
};
- const ni = mockSingleLink(
- {
- request: { query: queryWithFragment, variables },
- result: { data: dataOneWithTypename },
- },
- {
- request: { query: queryWithFragment, variables },
- result: { data: dataTwoWithTypename },
- },
- );
+ const ni = mockSingleLink({
+ request: { query: queryWithFragment, variables },
+ result: { data: dataOneWithTypename },
+ }, {
+ request: { query: queryWithFragment, variables },
+ result: { data: dataTwoWithTypename },
+ }).setOnError(reject);
const client = new ApolloClient({
link: ni,
cache: new InMemoryCache({
- fragmentMatcher: new IntrospectionFragmentMatcher({
- introspectionQueryResultData: {
- __schema: {
- types: [
- {
- kind: 'UNION',
- name: 'Creature',
- possibleTypes: [{ name: 'Pet' }],
- },
- ],
- },
- },
- }),
+ possibleTypes: {
+ Creature: ['Pet'],
+ },
}),
});
@@ -1433,7 +1412,7 @@ describe('ObservableQuery', () => {
notifyOnNetworkStatusChange: true,
});
- subscribeAndCount(done, observable, (count, result) => {
+ subscribeAndCount(reject, observable, (count, result) => {
const { data, loading, networkStatus } = observable.getCurrentResult();
try {
expect(result).toEqual({
@@ -1443,86 +1422,60 @@ describe('ObservableQuery', () => {
stale: false,
});
} catch (e) {
- done.fail(e);
+ reject(e);
}
if (count === 1) {
observable.refetch();
}
if (count === 3) {
- setTimeout(done, 5);
+ setTimeout(resolve, 5);
}
if (count > 3) {
- done.fail(new Error('Observable.next called too many times'));
+ reject(new Error('Observable.next called too many times'));
}
});
});
- it('returns the current query status immediately', done => {
- const observable: ObservableQuery<any> = mockWatchQuery({
+ itAsync('returns the current query status immediately', (resolve, reject) => {
+ const observable: ObservableQuery<any> = mockWatchQuery(reject, {
request: { query, variables },
result: { data: dataOne },
delay: 100,
});
- subscribeAndCount(done, observable, () => {
+ subscribeAndCount(reject, observable, () => {
expect(stripSymbols(observable.getCurrentResult())).toEqual({
data: dataOne,
loading: false,
networkStatus: 7,
- partial: false,
+ stale: false,
});
- done();
+ resolve();
});
expect(observable.getCurrentResult()).toEqual({
loading: true,
data: undefined,
networkStatus: 1,
- partial: true,
+ stale: false,
});
setTimeout(
- wrap(done, () => {
+ wrap(reject, () => {
expect(observable.getCurrentResult()).toEqual({
loading: true,
data: undefined,
networkStatus: 1,
- partial: true,
+ stale: false,
});
}),
0,
);
});
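// getCurrentResult (renamed from currentResult) now reports stale rather than
// the old partial flag. The pre-network shape asserted above, spelled out with
// the NetworkStatus enum for clarity:

const expectedInitialResult = {
  loading: true,
  data: undefined as any,
  networkStatus: NetworkStatus.loading, // === 1; NetworkStatus.ready === 7
  stale: false,
};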
- it('returns results from the store immediately', () => {
- const queryManager = mockQueryManager({
- request: { query, variables },
- result: { data: dataOne },
- });
-
- return queryManager.query({ query, variables }).then((result: any) => {
- expect(stripSymbols(result)).toEqual({
- data: dataOne,
- loading: false,
- networkStatus: 7,
- stale: false,
- });
- const observable = queryManager.watchQuery({
- query,
- variables,
- });
- expect(stripSymbols(observable.getCurrentResult())).toEqual({
- data: dataOne,
- loading: false,
- networkStatus: 7,
- partial: false,
- });
- });
- });
-
- it('returns errors from the store immediately', done => {
- const queryManager = mockQueryManager({
+ itAsync('returns errors from the store immediately', (resolve, reject) => {
+ const queryManager = mockQueryManager(reject, {
request: { query, variables },
result: { errors: [error] },
});
@@ -1539,13 +1492,13 @@ describe('ObservableQuery', () => {
const currentResult = observable.getCurrentResult();
expect(currentResult.loading).toBe(false);
expect(currentResult.error!.graphQLErrors).toEqual([error]);
- done();
+ resolve();
},
});
});
- it('returns referentially equal errors', () => {
- const queryManager = mockQueryManager({
+ itAsync('returns referentially equal errors', (resolve, reject) => {
+ const queryManager = mockQueryManager(reject, {
request: { query, variables },
result: { errors: [error] },
});
@@ -1563,11 +1516,11 @@ describe('ObservableQuery', () => {
expect(currentResult.error!.graphQLErrors).toEqual([error]);
const currentResult2 = observable.getCurrentResult();
expect(currentResult.error === currentResult2.error).toBe(true);
- });
+ }).then(resolve, reject);
});
- it('returns errors with data if errorPolicy is all', () => {
- const queryManager = mockQueryManager({
+ itAsync('returns errors with data if errorPolicy is all', (resolve, reject) => {
+ const queryManager = mockQueryManager(reject, {
request: { query, variables },
result: { data: dataOne, errors: [error] },
});
@@ -1585,11 +1538,11 @@ describe('ObservableQuery', () => {
expect(currentResult.loading).toBe(false);
expect(currentResult.errors).toEqual([error]);
expect(currentResult.error).toBeUndefined();
- });
+ }).then(resolve, reject);
});
- it('ignores errors with data if errorPolicy is ignore', () => {
- const queryManager = mockQueryManager({
+ itAsync('ignores errors with data if errorPolicy is ignore', (resolve, reject) => {
+ const queryManager = mockQueryManager(reject, {
request: { query, variables },
result: { errors: [error], data: dataOne },
});
@@ -1607,10 +1560,10 @@ describe('ObservableQuery', () => {
expect(currentResult.loading).toBe(false);
expect(currentResult.errors).toBeUndefined();
expect(currentResult.error).toBeUndefined();
- });
+ }).then(resolve, reject);
});
- it('returns partial data from the store immediately', done => {
+ itAsync('returns partial data from the store', (resolve, reject) => {
const superQuery = gql`
query superQuery($id: ID!) {
people_one(id: $id) {
@@ -1628,6 +1581,7 @@ describe('ObservableQuery', () => {
};
const queryManager = mockQueryManager(
+ reject,
{
request: { query, variables },
result: { data: dataOne },
@@ -1645,16 +1599,16 @@ describe('ObservableQuery', () => {
returnPartialData: true,
});
- expect(observable.currentResult()).toEqual({
- data: dataOne,
+ expect(observable.getCurrentResult()).toEqual({
+ data: void 0,
loading: true,
networkStatus: 1,
- partial: true,
+ stale: false,
});
// we can use this to trigger the query
- subscribeAndCount(done, observable, (handleCount, subResult) => {
- const { data, loading, networkStatus } = observable.currentResult();
+ subscribeAndCount(reject, observable, (handleCount, subResult) => {
+ const { data, loading, networkStatus } = observable.getCurrentResult();
expect(subResult).toEqual({
data,
loading,
@@ -1664,27 +1618,37 @@ describe('ObservableQuery', () => {
if (handleCount === 1) {
expect(subResult).toEqual({
- data: dataOne,
+ data: void 0,
loading: true,
networkStatus: 1,
stale: false,
});
} else if (handleCount === 2) {
+ expect(subResult).toEqual({
+ data: dataOne,
+ loading: false,
+ networkStatus: 7,
+ stale: false,
+ });
+
+ } else if (handleCount === 3) {
expect(subResult).toEqual({
data: superDataOne,
loading: false,
networkStatus: 7,
stale: false,
});
- done();
+
+ resolve();
}
});
});
});
- it('returns loading even if full data is available when using network-only fetchPolicy', done => {
+ itAsync('returns loading even if full data is available when using network-only fetchPolicy', (resolve, reject) => {
const queryManager = mockQueryManager(
+ reject,
{
request: { query, variables },
result: { data: dataOne },
@@ -1705,15 +1669,16 @@ describe('ObservableQuery', () => {
data: undefined,
loading: true,
networkStatus: 1,
- partial: false,
+ stale: false,
});
- subscribeAndCount(done, observable, (handleCount, subResult) => {
+ subscribeAndCount(reject, observable, (handleCount, subResult) => {
const {
data,
loading,
networkStatus,
} = observable.getCurrentResult();
+
expect(subResult).toEqual({
data,
loading,
@@ -1721,21 +1686,31 @@ describe('ObservableQuery', () => {
stale: false,
});
- if (handleCount === 2) {
+ if (handleCount === 1) {
+ expect(stripSymbols(subResult)).toEqual({
+ data: void 0,
+ loading: true,
+ networkStatus: NetworkStatus.loading,
+ stale: false,
+ });
+
+ } else if (handleCount === 2) {
expect(stripSymbols(subResult)).toEqual({
data: dataTwo,
loading: false,
- networkStatus: 7,
+ networkStatus: NetworkStatus.ready,
stale: false,
});
- done();
+
+ resolve();
}
});
});
});
- it('returns loading on no-cache fetchPolicy queries when calling getCurrentResult', done => {
+ itAsync('returns loading on no-cache fetchPolicy queries when calling getCurrentResult', (resolve, reject) => {
const queryManager = mockQueryManager(
+ reject,
{
request: { query, variables },
result: { data: dataOne },
@@ -1752,34 +1727,36 @@ describe('ObservableQuery', () => {
variables,
fetchPolicy: 'no-cache',
});
+
expect(stripSymbols(observable.getCurrentResult())).toEqual({
data: undefined,
loading: true,
networkStatus: 1,
- partial: false,
+ stale: false,
});
- subscribeAndCount(done, observable, (handleCount, subResult) => {
+ subscribeAndCount(reject, observable, (handleCount, subResult) => {
const {
data,
loading,
networkStatus,
} = observable.getCurrentResult();
- expect(subResult).toEqual({
- data,
- loading,
- networkStatus,
- stale: false,
- });
- if (handleCount === 2) {
+ if (handleCount === 1) {
+ expect(subResult).toEqual({
+ data,
+ loading,
+ networkStatus,
+ stale: false,
+ });
+ } else if (handleCount === 2) {
expect(stripSymbols(subResult)).toEqual({
data: dataTwo,
loading: false,
networkStatus: 7,
stale: false,
});
- done();
+ resolve();
}
});
});
@@ -1808,8 +1785,9 @@ describe('ObservableQuery', () => {
},
};
- it('returns optimistic mutation results from the store', done => {
+ itAsync('returns optimistic mutation results from the store', (resolve, reject) => {
const queryManager = mockQueryManager(
+ reject,
{
request: { query, variables },
result: { data: dataOne },
@@ -1825,7 +1803,7 @@ describe('ObservableQuery', () => {
variables,
});
- subscribeAndCount(done, observable, (count, result) => {
+ subscribeAndCount(reject, observable, (count, result) => {
const {
data,
loading,
@@ -1856,7 +1834,7 @@ describe('ObservableQuery', () => {
);
} else if (count === 3) {
expect(stripSymbols(result.data.people_one)).toEqual(mutationData);
- done();
+ resolve();
}
});
});
@@ -1864,7 +1842,7 @@ describe('ObservableQuery', () => {
});
describe('assumeImmutableResults', () => {
- it('should prevent costly (but safe) cloneDeep calls', async () => {
+ itAsync('should prevent costly (but safe) cloneDeep calls', async (resolve, reject) => {
const queryOptions = {
query: gql`
query {
@@ -1874,15 +1852,18 @@ describe('ObservableQuery', () => {
pollInterval: 20,
};
- function check({ assumeImmutableResults, freezeResults }) {
+ function check({
+ assumeImmutableResults = true,
+ assertFrozenResults = false,
+ }) {
const client = new ApolloClient({
link: mockSingleLink(
{ request: queryOptions, result: { data: { value: 1 } } },
{ request: queryOptions, result: { data: { value: 2 } } },
- { request: queryOptions, result: { data: { value: 3 } } },
- ),
+ { request: queryOptions, result: { data: { value: 3 } } }
+ ).setOnError(error => { throw error }),
assumeImmutableResults,
- cache: new InMemoryCache({ freezeResults }),
+ cache: new InMemoryCache(),
});
const observable = client.watchQuery(queryOptions);
@@ -1892,10 +1873,20 @@ describe('ObservableQuery', () => {
observable.subscribe({
next(result) {
values.push(result.data.value);
- try {
- result.data.value = 'oyez';
- } catch (error) {
- reject(error);
+ if (assertFrozenResults) {
+ try {
+ result.data.value = 'oyez';
+ } catch (error) {
+ reject(error);
+ }
+ } else {
+ result = {
+ ...result,
+ data: {
+ ...result.data,
+ value: 'oyez',
+ },
+ };
}
client.writeData(result);
},
@@ -1907,20 +1898,6 @@ describe('ObservableQuery', () => {
});
}
- // When we assume immutable results, the next method will not fire as a
- // result of destructively modifying result.data.value, because the data
- // object is still === to the previous object. This behavior might seem
- // like a bug, if you are relying on the mutability of results, but the
- // cloneDeep calls required to prevent that bug are expensive. Assuming
- // immutability is safe only when you write your code in an immutable
- // style, but the benefits are well worth the extra effort.
- expect(
- await check({
- assumeImmutableResults: true,
- freezeResults: false,
- }),
- ).toEqual([1, 2, 3]);
-
// When we do not assume immutable results, the observable must do
// extra work to take snapshots of past results, just in case those
// results are destructively modified. The benefit of that work is
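// Editor's aside (illustration only, not part of this patch): the contrast
// the comments above describe. With assumeImmutableResults: true, Apollo
// Client detects changes by reference (===), so only immutable-style
// updates are observed:
//
//   // Destructive: same object reference, so the change goes unnoticed.
//   result.data.value = 'oyez';
//
//   // Immutable: new references at each changed level, as this test does.
//   const updated = { ...result, data: { ...result.data, value: 'oyez' } };
//   client.writeData(updated);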
@@ -1931,7 +1908,7 @@ describe('ObservableQuery', () => {
expect(
await check({
assumeImmutableResults: false,
- freezeResults: false,
+ assertFrozenResults: false,
}),
).toEqual([1, 'oyez', 2, 'oyez', 3, 'oyez']);
@@ -1944,7 +1921,7 @@ describe('ObservableQuery', () => {
// modifications of the result objects will become fatal. Once you
// start enforcing immutability in this way, you might as well pass
// assumeImmutableResults: true, to prevent calling cloneDeep.
- freezeResults: true,
+ assertFrozenResults: true,
});
throw new Error('not reached');
} catch (error) {
@@ -1956,12 +1933,15 @@ describe('ObservableQuery', () => {
}
await checkThrows(true);
await checkThrows(false);
+
+ resolve();
});
});
describe('stopPolling', () => {
- it('does not restart polling after stopping and resubscribing', done => {
+ itAsync('does not restart polling after stopping and resubscribing', (resolve, reject) => {
const observable = mockWatchQuery(
+ reject,
{
request: { query, variables },
result: { data: dataOne },
@@ -1976,7 +1956,7 @@ describe('ObservableQuery', () => {
observable.stopPolling();
let startedPolling = false;
- subscribeAndCount(done, observable, handleCount => {
+ subscribeAndCount(reject, observable, handleCount => {
if (handleCount === 1) {
// first call to subscribe is the immediate result when
// subscribing. later calls to this callback indicate that
@@ -1988,23 +1968,23 @@ describe('ObservableQuery', () => {
if (!startedPolling) {
// if we're not polling for data, it means this test
// is ok
- done();
+ resolve();
}
}, 60);
} else if (handleCount === 2) {
// oops! we are polling for data, this should not happen.
startedPolling = true;
- done.fail(new Error('should not start polling, already stopped'));
+ reject(new Error('should not start polling, already stopped'));
}
});
});
});
describe('resetQueryStoreErrors', () => {
- it("should remove any GraphQLError's stored in the query store", (done) => {
+ itAsync("should remove any GraphQLError's stored in the query store", (resolve, reject) => {
const graphQLError = new GraphQLError('oh no!');
- const observable: ObservableQuery<any> = mockWatchQuery({
+ const observable: ObservableQuery<any> = mockWatchQuery(reject, {
request: { query, variables },
result: { errors: [graphQLError] },
});
@@ -2018,15 +1998,15 @@ describe('ObservableQuery', () => {
observable.resetQueryStoreErrors();
expect(queryStore.graphQLErrors).toEqual([]);
- done();
+ resolve();
}
});
});
- it("should remove network error's stored in the query store", (done) => {
+ itAsync("should remove network error's stored in the query store", (resolve, reject) => {
const networkError = new Error('oh no!');
- const observable: ObservableQuery<any> = mockWatchQuery({
+ const observable: ObservableQuery<any> = mockWatchQuery(reject, {
request: { query, variables },
result: { data: dataOne },
});
@@ -2038,7 +2018,7 @@ describe('ObservableQuery', () => {
queryStore.networkError = networkError;
observable.resetQueryStoreErrors();
expect(queryStore.networkError).toBeNull();
- done();
+ resolve();
}
});
});
diff --git a/packages/apollo-client/src/core/__tests__/QueryManager/index.ts b/src/core/__tests__/QueryManager/index.ts
similarity index 84%
rename from packages/apollo-client/src/core/__tests__/QueryManager/index.ts
rename to src/core/__tests__/QueryManager/index.ts
--- a/packages/apollo-client/src/core/__tests__/QueryManager/index.ts
+++ b/src/core/__tests__/QueryManager/index.ts
@@ -4,20 +4,20 @@ import { map } from 'rxjs/operators';
import { assign } from 'lodash';
import gql from 'graphql-tag';
import { DocumentNode, ExecutionResult, GraphQLError } from 'graphql';
-import { ApolloLink, Operation, Observable } from 'apollo-link';
+
+import { Observable, Observer } from '../../../utilities/observables/Observable';
+import { ApolloLink } from '../../../link/core/ApolloLink';
+import { Operation } from '../../../link/core/types';
+import { InMemoryCache } from '../../../cache/inmemory/inMemoryCache';
import {
- InMemoryCache,
ApolloReducerConfig,
- NormalizedCacheObject,
-} from 'apollo-cache-inmemory';
+ NormalizedCacheObject
+} from '../../../cache/inmemory/types';
// mocks
-import mockQueryManager from '../../../__mocks__/mockQueryManager';
-import mockWatchQuery from '../../../__mocks__/mockWatchQuery';
-import {
- mockSingleLink,
- MockSubscriptionLink,
-} from '../../../__mocks__/mockLinks';
+import mockQueryManager from '../../../utilities/testing/mocking/mockQueryManager';
+import mockWatchQuery from '../../../utilities/testing/mocking/mockWatchQuery';
+import { mockSingleLink } from '../../../utilities/testing/mocking/mockLink';
// core
import { ApolloQueryResult } from '../../types';
@@ -27,15 +27,15 @@ import { WatchQueryOptions } from '../../watchQueryOptions';
import { QueryManager } from '../../QueryManager';
import { ApolloError } from '../../../errors/ApolloError';
-import { DataStore } from '../../../data/store';
-import { Observer } from '../../../util/Observable';
// testing utils
-import wrap from '../../../util/wrap';
+import wrap from '../../../utilities/testing/wrap';
import observableToPromise, {
observableToPromiseAndSubscription,
-} from '../../../util/observableToPromise';
-import { stripSymbols } from 'apollo-utilities';
+} from '../../../utilities/testing/observableToPromise';
+import subscribeAndCount from '../../../utilities/testing/subscribeAndCount';
+import { stripSymbols } from '../../../utilities/testing/stripSymbols';
+import { itAsync } from '../../../utilities/testing/itAsync';
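// Editor's sketch (assumption): the helpers imported above are not shown in
// this patch; the real implementations under src/utilities/testing/ may
// differ. Inferred from how they are used throughout this file:

// itAsync(name, (resolve, reject) => ...) wraps Jest's `it` in a Promise so
// a test passes when `resolve` is called and fails fast on `reject`
// (itAsync.skip mirrors it.skip).
function itAsyncSketch(
  name: string,
  run: (resolve: (value?: any) => void, reject: (reason: any) => void) => any,
) {
  it(name, () => new Promise<any>((resolve, reject) => run(resolve, reject)));
}

// subscribeAndCount(reject, observable, cb) subscribes and invokes `cb` with
// a 1-based count of results delivered so far, rejecting on any thrown error.
function subscribeAndCountSketch(
  reject: (reason: any) => void,
  observable: { subscribe(observer: object): { unsubscribe(): void } },
  cb: (handleCount: number, result: any) => void,
) {
  let handleCount = 0;
  return observable.subscribe({
    next(result: any) {
      try {
        cb(++handleCount, result);
      } catch (error) {
        reject(error);
      }
    },
    error: reject,
  });
}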
describe('QueryManager', () => {
// Standard "get id from object" method.
@@ -54,15 +54,13 @@ describe('QueryManager', () => {
config = {},
clientAwareness = {},
}: {
- link?: ApolloLink;
+ link: ApolloLink;
config?: ApolloReducerConfig;
clientAwareness?: { [key: string]: string };
}) => {
return new QueryManager({
- link: link || mockSingleLink(),
- store: new DataStore(
- new InMemoryCache({ addTypename: false, ...config }),
- ),
+ link,
+ cache: new InMemoryCache({ addTypename: false, ...config }),
clientAwareness,
});
};
@@ -70,7 +68,7 @@ describe('QueryManager', () => {
// Helper method that sets up a mockQueryManager and then passes on the
// results to an observer.
const assertWithObserver = ({
- done,
+ reject,
query,
variables = {},
queryOptions = {},
@@ -79,7 +77,7 @@ describe('QueryManager', () => {
delay,
observer,
}: {
- done: () => void;
+ reject: (reason: any) => any;
query: DocumentNode;
variables?: Object;
queryOptions?: Object;
@@ -88,7 +86,7 @@ describe('QueryManager', () => {
delay?: number;
observer: Observer<ApolloQueryResult<any>>;
}) => {
- const queryManager = mockQueryManager({
+ const queryManager = mockQueryManager(reject, {
request: { query, variables },
result,
error,
@@ -99,18 +97,20 @@ describe('QueryManager', () => {
queryOptions,
) as WatchQueryOptions;
return queryManager.watchQuery<any>(finalOptions).subscribe({
- next: wrap(done, observer.next!),
+ next: wrap(reject, observer.next!),
error: observer.error,
});
};
const mockMutation = ({
+ reject,
mutation,
data,
errors,
variables = {},
config = {},
}: {
+ reject: (reason: any) => any;
mutation: DocumentNode;
data?: Object;
errors?: GraphQLError[];
@@ -120,7 +120,7 @@ describe('QueryManager', () => {
const link = mockSingleLink({
request: { query: mutation, variables },
result: { data, errors },
- });
+ }).setOnError(reject);
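// Editor's note (assumption, inferred from usage): the
// mockSingleLink(...).setOnError(reject) chains used throughout these tests
// appear to register a handler the mock link invokes on unexpected errors,
// so a test fails via `reject` instead of swallowing them. Roughly:
//
//   class MockLinkSketch extends ApolloLink {
//     onError?: (error: any) => void;
//     setOnError(handler: (error: any) => void): this {
//       this.onError = handler;
//       return this; // chainable
//     }
//   }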
const queryManager = createQueryManager({
link,
config,
@@ -141,23 +141,28 @@ describe('QueryManager', () => {
};
const assertMutationRoundtrip = (opts: {
+ resolve: (result: any) => any;
+ reject: (reason: any) => any;
mutation: DocumentNode;
data: Object;
variables?: Object;
}) => {
+ const { resolve, reject } = opts;
return mockMutation(opts).then(({ result }) => {
expect(stripSymbols(result.data)).toEqual(opts.data);
- });
+ }).then(resolve, reject);
};
// Helper method that takes a query with a first response and a second response.
// Used to assert stuff about refetches.
const mockRefetch = ({
+ reject,
request,
firstResult,
secondResult,
thirdResult,
}: {
+ reject: (reason: any) => any;
request: Operation;
firstResult: ExecutionResult;
secondResult: ExecutionResult;
@@ -178,12 +183,12 @@ describe('QueryManager', () => {
args.push({ request, result: thirdResult });
}
- return mockQueryManager(...args);
+ return mockQueryManager(reject, ...args);
};
- it('handles GraphQL errors', done => {
+ itAsync('handles GraphQL errors', (resolve, reject) => {
assertWithObserver({
- done,
+ reject,
query: gql`
query people {
allPeople(first: 1) {
@@ -204,22 +209,22 @@ describe('QueryManager', () => {
},
observer: {
next() {
- done.fail(
+ reject(
new Error('Returned a result when it was supposed to error out'),
);
},
error(apolloError) {
expect(apolloError).toBeDefined();
- done();
+ resolve();
},
},
});
});
- it('handles GraphQL errors as data', done => {
+ itAsync('handles GraphQL errors as data', (resolve, reject) => {
assertWithObserver({
- done,
+ reject,
query: gql`
query people {
allPeople(first: 1) {
@@ -246,7 +251,7 @@ describe('QueryManager', () => {
expect(errors).toBeDefined();
expect(errors[0].name).toBe('Name');
expect(errors[0].message).toBe('This is an error message.');
- done();
+ resolve();
},
error(apolloError) {
throw new Error(
@@ -257,9 +262,9 @@ describe('QueryManager', () => {
});
});
- it('handles GraphQL errors with data returned', done => {
+ itAsync('handles GraphQL errors with data returned', (resolve, reject) => {
assertWithObserver({
- done,
+ reject,
query: gql`
query people {
allPeople(first: 1) {
@@ -286,22 +291,22 @@ describe('QueryManager', () => {
},
observer: {
next() {
- done.fail(
+ reject(
new Error('Returned data when it was supposed to error out.'),
);
},
error(apolloError) {
expect(apolloError).toBeDefined();
- done();
+ resolve();
},
},
});
});
- it('empty error array (handle non-spec-compliant server) #156', done => {
+ itAsync('empty error array (handle non-spec-compliant server) #156', (resolve, reject) => {
assertWithObserver({
- done,
+ reject,
query: gql`
query people {
allPeople(first: 1) {
@@ -325,7 +330,7 @@ describe('QueryManager', () => {
next(result) {
expect(result.data['allPeople'].people.name).toBe('Ada Lovelace');
expect(result['errors']).toBeUndefined();
- done();
+ resolve();
},
},
});
@@ -333,9 +338,9 @@ describe('QueryManager', () => {
// Easy to get into this state if you write an incorrect `formatError`
// function with graphql-server or express-graphql
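// Editor's illustration (hypothetical, not part of this patch): a buggy
// formatError that returns undefined for some errors serializes to
// `errors: [null]`, the non-spec-compliant payload this test simulates:
//
//   const formatError = (error: GraphQLError) => {
//     if (shouldHide(error)) return; // bug: undefined becomes null in JSON
//     return { message: error.message };
//   };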
- it('error array with nulls (handle non-spec-compliant server) #1185', done => {
+ itAsync('error array with nulls (handle non-spec-compliant server) #1185', (resolve, reject) => {
assertWithObserver({
- done,
+ reject,
query: gql`
query people {
allPeople(first: 1) {
@@ -350,20 +355,20 @@ describe('QueryManager', () => {
},
observer: {
next() {
- done.fail(new Error('Should not fire next for an error'));
+ reject(new Error('Should not fire next for an error'));
},
error(error) {
expect((error as any).graphQLErrors).toEqual([null]);
expect(error.message).toBe('GraphQL error: Error message not found.');
- done();
+ resolve();
},
},
});
});
- it('handles network errors', done => {
+ itAsync('handles network errors', (resolve, reject) => {
assertWithObserver({
- done,
+ reject,
query: gql`
query people {
allPeople(first: 1) {
@@ -376,19 +381,19 @@ describe('QueryManager', () => {
error: new Error('Network error'),
observer: {
next: () => {
- done.fail(new Error('Should not deliver result'));
+ reject(new Error('Should not deliver result'));
},
error: error => {
const apolloError = error as ApolloError;
expect(apolloError.networkError).toBeDefined();
expect(apolloError.networkError!.message).toMatch('Network error');
- done();
+ resolve();
},
},
});
});
- it('uses console.error to log unhandled errors', done => {
+ itAsync('uses console.error to log unhandled errors', (resolve, reject) => {
const oldError = console.error;
let printed: any;
console.error = (...args: any[]) => {
@@ -396,7 +401,7 @@ describe('QueryManager', () => {
};
assertWithObserver({
- done,
+ reject,
query: gql`
query people {
allPeople(first: 1) {
@@ -409,7 +414,7 @@ describe('QueryManager', () => {
error: new Error('Network error'),
observer: {
next: () => {
- done.fail(new Error('Should not deliver result'));
+ reject(new Error('Should not deliver result'));
},
},
});
@@ -417,15 +422,15 @@ describe('QueryManager', () => {
setTimeout(() => {
expect(printed[0]).toMatch(/error/);
console.error = oldError;
- done();
+ resolve();
}, 10);
});
// XXX this looks like a bug in zen-observable but we should figure
// out a solution for it
- xit('handles an unsubscribe action that happens before data returns', done => {
+ itAsync.skip('handles an unsubscribe action that happens before data returns', (resolve, reject) => {
const subscription = assertWithObserver({
- done,
+ reject,
query: gql`
query people {
allPeople(first: 1) {
@@ -438,10 +443,10 @@ describe('QueryManager', () => {
delay: 1000,
observer: {
next: () => {
- done.fail(new Error('Should not deliver result'));
+ reject(new Error('Should not deliver result'));
},
error: () => {
- done.fail(new Error('Should not deliver result'));
+ reject(new Error('Should not deliver result'));
},
},
});
@@ -449,7 +454,7 @@ describe('QueryManager', () => {
expect(subscription.unsubscribe).not.toThrow();
});
- it('supports interoperability with other Observable implementations like RxJS', done => {
+ itAsync('supports interoperability with other Observable implementations like RxJS', (resolve, reject) => {
const expResult = {
data: {
allPeople: {
@@ -462,7 +467,7 @@ describe('QueryManager', () => {
},
};
- const handle = mockWatchQuery({
+ const handle = mockWatchQuery(reject, {
request: {
query: gql`
query people {
@@ -480,18 +485,18 @@ describe('QueryManager', () => {
const observable = from(handle);
observable.pipe(map(result => assign({ fromRx: true }, result))).subscribe({
- next: wrap(done, newResult => {
+ next: wrap(reject, newResult => {
const expectedResult = assign(
{ fromRx: true, loading: false, networkStatus: 7, stale: false },
expResult,
);
expect(stripSymbols(newResult)).toEqual(expectedResult);
- done();
+ resolve();
}),
});
});
- it('allows you to subscribe twice to one query', done => {
+ itAsync('allows you to subscribe twice to one query', (resolve, reject) => {
const request = {
query: gql`
query fetchLuke($id: String) {
@@ -523,6 +528,7 @@ describe('QueryManager', () => {
};
const queryManager = mockQueryManager(
+ reject,
{
request,
result: { data: data1 },
@@ -574,16 +580,16 @@ describe('QueryManager', () => {
subOne.unsubscribe();
handle.refetch();
} catch (e) {
- done.fail(e);
+ reject(e);
}
}, 0);
} else if (subTwoCount === 3) {
setTimeout(() => {
try {
expect(subOneCount).toBe(2);
- done();
+ resolve();
} catch (e) {
- done.fail(e);
+ reject(e);
}
}, 0);
}
@@ -591,7 +597,8 @@ describe('QueryManager', () => {
});
});
});
- it('resolves all queries when one finishes after another', done => {
+
+ itAsync('resolves all queries when one finishes after another', (resolve, reject) => {
const request = {
query: gql`
query fetchLuke($id: String) {
@@ -649,6 +656,7 @@ describe('QueryManager', () => {
};
const queryManager = mockQueryManager(
+ reject,
{
request,
result: { data: data1 },
@@ -679,7 +687,7 @@ describe('QueryManager', () => {
ob2.subscribe(result => {
expect(stripSymbols(result.data)).toEqual(data2);
expect(finishCount).toBe(2);
- done();
+ resolve();
});
ob3.subscribe(result => {
expect(stripSymbols(result.data)).toEqual(data3);
@@ -687,7 +695,7 @@ describe('QueryManager', () => {
});
});
- it('allows you to refetch queries', () => {
+ itAsync('allows you to refetch queries', (resolve, reject) => {
const request = {
query: gql`
query fetchLuke($id: String) {
@@ -714,6 +722,7 @@ describe('QueryManager', () => {
};
const queryManager = mockRefetch({
+ reject,
request,
firstResult: { data: data1 },
secondResult: { data: data2 },
@@ -727,10 +736,10 @@ describe('QueryManager', () => {
observable.refetch();
},
result => expect(stripSymbols(result.data)).toEqual(data2),
- );
+ ).then(resolve, reject);
});
- it('will return referentially equivalent data if nothing changed in a refetch', done => {
+ itAsync('will return referentially equivalent data if nothing changed in a refetch', (resolve, reject) => {
const request = {
query: gql`
{
@@ -768,6 +777,7 @@ describe('QueryManager', () => {
};
const queryManager = mockRefetch({
+ reject,
request,
firstResult: { data: data1 },
secondResult: { data: data2 },
@@ -802,20 +812,20 @@ describe('QueryManager', () => {
expect(result.data.b).toEqual(firstResultData.b);
expect(result.data.d).not.toBe(firstResultData.d);
expect(result.data.d.f).toEqual(firstResultData.d.f);
- done();
+ resolve();
break;
default:
throw new Error('Next run too many times.');
}
} catch (error) {
- done.fail(error);
+ reject(error);
}
},
- error: error => done.fail(error),
+ error: reject,
});
});
- it('will return referentially equivalent data in getCurrentResult if nothing changed', done => {
+ itAsync('will return referentially equivalent data in getCurrentResult if nothing changed', (resolve, reject) => {
const request = {
query: gql`
{
@@ -840,7 +850,7 @@ describe('QueryManager', () => {
d: { e: 3, f: { g: 4 } },
};
- const queryManager = mockQueryManager({
+ const queryManager = mockQueryManager(reject, {
request,
result: { data: data1 },
});
@@ -854,16 +864,16 @@ describe('QueryManager', () => {
expect(stripSymbols(result.data)).toEqual(
stripSymbols(observable.getCurrentResult().data),
);
- done();
+ resolve();
} catch (error) {
- done.fail(error);
+ reject(error);
}
},
- error: error => done.fail(error),
+ error: reject,
});
});
- it('sets networkStatus to `refetch` when refetching', () => {
+ itAsync('sets networkStatus to `refetch` when refetching', (resolve, reject) => {
const request = {
query: gql`
query fetchLuke($id: String) {
@@ -890,6 +900,7 @@ describe('QueryManager', () => {
};
const queryManager = mockRefetch({
+ reject,
request,
firstResult: { data: data1 },
secondResult: { data: data2 },
@@ -907,10 +918,10 @@ describe('QueryManager', () => {
expect(result.networkStatus).toBe(NetworkStatus.ready);
expect(stripSymbols(result.data)).toEqual(data2);
},
- );
+ ).then(resolve, reject);
});
- it('allows you to refetch queries with promises', () => {
+ itAsync('allows you to refetch queries with promises', async (resolve, reject) => {
const request = {
query: gql`
{
@@ -933,6 +944,7 @@ describe('QueryManager', () => {
};
const queryManager = mockRefetch({
+ reject,
request,
firstResult: { data: data1 },
secondResult: { data: data2 },
@@ -943,10 +955,11 @@ describe('QueryManager', () => {
return handle
.refetch()
- .then(result => expect(stripSymbols(result.data)).toEqual(data2));
+ .then(result => expect(stripSymbols(result.data)).toEqual(data2))
+ .then(resolve, reject);
});
- it('allows you to refetch queries with new variables', () => {
+ itAsync('allows you to refetch queries with new variables', (resolve, reject) => {
const query = gql`
{
people_one(id: 1) {
@@ -988,6 +1001,7 @@ describe('QueryManager', () => {
};
const queryManager = mockQueryManager(
+ reject,
{
request: { query: query },
result: { data: data1 },
@@ -1035,10 +1049,10 @@ describe('QueryManager', () => {
result => {
expect(stripSymbols(result.data)).toEqual(data4);
},
- );
+ ).then(resolve, reject);
});
- it('only modifies varaibles when refetching', () => {
+ itAsync('only modifies variables when refetching', (resolve, reject) => {
const query = gql`
{
people_one(id: 1) {
@@ -1060,6 +1074,7 @@ describe('QueryManager', () => {
};
const queryManager = mockQueryManager(
+ reject,
{
request: { query: query },
result: { data: data1 },
@@ -1088,10 +1103,10 @@ describe('QueryManager', () => {
delete updatedOptions.variables;
expect(updatedOptions).toEqual(originalOptions);
},
- );
+ ).then(resolve, reject);
});
- it('continues to poll after refetch', () => {
+ itAsync('continues to poll after refetch', (resolve, reject) => {
const query = gql`
{
people_one(id: 1) {
@@ -1119,6 +1134,7 @@ describe('QueryManager', () => {
};
const queryManager = mockQueryManager(
+ reject,
{
request: { query },
result: { data: data1 },
@@ -1150,10 +1166,10 @@ describe('QueryManager', () => {
expect(stripSymbols(result.data)).toEqual(data3);
observable.stopPolling();
},
- );
+ ).then(resolve, reject);
});
- it('sets networkStatus to `poll` if a polling query is in flight', done => {
+ itAsync('sets networkStatus to `poll` if a polling query is in flight', (resolve, reject) => {
const query = gql`
{
people_one(id: 1) {
@@ -1181,6 +1197,7 @@ describe('QueryManager', () => {
};
const queryManager = mockQueryManager(
+ reject,
{
request: { query },
result: { data: data1 },
@@ -1211,13 +1228,13 @@ describe('QueryManager', () => {
} else if (counter === 2) {
expect(result.networkStatus).toBe(NetworkStatus.poll);
handle.unsubscribe();
- done();
+ resolve();
}
},
});
});
- it('supports returnPartialData #193', () => {
+ itAsync('supports returnPartialData #193', (resolve, reject) => {
const primeQuery = gql`
query primeQuery {
people_one(id: 1) {
@@ -1258,6 +1275,7 @@ describe('QueryManager', () => {
};
const queryManager = mockQueryManager(
+ error => { throw error },
{
request: { query: primeQuery },
result: { data: data1 },
@@ -1284,10 +1302,11 @@ describe('QueryManager', () => {
expect(result.data['luke'].name).toBe('Luke Skywalker');
expect(result.data).not.toHaveProperty('vader');
});
- });
+ })
+ .then(resolve, reject);
});
- it('can handle null values in arrays (#1551)', done => {
+ itAsync('can handle null values in arrays (#1551)', (resolve, reject) => {
const query = gql`
{
list {
@@ -1296,7 +1315,7 @@ describe('QueryManager', () => {
}
`;
const data = { list: [null, { value: 1 }] };
- const queryManager = mockQueryManager({
+ const queryManager = mockQueryManager(reject, {
request: { query },
result: { data },
});
@@ -1306,18 +1325,19 @@ describe('QueryManager', () => {
next: result => {
expect(stripSymbols(result.data)).toEqual(data);
expect(stripSymbols(observable.getCurrentResult().data)).toEqual(data);
- done();
+ resolve();
},
});
});
- it('should error if we pass fetchPolicy = cache-first or cache-only on a polling query', () => {
+ itAsync('should error if we pass fetchPolicy = cache-only on a polling query', (resolve, reject) => {
assertWithObserver({
- done: () => {},
+ reject,
observer: {
next() {},
error(error) {
expect(error).toBeInstanceOf(Error);
+ resolve();
},
},
query: gql`
@@ -1330,14 +1350,18 @@ describe('QueryManager', () => {
`,
queryOptions: { pollInterval: 200, fetchPolicy: 'cache-only' },
});
+ });
+
+ itAsync('should error if we pass fetchPolicy = cache-first on a polling query', (resolve, reject) => {
assertWithObserver({
- done: () => {},
+ reject,
observer: {
next() {
- // done.fail(new Error('Returned a result when it should not have.'));
+ // reject(new Error('Returned a result when it should not have.'));
},
error(error) {
expect(error).toBeInstanceOf(Error);
+ resolve();
},
},
query: gql`
@@ -1352,7 +1376,7 @@ describe('QueryManager', () => {
});
});
- it('supports cache-only fetchPolicy fetching only cached data', () => {
+ itAsync('supports cache-only fetchPolicy fetching only cached data', (resolve, reject) => {
const primeQuery = gql`
query primeQuery {
luke: people_one(id: 1) {
@@ -1378,7 +1402,7 @@ describe('QueryManager', () => {
},
};
- const queryManager = mockQueryManager({
+ const queryManager = mockQueryManager(reject, {
request: { query: primeQuery },
result: { data: data1 },
});
@@ -1398,11 +1422,14 @@ describe('QueryManager', () => {
expect(result.data['luke'].name).toBe('Luke Skywalker');
expect(result.data).not.toHaveProperty('vader');
});
- });
+ })
+ .then(resolve, reject);
});
- it('runs a mutation', () => {
+ itAsync('runs a mutation', (resolve, reject) => {
return assertMutationRoundtrip({
+ resolve,
+ reject,
mutation: gql`
mutation makeListPrivate {
makeListPrivate(id: "5")
@@ -1412,9 +1439,11 @@ describe('QueryManager', () => {
});
});
- it('runs a mutation even when errors is empty array #2912', () => {
+ itAsync('runs a mutation even when errors is empty array #2912', (resolve, reject) => {
const errors = [];
return assertMutationRoundtrip({
+ resolve,
+ reject,
mutation: gql`
mutation makeListPrivate {
makeListPrivate(id: "5")
@@ -1425,10 +1454,11 @@ describe('QueryManager', () => {
});
});
- it('runs a mutation with default errorPolicy equal to "none"', () => {
+ itAsync('runs a mutation with default errorPolicy equal to "none"', (resolve, reject) => {
const errors = [new GraphQLError('foo')];
return mockMutation({
+ reject,
mutation: gql`
mutation makeListPrivate {
makeListPrivate(id: "5")
@@ -1444,11 +1474,13 @@ describe('QueryManager', () => {
error => {
expect(error.graphQLErrors).toEqual(errors);
},
- );
+ ).then(resolve, reject);
});
- it('runs a mutation with variables', () => {
+ itAsync('runs a mutation with variables', (resolve, reject) => {
return assertMutationRoundtrip({
+ resolve,
+ reject,
mutation: gql`
mutation makeListPrivate($listId: ID!) {
makeListPrivate(id: $listId)
@@ -1461,7 +1493,7 @@ describe('QueryManager', () => {
const getIdField = ({ id }: { id: string }) => id;
- it('runs a mutation with object parameters and puts the result in the store', () => {
+ itAsync('runs a mutation with object parameters and puts the result in the store', (resolve, reject) => {
const data = {
makeListPrivate: {
id: '5',
@@ -1469,6 +1501,7 @@ describe('QueryManager', () => {
},
};
return mockMutation({
+ reject,
mutation: gql`
mutation makeListPrivate {
makeListPrivate(input: { id: "5" }) {
@@ -1484,15 +1517,15 @@ describe('QueryManager', () => {
// Make sure we updated the store with the new data
expect(
- (queryManager.dataStore.getCache() as InMemoryCache).extract()['5'],
+ queryManager.cache.extract()['5'],
).toEqual({
id: '5',
isPrivate: true,
});
- });
+ }).then(resolve, reject);
});
- it('runs a mutation and puts the result in the store', () => {
+ itAsync('runs a mutation and puts the result in the store', (resolve, reject) => {
const data = {
makeListPrivate: {
id: '5',
@@ -1501,6 +1534,7 @@ describe('QueryManager', () => {
};
return mockMutation({
+ reject,
mutation: gql`
mutation makeListPrivate {
makeListPrivate(id: "5") {
@@ -1516,15 +1550,15 @@ describe('QueryManager', () => {
// Make sure we updated the store with the new data
expect(
- (queryManager.dataStore.getCache() as InMemoryCache).extract()['5'],
+ queryManager.cache.extract()['5'],
).toEqual({
id: '5',
isPrivate: true,
});
- });
+ }).then(resolve, reject);
});
- it('runs a mutation and puts the result in the store with root key', () => {
+ itAsync('runs a mutation and puts the result in the store with root key', (resolve, reject) => {
const mutation = gql`
mutation makeListPrivate {
makeListPrivate(id: "5") {
@@ -1545,7 +1579,7 @@ describe('QueryManager', () => {
link: mockSingleLink({
request: { query: mutation },
result: { data },
- }),
+ }).setOnError(reject),
config: { dataIdFromObject: getIdField },
});
@@ -1558,15 +1592,15 @@ describe('QueryManager', () => {
// Make sure we updated the store with the new data
expect(
- (queryManager.dataStore.getCache() as InMemoryCache).extract()['5'],
+ queryManager.cache.extract()['5'],
).toEqual({
id: '5',
isPrivate: true,
});
- });
+ }).then(resolve, reject);
});
- it(`doesn't return data while query is loading`, () => {
+ itAsync(`doesn't return data while query is loading`, (resolve, reject) => {
const query1 = gql`
{
people_one(id: 1) {
@@ -1596,6 +1630,7 @@ describe('QueryManager', () => {
};
const queryManager = mockQueryManager(
+ reject,
{
request: { query: query1 },
result: { data: data1 },
@@ -1617,10 +1652,10 @@ describe('QueryManager', () => {
observableToPromise({ observable: observable2 }, result =>
expect(stripSymbols(result.data)).toEqual(data2),
),
- ]);
+ ]).then(resolve, reject);
});
- it(`updates result of previous query if the result of a new query overlaps`, () => {
+ itAsync('updates result of previous query if the result of a new query overlaps', (resolve, reject) => {
const query1 = gql`
{
people_one(id: 1) {
@@ -1654,6 +1689,7 @@ describe('QueryManager', () => {
};
const queryManager = mockQueryManager(
+ reject,
{
request: { query: query1 },
result: { data: data1 },
@@ -1666,25 +1702,25 @@ describe('QueryManager', () => {
);
const observable = queryManager.watchQuery<any>({ query: query1 });
- return observableToPromise(
- { observable },
- result => {
- expect(stripSymbols(result.data)).toEqual(data1);
+
+ subscribeAndCount(reject, observable, (handleCount, result) => {
+ if (handleCount === 1) {
+ expect(result.data).toEqual(data1);
queryManager.query<any>({ query: query2 });
- },
- // 3 because the query init action for the second query causes a callback
- result =>
- expect(stripSymbols(result.data)).toEqual({
+ } else if (handleCount === 2) {
+ expect(result.data).toEqual({
people_one: {
- name: 'Luke Skywalker has a new name',
+ name: 'Luke Skywalker',
age: 50,
},
- }),
- );
+ });
+ resolve();
+ }
+ });
});
- it('warns if you forget the template literal tag', async () => {
- const queryManager = mockQueryManager();
+ itAsync('warns if you forget the template literal tag', async (resolve, reject) => {
+ const queryManager = mockQueryManager(reject);
expect(() => {
queryManager.query<any>({
// Bamboozle TypeScript into letting us do this
@@ -1705,9 +1741,11 @@ describe('QueryManager', () => {
query: ('string' as any) as DocumentNode,
});
}).toThrowError(/wrap the query string in a "gql" tag/);
+
+ resolve();
});
- it('should transform queries correctly when given a QueryTransformer', done => {
+ itAsync('should transform queries correctly when given a QueryTransformer', (resolve, reject) => {
const query = gql`
query {
author {
@@ -1740,17 +1778,17 @@ describe('QueryManager', () => {
link: mockSingleLink({
request: { query: transformedQuery },
result: { data: transformedQueryResult },
- }),
+ }).setOnError(reject),
config: { addTypename: true },
})
.query({ query: query })
.then(result => {
expect(stripSymbols(result.data)).toEqual(transformedQueryResult);
- done();
- });
+ })
+ .then(resolve, reject);
});
- it('should transform mutations correctly', done => {
+ itAsync('should transform mutations correctly', (resolve, reject) => {
const mutation = gql`
mutation {
createAuthor(firstName: "John", lastName: "Smith") {
@@ -1781,17 +1819,17 @@ describe('QueryManager', () => {
link: mockSingleLink({
request: { query: transformedMutation },
result: { data: transformedMutationResult },
- }),
+ }).setOnError(reject),
config: { addTypename: true },
})
.mutate({ mutation: mutation })
.then(result => {
expect(stripSymbols(result.data)).toEqual(transformedMutationResult);
- done();
+ resolve();
});
});
- it('should reject a query promise given a network error', done => {
+ itAsync('should reject a query promise given a network error', (resolve, reject) => {
const query = gql`
query {
author {
@@ -1801,13 +1839,13 @@ describe('QueryManager', () => {
}
`;
const networkError = new Error('Network error');
- mockQueryManager({
+ mockQueryManager(reject, {
request: { query },
error: networkError,
})
.query({ query })
.then(() => {
- done.fail(new Error('Returned result on an errored fetchQuery'));
+ reject(new Error('Returned result on an errored fetchQuery'));
})
.catch(error => {
const apolloError = error as ApolloError;
@@ -1815,50 +1853,12 @@ describe('QueryManager', () => {
expect(apolloError.message).toBeDefined();
expect(apolloError.networkError).toBe(networkError);
expect(apolloError.graphQLErrors).toEqual([]);
- done();
- })
- .catch(done.fail);
- });
-
- it('should error when we attempt to give an id beginning with $', done => {
- const query = gql`
- query {
- author {
- firstName
- lastName
- id
- __typename
- }
- }
- `;
- const data = {
- author: {
- firstName: 'John',
- lastName: 'Smith',
- id: '129',
- __typename: 'Author',
- },
- };
- const reducerConfig = {
- dataIdFromObject: (x: any) => '$' + dataIdFromObject(x),
- };
- createQueryManager({
- link: mockSingleLink({
- request: { query },
- result: { data },
- }),
- config: reducerConfig,
- })
- .query({ query })
- .then(() => {
- done.fail(new Error('Returned a result when it should not have.'));
+ resolve();
})
- .catch(() => {
- done();
- });
+ .then(resolve, reject);
});
- it('should reject a query promise given a GraphQL error', () => {
+ itAsync('should reject a query promise given a GraphQL error', (resolve, reject) => {
const query = gql`
query {
author {
@@ -1868,7 +1868,7 @@ describe('QueryManager', () => {
}
`;
const graphQLErrors = [new Error('GraphQL error')];
- return mockQueryManager({
+ return mockQueryManager(reject, {
request: { query },
result: { errors: graphQLErrors },
})
@@ -1880,13 +1880,13 @@ describe('QueryManager', () => {
// don't use .catch() for this or it will catch the above error
error => {
const apolloError = error as ApolloError;
- expect(apolloError.graphQLErrors).toBe(graphQLErrors);
+ expect(apolloError.graphQLErrors).toEqual(graphQLErrors);
expect(!apolloError.networkError).toBeTruthy();
},
- );
+ ).then(resolve, reject);
});
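// Editor's aside on the "don't use .catch()" comment in the test above:
// with promise.then(onFulfilled, onRejected), the onRejected handler does
// not see errors thrown inside onFulfilled of the same .then, whereas a
// chained .catch() would:
//
//   p.then(ok, fail);        // fail sees only upstream rejections
//   p.then(ok).catch(fail);  // fail also catches errors thrown in ok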
- it('should not empty the store when a non-polling query fails due to a network error', done => {
+ itAsync('should not empty the store when a non-polling query fails due to a network error', (resolve, reject) => {
const query = gql`
query {
author {
@@ -1902,6 +1902,7 @@ describe('QueryManager', () => {
},
};
const queryManager = mockQueryManager(
+ reject,
{
request: { query },
result: { data },
@@ -1919,26 +1920,24 @@ describe('QueryManager', () => {
queryManager
.query<any>({ query, fetchPolicy: 'network-only' })
.then(() => {
- done.fail(
+ reject(
new Error('Returned a result when it was not supposed to.'),
);
})
.catch(() => {
          // make sure that the error thrown doesn't empty the store
expect(
- (queryManager.dataStore.getCache() as InMemoryCache).extract()[
- '$ROOT_QUERY.author'
- ] as Object,
- ).toEqual(data['author']);
- done();
+ queryManager.cache.extract().ROOT_QUERY.author,
+ ).toEqual(data.author);
+ resolve();
});
})
.catch(() => {
- done.fail(new Error('Threw an error on the first query.'));
+ reject(new Error('Threw an error on the first query.'));
});
});
- it('should be able to unsubscribe from a polling query subscription', () => {
+ itAsync('should be able to unsubscribe from a polling query subscription', (resolve, reject) => {
const query = gql`
query {
author {
@@ -1954,7 +1953,7 @@ describe('QueryManager', () => {
},
};
- const observable = mockQueryManager({
+ const observable = mockQueryManager(reject, {
request: { query },
result: { data },
}).watchQuery({ query, pollInterval: 20 });
@@ -1969,10 +1968,11 @@ describe('QueryManager', () => {
subscription.unsubscribe();
},
);
- return promise;
+
+ return promise.then(resolve, reject);
});
- it('should not empty the store when a polling query fails due to a network error', () => {
+ itAsync('should not empty the store when a polling query fails due to a network error', (resolve, reject) => {
const query = gql`
query {
author {
@@ -1988,6 +1988,7 @@ describe('QueryManager', () => {
},
};
const queryManager = mockQueryManager(
+ reject,
{
request: { query },
result: { data },
@@ -2009,23 +2010,21 @@ describe('QueryManager', () => {
errorCallbacks: [
() => {
expect(
- (queryManager.dataStore.getCache() as InMemoryCache).extract()[
- '$ROOT_QUERY.author'
- ] as Object,
+ queryManager.cache.extract().ROOT_QUERY.author,
).toEqual(data.author);
},
],
},
result => {
expect(stripSymbols(result.data)).toEqual(data);
- expect((queryManager.dataStore.getCache() as InMemoryCache).extract()[
- '$ROOT_QUERY.author'
- ] as Object).toEqual(data.author);
+ expect(
+ queryManager.cache.extract().ROOT_QUERY.author
+ ).toEqual(data.author);
},
- );
+ ).then(resolve, reject);
});
- it('should not fire next on an observer if there is no change in the result', () => {
+ itAsync('should not fire next on an observer if there is no change in the result', (resolve, reject) => {
const query = gql`
query {
author {
@@ -2042,6 +2041,7 @@ describe('QueryManager', () => {
},
};
const queryManager = mockQueryManager(
+ reject,
{
request: { query },
result: { data },
@@ -2062,10 +2062,10 @@ describe('QueryManager', () => {
queryManager.query<any>({ query }).then(result => {
expect(stripSymbols(result.data)).toEqual(data);
}),
- ]);
+ ]).then(resolve, reject);
});
- it('should store metadata with watched queries', () => {
+ itAsync('should store metadata with watched queries', (resolve, reject) => {
const query = gql`
query {
author {
@@ -2081,7 +2081,8 @@ describe('QueryManager', () => {
lastName: 'Smith',
},
};
- const queryManager = mockQueryManager({
+
+ const queryManager = mockQueryManager(reject, {
request: { query },
result: { data },
});
@@ -2090,15 +2091,16 @@ describe('QueryManager', () => {
query,
metadata: { foo: 'bar' },
});
+
return observableToPromise({ observable }, result => {
expect(stripSymbols(result.data)).toEqual(data);
expect(queryManager.queryStore.get(observable.queryId).metadata).toEqual({
foo: 'bar',
});
- });
+ }).then(resolve, reject);
});
- it('should return stale data when we orphan a real-id node in the store with a real-id node', () => {
+ itAsync('should return stale data when we orphan a real-id node in the store with a real-id node', (resolve, reject) => {
const query1 = gql`
query {
author {
@@ -2145,16 +2147,13 @@ describe('QueryManager', () => {
};
const reducerConfig = { dataIdFromObject };
const queryManager = createQueryManager({
- link: mockSingleLink(
- {
- request: { query: query1 },
- result: { data: data1 },
- },
- {
- request: { query: query2 },
- result: { data: data2 },
- },
- ),
+ link: mockSingleLink({
+ request: { query: query1 },
+ result: { data: data1 },
+ }, {
+ request: { query: query2 },
+ result: { data: data2 },
+ }).setOnError(reject),
config: reducerConfig,
});
@@ -2199,10 +2198,10 @@ describe('QueryManager', () => {
});
},
),
- ]);
+ ]).then(resolve, reject);
});
- it('should return partial data when configured when we orphan a real-id node in the store with a real-id node', () => {
+ itAsync('should return partial data when configured when we orphan a real-id node in the store with a real-id node', (resolve, reject) => {
const query1 = gql`
query {
author {
@@ -2249,16 +2248,13 @@ describe('QueryManager', () => {
};
const queryManager = createQueryManager({
- link: mockSingleLink(
- {
- request: { query: query1 },
- result: { data: data1 },
- },
- {
- request: { query: query2 },
- result: { data: data2 },
- },
- ),
+ link: mockSingleLink({
+ request: { query: query1 },
+ result: { data: data1 },
+ }, {
+ request: { query: query2 },
+ result: { data: data2 },
+ }).setOnError(reject),
});
const observable1 = queryManager.watchQuery<any>({
@@ -2313,10 +2309,10 @@ describe('QueryManager', () => {
});
},
),
- ]);
+ ]).then(resolve, reject);
});
- it('should error if we replace a real id node in the store with a generated id node', () => {
+ itAsync('should error if we replace a real id node in the store with a generated id node', (resolve, reject) => {
const queryWithId = gql`
query {
author {
@@ -2349,16 +2345,13 @@ describe('QueryManager', () => {
};
const reducerConfig = { dataIdFromObject };
const queryManager = createQueryManager({
- link: mockSingleLink(
- {
- request: { query: queryWithId },
- result: { data: dataWithId },
- },
- {
- request: { query: queryWithoutId },
- result: { data: dataWithoutId },
- },
- ),
+ link: mockSingleLink({
+ request: { query: queryWithId },
+ result: { data: dataWithId },
+ }, {
+ request: { query: queryWithoutId },
+ result: { data: dataWithoutId },
+ }).setOnError(reject),
config: reducerConfig,
});
@@ -2379,10 +2372,10 @@ describe('QueryManager', () => {
errorCallbacks: [error => expect(error.message).toMatch('Store error')],
wait: 60,
}),
- ]);
+ ]).then(resolve, reject);
});
- it('should not error when merging a generated id store node with a real id node', () => {
+ itAsync('should not error when replacing unidentified data with a normalized ID', (resolve, reject) => {
const queryWithoutId = gql`
query {
author {
@@ -2395,6 +2388,7 @@ describe('QueryManager', () => {
}
}
`;
+
const queryWithId = gql`
query {
author {
@@ -2406,6 +2400,7 @@ describe('QueryManager', () => {
}
}
`;
+
const dataWithoutId = {
author: {
name: {
@@ -2416,6 +2411,7 @@ describe('QueryManager', () => {
__typename: 'Author',
},
};
+
const dataWithId = {
author: {
name: {
@@ -2425,51 +2421,39 @@ describe('QueryManager', () => {
__typename: 'Author',
},
};
- const mergedDataWithoutId = {
- author: {
- name: {
- firstName: 'Jane',
- lastName: 'Smith',
- },
- age: '124',
- __typename: 'Author',
- },
- };
+
const queryManager = createQueryManager({
- link: mockSingleLink(
- {
- request: { query: queryWithoutId },
- result: { data: dataWithoutId },
- },
- {
- request: { query: queryWithId },
- result: { data: dataWithId },
- },
- ),
+ link: mockSingleLink({
+ request: { query: queryWithoutId },
+ result: { data: dataWithoutId },
+ }, {
+ request: { query: queryWithId },
+ result: { data: dataWithId },
+ }).setOnError(reject),
});
const observableWithId = queryManager.watchQuery<any>({
query: queryWithId,
});
+
const observableWithoutId = queryManager.watchQuery<any>({
query: queryWithoutId,
});
- // I'm not sure the waiting 60 here really is required, but the test used to do it
return Promise.all([
observableToPromise(
- { observable: observableWithoutId, wait: 120 },
+ { observable: observableWithoutId },
+ result => expect(stripSymbols(result.data)).toEqual(dataWithoutId),
result => expect(stripSymbols(result.data)).toEqual(dataWithoutId),
- result =>
- expect(stripSymbols(result.data)).toEqual(mergedDataWithoutId),
),
- observableToPromise({ observable: observableWithId, wait: 120 }, result =>
- expect(stripSymbols(result.data)).toEqual(dataWithId),
+ observableToPromise(
+ { observable: observableWithId },
+ result => expect(stripSymbols(result.data)).toEqual(dataWithId),
),
- ]);
+ ]).then(resolve, reject);
});
- it('exposes errors on a refetch as a rejection', done => {
+ itAsync('exposes errors on a refetch as a rejection', async (resolve, reject) => {
const request = {
query: gql`
{
@@ -2495,7 +2479,12 @@ describe('QueryManager', () => {
],
};
- const queryManager = mockRefetch({ request, firstResult, secondResult });
+ const queryManager = mockRefetch({
+ reject,
+ request,
+ firstResult,
+ secondResult,
+ });
const handle = queryManager.watchQuery<any>(request);
@@ -2515,17 +2504,15 @@ describe('QueryManager', () => {
handle
.refetch()
.then(() => {
- done.fail(new Error('Error on refetch should reject promise'));
+ reject(new Error('Error on refetch should reject promise'));
})
.catch(error => {
checkError(error);
- done();
- });
-
- // We have an unhandled error warning from the `subscribe` above, which has no `error` cb
+ })
+ .then(resolve, reject);
});
- it('does not return incomplete data when two queries for the same item are executed', () => {
+ itAsync('does not return incomplete data when two queries for the same item are executed', (resolve, reject) => {
const queryA = gql`
query queryA {
person(id: "abc") {
@@ -2565,9 +2552,9 @@ describe('QueryManager', () => {
const queryManager = new QueryManager<NormalizedCacheObject>({
link: mockSingleLink(
{ request: { query: queryA }, result: { data: dataA } },
- { request: { query: queryB }, result: { data: dataB }, delay: 20 },
- ),
- store: new DataStore(new InMemoryCache({})),
+ { request: { query: queryB }, result: { data: dataB }, delay: 20 }
+ ).setOnError(reject),
+ cache: new InMemoryCache({}),
ssrMode: true,
});
@@ -2603,11 +2590,11 @@ describe('QueryManager', () => {
partial: false,
});
}),
- ]);
+ ]).then(resolve, reject);
});
describe('polling queries', () => {
- it('allows you to poll queries', () => {
+ itAsync('allows you to poll queries', (resolve, reject) => {
const query = gql`
query fetchLuke($id: String) {
people_one(id: $id) {
@@ -2633,6 +2620,7 @@ describe('QueryManager', () => {
};
const queryManager = mockQueryManager(
+ reject,
{
request: { query, variables },
result: { data: data1 },
@@ -2653,10 +2641,10 @@ describe('QueryManager', () => {
{ observable },
result => expect(stripSymbols(result.data)).toEqual(data1),
result => expect(stripSymbols(result.data)).toEqual(data2),
- );
+ ).then(resolve, reject);
});
- it('does not poll during SSR', done => {
+ itAsync('does not poll during SSR', (resolve, reject) => {
const query = gql`
query fetchLuke($id: String) {
people_one(id: $id) {
@@ -2682,21 +2670,17 @@ describe('QueryManager', () => {
};
const queryManager = new QueryManager<NormalizedCacheObject>({
- link: mockSingleLink(
- {
- request: { query, variables },
- result: { data: data1 },
- },
- {
- request: { query, variables },
- result: { data: data2 },
- },
- {
- request: { query, variables },
- result: { data: data2 },
- },
- ),
- store: new DataStore(new InMemoryCache({ addTypename: false })),
+ link: mockSingleLink({
+ request: { query, variables },
+ result: { data: data1 },
+ }, {
+ request: { query, variables },
+ result: { data: data2 },
+ }, {
+ request: { query, variables },
+ result: { data: data2 },
+ }).setOnError(reject),
+ cache: new InMemoryCache({ addTypename: false }),
ssrMode: true,
});
@@ -2708,7 +2692,6 @@ describe('QueryManager', () => {
});
let count = 1;
- let doneCalled = false;
const subHandle = observable.subscribe({
next: (result: any) => {
switch (count) {
@@ -2716,22 +2699,19 @@ describe('QueryManager', () => {
expect(stripSymbols(result.data)).toEqual(data1);
setTimeout(() => {
subHandle.unsubscribe();
- if (!doneCalled) {
- done();
- }
+ resolve();
}, 15);
count++;
break;
case 2:
default:
- doneCalled = true;
- done.fail(new Error('Only expected one result, not multiple'));
+ reject(new Error('Only expected one result, not multiple'));
}
},
});
});
- it('should let you handle multiple polled queries and unsubscribe from one of them', done => {
+ itAsync('should let you handle multiple polled queries and unsubscribe from one of them', (resolve, reject) => {
const query1 = gql`
query {
author {
@@ -2782,6 +2762,7 @@ describe('QueryManager', () => {
},
};
const queryManager = mockQueryManager(
+ reject,
{
request: { query: query1 },
result: { data: data11 },
@@ -2843,11 +2824,11 @@ describe('QueryManager', () => {
subscription1.unsubscribe();
subscription2.unsubscribe();
- done();
+ resolve();
}, 400);
});
- it('allows you to unsubscribe from polled queries', () => {
+ itAsync('allows you to unsubscribe from polled queries', (resolve, reject) => {
const query = gql`
query fetchLuke($id: String) {
people_one(id: $id) {
@@ -2873,6 +2854,7 @@ describe('QueryManager', () => {
};
const queryManager = mockQueryManager(
+ reject,
{
request: { query, variables },
result: { data: data1 },
@@ -2903,10 +2885,10 @@ describe('QueryManager', () => {
},
);
- return promise;
+ return promise.then(resolve, reject);
});
- it('allows you to unsubscribe from polled query errors', done => {
+ itAsync('allows you to unsubscribe from polled query errors', (resolve, reject) => {
const query = gql`
query fetchLuke($id: String) {
people_one(id: $id) {
@@ -2932,6 +2914,7 @@ describe('QueryManager', () => {
};
const queryManager = mockQueryManager(
+ reject,
{
request: { query, variables },
result: { data: data1 },
@@ -2955,7 +2938,7 @@ describe('QueryManager', () => {
let isFinished;
process.once('unhandledRejection', () => {
- if (!isFinished) done.fail('unhandledRejection from network');
+ if (!isFinished) reject(new Error('unhandledRejection from network'));
});
const { promise, subscription } = observableToPromiseAndSubscription(
@@ -2975,12 +2958,12 @@ describe('QueryManager', () => {
promise.then(() => {
setTimeout(() => {
isFinished = true;
- done();
+ resolve();
}, 4);
});
});
- it('exposes a way to start a polling query', () => {
+ itAsync('exposes a way to start a polling query', (resolve, reject) => {
const query = gql`
query fetchLuke($id: String) {
people_one(id: $id) {
@@ -3006,6 +2989,7 @@ describe('QueryManager', () => {
};
const queryManager = mockQueryManager(
+ reject,
{
request: { query, variables },
result: { data: data1 },
@@ -3027,10 +3011,10 @@ describe('QueryManager', () => {
{ observable },
result => expect(stripSymbols(result.data)).toEqual(data1),
result => expect(stripSymbols(result.data)).toEqual(data2),
- );
+ ).then(resolve, reject);
});
- it('exposes a way to stop a polling query', () => {
+ itAsync('exposes a way to stop a polling query', (resolve, reject) => {
const query = gql`
query fetchLeia($id: String) {
people_one(id: $id) {
@@ -3056,6 +3040,7 @@ describe('QueryManager', () => {
};
const queryManager = mockQueryManager(
+ reject,
{
request: { query, variables },
result: { data: data1 },
@@ -3074,10 +3059,10 @@ describe('QueryManager', () => {
return observableToPromise({ observable, wait: 60 }, result => {
expect(stripSymbols(result.data)).toEqual(data1);
observable.stopPolling();
- });
+ }).then(resolve, reject);
});
- it('stopped polling queries still get updates', () => {
+ itAsync('stopped polling queries still get updates', (resolve, reject) => {
const query = gql`
query fetchLeia($id: String) {
people_one(id: $id) {
@@ -3103,6 +3088,7 @@ describe('QueryManager', () => {
};
const queryManager = mockQueryManager(
+ reject,
{
request: { query, variables },
result: { data: data1 },
@@ -3135,11 +3121,11 @@ describe('QueryManager', () => {
new Promise((_, reject) => {
timeout = (error: Error) => reject(error);
}),
- ]);
+ ]).then(resolve, reject);
});
});
describe('store resets', () => {
- it('returns a promise resolving when all queries have been refetched', () => {
+ itAsync('returns a promise resolving when all queries have been refetched', (resolve, reject) => {
const query = gql`
query {
author {
@@ -3187,24 +3173,19 @@ describe('QueryManager', () => {
};
const queryManager = createQueryManager({
- link: mockSingleLink(
- {
- request: { query },
- result: { data },
- },
- {
- request: { query: query2 },
- result: { data: data2 },
- },
- {
- request: { query },
- result: { data: dataChanged },
- },
- {
- request: { query: query2 },
- result: { data: data2Changed },
- },
- ),
+ link: mockSingleLink({
+ request: { query },
+ result: { data },
+ }, {
+ request: { query: query2 },
+ result: { data: data2 },
+ }, {
+ request: { query },
+ result: { data: dataChanged },
+ }, {
+ request: { query: query2 },
+ result: { data: data2Changed },
+ }).setOnError(reject),
});
const observable = queryManager.watchQuery<any>({ query });
@@ -3230,19 +3211,23 @@ describe('QueryManager', () => {
expect(result2.partial).toBe(false);
expect(stripSymbols(result2.data)).toEqual(data2Changed);
});
- });
+ }).then(resolve, reject);
});
- it('should change the store state to an empty state', () => {
- const queryManager = createQueryManager({});
+ itAsync('should change the store state to an empty state', (resolve, reject) => {
+ const queryManager = createQueryManager({
+ link: mockSingleLink().setOnError(reject),
+ });
queryManager.resetStore();
expect(
- (queryManager.dataStore.getCache() as InMemoryCache).extract(),
+ queryManager.cache.extract(),
).toEqual({});
expect(queryManager.queryStore.getStore()).toEqual({});
expect(queryManager.mutationStore.getStore()).toEqual({});
+
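+ // The store-clearing assertions above are synchronous, so resolve immediately.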
+ resolve();
});
xit('should only refetch once when we store reset', () => {
@@ -3303,7 +3288,7 @@ describe('QueryManager', () => {
);
});
- it('should not refetch torn-down queries', done => {
+ itAsync('should not refetch torn-down queries', (resolve, reject) => {
let queryManager: QueryManager<NormalizedCacheObject>;
let observable: ObservableQuery<any>;
const query = gql`
@@ -3346,7 +3331,7 @@ describe('QueryManager', () => {
setTimeout(() => {
expect(timesFired).toBe(1);
- done();
+ resolve();
}, 50);
});
});
@@ -3415,7 +3400,7 @@ describe('QueryManager', () => {
);
});
- it('should not error on a stopped query()', done => {
+ itAsync('should not error on a stopped query()', (resolve, reject) => {
let queryManager: QueryManager<NormalizedCacheObject>;
const query = gql`
query {
@@ -3445,13 +3430,13 @@ describe('QueryManager', () => {
const queryId = '1';
queryManager
.fetchQuery(queryId, { query })
- .catch(e => done.fail('Exception thrown for stopped query'));
+ .catch(() => reject(new Error('Exception thrown for stopped query')));
queryManager.removeQuery(queryId);
- queryManager.resetStore().then(() => done());
+ queryManager.resetStore().then(resolve, reject);
});
- it('should throw an error on an inflight fetch query if the store is reset', done => {
+ itAsync('should throw an error on an inflight fetch query if the store is reset', (resolve, reject) => {
const query = gql`
query {
author {
@@ -3466,7 +3451,7 @@ describe('QueryManager', () => {
lastName: 'Smith',
},
};
- const queryManager = mockQueryManager({
+ const queryManager = mockQueryManager(reject, {
request: { query },
result: { data },
delay: 10000, //i.e. forever
@@ -3474,16 +3459,18 @@ describe('QueryManager', () => {
queryManager
.fetchQuery('made up id', { query })
.then(() => {
- done.fail(new Error('Returned a result.'));
+ reject(new Error('Returned a result.'));
})
.catch(error => {
expect(error.message).toMatch('Store reset');
- done();
+ resolve();
});
- queryManager.resetStore();
+ // Need to delay the reset at least until the fetchRequest method
+ // has had a chance to enter this request into fetchQueryRejectFns.
+ setTimeout(() => queryManager.resetStore(), 100);
});
- it('should call refetch on a mocked Observable if the store is reset', done => {
+ itAsync('should call refetch on a mocked Observable if the store is reset', (resolve, reject) => {
const query = gql`
query {
author {
@@ -3492,11 +3479,11 @@ describe('QueryManager', () => {
}
}
`;
- const queryManager = mockQueryManager();
+ const queryManager = mockQueryManager(reject);
const mockObservableQuery: ObservableQuery<any> = ({
refetch(_: any): Promise<ExecutionResult> {
- done();
+ resolve();
return null as never;
},
options: {
@@ -3511,7 +3498,7 @@ describe('QueryManager', () => {
queryManager.resetStore();
});
- it('should not call refetch on a cache-only Observable if the store is reset', done => {
+ itAsync('should not call refetch on a cache-only Observable if the store is reset', (resolve, reject) => {
const query = gql`
query {
author {
@@ -3520,7 +3507,9 @@ describe('QueryManager', () => {
}
}
`;
- const queryManager = createQueryManager({});
+ const queryManager = createQueryManager({
+ link: mockSingleLink().setOnError(reject),
+ });
const options = assign({}) as WatchQueryOptions;
options.fetchPolicy = 'cache-only';
options.query = query;
@@ -3541,11 +3530,11 @@ describe('QueryManager', () => {
queryManager.resetStore();
setTimeout(() => {
expect(refetchCount).toEqual(0);
- done();
+ resolve();
}, 50);
});
- it('should not call refetch on a standby Observable if the store is reset', done => {
+ itAsync('should not call refetch on a standby Observable if the store is reset', (resolve, reject) => {
const query = gql`
query {
author {
@@ -3554,7 +3543,9 @@ describe('QueryManager', () => {
}
}
`;
- const queryManager = createQueryManager({});
+ const queryManager = createQueryManager({
+ link: mockSingleLink().setOnError(reject),
+ });
const options = assign({}) as WatchQueryOptions;
options.fetchPolicy = 'standby';
options.query = query;
@@ -3575,11 +3566,11 @@ describe('QueryManager', () => {
queryManager.resetStore();
setTimeout(() => {
expect(refetchCount).toEqual(0);
- done();
+ resolve();
}, 50);
});
- it('should throw an error on an inflight query() if the store is reset', done => {
+ itAsync('should throw an error on an inflight query() if the store is reset', (resolve, reject) => {
let queryManager: QueryManager<NormalizedCacheObject>;
const query = gql`
query {
@@ -3610,15 +3601,15 @@ describe('QueryManager', () => {
queryManager
.query<any>({ query })
.then(() => {
- done.fail(new Error('query() gave results on a store reset'));
+ reject(new Error('query() gave results on a store reset'));
})
.catch(() => {
- done();
+ resolve();
});
});
});
describe('refetching observed queries', () => {
- it('returns a promise resolving when all queries have been refetched', () => {
+ itAsync('returns a promise resolving when all queries have been refetched', (resolve, reject) => {
const query = gql`
query {
author {
@@ -3666,24 +3657,19 @@ describe('QueryManager', () => {
};
const queryManager = createQueryManager({
- link: mockSingleLink(
- {
- request: { query },
- result: { data },
- },
- {
- request: { query: query2 },
- result: { data: data2 },
- },
- {
- request: { query },
- result: { data: dataChanged },
- },
- {
- request: { query: query2 },
- result: { data: data2Changed },
- },
- ),
+ link: mockSingleLink({
+ request: { query },
+ result: { data },
+ }, {
+ request: { query: query2 },
+ result: { data: data2 },
+ }, {
+ request: { query },
+ result: { data: dataChanged },
+ }, {
+ request: { query: query2 },
+ result: { data: data2Changed },
+ }).setOnError(reject),
});
const observable = queryManager.watchQuery<any>({ query });
@@ -3709,10 +3695,10 @@ describe('QueryManager', () => {
expect(result2.partial).toBe(false);
expect(stripSymbols(result2.data)).toEqual(data2Changed);
});
- });
+ }).then(resolve, reject);
});
- it('should only refetch once when we refetch observable queries', done => {
+ itAsync('should only refetch once when we refetch observable queries', (resolve, reject) => {
let queryManager: QueryManager<NormalizedCacheObject>;
const query = gql`
query {
@@ -3766,14 +3752,14 @@ describe('QueryManager', () => {
// only refetch once and make sure data has changed
expect(stripSymbols(result.data)).toEqual(data2);
expect(timesFired).toBe(2);
- done();
+ resolve();
},
).catch(e => {
- done.fail(e);
+ reject(e);
});
});
- it('should not refetch torn-down queries', done => {
+ itAsync('should not refetch torn-down queries', (resolve, reject) => {
let queryManager: QueryManager<NormalizedCacheObject>;
let observable: ObservableQuery<any>;
const query = gql`
@@ -3816,7 +3802,7 @@ describe('QueryManager', () => {
setTimeout(() => {
expect(timesFired).toBe(1);
- done();
+ resolve();
}, 50);
});
});
@@ -3871,7 +3857,7 @@ describe('QueryManager', () => {
);
});
- it('should NOT throw an error on an inflight fetch query if the observable queries are refetched', done => {
+ itAsync('should NOT throw an error on an inflight fetch query if the observable queries are refetched', (resolve, reject) => {
const query = gql`
query {
author {
@@ -3886,23 +3872,21 @@ describe('QueryManager', () => {
lastName: 'Smith',
},
};
- const queryManager = mockQueryManager({
+ const queryManager = mockQueryManager(reject, {
request: { query },
result: { data },
delay: 100,
});
queryManager
.fetchQuery('made up id', { query })
- .then(() => {
- done();
- })
+ .then(resolve)
.catch(error => {
- done.fail(new Error('Should not return an error'));
+ reject(new Error('Should not return an error'));
});
queryManager.reFetchObservableQueries();
});
- it('should call refetch on a mocked Observable if the observed queries are refetched', done => {
+ itAsync('should call refetch on a mocked Observable if the observed queries are refetched', (resolve, reject) => {
const query = gql`
query {
author {
@@ -3911,11 +3895,11 @@ describe('QueryManager', () => {
}
}
`;
- const queryManager = mockQueryManager();
+ const queryManager = mockQueryManager(reject);
const mockObservableQuery: ObservableQuery<any> = ({
refetch(_: any): Promise<ExecutionResult> {
- done();
+ resolve();
return null as never;
},
options: {
@@ -3930,7 +3914,7 @@ describe('QueryManager', () => {
queryManager.reFetchObservableQueries();
});
- it('should not call refetch on a cache-only Observable if the observed queries are refetched', done => {
+ itAsync('should not call refetch on a cache-only Observable if the observed queries are refetched', (resolve, reject) => {
const query = gql`
query {
author {
@@ -3939,7 +3923,9 @@ describe('QueryManager', () => {
}
}
`;
- const queryManager = createQueryManager({});
+ const queryManager = createQueryManager({
+ link: mockSingleLink().setOnError(reject),
+ });
const options = assign({}) as WatchQueryOptions;
options.fetchPolicy = 'cache-only';
options.query = query;
@@ -3960,11 +3946,11 @@ describe('QueryManager', () => {
queryManager.reFetchObservableQueries();
setTimeout(() => {
expect(refetchCount).toEqual(0);
- done();
+ resolve();
}, 50);
});
- it('should not call refetch on a standby Observable if the observed queries are refetched', done => {
+ itAsync('should not call refetch on a standby Observable if the observed queries are refetched', (resolve, reject) => {
const query = gql`
query {
author {
@@ -3973,7 +3959,9 @@ describe('QueryManager', () => {
}
}
`;
- const queryManager = createQueryManager({});
+ const queryManager = createQueryManager({
+ link: mockSingleLink().setOnError(reject),
+ });
const options = assign({}) as WatchQueryOptions;
options.fetchPolicy = 'standby';
options.query = query;
@@ -3994,11 +3982,11 @@ describe('QueryManager', () => {
queryManager.reFetchObservableQueries();
setTimeout(() => {
expect(refetchCount).toEqual(0);
- done();
+ resolve();
}, 50);
});
- it('should refetch on a standby Observable if the observed queries are refetched and the includeStandby parameter is set to true', done => {
+ itAsync('should refetch on a standby Observable if the observed queries are refetched and the includeStandby parameter is set to true', (resolve, reject) => {
const query = gql`
query {
author {
@@ -4007,7 +3995,9 @@ describe('QueryManager', () => {
}
}
`;
- const queryManager = createQueryManager({});
+ const queryManager = createQueryManager({
+ link: mockSingleLink().setOnError(reject),
+ });
const options = assign({}) as WatchQueryOptions;
options.fetchPolicy = 'standby';
options.query = query;
@@ -4029,11 +4019,11 @@ describe('QueryManager', () => {
queryManager.reFetchObservableQueries(includeStandBy);
setTimeout(() => {
expect(refetchCount).toEqual(1);
- done();
+ resolve();
}, 50);
});
- it('should NOT throw an error on an inflight query() if the observed queries are refetched', done => {
+ itAsync('should NOT throw an error on an inflight query() if the observed queries are refetched', (resolve, reject) => {
let queryManager: QueryManager<NormalizedCacheObject>;
const query = gql`
query {
@@ -4064,10 +4054,10 @@ describe('QueryManager', () => {
queryManager
.query<any>({ query })
.then(() => {
- done();
+ resolve();
})
.catch(e => {
- done.fail(
+ reject(
new Error(
'query() should not throw error when refetching observed queriest',
),
@@ -4075,8 +4065,9 @@ describe('QueryManager', () => {
});
});
});
+
describe('loading state', () => {
- it('should be passed as false if we are not watching a query', () => {
+ itAsync('should be passed as false if we are not watching a query', (resolve, reject) => {
const query = gql`
query {
fortuneCookie
@@ -4085,7 +4076,7 @@ describe('QueryManager', () => {
const data = {
fortuneCookie: 'Buy it',
};
- return mockQueryManager({
+ return mockQueryManager(reject, {
request: { query },
result: { data },
})
@@ -4093,10 +4084,11 @@ describe('QueryManager', () => {
.then(result => {
expect(!result.loading).toBeTruthy();
expect(stripSymbols(result.data)).toEqual(data);
- });
+ })
+ .then(resolve, reject);
});
- it('should be passed to the observer as true if we are returning partial data', () => {
+ itAsync('should be passed to the observer as true if we are returning partial data', (resolve, reject) => {
const fortuneCookie =
'You must stick to your goal but rethink your approach';
const primeQuery = gql`
@@ -4118,6 +4110,7 @@ describe('QueryManager', () => {
const fullData = { fortuneCookie, author };
const queryManager = mockQueryManager(
+ reject,
{
request: { query },
result: { data: fullData },
@@ -4148,12 +4141,13 @@ describe('QueryManager', () => {
expect(result.data).toEqual(fullData);
},
);
- });
+ })
+ .then(resolve, reject);
});
- it('should be passed to the observer as false if we are returning all the data', done => {
+ itAsync('should be passed to the observer as false if we are returning all the data', (resolve, reject) => {
assertWithObserver({
- done,
+ reject,
query: gql`
query {
author {
@@ -4173,13 +4167,13 @@ describe('QueryManager', () => {
observer: {
next(result) {
expect(!result.loading).toBeTruthy();
- done();
+ resolve();
},
},
});
});
- it('will update on `resetStore`', done => {
+ itAsync('will update on `resetStore`', (resolve, reject) => {
const testQuery = gql`
query {
author {
@@ -4201,6 +4195,7 @@ describe('QueryManager', () => {
},
};
const queryManager = mockQueryManager(
+ reject,
{
request: { query: testQuery },
result: { data: data1 },
@@ -4230,17 +4225,17 @@ describe('QueryManager', () => {
case 1:
expect(result.loading).toBe(false);
expect(stripSymbols(result.data)).toEqual(data2);
- done();
+ resolve();
break;
default:
- done.fail(new Error('`next` was called to many times.'));
+ reject(new Error('`next` was called too many times.'));
}
},
- error: error => done.fail(error),
+ error: error => reject(error),
});
});
- it('will be true when partial data may be returned', done => {
+ itAsync('will be true when partial data may be returned', (resolve, reject) => {
const query1 = gql`{
a { x1 y1 z1 }
}`;
@@ -4256,6 +4251,7 @@ describe('QueryManager', () => {
b: { x2: 3, y2: 2, z2: 1 },
};
const queryManager = mockQueryManager(
+ reject,
{
request: { query: query1 },
result: { data: data1 },
@@ -4286,16 +4282,15 @@ describe('QueryManager', () => {
case 1:
expect(result2.loading).toBe(false);
expect(result2.data).toEqual(data2);
- done();
+ resolve();
break;
default:
- done(new Error('`next` was called to many times.'));
+ reject(new Error('`next` was called too many times.'));
}
},
- error: error => done(error),
+ error: reject,
});
- })
- .catch(done);
+ }).then(resolve, reject);
});
});
@@ -4304,7 +4299,7 @@ describe('QueryManager', () => {
let warned: any;
let timesWarned = 0;
- beforeEach(done => {
+ beforeEach(() => {
// clear warnings
warned = null;
timesWarned = 0;
@@ -4313,10 +4308,9 @@ describe('QueryManager', () => {
warned = args;
timesWarned++;
};
- done();
});
- it('should refetch the right query when a result is successfully returned', () => {
+ itAsync('should refetch the right query when a result is successfully returned', (resolve, reject) => {
const mutation = gql`
mutation changeAuthorName {
changeAuthorName(newName: "Jack Smith") {
@@ -4353,6 +4347,7 @@ describe('QueryManager', () => {
};
const variables = { id: '1234' };
const queryManager = mockQueryManager(
+ reject,
{
request: { query, variables },
result: { data },
@@ -4383,10 +4378,10 @@ describe('QueryManager', () => {
);
expect(stripSymbols(result.data)).toEqual(secondReqData);
},
- );
+ ).then(resolve, reject);
});
- it('should not warn and continue when an unknown query name is asked to refetch', () => {
+ itAsync('should not warn and continue when an unknown query name is asked to refetch', (resolve, reject) => {
const mutation = gql`
mutation changeAuthorName {
changeAuthorName(newName: "Jack Smith") {
@@ -4422,6 +4417,7 @@ describe('QueryManager', () => {
},
};
const queryManager = mockQueryManager(
+ reject,
{
request: { query },
result: { data },
@@ -4452,10 +4448,10 @@ describe('QueryManager', () => {
expect(stripSymbols(result.data)).toEqual(secondReqData);
expect(timesWarned).toBe(0);
},
- );
+ ).then(resolve, reject);
});
- it('should ignore without warning a query name that is asked to refetch with no active subscriptions', () => {
+ itAsync('should ignore without warning a query name that is asked to refetch with no active subscriptions', (resolve, reject) => {
const mutation = gql`
mutation changeAuthorName {
changeAuthorName(newName: "Jack Smith") {
@@ -4491,6 +4487,7 @@ describe('QueryManager', () => {
},
};
const queryManager = mockQueryManager(
+ reject,
{
request: { query },
result: { data },
@@ -4516,10 +4513,11 @@ describe('QueryManager', () => {
refetchQueries: ['getAuthors'],
});
})
- .then(() => expect(timesWarned).toBe(0));
+ .then(() => expect(timesWarned).toBe(0))
+ .then(resolve, reject);
});
- it('also works with a query document and variables', done => {
+ itAsync('also works with a query document and variables', (resolve, reject) => {
const mutation = gql`
mutation changeAuthorName($id: ID!) {
changeAuthorName(newName: "Jack Smith", id: $id) {
@@ -4558,6 +4556,7 @@ describe('QueryManager', () => {
const variables = { id: '1234' };
const mutationVariables = { id: '2345' };
const queryManager = mockQueryManager(
+ reject,
{
request: { query, variables },
result: { data },
@@ -4592,7 +4591,7 @@ describe('QueryManager', () => {
expect(stripSymbols(observable.getCurrentResult().data)).toEqual(
secondReqData,
);
- done();
+ resolve();
}, 1);
expect(resultData).toEqual(secondReqData);
@@ -4603,7 +4602,7 @@ describe('QueryManager', () => {
});
});
- it('also works with a conditional function that returns false', () => {
+ itAsync('also works with a conditional function that returns false', (resolve, reject) => {
const mutation = gql`
mutation changeAuthorName {
changeAuthorName(newName: "Jack Smith") {
@@ -4639,6 +4638,7 @@ describe('QueryManager', () => {
},
};
const queryManager = mockQueryManager(
+ reject,
{
request: { query },
result: { data },
@@ -4661,9 +4661,10 @@ describe('QueryManager', () => {
return observableToPromise({ observable }, result => {
expect(stripSymbols(result.data)).toEqual(data);
queryManager.mutate({ mutation, refetchQueries: conditional });
- });
+ }).then(resolve, reject);
});
- it('also works with a conditional function that returns an array of refetches', () => {
+
+ itAsync('also works with a conditional function that returns an array of refetches', (resolve, reject) => {
const mutation = gql`
mutation changeAuthorName {
changeAuthorName(newName: "Jack Smith") {
@@ -4699,6 +4700,7 @@ describe('QueryManager', () => {
},
};
const queryManager = mockQueryManager(
+ reject,
{
request: { query },
result: { data },
@@ -4725,10 +4727,10 @@ describe('QueryManager', () => {
queryManager.mutate({ mutation, refetchQueries: conditional });
},
result => expect(stripSymbols(result.data)).toEqual(secondReqData),
- );
+ ).then(resolve, reject);
});
- it('should refetch using the original query context (if any)', () => {
+ itAsync('should refetch using the original query context (if any)', (resolve, reject) => {
const mutation = gql`
mutation changeAuthorName {
changeAuthorName(newName: "Jack Smith") {
@@ -4765,6 +4767,7 @@ describe('QueryManager', () => {
};
const variables = { id: '1234' };
const queryManager = mockQueryManager(
+ reject,
{
request: { query, variables },
result: { data },
@@ -4804,10 +4807,10 @@ describe('QueryManager', () => {
expect(context.headers).not.toBeUndefined();
expect(context.headers.someHeader).toEqual(headers.someHeader);
},
- );
+ ).then(resolve, reject);
});
- it('should refetch using the specified context, if provided', () => {
+ itAsync('should refetch using the specified context, if provided', (resolve, reject) => {
const mutation = gql`
mutation changeAuthorName {
changeAuthorName(newName: "Jack Smith") {
@@ -4844,6 +4847,7 @@ describe('QueryManager', () => {
};
const variables = { id: '1234' };
const queryManager = mockQueryManager(
+ reject,
{
request: { query, variables },
result: { data },
@@ -4889,18 +4893,16 @@ describe('QueryManager', () => {
expect(context.headers).not.toBeUndefined();
expect(context.headers.someHeader).toEqual(headers.someHeader);
},
- );
+ ).then(resolve, reject);
});
- afterEach(done => {
- // restore standard method
+ afterEach(() => {
console.warn = oldWarn;
- done();
});
});
describe('awaitRefetchQueries', () => {
- function awaitRefetchTest({ awaitRefetchQueries }) {
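+ // Returns a Promise so the tests below can simply return awaitRefetchTest(...).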
+ const awaitRefetchTest = ({ awaitRefetchQueries }) => new Promise((resolve, reject) => {
const query = gql`
query getAuthors($id: ID!) {
author(id: $id) {
@@ -4943,6 +4945,7 @@ describe('QueryManager', () => {
const variables = { id: '1234' };
const queryManager = mockQueryManager(
+ reject,
{
request: { query, variables },
result: { data: queryData },
@@ -4990,29 +4993,30 @@ describe('QueryManager', () => {
);
expect(stripSymbols(result.data)).toEqual(secondReqData);
},
- );
- }
+ ).then(resolve, reject);
+ });
it(
'should not wait for `refetchQueries` to complete before resolving ' +
- 'the mutation, when `awaitRefetchQueries` is falsy',
- () => {
- awaitRefetchTest({ awaitRefetchQueries: undefined });
- awaitRefetchTest({ awaitRefetchQueries: false });
- },
+ 'the mutation, when `awaitRefetchQueries` is undefined',
+ () => awaitRefetchTest({ awaitRefetchQueries: void 0 })
+ );
+
+ it(
+ 'should not wait for `refetchQueries` to complete before resolving ' +
+ 'the mutation, when `awaitRefetchQueries` is false',
+ () => awaitRefetchTest({ awaitRefetchQueries: false })
);
it(
'should wait for `refetchQueries` to complete before resolving ' +
'the mutation, when `awaitRefetchQueries` is `true`',
- () => {
- awaitRefetchTest({ awaitRefetchQueries: true });
- },
+ () => awaitRefetchTest({ awaitRefetchQueries: true })
);
});
describe('store watchers', () => {
- it('does not fill up the store on resolved queries', () => {
+ itAsync('does not fill up the store on resolved queries', (resolve, reject) => {
const query1 = gql`
query One {
one
@@ -5038,13 +5042,13 @@ describe('QueryManager', () => {
{ request: { query: query1 }, result: { data: { one: 1 } } },
{ request: { query: query2 }, result: { data: { two: 2 } } },
{ request: { query: query3 }, result: { data: { three: 3 } } },
- { request: { query: query4 }, result: { data: { four: 4 } } },
- );
+ { request: { query: query4 }, result: { data: { four: 4 } } }
+ ).setOnError(reject);
const cache = new InMemoryCache();
const queryManager = new QueryManager<NormalizedCacheObject>({
link,
- store: new DataStore(cache),
+ cache,
});
return queryManager
@@ -5065,15 +5069,15 @@ describe('QueryManager', () => {
})
.then(() => {
expect(cache.watches.size).toBe(0);
- });
+ })
+ .then(resolve, reject);
});
});
describe('`no-cache` handling', () => {
- it(
- 'should return a query result (if one exists) when a `no-cache` ' +
- 'fetch policy is used',
- done => {
+ itAsync(
+ 'should return a query result (if one exists) when a `no-cache` fetch policy is used',
+ (resolve, reject) => {
const query = gql`
query {
author {
@@ -5094,7 +5098,7 @@ describe('QueryManager', () => {
link: mockSingleLink({
request: { query },
result: { data },
- }),
+ }).setOnError(reject),
});
const observable = queryManager.watchQuery<any>({
@@ -5105,14 +5109,14 @@ describe('QueryManager', () => {
expect(stripSymbols(result.data)).toEqual(data);
const currentResult = queryManager.getCurrentQueryResult(observable);
expect(currentResult.data).toEqual(data);
- done();
+ resolve();
});
},
);
});
describe('client awareness', () => {
- it('should pass client awareness settings into the link chain via context', done => {
+ itAsync('should pass client awareness settings into the link chain via context', (resolve, reject) => {
const query = gql`
query {
author {
@@ -5132,7 +5136,7 @@ describe('QueryManager', () => {
const link = mockSingleLink({
request: { query },
result: { data },
- });
+ }).setOnError(reject);
const clientAwareness = {
name: 'Test',
@@ -5153,7 +5157,7 @@ describe('QueryManager', () => {
const context = link.operation.getContext();
expect(context.clientAwareness).toBeDefined();
expect(context.clientAwareness).toEqual(clientAwareness);
- done();
+ resolve();
});
});
});
diff --git a/packages/apollo-client/src/core/__tests__/QueryManager/links.ts b/src/core/__tests__/QueryManager/links.ts
similarity index 80%
rename from packages/apollo-client/src/core/__tests__/QueryManager/links.ts
rename to src/core/__tests__/QueryManager/links.ts
--- a/packages/apollo-client/src/core/__tests__/QueryManager/links.ts
+++ b/src/core/__tests__/QueryManager/links.ts
@@ -1,36 +1,17 @@
// externals
-import Rx from 'rxjs';
-import { assign } from 'lodash';
import gql from 'graphql-tag';
-import { DocumentNode, ExecutionResult } from 'graphql';
-import { ApolloLink, Operation, Observable } from 'apollo-link';
-import { InMemoryCache, ApolloReducerConfig } from 'apollo-cache-inmemory';
-import { stripSymbols } from 'apollo-utilities';
+
+import { Observable } from '../../../utilities/observables/Observable';
+import { ApolloLink } from '../../../link/core/ApolloLink';
+import { InMemoryCache } from '../../../cache/inmemory/inMemoryCache';
+import { stripSymbols } from '../../../utilities/testing/stripSymbols';
// mocks
-import mockQueryManager from '../../../__mocks__/mockQueryManager';
-import mockWatchQuery from '../../../__mocks__/mockWatchQuery';
-import {
- mockSingleLink,
- MockSubscriptionLink,
-} from '../../../__mocks__/mockLinks';
+import { MockSubscriptionLink } from '../../../utilities/testing/mocking/mockSubscriptionLink';
// core
-import { ApolloQueryResult } from '../../types';
-import { NetworkStatus } from '../../networkStatus';
-import { ObservableQuery } from '../../ObservableQuery';
-import { WatchQueryOptions } from '../../watchQueryOptions';
import { QueryManager } from '../../QueryManager';
-
-import { ApolloError } from '../../../errors/ApolloError';
-import { DataStore } from '../../../data/store';
-import { Observer } from '../../../util/Observable';
-
-// testing utils
-import wrap from '../../../util/wrap';
-import observableToPromise, {
- observableToPromiseAndSubscription,
-} from '../../../util/observableToPromise';
+import { Reference } from '../../../utilities/graphql/storeUtils';
describe('Link interactions', () => {
it('includes the cache on the context for eviction links', done => {
@@ -71,7 +52,7 @@ describe('Link interactions', () => {
const mockLink = new MockSubscriptionLink();
const link = ApolloLink.from([evictionLink, mockLink]);
const queryManager = new QueryManager({
- store: new DataStore(new InMemoryCache({ addTypename: false })),
+ cache: new InMemoryCache({ addTypename: false }),
link,
});
@@ -114,7 +95,7 @@ describe('Link interactions', () => {
const link = new MockSubscriptionLink();
const queryManager = new QueryManager({
- store: new DataStore(new InMemoryCache({ addTypename: false })),
+ cache: new InMemoryCache({ addTypename: false }),
link,
});
@@ -185,7 +166,7 @@ describe('Link interactions', () => {
const link = new MockSubscriptionLink();
const queryManager = new QueryManager({
- store: new DataStore(new InMemoryCache({ addTypename: false })),
+ cache: new InMemoryCache({ addTypename: false }),
link,
});
@@ -264,7 +245,7 @@ describe('Link interactions', () => {
const mockLink = new MockSubscriptionLink();
const link = ApolloLink.from([evictionLink, mockLink]);
const queryManager = new QueryManager({
- store: new DataStore(new InMemoryCache({ addTypename: false })),
+ cache: new InMemoryCache({ addTypename: false }),
link,
});
@@ -302,7 +283,7 @@ describe('Link interactions', () => {
const mockLink = new MockSubscriptionLink();
const link = ApolloLink.from([evictionLink, mockLink]);
const queryManager = new QueryManager({
- store: new DataStore(new InMemoryCache({ addTypename: false })),
+ cache: new InMemoryCache({ addTypename: false }),
link,
});
@@ -345,23 +326,22 @@ describe('Link interactions', () => {
const queryManager = new QueryManager({
link,
- store: new DataStore(
- new InMemoryCache({
- cacheResolvers: {
- Query: {
- book: (_, { id }, context) => {
- expect(context.getCacheKey).toBeDefined();
- const cacheKey = context.getCacheKey({
- id,
- __typename: 'Book',
- });
- expect(cacheKey.id).toEqual(`Book:${id}`);
- return cacheKey;
+ cache: new InMemoryCache({
+ typePolicies: {
+ Query: {
+ fields: {
+ book(_, { args, toReference, readField }) {
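+ // Resolve Query.book to an existing Book reference found in the cached "books" list.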
+ const ref = toReference({ __typename: "Book", id: args.id });
+ expect(ref).toEqual({ __ref: `Book:${args.id}` });
+ const found = readField<Reference[]>("books").find(
+ book => book.__ref === ref.__ref);
+ expect(found).toBeTruthy();
+ return found;
},
},
},
- }),
- ),
+ },
+ }),
});
await queryManager.query({ query });
diff --git a/packages/apollo-client/src/core/__tests__/QueryManager/live.ts b/src/core/__tests__/QueryManager/live.ts
similarity index 87%
rename from packages/apollo-client/src/core/__tests__/QueryManager/live.ts
rename to src/core/__tests__/QueryManager/live.ts
--- a/packages/apollo-client/src/core/__tests__/QueryManager/live.ts
+++ b/src/core/__tests__/QueryManager/live.ts
@@ -6,20 +6,12 @@
// externals
import gql from 'graphql-tag';
-import { DocumentNode, ExecutionResult } from 'graphql';
-import { ApolloLink, Operation, Observable } from 'apollo-link';
-import { InMemoryCache, ApolloReducerConfig } from 'apollo-cache-inmemory';
-import { stripSymbols } from 'apollo-utilities';
-
-import { MockSubscriptionLink } from '../../../__mocks__/mockLinks';
+import { InMemoryCache } from '../../../cache/inmemory/inMemoryCache';
+import { stripSymbols } from '../../../utilities/testing/stripSymbols';
+import { MockSubscriptionLink } from '../../../utilities/testing/mocking/mockSubscriptionLink';
// core
-import { ApolloQueryResult } from '../../types';
-import { NetworkStatus } from '../../networkStatus';
-import { ObservableQuery } from '../../ObservableQuery';
-import { WatchQueryOptions } from '../../watchQueryOptions';
import { QueryManager } from '../../QueryManager';
-import { DataStore } from '../../../data/store';
describe('Live queries', () => {
it('handles mutliple results for live queries', done => {
@@ -49,7 +41,7 @@ describe('Live queries', () => {
};
const link = new MockSubscriptionLink();
const queryManager = new QueryManager({
- store: new DataStore(new InMemoryCache({ addTypename: false })),
+ cache: new InMemoryCache({ addTypename: false }),
link,
});
@@ -115,7 +107,7 @@ describe('Live queries', () => {
let count = 0;
const link = new MockSubscriptionLink();
const queryManager = new QueryManager({
- store: new DataStore(new InMemoryCache({ addTypename: false })),
+ cache: new InMemoryCache({ addTypename: false }),
link,
});
@@ -199,7 +191,7 @@ describe('Live queries', () => {
link.onUnsubscribe(() => cleanedupTimes++);
const queryManager = new QueryManager({
- store: new DataStore(new InMemoryCache({ addTypename: false })),
+ cache: new InMemoryCache({ addTypename: false }),
link,
});
diff --git a/packages/apollo-client/src/core/__tests__/QueryManager/multiple-results.ts b/src/core/__tests__/QueryManager/multiple-results.ts
similarity index 85%
rename from packages/apollo-client/src/core/__tests__/QueryManager/multiple-results.ts
rename to src/core/__tests__/QueryManager/multiple-results.ts
--- a/packages/apollo-client/src/core/__tests__/QueryManager/multiple-results.ts
+++ b/src/core/__tests__/QueryManager/multiple-results.ts
@@ -1,37 +1,16 @@
// externals
-import Rx from 'rxjs';
-import { assign } from 'lodash';
import gql from 'graphql-tag';
-import { DocumentNode, ExecutionResult } from 'graphql';
-import { ApolloLink, Operation, Observable } from 'apollo-link';
-import { InMemoryCache, ApolloReducerConfig } from 'apollo-cache-inmemory';
-import { stripSymbols } from 'apollo-utilities';
+import { InMemoryCache } from '../../../cache/inmemory/inMemoryCache';
+import { stripSymbols } from '../../../utilities/testing/stripSymbols';
// mocks
-import mockQueryManager from '../../../__mocks__/mockQueryManager';
-import mockWatchQuery from '../../../__mocks__/mockWatchQuery';
import {
- mockSingleLink,
- MockSubscriptionLink,
-} from '../../../__mocks__/mockLinks';
+ MockSubscriptionLink
+} from '../../../utilities/testing/mocking/mockSubscriptionLink';
// core
-import { ApolloQueryResult } from '../../types';
-import { NetworkStatus } from '../../networkStatus';
-import { ObservableQuery } from '../../ObservableQuery';
-import { WatchQueryOptions } from '../../watchQueryOptions';
import { QueryManager } from '../../QueryManager';
-import { ApolloError } from '../../../errors/ApolloError';
-import { DataStore } from '../../../data/store';
-import { Observer } from '../../../util/Observable';
-
-// testing utils
-import wrap from '../../../util/wrap';
-import observableToPromise, {
- observableToPromiseAndSubscription,
-} from '../../../util/observableToPromise';
-
describe('mutiple results', () => {
it('allows multiple query results from link', done => {
const query = gql`
@@ -61,7 +40,7 @@ describe('mutiple results', () => {
};
const link = new MockSubscriptionLink();
const queryManager = new QueryManager({
- store: new DataStore(new InMemoryCache({ addTypename: false })),
+ cache: new InMemoryCache({ addTypename: false }),
link,
});
@@ -118,7 +97,7 @@ describe('mutiple results', () => {
};
const link = new MockSubscriptionLink();
const queryManager = new QueryManager({
- store: new DataStore(new InMemoryCache({ addTypename: false })),
+ cache: new InMemoryCache({ addTypename: false }),
link,
});
@@ -187,7 +166,7 @@ describe('mutiple results', () => {
};
const link = new MockSubscriptionLink();
const queryManager = new QueryManager({
- store: new DataStore(new InMemoryCache({ addTypename: false })),
+ cache: new InMemoryCache({ addTypename: false }),
link,
});
@@ -258,7 +237,7 @@ describe('mutiple results', () => {
};
const link = new MockSubscriptionLink();
const queryManager = new QueryManager({
- store: new DataStore(new InMemoryCache({ addTypename: false })),
+ cache: new InMemoryCache({ addTypename: false }),
link,
});
@@ -333,7 +312,7 @@ describe('mutiple results', () => {
};
const link = new MockSubscriptionLink();
const queryManager = new QueryManager({
- store: new DataStore(new InMemoryCache({ addTypename: false })),
+ cache: new InMemoryCache({ addTypename: false }),
link,
});
diff --git a/packages/apollo-client/src/core/__tests__/QueryManager/recycler.ts b/src/core/__tests__/QueryManager/recycler.ts
similarity index 80%
rename from packages/apollo-client/src/core/__tests__/QueryManager/recycler.ts
rename to src/core/__tests__/QueryManager/recycler.ts
--- a/packages/apollo-client/src/core/__tests__/QueryManager/recycler.ts
+++ b/src/core/__tests__/QueryManager/recycler.ts
@@ -6,20 +6,14 @@
// externals
import gql from 'graphql-tag';
-import { DocumentNode, ExecutionResult } from 'graphql';
-import { ApolloLink, Operation, Observable } from 'apollo-link';
-import { InMemoryCache, ApolloReducerConfig } from 'apollo-cache-inmemory';
-import { stripSymbols } from 'apollo-utilities';
-
-import { MockSubscriptionLink } from '../../../__mocks__/mockLinks';
+import { InMemoryCache } from '../../../cache/inmemory/inMemoryCache';
+import { stripSymbols } from '../../../utilities/testing/stripSymbols';
+import {
+ MockSubscriptionLink
+} from '../../../utilities/testing/mocking/mockSubscriptionLink';
// core
-import { ApolloQueryResult } from '../../types';
-import { NetworkStatus } from '../../networkStatus';
-import { ObservableQuery } from '../../ObservableQuery';
-import { WatchQueryOptions } from '../../watchQueryOptions';
import { QueryManager } from '../../QueryManager';
-import { DataStore } from '../../../data/store';
describe('Subscription lifecycles', () => {
it('cleans up and reuses data like QueryRecycler wants', done => {
@@ -43,7 +37,7 @@ describe('Subscription lifecycles', () => {
const link = new MockSubscriptionLink();
const queryManager = new QueryManager({
- store: new DataStore(new InMemoryCache({ addTypename: false })),
+ cache: new InMemoryCache({ addTypename: false }),
link,
});
diff --git a/packages/apollo-client/src/core/__tests__/fetchPolicies.ts b/src/core/__tests__/fetchPolicies.ts
similarity index 69%
rename from packages/apollo-client/src/core/__tests__/fetchPolicies.ts
rename to src/core/__tests__/fetchPolicies.ts
--- a/packages/apollo-client/src/core/__tests__/fetchPolicies.ts
+++ b/src/core/__tests__/fetchPolicies.ts
@@ -1,26 +1,12 @@
-import { cloneDeep, assign } from 'lodash';
-import { GraphQLError, ExecutionResult, DocumentNode } from 'graphql';
import gql from 'graphql-tag';
-import { print } from 'graphql/language/printer';
-import { ApolloLink, Observable } from 'apollo-link';
-import {
- InMemoryCache,
- IntrospectionFragmentMatcher,
- FragmentMatcherInterface,
-} from 'apollo-cache-inmemory';
-import { stripSymbols } from 'apollo-utilities';
-import { QueryManager } from '../QueryManager';
-import { WatchQueryOptions } from '../watchQueryOptions';
-
-import { ApolloError } from '../../errors/ApolloError';
-
-import ApolloClient, { printAST } from '../..';
-
-import subscribeAndCount from '../../util/subscribeAndCount';
-import { withWarning } from '../../util/wrap';
-
-import { mockSingleLink } from '../../__mocks__/mockLinks';
+import { ApolloLink } from '../../link/core/ApolloLink';
+import { InMemoryCache } from '../../cache/inmemory/inMemoryCache';
+import { stripSymbols } from '../../utilities/testing/stripSymbols';
+import { itAsync } from '../../utilities/testing/itAsync';
+import { ApolloClient } from '../..';
+import subscribeAndCount from '../../utilities/testing/subscribeAndCount';
+import { mockSingleLink } from '../../utilities/testing/mocking/mockLink';
import { NetworkStatus } from '../networkStatus';
const query = gql`
@@ -68,51 +54,41 @@ const mutationResult = {
const merged = { author: { ...result.author, firstName: 'James' } };
-const createLink = () =>
- mockSingleLink(
- {
- request: { query },
- result: { data: result },
- },
- {
- request: { query },
- result: { data: result },
- },
- );
-
-const createFailureLink = () =>
- mockSingleLink(
- {
- request: { query },
- error: new Error('query failed'),
- },
- {
- request: { query },
- result: { data: result },
- },
- );
-
-const createMutationLink = () =>
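+// Each link factory takes the test's reject callback; setOnError makes unexpected link errors fail the test.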
+const createLink = (reject: (reason: any) => any) =>
+ mockSingleLink({
+ request: { query },
+ result: { data: result },
+ }, {
+ request: { query },
+ result: { data: result },
+ }).setOnError(reject);
+
+const createFailureLink = (reject: (reason: any) => any) =>
+ mockSingleLink({
+ request: { query },
+ error: new Error('query failed'),
+ }, {
+ request: { query },
+ result: { data: result },
+ }).setOnError(reject);
+
+const createMutationLink = (reject: (reason: any) => any) =>
// fetch the data
- mockSingleLink(
- {
- request: { query },
- result: { data: result },
- },
- // update the data
- {
- request: { query: mutation, variables },
- result: { data: mutationResult },
- },
- // get the new results
- {
- request: { query },
- result: { data: merged },
- },
- );
+ mockSingleLink({
+ request: { query },
+ result: { data: result },
+ }, // update the data
+ {
+ request: { query: mutation, variables },
+ result: { data: mutationResult },
+ }, // get the new results
+ {
+ request: { query },
+ result: { data: merged },
+ }).setOnError(reject);
describe('network-only', () => {
- it('requests from the network even if already in cache', () => {
+ itAsync('requests from the network even if already in cache', (resolve, reject) => {
let called = 0;
const inspector = new ApolloLink((operation, forward) => {
called++;
@@ -123,20 +99,21 @@ describe('network-only', () => {
});
const client = new ApolloClient({
- link: inspector.concat(createLink()),
+ link: inspector.concat(createLink(reject)),
cache: new InMemoryCache({ addTypename: false }),
});
- return client.query({ query }).then(() =>
- client
+ return client.query({ query }).then(
+ () => client
.query({ fetchPolicy: 'network-only', query })
.then(actualResult => {
expect(stripSymbols(actualResult.data)).toEqual(result);
expect(called).toBe(4);
}),
- );
+ ).then(resolve, reject);
});
- it('saves data to the cache on success', () => {
+
+ itAsync('saves data to the cache on success', (resolve, reject) => {
let called = 0;
const inspector = new ApolloLink((operation, forward) => {
called++;
@@ -147,18 +124,19 @@ describe('network-only', () => {
});
const client = new ApolloClient({
- link: inspector.concat(createLink()),
+ link: inspector.concat(createLink(reject)),
cache: new InMemoryCache({ addTypename: false }),
});
- return client.query({ query, fetchPolicy: 'network-only' }).then(() =>
- client.query({ query }).then(actualResult => {
+ return client.query({ query, fetchPolicy: 'network-only' }).then(
+ () => client.query({ query }).then(actualResult => {
expect(stripSymbols(actualResult.data)).toEqual(result);
expect(called).toBe(2);
}),
- );
+ ).then(resolve, reject);
});
- it('does not save data to the cache on failure', () => {
+
+ itAsync('does not save data to the cache on failure', (resolve, reject) => {
let called = 0;
const inspector = new ApolloLink((operation, forward) => {
called++;
@@ -169,7 +147,7 @@ describe('network-only', () => {
});
const client = new ApolloClient({
- link: inspector.concat(createFailureLink()),
+ link: inspector.concat(createFailureLink(reject)),
cache: new InMemoryCache({ addTypename: false }),
});
@@ -180,17 +158,16 @@ describe('network-only', () => {
expect(e.message).toMatch('query failed');
didFail = true;
})
- .then(() =>
- client.query({ query }).then(actualResult => {
- expect(stripSymbols(actualResult.data)).toEqual(result);
- // the first error doesn't call .map on the inspector
- expect(called).toBe(3);
- expect(didFail).toBe(true);
- }),
- );
+ .then(() => client.query({ query }).then(actualResult => {
+ expect(stripSymbols(actualResult.data)).toEqual(result);
+ // the first error doesn't call .map on the inspector
+ expect(called).toBe(3);
+ expect(didFail).toBe(true);
+ }))
+ .then(resolve, reject);
});
- it('updates the cache on a mutation', () => {
+ itAsync('updates the cache on a mutation', (resolve, reject) => {
let called = 0;
const inspector = new ApolloLink((operation, forward) => {
called++;
@@ -201,7 +178,7 @@ describe('network-only', () => {
});
const client = new ApolloClient({
- link: inspector.concat(createMutationLink()),
+ link: inspector.concat(createMutationLink(reject)),
cache: new InMemoryCache({ addTypename: false }),
});
@@ -216,11 +193,13 @@ describe('network-only', () => {
return client.query({ query }).then(actualResult => {
expect(stripSymbols(actualResult.data)).toEqual(merged);
});
- });
+ })
+ .then(resolve, reject);
});
});
+
describe('no-cache', () => {
- it('requests from the network when not in cache', () => {
+ itAsync('requests from the network when not in cache', (resolve, reject) => {
let called = 0;
const inspector = new ApolloLink((operation, forward) => {
called++;
@@ -231,7 +210,7 @@ describe('no-cache', () => {
});
const client = new ApolloClient({
- link: inspector.concat(createLink()),
+ link: inspector.concat(createLink(reject)),
cache: new InMemoryCache({ addTypename: false }),
});
@@ -240,9 +219,11 @@ describe('no-cache', () => {
.then(actualResult => {
expect(actualResult.data).toEqual(result);
expect(called).toBe(2);
- });
+ })
+ .then(resolve, reject);
});
- it('requests from the network even if already in cache', () => {
+
+ itAsync('requests from the network even if already in cache', (resolve, reject) => {
let called = 0;
const inspector = new ApolloLink((operation, forward) => {
called++;
@@ -253,18 +234,19 @@ describe('no-cache', () => {
});
const client = new ApolloClient({
- link: inspector.concat(createLink()),
+ link: inspector.concat(createLink(reject)),
cache: new InMemoryCache({ addTypename: false }),
});
- return client.query({ query }).then(() =>
- client.query({ fetchPolicy: 'no-cache', query }).then(actualResult => {
+ return client.query({ query }).then(
+ () => client.query({ fetchPolicy: 'no-cache', query }).then(actualResult => {
expect(actualResult.data).toEqual(result);
expect(called).toBe(4);
}),
- );
+ ).then(resolve, reject);
});
- it('does not save the data to the cache on success', () => {
+
+ itAsync('does not save the data to the cache on success', (resolve, reject) => {
let called = 0;
const inspector = new ApolloLink((operation, forward) => {
called++;
@@ -275,20 +257,20 @@ describe('no-cache', () => {
});
const client = new ApolloClient({
- link: inspector.concat(createLink()),
+ link: inspector.concat(createLink(reject)),
cache: new InMemoryCache({ addTypename: false }),
});
- return client.query({ query, fetchPolicy: 'no-cache' }).then(() =>
- client.query({ query }).then(actualResult => {
+ return client.query({ query, fetchPolicy: 'no-cache' }).then(
+ () => client.query({ query }).then(actualResult => {
expect(stripSymbols(actualResult.data)).toEqual(result);
// the second query couldn't read anything from the cache
expect(called).toBe(4);
}),
- );
+ ).then(resolve, reject);
});
- it('does not save data to the cache on failure', () => {
+ itAsync('does not save data to the cache on failure', (resolve, reject) => {
let called = 0;
const inspector = new ApolloLink((operation, forward) => {
called++;
@@ -299,7 +281,7 @@ describe('no-cache', () => {
});
const client = new ApolloClient({
- link: inspector.concat(createFailureLink()),
+ link: inspector.concat(createFailureLink(reject)),
cache: new InMemoryCache({ addTypename: false }),
});
@@ -310,16 +292,16 @@ describe('no-cache', () => {
expect(e.message).toMatch('query failed');
didFail = true;
})
- .then(() =>
- client.query({ query }).then(actualResult => {
- expect(stripSymbols(actualResult.data)).toEqual(result);
- // the first error doesn't call .map on the inspector
- expect(called).toBe(3);
- expect(didFail).toBe(true);
- }),
- );
+ .then(() => client.query({ query }).then(actualResult => {
+ expect(stripSymbols(actualResult.data)).toEqual(result);
+ // the first error doesn't call .map on the inspector
+ expect(called).toBe(3);
+ expect(didFail).toBe(true);
+ }))
+ .then(resolve, reject);
});
- it('does not update the cache on a mutation', () => {
+
+ itAsync('does not update the cache on a mutation', (resolve, reject) => {
let called = 0;
const inspector = new ApolloLink((operation, forward) => {
called++;
@@ -330,7 +312,7 @@ describe('no-cache', () => {
});
const client = new ApolloClient({
- link: inspector.concat(createMutationLink()),
+ link: inspector.concat(createMutationLink(reject)),
cache: new InMemoryCache({ addTypename: false }),
});
@@ -343,12 +325,13 @@ describe('no-cache', () => {
return client.query({ query }).then(actualResult => {
expect(stripSymbols(actualResult.data)).toEqual(result);
});
- });
+ })
+ .then(resolve, reject);
});
});
describe('cache-and-network', function() {
- it('gives appropriate networkStatus for refetched queries', done => {
+ itAsync('gives appropriate networkStatus for refetched queries', (resolve, reject) => {
const client = new ApolloClient({
link: ApolloLink.empty(),
cache: new InMemoryCache(),
@@ -389,7 +372,7 @@ describe('cache-and-network', function() {
};
}
- subscribeAndCount(done, observable, (count, result) => {
+ subscribeAndCount(reject, observable, (count, result) => {
if (count === 1) {
expect(result).toEqual({
data: void 0,
@@ -449,7 +432,7 @@ describe('cache-and-network', function() {
networkStatus: NetworkStatus.ready,
stale: false,
});
- done();
+ resolve();
}
});
});
diff --git a/packages/apollo-client/src/core/__tests__/scheduler.ts b/src/core/__tests__/scheduler.ts
similarity index 79%
rename from packages/apollo-client/src/core/__tests__/scheduler.ts
rename to src/core/__tests__/scheduler.ts
--- a/packages/apollo-client/src/core/__tests__/scheduler.ts
+++ b/src/core/__tests__/scheduler.ts
@@ -1,13 +1,14 @@
-import { InMemoryCache } from 'apollo-cache-inmemory';
import gql from 'graphql-tag';
-import { stripSymbols } from 'apollo-utilities';
+
+import { InMemoryCache } from '../../cache/inmemory/inMemoryCache';
+import { stripSymbols } from '../../utilities/testing/stripSymbols';
+import { itAsync } from '../../utilities/testing/itAsync';
import { QueryManager } from '../QueryManager';
import { WatchQueryOptions } from '../../core/watchQueryOptions';
-import { mockSingleLink } from '../../__mocks__/mockLinks';
+import { mockSingleLink } from '../../utilities/testing/mocking/mockLink';
import { NetworkStatus } from '../../core/networkStatus';
-import { DataStore } from '../../data/store';
import { ObservableQuery } from '../../core/ObservableQuery';
// Used only for unit testing.
@@ -36,10 +37,10 @@ function eachPollingQuery(
}
describe('QueryScheduler', () => {
- it('should throw an error if we try to start polling a non-polling query', () => {
+ itAsync('should throw an error if we try to start polling a non-polling query', (resolve, reject) => {
const queryManager = new QueryManager({
- link: mockSingleLink(),
- store: new DataStore(new InMemoryCache({ addTypename: false })),
+ link: mockSingleLink().setOnError(reject),
+ cache: new InMemoryCache({ addTypename: false }),
});
const query = gql`
@@ -50,15 +51,15 @@ describe('QueryScheduler', () => {
}
}
`;
- const queryOptions: WatchQueryOptions = {
- query,
- };
+
expect(() => {
- queryManager.startPollingQuery(queryOptions, null as never);
+ queryManager.startPollingQuery({ query }, null as never);
}).toThrow();
+
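+ // startPollingQuery throws synchronously here, so the test can resolve right away.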
+ resolve();
});
- it('should correctly start polling queries', done => {
+ itAsync('should correctly start polling queries', (resolve, reject) => {
const query = gql`
query {
author {
@@ -82,10 +83,9 @@ describe('QueryScheduler', () => {
const link = mockSingleLink({
request: queryOptions,
result: { data },
- });
+ }).setOnError(reject);
const queryManager = new QueryManager({
- store: new DataStore(new InMemoryCache({ addTypename: false })),
-
+ cache: new InMemoryCache({ addTypename: false }),
link: link,
});
let timesFired = 0;
@@ -95,11 +95,11 @@ describe('QueryScheduler', () => {
setTimeout(() => {
expect(timesFired).toBeGreaterThanOrEqual(0);
queryManager.stop();
- done();
+ resolve();
}, 120);
});
- it('should correctly stop polling queries', done => {
+ itAsync('should correctly stop polling queries', (resolve, reject) => {
const query = gql`
query {
someAlias: author {
@@ -123,9 +123,9 @@ describe('QueryScheduler', () => {
query: queryOptions.query,
},
result: { data },
- });
+ }).setOnError(reject);
const queryManager = new QueryManager({
- store: new DataStore(new InMemoryCache({ addTypename: false })),
+ cache: new InMemoryCache({ addTypename: false }),
link: link,
});
let timesFired = 0;
@@ -143,11 +143,11 @@ describe('QueryScheduler', () => {
setTimeout(() => {
expect(timesFired).toEqual(1);
queryManager.stop();
- done();
+ resolve();
}, 170);
});
- it('should register a query and return an observable that can be unsubscribed', done => {
+ itAsync('should register a query and return an observable that can be unsubscribed', (resolve, reject) => {
const myQuery = gql`
query {
someAuthorAlias: author {
@@ -169,10 +169,9 @@ describe('QueryScheduler', () => {
const link = mockSingleLink({
request: queryOptions,
result: { data },
- });
+ }).setOnError(reject);
const queryManager = new QueryManager({
- store: new DataStore(new InMemoryCache({ addTypename: false })),
-
+ cache: new InMemoryCache({ addTypename: false }),
link,
});
let timesFired = 0;
@@ -188,11 +187,11 @@ describe('QueryScheduler', () => {
setTimeout(() => {
expect(timesFired).toEqual(1);
queryManager.stop();
- done();
+ resolve();
}, 100);
});
- it('should register a query and return an observable that can adjust interval', done => {
+ itAsync('should register a query and return an observable that can adjust interval', (resolve, reject) => {
const myQuery = gql`
query {
someAuthorAlias: author {
@@ -214,10 +213,10 @@ describe('QueryScheduler', () => {
const link = mockSingleLink(
{ request: queryOptions, result: { data: data[0] } },
{ request: queryOptions, result: { data: data[1] } },
- { request: queryOptions, result: { data: data[2] } },
- );
+ { request: queryOptions, result: { data: data[2] } }
+ ).setOnError(reject);
const queryManager = new QueryManager({
- store: new DataStore(new InMemoryCache({ addTypename: false })),
+ cache: new InMemoryCache({ addTypename: false }),
link,
});
let timesFired = 0;
@@ -241,11 +240,11 @@ describe('QueryScheduler', () => {
expect(timesFired).toEqual(2);
subscription.unsubscribe();
queryManager.stop();
- done();
+ resolve();
}, 100);
});
- it('should handle network errors on polling queries correctly', done => {
+ itAsync('should handle network errors on polling queries correctly', (resolve, reject) => {
const query = gql`
query {
author {
@@ -262,16 +261,16 @@ describe('QueryScheduler', () => {
const link = mockSingleLink({
request: queryOptions,
error,
- });
+ }).setOnError(reject);
const queryManager = new QueryManager({
- store: new DataStore(new InMemoryCache({ addTypename: false })),
+ cache: new InMemoryCache({ addTypename: false }),
link,
});
let observableQuery = registerPollingQuery(queryManager, queryOptions);
const subscription = observableQuery.subscribe({
next() {
queryManager.stop();
- done.fail(
+ reject(
new Error('Observer provided a result despite a network error.'),
);
},
@@ -283,12 +282,12 @@ describe('QueryScheduler', () => {
});
subscription.unsubscribe();
queryManager.stop();
- done();
+ resolve();
},
});
});
- it('should not fire another query if one with the same id is in flight', done => {
+ itAsync('should not fire another query if one with the same id is in flight', (resolve, reject) => {
const query = gql`
query B {
fortuneCookie
@@ -305,9 +304,9 @@ describe('QueryScheduler', () => {
request: queryOptions,
result: { data },
delay: 20000,
- });
+ }).setOnError(reject);
const queryManager = new QueryManager({
- store: new DataStore(new InMemoryCache()),
+ cache: new InMemoryCache(),
link,
});
const observer = registerPollingQuery(queryManager, queryOptions);
@@ -315,11 +314,11 @@ describe('QueryScheduler', () => {
setTimeout(() => {
subscription.unsubscribe();
queryManager.stop();
- done();
+ resolve();
}, 100);
});
- it('should add a query to an interval correctly', () => {
+ itAsync('should add a query to an interval correctly', (resolve, reject) => {
const query = gql`
query {
fortuneCookie
@@ -335,9 +334,9 @@ describe('QueryScheduler', () => {
const link = mockSingleLink({
request: queryOptions,
result: { data },
- });
+ }).setOnError(reject);
const queryManager = new QueryManager<any>({
- store: new DataStore(new InMemoryCache()),
+ cache: new InMemoryCache(),
link,
});
const queryId = 'fake-id';
@@ -351,9 +350,11 @@ describe('QueryScheduler', () => {
});
expect(count).toEqual(1);
queryManager.stop();
+
+ resolve();
});
- it('should add multiple queries to an interval correctly', () => {
+ itAsync('should add multiple queries to an interval correctly', (resolve, reject) => {
const query1 = gql`
query {
fortuneCookie
@@ -386,17 +387,14 @@ describe('QueryScheduler', () => {
pollInterval: interval,
};
const queryManager = new QueryManager({
- store: new DataStore(new InMemoryCache({ addTypename: false })),
- link: mockSingleLink(
- {
- request: { query: query1 },
- result: { data: data1 },
- },
- {
- request: { query: query2 },
- result: { data: data2 },
- },
- ),
+ cache: new InMemoryCache({ addTypename: false }),
+ link: mockSingleLink({
+ request: { query: query1 },
+ result: { data: data1 },
+ }, {
+ request: { query: query2 },
+ result: { data: data2 },
+ }).setOnError(reject),
});
const observable1 = registerPollingQuery(queryManager, queryOptions1);
observable1.subscribe({
@@ -419,9 +417,11 @@ describe('QueryScheduler', () => {
});
expect(count).toEqual(2);
queryManager.stop();
+
+ resolve();
});
- it('should remove queries from the interval list correctly', done => {
+ itAsync('should remove queries from the interval list correctly', (resolve, reject) => {
const query = gql`
query {
author {
@@ -437,11 +437,11 @@ describe('QueryScheduler', () => {
},
};
const queryManager = new QueryManager({
- store: new DataStore(new InMemoryCache({ addTypename: false })),
+ cache: new InMemoryCache({ addTypename: false }),
link: mockSingleLink({
request: { query },
result: { data },
- }),
+ }).setOnError(reject),
});
let timesFired = 0;
const observable = registerPollingQuery(queryManager, {
@@ -463,11 +463,11 @@ describe('QueryScheduler', () => {
setTimeout(() => {
expect(timesFired).toEqual(1);
queryManager.stop();
- done();
+ resolve();
}, 100);
});
- it('should correctly start new polling query after removing old one', done => {
+ itAsync('should correctly start new polling query after removing old one', (resolve, reject) => {
const query = gql`
query {
someAlias: author {
@@ -490,14 +490,9 @@ describe('QueryScheduler', () => {
request: queryOptions,
result: { data },
};
- const link = mockSingleLink(
- networkResult,
- networkResult,
- networkResult,
- networkResult,
- );
+ const link = mockSingleLink(networkResult, networkResult, networkResult, networkResult).setOnError(reject);
const queryManager = new QueryManager({
- store: new DataStore(new InMemoryCache({ addTypename: false })),
+ cache: new InMemoryCache({ addTypename: false }),
link: link,
});
let timesFired = 0;
@@ -515,7 +510,7 @@ describe('QueryScheduler', () => {
setTimeout(() => {
expect(timesFired).toBeGreaterThanOrEqual(1);
queryManager.stop();
- done();
+ resolve();
}, 80);
}, 200);
});
diff --git a/packages/apollo-client/src/data/__tests__/queries.ts b/src/data/__tests__/queries.ts
similarity index 99%
rename from packages/apollo-client/src/data/__tests__/queries.ts
rename to src/data/__tests__/queries.ts
--- a/packages/apollo-client/src/data/__tests__/queries.ts
+++ b/src/data/__tests__/queries.ts
@@ -1,6 +1,7 @@
+import { DocumentNode } from 'graphql';
+
import { QueryStore } from '../queries';
import { NetworkStatus } from '../../core/networkStatus';
-import { DocumentNode } from 'graphql';
describe('QueryStore', () => {
const queryId = 'abc123';
diff --git a/packages/apollo-client/src/errors/__tests__/ApolloError.ts b/src/errors/__tests__/ApolloError.ts
similarity index 100%
rename from packages/apollo-client/src/errors/__tests__/ApolloError.ts
rename to src/errors/__tests__/ApolloError.ts
diff --git a/src/link/core/__tests__/ApolloLink.ts b/src/link/core/__tests__/ApolloLink.ts
new file mode 100644
--- /dev/null
+++ b/src/link/core/__tests__/ApolloLink.ts
@@ -0,0 +1,1055 @@
+import gql from 'graphql-tag';
+import { print } from 'graphql/language/printer';
+
+import { Observable } from '../../../utilities/observables/Observable';
+import { FetchResult, Operation, NextLink, GraphQLRequest } from '../types';
+import { ApolloLink } from '../ApolloLink';
+
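+// A small helper link for these tests: it applies the given transform to the operation context before forwarding.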
+export class SetContextLink extends ApolloLink {
+ constructor(
+ private setContext: (
+ context: Record<string, any>,
+ ) => Record<string, any> = c => c,
+ ) {
+ super();
+ }
+
+ public request(
+ operation: Operation,
+ forward: NextLink,
+ ): Observable<FetchResult> {
+ operation.setContext(this.setContext(operation.getContext()));
+ return forward(operation);
+ }
+}
+
+export const sampleQuery = gql`
+ query SampleQuery {
+ stub {
+ id
+ }
+ }
+`;
+
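+// Asserts that the subscriber spy received exactly the expected payloads, in order.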
+function checkCalls<T>(calls: any[] = [], results: Array<T>) {
+ expect(calls.length).toBe(results.length);
+  calls.forEach((call, i) => expect(call.data).toEqual(results[i]));
+}
+
+interface TestResultType {
+ link: ApolloLink;
+ results?: any[];
+ query?: string;
+ done?: () => void;
+ context?: any;
+ variables?: any;
+}
+
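+// Executes a link with the given request and checks every emitted value; when the last expected entry is an error, it must arrive via the error callback.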
+export function testLinkResults(params: TestResultType) {
+ const { link, context, variables } = params;
+ const results = params.results || [];
+ const query = params.query || sampleQuery;
+ const done = params.done || (() => void 0);
+
+ const spy = jest.fn();
+ ApolloLink.execute(link, { query, context, variables }).subscribe({
+ next: spy,
+ error: error => {
+ expect(error).toEqual(results.pop());
+ checkCalls(spy.mock.calls[0], results);
+ if (done) {
+ done();
+ }
+ },
+ complete: () => {
+ checkCalls(spy.mock.calls[0], results);
+ if (done) {
+ done();
+ }
+ },
+ });
+}
+
+export const setContext = () => ({ add: 1 });
+
+describe('ApolloClient', () => {
+ describe('context', () => {
+ it('should merge context when using a function', done => {
+ const returnOne = new SetContextLink(setContext);
+ const mock = new ApolloLink((op, forward) => {
+ op.setContext(({ add }) => ({ add: add + 2 }));
+ op.setContext(() => ({ substract: 1 }));
+
+ return forward(op);
+ });
+ const link = returnOne.concat(mock).concat(op => {
+ expect(op.getContext()).toEqual({
+ add: 3,
+ substract: 1,
+ });
+ return Observable.of({ data: op.getContext().add });
+ });
+
+ testLinkResults({
+ link,
+ results: [3],
+ done,
+ });
+ });
+
+ it('should merge context when not using a function', done => {
+ const returnOne = new SetContextLink(setContext);
+ const mock = new ApolloLink((op, forward) => {
+ op.setContext({ add: 3 });
+ op.setContext({ substract: 1 });
+
+ return forward(op);
+ });
+ const link = returnOne.concat(mock).concat(op => {
+ expect(op.getContext()).toEqual({
+ add: 3,
+ substract: 1,
+ });
+ return Observable.of({ data: op.getContext().add });
+ });
+
+ testLinkResults({
+ link,
+ results: [3],
+ done,
+ });
+ });
+ });
+
+ describe('concat', () => {
+ it('should concat a function', done => {
+ const returnOne = new SetContextLink(setContext);
+ const link = returnOne.concat((operation, forward) => {
+ return Observable.of({ data: { count: operation.getContext().add } });
+ });
+
+ testLinkResults({
+ link,
+ results: [{ count: 1 }],
+ done,
+ });
+ });
+
+ it('should concat a Link', done => {
+ const returnOne = new SetContextLink(setContext);
+ const mock = new ApolloLink(op =>
+ Observable.of({ data: op.getContext().add }),
+ );
+ const link = returnOne.concat(mock);
+
+ testLinkResults({
+ link,
+ results: [1],
+ done,
+ });
+ });
+
+ it("should pass error to observable's error", done => {
+ const error = new Error('thrown');
+ const returnOne = new SetContextLink(setContext);
+ const mock = new ApolloLink(
+ op =>
+ new Observable(observer => {
+ observer.next({ data: op.getContext().add });
+ observer.error(error);
+ }),
+ );
+ const link = returnOne.concat(mock);
+
+ testLinkResults({
+ link,
+ results: [1, error],
+ done,
+ });
+ });
+
+ it('should concat a Link and function', done => {
+ const returnOne = new SetContextLink(setContext);
+ const mock = new ApolloLink((op, forward) => {
+ op.setContext(({ add }) => ({ add: add + 2 }));
+ return forward(op);
+ });
+ const link = returnOne.concat(mock).concat(op => {
+ return Observable.of({ data: op.getContext().add });
+ });
+
+ testLinkResults({
+ link,
+ results: [3],
+ done,
+ });
+ });
+
+ it('should concat a function and Link', done => {
+ const returnOne = new SetContextLink(setContext);
+ const mock = new ApolloLink((op, forward) =>
+ Observable.of({ data: op.getContext().add }),
+ );
+
+ const link = returnOne
+ .concat((operation, forward) => {
+ operation.setContext({
+ add: operation.getContext().add + 2,
+ });
+ return forward(operation);
+ })
+ .concat(mock);
+ testLinkResults({
+ link,
+ results: [3],
+ done,
+ });
+ });
+
+ it('should concat two functions', done => {
+ const returnOne = new SetContextLink(setContext);
+ const link = returnOne
+ .concat((operation, forward) => {
+ operation.setContext({
+ add: operation.getContext().add + 2,
+ });
+ return forward(operation);
+ })
+ .concat((op, forward) => Observable.of({ data: op.getContext().add }));
+ testLinkResults({
+ link,
+ results: [3],
+ done,
+ });
+ });
+
+ it('should concat two Links', done => {
+ const returnOne = new SetContextLink(setContext);
+ const mock1 = new ApolloLink((operation, forward) => {
+ operation.setContext({
+ add: operation.getContext().add + 2,
+ });
+ return forward(operation);
+ });
+ const mock2 = new ApolloLink((op, forward) =>
+ Observable.of({ data: op.getContext().add }),
+ );
+
+ const link = returnOne.concat(mock1).concat(mock2);
+ testLinkResults({
+ link,
+ results: [3],
+ done,
+ });
+ });
+
+ it("should return an link that can be concat'd multiple times", done => {
+ const returnOne = new SetContextLink(setContext);
+ const mock1 = new ApolloLink((operation, forward) => {
+ operation.setContext({
+ add: operation.getContext().add + 2,
+ });
+ return forward(operation);
+ });
+ const mock2 = new ApolloLink((op, forward) =>
+ Observable.of({ data: op.getContext().add + 2 }),
+ );
+ const mock3 = new ApolloLink((op, forward) =>
+ Observable.of({ data: op.getContext().add + 3 }),
+ );
+ const link = returnOne.concat(mock1);
+
+ testLinkResults({
+ link: link.concat(mock2),
+ results: [5],
+ });
+ testLinkResults({
+ link: link.concat(mock3),
+ results: [6],
+ done,
+ });
+ });
+ });
+
+ describe('empty', () => {
+    it('should return an immediately completed Observable', done => {
+ testLinkResults({
+ link: ApolloLink.empty(),
+ done,
+ });
+ });
+ });
+
+ describe('execute', () => {
+    it('transforms an operation with context into something serializable', done => {
+ const query = gql`
+ {
+ id
+ }
+ `;
+ const link = new ApolloLink(operation => {
+ const str = JSON.stringify({
+ ...operation,
+ query: print(operation.query),
+ });
+
+ expect(str).toBe(
+ JSON.stringify({
+ variables: { id: 1 },
+ extensions: { cache: true },
+ operationName: null,
+ query: print(operation.query),
+ }),
+ );
+ return Observable.of();
+ });
+ const noop = () => {};
+ ApolloLink.execute(link, {
+ query,
+ variables: { id: 1 },
+ extensions: { cache: true },
+ }).subscribe(noop, noop, done);
+ });
+
+ describe('execute', () => {
+ let _warn: (message?: any, ...originalParams: any[]) => void;
+
+ beforeEach(() => {
+ _warn = console.warn;
+ console.warn = jest.fn(warning => {
+ expect(warning).toBe(`query should either be a string or GraphQL AST`);
+ });
+ });
+
+ afterEach(() => {
+ console.warn = _warn;
+ });
+
+ it('should return an empty observable when a link returns null', done => {
+ const link = new ApolloLink();
+ link.request = () => null;
+ testLinkResults({
+ link,
+ results: [],
+ done,
+ });
+ });
+
+ it('should return an empty observable when a link is empty', done => {
+ testLinkResults({
+ link: ApolloLink.empty(),
+ results: [],
+ done,
+ });
+ });
+
+ it("should return an empty observable when a concat'd link returns null", done => {
+ const link = new ApolloLink((operation, forward) => {
+ return forward(operation);
+ }).concat(() => null);
+ testLinkResults({
+ link,
+ results: [],
+ done,
+ });
+ });
+
+ it('should return an empty observable when a split link returns null', done => {
+ let context = { test: true };
+ const link = new SetContextLink(() => context).split(
+ op => op.getContext().test,
+ () => Observable.of(),
+ () => null,
+ );
+ testLinkResults({
+ link,
+ results: [],
+ });
+ context.test = false;
+ testLinkResults({
+ link,
+ results: [],
+ done,
+ });
+ });
+
+ it('should set a default context, variable, query and operationName on a copy of operation', done => {
+ const operation = {
+ query: gql`
+ {
+ id
+ }
+ `,
+ };
+ const link = new ApolloLink(op => {
+ expect(operation['operationName']).toBeUndefined();
+ expect(operation['variables']).toBeUndefined();
+ expect(operation['context']).toBeUndefined();
+ expect(operation['extensions']).toBeUndefined();
+ expect(op['operationName']).toBeDefined();
+ expect(op['variables']).toBeDefined();
+ expect(op['context']).toBeUndefined();
+ expect(op['extensions']).toBeDefined();
+ return Observable.of();
+ });
+
+ ApolloLink.execute(link, operation).subscribe({
+ complete: done,
+ });
+ });
+    });
+ });
+
+ describe('from', () => {
+ const uniqueOperation: GraphQLRequest = {
+ query: sampleQuery,
+ context: { name: 'uniqueName' },
+ operationName: 'SampleQuery',
+ extensions: {},
+ };
+
+ it('should create an observable that completes when passed an empty array', done => {
+ const observable = ApolloLink.execute(ApolloLink.from([]), {
+ query: sampleQuery,
+ });
+      observable.subscribe(() => done.fail('next should not have been called'), () => done.fail('error should not have been called'), done);
+ });
+
+ it('can create chain of one', () => {
+ expect(() => ApolloLink.from([new ApolloLink()])).not.toThrow();
+ });
+
+ it('can create chain of two', () => {
+ expect(() =>
+ ApolloLink.from([
+ new ApolloLink((operation, forward) => forward(operation)),
+ new ApolloLink(),
+ ]),
+ ).not.toThrow();
+ });
+
+ it('should receive result of one link', done => {
+ const data: FetchResult = {
+ data: {
+ hello: 'world',
+ },
+ };
+ const chain = ApolloLink.from([new ApolloLink(() => Observable.of(data))]);
+ // Smoke tests execute as a static method
+ const observable = ApolloLink.execute(chain, uniqueOperation);
+ observable.subscribe({
+ next: actualData => {
+ expect(data).toEqual(actualData);
+ },
+ error: () => {
+ throw new Error();
+ },
+ complete: () => done(),
+ });
+ });
+
+ it('should accept AST query and pass AST to link', () => {
+ const astOperation = {
+ ...uniqueOperation,
+ query: sampleQuery,
+ };
+
+ const stub = jest.fn();
+
+ const chain = ApolloLink.from([new ApolloLink(stub)]);
+ ApolloLink.execute(chain, astOperation);
+
+ expect(stub).toBeCalledWith({
+ query: sampleQuery,
+ operationName: 'SampleQuery',
+ variables: {},
+ extensions: {},
+ });
+ });
+
+ it('should pass operation from one link to next with modifications', done => {
+ const chain = ApolloLink.from([
+ new ApolloLink((op, forward) =>
+ forward({
+ ...op,
+ query: sampleQuery,
+ }),
+ ),
+ new ApolloLink(op => {
+ expect({
+ extensions: {},
+ operationName: 'SampleQuery',
+ query: sampleQuery,
+ variables: {},
+ }).toEqual(op);
+ return done();
+ }),
+ ]);
+ ApolloLink.execute(chain, uniqueOperation);
+ });
+
+ it('should pass result of one link to another with forward', done => {
+ const data: FetchResult = {
+ data: {
+ hello: 'world',
+ },
+ };
+
+ const chain = ApolloLink.from([
+ new ApolloLink((op, forward) => {
+ const observable = forward(op);
+
+ observable.subscribe({
+ next: actualData => {
+ expect(data).toEqual(actualData);
+ },
+ error: () => {
+ throw new Error();
+ },
+ complete: done,
+ });
+
+ return observable;
+ }),
+ new ApolloLink(() => Observable.of(data)),
+ ]);
+ ApolloLink.execute(chain, uniqueOperation);
+ });
+
+ it('should receive final result of two link chain', done => {
+ const data: FetchResult = {
+ data: {
+ hello: 'world',
+ },
+ };
+
+ const chain = ApolloLink.from([
+ new ApolloLink((op, forward) => {
+ const observable = forward(op);
+
+ return new Observable(observer => {
+ observable.subscribe({
+ next: actualData => {
+ expect(data).toEqual(actualData);
+ observer.next({
+ data: {
+ ...actualData.data,
+ modification: 'unique',
+ },
+ });
+ },
+ error: error => observer.error(error),
+ complete: () => observer.complete(),
+ });
+ });
+ }),
+ new ApolloLink(() => Observable.of(data)),
+ ]);
+
+ const result = ApolloLink.execute(chain, uniqueOperation);
+
+ result.subscribe({
+ next: modifiedData => {
+ expect({
+ data: {
+ ...data.data,
+ modification: 'unique',
+ },
+ }).toEqual(modifiedData);
+ },
+ error: () => {
+ throw new Error();
+ },
+ complete: done,
+ });
+ });
+
+ it('should chain together a function with links', done => {
+ const add1 = new ApolloLink((operation: Operation, forward: NextLink) => {
+ operation.setContext(({ num }) => ({ num: num + 1 }));
+ return forward(operation);
+ });
+ const add1Link = new ApolloLink((operation, forward) => {
+ operation.setContext(({ num }) => ({ num: num + 1 }));
+ return forward(operation);
+ });
+
+ const link = ApolloLink.from([
+ add1,
+ add1,
+ add1Link,
+ add1,
+ add1Link,
+ new ApolloLink(operation =>
+ Observable.of({ data: operation.getContext() }),
+ ),
+ ]);
+ testLinkResults({
+ link,
+ results: [{ num: 5 }],
+ context: { num: 0 },
+ done,
+ });
+ });
+ });
+
+ describe('split', () => {
+ it('should split two functions', done => {
+ const context = { add: 1 };
+ const returnOne = new SetContextLink(() => context);
+ const link1 = returnOne.concat((operation, forward) =>
+ Observable.of({ data: operation.getContext().add + 1 }),
+ );
+ const link2 = returnOne.concat((operation, forward) =>
+ Observable.of({ data: operation.getContext().add + 2 }),
+ );
+ const link = returnOne.split(
+ operation => operation.getContext().add === 1,
+ link1,
+ link2,
+ );
+
+ testLinkResults({
+ link,
+ results: [2],
+ });
+
+ context.add = 2;
+
+ testLinkResults({
+ link,
+ results: [4],
+ done,
+ });
+ });
+
+ it('should split two Links', done => {
+ const context = { add: 1 };
+ const returnOne = new SetContextLink(() => context);
+ const link1 = returnOne.concat(
+ new ApolloLink((operation, forward) =>
+ Observable.of({ data: operation.getContext().add + 1 }),
+ ),
+ );
+ const link2 = returnOne.concat(
+ new ApolloLink((operation, forward) =>
+ Observable.of({ data: operation.getContext().add + 2 }),
+ ),
+ );
+ const link = returnOne.split(
+ operation => operation.getContext().add === 1,
+ link1,
+ link2,
+ );
+
+ testLinkResults({
+ link,
+ results: [2],
+ });
+
+ context.add = 2;
+
+ testLinkResults({
+ link,
+ results: [4],
+ done,
+ });
+ });
+
+ it('should split a link and a function', done => {
+ const context = { add: 1 };
+ const returnOne = new SetContextLink(() => context);
+ const link1 = returnOne.concat((operation, forward) =>
+ Observable.of({ data: operation.getContext().add + 1 }),
+ );
+ const link2 = returnOne.concat(
+ new ApolloLink((operation, forward) =>
+ Observable.of({ data: operation.getContext().add + 2 }),
+ ),
+ );
+ const link = returnOne.split(
+ operation => operation.getContext().add === 1,
+ link1,
+ link2,
+ );
+
+ testLinkResults({
+ link,
+ results: [2],
+ });
+
+ context.add = 2;
+
+ testLinkResults({
+ link,
+ results: [4],
+ done,
+ });
+ });
+
+    it('should allow concat after split to be joined', done => {
+ const context = { test: true, add: 1 };
+ const start = new SetContextLink(() => ({ ...context }));
+ const link = start
+ .split(
+ operation => operation.getContext().test,
+ (operation, forward) => {
+ operation.setContext(({ add }) => ({ add: add + 1 }));
+ return forward(operation);
+ },
+ (operation, forward) => {
+ operation.setContext(({ add }) => ({ add: add + 2 }));
+ return forward(operation);
+ },
+ )
+ .concat(operation =>
+ Observable.of({ data: operation.getContext().add }),
+ );
+
+ testLinkResults({
+ link,
+ context,
+ results: [2],
+ });
+
+ context.test = false;
+
+ testLinkResults({
+ link,
+ context,
+ results: [3],
+ done,
+ });
+ });
+
+    it('should allow the default right side to be empty or a passthrough when forward is available', done => {
+ let context = { test: true };
+ const start = new SetContextLink(() => context);
+ const link = start.split(
+ operation => operation.getContext().test,
+ operation =>
+ Observable.of({
+ data: {
+ count: 1,
+ },
+ }),
+ );
+ const concat = link.concat(operation =>
+ Observable.of({
+ data: {
+ count: 2,
+ },
+ }),
+ );
+
+ testLinkResults({
+ link,
+ results: [{ count: 1 }],
+ });
+
+ context.test = false;
+
+ testLinkResults({
+ link,
+ results: [],
+ });
+
+ testLinkResults({
+ link: concat,
+ results: [{ count: 2 }],
+ done,
+ });
+ });
+
+    it('should create a filter when a single link is passed in', done => {
+ const link = ApolloLink.split(
+ operation => operation.getContext().test,
+ (operation, forward) => Observable.of({ data: { count: 1 } }),
+ );
+
+ let context = { test: true };
+
+ testLinkResults({
+ link,
+ results: [{ count: 1 }],
+ context,
+ });
+
+ context.test = false;
+
+ testLinkResults({
+ link,
+ results: [],
+ context,
+ done,
+ });
+ });
+
+ it('should split two functions', done => {
+ const link = ApolloLink.split(
+ operation => operation.getContext().test,
+ (operation, forward) => Observable.of({ data: { count: 1 } }),
+ (operation, forward) => Observable.of({ data: { count: 2 } }),
+ );
+
+ let context = { test: true };
+
+ testLinkResults({
+ link,
+ results: [{ count: 1 }],
+ context,
+ });
+
+ context.test = false;
+
+ testLinkResults({
+ link,
+ results: [{ count: 2 }],
+ context,
+ done,
+ });
+ });
+
+ it('should split two Links', done => {
+ const link = ApolloLink.split(
+ operation => operation.getContext().test,
+ (operation, forward) => Observable.of({ data: { count: 1 } }),
+ new ApolloLink((operation, forward) =>
+ Observable.of({ data: { count: 2 } }),
+ ),
+ );
+
+ let context = { test: true };
+
+ testLinkResults({
+ link,
+ results: [{ count: 1 }],
+ context,
+ });
+
+ context.test = false;
+
+ testLinkResults({
+ link,
+ results: [{ count: 2 }],
+ context,
+ done,
+ });
+ });
+
+ it('should split a link and a function', done => {
+ const link = ApolloLink.split(
+ operation => operation.getContext().test,
+ (operation, forward) => Observable.of({ data: { count: 1 } }),
+ new ApolloLink((operation, forward) =>
+ Observable.of({ data: { count: 2 } }),
+ ),
+ );
+
+ let context = { test: true };
+
+ testLinkResults({
+ link,
+ results: [{ count: 1 }],
+ context,
+ });
+
+ context.test = false;
+
+ testLinkResults({
+ link,
+ results: [{ count: 2 }],
+ context,
+ done,
+ });
+ });
+
+    it('should allow concat after split to be joined', done => {
+ const context = { test: true };
+ const link = ApolloLink.split(
+ operation => operation.getContext().test,
+ (operation, forward) =>
+ forward(operation).map(data => ({
+ data: { count: data.data.count + 1 },
+ })),
+ ).concat(() => Observable.of({ data: { count: 1 } }));
+
+ testLinkResults({
+ link,
+ context,
+ results: [{ count: 2 }],
+ });
+
+ context.test = false;
+
+ testLinkResults({
+ link,
+ context,
+ results: [{ count: 1 }],
+ done,
+ });
+ });
+
+ it('should allow default right to be passthrough', done => {
+ const context = { test: true };
+ const link = ApolloLink.split(
+ operation => operation.getContext().test,
+ operation => Observable.of({ data: { count: 2 } }),
+ ).concat(operation => Observable.of({ data: { count: 1 } }));
+
+ testLinkResults({
+ link,
+ context,
+ results: [{ count: 2 }],
+ });
+
+ context.test = false;
+
+ testLinkResults({
+ link,
+ context,
+ results: [{ count: 1 }],
+ done,
+ });
+ });
+ });
+
+ describe('Terminating links', () => {
+ const _warn = console.warn;
+ const warningStub = jest.fn(warning => {
+ expect(warning.message).toBe(
+ `You are calling concat on a terminating link, which will have no effect`,
+ );
+ });
+ const data = {
+ stub: 'data',
+ };
+
+ beforeAll(() => {
+ console.warn = warningStub;
+ });
+
+ beforeEach(() => {
+ warningStub.mockClear();
+ });
+
+ afterAll(() => {
+ console.warn = _warn;
+ });
+
+ describe('split', () => {
+ it('should not warn if attempting to split a terminating and non-terminating Link', () => {
+ const split = ApolloLink.split(
+ () => true,
+ operation => Observable.of({ data }),
+ (operation, forward) => forward(operation),
+ );
+ split.concat((operation, forward) => forward(operation));
+ expect(warningStub).not.toBeCalled();
+ });
+
+ it('should warn if attempting to concat to split two terminating links', () => {
+ const split = ApolloLink.split(
+ () => true,
+ operation => Observable.of({ data }),
+ operation => Observable.of({ data }),
+ );
+ expect(split.concat((operation, forward) => forward(operation))).toEqual(
+ split,
+ );
+ expect(warningStub).toHaveBeenCalledTimes(1);
+ });
+
+ it('should warn if attempting to split to split two terminating links', () => {
+ const split = ApolloLink.split(
+ () => true,
+ operation => Observable.of({ data }),
+ operation => Observable.of({ data }),
+ );
+ expect(
+ split.split(
+ () => true,
+ (operation, forward) => forward(operation),
+ (operation, forward) => forward(operation),
+ ),
+ ).toEqual(split);
+ expect(warningStub).toHaveBeenCalledTimes(1);
+ });
+ });
+
+ describe('from', () => {
+ it('should not warn if attempting to form a terminating then non-terminating Link', () => {
+ ApolloLink.from([
+ (operation, forward) => forward(operation),
+ operation => Observable.of({ data }),
+ ]);
+ expect(warningStub).not.toBeCalled();
+ });
+
+      it('should warn if attempting to add a link after termination', () => {
+ ApolloLink.from([
+ (operation, forward) => forward(operation),
+ operation => Observable.of({ data }),
+ (operation, forward) => forward(operation),
+ ]);
+ expect(warningStub).toHaveBeenCalledTimes(1);
+ });
+
+      it('should warn if attempting to add an ApolloLink after termination', () => {
+ ApolloLink.from([
+ new ApolloLink((operation, forward) => forward(operation)),
+ new ApolloLink(operation => Observable.of({ data })),
+ new ApolloLink((operation, forward) => forward(operation)),
+ ]);
+ expect(warningStub).toHaveBeenCalledTimes(1);
+ });
+ });
+
+ describe('concat', () => {
+ it('should warn if attempting to concat to a terminating Link from function', () => {
+ const link = new ApolloLink(operation => Observable.of({ data }));
+ expect(ApolloLink.concat(link, (operation, forward) => forward(operation))).toEqual(
+ link,
+ );
+ expect(warningStub).toHaveBeenCalledTimes(1);
+ expect(warningStub.mock.calls[0][0].link).toEqual(link);
+ });
+
+ it('should warn if attempting to concat to a terminating Link', () => {
+ const link = new ApolloLink(operation => Observable.of());
+ expect(link.concat((operation, forward) => forward(operation))).toEqual(
+ link,
+ );
+ expect(warningStub).toHaveBeenCalledTimes(1);
+ expect(warningStub.mock.calls[0][0].link).toEqual(link);
+ });
+
+ it('should not warn if attempting concat a terminating Link at end', () => {
+ const link = new ApolloLink((operation, forward) => forward(operation));
+ link.concat(operation => Observable.of());
+ expect(warningStub).not.toBeCalled();
+ });
+ });
+
+ describe('warning', () => {
+ it('should include link that terminates', () => {
+ const terminatingLink = new ApolloLink(operation =>
+ Observable.of({ data }),
+ );
+ ApolloLink.from([
+ new ApolloLink((operation, forward) => forward(operation)),
+ new ApolloLink((operation, forward) => forward(operation)),
+ terminatingLink,
+ new ApolloLink((operation, forward) => forward(operation)),
+ new ApolloLink((operation, forward) => forward(operation)),
+ new ApolloLink(operation => Observable.of({ data })),
+ new ApolloLink((operation, forward) => forward(operation)),
+ ]);
+ expect(warningStub).toHaveBeenCalledTimes(4);
+ });
+ });
+ });
+});
diff --git a/src/link/http/__tests__/HttpLink.ts b/src/link/http/__tests__/HttpLink.ts
new file mode 100644
--- /dev/null
+++ b/src/link/http/__tests__/HttpLink.ts
@@ -0,0 +1,1114 @@
+import gql from 'graphql-tag';
+import fetchMock from 'fetch-mock';
+import { print } from 'graphql';
+
+import { Observable } from '../../../utilities/observables/Observable';
+import { ApolloLink } from '../../core/ApolloLink';
+import { execute } from '../../core/execute';
+import { HttpLink } from '../HttpLink';
+import { createHttpLink } from '../createHttpLink';
+
+const sampleQuery = gql`
+ query SampleQuery {
+ stub {
+ id
+ }
+ }
+`;
+
+const sampleMutation = gql`
+ mutation SampleMutation {
+ stub {
+ id
+ }
+ }
+`;
+
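+// Wraps an assertion callback so a failed expectation is reported through done.fail instead of being thrown uncaught.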
+const makeCallback = (done, body) => {
+ return (...args) => {
+ try {
+ body(...args);
+ done();
+ } catch (error) {
+ done.fail(error);
+ }
+ };
+};
+
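+// Parses the JSON request body captured by fetch-mock.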
+const convertBatchedBody = body => JSON.parse(body);
+
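+// Resolves on the next tick, keeping mocked fetch responses asynchronous like real network calls.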
+const makePromise =
+ res => new Promise((resolve) => setTimeout(() => resolve(res)));
+
+describe('HttpLink', () => {
+ describe('General', () => {
+ const data = { data: { hello: 'world' } };
+ const data2 = { data: { hello: 'everyone' } };
+ const mockError = { throws: new TypeError('mock me') };
+ let subscriber;
+
+ beforeEach(() => {
+ fetchMock.restore();
+ fetchMock.post('begin:/data2', makePromise(data2));
+ fetchMock.post('begin:/data', makePromise(data));
+ fetchMock.post('begin:/error', mockError);
+ fetchMock.post('begin:/apollo', makePromise(data));
+
+ fetchMock.get('begin:/data', makePromise(data));
+ fetchMock.get('begin:/data2', makePromise(data2));
+
+ const next = jest.fn();
+ const error = jest.fn();
+ const complete = jest.fn();
+
+ subscriber = {
+ next,
+ error,
+ complete,
+ };
+ });
+
+ afterEach(() => {
+ fetchMock.restore();
+ });
+
+ it('does not need any constructor arguments', () => {
+ expect(() => new HttpLink()).not.toThrow();
+ });
+
+ it('constructor creates link that can call next and then complete', done => {
+ const next = jest.fn();
+ const link = new HttpLink({ uri: '/data' });
+ const observable = execute(link, {
+ query: sampleQuery,
+ });
+ observable.subscribe({
+ next,
+        error: error => done.fail(error),
+ complete: () => {
+ expect(next).toHaveBeenCalledTimes(1);
+ done();
+ },
+ });
+ });
+
+ it('supports using a GET request', done => {
+ const variables = { params: 'stub' };
+ const extensions = { myExtension: 'foo' };
+
+ const link = createHttpLink({
+ uri: '/data',
+ fetchOptions: { method: 'GET' },
+ includeExtensions: true,
+ });
+
+ execute(link, { query: sampleQuery, variables, extensions }).subscribe({
+ next: makeCallback(done, result => {
+ const [uri, options] = fetchMock.lastCall();
+ const { method, body } = options;
+ expect(body).toBeUndefined();
+ expect(method).toBe('GET');
+ expect(uri).toBe(
+ '/data?query=query%20SampleQuery%20%7B%0A%20%20stub%20%7B%0A%20%20%20%20id%0A%20%20%7D%0A%7D%0A&operationName=SampleQuery&variables=%7B%22params%22%3A%22stub%22%7D&extensions=%7B%22myExtension%22%3A%22foo%22%7D',
+ );
+ }),
+ error: error => done.fail(error),
+ });
+ });
+
+ it('supports using a GET request with search', done => {
+ const variables = { params: 'stub' };
+
+ const link = createHttpLink({
+ uri: '/data?foo=bar',
+ fetchOptions: { method: 'GET' },
+ });
+
+ execute(link, { query: sampleQuery, variables }).subscribe({
+ next: makeCallback(done, result => {
+ const [uri, options] = fetchMock.lastCall();
+ const { method, body } = options;
+ expect(body).toBeUndefined();
+ expect(method).toBe('GET');
+ expect(uri).toBe(
+ '/data?foo=bar&query=query%20SampleQuery%20%7B%0A%20%20stub%20%7B%0A%20%20%20%20id%0A%20%20%7D%0A%7D%0A&operationName=SampleQuery&variables=%7B%22params%22%3A%22stub%22%7D',
+ );
+ }),
+ error: error => done.fail(error),
+ });
+ });
+
+ it('supports using a GET request on the context', done => {
+ const variables = { params: 'stub' };
+ const link = createHttpLink({
+ uri: '/data',
+ });
+
+ execute(link, {
+ query: sampleQuery,
+ variables,
+ context: {
+ fetchOptions: { method: 'GET' },
+ },
+ }).subscribe(
+ makeCallback(done, result => {
+ const [uri, options] = fetchMock.lastCall();
+ const { method, body } = options;
+ expect(body).toBeUndefined();
+ expect(method).toBe('GET');
+ expect(uri).toBe(
+ '/data?query=query%20SampleQuery%20%7B%0A%20%20stub%20%7B%0A%20%20%20%20id%0A%20%20%7D%0A%7D%0A&operationName=SampleQuery&variables=%7B%22params%22%3A%22stub%22%7D',
+ );
+ }),
+ );
+ });
+
+ it('uses GET with useGETForQueries', done => {
+ const variables = { params: 'stub' };
+ const link = createHttpLink({
+ uri: '/data',
+ useGETForQueries: true,
+ });
+
+ execute(link, {
+ query: sampleQuery,
+ variables,
+ }).subscribe(
+ makeCallback(done, result => {
+ const [uri, options] = fetchMock.lastCall();
+ const { method, body } = options;
+ expect(body).toBeUndefined();
+ expect(method).toBe('GET');
+ expect(uri).toBe(
+ '/data?query=query%20SampleQuery%20%7B%0A%20%20stub%20%7B%0A%20%20%20%20id%0A%20%20%7D%0A%7D%0A&operationName=SampleQuery&variables=%7B%22params%22%3A%22stub%22%7D',
+ );
+ }),
+ );
+ });
+
+ it('uses POST for mutations with useGETForQueries', done => {
+ const variables = { params: 'stub' };
+ const link = createHttpLink({
+ uri: '/data',
+ useGETForQueries: true,
+ });
+
+ execute(link, {
+ query: sampleMutation,
+ variables,
+ }).subscribe(
+ makeCallback(done, result => {
+ const [uri, options] = fetchMock.lastCall();
+ const { method, body } = options;
+ expect(body).toBeDefined();
+ expect(method).toBe('POST');
+ expect(uri).toBe('/data');
+ }),
+ );
+ });
+
+ it('should add client awareness settings to request headers', done => {
+ const variables = { params: 'stub' };
+ const link = createHttpLink({
+ uri: '/data',
+ });
+
+ const clientAwareness = {
+ name: 'Some Client Name',
+ version: '1.0.1',
+ };
+
+ execute(link, {
+ query: sampleQuery,
+ variables,
+ context: {
+ clientAwareness,
+ },
+ }).subscribe(
+ makeCallback(done, result => {
+ const [uri, options] = fetchMock.lastCall();
+ const { headers } = options;
+ expect(headers['apollographql-client-name']).toBeDefined();
+ expect(headers['apollographql-client-name']).toEqual(
+ clientAwareness.name,
+ );
+ expect(headers['apollographql-client-version']).toBeDefined();
+ expect(headers['apollographql-client-version']).toEqual(
+ clientAwareness.version,
+ );
+ }),
+ );
+ });
+
+ it('should not add empty client awareness settings to request headers', done => {
+ const variables = { params: 'stub' };
+ const link = createHttpLink({
+ uri: '/data',
+ });
+
+ const hasOwn = Object.prototype.hasOwnProperty;
+ const clientAwareness = {};
+ execute(link, {
+ query: sampleQuery,
+ variables,
+ context: {
+ clientAwareness,
+ },
+ }).subscribe(
+ makeCallback(done, result => {
+ const [uri, options] = fetchMock.lastCall();
+ const { headers } = options;
+ expect(hasOwn.call(headers, 'apollographql-client-name')).toBe(false);
+ expect(hasOwn.call(headers, 'apollographql-client-version')).toBe(
+ false,
+ );
+ }),
+ );
+ });
+
+ it("throws for GET if the variables can't be stringified", done => {
+ const link = createHttpLink({
+ uri: '/data',
+ useGETForQueries: true,
+ });
+
+ let b;
+ const a = { b };
+ b = { a };
+ a.b = b;
+ const variables = {
+ a,
+ b,
+ };
+ execute(link, { query: sampleQuery, variables }).subscribe(
+ result => {
+ done.fail('next should have been thrown from the link');
+ },
+ makeCallback(done, e => {
+ expect(e.message).toMatch(/Variables map is not serializable/);
+ expect(e.parseError.message).toMatch(
+ /Converting circular structure to JSON/,
+ );
+ }),
+ );
+ });
+
+ it("throws for GET if the extensions can't be stringified", done => {
+ const link = createHttpLink({
+ uri: '/data',
+ useGETForQueries: true,
+ includeExtensions: true,
+ });
+
+ let b;
+ const a = { b };
+ b = { a };
+ a.b = b;
+ const extensions = {
+ a,
+ b,
+ };
+ execute(link, { query: sampleQuery, extensions }).subscribe(
+ result => {
+ done.fail('next should have been thrown from the link');
+ },
+ makeCallback(done, e => {
+ expect(e.message).toMatch(/Extensions map is not serializable/);
+ expect(e.parseError.message).toMatch(
+ /Converting circular structure to JSON/,
+ );
+ }),
+ );
+ });
+
+ it('raises warning if called with concat', () => {
+ const link = createHttpLink();
+ const _warn = console.warn;
+ console.warn = warning => expect(warning['message']).toBeDefined();
+ expect(link.concat((operation, forward) => forward(operation))).toEqual(
+ link,
+ );
+ console.warn = _warn;
+ });
+
+ it('does not need any constructor arguments', () => {
+ expect(() => createHttpLink()).not.toThrow();
+ });
+
+ it('calls next and then complete', done => {
+ const next = jest.fn();
+ const link = createHttpLink({ uri: 'data' });
+ const observable = execute(link, {
+ query: sampleQuery,
+ });
+ observable.subscribe({
+ next,
+ error: error => done.fail(error),
+ complete: makeCallback(done, () => {
+ expect(next).toHaveBeenCalledTimes(1);
+ }),
+ });
+ });
+
+ it('calls error when fetch fails', done => {
+ const link = createHttpLink({ uri: 'error' });
+ const observable = execute(link, {
+ query: sampleQuery,
+ });
+ observable.subscribe(
+ result => done.fail('next should not have been called'),
+ makeCallback(done, error => {
+ expect(error).toEqual(mockError.throws);
+ }),
+ () => done.fail('complete should not have been called'),
+ );
+ });
+
+    it('calls error when fetch fails for mutations', done => {
+ const link = createHttpLink({ uri: 'error' });
+ const observable = execute(link, {
+ query: sampleMutation,
+ });
+ observable.subscribe(
+ result => done.fail('next should not have been called'),
+ makeCallback(done, error => {
+ expect(error).toEqual(mockError.throws);
+ }),
+ () => done.fail('complete should not have been called'),
+ );
+ });
+
+ it('unsubscribes without calling subscriber', done => {
+ const link = createHttpLink({ uri: 'data' });
+ const observable = execute(link, {
+ query: sampleQuery,
+ });
+ const subscription = observable.subscribe(
+ result => done.fail('next should not have been called'),
+ error => done.fail(error),
+ () => done.fail('complete should not have been called'),
+ );
+ subscription.unsubscribe();
+ expect(subscription.closed).toBe(true);
+ setTimeout(done, 50);
+ });
+
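+    // Executes a mutation through the link, verifies the serialized POST body (query, variables, optional extensions), then runs the continuation.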
+ const verifyRequest = (
+ link: ApolloLink,
+ after: () => void,
+ includeExtensions: boolean,
+ done: any,
+ ) => {
+ const next = jest.fn();
+ const context = { info: 'stub' };
+ const variables = { params: 'stub' };
+
+ const observable = execute(link, {
+ query: sampleMutation,
+ context,
+ variables,
+ });
+ observable.subscribe({
+ next,
+ error: error => done.fail(error),
+ complete: () => {
+ try {
+ let body = convertBatchedBody(fetchMock.lastCall()[1].body);
+ expect(body.query).toBe(print(sampleMutation));
+ expect(body.variables).toEqual(variables);
+ expect(body.context).not.toBeDefined();
+ if (includeExtensions) {
+ expect(body.extensions).toBeDefined();
+ } else {
+ expect(body.extensions).not.toBeDefined();
+ }
+ expect(next).toHaveBeenCalledTimes(1);
+
+ after();
+ } catch (e) {
+ done.fail(e);
+ }
+ },
+ });
+ };
+
+ it('passes all arguments to multiple fetch body including extensions', done => {
+ const link = createHttpLink({ uri: 'data', includeExtensions: true });
+ verifyRequest(
+ link,
+ () => verifyRequest(link, done, true, done),
+ true,
+ done,
+ );
+ });
+
+ it('passes all arguments to multiple fetch body excluding extensions', done => {
+ const link = createHttpLink({ uri: 'data' });
+ verifyRequest(
+ link,
+ () => verifyRequest(link, done, false, done),
+ false,
+ done,
+ );
+ });
+
+ it('calls multiple subscribers', done => {
+ const link = createHttpLink({ uri: 'data' });
+ const context = { info: 'stub' };
+ const variables = { params: 'stub' };
+
+ const observable = execute(link, {
+ query: sampleMutation,
+ context,
+ variables,
+ });
+ observable.subscribe(subscriber);
+ observable.subscribe(subscriber);
+
+ setTimeout(() => {
+ expect(subscriber.next).toHaveBeenCalledTimes(2);
+ expect(subscriber.complete).toHaveBeenCalledTimes(2);
+ expect(subscriber.error).not.toHaveBeenCalled();
+ done();
+ }, 50);
+ });
+
+ it('calls remaining subscribers after unsubscribe', done => {
+ const link = createHttpLink({ uri: 'data' });
+ const context = { info: 'stub' };
+ const variables = { params: 'stub' };
+
+ const observable = execute(link, {
+ query: sampleMutation,
+ context,
+ variables,
+ });
+
+ observable.subscribe(subscriber);
+
+ setTimeout(() => {
+ const subscription = observable.subscribe(subscriber);
+ subscription.unsubscribe();
+ }, 10);
+
+ setTimeout(
+ makeCallback(done, () => {
+ expect(subscriber.next).toHaveBeenCalledTimes(1);
+ expect(subscriber.complete).toHaveBeenCalledTimes(1);
+ expect(subscriber.error).not.toHaveBeenCalled();
+ done();
+ }),
+ 50,
+ );
+ });
+
+ it('allows for dynamic endpoint setting', done => {
+ const variables = { params: 'stub' };
+ const link = createHttpLink({ uri: 'data' });
+
+ execute(link, {
+ query: sampleQuery,
+ variables,
+ context: { uri: 'data2' },
+ }).subscribe(result => {
+ expect(result).toEqual(data2);
+ done();
+ });
+ });
+
+ it('adds headers to the request from the context', done => {
+ const variables = { params: 'stub' };
+ const middleware = new ApolloLink((operation, forward) => {
+ operation.setContext({
+ headers: { authorization: '1234' },
+ });
+ return forward(operation).map(result => {
+ const { headers } = operation.getContext();
+ try {
+ expect(headers).toBeDefined();
+ } catch (e) {
+ done.fail(e);
+ }
+ return result;
+ });
+ });
+ const link = middleware.concat(createHttpLink({ uri: 'data' }));
+
+ execute(link, { query: sampleQuery, variables }).subscribe(
+ makeCallback(done, result => {
+ const headers = fetchMock.lastCall()[1].headers;
+ expect(headers.authorization).toBe('1234');
+ expect(headers['content-type']).toBe('application/json');
+ expect(headers.accept).toBe('*/*');
+ }),
+ );
+ });
+
+ it('adds headers to the request from the setup', done => {
+ const variables = { params: 'stub' };
+ const link = createHttpLink({
+ uri: 'data',
+ headers: { authorization: '1234' },
+ });
+
+ execute(link, { query: sampleQuery, variables }).subscribe(
+ makeCallback(done, result => {
+ const headers = fetchMock.lastCall()[1].headers;
+ expect(headers.authorization).toBe('1234');
+ expect(headers['content-type']).toBe('application/json');
+ expect(headers.accept).toBe('*/*');
+ }),
+ );
+ });
+
+ it('prioritizes context headers over setup headers', done => {
+ const variables = { params: 'stub' };
+ const middleware = new ApolloLink((operation, forward) => {
+ operation.setContext({
+ headers: { authorization: '1234' },
+ });
+ return forward(operation);
+ });
+ const link = middleware.concat(
+ createHttpLink({ uri: 'data', headers: { authorization: 'no user' } }),
+ );
+
+ execute(link, { query: sampleQuery, variables }).subscribe(
+ makeCallback(done, result => {
+ const headers = fetchMock.lastCall()[1].headers;
+ expect(headers.authorization).toBe('1234');
+ expect(headers['content-type']).toBe('application/json');
+ expect(headers.accept).toBe('*/*');
+ }),
+ );
+ });
+
+ it('adds headers to the request from the context on an operation', done => {
+ const variables = { params: 'stub' };
+ const link = createHttpLink({ uri: 'data' });
+
+ const context = {
+ headers: { authorization: '1234' },
+ };
+ execute(link, {
+ query: sampleQuery,
+ variables,
+ context,
+ }).subscribe(
+ makeCallback(done, result => {
+ const headers = fetchMock.lastCall()[1].headers;
+ expect(headers.authorization).toBe('1234');
+ expect(headers['content-type']).toBe('application/json');
+ expect(headers.accept).toBe('*/*');
+ }),
+ );
+ });
+
+ it('adds creds to the request from the context', done => {
+ const variables = { params: 'stub' };
+ const middleware = new ApolloLink((operation, forward) => {
+ operation.setContext({
+ credentials: 'same-team-yo',
+ });
+ return forward(operation);
+ });
+ const link = middleware.concat(createHttpLink({ uri: 'data' }));
+
+ execute(link, { query: sampleQuery, variables }).subscribe(
+ makeCallback(done, result => {
+ const creds = fetchMock.lastCall()[1].credentials;
+ expect(creds).toBe('same-team-yo');
+ }),
+ );
+ });
+
+ it('adds creds to the request from the setup', done => {
+ const variables = { params: 'stub' };
+ const link = createHttpLink({ uri: 'data', credentials: 'same-team-yo' });
+
+ execute(link, { query: sampleQuery, variables }).subscribe(
+ makeCallback(done, result => {
+ const creds = fetchMock.lastCall()[1].credentials;
+ expect(creds).toBe('same-team-yo');
+ }),
+ );
+ });
+
+ it('prioritizes creds from the context over the setup', done => {
+ const variables = { params: 'stub' };
+ const middleware = new ApolloLink((operation, forward) => {
+ operation.setContext({
+ credentials: 'same-team-yo',
+ });
+ return forward(operation);
+ });
+ const link = middleware.concat(
+ createHttpLink({ uri: 'data', credentials: 'error' }),
+ );
+
+ execute(link, { query: sampleQuery, variables }).subscribe(
+ makeCallback(done, result => {
+ const creds = fetchMock.lastCall()[1].credentials;
+ expect(creds).toBe('same-team-yo');
+ }),
+ );
+ });
+
+ it('adds uri to the request from the context', done => {
+ const variables = { params: 'stub' };
+ const middleware = new ApolloLink((operation, forward) => {
+ operation.setContext({
+ uri: 'data',
+ });
+ return forward(operation);
+ });
+ const link = middleware.concat(createHttpLink());
+
+ execute(link, { query: sampleQuery, variables }).subscribe(
+ makeCallback(done, result => {
+ const uri = fetchMock.lastUrl();
+ expect(uri).toBe('/data');
+ }),
+ );
+ });
+
+ it('adds uri to the request from the setup', done => {
+ const variables = { params: 'stub' };
+ const link = createHttpLink({ uri: 'data' });
+
+ execute(link, { query: sampleQuery, variables }).subscribe(
+ makeCallback(done, result => {
+ const uri = fetchMock.lastUrl();
+ expect(uri).toBe('/data');
+ }),
+ );
+ });
+
+ it('prioritizes context uri over setup uri', done => {
+ const variables = { params: 'stub' };
+ const middleware = new ApolloLink((operation, forward) => {
+ operation.setContext({
+ uri: 'apollo',
+ });
+ return forward(operation);
+ });
+ const link = middleware.concat(
+ createHttpLink({ uri: 'data', credentials: 'error' }),
+ );
+
+ execute(link, { query: sampleQuery, variables }).subscribe(
+ makeCallback(done, result => {
+ const uri = fetchMock.lastUrl();
+
+ expect(uri).toBe('/apollo');
+ }),
+ );
+ });
+
+ it('allows uri to be a function', done => {
+ const variables = { params: 'stub' };
+ const customFetch = (uri, options) => {
+ const { operationName } = convertBatchedBody(options.body);
+ try {
+ expect(operationName).toBe('SampleQuery');
+ } catch (e) {
+ done.fail(e);
+ }
+ return fetch('dataFunc', options);
+ };
+
+ const link = createHttpLink({ fetch: customFetch });
+
+ execute(link, { query: sampleQuery, variables }).subscribe(
+ makeCallback(done, result => {
+ const uri = fetchMock.lastUrl();
+ expect(fetchMock.lastUrl()).toBe('/dataFunc');
+ }),
+ );
+ });
+
+ it('adds fetchOptions to the request from the setup', done => {
+ const variables = { params: 'stub' };
+ const link = createHttpLink({
+ uri: 'data',
+ fetchOptions: { someOption: 'foo', mode: 'no-cors' },
+ });
+
+ execute(link, { query: sampleQuery, variables }).subscribe(
+ makeCallback(done, result => {
+ const { someOption, mode, headers } = fetchMock.lastCall()[1];
+ expect(someOption).toBe('foo');
+ expect(mode).toBe('no-cors');
+ expect(headers['content-type']).toBe('application/json');
+ }),
+ );
+ });
+
+ it('adds fetchOptions to the request from the context', done => {
+ const variables = { params: 'stub' };
+ const middleware = new ApolloLink((operation, forward) => {
+ operation.setContext({
+ fetchOptions: {
+ someOption: 'foo',
+ },
+ });
+ return forward(operation);
+ });
+ const link = middleware.concat(createHttpLink({ uri: 'data' }));
+
+ execute(link, { query: sampleQuery, variables }).subscribe(
+ makeCallback(done, result => {
+ const { someOption } = fetchMock.lastCall()[1];
+ expect(someOption).toBe('foo');
+ done();
+ }),
+ );
+ });
+
+ it('prioritizes context over setup', done => {
+ const variables = { params: 'stub' };
+ const middleware = new ApolloLink((operation, forward) => {
+ operation.setContext({
+ fetchOptions: {
+ someOption: 'foo',
+ },
+ });
+ return forward(operation);
+ });
+ const link = middleware.concat(
+ createHttpLink({ uri: 'data', fetchOptions: { someOption: 'bar' } }),
+ );
+
+ execute(link, { query: sampleQuery, variables }).subscribe(
+ makeCallback(done, result => {
+ const { someOption } = fetchMock.lastCall()[1];
+ expect(someOption).toBe('foo');
+ }),
+ );
+ });
+
+ it('allows for not sending the query with the request', done => {
+ const variables = { params: 'stub' };
+ const middleware = new ApolloLink((operation, forward) => {
+ operation.setContext({
+ http: {
+ includeQuery: false,
+ includeExtensions: true,
+ },
+ });
+ operation.extensions.persistedQuery = { hash: '1234' };
+ return forward(operation);
+ });
+ const link = middleware.concat(createHttpLink({ uri: 'data' }));
+
+ execute(link, { query: sampleQuery, variables }).subscribe(
+ makeCallback(done, result => {
+ let body = convertBatchedBody(fetchMock.lastCall()[1].body);
+
+ expect(body.query).not.toBeDefined();
+ expect(body.extensions).toEqual({ persistedQuery: { hash: '1234' } });
+ done();
+ }),
+ );
+ });
+
+ it('sets the raw response on context', done => {
+ const middleware = new ApolloLink((operation, forward) => {
+ return new Observable(ob => {
+ const op = forward(operation);
+ const sub = op.subscribe({
+ next: ob.next.bind(ob),
+ error: ob.error.bind(ob),
+ complete: makeCallback(done, e => {
+            expect(operation.getContext().response.headers).toBeDefined();
+ ob.complete();
+ }),
+ });
+
+ return () => {
+ sub.unsubscribe();
+ };
+ });
+ });
+
+ const link = middleware.concat(createHttpLink({ uri: 'data', fetch }));
+
+ execute(link, { query: sampleQuery }).subscribe(
+ result => {
+ done();
+ },
+ () => {},
+ );
+ });
+ });
+
+ describe('Dev warnings', () => {
+ let oldFetch;
+ beforeEach(() => {
+ oldFetch = window.fetch;
+ delete window.fetch;
+ });
+
+ afterEach(() => {
+ window.fetch = oldFetch;
+ });
+
+ it('warns if fetch is undeclared', done => {
+ try {
+ const link = createHttpLink({ uri: 'data' });
+ done.fail("warning wasn't called");
+ } catch (e) {
+ makeCallback(done, () =>
+ expect(e.message).toMatch(/has not been found globally/),
+ )();
+ }
+ });
+
+ it('warns if fetch is undefined', done => {
+ window.fetch = undefined;
+ try {
+ const link = createHttpLink({ uri: 'data' });
+ done.fail("warning wasn't called");
+ } catch (e) {
+ makeCallback(done, () =>
+ expect(e.message).toMatch(/has not been found globally/),
+ )();
+ }
+ });
+
+ it('does not warn if fetch is undeclared but a fetch is passed', () => {
+ expect(() => {
+ const link = createHttpLink({ uri: 'data', fetch: () => {} });
+ }).not.toThrow();
+ });
+ });
+
+ describe('Error handling', () => {
+ let responseBody;
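+    // Mock Response#text implementations that record the body they resolve with, so assertions can compare against it.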
+ const text = jest.fn(() => {
+ const responseBodyText = '{}';
+ responseBody = JSON.parse(responseBodyText);
+ return Promise.resolve(responseBodyText);
+ });
+ const textWithData = jest.fn(() => {
+ responseBody = {
+ data: { stub: { id: 1 } },
+ errors: [{ message: 'dangit' }],
+ };
+
+ return Promise.resolve(JSON.stringify(responseBody));
+ });
+
+ const textWithErrors = jest.fn(() => {
+ responseBody = {
+ errors: [{ message: 'dangit' }],
+ };
+
+ return Promise.resolve(JSON.stringify(responseBody));
+ });
+ const fetch = jest.fn((uri, options) => {
+ return Promise.resolve({ text });
+ });
+ beforeEach(() => {
+ fetch.mockReset();
+ });
+    it('makes it easy to handle a 401', done => {
+ const middleware = new ApolloLink((operation, forward) => {
+ return new Observable(ob => {
+ fetch.mockReturnValueOnce(Promise.resolve({ status: 401, text }));
+ const op = forward(operation);
+ const sub = op.subscribe({
+ next: ob.next.bind(ob),
+ error: makeCallback(done, e => {
+ expect(e.message).toMatch(/Received status code 401/);
+ expect(e.statusCode).toEqual(401);
+ ob.error(e);
+ }),
+ complete: ob.complete.bind(ob),
+ });
+
+ return () => {
+ sub.unsubscribe();
+ };
+ });
+ });
+
+ const link = middleware.concat(createHttpLink({ uri: 'data', fetch }));
+
+ execute(link, { query: sampleQuery }).subscribe(
+ result => {
+ done.fail('next should have been thrown from the network');
+ },
+ () => {},
+ );
+ });
+
+ it('throws an error if response code is > 300', done => {
+ fetch.mockReturnValueOnce(Promise.resolve({ status: 400, text }));
+ const link = createHttpLink({ uri: 'data', fetch });
+
+ execute(link, { query: sampleQuery }).subscribe(
+ result => {
+ done.fail('next should have been thrown from the network');
+ },
+ makeCallback(done, e => {
+ expect(e.message).toMatch(/Received status code 400/);
+ expect(e.statusCode).toBe(400);
+ expect(e.result).toEqual(responseBody);
+ }),
+ );
+ });
+ it('throws an error if response code is > 300 and returns data', done => {
+ fetch.mockReturnValueOnce(
+ Promise.resolve({ status: 400, text: textWithData }),
+ );
+
+ const link = createHttpLink({ uri: 'data', fetch });
+
+ let called = false;
+
+ execute(link, { query: sampleQuery }).subscribe(
+ result => {
+ called = true;
+ expect(result).toEqual(responseBody);
+ },
+ e => {
+ expect(called).toBe(true);
+ expect(e.message).toMatch(/Received status code 400/);
+ expect(e.statusCode).toBe(400);
+ expect(e.result).toEqual(responseBody);
+ done();
+ },
+ );
+ });
+ it('throws an error if only errors are returned', done => {
+ fetch.mockReturnValueOnce(
+ Promise.resolve({ status: 400, text: textWithErrors }),
+ );
+
+ const link = createHttpLink({ uri: 'data', fetch });
+
+ let called = false;
+
+ execute(link, { query: sampleQuery }).subscribe(
+ result => {
+ done.fail('should not have called result because we have no data');
+ },
+ e => {
+ expect(e.message).toMatch(/Received status code 400/);
+ expect(e.statusCode).toBe(400);
+ expect(e.result).toEqual(responseBody);
+ done();
+ },
+ );
+ });
+    it('throws an error if the response from the server is empty', done => {
+ fetch.mockReturnValueOnce(Promise.resolve({ text }));
+ text.mockReturnValueOnce(Promise.resolve('{ "body": "boo" }'));
+ const link = createHttpLink({ uri: 'data', fetch });
+
+ execute(link, { query: sampleQuery }).subscribe(
+ result => {
+ done.fail('next should have been thrown from the network');
+ },
+ makeCallback(done, e => {
+ expect(e.message).toMatch(
+ /Server response was missing for query 'SampleQuery'/,
+ );
+ }),
+ );
+ });
+ it("throws if the body can't be stringified", done => {
+ fetch.mockReturnValueOnce(Promise.resolve({ data: {}, text }));
+ const link = createHttpLink({ uri: 'data', fetch });
+
+ let b;
+ const a = { b };
+ b = { a };
+ a.b = b;
+ const variables = {
+ a,
+ b,
+ };
+ execute(link, { query: sampleQuery, variables }).subscribe(
+ result => {
+ done.fail('next should have been thrown from the link');
+ },
+ makeCallback(done, e => {
+ expect(e.message).toMatch(/Payload is not serializable/);
+ expect(e.parseError.message).toMatch(
+ /Converting circular structure to JSON/,
+ );
+ }),
+ );
+ });
+ it('supports being cancelled and does not throw', done => {
+ let called;
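+      // Minimal AbortController stand-in that records whether abort() was invoked.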
+ class AbortController {
+ signal: {};
+ abort = () => {
+ called = true;
+ };
+ }
+
+ global.AbortController = AbortController;
+
+ fetch.mockReturnValueOnce(Promise.resolve({ text }));
+ text.mockReturnValueOnce(
+ Promise.resolve('{ "data": { "hello": "world" } }'),
+ );
+
+ const link = createHttpLink({ uri: 'data', fetch });
+
+ const sub = execute(link, { query: sampleQuery }).subscribe({
+ next: result => {
+ done.fail('result should not have been called');
+ },
+ error: e => {
+ done.fail(e);
+ },
+ complete: () => {
+ done.fail('complete should not have been called');
+ },
+ });
+ sub.unsubscribe();
+
+ setTimeout(
+ makeCallback(done, () => {
+ delete global.AbortController;
+ expect(called).toBe(true);
+ fetch.mockReset();
+ text.mockReset();
+ }),
+ 150,
+ );
+ });
+
+ const body = '{';
+ const unparsableJson = jest.fn(() => Promise.resolve(body));
+ it('throws an error if response is unparsable', done => {
+ fetch.mockReturnValueOnce(
+ Promise.resolve({ status: 400, text: unparsableJson }),
+ );
+ const link = createHttpLink({ uri: 'data', fetch });
+
+ execute(link, { query: sampleQuery }).subscribe(
+ result => {
+ done.fail('next should have been thrown from the network');
+ },
+ makeCallback(done, e => {
+ expect(e.message).toMatch(/JSON/);
+ expect(e.statusCode).toBe(400);
+ expect(e.response).toBeDefined();
+ expect(e.bodyText).toBe(body);
+ }),
+ );
+ });
+ });
+});
diff --git a/src/link/http/__tests__/checkFetcher.ts b/src/link/http/__tests__/checkFetcher.ts
new file mode 100644
--- /dev/null
+++ b/src/link/http/__tests__/checkFetcher.ts
@@ -0,0 +1,23 @@
+import { checkFetcher } from '../checkFetcher';
+
+describe('checkFetcher', () => {
+ let oldFetch;
+ beforeEach(() => {
+ oldFetch = window.fetch;
+ delete window.fetch;
+ });
+
+ afterEach(() => {
+ window.fetch = oldFetch;
+ });
+
+ it('throws if no fetch is present', () => {
+ expect(() => checkFetcher(undefined)).toThrow(
+ /has not been found globally/,
+ );
+ });
+
+  it('does not throw if no fetch is present but a fetch is passed', () => {
+ expect(() => checkFetcher(() => {})).not.toThrow();
+ });
+});
diff --git a/src/link/http/__tests__/parseAndCheckHttpResponse.ts b/src/link/http/__tests__/parseAndCheckHttpResponse.ts
new file mode 100644
--- /dev/null
+++ b/src/link/http/__tests__/parseAndCheckHttpResponse.ts
@@ -0,0 +1,91 @@
+import gql from 'graphql-tag';
+import fetchMock from 'fetch-mock';
+
+import { createOperation } from '../../utils/createOperation';
+import { parseAndCheckHttpResponse } from '../parseAndCheckHttpResponse';
+
+const query = gql`
+ query SampleQuery {
+ stub {
+ id
+ }
+ }
+`;
+
+describe('parseAndCheckHttpResponse', () => {
+ beforeEach(() => {
+ fetchMock.restore();
+ });
+
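+  // parseAndCheckHttpResponse uses the operation names when constructing
+  // server error messages.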
+ const operations = [createOperation({}, { query })];
+
+ it('throws a parse error with a status code on unparsable response', done => {
+ const status = 400;
+ fetchMock.mock('begin:/error', status);
+ fetch('error')
+ .then(parseAndCheckHttpResponse(operations))
+ .then(done.fail)
+ .catch(e => {
+ expect(e.statusCode).toBe(status);
+ expect(e.name).toBe('ServerParseError');
+ expect(e).toHaveProperty('response');
+ expect(e).toHaveProperty('bodyText');
+ done();
+ })
+ .catch(done.fail);
+ });
+
+ it('throws a network error with a status code and result', done => {
+ const status = 403;
+    const body = { data: 'fail' }; // the 403 status alone triggers the ServerError
+ fetchMock.mock('begin:/error', {
+ body,
+ status,
+ });
+ fetch('error')
+ .then(parseAndCheckHttpResponse(operations))
+ .then(done.fail)
+ .catch(e => {
+ expect(e.statusCode).toBe(status);
+ expect(e.name).toBe('ServerError');
+ expect(e).toHaveProperty('response');
+ expect(e).toHaveProperty('result');
+ done();
+ })
+ .catch(done.fail);
+ });
+
+ it('throws a server error on incorrect data', done => {
+    const data = { hello: 'world' }; // contains neither data nor errors
+ fetchMock.mock('begin:/incorrect', data);
+ fetch('incorrect')
+ .then(parseAndCheckHttpResponse(operations))
+ .then(done.fail)
+ .catch(e => {
+ expect(e.statusCode).toBe(200);
+ expect(e.name).toBe('ServerError');
+ expect(e).toHaveProperty('response');
+ expect(e.result).toEqual(data);
+ done();
+ })
+ .catch(done.fail);
+ });
+
+ it('is able to return a correct GraphQL result', done => {
+ const errors = ['', '' + new Error('hi')];
+ const data = { data: { hello: 'world' }, errors };
+
+ fetchMock.mock('begin:/data', {
+ body: data,
+ });
+ fetch('data')
+ .then(parseAndCheckHttpResponse(operations))
+ .then(({ data, errors: e }) => {
+ expect(data).toEqual({ hello: 'world' });
+ expect(e.length).toEqual(errors.length);
+ expect(e).toEqual(errors);
+ done();
+ })
+ .catch(done.fail);
+ });
+});
diff --git a/src/link/http/__tests__/selectHttpOptionsAndBody.ts b/src/link/http/__tests__/selectHttpOptionsAndBody.ts
new file mode 100644
--- /dev/null
+++ b/src/link/http/__tests__/selectHttpOptionsAndBody.ts
@@ -0,0 +1,91 @@
+import gql from 'graphql-tag';
+
+import { createOperation } from '../../utils/createOperation';
+import {
+ selectHttpOptionsAndBody,
+ fallbackHttpConfig,
+} from '../selectHttpOptionsAndBody';
+
+const query = gql`
+ query SampleQuery {
+ stub {
+ id
+ }
+ }
+`;
+
+describe('selectHttpOptionsAndBody', () => {
+ it('includeQuery allows the query to be ignored', () => {
+ const { options, body } = selectHttpOptionsAndBody(
+ createOperation({}, { query }),
+ { http: { includeQuery: false } },
+ );
+ expect(body).not.toHaveProperty('query');
+ });
+
+ it('includeExtensions allows the extensions to be added', () => {
+ const extensions = { yo: 'what up' };
+ const { options, body } = selectHttpOptionsAndBody(
+ createOperation({}, { query, extensions }),
+ { http: { includeExtensions: true } },
+ );
+ expect(body).toHaveProperty('extensions');
+ expect((body as any).extensions).toEqual(extensions);
+ });
+
+ it('the fallbackConfig is used if no other configs are specified', () => {
+ const defaultHeaders = {
+ accept: '*/*',
+ 'content-type': 'application/json',
+ };
+
+ const defaultOptions = {
+ method: 'POST',
+ };
+
+ const extensions = { yo: 'what up' };
+ const { options, body } = selectHttpOptionsAndBody(
+ createOperation({}, { query, extensions }),
+ fallbackHttpConfig,
+ );
+
+ expect(body).toHaveProperty('query');
+ expect(body).not.toHaveProperty('extensions');
+
+ expect(options.headers).toEqual(defaultHeaders);
+ expect(options.method).toEqual(defaultOptions.method);
+ });
+
+  it('applies custom headers, credentials, and options, falling back to the default method', () => {
+ const headers = {
+ accept: 'application/json',
+ 'content-type': 'application/graphql',
+ };
+
+ const credentials = {
+ 'X-Secret': 'djmashko',
+ };
+
+ const opts = {
+ opt: 'hi',
+ };
+
+ const config = { headers, credentials, options: opts };
+
+ const extensions = { yo: 'what up' };
+
+ const { options, body } = selectHttpOptionsAndBody(
+ createOperation({}, { query, extensions }),
+ fallbackHttpConfig,
+ config,
+ );
+
+ expect(body).toHaveProperty('query');
+ expect(body).not.toHaveProperty('extensions');
+
+ expect(options.headers).toEqual(headers);
+ expect(options.credentials).toEqual(credentials);
+ expect(options.opt).toEqual('hi');
+ expect(options.method).toEqual('POST'); //from default
+ });
+});
diff --git a/src/link/http/__tests__/selectURI.ts b/src/link/http/__tests__/selectURI.ts
new file mode 100644
--- /dev/null
+++ b/src/link/http/__tests__/selectURI.ts
@@ -0,0 +1,32 @@
+import gql from 'graphql-tag';
+
+import { createOperation } from '../../utils/createOperation';
+import { selectURI } from '../selectURI';
+
+const query = gql`
+ query SampleQuery {
+ stub {
+ id
+ }
+ }
+`;
+
+describe('selectURI', () => {
+ it('returns a passed in string', () => {
+ const uri = '/somewhere';
+ const operation = createOperation({ uri }, { query });
+ expect(selectURI(operation)).toEqual(uri);
+ });
+
+ it('returns a fallback of /graphql', () => {
+ const uri = '/graphql';
+ const operation = createOperation({}, { query });
+ expect(selectURI(operation)).toEqual(uri);
+ });
+
+ it('returns the result of a UriFunction', () => {
+ const uri = '/somewhere';
+ const operation = createOperation({}, { query });
+ expect(selectURI(operation, () => uri)).toEqual(uri);
+ });
+});
diff --git a/src/link/http/__tests__/serializeFetchParameter.ts b/src/link/http/__tests__/serializeFetchParameter.ts
new file mode 100644
--- /dev/null
+++ b/src/link/http/__tests__/serializeFetchParameter.ts
@@ -0,0 +1,17 @@
+import { serializeFetchParameter } from '../serializeFetchParameter';
+
+describe('serializeFetchParameter', () => {
+ it('throws a parse error on an unparsable body', () => {
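+    // a and b reference each other, so JSON.stringify throws a
+    // circular-structure error.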
+ const b = {};
+ const a = { b };
+ (b as any).a = a;
+
+ expect(() => serializeFetchParameter(b, 'Label')).toThrow(/Label/);
+ });
+
+ it('returns a correctly parsed body', () => {
+ const body = { no: 'thing' };
+
+ expect(serializeFetchParameter(body, 'Label')).toEqual('{"no":"thing"}');
+ });
+});
diff --git a/src/link/utils/__tests__/fromError.ts b/src/link/utils/__tests__/fromError.ts
new file mode 100644
--- /dev/null
+++ b/src/link/utils/__tests__/fromError.ts
@@ -0,0 +1,12 @@
+import { toPromise } from '../toPromise';
+import { fromError } from '../fromError';
+
+describe('fromError', () => {
+  it('acts as an error call', () => {
+ const error = new Error('I always error');
+ const observable = fromError(error);
+ return toPromise(observable)
+      .then(() => { throw new Error('promise should have been rejected'); })
+      .catch(actualError => expect(actualError).toEqual(error));
+ });
+});
diff --git a/src/link/utils/__tests__/fromPromise.ts b/src/link/utils/__tests__/fromPromise.ts
new file mode 100644
--- /dev/null
+++ b/src/link/utils/__tests__/fromPromise.ts
@@ -0,0 +1,25 @@
+import { fromPromise } from '../fromPromise';
+import { toPromise } from '../toPromise';
+
+describe('fromPromise', () => {
+ const data = {
+ data: {
+ hello: 'world',
+ },
+ };
+ const error = new Error('I always error');
+
+  it('returns the next call as a Promise resolution', () => {
+ const observable = fromPromise(Promise.resolve(data));
+ return toPromise(observable).then(result =>
+ expect(data).toEqual(result),
+ );
+ });
+
+  it('returns a Promise rejection as an error call', () => {
+ const observable = fromPromise(Promise.reject(error));
+ return toPromise(observable)
+      .then(() => { throw new Error('promise should have been rejected'); })
+      .catch(actualError => expect(actualError).toEqual(error));
+ });
+});
diff --git a/src/link/utils/__tests__/toPromise.ts b/src/link/utils/__tests__/toPromise.ts
new file mode 100644
--- /dev/null
+++ b/src/link/utils/__tests__/toPromise.ts
@@ -0,0 +1,46 @@
+import { Observable } from '../../../utilities/observables/Observable';
+import { toPromise } from '../toPromise';
+import { fromError } from '../fromError';
+
+describe('toPromise', () => {
+ const data = {
+ data: {
+ hello: 'world',
+ },
+ };
+ const error = new Error('I always error');
+
+  it('returns the next call as a Promise resolution', () => {
+ return toPromise(Observable.of(data)).then(result =>
+ expect(data).toEqual(result),
+ );
+ });
+
+  it('returns an error call as a Promise rejection', () => {
+ return toPromise(fromError(error))
+      .then(() => { throw new Error('promise should have been rejected'); })
+      .catch(actualError => expect(actualError).toEqual(error));
+ });
+
+ describe('warnings', () => {
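+    // toPromise only supports a single emitted value and warns on extras,
+    // so console.warn is stubbed out to observe the warning.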
+ const spy = jest.fn();
+ let _warn: (message?: any, ...originalParams: any[]) => void;
+
+ beforeEach(() => {
+ _warn = console.warn;
+ console.warn = spy;
+ });
+
+ afterEach(() => {
+ console.warn = _warn;
+ });
+
+    it('warns when the observable emits more than one result', done => {
+ toPromise(Observable.of(data, data)).then(result => {
+ expect(data).toEqual(result);
+ expect(spy).toHaveBeenCalled();
+ done();
+ });
+ });
+ });
+});
diff --git a/src/link/utils/__tests__/validateOperation.ts b/src/link/utils/__tests__/validateOperation.ts
new file mode 100644
--- /dev/null
+++ b/src/link/utils/__tests__/validateOperation.ts
@@ -0,0 +1,17 @@
+import { validateOperation } from '../validateOperation';
+
+describe('validateOperation', () => {
+  it('should throw when an invalid field is present in the operation', () => {
+ expect(() => validateOperation(<any>{ qwerty: '' })).toThrow();
+ });
+
+  it('should not throw when only valid fields are present in the operation', () => {
+ expect(() =>
+ validateOperation({
+ query: '1234',
+ context: {},
+ variables: {},
+ }),
+ ).not.toThrow();
+ });
+});
diff --git a/src/react/context/__tests__/ApolloConsumer.test.tsx b/src/react/context/__tests__/ApolloConsumer.test.tsx
new file mode 100644
--- /dev/null
+++ b/src/react/context/__tests__/ApolloConsumer.test.tsx
@@ -0,0 +1,69 @@
+import { render, cleanup } from '@testing-library/react';
+
+import { ApolloLink } from '../../../link/core/ApolloLink';
+import { ApolloClient } from '../../../ApolloClient';
+import { InMemoryCache as Cache } from '../../../cache/inmemory/inMemoryCache';
+import { ApolloProvider } from '../ApolloProvider';
+import { ApolloConsumer } from '../ApolloConsumer';
+import { getApolloContext } from '../ApolloContext';
+import { requireReactLazily } from '../../react';
+
+const React = requireReactLazily();
+
+const client = new ApolloClient({
+ cache: new Cache(),
+ link: new ApolloLink((o, f) => (f ? f(o) : null))
+});
+
+describe('<ApolloConsumer /> component', () => {
+ afterEach(cleanup);
+
+ it('has a render prop', done => {
+ render(
+ <ApolloProvider client={client}>
+ <ApolloConsumer>
+ {clientRender => {
+ try {
+ expect(clientRender).toBe(client);
+ done();
+ } catch (e) {
+ done.fail(e);
+ }
+ return null;
+ }}
+ </ApolloConsumer>
+ </ApolloProvider>
+ );
+ });
+
+ it('renders the content in the children prop', () => {
+ const { getByText } = render(
+ <ApolloProvider client={client}>
+ <ApolloConsumer>{() => <div>Test</div>}</ApolloConsumer>
+ </ApolloProvider>
+ );
+
+ expect(getByText('Test')).toBeTruthy();
+ });
+
+ it('errors if there is no client in the context', () => {
+ // Prevent Error about missing context type from appearing in the console.
+ const errorLogger = console.error;
+ console.error = () => {};
+ expect(() => {
+ // We're wrapping the `ApolloConsumer` component in a
+ // `ApolloContext.Provider` component, to reset the context before
+ // testing.
+ const ApolloContext = getApolloContext();
+ render(
+ <ApolloContext.Provider value={{}}>
+ <ApolloConsumer>{() => null}</ApolloConsumer>
+ </ApolloContext.Provider>
+ );
+ }).toThrowError(
+ 'Could not find "client" in the context of ApolloConsumer. Wrap the root component in an <ApolloProvider>'
+ );
+
+ console.error = errorLogger;
+ });
+});
diff --git a/src/react/context/__tests__/ApolloProvider.test.tsx b/src/react/context/__tests__/ApolloProvider.test.tsx
new file mode 100644
--- /dev/null
+++ b/src/react/context/__tests__/ApolloProvider.test.tsx
@@ -0,0 +1,149 @@
+import { render, cleanup } from '@testing-library/react';
+
+import { ApolloLink } from '../../../link/core/ApolloLink';
+import { ApolloClient } from '../../../ApolloClient';
+import { InMemoryCache as Cache } from '../../../cache/inmemory/inMemoryCache';
+import { ApolloProvider } from '../ApolloProvider';
+import { getApolloContext } from '../ApolloContext';
+import { requireReactLazily } from '../../react';
+
+const React = requireReactLazily();
+const { useContext } = React;
+
+describe('<ApolloProvider /> Component', () => {
+ afterEach(cleanup);
+
+ const client = new ApolloClient({
+ cache: new Cache(),
+ link: new ApolloLink((o, f) => (f ? f(o) : null))
+ });
+
+ class Child extends React.Component<any, { store: any; client: any }> {
+ static contextType = getApolloContext();
+
+ componentDidUpdate() {
+ if (this.props.data) this.props.data.refetch();
+ }
+
+ render() {
+ return null;
+ }
+ }
+
+ interface Props {
+ client: ApolloClient<any>;
+ }
+
+ class Container extends React.Component<Props, any> {
+ constructor(props: Props) {
+ super(props);
+ this.state = {};
+ }
+
+ componentDidMount() {
+ this.setState({
+ client: this.props.client
+ });
+ }
+
+ render() {
+ return (
+ <ApolloProvider client={this.state.client || this.props.client}>
+ <Child />
+ </ApolloProvider>
+ );
+ }
+ }
+
+ it('should render children components', () => {
+ const { getByText } = render(
+ <ApolloProvider client={client}>
+ <div className="unique">Test</div>
+ </ApolloProvider>
+ );
+
+ expect(getByText('Test')).toBeTruthy();
+ });
+
+  it('should support a 2.0-style client object', () => {
+ const { getByText } = render(
+ <ApolloProvider client={{} as ApolloClient<any>}>
+ <div className="unique">Test</div>
+ </ApolloProvider>
+ );
+
+ expect(getByText('Test')).toBeTruthy();
+ });
+
+ it('should require a client', () => {
+ const originalConsoleError = console.error;
+ console.error = () => {
+ /* noop */
+ };
+ expect(() => {
+ // Before testing `ApolloProvider`, we first fully reset the
+ // existing context using `ApolloContext.Provider` directly.
+ const ApolloContext = getApolloContext();
+ render(
+ <ApolloContext.Provider value={{}}>
+ <ApolloProvider client={undefined as any}>
+ <div className="unique" />
+ </ApolloProvider>
+ </ApolloContext.Provider>
+ );
+ }).toThrowError(
+ 'ApolloProvider was not passed a client instance. Make ' +
+ 'sure you pass in your client via the "client" prop.'
+ );
+ console.error = originalConsoleError;
+ });
+
+ it('should not require a store', () => {
+ const { getByText } = render(
+ <ApolloProvider client={client}>
+ <div className="unique">Test</div>
+ </ApolloProvider>
+ );
+ expect(getByText('Test')).toBeTruthy();
+ });
+
+ it('should add the client to the children context', () => {
+ const TestChild = () => {
+ const context = useContext(getApolloContext());
+ expect(context.client).toEqual(client);
+ return null;
+ };
+ render(
+ <ApolloProvider client={client}>
+ <TestChild />
+ <TestChild />
+ </ApolloProvider>
+ );
+ });
+
+  it('should update the context when the client prop changes', () => {
+ let clientToCheck = client;
+
+ const TestChild = () => {
+ const context = useContext(getApolloContext());
+ expect(context.client).toEqual(clientToCheck);
+ return null;
+ };
+ const { rerender } = render(
+ <ApolloProvider client={clientToCheck}>
+ <TestChild />
+ </ApolloProvider>
+ );
+
+ const newClient = new ApolloClient({
+ cache: new Cache(),
+ link: new ApolloLink((o, f) => (f ? f(o) : null))
+ });
+ clientToCheck = newClient;
+ rerender(
+ <ApolloProvider client={clientToCheck}>
+ <TestChild />
+ </ApolloProvider>
+ );
+ });
+});
diff --git a/src/react/hooks/__tests__/useApolloClient.test.tsx b/src/react/hooks/__tests__/useApolloClient.test.tsx
new file mode 100644
--- /dev/null
+++ b/src/react/hooks/__tests__/useApolloClient.test.tsx
@@ -0,0 +1,46 @@
+import { render, cleanup } from '@testing-library/react';
+import { InvariantError } from 'ts-invariant';
+
+import { ApolloLink } from '../../../link/core/ApolloLink';
+import { ApolloProvider } from '../../context/ApolloProvider';
+import { ApolloClient } from '../../../ApolloClient';
+import { InMemoryCache } from '../../../cache/inmemory/inMemoryCache';
+import { useApolloClient } from '../useApolloClient';
+import { resetApolloContext } from '../../context/ApolloContext';
+import { requireReactLazily } from '../../react';
+
+const React = requireReactLazily();
+
+describe('useApolloClient Hook', () => {
+ afterEach(() => {
+ cleanup();
+ resetApolloContext();
+ });
+
+ it('should return a client instance from the context if available', () => {
+ const client = new ApolloClient({
+ cache: new InMemoryCache(),
+ link: ApolloLink.empty()
+ });
+
+ function App() {
+ expect(useApolloClient()).toEqual(client);
+ return null;
+ }
+
+ render(
+ <ApolloProvider client={client}>
+ <App />
+ </ApolloProvider>
+ );
+ });
+
+ it("should error if a client instance can't be found in the context", () => {
+ function App() {
+ expect(() => useApolloClient()).toThrow(InvariantError);
+ return null;
+ }
+
+ render(<App />);
+ });
+});
diff --git a/src/react/hooks/__tests__/useLazyQuery.test.tsx b/src/react/hooks/__tests__/useLazyQuery.test.tsx
new file mode 100644
--- /dev/null
+++ b/src/react/hooks/__tests__/useLazyQuery.test.tsx
@@ -0,0 +1,396 @@
+import { DocumentNode } from 'graphql';
+import gql from 'graphql-tag';
+import { render, wait } from '@testing-library/react';
+
+import { MockedProvider } from '../../../utilities/testing';
+import { ApolloClient } from '../../../ApolloClient';
+import { InMemoryCache } from '../../../cache/inmemory/inMemoryCache';
+import { ApolloProvider } from '../../context/ApolloProvider';
+import { useLazyQuery } from '../useLazyQuery';
+import { requireReactLazily } from '../../react';
+
+const React = requireReactLazily();
+
+describe('useLazyQuery Hook', () => {
+ const CAR_QUERY: DocumentNode = gql`
+ query {
+ cars {
+ make
+ model
+ vin
+ }
+ }
+ `;
+
+ const CAR_RESULT_DATA = {
+ cars: [
+ {
+ make: 'Audi',
+ model: 'RS8',
+ vin: 'DOLLADOLLABILL',
+ __typename: 'Car'
+ }
+ ]
+ };
+
+ const CAR_MOCKS = [
+ {
+ request: {
+ query: CAR_QUERY
+ },
+ result: { data: CAR_RESULT_DATA }
+ }
+ ];
+
+ it('should hold query execution until manually triggered', async () => {
+ let renderCount = 0;
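+    // Each render is checked against the expected sequence:
+    // idle -> loading -> data loaded.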
+ const Component = () => {
+ const [execute, { loading, data }] = useLazyQuery(CAR_QUERY);
+ switch (renderCount) {
+ case 0:
+ expect(loading).toEqual(false);
+ setTimeout(() => {
+ execute();
+ });
+ break;
+ case 1:
+ expect(loading).toEqual(true);
+ break;
+ case 2:
+ expect(loading).toEqual(false);
+ expect(data).toEqual(CAR_RESULT_DATA);
+ break;
+ default: // Do nothing
+ }
+ renderCount += 1;
+ return null;
+ };
+
+ render(
+ <MockedProvider mocks={CAR_MOCKS}>
+ <Component />
+ </MockedProvider>
+ );
+
+ return wait(() => {
+ expect(renderCount).toBe(3);
+ });
+ });
+
+ it('should set `called` to false by default', () => {
+ const Component = () => {
+ const [, { loading, called }] = useLazyQuery(CAR_QUERY);
+ expect(loading).toBeFalsy();
+ expect(called).toBeFalsy();
+ return null;
+ };
+
+ render(
+ <MockedProvider mocks={CAR_MOCKS}>
+ <Component />
+ </MockedProvider>
+ );
+ });
+
+ it('should set `called` to true after calling the lazy execute function', async () => {
+ let renderCount = 0;
+ const Component = () => {
+ const [execute, { loading, called, data }] = useLazyQuery(CAR_QUERY);
+ switch (renderCount) {
+ case 0:
+ expect(loading).toBeFalsy();
+ expect(called).toBeFalsy();
+ setTimeout(() => {
+ execute();
+ });
+ break;
+ case 1:
+ expect(loading).toBeTruthy();
+ expect(called).toBeTruthy();
+ break;
+ case 2:
+ expect(loading).toEqual(false);
+ expect(called).toBeTruthy();
+ expect(data).toEqual(CAR_RESULT_DATA);
+ break;
+ default: // Do nothing
+ }
+ renderCount += 1;
+ return null;
+ };
+
+ render(
+ <MockedProvider mocks={CAR_MOCKS}>
+ <Component />
+ </MockedProvider>
+ );
+
+ return wait(() => {
+ expect(renderCount).toBe(3);
+ });
+ });
+
+ it('should override `skip` if lazy mode execution function is called', async () => {
+ let renderCount = 0;
+ const Component = () => {
+ const [execute, { loading, data }] = useLazyQuery(CAR_QUERY, {
+ skip: true
+ } as any);
+ switch (renderCount) {
+ case 0:
+ expect(loading).toBeFalsy();
+ setTimeout(() => {
+ execute();
+ });
+ break;
+ case 1:
+ expect(loading).toBeTruthy();
+ break;
+ case 2:
+ expect(loading).toEqual(false);
+ expect(data).toEqual(CAR_RESULT_DATA);
+ break;
+ default: // Do nothing
+ }
+ renderCount += 1;
+ return null;
+ };
+
+ render(
+ <MockedProvider mocks={CAR_MOCKS}>
+ <Component />
+ </MockedProvider>
+ );
+
+ return wait(() => {
+ expect(renderCount).toBe(3);
+ });
+ });
+
+ it(
+ 'should use variables defined in hook options (if any), when running ' +
+ 'the lazy execution function',
+ async () => {
+ const CAR_QUERY: DocumentNode = gql`
+ query AllCars($year: Int!) {
+ cars(year: $year) @client {
+ make
+ year
+ }
+ }
+ `;
+
+ const CAR_RESULT_DATA = [
+ {
+ make: 'Audi',
+ year: 2000,
+ __typename: 'Car'
+ },
+ {
+ make: 'Hyundai',
+ year: 2001,
+ __typename: 'Car'
+ }
+ ];
+
+ const client = new ApolloClient({
+ cache: new InMemoryCache(),
+ resolvers: {
+ Query: {
+ cars(_root, { year }) {
+ return CAR_RESULT_DATA.filter(car => car.year === year);
+ }
+ }
+ }
+ });
+
+ let renderCount = 0;
+ const Component = () => {
+ const [execute, { loading, data }] = useLazyQuery(CAR_QUERY, {
+ variables: { year: 2001 }
+ });
+ switch (renderCount) {
+ case 0:
+ expect(loading).toBeFalsy();
+ setTimeout(() => {
+ execute();
+ });
+ break;
+ case 1:
+ expect(loading).toBeTruthy();
+ break;
+ case 2:
+ expect(loading).toEqual(false);
+ expect(data.cars).toEqual([CAR_RESULT_DATA[1]]);
+ break;
+ default: // Do nothing
+ }
+ renderCount += 1;
+ return null;
+ };
+
+ render(
+ <ApolloProvider client={client}>
+ <Component />
+ </ApolloProvider>
+ );
+
+ return wait(() => {
+ expect(renderCount).toBe(3);
+ });
+ }
+ );
+
+ it(
+ 'should use variables passed into lazy execution function, ' +
+ 'overriding similar variables defined in Hook options',
+ async () => {
+ const CAR_QUERY: DocumentNode = gql`
+ query AllCars($year: Int!) {
+ cars(year: $year) @client {
+ make
+ year
+ }
+ }
+ `;
+
+ const CAR_RESULT_DATA = [
+ {
+ make: 'Audi',
+ year: 2000,
+ __typename: 'Car'
+ },
+ {
+ make: 'Hyundai',
+ year: 2001,
+ __typename: 'Car'
+ }
+ ];
+
+ const client = new ApolloClient({
+ cache: new InMemoryCache(),
+ resolvers: {
+ Query: {
+ cars(_root, { year }) {
+ return CAR_RESULT_DATA.filter(car => car.year === year);
+ }
+ }
+ }
+ });
+
+ let renderCount = 0;
+ const Component = () => {
+ const [execute, { loading, data }] = useLazyQuery(CAR_QUERY, {
+ variables: { year: 2001 }
+ });
+ switch (renderCount) {
+ case 0:
+ expect(loading).toBeFalsy();
+ setTimeout(() => {
+ execute({ variables: { year: 2000 } });
+ });
+ break;
+ case 1:
+ expect(loading).toBeTruthy();
+ break;
+ case 2:
+ expect(loading).toEqual(false);
+ expect(data.cars).toEqual([CAR_RESULT_DATA[0]]);
+ break;
+ default: // Do nothing
+ }
+ renderCount += 1;
+ return null;
+ };
+
+ render(
+ <ApolloProvider client={client}>
+ <Component />
+ </ApolloProvider>
+ );
+
+ return wait(() => {
+ expect(renderCount).toBe(3);
+ });
+ }
+ );
+
+ it(
+ 'should fetch data each time the execution function is called, when ' +
+ 'using a "network-only" fetch policy',
+ async () => {
+ const data1 = CAR_RESULT_DATA;
+
+ const data2 = {
+ cars: [
+ {
+ make: 'Audi',
+ model: 'SQ5',
+ vin: 'POWERANDTRUNKSPACE',
+ __typename: 'Car'
+ }
+ ]
+ };
+
+ const mocks = [
+ {
+ request: {
+ query: CAR_QUERY
+ },
+ result: { data: data1 }
+ },
+ {
+ request: {
+ query: CAR_QUERY
+ },
+ result: { data: data2 }
+ }
+ ];
+
+ let renderCount = 0;
+ const Component = () => {
+ const [execute, { loading, data }] = useLazyQuery(CAR_QUERY, {
+ fetchPolicy: 'network-only'
+ });
+ switch (renderCount) {
+ case 0:
+ expect(loading).toEqual(false);
+ setTimeout(() => {
+ execute();
+ });
+ break;
+ case 1:
+ expect(loading).toEqual(true);
+ break;
+ case 2:
+ expect(loading).toEqual(false);
+ expect(data).toEqual(data1);
+ setTimeout(() => {
+ execute();
+ });
+ break;
+ case 3:
+ expect(loading).toEqual(true);
+ break;
+ case 4:
+ expect(loading).toEqual(false);
+ expect(data).toEqual(data2);
+ break;
+ default: // Do nothing
+ }
+ renderCount += 1;
+ return null;
+ };
+
+ render(
+ <MockedProvider mocks={mocks}>
+ <Component />
+ </MockedProvider>
+ );
+
+ return wait(() => {
+ expect(renderCount).toBe(5);
+ });
+ }
+ );
+});
diff --git a/src/react/hooks/__tests__/useMutation.test.tsx b/src/react/hooks/__tests__/useMutation.test.tsx
new file mode 100644
--- /dev/null
+++ b/src/react/hooks/__tests__/useMutation.test.tsx
@@ -0,0 +1,351 @@
+import { DocumentNode } from 'graphql';
+import gql from 'graphql-tag';
+import { render, cleanup, wait } from '@testing-library/react';
+
+import { MockedProvider, mockSingleLink } from '../../../utilities/testing';
+import { itAsync } from '../../../utilities/testing/itAsync';
+import { ApolloClient } from '../../../ApolloClient';
+import { InMemoryCache } from '../../../cache/inmemory/inMemoryCache';
+import { ApolloProvider } from '../../context/ApolloProvider';
+import { useMutation } from '../useMutation';
+import { requireReactLazily } from '../../react';
+
+const React = requireReactLazily();
+const { useEffect } = React;
+
+describe('useMutation Hook', () => {
+ interface Todo {
+ id: number;
+ description: string;
+ priority: string;
+ }
+
+ const CREATE_TODO_MUTATION: DocumentNode = gql`
+ mutation createTodo($description: String!) {
+ createTodo(description: $description) {
+ id
+ description
+ priority
+ }
+ }
+ `;
+
+ const CREATE_TODO_RESULT = {
+ createTodo: {
+ id: 1,
+ description: 'Get milk!',
+ priority: 'High',
+ __typename: 'Todo'
+ }
+ };
+
+ afterEach(cleanup);
+
+ describe('General use', () => {
+ it('should handle a simple mutation properly', async () => {
+ const variables = {
+ description: 'Get milk!'
+ };
+
+ const mocks = [
+ {
+ request: {
+ query: CREATE_TODO_MUTATION,
+ variables
+ },
+ result: { data: CREATE_TODO_RESULT }
+ }
+ ];
+
+ let renderCount = 0;
+ const Component = () => {
+ const [createTodo, { loading, data }] = useMutation(
+ CREATE_TODO_MUTATION
+ );
+ switch (renderCount) {
+ case 0:
+ expect(loading).toBeFalsy();
+ expect(data).toBeUndefined();
+ createTodo({ variables });
+ break;
+ case 1:
+ expect(loading).toBeTruthy();
+ expect(data).toBeUndefined();
+ break;
+ case 2:
+ expect(loading).toBeFalsy();
+ expect(data).toEqual(CREATE_TODO_RESULT);
+ break;
+ default:
+ }
+ renderCount += 1;
+ return null;
+ };
+
+ render(
+ <MockedProvider mocks={mocks}>
+ <Component />
+ </MockedProvider>
+ );
+
+ return wait(() => {
+ expect(renderCount).toBe(3);
+ });
+ });
+
+ it('should be able to call mutations as an effect', async () => {
+ const variables = {
+ description: 'Get milk!'
+ };
+
+ const mocks = [
+ {
+ request: {
+ query: CREATE_TODO_MUTATION,
+ variables
+ },
+ result: { data: CREATE_TODO_RESULT }
+ }
+ ];
+
+ let renderCount = 0;
+ const useCreateTodo = () => {
+ const [createTodo, { loading, data }] = useMutation(
+ CREATE_TODO_MUTATION
+ );
+
+ useEffect(() => {
+ createTodo({ variables });
+ }, [variables]);
+
+ switch (renderCount) {
+ case 0:
+ expect(loading).toBeFalsy();
+ expect(data).toBeUndefined();
+ break;
+ case 1:
+ expect(loading).toBeTruthy();
+ expect(data).toBeUndefined();
+ break;
+ case 2:
+ expect(loading).toBeFalsy();
+ expect(data).toEqual(CREATE_TODO_RESULT);
+ break;
+ default:
+ }
+ renderCount += 1;
+ return null;
+ };
+
+ const Component = () => {
+ useCreateTodo();
+ return null;
+ };
+
+ render(
+ <MockedProvider mocks={mocks}>
+ <Component />
+ </MockedProvider>
+ );
+
+ return wait(() => {
+ expect(renderCount).toBe(3);
+ });
+ });
+
+ it('should ensure the mutation callback function has a stable identity', async () => {
+ const variables = {
+ description: 'Get milk!'
+ };
+
+ const mocks = [
+ {
+ request: {
+ query: CREATE_TODO_MUTATION,
+ variables
+ },
+ result: { data: CREATE_TODO_RESULT }
+ }
+ ];
+
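+      // Capture the mutate function from the first render so later renders
+      // can assert that its identity has not changed.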
+ let mutationFn: any;
+ let renderCount = 0;
+ const Component = () => {
+ const [createTodo, { loading, data }] = useMutation(
+ CREATE_TODO_MUTATION
+ );
+ switch (renderCount) {
+ case 0:
+ mutationFn = createTodo;
+ expect(loading).toBeFalsy();
+ expect(data).toBeUndefined();
+ setTimeout(() => {
+ createTodo({ variables });
+ });
+ break;
+ case 1:
+ expect(mutationFn).toBe(createTodo);
+ expect(loading).toBeTruthy();
+ expect(data).toBeUndefined();
+ break;
+ case 2:
+ expect(loading).toBeFalsy();
+ expect(data).toEqual(CREATE_TODO_RESULT);
+ break;
+ default:
+ }
+ renderCount += 1;
+ return null;
+ };
+
+ render(
+ <MockedProvider mocks={mocks}>
+ <Component />
+ </MockedProvider>
+ );
+
+ return wait(() => {
+ expect(renderCount).toBe(3);
+ });
+ });
+
+ it('should resolve mutate function promise with mutation results', async () => {
+ const variables = {
+ description: 'Get milk!'
+ };
+
+ const mocks = [
+ {
+ request: {
+ query: CREATE_TODO_MUTATION,
+ variables
+ },
+ result: { data: CREATE_TODO_RESULT }
+ }
+ ];
+
+ const Component = () => {
+ const [createTodo] = useMutation<{ createTodo: Todo }>(
+ CREATE_TODO_MUTATION
+ );
+
+ async function doIt() {
+ const { data } = await createTodo({ variables });
+ expect(data).toEqual(CREATE_TODO_RESULT);
+ expect(data!.createTodo.description).toEqual(
+ CREATE_TODO_RESULT.createTodo.description
+ );
+ }
+
+ useEffect(() => {
+ doIt();
+ }, []);
+
+ return null;
+ };
+
+ render(
+ <MockedProvider mocks={mocks}>
+ <Component />
+ </MockedProvider>
+ );
+
+ return wait();
+ });
+
+ it('should return the current client instance in the result object', async () => {
+ const Component = () => {
+ const [, { client }] = useMutation(CREATE_TODO_MUTATION);
+ expect(client).toBeDefined();
+ expect(client instanceof ApolloClient).toBeTruthy();
+ return null;
+ };
+
+ render(
+ <MockedProvider>
+ <Component />
+ </MockedProvider>
+ );
+
+ return wait();
+ });
+ });
+
+ describe('Optimistic response', () => {
+ itAsync('should support optimistic response handling', async (resolve, reject) => {
+ const optimisticResponse = {
+ __typename: 'Mutation',
+ createTodo: {
+ id: 1,
+ description: 'TEMPORARY',
+ priority: 'High',
+ __typename: 'Todo'
+ }
+ };
+
+ const variables = {
+ description: 'Get milk!'
+ };
+
+ const mocks = [
+ {
+ request: {
+ query: CREATE_TODO_MUTATION,
+ variables
+ },
+ result: { data: CREATE_TODO_RESULT }
+ }
+ ];
+
+ const link = mockSingleLink(...mocks).setOnError(reject);
+ const cache = new InMemoryCache();
+ const client = new ApolloClient({
+ cache,
+ link
+ });
+
+ let renderCount = 0;
+ const Component = () => {
+ const [createTodo, { loading, data }] = useMutation(
+ CREATE_TODO_MUTATION,
+ { optimisticResponse }
+ );
+
+ switch (renderCount) {
+ case 0:
+ expect(loading).toBeFalsy();
+ expect(data).toBeUndefined();
+ createTodo({ variables });
+
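+            // The optimistic result should be written to the cache
+            // synchronously, before the mutation response arrives.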
+ const dataInStore = client.cache.extract(true);
+ expect(dataInStore['Todo:1']).toEqual(
+ optimisticResponse.createTodo
+ );
+
+ break;
+ case 1:
+ expect(loading).toBeTruthy();
+ expect(data).toBeUndefined();
+ break;
+ case 2:
+ expect(loading).toBeFalsy();
+ expect(data).toEqual(CREATE_TODO_RESULT);
+ break;
+ default:
+ }
+ renderCount += 1;
+ return null;
+ };
+
+ render(
+ <ApolloProvider client={client}>
+ <Component />
+ </ApolloProvider>
+ );
+
+ return wait(() => {
+ expect(renderCount).toBe(3);
+ }).then(resolve, reject);
+ });
+ });
+});
diff --git a/src/react/hooks/__tests__/useQuery.test.tsx b/src/react/hooks/__tests__/useQuery.test.tsx
new file mode 100644
--- /dev/null
+++ b/src/react/hooks/__tests__/useQuery.test.tsx
@@ -0,0 +1,1267 @@
+import { DocumentNode, GraphQLError } from 'graphql';
+import gql from 'graphql-tag';
+import { render, cleanup, wait } from '@testing-library/react';
+
+import { Observable } from '../../../utilities/observables/Observable';
+import { ApolloLink } from '../../../link/core/ApolloLink';
+import { MockedProvider, mockSingleLink } from '../../../utilities/testing';
+import { MockLink } from '../../../utilities/testing/mocking/mockLink';
+import { itAsync } from '../../../utilities/testing/itAsync';
+import { ApolloClient } from '../../../ApolloClient';
+import { InMemoryCache } from '../../../cache/inmemory/inMemoryCache';
+import { ApolloProvider } from '../../context/ApolloProvider';
+import { useQuery } from '../useQuery';
+import { requireReactLazily } from '../../react';
+
+const React = requireReactLazily();
+const { useState, useReducer } = React;
+
+describe('useQuery Hook', () => {
+ const CAR_QUERY: DocumentNode = gql`
+ query {
+ cars {
+ make
+ model
+ vin
+ }
+ }
+ `;
+
+ const CAR_RESULT_DATA = {
+ cars: [
+ {
+ make: 'Audi',
+ model: 'RS8',
+ vin: 'DOLLADOLLABILL',
+ __typename: 'Car'
+ }
+ ]
+ };
+
+ const CAR_MOCKS = [
+ {
+ request: {
+ query: CAR_QUERY
+ },
+ result: { data: CAR_RESULT_DATA }
+ }
+ ];
+
+ afterEach(cleanup);
+
+ describe('General use', () => {
+ it('should handle a simple query properly', async () => {
+ const Component = () => {
+ const { data, loading } = useQuery(CAR_QUERY);
+ if (!loading) {
+ expect(data).toEqual(CAR_RESULT_DATA);
+ }
+ return null;
+ };
+
+ render(
+ <MockedProvider mocks={CAR_MOCKS}>
+ <Component />
+ </MockedProvider>
+ );
+
+ return wait();
+ });
+
+ it('should keep data as undefined until data is actually returned', async () => {
+ const Component = () => {
+ const { data, loading } = useQuery(CAR_QUERY);
+ if (loading) {
+ expect(data).toBeUndefined();
+ } else {
+ expect(data).toEqual(CAR_RESULT_DATA);
+ }
+ return null;
+ };
+
+ render(
+ <MockedProvider mocks={CAR_MOCKS}>
+ <Component />
+ </MockedProvider>
+ );
+
+ return wait();
+ });
+
+ it('should ensure ObservableQuery fields have a stable identity', async () => {
+ let refetchFn: any;
+ let fetchMoreFn: any;
+ let updateQueryFn: any;
+ let startPollingFn: any;
+ let stopPollingFn: any;
+ let subscribeToMoreFn: any;
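+      // Record the functions during the initial loading render, then assert
+      // that the same references are returned once data arrives.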
+ const Component = () => {
+ const {
+ loading,
+ refetch,
+ fetchMore,
+ updateQuery,
+ startPolling,
+ stopPolling,
+ subscribeToMore
+ } = useQuery(CAR_QUERY);
+ if (loading) {
+ refetchFn = refetch;
+ fetchMoreFn = fetchMore;
+ updateQueryFn = updateQuery;
+ startPollingFn = startPolling;
+ stopPollingFn = stopPolling;
+ subscribeToMoreFn = subscribeToMore;
+ } else {
+ expect(refetch).toBe(refetchFn);
+ expect(fetchMore).toBe(fetchMoreFn);
+ expect(updateQuery).toBe(updateQueryFn);
+ expect(startPolling).toBe(startPollingFn);
+ expect(stopPolling).toBe(stopPollingFn);
+ expect(subscribeToMore).toBe(subscribeToMoreFn);
+ }
+ return null;
+ };
+
+ render(
+ <MockedProvider mocks={CAR_MOCKS}>
+ <Component />
+ </MockedProvider>
+ );
+
+ return wait();
+ });
+ });
+
+ describe('Polling', () => {
+ it('should support polling', async () => {
+ let renderCount = 0;
+ const Component = () => {
+ let { data, loading, stopPolling } = useQuery(CAR_QUERY, {
+ pollInterval: 10
+ });
+ switch (renderCount) {
+ case 0:
+ expect(loading).toBeTruthy();
+ break;
+ case 1:
+ expect(loading).toBeFalsy();
+ expect(data).toEqual(CAR_RESULT_DATA);
+ break;
+ case 2:
+ expect(loading).toBeFalsy();
+ expect(data).toEqual(CAR_RESULT_DATA);
+ stopPolling();
+ break;
+ case 3:
+ throw new Error('Uh oh - we should have stopped polling!');
+ default:
+ // Do nothing
+ }
+ renderCount += 1;
+ return null;
+ };
+
+ render(
+ <MockedProvider mocks={CAR_MOCKS}>
+ <Component />
+ </MockedProvider>
+ );
+
+ return wait(() => {
+ expect(renderCount).toBe(3);
+ });
+ });
+
+ it('should stop polling when skip is true', async () => {
+ let renderCount = 0;
+ const Component = () => {
+ const [shouldSkip, setShouldSkip] = useState(false);
+ let { data, loading } = useQuery(CAR_QUERY, {
+ pollInterval: 10,
+ skip: shouldSkip
+ });
+
+ switch (renderCount) {
+ case 0:
+ expect(loading).toBeTruthy();
+ break;
+ case 1:
+ expect(loading).toBeFalsy();
+ expect(data).toEqual(CAR_RESULT_DATA);
+ break;
+ case 2:
+ expect(loading).toBeFalsy();
+ expect(data).toEqual(CAR_RESULT_DATA);
+ setShouldSkip(true);
+ break;
+ case 3:
+ expect(loading).toBeFalsy();
+ expect(data).toBeUndefined();
+ break;
+ case 4:
+ throw new Error('Uh oh - we should have stopped polling!');
+ default:
+ // Do nothing
+ }
+ renderCount += 1;
+ return null;
+ };
+
+ render(
+ <MockedProvider mocks={CAR_MOCKS}>
+ <Component />
+ </MockedProvider>
+ );
+
+ return wait(() => {
+ expect(renderCount).toBe(4);
+ });
+ });
+
+ itAsync('should stop polling when the component is unmounted', async (resolve, reject) => {
+ const mockLink = new MockLink(CAR_MOCKS);
+ const linkRequestSpy = jest.spyOn(mockLink, 'request');
+ let renderCount = 0;
+ const QueryComponent = ({ unmount }: { unmount: () => void }) => {
+ const { data, loading } = useQuery(CAR_QUERY, { pollInterval: 10 });
+ switch (renderCount) {
+ case 0:
+ expect(loading).toBeTruthy();
+ break;
+ case 1:
+ expect(loading).toBeFalsy();
+ expect(data).toEqual(CAR_RESULT_DATA);
+ expect(linkRequestSpy).toHaveBeenCalledTimes(1);
+ break;
+ case 2:
+ expect(loading).toBeFalsy();
+ expect(data).toEqual(CAR_RESULT_DATA);
+ expect(linkRequestSpy).toHaveBeenCalledTimes(2);
+ unmount();
+ break;
+ default:
+ }
+ renderCount += 1;
+ return null;
+ };
+
+ const Component = () => {
+ const [queryMounted, setQueryMounted] = useState(true);
+ const unmount = () => setTimeout(() => setQueryMounted(false), 0);
+ return <>{queryMounted && <QueryComponent unmount={unmount} />}</>;
+ };
+
+ render(
+ <MockedProvider mocks={CAR_MOCKS} link={mockLink}>
+ <Component />
+ </MockedProvider>
+ );
+
+ return wait(() => {
+ expect(linkRequestSpy).toHaveBeenCalledTimes(2);
+ }).then(resolve, reject);
+ });
+
+ it(
+ 'should not throw an error if `stopPolling` is called manually after ' +
+ 'a component has unmounted (even though polling has already been ' +
+ 'stopped automatically)',
+ async () => {
+ let unmount: any;
+ let renderCount = 0;
+ const Component = () => {
+ const { data, loading, stopPolling } = useQuery(CAR_QUERY, {
+ pollInterval: 10
+ });
+ switch (renderCount) {
+ case 0:
+ expect(loading).toBeTruthy();
+ break;
+ case 1:
+ expect(loading).toBeFalsy();
+ expect(data).toEqual(CAR_RESULT_DATA);
+ setTimeout(() => {
+ unmount();
+ stopPolling();
+ });
+ break;
+ default:
+ }
+ renderCount += 1;
+ return null;
+ };
+
+ unmount = render(
+ <MockedProvider mocks={CAR_MOCKS}>
+ <Component />
+ </MockedProvider>
+ ).unmount;
+
+ return wait(() => {
+ expect(renderCount).toBe(2);
+ });
+ }
+ );
+
+ it('should set called to true by default', () => {
+ const Component = () => {
+ const { loading, called } = useQuery(CAR_QUERY);
+ expect(loading).toBeTruthy();
+ expect(called).toBeTruthy();
+ return null;
+ };
+
+ render(
+ <MockedProvider mocks={CAR_MOCKS}>
+ <Component />
+ </MockedProvider>
+ );
+ });
+ });
+
+ describe('Error handling', () => {
+ it("should render GraphQLError's", async () => {
+ const query = gql`
+ query TestQuery {
+ rates(currency: "USD") {
+ rate
+ }
+ }
+ `;
+
+ const mocks = [
+ {
+ request: { query },
+ result: {
+ errors: [new GraphQLError('forced error')]
+ }
+ }
+ ];
+
+ const Component = () => {
+ const { loading, error } = useQuery(query);
+ if (!loading) {
+ expect(error).toBeDefined();
+ expect(error!.message).toEqual('GraphQL error: forced error');
+ }
+ return null;
+ };
+
+ render(
+ <MockedProvider mocks={mocks}>
+ <Component />
+ </MockedProvider>
+ );
+
+ return wait();
+ });
+
+ it('should only call onError callbacks once', async () => {
+ const query = gql`
+ query SomeQuery {
+ stuff {
+ thing
+ }
+ }
+ `;
+
+ const resultData = { stuff: { thing: 'it!', __typename: 'Stuff' } };
+
+ let callCount = 0;
+ const link = new ApolloLink(() => {
+ if (!callCount) {
+ callCount += 1;
+ return new Observable(observer => {
+ observer.error(new Error('Oh no!'));
+ });
+ } else {
+ return Observable.of({ data: resultData });
+ }
+ });
+
+ const client = new ApolloClient({
+ link,
+ cache: new InMemoryCache()
+ });
+
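+      // onError is wired to resolve a promise so the refetch only runs
+      // after the error callback has fired exactly once.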
+ let onError;
+ const onErrorPromise = new Promise(resolve => onError = resolve);
+
+ let renderCount = 0;
+ const Component = () => {
+ const { loading, error, refetch, data, networkStatus } = useQuery(
+ query,
+ {
+ onError,
+ notifyOnNetworkStatusChange: true
+ }
+ );
+
+ switch (++renderCount) {
+ case 1:
+ expect(loading).toBeTruthy();
+ break;
+ case 2:
+ expect(loading).toBeFalsy();
+ expect(error).toBeDefined();
+ expect(error!.message).toEqual('Network error: Oh no!');
+ onErrorPromise.then(() => refetch());
+ break;
+ case 3:
+ expect(loading).toBeTruthy();
+ break;
+ case 4:
+ expect(loading).toBeFalsy();
+ expect(data).toEqual(resultData);
+ break;
+ default: // Do nothing
+ }
+
+ return null;
+ };
+
+ render(
+ <ApolloProvider client={client}>
+ <Component />
+ </ApolloProvider>
+ );
+
+ return wait(() => {
+ expect(renderCount).toBe(4);
+ });
+ });
+
+ it('should persist errors on re-render if they are still valid', async () => {
+ const query = gql`
+ query SomeQuery {
+ stuff {
+ thing
+ }
+ }
+ `;
+
+ const mocks = [
+ {
+ request: { query },
+ result: {
+ errors: [new GraphQLError('forced error')]
+ }
+ }
+ ];
+
+ let renderCount = 0;
+ function App() {
+ const [_, forceUpdate] = useReducer(x => x + 1, 0);
+ const { loading, error } = useQuery(query);
+
+ switch (renderCount) {
+ case 0:
+ expect(loading).toBeTruthy();
+ expect(error).toBeUndefined();
+ break;
+ case 1:
+ expect(error).toBeDefined();
+ expect(error!.message).toEqual('GraphQL error: forced error');
+ setTimeout(() => {
+ forceUpdate(0);
+ });
+ break;
+ case 2:
+ expect(error).toBeDefined();
+ expect(error!.message).toEqual('GraphQL error: forced error');
+ break;
+ default: // Do nothing
+ }
+
+ renderCount += 1;
+ return null;
+ }
+
+ render(
+ <MockedProvider mocks={mocks}>
+ <App />
+ </MockedProvider>
+ );
+
+ return wait(() => {
+ expect(renderCount).toBe(3);
+ });
+ });
+
+ it(
+ 'should persist errors on re-render when inlining onError and/or ' +
+ 'onCompleted callbacks',
+ async () => {
+ const query = gql`
+ query SomeQuery {
+ stuff {
+ thing
+ }
+ }
+ `;
+
+ const mocks = [
+ {
+ request: { query },
+ result: {
+ errors: [new GraphQLError('forced error')]
+ }
+ }
+ ];
+
+ let renderCount = 0;
+ function App() {
+ const [_, forceUpdate] = useReducer(x => x + 1, 0);
+ const { loading, error } = useQuery(query, {
+ onError: () => {},
+ onCompleted: () => {}
+ });
+
+ switch (renderCount) {
+ case 0:
+ expect(loading).toBeTruthy();
+ expect(error).toBeUndefined();
+ break;
+ case 1:
+ expect(error).toBeDefined();
+ expect(error!.message).toEqual('GraphQL error: forced error');
+ setTimeout(() => {
+ forceUpdate(0);
+ });
+ break;
+ case 2:
+ expect(error).toBeDefined();
+ expect(error!.message).toEqual('GraphQL error: forced error');
+ break;
+ default: // Do nothing
+ }
+
+ renderCount += 1;
+ return null;
+ }
+
+ render(
+ <MockedProvider mocks={mocks}>
+ <App />
+ </MockedProvider>
+ );
+
+ return wait(() => {
+ expect(renderCount).toBe(3);
+ });
+ }
+ );
+
+ it('should render errors (different error messages) with loading done on refetch', async () => {
+ const query = gql`
+ query SomeQuery {
+ stuff {
+ thing
+ }
+ }
+ `;
+
+ const mocks = [
+ {
+ request: { query },
+ result: {
+ errors: [new GraphQLError('an error 1')]
+ }
+ },
+ {
+ request: { query },
+ result: {
+ errors: [new GraphQLError('an error 2')]
+ }
+ }
+ ];
+
+ let renderCount = 0;
+ function App() {
+ const { loading, error, refetch } = useQuery(query, {
+ notifyOnNetworkStatusChange: true
+ });
+
+ switch (renderCount) {
+ case 0:
+ expect(loading).toBeTruthy();
+ expect(error).toBeUndefined();
+ break;
+ case 1:
+ expect(loading).toBeFalsy();
+ expect(error).toBeDefined();
+ expect(error!.message).toEqual('GraphQL error: an error 1');
+ setTimeout(() => {
+ // catch here to avoid failing due to 'uncaught promise rejection'
+ refetch().catch(() => {});
+ });
+ break;
+ case 2:
+ expect(loading).toBeTruthy();
+ break;
+ case 3:
+ expect(loading).toBeFalsy();
+ expect(error).toBeDefined();
+ expect(error!.message).toEqual('GraphQL error: an error 2');
+ break;
+ default: // Do nothing
+ }
+
+ renderCount += 1;
+ return null;
+ }
+
+ render(
+ <MockedProvider mocks={mocks}>
+ <App />
+ </MockedProvider>
+ );
+
+ return wait(() => {
+ expect(renderCount).toBe(4);
+ });
+ });
+
+ it('should render errors (same error messages) with loading done on refetch', async () => {
+ const query = gql`
+ query SomeQuery {
+ stuff {
+ thing
+ }
+ }
+ `;
+
+ const mocks = [
+ {
+ request: { query },
+ result: {
+ errors: [new GraphQLError('same error message')]
+ }
+ },
+ {
+ request: { query },
+ result: {
+ errors: [new GraphQLError('same error message')]
+ }
+ }
+ ];
+
+ let renderCount = 0;
+ function App() {
+ const { loading, error, refetch } = useQuery(query, {
+ notifyOnNetworkStatusChange: true
+ });
+
+ switch (renderCount) {
+ case 0:
+ expect(loading).toBeTruthy();
+ expect(error).toBeUndefined();
+ break;
+ case 1:
+ expect(loading).toBeFalsy();
+ expect(error).toBeDefined();
+ expect(error!.message).toEqual('GraphQL error: same error message');
+ setTimeout(() => {
+ // catch here to avoid failing due to 'uncaught promise rejection'
+ refetch().catch(() => {});
+ });
+ break;
+ case 2:
+ expect(loading).toBeTruthy();
+ break;
+ case 3:
+ expect(loading).toBeFalsy();
+ expect(error).toBeDefined();
+ expect(error!.message).toEqual('GraphQL error: same error message');
+ break;
+ default: // Do nothing
+ }
+
+ renderCount += 1;
+ return null;
+ }
+
+ render(
+ <MockedProvider mocks={mocks}>
+ <App />
+ </MockedProvider>
+ );
+
+ return wait(() => {
+ expect(renderCount).toBe(4);
+ });
+ });
+
+ it('should render both success and errors (same error messages) with loading done on refetch', async () => {
+ const mocks = [
+ {
+ request: { query: CAR_QUERY },
+ result: {
+ errors: [new GraphQLError('same error message')]
+ }
+ },
+ {
+ request: { query: CAR_QUERY },
+ result: {
+ data: CAR_RESULT_DATA
+ }
+ },
+ {
+ request: { query: CAR_QUERY },
+ result: {
+ errors: [new GraphQLError('same error message')]
+ }
+ }
+ ];
+
+ let renderCount = 0;
+ function App() {
+ const { loading, data, error, refetch } = useQuery(CAR_QUERY, {
+ notifyOnNetworkStatusChange: true
+ });
+
+ switch (renderCount) {
+ case 0:
+ expect(loading).toBeTruthy();
+ expect(error).toBeUndefined();
+ break;
+ case 1:
+ expect(loading).toBeFalsy();
+ expect(error).toBeDefined();
+ expect(error!.message).toEqual('GraphQL error: same error message');
+ setTimeout(() => {
+ // catch here to avoid failing due to 'uncaught promise rejection'
+ refetch().catch(() => {});
+ });
+ break;
+ case 2:
+ expect(loading).toBeTruthy();
+ break;
+ case 3:
+ expect(loading).toBeFalsy();
+ expect(error).toBeUndefined();
+ expect(data).toEqual(CAR_RESULT_DATA);
+ setTimeout(() => {
+ // catch here to avoid failing due to 'uncaught promise rejection'
+ refetch().catch(() => {});
+ });
+ break;
+ case 4:
+ expect(loading).toBeTruthy();
+ break;
+ case 5:
+ expect(loading).toBeFalsy();
+ expect(error).toBeDefined();
+ expect(error!.message).toEqual('GraphQL error: same error message');
+ break;
+ default: // Do nothing
+ }
+
+ renderCount += 1;
+ return null;
+ }
+
+ render(
+ <MockedProvider mocks={mocks}>
+ <App />
+ </MockedProvider>
+ );
+
+ return wait(() => {
+ expect(renderCount).toBe(6);
+ });
+ });
+ });
+
+ describe('Pagination', () => {
+ it(
+ 'should render `fetchMore.updateQuery` updated results with proper ' +
+ 'loading status, when `notifyOnNetworkStatusChange` is true',
+ async () => {
+ const carQuery: DocumentNode = gql`
+ query cars($limit: Int) {
+ cars(limit: $limit) {
+ id
+ make
+ model
+ vin
+ __typename
+ }
+ }
+ `;
+
+ const carResults = {
+ cars: [
+ {
+ id: 1,
+ make: 'Audi',
+ model: 'RS8',
+ vin: 'DOLLADOLLABILL',
+ __typename: 'Car'
+ }
+ ]
+ };
+
+ const moreCarResults = {
+ cars: [
+ {
+ id: 2,
+ make: 'Audi',
+ model: 'eTron',
+ vin: 'TREESRGOOD',
+ __typename: 'Car'
+ }
+ ]
+ };
+
+ const mocks = [
+ {
+ request: { query: carQuery, variables: { limit: 1 } },
+ result: { data: carResults }
+ },
+ {
+ request: { query: carQuery, variables: { limit: 1 } },
+ result: { data: moreCarResults }
+ }
+ ];
+
+ let renderCount = 0;
+ function App() {
+ const { loading, data, fetchMore } = useQuery(carQuery, {
+ variables: { limit: 1 },
+ notifyOnNetworkStatusChange: true
+ });
+
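+          // With notifyOnNetworkStatusChange: true, fetchMore surfaces an
+          // extra loading render while the request is in flight.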
+ switch (renderCount) {
+ case 0:
+ expect(loading).toBeTruthy();
+ break;
+ case 1:
+ expect(loading).toBeFalsy();
+ expect(data).toEqual(carResults);
+ fetchMore({
+ variables: {
+ limit: 1
+ },
+ updateQuery: (prev, { fetchMoreResult }) => ({
+ cars: [...prev.cars, ...fetchMoreResult.cars]
+ })
+ });
+ break;
+ case 2:
+ expect(loading).toBeTruthy();
+ break;
+ case 3:
+ expect(loading).toBeFalsy();
+ expect(data).toEqual({
+ cars: [carResults.cars[0]]
+ });
+ break;
+ case 4:
+ expect(data).toEqual({
+ cars: [carResults.cars[0], moreCarResults.cars[0]]
+ });
+ break;
+ default:
+ }
+
+ renderCount += 1;
+ return null;
+ }
+
+ render(
+ <MockedProvider mocks={mocks}>
+ <App />
+ </MockedProvider>
+ );
+
+ return wait(() => {
+ expect(renderCount).toBe(5);
+ });
+ }
+ );
+
+ it(
+ 'should render `fetchMore.updateQuery` updated results with no ' +
+ 'loading status, when `notifyOnNetworkStatusChange` is false',
+ async () => {
+ const carQuery: DocumentNode = gql`
+ query cars($limit: Int) {
+ cars(limit: $limit) {
+ id
+ make
+ model
+ vin
+ __typename
+ }
+ }
+ `;
+
+ const carResults = {
+ cars: [
+ {
+ id: 1,
+ make: 'Audi',
+ model: 'RS8',
+ vin: 'DOLLADOLLABILL',
+ __typename: 'Car'
+ }
+ ]
+ };
+
+ const moreCarResults = {
+ cars: [
+ {
+ id: 2,
+ make: 'Audi',
+ model: 'eTron',
+ vin: 'TREESRGOOD',
+ __typename: 'Car'
+ }
+ ]
+ };
+
+ const mocks = [
+ {
+ request: { query: carQuery, variables: { limit: 1 } },
+ result: { data: carResults }
+ },
+ {
+ request: { query: carQuery, variables: { limit: 1 } },
+ result: { data: moreCarResults }
+ }
+ ];
+
+ let renderCount = 0;
+ function App() {
+ const { loading, data, fetchMore } = useQuery(carQuery, {
+ variables: { limit: 1 },
+ notifyOnNetworkStatusChange: false
+ });
+
+ switch (renderCount) {
+ case 0:
+ expect(loading).toBeTruthy();
+ break;
+ case 1:
+ expect(loading).toBeFalsy();
+ expect(data).toEqual(carResults);
+ fetchMore({
+ variables: {
+ limit: 1
+ },
+ updateQuery: (prev, { fetchMoreResult }) => ({
+ cars: [...prev.cars, ...fetchMoreResult.cars]
+ })
+ });
+ break;
+ case 2:
+ expect(loading).toBeFalsy();
+ expect(data).toEqual({
+ cars: [carResults.cars[0], moreCarResults.cars[0]]
+ });
+ break;
+ default:
+ }
+
+ renderCount += 1;
+ return null;
+ }
+
+ render(
+ <MockedProvider mocks={mocks}>
+ <App />
+ </MockedProvider>
+ );
+
+ return wait(() => {
+ expect(renderCount).toBe(3);
+ });
+ }
+ );
+ });
+
+ describe('Refetching', () => {
+ it('should properly handle refetching with different variables', async () => {
+ const carQuery: DocumentNode = gql`
+ query cars($id: Int) {
+ cars(id: $id) {
+ id
+ make
+ model
+ vin
+ __typename
+ }
+ }
+ `;
+
+ const carData1 = {
+ cars: [
+ {
+ id: 1,
+ make: 'Audi',
+ model: 'RS8',
+ vin: 'DOLLADOLLABILL',
+ __typename: 'Car'
+ }
+ ]
+ };
+
+ const carData2 = {
+ cars: [
+ {
+ id: 2,
+ make: 'Audi',
+ model: 'eTron',
+ vin: 'TREESRGOOD',
+ __typename: 'Car'
+ }
+ ]
+ };
+
+ const mocks = [
+ {
+ request: { query: carQuery, variables: { id: 1 } },
+ result: { data: carData1 }
+ },
+ {
+ request: { query: carQuery, variables: { id: 2 } },
+ result: { data: carData2 }
+ },
+ {
+ request: { query: carQuery, variables: { id: 1 } },
+ result: { data: carData1 }
+ }
+ ];
+
+ let renderCount = 0;
+ function App() {
+ const { loading, data, refetch } = useQuery(carQuery, {
+ variables: { id: 1 }
+ });
+
+ switch (renderCount) {
+ case 0:
+ expect(loading).toBeTruthy();
+ break;
+ case 1:
+ expect(loading).toBeFalsy();
+ expect(data).toEqual(carData1);
+ refetch({ id: 2 });
+ break;
+ case 2:
+ expect(loading).toBeTruthy();
+ break;
+ case 3:
+ expect(loading).toBeFalsy();
+ expect(data).toEqual(carData2);
+ refetch({ id: 1 });
+ break;
+ case 4:
+ expect(loading).toBeTruthy();
+ break;
+ case 5:
+ expect(loading).toBeFalsy();
+ expect(data).toEqual(carData1);
+ break;
+ default:
+ }
+
+ renderCount += 1;
+ return null;
+ }
+
+ render(
+ <MockedProvider mocks={mocks}>
+ <App />
+ </MockedProvider>
+ );
+
+ return wait(() => {
+ expect(renderCount).toBe(6);
+ });
+ });
+ });
+
+ describe('Partial refetching', () => {
+ it(
+ 'should attempt a refetch when the query result was marked as being ' +
+ 'partial, the returned data was reset to an empty Object by the ' +
+ 'Apollo Client QueryManager (due to a cache miss), and the ' +
+ '`partialRefetch` prop is `true`',
+ async () => {
+ const query: DocumentNode = gql`
+ query AllPeople($name: String!) {
+ allPeople(name: $name) {
+ people {
+ name
+ }
+ }
+ }
+ `;
+
+ interface Data {
+ allPeople: {
+ people: Array<{ name: string }>;
+ };
+ }
+
+ const peopleData: Data = {
+ allPeople: { people: [{ name: 'Luke Skywalker' }] }
+ };
+
+ const link = mockSingleLink(
+ {
+ request: {
+ query,
+ variables: {
+ someVar: 'abc123'
+ }
+ },
+ result: {
+ data: undefined
+ }
+ },
+ {
+ request: {
+ query,
+ variables: {
+ someVar: 'abc123'
+ }
+ },
+ result: {
+ data: peopleData
+ }
+ }
+ );
+
+ const client = new ApolloClient({
+ link,
+ cache: new InMemoryCache()
+ });
+
+ let renderCount = 0;
+ const Component = () => {
+ const { loading, data } = useQuery(query, {
+ variables: { someVar: 'abc123' },
+ partialRefetch: true
+ });
+
+ switch (renderCount) {
+ case 0:
+ // Initial loading render
+ expect(loading).toBeTruthy();
+ break;
+ case 1:
+              // `data` is missing and `partialRefetch` is true, so a refetch
+              // is triggered and loading is set to true again
+ expect(loading).toBeTruthy();
+ expect(data).toBeUndefined();
+ break;
+ case 2:
+ // Refetch has completed
+ expect(loading).toBeFalsy();
+ expect(data).toEqual(peopleData);
+ break;
+ default:
+ }
+
+ renderCount += 1;
+ return null;
+ };
+
+ render(
+ <ApolloProvider client={client}>
+ <Component />
+ </ApolloProvider>
+ );
+
+ return wait(() => {
+ expect(renderCount).toBe(3);
+ });
+ }
+ );
+ });
+
+ describe('Callbacks', () => {
+ it(
+ 'should pass loaded data to onCompleted when using the cache-only ' +
+ 'fetch policy',
+ async () => {
+ const cache = new InMemoryCache();
+ const client = new ApolloClient({
+ cache,
+ resolvers: {}
+ });
+
+ cache.writeQuery({
+ query: CAR_QUERY,
+ data: CAR_RESULT_DATA
+ });
+
+ let onCompletedCalled = false;
+ const Component = () => {
+ const { loading, data } = useQuery(CAR_QUERY, {
+ fetchPolicy: 'cache-only',
+ onCompleted(data) {
+ onCompletedCalled = true;
+ expect(data).toBeDefined();
+ }
+ });
+ if (!loading) {
+ expect(data).toEqual(CAR_RESULT_DATA);
+ }
+ return null;
+ };
+
+ render(
+ <ApolloProvider client={client}>
+ <Component />
+ </ApolloProvider>
+ );
+
+ return wait(() => {
+ expect(onCompletedCalled).toBeTruthy();
+ });
+ }
+ );
+
+ it('should only call onCompleted once per query run', async () => {
+ const cache = new InMemoryCache();
+ const client = new ApolloClient({
+ cache,
+ resolvers: {}
+ });
+
+ cache.writeQuery({
+ query: CAR_QUERY,
+ data: CAR_RESULT_DATA
+ });
+
+ let onCompletedCount = 0;
+ const Component = () => {
+ const { loading, data } = useQuery(CAR_QUERY, {
+ fetchPolicy: 'cache-only',
+ onCompleted() {
+ onCompletedCount += 1;
+ }
+ });
+ if (!loading) {
+ expect(data).toEqual(CAR_RESULT_DATA);
+ }
+ return null;
+ };
+
+ render(
+ <ApolloProvider client={client}>
+ <Component />
+ </ApolloProvider>
+ );
+
+ return wait(() => {
+ expect(onCompletedCount).toBe(1);
+ });
+ });
+ });
+});
diff --git a/src/react/hooks/__tests__/useSubscription.test.tsx b/src/react/hooks/__tests__/useSubscription.test.tsx
new file mode 100644
--- /dev/null
+++ b/src/react/hooks/__tests__/useSubscription.test.tsx
@@ -0,0 +1,298 @@
+import { render, cleanup, wait } from '@testing-library/react';
+import gql from 'graphql-tag';
+
+import { MockSubscriptionLink } from '../../../utilities/testing/mocking/mockSubscriptionLink';
+import { ApolloClient } from '../../../ApolloClient';
+import { InMemoryCache as Cache } from '../../../cache/inmemory/inMemoryCache';
+import { ApolloProvider } from '../../context/ApolloProvider';
+import { useSubscription } from '../useSubscription';
+import { requireReactLazily } from '../../react';
+
+const React = requireReactLazily();
+
+describe('useSubscription Hook', () => {
+ afterEach(cleanup);
+
+ it('should handle a simple subscription properly', async () => {
+ const subscription = gql`
+ subscription {
+ car {
+ make
+ }
+ }
+ `;
+
+ const results = ['Audi', 'BMW', 'Mercedes', 'Hyundai'].map(make => ({
+ result: { data: { car: { make } } }
+ }));
+
+ const link = new MockSubscriptionLink();
+ const client = new ApolloClient({
+ link,
+ cache: new Cache({ addTypename: false })
+ });
+
+ let renderCount = 0;
+ const Component = () => {
+ const { loading, data, error } = useSubscription(subscription);
+ switch (renderCount) {
+ case 0:
+ expect(loading).toBe(true);
+ expect(error).toBeUndefined();
+ expect(data).toBeUndefined();
+ break;
+ case 1:
+ expect(loading).toBe(false);
+ expect(data).toEqual(results[0].result.data);
+ break;
+ case 2:
+ expect(loading).toBe(false);
+ expect(data).toEqual(results[1].result.data);
+ break;
+ case 3:
+ expect(loading).toBe(false);
+ expect(data).toEqual(results[2].result.data);
+ break;
+ case 4:
+ expect(loading).toBe(false);
+ expect(data).toEqual(results[3].result.data);
+ break;
+ default:
+ }
+ setTimeout(() => {
+ renderCount <= results.length &&
+ link.simulateResult(results[renderCount - 1]);
+ });
+ renderCount += 1;
+ return null;
+ };
+
+ render(
+ <ApolloProvider client={client}>
+ <Component />
+ </ApolloProvider>
+ );
+
+ return wait(() => {
+ expect(renderCount).toBe(5);
+ });
+ });
+
+ it('should cleanup after the subscription component has been unmounted', async () => {
+ const subscription = gql`
+ subscription {
+ car {
+ make
+ }
+ }
+ `;
+
+ const results = [
+ {
+ result: { data: { car: { make: 'Pagani' } } }
+ }
+ ];
+
+ const link = new MockSubscriptionLink();
+ const client = new ApolloClient({
+ link,
+ cache: new Cache({ addTypename: false })
+ });
+
+ let renderCount = 0;
+ let onSubscriptionDataCount = 0;
+ let unmount: any;
+
+ const Component = () => {
+ const { loading, data, error } = useSubscription(subscription, {
+ onSubscriptionData() {
+ onSubscriptionDataCount += 1;
+ }
+ });
+ switch (renderCount) {
+ case 0:
+ expect(loading).toBe(true);
+ expect(error).toBeUndefined();
+ expect(data).toBeUndefined();
+ link.simulateResult(results[0]);
+ break;
+ case 1:
+ expect(loading).toBe(false);
+ expect(data).toEqual(results[0].result.data);
+
+ setTimeout(() => {
+ expect(onSubscriptionDataCount).toEqual(1);
+
+ // After the component has been unmounted, the internal
+ // ObservableQuery should be stopped, meaning it shouldn't
+ // receive any new data (so the onSubscriptionDataCount should
+ // stay at 1).
+ unmount();
+ link.simulateResult(results[0]);
+ });
+ break;
+ default:
+ }
+ renderCount += 1;
+ return null;
+ };
+
+ unmount = render(
+ <ApolloProvider client={client}>
+ <Component />
+ </ApolloProvider>
+ ).unmount;
+
+ return wait(() => {
+ expect(onSubscriptionDataCount).toEqual(1);
+ });
+ });
+
+ it('should never execute a subscription with the skip option', async () => {
+ const subscription = gql`
+ subscription {
+ car {
+ make
+ }
+ }
+ `;
+
+ const link = new MockSubscriptionLink();
+ const client = new ApolloClient({
+ link,
+ cache: new Cache({ addTypename: false })
+ });
+
+ let renderCount = 0;
+ let onSubscriptionDataCount = 0;
+ let unmount: any;
+
+ const Component = () => {
+ const { loading, data, error } = useSubscription(subscription, {
+ skip: true,
+ onSubscriptionData() {
+ onSubscriptionDataCount += 1;
+ }
+ });
+ switch (renderCount) {
+ case 0:
+ expect(loading).toBe(false);
+ expect(error).toBeUndefined();
+ expect(data).toBeUndefined();
+ setTimeout(() => {
+ unmount();
+ });
+ break;
+ default:
+ }
+ renderCount += 1;
+ return null;
+ };
+
+ unmount = render(
+ <ApolloProvider client={client}>
+ <Component />
+ </ApolloProvider>
+ ).unmount;
+
+ return wait(() => {
+ expect(onSubscriptionDataCount).toEqual(0);
+ expect(renderCount).toEqual(1);
+ });
+ });
+
+ it('should create a subscription after skip has changed from true to a falsy value', async () => {
+ const subscription = gql`
+ subscription {
+ car {
+ make
+ }
+ }
+ `;
+
+ const results = [
+ {
+ result: { data: { car: { make: 'Pagani' } } }
+ },
+ {
+ result: { data: { car: { make: 'Scoop' } } }
+ }
+ ];
+
+ const link = new MockSubscriptionLink();
+ const client = new ApolloClient({
+ link,
+ cache: new Cache({ addTypename: false })
+ });
+
+ let renderCount = 0;
+ let unmount: any;
+
+ const Component = () => {
+ const [, triggerRerender] = React.useState(0);
+ const [skip, setSkip] = React.useState(true);
+ const { loading, data, error } = useSubscription(subscription, {
+ skip
+ });
+ switch (renderCount) {
+ case 0:
+ expect(loading).toBe(false);
+ expect(error).toBeUndefined();
+ expect(data).toBeUndefined();
+ setSkip(false);
+ break;
+ case 1:
+ expect(loading).toBe(true);
+ expect(error).toBeUndefined();
+ expect(data).toBeUndefined();
+ link.simulateResult(results[0]);
+ break;
+ case 2:
+ expect(loading).toBe(false);
+ expect(data).toEqual(results[0].result.data);
+ setSkip(true);
+ break;
+ case 3:
+ expect(loading).toBe(false);
+ expect(data).toBeUndefined();
+ expect(error).toBeUndefined();
+ // ensure state persists across rerenders
+ triggerRerender(i => i + 1);
+ break;
+ case 4:
+ expect(loading).toBe(false);
+ expect(data).toBeUndefined();
+ expect(error).toBeUndefined();
+ setSkip(false);
+ break;
+ case 5:
+ expect(loading).toBe(true);
+ expect(error).toBeUndefined();
+ expect(data).toBeUndefined();
+ link.simulateResult(results[1]);
+ break;
+ case 6:
+ expect(loading).toBe(false);
+ expect(error).toBeUndefined();
+ expect(data).toEqual(results[1].result.data);
+ setTimeout(() => {
+ unmount();
+ });
+ break;
+ default:
+ }
+ renderCount += 1;
+ return null;
+ };
+
+ unmount = render(
+ <ApolloProvider client={client}>
+ <Component />
+ </ApolloProvider>
+ ).unmount;
+
+ return wait(() => {
+ expect(renderCount).toEqual(7);
+ });
+ });
+});
diff --git a/src/react/parser/__tests__/parser.test.ts b/src/react/parser/__tests__/parser.test.ts
new file mode 100644
--- /dev/null
+++ b/src/react/parser/__tests__/parser.test.ts
@@ -0,0 +1,227 @@
+import gql from 'graphql-tag';
+
+import { parser, DocumentType } from '../parser';
+
+type OperationDefinition = any;
+
+describe('parser', () => {
+  it('should error if both a query and a mutation are present', () => {
+ const query = gql`
+ query {
+ user {
+ name
+ }
+ }
+
+ mutation($t: String) {
+ addT(t: $t) {
+ user {
+ name
+ }
+ }
+ }
+ `;
+
+ expect(parser.bind(null, query)).toThrowError(/react-apollo only supports/);
+ });
+
+ it('should error if multiple operations are present', () => {
+ const query = gql`
+ query One {
+ user {
+ name
+ }
+ }
+
+ query Two {
+ user {
+ name
+ }
+ }
+ `;
+
+ expect(parser.bind(null, query)).toThrowError(/react-apollo only supports/);
+ });
+
+ it('should error if not a DocumentNode', () => {
+ const query = `
+ query One { user { name } }
+ `;
+ expect(parser.bind(null, query as any)).toThrowError(
+ /not a valid GraphQL DocumentNode/
+ );
+ });
+
+ it('should return the name of the operation', () => {
+ const query = gql`
+ query One {
+ user {
+ name
+ }
+ }
+ `;
+ expect(parser(query).name).toBe('One');
+
+ const mutation = gql`
+ mutation One {
+ user {
+ name
+ }
+ }
+ `;
+ expect(parser(mutation).name).toBe('One');
+
+ const subscription = gql`
+ subscription One {
+ user {
+ name
+ }
+ }
+ `;
+ expect(parser(subscription).name).toBe('One');
+ });
+
+ it('should return data as the name of the operation if not named', () => {
+ const query = gql`
+ query {
+ user {
+ name
+ }
+ }
+ `;
+ expect(parser(query).name).toBe('data');
+
+ const unnamedQuery = gql`
+ {
+ user {
+ name
+ }
+ }
+ `;
+ expect(parser(unnamedQuery).name).toBe('data');
+
+ const mutation = gql`
+ mutation {
+ user {
+ name
+ }
+ }
+ `;
+ expect(parser(mutation).name).toBe('data');
+
+ const subscription = gql`
+ subscription {
+ user {
+ name
+ }
+ }
+ `;
+ expect(parser(subscription).name).toBe('data');
+ });
+
+ it('should return the type of operation', () => {
+ const query = gql`
+ query One {
+ user {
+ name
+ }
+ }
+ `;
+ expect(parser(query).type).toBe(DocumentType.Query);
+
+ const unnamedQuery = gql`
+ {
+ user {
+ name
+ }
+ }
+ `;
+ expect(parser(unnamedQuery).type).toBe(DocumentType.Query);
+
+ const mutation = gql`
+ mutation One {
+ user {
+ name
+ }
+ }
+ `;
+ expect(parser(mutation).type).toBe(DocumentType.Mutation);
+
+ const subscription = gql`
+ subscription One {
+ user {
+ name
+ }
+ }
+ `;
+ expect(parser(subscription).type).toBe(DocumentType.Subscription);
+ });
+
+ it('should return the variable definitions of the operation', () => {
+ const query = gql`
+ query One($t: String!) {
+ user(t: $t) {
+ name
+ }
+ }
+ `;
+ let definition = query.definitions[0] as OperationDefinition;
+ expect(parser(query).variables).toEqual(definition.variableDefinitions);
+
+ const mutation = gql`
+ mutation One($t: String!) {
+ user(t: $t) {
+ name
+ }
+ }
+ `;
+ definition = mutation.definitions[0] as OperationDefinition;
+ expect(parser(mutation).variables).toEqual(definition.variableDefinitions);
+
+ const subscription = gql`
+ subscription One($t: String!) {
+ user(t: $t) {
+ name
+ }
+ }
+ `;
+ definition = subscription.definitions[0] as OperationDefinition;
+ expect(parser(subscription).variables).toEqual(
+ definition.variableDefinitions
+ );
+ });
+
+ it('should not error if the operation has no variables', () => {
+ const query = gql`
+ query {
+ user(t: $t) {
+ name
+ }
+ }
+ `;
+ let definition = query.definitions[0] as OperationDefinition;
+ expect(parser(query).variables).toEqual(definition.variableDefinitions);
+
+ const mutation = gql`
+ mutation {
+ user(t: $t) {
+ name
+ }
+ }
+ `;
+ definition = mutation.definitions[0] as OperationDefinition;
+ expect(parser(mutation).variables).toEqual(definition.variableDefinitions);
+
+ const subscription = gql`
+ subscription {
+ user(t: $t) {
+ name
+ }
+ }
+ `;
+ definition = subscription.definitions[0] as OperationDefinition;
+ expect(parser(subscription).variables).toEqual(
+ definition.variableDefinitions
+ );
+ });
+});
diff --git a/packages/apollo-utilities/src/util/__tests__/assign.ts b/src/utilities/common/__tests__/assign.ts
similarity index 100%
rename from packages/apollo-utilities/src/util/__tests__/assign.ts
rename to src/utilities/common/__tests__/assign.ts
diff --git a/packages/apollo-utilities/src/util/__tests__/cloneDeep.ts b/src/utilities/common/__tests__/cloneDeep.ts
similarity index 100%
rename from packages/apollo-utilities/src/util/__tests__/cloneDeep.ts
rename to src/utilities/common/__tests__/cloneDeep.ts
diff --git a/packages/apollo-utilities/src/util/__tests__/environment.ts b/src/utilities/common/__tests__/environment.ts
similarity index 78%
rename from packages/apollo-utilities/src/util/__tests__/environment.ts
rename to src/utilities/common/__tests__/environment.ts
--- a/packages/apollo-utilities/src/util/__tests__/environment.ts
+++ b/src/utilities/common/__tests__/environment.ts
@@ -1,4 +1,4 @@
-import { isEnv, isProduction, isDevelopment, isTest } from '../environment';
+import { isEnv, isDevelopment, isTest } from '../environment';
describe('environment', () => {
let keepEnv: string | undefined;
@@ -27,18 +27,6 @@ describe('environment', () => {
});
});
- describe('isProduction', () => {
- it('should return true if in production', () => {
- process.env.NODE_ENV = 'production';
- expect(isProduction()).toBe(true);
- });
-
- it('should return false if not in production', () => {
- process.env.NODE_ENV = 'test';
- expect(!isProduction()).toBe(true);
- });
- });
-
describe('isTest', () => {
it('should return true if in test', () => {
process.env.NODE_ENV = 'test';
diff --git a/packages/apollo-utilities/src/util/__tests__/maybeDeepFeeze.ts b/src/utilities/common/__tests__/maybeDeepFeeze.ts
similarity index 100%
rename from packages/apollo-utilities/src/util/__tests__/maybeDeepFeeze.ts
rename to src/utilities/common/__tests__/maybeDeepFeeze.ts
diff --git a/src/utilities/common/__tests__/mergeDeep.ts b/src/utilities/common/__tests__/mergeDeep.ts
new file mode 100644
--- /dev/null
+++ b/src/utilities/common/__tests__/mergeDeep.ts
@@ -0,0 +1,326 @@
+import { mergeDeep, mergeDeepArray, DeepMerger } from '../mergeDeep';
+
+describe('mergeDeep', function() {
+ it('should return an object if first argument falsy', function() {
+ expect(mergeDeep()).toEqual({});
+ expect(mergeDeep(null)).toEqual({});
+ expect(mergeDeep(null, { foo: 42 })).toEqual({ foo: 42 });
+ });
+
+ it('should preserve identity for single arguments', function() {
+ const arg = Object.create(null);
+ expect(mergeDeep(arg)).toBe(arg);
+ });
+
+ it('should preserve identity when merging non-conflicting objects', function() {
+ const a = { a: { name: 'ay' } };
+ const b = { b: { name: 'bee' } };
+ const c = mergeDeep(a, b);
+ expect(c.a).toBe(a.a);
+ expect(c.b).toBe(b.b);
+ expect(c).toEqual({
+ a: { name: 'ay' },
+ b: { name: 'bee' },
+ });
+ });
+
+ it('should shallow-copy conflicting fields', function() {
+ const a = { conflict: { fromA: [1, 2, 3] } };
+ const b = { conflict: { fromB: [4, 5] } };
+ const c = mergeDeep(a, b);
+ expect(c.conflict).not.toBe(a.conflict);
+ expect(c.conflict).not.toBe(b.conflict);
+ expect(c.conflict.fromA).toBe(a.conflict.fromA);
+ expect(c.conflict.fromB).toBe(b.conflict.fromB);
+ expect(c).toEqual({
+ conflict: {
+ fromA: [1, 2, 3],
+ fromB: [4, 5],
+ },
+ });
+ });
+
+ it('should resolve conflicts among more than two objects', function() {
+ const sources = [];
+
+ for (let i = 0; i < 100; ++i) {
+ sources.push({
+ ['unique' + i]: { value: i },
+ conflict: {
+ ['from' + i]: { value: i },
+ nested: {
+ ['nested' + i]: { value: i },
+ },
+ },
+ });
+ }
+
+ const merged = mergeDeep(...sources);
+
+ sources.forEach((source, i) => {
+ expect(merged['unique' + i].value).toBe(i);
+ expect(source['unique' + i]).toBe(merged['unique' + i]);
+
+ expect(merged.conflict).not.toBe(source.conflict);
+ expect(merged.conflict['from' + i].value).toBe(i);
+ expect(merged.conflict['from' + i]).toBe(source.conflict['from' + i]);
+
+ expect(merged.conflict.nested).not.toBe(source.conflict.nested);
+ expect(merged.conflict.nested['nested' + i].value).toBe(i);
+ expect(merged.conflict.nested['nested' + i]).toBe(
+ source.conflict.nested['nested' + i],
+ );
+ });
+ });
+
+ it('can merge array elements', function() {
+ const a = [{ a: 1 }, { a: 'ay' }, 'a'];
+ const b = [{ b: 2 }, { b: 'bee' }, 'b'];
+ const c = [{ c: 3 }, { c: 'cee' }, 'c'];
+ const d = { 1: { d: 'dee' } };
+
+ expect(mergeDeep(a, b, c, d)).toEqual([
+ { a: 1, b: 2, c: 3 },
+ { a: 'ay', b: 'bee', c: 'cee', d: 'dee' },
+ 'c',
+ ]);
+ });
+
+ it('lets the last conflicting value win', function() {
+ expect(mergeDeep('a', 'b', 'c')).toBe('c');
+
+ expect(
+ mergeDeep(
+ { a: 'a', conflict: 1 },
+ { b: 'b', conflict: 2 },
+ { c: 'c', conflict: 3 },
+ ),
+ ).toEqual({
+ a: 'a',
+ b: 'b',
+ c: 'c',
+ conflict: 3,
+ });
+
+ expect(mergeDeep(
+ ['a', ['b', 'c'], 'd'],
+ [/*empty*/, ['B'], 'D'],
+ )).toEqual(
+ ['a', ['B', 'c'], 'D'],
+ );
+
+ expect(mergeDeep(
+ ['a', ['b', 'c'], 'd'],
+ ['A', [/*empty*/, 'C']],
+ )).toEqual(
+ ['A', ['b', 'C'], 'd'],
+ );
+ });
+
+ it('mergeDeep returns the intersection of its argument types', function() {
+ const abc = mergeDeep({ str: "hi", a: 1 }, { a: 3, b: 2 }, { b: 1, c: 2 });
+ // The point of this test is that the following lines type-check without
+ // resorting to any `any` loopholes:
+ expect(abc.str.slice(0)).toBe("hi");
+ expect(abc.a * 2).toBe(6);
+ expect(abc.b - 0).toBe(1);
+ expect(abc.c / 2).toBe(1);
+ });
+
+ it('mergeDeepArray returns the supertype of its argument types', function() {
+ class F {
+ check() { return "ok" };
+ }
+ const fs: F[] = [new F, new F, new F];
+ // Although mergeDeepArray doesn't have the same tuple type awareness as
+ // mergeDeep, it does infer that F should be the return type here:
+ expect(mergeDeepArray(fs).check()).toBe("ok");
+ });
+
+ it('supports custom reconciler functions', function () {
+ const merger = new DeepMerger((target, source, key) => {
+ const targetValue = target[key];
+ const sourceValue = source[key];
+ if (Array.isArray(sourceValue)) {
+ if (!Array.isArray(targetValue)) {
+ return sourceValue;
+ }
+ return [...targetValue, ...sourceValue];
+ }
+ return this.merge(targetValue, sourceValue);
+ });
+
+ expect(merger.merge(
+ {
+ a: [1, 2, 3],
+ b: "replace me",
+ },
+ {
+ a: [4, 5],
+ b: ["I", "win"],
+ },
+ )).toEqual({
+ a: [1, 2, 3, 4, 5],
+ b: ["I", "win"],
+ });
+ });
+
+ it('returns original object references when possible', function () {
+ const target = {
+ a: 1,
+ b: {
+ c: 3,
+ d: 4,
+ },
+ e: 5,
+ };
+
+ expect(mergeDeep(target, {
+ b: {
+ c: 3,
+ },
+ })).toBe(target);
+
+ const partial = mergeDeep(target, {
+ a: 1,
+ b: {
+ c: 3,
+ },
+ e: "eee",
+ });
+
+ expect(partial).not.toBe(target);
+ expect(partial.b).toBe(target.b);
+
+ const multiple = mergeDeep(target, {
+ a: 1,
+ }, {
+ b: {
+ d: 4,
+ },
+ }, {
+ e: 5,
+ });
+
+ expect(multiple).toBe(target);
+
+ const targetWithArrays = {
+ a: 1,
+ b: [2, {
+ c: [3, 4],
+ d: 5,
+ }, 6],
+ e: [7, 8, 9],
+ };
+
+ expect(mergeDeep(targetWithArrays, {
+ e: [],
+ })).toBe(targetWithArrays);
+
+ expect(mergeDeep(targetWithArrays, {
+ e: [/*hole*/, /*hole*/, 9],
+ })).toBe(targetWithArrays);
+
+ expect(mergeDeep(targetWithArrays, {
+ a: 1,
+ e: [7, 8],
+ })).toBe(targetWithArrays);
+
+ expect(mergeDeep(targetWithArrays, {
+ b: [2, {
+ c: [],
+ d: 5,
+ }],
+ })).toBe(targetWithArrays);
+
+ expect(mergeDeep(targetWithArrays, {
+ b: [2, {
+ c: [3],
+ d: 5,
+ }, 6],
+ e: [],
+ })).toBe(targetWithArrays);
+
+ const nestedInequality = mergeDeep(targetWithArrays, {
+ b: [2, {
+ c: [3],
+ d: 5,
+ }, "wrong"],
+ e: [],
+ });
+
+ expect(nestedInequality).not.toBe(targetWithArrays);
+ expect(nestedInequality.b).not.toBe(targetWithArrays.b);
+ expect(nestedInequality.b[1]).toEqual({
+ c: [3, 4],
+ d: 5,
+ });
+ expect(nestedInequality.b[1]).toBe(targetWithArrays.b[1]);
+
+ expect(mergeDeep(
+ targetWithArrays,
+ JSON.parse(JSON.stringify(targetWithArrays)),
+ JSON.parse(JSON.stringify(targetWithArrays)),
+ JSON.parse(JSON.stringify(targetWithArrays)),
+ )).toBe(targetWithArrays);
+ });
+
+ it("provides optional context to reconciler function", function () {
+ const contextObject = {
+ contextWithSpaces: "c o n t e x t",
+ };
+
+ const shallowContextValues: any[] = [];
+ const shallowMerger = new DeepMerger(
+ function(target, source, property, context: typeof contextObject) {
+ shallowContextValues.push(context);
+ // Deliberately not passing context down to nested levels.
+ return this.merge(target[property], source[property]);
+ },
+ );
+
+ const typicalContextValues: any[] = [];
+ const typicalMerger = new DeepMerger<typeof contextObject>(
+ function(target, source, property, context) {
+ typicalContextValues.push(context);
+ // Passing context down this time.
+ return this.merge(target[property], source[property], context);
+ },
+ );
+
+ const left = {
+ a: 1,
+ b: {
+ c: 2,
+ d: [3, 4],
+ },
+ e: 5,
+ };
+
+ const right = {
+ b: {
+ d: [3, 4, 5],
+ },
+ };
+
+ const expected = {
+ a: 1,
+ b: {
+ c: 2,
+ d: [3, 4, 5],
+ },
+ e: 5,
+ };
+
+ expect(shallowMerger.merge(left, right, contextObject)).toEqual(expected);
+ expect(typicalMerger.merge(left, right, contextObject)).toEqual(expected);
+
+ expect(shallowContextValues.length).toBe(2);
+ expect(shallowContextValues[0]).toBe(contextObject);
+ expect(shallowContextValues[1]).toBeUndefined();
+
+ expect(typicalContextValues.length).toBe(2);
+ expect(typicalContextValues[0]).toBe(contextObject);
+ expect(typicalContextValues[1]).toBe(contextObject);
+ });
+});
diff --git a/packages/apollo-utilities/src/__tests__/directives.ts b/src/utilities/graphql/__tests__/directives.ts
similarity index 100%
rename from packages/apollo-utilities/src/__tests__/directives.ts
rename to src/utilities/graphql/__tests__/directives.ts
diff --git a/packages/apollo-utilities/src/__tests__/fragments.ts b/src/utilities/graphql/__tests__/fragments.ts
similarity index 89%
rename from packages/apollo-utilities/src/__tests__/fragments.ts
rename to src/utilities/graphql/__tests__/fragments.ts
--- a/packages/apollo-utilities/src/__tests__/fragments.ts
+++ b/src/utilities/graphql/__tests__/fragments.ts
@@ -5,7 +5,12 @@ import { disableFragmentWarnings } from 'graphql-tag';
// Turn off warnings for repeated fragment names
disableFragmentWarnings();
-import { getFragmentQueryDocument } from '../fragments';
+import {
+ getFragmentQueryDocument,
+ createFragmentMap,
+ FragmentMap
+} from '../fragments';
+import { getFragmentDefinitions } from '../getFromAST';
describe('getFragmentQueryDocument', () => {
it('will throw an error if there is an operation', () => {
@@ -325,3 +330,26 @@ describe('getFragmentQueryDocument', () => {
);
});
});
+
+it('should create the fragment map correctly', () => {
+ const fragments = getFragmentDefinitions(gql`
+ fragment authorDetails on Author {
+ firstName
+ lastName
+ }
+
+ fragment moreAuthorDetails on Author {
+ address
+ }
+ `);
+ const fragmentMap = createFragmentMap(fragments);
+ const expectedTable: FragmentMap = {
+ authorDetails: fragments[0],
+ moreAuthorDetails: fragments[1],
+ };
+ expect(fragmentMap).toEqual(expectedTable);
+});
+
+it('should return an empty fragment map if passed undefined argument', () => {
+ expect(createFragmentMap(undefined)).toEqual({});
+});
diff --git a/packages/apollo-utilities/src/__tests__/getFromAST.ts b/src/utilities/graphql/__tests__/getFromAST.ts
similarity index 76%
rename from packages/apollo-utilities/src/__tests__/getFromAST.ts
rename to src/utilities/graphql/__tests__/getFromAST.ts
--- a/packages/apollo-utilities/src/__tests__/getFromAST.ts
+++ b/src/utilities/graphql/__tests__/getFromAST.ts
@@ -6,9 +6,6 @@ import {
checkDocument,
getFragmentDefinitions,
getQueryDefinition,
- getMutationDefinition,
- createFragmentMap,
- FragmentMap,
getDefaultValues,
getOperationName,
} from '../getFromAST';
@@ -164,55 +161,6 @@ describe('AST utility functions', () => {
}).toThrow();
});
- it('should get the correct mutation definition out of a mutation with multiple fragments', () => {
- const mutationWithFragments = gql`
- mutation {
- createAuthor(firstName: "John", lastName: "Smith") {
- ...authorDetails
- }
- }
-
- fragment authorDetails on Author {
- firstName
- lastName
- }
- `;
- const expectedDoc = gql`
- mutation {
- createAuthor(firstName: "John", lastName: "Smith") {
- ...authorDetails
- }
- }
- `;
- const expectedResult: OperationDefinitionNode = expectedDoc
- .definitions[0] as OperationDefinitionNode;
- const actualResult = getMutationDefinition(mutationWithFragments);
- expect(print(actualResult)).toEqual(print(expectedResult));
- });
-
- it('should create the fragment map correctly', () => {
- const fragments = getFragmentDefinitions(gql`
- fragment authorDetails on Author {
- firstName
- lastName
- }
-
- fragment moreAuthorDetails on Author {
- address
- }
- `);
- const fragmentMap = createFragmentMap(fragments);
- const expectedTable: FragmentMap = {
- authorDetails: fragments[0],
- moreAuthorDetails: fragments[1],
- };
- expect(fragmentMap).toEqual(expectedTable);
- });
-
- it('should return an empty fragment map if passed undefined argument', () => {
- expect(createFragmentMap(undefined)).toEqual({});
- });
-
it('should get the operation name out of a query', () => {
const query = gql`
query nameOfQuery {
@@ -293,24 +241,9 @@ describe('AST utility functions', () => {
}
`;
- const complexMutation = gql`
- mutation complexStuff(
- $test: Input = { key1: ["value", "value2"], key2: { key3: 4 } }
- ) {
- complexStuff(test: $test) {
- people {
- name
- }
- }
- }
- `;
-
expect(getDefaultValues(getQueryDefinition(basicQuery))).toEqual({
first: 1,
});
- expect(getDefaultValues(getMutationDefinition(complexMutation))).toEqual({
- test: { key1: ['value', 'value2'], key2: { key3: 4 } },
- });
});
});
});
diff --git a/packages/apollo-utilities/src/__tests__/storeUtils.ts b/src/utilities/graphql/__tests__/storeUtils.ts
similarity index 100%
rename from packages/apollo-utilities/src/__tests__/storeUtils.ts
rename to src/utilities/graphql/__tests__/storeUtils.ts
diff --git a/packages/apollo-utilities/src/__tests__/transform.ts b/src/utilities/graphql/__tests__/transform.ts
similarity index 67%
rename from packages/apollo-utilities/src/__tests__/transform.ts
rename to src/utilities/graphql/__tests__/transform.ts
--- a/packages/apollo-utilities/src/__tests__/transform.ts
+++ b/src/utilities/graphql/__tests__/transform.ts
@@ -8,7 +8,6 @@ disableFragmentWarnings();
import {
addTypenameToDocument,
removeDirectivesFromDocument,
- getDirectivesFromDocument,
removeConnectionDirectiveFromDocument,
removeArgumentsFromDocument,
removeFragmentSpreadFromDocument,
@@ -803,408 +802,6 @@ describe('query transforms', () => {
});
});
-describe('getDirectivesFromDocument', () => {
- it('should get query with fields of storage directive ', () => {
- const query = gql`
- query Simple {
- field @storage(if: true)
- }
- `;
-
- const expected = gql`
- query Simple {
- field @storage(if: true)
- }
- `;
- const doc = getDirectivesFromDocument([{ name: 'storage' }], query);
- expect(print(doc)).toBe(print(expected));
- });
-
- it('should get query with fields of storage directive [test function] ', () => {
- const query = gql`
- query Simple {
- field @storage(if: true)
- }
- `;
-
- const expected = gql`
- query Simple {
- field @storage(if: true)
- }
- `;
- const test = ({ name: { value } }: { name: any }) => value === 'storage';
- const doc = getDirectivesFromDocument([{ test }], query);
- expect(print(doc)).toBe(print(expected));
- });
-
- it('should only get query with fields of storage directive ', () => {
- const query = gql`
- query Simple {
- maybe @skip(if: false)
- field @storage(if: true)
- }
- `;
-
- const expected = gql`
- query Simple {
- field @storage(if: true)
- }
- `;
- const doc = getDirectivesFromDocument([{ name: 'storage' }], query);
- expect(print(doc)).toBe(print(expected));
- });
-
- it('should only get query with multiple fields of storage directive ', () => {
- const query = gql`
- query Simple {
- maybe @skip(if: false)
- field @storage(if: true)
- other @storage
- }
- `;
-
- const expected = gql`
- query Simple {
- field @storage(if: true)
- other @storage
- }
- `;
- const doc = getDirectivesFromDocument([{ name: 'storage' }], query);
- expect(print(doc)).toBe(print(expected));
- });
-
- it('should get query with fields of both storage and client directives ', () => {
- const query = gql`
- query Simple {
- maybe @skip(if: false)
- field @storage(if: true)
- user @client
- }
- `;
-
- const expected = gql`
- query Simple {
- field @storage(if: true)
- user @client
- }
- `;
- const doc = getDirectivesFromDocument(
- [{ name: 'storage' }, { name: 'client' }],
- query,
- );
- expect(print(doc)).toBe(print(expected));
- });
-
- it('should get query with different types of directive matchers ', () => {
- const query = gql`
- query Simple {
- maybe @skip(if: false)
- field @storage(if: true)
- user @client
- }
- `;
-
- const expected = gql`
- query Simple {
- field @storage(if: true)
- user @client
- }
- `;
- const doc = getDirectivesFromDocument(
- [
- { name: 'storage' },
- { test: directive => directive.name.value === 'client' },
- ],
- query,
- );
-
- expect(print(doc)).toBe(print(expected));
- });
-
- it('should get query with nested fields ', () => {
- const query = gql`
- query Simple {
- user {
- firstName @client
- email
- }
- }
- `;
-
- const expected = gql`
- query Simple {
- user {
- firstName @client
- }
- }
- `;
- const doc = getDirectivesFromDocument([{ name: 'client' }], query);
- expect(print(doc)).toBe(print(expected));
- });
-
- it('should include all the nested fields of field that has client directive ', () => {
- const query = gql`
- query Simple {
- user @client {
- firstName
- email
- }
- }
- `;
-
- const expected = gql`
- query Simple {
- user @client {
- firstName
- email
- }
- }
- `;
- const doc = getDirectivesFromDocument([{ name: 'client' }], query);
- expect(print(doc)).toBe(print(expected));
- });
-
- it('should return null if the query is no longer valid', () => {
- const query = gql`
- query Simple {
- field
- }
- `;
- const doc = getDirectivesFromDocument([{ name: 'client' }], query);
- expect(print(doc)).toBe(null);
- });
-
- it('should get query with client fields in fragment', function() {
- const query = gql`
- query Simple {
- ...fragmentSpread
- }
-
- fragment fragmentSpread on Thing {
- field @client
- other
- }
- `;
- const expected = gql`
- query Simple {
- ...fragmentSpread
- }
-
- fragment fragmentSpread on Thing {
- field @client
- }
- `;
- const doc = getDirectivesFromDocument([{ name: 'client' }], query);
- expect(print(doc)).toBe(print(expected));
- });
-
- it('should get query with client fields in fragment with nested fields', function() {
- const query = gql`
- query Simple {
- ...fragmentSpread
- }
-
- fragment fragmentSpread on Thing {
- user {
- firstName @client
- lastName
- }
- }
- `;
- const expected = gql`
- query Simple {
- ...fragmentSpread
- }
-
- fragment fragmentSpread on Thing {
- user {
- firstName @client
- }
- }
- `;
- const doc = getDirectivesFromDocument([{ name: 'client' }], query);
- expect(print(doc)).toBe(print(expected));
- });
-
- it('should get query with client fields in multiple fragments', function() {
- const query = gql`
- query Simple {
- ...fragmentSpread
- ...anotherFragmentSpread
- }
-
- fragment fragmentSpread on Thing {
- field @client
- other
- }
-
- fragment anotherFragmentSpread on AnotherThing {
- user @client
- product
- }
- `;
- const expected = gql`
- query Simple {
- ...fragmentSpread
- ...anotherFragmentSpread
- }
-
- fragment fragmentSpread on Thing {
- field @client
- }
-
- fragment anotherFragmentSpread on AnotherThing {
- user @client
- }
- `;
- const doc = getDirectivesFromDocument([{ name: 'client' }], query);
- expect(print(doc)).toBe(print(expected));
- });
-
- it("should return null if fragment didn't have client fields", function() {
- const query = gql`
- query Simple {
- ...fragmentSpread
- }
-
- fragment fragmentSpread on Thing {
- field
- }
- `;
- const doc = getDirectivesFromDocument([{ name: 'client' }], query);
- expect(print(doc)).toBe(print(null));
- });
-
- it('should get query with client fields when both fields and fragements are mixed', function() {
- const query = gql`
- query Simple {
- user @client
- product @storage
- order
- ...fragmentSpread
- }
-
- fragment fragmentSpread on Thing {
- field @client
- other
- }
- `;
- const expected = gql`
- query Simple {
- user @client
- ...fragmentSpread
- }
-
- fragment fragmentSpread on Thing {
- field @client
- }
- `;
- const doc = getDirectivesFromDocument([{ name: 'client' }], query);
- expect(print(doc)).toBe(print(expected));
- });
-
- it('should get mutation with client fields', () => {
- const query = gql`
- mutation {
- login @client
- }
- `;
-
- const expected = gql`
- mutation {
- login @client
- }
- `;
- const doc = getDirectivesFromDocument([{ name: 'client' }], query);
- expect(print(doc)).toBe(print(expected));
- });
-
- it('should get mutation fields of client only', () => {
- const query = gql`
- mutation {
- login @client
- updateUser
- }
- `;
-
- const expected = gql`
- mutation {
- login @client
- }
- `;
- const doc = getDirectivesFromDocument([{ name: 'client' }], query);
- expect(print(doc)).toBe(print(expected));
- });
-
- describe('includeAllFragments', () => {
- it('= false: should remove the values without a client in fragment', () => {
- const query = gql`
- fragment client on ClientData {
- hi @client
- bye @storage
- bar
- }
-
- query Mixed {
- foo @client {
- ...client
- }
- bar {
- baz
- }
- }
- `;
-
- const expected = gql`
- fragment client on ClientData {
- hi @client
- }
-
- query Mixed {
- foo @client {
- ...client
- }
- }
- `;
- const doc = getDirectivesFromDocument([{ name: 'client' }], query, false);
- expect(print(doc)).toBe(print(expected));
- });
-
- it('= true: should include the values without a client in fragment', () => {
- const query = gql`
- fragment client on ClientData {
- hi @client
- bye @storage
- bar
- }
-
- query Mixed {
- foo @client {
- ...client
- }
- bar {
- baz
- }
- }
- `;
-
- const expected = gql`
- fragment client on ClientData {
- hi @client
- }
-
- query Mixed {
- foo @client {
- ...client
- }
- }
- `;
- const doc = getDirectivesFromDocument([{ name: 'client' }], query, true);
- expect(print(doc)).toBe(print(expected));
- });
- });
-});
-
describe('removeClientSetsFromDocument', () => {
it('should remove @client fields from document', () => {
const query = gql`
diff --git a/packages/apollo-utilities/src/util/__tests__/stripSymbols.ts b/src/utilities/testing/__tests__/stripSymbols.ts
similarity index 100%
rename from packages/apollo-utilities/src/util/__tests__/stripSymbols.ts
rename to src/utilities/testing/__tests__/stripSymbols.ts
diff --git a/src/utilities/testing/index.ts b/src/utilities/testing/index.ts
new file mode 100644
--- /dev/null
+++ b/src/utilities/testing/index.ts
@@ -0,0 +1,8 @@
+export { MockedProvider } from './mocking/MockedProvider';
+export { MockLink, mockSingleLink, MockedResponse } from './mocking/mockLink';
+export {
+ MockSubscriptionLink,
+ mockObservableLink
+} from './mocking/mockSubscriptionLink';
+export { createMockClient } from './mocking/mockClient';
+export { stripSymbols } from './stripSymbols';
diff --git a/src/utilities/testing/itAsync.ts b/src/utilities/testing/itAsync.ts
new file mode 100644
--- /dev/null
+++ b/src/utilities/testing/itAsync.ts
@@ -0,0 +1,27 @@
+function wrap<TResult>(
+ original: (...args: any[]) => TResult,
+) {
+ return (
+ message: string,
+ callback: (
+ resolve: (result?: any) => void,
+ reject: (reason?: any) => void,
+ ) => any,
+ timeout?: number,
+ ) => original(message, function () {
+ return new Promise(
+ (resolve, reject) => callback.call(this, resolve, reject),
+ );
+ }, timeout);
+}
+
+const wrappedIt = wrap(it);
+export function itAsync(...args: Parameters<typeof wrappedIt>) {
+ return wrappedIt.apply(this, args);
+}
+
+export namespace itAsync {
+ export const only = wrap(it.only);
+ export const skip = wrap(it.skip);
+ export const todo = wrap(it.todo);
+}
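For orientation, a sketch of how a test might consume this wrapper (the import path and asserted value are hypothetical; `expect` is the usual Jest global):

```
import { itAsync } from '../itAsync';

itAsync('settles explicitly via resolve/reject', (resolve, reject) => {
  // The wrapper turns the (resolve, reject) pair into a Promise that the
  // underlying `it` waits on, so the test ends when one of them is called.
  Promise.resolve('ok')
    .then(value => {
      expect(value).toBe('ok');
      resolve();
    })
    .catch(reject);
});
```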
diff --git a/src/utilities/testing/mocking/MockedProvider.tsx b/src/utilities/testing/mocking/MockedProvider.tsx
new file mode 100644
--- /dev/null
+++ b/src/utilities/testing/mocking/MockedProvider.tsx
@@ -0,0 +1,73 @@
+import React from 'react';
+
+import { ApolloClient, DefaultOptions } from '../../../ApolloClient';
+import { InMemoryCache as Cache } from '../../../cache/inmemory/inMemoryCache';
+import { ApolloProvider } from '../../../react/context/ApolloProvider';
+import { MockLink } from '../../../utilities/testing/mocking/mockLink';
+import { ApolloLink } from '../../../link/core/ApolloLink';
+import { Resolvers } from '../../../core/types';
+import { ApolloCache } from '../../../cache/core/cache';
+import { MockedResponse } from '../../../utilities/testing/mocking/mockLink';
+
+export interface MockedProviderProps<TSerializedCache = {}> {
+ mocks?: ReadonlyArray<MockedResponse>;
+ addTypename?: boolean;
+ defaultOptions?: DefaultOptions;
+ cache?: ApolloCache<TSerializedCache>;
+ resolvers?: Resolvers;
+ childProps?: object;
+ children?: React.ReactElement;
+ link?: ApolloLink;
+}
+
+export interface MockedProviderState {
+ client: ApolloClient<any>;
+}
+
+export class MockedProvider extends React.Component<
+ MockedProviderProps,
+ MockedProviderState
+> {
+ public static defaultProps: MockedProviderProps = {
+ addTypename: true
+ };
+
+ constructor(props: MockedProviderProps) {
+ super(props);
+
+ const {
+ mocks,
+ addTypename,
+ defaultOptions,
+ cache,
+ resolvers,
+ link
+ } = this.props;
+ const client = new ApolloClient({
+ cache: cache || new Cache({ addTypename }),
+ defaultOptions,
+ link: link || new MockLink(
+ mocks || [],
+ addTypename,
+ ),
+ resolvers,
+ });
+
+ this.state = { client };
+ }
+
+ public render() {
+ const { children, childProps } = this.props;
+ return children ? (
+ <ApolloProvider client={this.state.client}>
+ {React.cloneElement(React.Children.only(children), { ...childProps })}
+ </ApolloProvider>
+ ) : null;
+ }
+
+ public componentWillUnmount() {
+ // Since this.state.client was created in the constructor, it's this
+ // MockedProvider's responsibility to terminate it.
+ this.state.client.stop();
+ }
+}
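A minimal usage sketch (the query, mocked data, and child element are invented for illustration; `addTypename={false}` matches data that carries no `__typename` fields):

```
import React from 'react';
import gql from 'graphql-tag';
import { render } from '@testing-library/react';
import { MockedProvider } from './MockedProvider';

const QUERY = gql`
  query GetGreeting {
    greeting
  }
`;

const mocks = [
  {
    request: { query: QUERY },
    result: { data: { greeting: 'hello' } },
  },
];

// MockedProvider renders its (single) child inside an ApolloProvider
// wired to a client backed by the mocks above.
render(
  <MockedProvider mocks={mocks} addTypename={false}>
    <div>query consumer goes here</div>
  </MockedProvider>
);
```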
diff --git a/src/utilities/testing/mocking/mockClient.ts b/src/utilities/testing/mocking/mockClient.ts
new file mode 100644
--- /dev/null
+++ b/src/utilities/testing/mocking/mockClient.ts
@@ -0,0 +1,20 @@
+import { DocumentNode } from 'graphql';
+
+import { ApolloClient } from '../../../ApolloClient';
+import { InMemoryCache } from '../../../cache/inmemory/inMemoryCache';
+import { NormalizedCacheObject } from '../../../cache/inmemory/types';
+import { mockSingleLink } from '../../../utilities/testing/mocking/mockLink';
+
+export function createMockClient<TData>(
+ data: TData,
+ query: DocumentNode,
+ variables = {},
+): ApolloClient<NormalizedCacheObject> {
+ return new ApolloClient({
+ link: mockSingleLink({
+ request: { query, variables },
+ result: { data },
+ }).setOnError(error => { throw error }),
+ cache: new InMemoryCache({ addTypename: false }),
+ });
+}
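A sketch of how this factory might be used (query and data are illustrative; because the cache is created with `addTypename: false`, the mocked data needs no `__typename` fields):

```
import gql from 'graphql-tag';
import { createMockClient } from './mockClient';

const query = gql`
  query GetUser($id: ID!) {
    user(id: $id) {
      name
    }
  }
`;

const client = createMockClient(
  { user: { name: 'Ada' } }, // data returned by the single mocked response
  query,
  { id: '1' },
);

client
  .query({ query, variables: { id: '1' } })
  .then(result => console.log(result.data)); // { user: { name: 'Ada' } }
```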
diff --git a/packages/apollo-client/src/__mocks__/mockFetch.ts b/src/utilities/testing/mocking/mockFetch.ts
similarity index 100%
rename from packages/apollo-client/src/__mocks__/mockFetch.ts
rename to src/utilities/testing/mocking/mockFetch.ts
diff --git a/src/utilities/testing/mocking/mockLink.ts b/src/utilities/testing/mocking/mockLink.ts
new file mode 100644
--- /dev/null
+++ b/src/utilities/testing/mocking/mockLink.ts
@@ -0,0 +1,178 @@
+import { print } from 'graphql/language/printer';
+import stringify from 'fast-json-stable-stringify';
+import { equal } from '@wry/equality';
+
+import { Observable } from '../../../utilities/observables/Observable';
+import { ApolloLink } from '../../../link/core/ApolloLink';
+import {
+ Operation,
+ GraphQLRequest,
+ FetchResult,
+} from '../../../link/core/types';
+import {
+ addTypenameToDocument,
+ removeClientSetsFromDocument,
+ removeConnectionDirectiveFromDocument,
+} from '../../../utilities/graphql/transform';
+import { cloneDeep } from '../../../utilities/common/cloneDeep';
+
+export type ResultFunction<T> = () => T;
+
+export interface MockedResponse {
+ request: GraphQLRequest;
+ result?: FetchResult | ResultFunction<FetchResult>;
+ error?: Error;
+ delay?: number;
+ newData?: ResultFunction<FetchResult>;
+}
+
+function requestToKey(request: GraphQLRequest, addTypename: Boolean): string {
+ const queryString =
+ request.query &&
+ print(addTypename ? addTypenameToDocument(request.query) : request.query);
+ const requestKey = { query: queryString };
+ return JSON.stringify(requestKey);
+}
+
+export class MockLink extends ApolloLink {
+ public operation: Operation;
+ public addTypename: Boolean = true;
+ private mockedResponsesByKey: { [key: string]: MockedResponse[] } = {};
+
+ constructor(
+ mockedResponses: ReadonlyArray<MockedResponse>,
+ addTypename: Boolean = true
+ ) {
+ super();
+ this.addTypename = addTypename;
+ if (mockedResponses) {
+ mockedResponses.forEach(mockedResponse => {
+ this.addMockedResponse(mockedResponse);
+ });
+ }
+ }
+
+ public addMockedResponse(mockedResponse: MockedResponse) {
+ const normalizedMockedResponse = this.normalizeMockedResponse(
+ mockedResponse
+ );
+ const key = requestToKey(
+ normalizedMockedResponse.request,
+ this.addTypename
+ );
+ let mockedResponses = this.mockedResponsesByKey[key];
+ if (!mockedResponses) {
+ mockedResponses = [];
+ this.mockedResponsesByKey[key] = mockedResponses;
+ }
+ mockedResponses.push(normalizedMockedResponse);
+ }
+
+ public request(operation: Operation): Observable<FetchResult> | null {
+ this.operation = operation;
+ const key = requestToKey(operation, this.addTypename);
+ let responseIndex;
+ const response = (this.mockedResponsesByKey[key] || []).find(
+ (res, index) => {
+ const requestVariables = operation.variables || {};
+ const mockedResponseVariables = res.request.variables || {};
+ if (
+ !equal(
+ stringify(requestVariables),
+ stringify(mockedResponseVariables)
+ )
+ ) {
+ return false;
+ }
+ responseIndex = index;
+ return true;
+ }
+ );
+
+ if (!response || typeof responseIndex === 'undefined') {
+ this.onError(new Error(
+ `No more mocked responses for the query: ${print(
+ operation.query
+ )}, variables: ${JSON.stringify(operation.variables)}`
+ ));
+ }
+
+ this.mockedResponsesByKey[key].splice(responseIndex, 1);
+
+ const { newData } = response;
+
+ if (newData) {
+ response.result = newData();
+ this.mockedResponsesByKey[key].push(response);
+ }
+
+ const { result, error, delay } = response;
+
+ if (!result && !error) {
+ this.onError(new Error(
+ `Mocked response should contain either result or error: ${key}`
+ ));
+ }
+
+ return new Observable(observer => {
+ let timer = setTimeout(
+ () => {
+ if (error) {
+ observer.error(error);
+ } else {
+ if (result) {
+ observer.next(
+ typeof result === 'function'
+ ? (result as ResultFunction<FetchResult>)()
+ : result
+ );
+ }
+ observer.complete();
+ }
+ },
+ delay ? delay : 0
+ );
+
+ return () => {
+ clearTimeout(timer);
+ };
+ });
+ }
+
+ private normalizeMockedResponse(
+ mockedResponse: MockedResponse
+ ): MockedResponse {
+ const newMockedResponse = cloneDeep(mockedResponse);
+ newMockedResponse.request.query = removeConnectionDirectiveFromDocument(
+ newMockedResponse.request.query
+ );
+ const query = removeClientSetsFromDocument(newMockedResponse.request.query);
+ if (query) {
+ newMockedResponse.request.query = query;
+ }
+ return newMockedResponse;
+ }
+}
+
+interface MockApolloLink extends ApolloLink {
+ operation?: Operation;
+}
+
+// Pass in multiple mocked responses, so that you can test flows that end up
+// making multiple queries to the server.
+// NOTE: The last arg can optionally be an `addTypename` arg.
+export function mockSingleLink(
+ ...mockedResponses: Array<any>
+): MockApolloLink {
+  // Pull off the potential addTypename flag. If the last argument isn't a
+  // boolean, we just default the flag to true below.
+ let maybeTypename = mockedResponses[mockedResponses.length - 1];
+ let mocks = mockedResponses.slice(0, mockedResponses.length - 1);
+
+ if (typeof maybeTypename !== 'boolean') {
+ mocks = mockedResponses;
+ maybeTypename = true;
+ }
+
+ return new MockLink(mocks, maybeTypename);
+}
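To make the trailing-argument convention concrete, a sketch (query and data invented): two sequential mocked responses followed by an explicit `addTypename` flag:

```
import gql from 'graphql-tag';
import { mockSingleLink } from './mockLink';

const query = gql`
  query Hello {
    hello
  }
`;

const link = mockSingleLink(
  { request: { query }, result: { data: { hello: 'world' } } },
  { request: { query }, result: { data: { hello: 'again' } } },
  false, // optional trailing addTypename flag; treated as true when omitted
);
```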
diff --git a/src/utilities/testing/mocking/mockQueryManager.ts b/src/utilities/testing/mocking/mockQueryManager.ts
new file mode 100644
--- /dev/null
+++ b/src/utilities/testing/mocking/mockQueryManager.ts
@@ -0,0 +1,15 @@
+import { QueryManager } from '../../../core/QueryManager';
+import { mockSingleLink, MockedResponse } from './mockLink';
+import { InMemoryCache } from '../../../cache/inmemory/inMemoryCache';
+
+// Helper method for tests: constructs a query manager out of
+// a list of mocked responses for a mocked network interface.
+export default (
+ reject: (reason: any) => any,
+ ...mockedResponses: MockedResponse[]
+) => {
+ return new QueryManager({
+ link: mockSingleLink(...mockedResponses).setOnError(reject),
+ cache: new InMemoryCache({ addTypename: false }),
+ });
+};
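A sketch of typical use inside a test (invented query; in practice `reject` would come from a harness such as `itAsync` above):

```
import gql from 'graphql-tag';
import mockQueryManager from './mockQueryManager';

const query = gql`
  query Greeting {
    greeting
  }
`;

// The first argument fails the test if the mocked link reports an error.
const queryManager = mockQueryManager(
  (reason: any) => { throw reason; },
  { request: { query }, result: { data: { greeting: 'hi' } } },
);

const observable = queryManager.watchQuery({ query });
```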
diff --git a/src/utilities/testing/mocking/mockSubscriptionLink.ts b/src/utilities/testing/mocking/mockSubscriptionLink.ts
new file mode 100644
--- /dev/null
+++ b/src/utilities/testing/mocking/mockSubscriptionLink.ts
@@ -0,0 +1,62 @@
+import { Observable } from '../../../utilities/observables/Observable';
+import { ApolloLink } from '../../../link/core/ApolloLink';
+import { FetchResult, Operation } from '../../../link/core/types';
+
+export interface MockedSubscription {
+ request: Operation;
+}
+
+export interface MockedSubscriptionResult {
+ result?: FetchResult;
+ error?: Error;
+ delay?: number;
+}
+
+export class MockSubscriptionLink extends ApolloLink {
+ public unsubscribers: any[] = [];
+ public setups: any[] = [];
+
+ private observer: any;
+
+ constructor() {
+ super();
+ }
+
+ public request(_req: any) {
+ return new Observable<FetchResult>(observer => {
+ this.setups.forEach(x => x());
+ this.observer = observer;
+ return () => {
+ this.unsubscribers.forEach(x => x());
+ };
+ });
+ }
+
+ public simulateResult(result: MockedSubscriptionResult, complete = false) {
+ setTimeout(() => {
+ const { observer } = this;
+ if (!observer) throw new Error('subscription torn down');
+ if (complete && observer.complete) observer.complete();
+ if (result.result && observer.next) observer.next(result.result);
+ if (result.error && observer.error) observer.error(result.error);
+ }, result.delay || 0);
+ }
+
+ public simulateComplete() {
+ const { observer } = this;
+ if (!observer) throw new Error('subscription torn down');
+ if (observer.complete) observer.complete();
+ }
+
+ public onSetup(listener: any): void {
+ this.setups = this.setups.concat([listener]);
+ }
+
+ public onUnsubscribe(listener: any): void {
+ this.unsubscribers = this.unsubscribers.concat([listener]);
+ }
+}
+
+export function mockObservableLink(): MockSubscriptionLink {
+ return new MockSubscriptionLink();
+}
diff --git a/packages/apollo-client/src/__mocks__/mockWatchQuery.ts b/src/utilities/testing/mocking/mockWatchQuery.ts
similarity index 52%
rename from packages/apollo-client/src/__mocks__/mockWatchQuery.ts
rename to src/utilities/testing/mocking/mockWatchQuery.ts
--- a/packages/apollo-client/src/__mocks__/mockWatchQuery.ts
+++ b/src/utilities/testing/mocking/mockWatchQuery.ts
@@ -1,11 +1,12 @@
-import { MockedResponse } from './mockLinks';
-
+import { MockedResponse } from './mockLink';
import mockQueryManager from './mockQueryManager';
+import { ObservableQuery } from '../../../core/ObservableQuery';
-import { ObservableQuery } from '../core/ObservableQuery';
-
-export default (...mockedResponses: MockedResponse[]): ObservableQuery<any> => {
- const queryManager = mockQueryManager(...mockedResponses);
+export default (
+ reject: (reason: any) => any,
+ ...mockedResponses: MockedResponse[]
+): ObservableQuery<any> => {
+ const queryManager = mockQueryManager(reject, ...mockedResponses);
const firstRequest = mockedResponses[0].request;
return queryManager.watchQuery({
query: firstRequest.query!,
diff --git a/packages/apollo-client/src/util/observableToPromise.ts b/src/utilities/testing/observableToPromise.ts
similarity index 93%
rename from packages/apollo-client/src/util/observableToPromise.ts
rename to src/utilities/testing/observableToPromise.ts
--- a/packages/apollo-client/src/util/observableToPromise.ts
+++ b/src/utilities/testing/observableToPromise.ts
@@ -1,6 +1,6 @@
-import { ObservableQuery } from '../core/ObservableQuery';
-import { ApolloQueryResult } from '../core/types';
-import { Subscription } from '../util/Observable';
+import { ObservableQuery } from '../../core/ObservableQuery';
+import { ApolloQueryResult } from '../../core/types';
+import { Subscription } from '../../utilities/observables/Observable';
/**
*
diff --git a/packages/apollo-utilities/src/util/stripSymbols.ts b/src/utilities/testing/stripSymbols.ts
similarity index 100%
rename from packages/apollo-utilities/src/util/stripSymbols.ts
rename to src/utilities/testing/stripSymbols.ts
diff --git a/packages/apollo-client/src/util/subscribeAndCount.ts b/src/utilities/testing/subscribeAndCount.ts
similarity index 67%
rename from packages/apollo-client/src/util/subscribeAndCount.ts
rename to src/utilities/testing/subscribeAndCount.ts
--- a/packages/apollo-client/src/util/subscribeAndCount.ts
+++ b/src/utilities/testing/subscribeAndCount.ts
@@ -1,10 +1,10 @@
-import { ObservableQuery } from '../core/ObservableQuery';
-import { ApolloQueryResult } from '../core/types';
-import { Subscription } from '../util/Observable';
-import { asyncMap } from './observables';
+import { ObservableQuery } from '../../core/ObservableQuery';
+import { ApolloQueryResult } from '../../core/types';
+import { Subscription } from '../../utilities/observables/Observable';
+import { asyncMap } from '../../utilities/observables/observables';
export default function subscribeAndCount(
- done: jest.DoneCallback,
+ reject: (reason: any) => any,
observable: ObservableQuery<any>,
cb: (handleCount: number, result: ApolloQueryResult<any>) => any,
): Subscription {
@@ -20,12 +20,12 @@ export default function subscribeAndCount(
// to be defined.
setImmediate(() => {
subscription.unsubscribe();
- done.fail(e);
+ reject(e);
});
}
},
).subscribe({
- error: done.fail,
+ error: reject,
});
return subscription;
}
diff --git a/packages/apollo-client/src/util/wrap.ts b/src/utilities/testing/wrap.ts
similarity index 83%
rename from packages/apollo-client/src/util/wrap.ts
rename to src/utilities/testing/wrap.ts
--- a/packages/apollo-client/src/util/wrap.ts
+++ b/src/utilities/testing/wrap.ts
@@ -1,12 +1,13 @@
// I'm not sure why mocha doesn't provide something like this, you can't
// always use promises
-export default (done: jest.DoneCallback, cb: (...args: any[]) => any) => (
- ...args: any[]
-) => {
+export default <TArgs extends any[], TResult>(
+ reject: (reason: any) => any,
+ cb: (...args: TArgs) => TResult,
+) => (...args: TArgs) => {
try {
return cb(...args);
} catch (e) {
- done.fail(e);
+ reject(e);
}
};
| v3.0.0-beta.16 mutating local state is not reflected in all components with useQuery
**Intended outcome:**
Local state is stored in the Apollo cache; when that state is updated, every component with a `useQuery` watching the data should update.
**Actual outcome:**
It appears only the first subscriber to the useQuery receives updates.
**How to reproduce the issue:**
The code sandbox below stores some local state in Apollo. Three components render the result of the same `useQuery` query. When the Child component runs a mutation on that data, only the Parent component updates; neither the Child component nor the OtherChild component does.
If the query is removed from the Parent component, then only the Child component updates.
If the query is removed from both the Parent and Child, then the OtherChild component updates.
https://codesandbox.io/s/apollo-client-prob-6ywxk
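A minimal sketch of the shape being described (component and field names are reconstructed from the description, not copied from the sandbox; the matching local resolver for the mutation is omitted):

```
import React from 'react';
import gql from 'graphql-tag';
import { useQuery, useMutation } from '@apollo/client';

const GET_COUNT = gql`
  query GetCount {
    count @client
  }
`;

const INCREMENT = gql`
  mutation Increment {
    increment @client
  }
`;

const Count = () => {
  const { data } = useQuery(GET_COUNT);
  return <span>{data ? data.count : null}</span>;
};

const Child = () => {
  const { data } = useQuery(GET_COUNT);
  const [increment] = useMutation(INCREMENT);
  return <button onClick={() => increment()}>{data ? data.count : 0}</button>;
};

// Parent, Child and OtherChild each run the same query; after the mutation,
// all three should re-render, but only the first subscriber does.
const Parent = () => (
  <>
    <Count />
    <Child />
    <Count /> {/* stands in for OtherChild */}
  </>
);
```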
**Versions**
- @apollo/client 3.0.0-beta.16
- graphql 14.5.8
- react 16.12.0
- react-dom 16.12.0
- react-scripts 3.0.1
| 2019-08-01T15:31:55Z | 2.6 |
|
apollographql/apollo-client | 4765 | apollographql__apollo-client-4765 | [
"3660",
"3660"
] | a87d28b44f2d4d4320a08d74daddb905b600aa10 | diff --git a/packages/apollo-client/src/data/queries.ts b/packages/apollo-client/src/data/queries.ts
--- a/packages/apollo-client/src/data/queries.ts
+++ b/packages/apollo-client/src/data/queries.ts
@@ -151,13 +151,14 @@ export class QueryStore {
}
public markQueryResultClient(queryId: string, complete: boolean) {
- if (!this.store || !this.store[queryId]) return;
-
- this.store[queryId].networkError = null;
- this.store[queryId].previousVariables = null;
- this.store[queryId].networkStatus = complete
- ? NetworkStatus.ready
- : NetworkStatus.loading;
+ const storeValue = this.store && this.store[queryId];
+ if (storeValue) {
+ storeValue.networkError = null;
+ storeValue.previousVariables = null;
+ if (complete) {
+ storeValue.networkStatus = NetworkStatus.ready;
+ }
+ }
}
public stopQuery(queryId: string) {
| diff --git a/packages/apollo-client/src/core/__tests__/fetchPolicies.ts b/packages/apollo-client/src/core/__tests__/fetchPolicies.ts
--- a/packages/apollo-client/src/core/__tests__/fetchPolicies.ts
+++ b/packages/apollo-client/src/core/__tests__/fetchPolicies.ts
@@ -21,6 +21,7 @@ import subscribeAndCount from '../../util/subscribeAndCount';
import { withWarning } from '../../util/wrap';
import { mockSingleLink } from '../../__mocks__/mockLinks';
+import { NetworkStatus } from '../networkStatus';
const query = gql`
query {
@@ -345,3 +346,111 @@ describe('no-cache', () => {
});
});
});
+
+describe('cache-and-network', function() {
+ it('gives appropriate networkStatus for refetched queries', done => {
+ const client = new ApolloClient({
+ link: ApolloLink.empty(),
+ cache: new InMemoryCache(),
+ resolvers: {
+ Query: {
+ hero(_data, args) {
+ return {
+ __typename: 'Hero',
+ ...args,
+ name: 'Luke Skywalker',
+ };
+ },
+ },
+ },
+ });
+
+ const observable = client.watchQuery({
+ query: gql`
+ query FetchLuke($id: String) {
+ hero(id: $id) @client {
+ id
+ name
+ }
+ }
+ `,
+ fetchPolicy: 'cache-and-network',
+ variables: { id: '1' },
+ notifyOnNetworkStatusChange: true,
+ });
+
+ function dataWithId(id: number | string) {
+ return {
+ hero: {
+ __typename: 'Hero',
+ id: String(id),
+ name: 'Luke Skywalker',
+ },
+ };
+ }
+
+ subscribeAndCount(done, observable, (count, result) => {
+ if (count === 1) {
+ expect(result).toEqual({
+ data: void 0,
+ loading: true,
+ networkStatus: NetworkStatus.loading,
+ stale: true,
+ });
+ } else if (count === 2) {
+ expect(result).toEqual({
+ data: dataWithId(1),
+ loading: false,
+ networkStatus: NetworkStatus.ready,
+ stale: false,
+ });
+ return observable.setVariables({ id: '2' });
+ } else if (count === 3) {
+ expect(result).toEqual({
+ data: dataWithId(1),
+ loading: true,
+ networkStatus: NetworkStatus.setVariables,
+ stale: false,
+ });
+ } else if (count === 4) {
+ expect(result).toEqual({
+ data: dataWithId(2),
+ loading: false,
+ networkStatus: NetworkStatus.ready,
+ stale: false,
+ });
+ return observable.refetch();
+ } else if (count === 5) {
+ expect(result).toEqual({
+ data: dataWithId(2),
+ loading: true,
+ networkStatus: NetworkStatus.refetch,
+ stale: false,
+ });
+ } else if (count === 6) {
+ expect(result).toEqual({
+ data: dataWithId(2),
+ loading: false,
+ networkStatus: NetworkStatus.ready,
+ stale: false,
+ });
+ return observable.refetch({ id: '3' });
+ } else if (count === 7) {
+ expect(result).toEqual({
+ data: dataWithId(2),
+ loading: true,
+ networkStatus: NetworkStatus.setVariables,
+ stale: false,
+ });
+ } else if (count === 8) {
+ expect(result).toEqual({
+ data: dataWithId(3),
+ loading: false,
+ networkStatus: NetworkStatus.ready,
+ stale: false,
+ });
+ done();
+ }
+ });
+ });
+});
| FetchPolicy `cache-and-network` always uses networkStatus = 1 regardless of cache hit
**Intended outcome:**
This is the issue I originally found, but it appears the problem lies with the `client` rather than the `Query` component: apollographql/react-apollo#1217
When using the `cache-and-network` fetch policy, I should be able to tell when I have data from the cache and when the data is being retrieved for the first time.
```
<Query query={gql`{ now }`} fetchPolicy="cache-and-network" notifyOnNetworkStatusChange>
  {({ data, error, networkStatus }) => {
    if (networkStatus < 4) return 'Loading';
    if (error) return 'Error!';
    return (
      <div>
        <time>{data.now}</time>
        {networkStatus === 4 && 'Refreshing'}
      </div>
    );
  }}
</Query>
```
_The query `{ now }` returns the current date and time._
I would expect `networkStatus` to be `1` when it is making the first request, but `4` on each subsequent request while it is refetching the data.
**Actual outcome:**
Today, the first request and subsequent requests both come in with `networkStatus = 1`, making it difficult to tell the difference between the two states.
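For now, the only way I've found to tell the two states apart is to infer the first load from `loading` plus the absence of cached data, instead of relying on `networkStatus`. A sketch, reusing the `{ now }` query above:
```js
<Query query={gql`{ now }`} fetchPolicy="cache-and-network" notifyOnNetworkStatusChange>
  {({ data, loading }) => {
    const firstLoad = loading && (!data || !data.now); // nothing from the cache yet
    const refreshing = loading && data && !!data.now;  // cache hit, fetch in flight
    if (firstLoad) return 'Loading';
    return (
      <div>
        <time>{data.now}</time>
        {refreshing && 'Refreshing'}
      </div>
    );
  }}
</Query>
```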
**Versions**
System:
OS: macOS Sierra 10.12.6
Binaries:
Node: 8.9.3 - ~/.nvm/versions/node/v8.9.3/bin/node
Yarn: 1.6.0 - ~/.nvm/versions/node/v8.9.3/bin/yarn
npm: 6.1.0 - ~/.nvm/versions/node/v8.9.3/bin/npm
Browsers:
Chrome: 67.0.3396.99
Firefox: 61.0
Safari: 11.1
| This is quite similar to an [issue I raised a while ago](https://github.com/apollographql/react-apollo/issues/447) - I actually [wrote a PR](https://github.com/apollographql/apollo-client/pull/1607) to address this, but it got lost in 2.0 limbo. It's definitely important to solve!
For anyone dealing with this PITA bug, an alternative is to use the `network-only` fetchPolicy. For whatever reason it functions exactly the same as `cache-and-network` in my case, but reports correct network statuses.
@rtymchyk are you sure? With `network-only` the cache is not considered, or is it bugged?
@all any news on this bug (FetchPolicy `cache-and-network` always uses networkStatus = 1 regardless of cache hit)?
@frederikhors The network fetch happens with an explicit refetch only, which works well enough for me. Don't see much of an alternative 😢
@dallonf can you make a new smaller PR just for this problem in 2.0 land?
I'm not an Apollo maintainer, just a guy who wrote a (declined) PR once
@dallonf yes, I know. But maybe in that period there was a mess with 2.0 coming. Can you just re-write a smaller PR just to fix this? When you're done, I'll personally commit my time to making sure it passes.
I could try, but I'm no more qualified than anybody else looking at this issue, and I don't have a lot of time to contribute to OSS at the moment, so if you really need this fixed ASAP, you're probably going to have to write the PR yourself.
It's also worth noting that my original PR (https://github.com/apollographql/apollo-client/pull/1607) isn't a "fix" - I diagnosed the problem as a gap in the design and added a whole new API to distinguish between two currently ambiguous states. This is something I really wouldn't be comfortable committing a lot of time to developing until I have confirmation from the maintainers that this is a PR they would support; I don't want it to get ignored until the whole codebase changes out from under it again.
Same thing with broken `cache-and-network` and variable changes, always `1` instead of `2`.
Somehow it works for me with `network-only` which changes to `2` and still shows stale data in a component. 🤦♂️ 😆
Can someone confirm how this should work? That is, how do the different fetch policies affect response data and network status?
I'm seeing a similar issue. Basically, I make a query with `cache-and-network`; it returns data from the cache and sets the `networkStatus` to `7`. It then makes the remote request, so the `networkStatus` is set to `1`.
I tried to solve the issue by manually running `markQueryResult()` for said queries, but they still didn't update. There seems to be some disconnect between the query manager and react-apollo.
I have a feeling this is the offending [code](https://github.com/apollographql/apollo-client/blob/master/packages/apollo-client/src/core/QueryManager.ts#L420). As you can see, the promise isn't returned like it is for the other fetch policies. I tried a few things but couldn't get the networkStatus to update.
Any ideas?
> Same thing with broken `cache-and-network` and variable changes, always `1` instead of `2`.
>
> Somehow it works for me with `network-only` which changes to `2` and still shows stale data in a component.
For me, the `networkStatus` returns the same values for both `cache-and-network` and `network-only`.
Hope this bug gets addressed soon. A comment from the maintainers just confirming it is a bug would be nice!
I have the same issue. The only way to distinguish a cache hit is to use something like:
```
if (!data || (loading && !data.documents)) {
return <CardLoader />;
}
```
This is suboptimal because I cannot re-use this logic across different Query components. I would prefer to have something like `networkStatus = 4`.
@hwillson any comments? Thanks!
It would be great to resolve this issue either by acknowledging that this is a bug that will be fixed (so that there's only a loading state when the cache is empty), or by introducing a non-breaking, additive "cache-then-network" policy which does the above.
Would be nice to see this resolved so we can have reusable logic.
What's the difference between `cache-first` and `cache-and-network`, though? These policies seem identical to me; if so, would it be better to remove the latter to avoid confusing users?
@revskill10 With `cache-first`, if the data is found in the cache then no network request is made. `cache-and-network` will return data from the cache, but will also make a network request (even on a cache hit) and return updated data if available.
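To make that concrete, a rough sketch of the observable difference (as I understand the behavior; not the implementation):
```js
// cache-first: network request only on a cache miss
client.watchQuery({ query, fetchPolicy: 'cache-first' });
// cache hit  -> emits the cached data, no request is made
// cache miss -> fetches from the network, writes to the cache, emits

// cache-and-network: network request even on a cache hit
client.watchQuery({ query, fetchPolicy: 'cache-and-network' });
// cache hit  -> emits the cached data immediately, then fetches and
//               emits again if the server returned different data
```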
Is there a way (using cache-and-network) to not update the cache if the data received after the network request is the same as the old data in the cache? Say I have a blog: the user opens an article and it loads fast (since the data is in the cache); if the author of the article made an update, Apollo updates the relevant data and React updates the corresponding component. But right now it always updates the cache, causing the various components to unmount and mount again, which is jarring: the user sees a component with the cached data, then it disappears, then it appears again.
It's a bit concerning that this issue has had no attention from the team at all in 8 months.
A workaround to check whether data has been resolved from the cache is to use `isEmpty(data)` from `lodash` or something similar.
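Roughly like this (a sketch; `MY_QUERY`, `Spinner` and `Content` are placeholders):
```js
import isEmpty from 'lodash/isEmpty';

<Query query={MY_QUERY} fetchPolicy="cache-and-network">
  {({ data, loading }) => {
    // Only treat it as a first load when the cache returned nothing.
    if (loading && isEmpty(data)) return <Spinner />;
    return <Content data={data} />;
  }}
</Query>;
```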
apollographql/apollo-client | 3,956 | apollographql__apollo-client-3956 | [
"3947"
] | b082c8b23e791cfdca0fc0680a33ec542beb0a68 | diff --git a/packages/apollo-client/src/core/QueryManager.ts b/packages/apollo-client/src/core/QueryManager.ts
--- a/packages/apollo-client/src/core/QueryManager.ts
+++ b/packages/apollo-client/src/core/QueryManager.ts
@@ -996,7 +996,7 @@ export class QueryManager<TStore> {
const lastResult = observableQuery.getLastResult();
const { newData } = this.getQuery(observableQuery.queryId);
// XXX test this
- if (newData) {
+ if (newData && newData.complete) {
return { data: newData.result, partial: false };
} else {
try {
| diff --git a/packages/apollo-client/src/core/__tests__/QueryManager/index.ts b/packages/apollo-client/src/core/__tests__/QueryManager/index.ts
--- a/packages/apollo-client/src/core/__tests__/QueryManager/index.ts
+++ b/packages/apollo-client/src/core/__tests__/QueryManager/index.ts
@@ -2336,6 +2336,87 @@ describe('QueryManager', () => {
// We have an unhandled error warning from the `subscribe` above, which has no `error` cb
});
+ it('does not return incomplete data when two queries for the same item are executed', () => {
+ const queryA = gql`
+ query queryA {
+ person(id: "abc") {
+ __typename
+ id
+ firstName
+ lastName
+ }
+ }
+ `;
+ const queryB = gql`
+ query queryB {
+ person(id: "abc") {
+ __typename
+ id
+ lastName
+ age
+ }
+ }
+ `;
+ const dataA = {
+ person: {
+ __typename: 'Person',
+ id: 'abc',
+ firstName: 'Luke',
+ lastName: 'Skywalker',
+ },
+ };
+ const dataB = {
+ person: {
+ __typename: 'Person',
+ id: 'abc',
+ lastName: 'Skywalker',
+ age: '32',
+ },
+ };
+ const queryManager = new QueryManager<NormalizedCacheObject>({
+ link: mockSingleLink(
+ { request: { query: queryA }, result: { data: dataA } },
+ { request: { query: queryB }, result: { data: dataB }, delay: 20 },
+ ),
+ store: new DataStore(new InMemoryCache({})),
+ ssrMode: true,
+ });
+
+ const observableA = queryManager.watchQuery({
+ query: queryA,
+ });
+ const observableB = queryManager.watchQuery({
+ query: queryB,
+ });
+
+ return Promise.all([
+ observableToPromise({ observable: observableA }, () => {
+ expect(
+ stripSymbols(queryManager.getCurrentQueryResult(observableA)),
+ ).toEqual({
+ data: dataA,
+ partial: false,
+ });
+ expect(queryManager.getCurrentQueryResult(observableB)).toEqual({
+ data: {},
+ partial: true,
+ });
+ }),
+ observableToPromise({ observable: observableB }, () => {
+ expect(
+ stripSymbols(queryManager.getCurrentQueryResult(observableA)),
+ ).toEqual({
+ data: dataA,
+ partial: false,
+ });
+ expect(queryManager.getCurrentQueryResult(observableB)).toEqual({
+ data: dataB,
+ partial: false,
+ });
+ }),
+ ]);
+ });
+
describe('polling queries', () => {
it('allows you to poll queries', () => {
const query = gql`
| [QueryManager] getCurrentQueryResult behavior appears inconsistent
From what I can tell, the return value of `getCurrentQueryResult` is inconsistent.
This is a bit tricky to explain, so I've annotated the method below in a vain attempt to convey what I believe I'm observing.
```typescript
// Copied from QueryManager commit d185f41.
// https://github.com/apollographql/apollo-client/blob/d185f41fb13eeaae38aa66582d3b2ea1ebf48d43/packages/apollo-client/src/core/QueryManager.ts#L991-L1016
public getCurrentQueryResult<T>(
observableQuery: ObservableQuery<T>,
optimistic: boolean = true,
) {
const { variables, query } = observableQuery.options;
const lastResult = observableQuery.getLastResult();
// newData is non null when either:
// a) a query watcher emits a new diff, see line 775
// b) the fetch has completed, see line 1137
const { newData } = this.getQuery(observableQuery.queryId);
// This is where I think this starts to become inconsistent with my expectations.
// Unlike the condition below, by virtue of the watcher, the result in newData may
// be incomplete if a query or mutation selecting similar fields completes first.
if (newData) {
// It's not 100% clear to me the intention of the partial field, but it led to some
// confusion when I received incomplete results and the value was false.
return { data: newData.result, partial: false };
} else {
try {
// The read method appears to throw when fields the query is trying to select are
// unavailable, meaning that in this condition the return value will never be incomplete.
const data = this.dataStore.getCache().read({
query,
variables,
previousResult: lastResult ? lastResult.data : undefined,
optimistic,
});
return { data, partial: false };
} catch (e) {
return { data: {}, partial: true };
}
}
}
```
The fact that the method returns complete results in some cases and incomplete results in other, rarer cases has led to some strange behavior. Consider: when the query is paired with a listener and a cache policy that is not "no-cache", you can observe:
1. watcher emits a new but incomplete result from the cache.
1. application calls `getCurrentQueryResult` and receives incomplete results.
1. next tick query listener fires and [nullifies](https://github.com/apollographql/apollo-client/blob/d185f41fb13eeaae38aa66582d3b2ea1ebf48d43/packages/apollo-client/src/core/QueryManager.ts#L774-L776) the `newData` value.
1. application calls `getCurrentQueryResult` again; this time the method reads from the cache and throws, so an empty result set is received by the application.
If you have multiple queries on the page/view/activity, the end user could see content flicker in and out of existence until all fetches have completed. That is assuming it didn't already crash when the view attempted to access an incomplete result set.
**Intended outcome:**
I think ideally `getCurrentQueryResult` would be consistent by returning only complete or only incomplete results, but not a mixture of the two.
**Actual outcome:**
Depending on the state of the query, the method can return either complete or incomplete result sets.
**How to reproduce the issue:**
For a slightly more concrete example, imagine the following (albeit contrived) scenario:
```javascript
const queryA = gql`
query A {
user {
name
}
}
`;
const queryB = gql`
query B {
# NOTE: using the same root query field.
user {
dogs
}
  }
`;
<Query query={queryA} fetchPolicy="cache-and-network">
  {({ data: resultA }) => (
    <div>
      <p>{resultA.user && resultA.user.name}</p>
      <Query query={queryB} fetchPolicy="cache-and-network">
        {({ data: resultB, loading }) => (
          <div>
            {loading && <span>Loading...</span>}
            <p>{resultB.user && resultB.user.dogs && resultB.user.dogs.join(", ")}</p>
          </div>
        )}
      </Query>
    </div>
  )}
</Query>
```
Given:
- queryA takes 100ms to complete
- queryB takes 500ms to complete
Timeline:
1. components are mounted and fetches start.
1. `queryA` completes.
1. (From what I've observed) the watcher for `queryB` fires as there are new results associated with the root `user` field. queryB's `newData` field is set.
1. the first query container rerenders.
1. as updates cascade down the second query container rerenders.
1. the second query container's render method asks for the current query result, receiving the incomplete result set that was retrieved by the watcher.
1. (From what I've observed) the query listener for `queryB` fires, causing the second query container to rerender again; this time, when it asks for the current query result, it receives an empty set.
1. Eventually `queryB` completes and the query container rerenders with the complete set of results.
Concern:
Currently there is no way to determine whether I've received an incomplete result set; this means that, as a consumer, I would need a lot of (seemingly unnecessary) conditionals in my components to ensure that I am not accessing fields that have not yet been fulfilled.
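For example, safely consuming the result of the contrived `queryB` above looks something like this (a sketch, assuming a `queryManager` and a hypothetical `ObservableQuery` named `observableB` for `queryB`):
```js
const { data, partial } = queryManager.getCurrentQueryResult(observableB);
// `partial` can be false even for an incomplete result, so every
// nested field access needs its own guard:
const dogs =
  data && data.user && Array.isArray(data.user.dogs)
    ? data.user.dogs.join(', ')
    : null;
```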
**Versions:**
- `apollo-client` `=2.4.2`
| I hope what I've provided is clear; happy to provide more details or help patch. | 2018-09-27T18:33:46Z | 1.3 |
apollographql/apollo-client | 3,580 | apollographql__apollo-client-3580 | [
"3576"
] | e0ba90671e241184f12ce86370e0e3d84bc73552 | diff --git a/packages/apollo-client/src/__mocks__/mockLinks.ts b/packages/apollo-client/src/__mocks__/mockLinks.ts
--- a/packages/apollo-client/src/__mocks__/mockLinks.ts
+++ b/packages/apollo-client/src/__mocks__/mockLinks.ts
@@ -8,11 +8,15 @@ import {
import { print } from 'graphql/language/printer';
+interface MockApolloLink extends ApolloLink {
+ operation?: Operation;
+}
+
// Pass in multiple mocked responses, so that you can test flows that end up
// making multiple queries to the server
export function mockSingleLink(
...mockedResponses: MockedResponse[]
-): ApolloLink {
+): MockApolloLink {
return new MockLink(mockedResponses);
}
@@ -40,6 +44,7 @@ export interface MockedSubscription {
}
export class MockLink extends ApolloLink {
+ public operation: Operation;
private mockedResponsesByKey: { [key: string]: MockedResponse[] } = {};
constructor(mockedResponses: MockedResponse[]) {
@@ -60,6 +65,7 @@ export class MockLink extends ApolloLink {
}
public request(operation: Operation) {
+ this.operation = operation;
const key = requestToKey(operation);
const responses = this.mockedResponsesByKey[key];
if (!responses || responses.length === 0) {
diff --git a/packages/apollo-client/src/core/watchQueryOptions.ts b/packages/apollo-client/src/core/watchQueryOptions.ts
--- a/packages/apollo-client/src/core/watchQueryOptions.ts
+++ b/packages/apollo-client/src/core/watchQueryOptions.ts
@@ -222,7 +222,14 @@ export interface MutationOptions<
mutation: DocumentNode;
/**
- * Context to be passed to link execution chain
+ * The context to be passed to the link execution chain. This context will
+ * only be used with the mutation. It will not be used with
+ * `refetchQueries`. Refetched queries use the context they were
+ * initialized with (since the intitial context is stored as part of the
+ * `ObservableQuery` instance). If a specific context is needed when
+ * refetching queries, make sure it is configured (via the
+ * [`query` `context` option](/docs/react/api/apollo-client.html#ApolloClient.query))
+ * when the query is first initialized/run.
*/
context?: any;
| diff --git a/packages/apollo-client/src/core/__tests__/QueryManager/index.ts b/packages/apollo-client/src/core/__tests__/QueryManager/index.ts
--- a/packages/apollo-client/src/core/__tests__/QueryManager/index.ts
+++ b/packages/apollo-client/src/core/__tests__/QueryManager/index.ts
@@ -4362,6 +4362,85 @@ describe('QueryManager', () => {
);
});
+ it('should refetch using the original query context (if any)', () => {
+ const mutation = gql`
+ mutation changeAuthorName {
+ changeAuthorName(newName: "Jack Smith") {
+ firstName
+ lastName
+ }
+ }
+ `;
+ const mutationData = {
+ changeAuthorName: {
+ firstName: 'Jack',
+ lastName: 'Smith',
+ },
+ };
+ const query = gql`
+ query getAuthors($id: ID!) {
+ author(id: $id) {
+ firstName
+ lastName
+ }
+ }
+ `;
+ const data = {
+ author: {
+ firstName: 'John',
+ lastName: 'Smith',
+ },
+ };
+ const secondReqData = {
+ author: {
+ firstName: 'Jane',
+ lastName: 'Johnson',
+ },
+ };
+ const variables = { id: '1234' };
+ const queryManager = mockQueryManager(
+ {
+ request: { query, variables },
+ result: { data },
+ },
+ {
+ request: { query, variables },
+ result: { data: secondReqData },
+ },
+ {
+ request: { query: mutation },
+ result: { data: mutationData },
+ },
+ );
+
+ const headers = {
+ someHeader: 'some value',
+ };
+ const observable = queryManager.watchQuery<any>({
+ query,
+ variables,
+ context: {
+ headers,
+ },
+ notifyOnNetworkStatusChange: false,
+ });
+
+ return observableToPromise(
+ { observable },
+ result => {
+ queryManager.mutate({
+ mutation,
+ refetchQueries: ['getAuthors'],
+ });
+ },
+ result => {
+ const context = queryManager.link.operation.getContext();
+ expect(context.headers).not.toBeUndefined();
+ expect(context.headers.someHeader).toEqual(headers.someHeader);
+ },
+ );
+ });
+
afterEach(done => {
// restore standard method
console.warn = oldWarn;
| Mutation component ignores context headers for refetchQueries
**Intended outcome:**
Queries specified as a string array in the `refetchQueries` option of a mutation call should have the headers specified by the `context` prop of the `Mutation` component included in their requests.
**Actual outcome:**
The mutation uses the auth headers in the context, but when the `refetchQueries` are run they are sent without headers.
**How to reproduce the issue:**
```ts
<Mutation
mutation={MyMutation}
  context={{
    headers: {
      authorization: token ? `Bearer ${token}` : ""
    }
  }}>
  {(mutate) => (
    <button
      onClick={() =>
        mutate({
          variables: { ... },
          refetchQueries: ["MyOtherQuery"],
        })
      }
    />
  )}
</Mutation>
```
When `MyMutation` is sent to the server, the headers specified in the context prop are included in the HTTP request. But when the mutation has completed and `MyOtherQuery` is run, that HTTP request does not contain the headers and therefore gets a 403 error.
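A workaround consistent with the docs change in the patch above (refetched queries reuse the context they were initialized with, since the initial context is stored on the `ObservableQuery`) is to configure the same context on the query itself when it first runs. A sketch, where `MyOtherQuery` stands for the query document whose operation name is "MyOtherQuery", and `Spinner`/`Content` are placeholders:
```js
<Query
  query={MyOtherQuery}
  context={{
    headers: {
      authorization: token ? `Bearer ${token}` : ""
    }
  }}>
  {({ data, loading }) => (loading ? <Spinner /> : <Content data={data} />)}
</Query>;
```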
**Versions**
apollo-client@2.3.2
The requested command to get versions does not seem to work:
```
$ npx envinfo@latest --preset apollo --clipboard
npx: installed 1 in 1.95s
Path must be a string. Received undefined
npx: installed 1 in 1.89s
C:\Users\jonkel.DIVID\AppData\Roaming\npm-cache\_npx\11232\node_modules\envinfo\dist\cli.js
System:
OS: Windows 10
Binaries:
Yarn: 1.7.0 - C:\Users\jonkel.DIVID\AppData\Roaming\npm\yarn.CMD
npm: 5.7.1 - C:\PROGRAM FILES\NODEJS\npm.CMD
Browsers:
Edge: 41.16299.402.0
(node:11232) UnhandledPromiseRejectionWarning: Unhandled promise rejection (rejection id: 1): Error: The system cannot find the path specified.
(node:11232) [DEP0018] DeprecationWarning: Unhandled promise rejections are deprecated. In the future, promise rejections that are not handled will terminate the Node.js process with a non-zero exit code.
```
| Confirmed 🐛 - thanks for reporting this @jonaskello!
If anyone is interested in helping out with this, the following just needs to be updated to pass along the `context` param given to the `mutate` function:
https://github.com/apollographql/apollo-client/blob/ef32c3250e51f55c328fef5e3e45d59397d92206/packages/apollo-client/src/core/QueryManager.ts#L263-L267
I'll try to take a stab at it :-)
apollographql/apollo-client | 2,710 | apollographql__apollo-client-2710 | [
"2709"
] | b1e389560d32c01bcb680514fc7f5dd8d94f2646 | diff --git a/packages/apollo-utilities/src/storeUtils.ts b/packages/apollo-utilities/src/storeUtils.ts
--- a/packages/apollo-utilities/src/storeUtils.ts
+++ b/packages/apollo-utilities/src/storeUtils.ts
@@ -164,6 +164,8 @@ export type Directives = {
};
};
+const KNOWN_DIRECTIVES: string[] = ['connection', 'include', 'skip'];
+
export function getStoreKeyName(
fieldName: string,
args?: Object,
@@ -197,13 +199,25 @@ export function getStoreKeyName(
}
}
+ let completeFieldName: string = fieldName;
+
if (args) {
const stringifiedArgs: string = JSON.stringify(args);
+ completeFieldName += `(${stringifiedArgs})`;
+ }
- return `${fieldName}(${stringifiedArgs})`;
+ if (directives) {
+ Object.keys(directives).forEach(key => {
+ if (KNOWN_DIRECTIVES.indexOf(key) !== -1) return;
+ if (directives[key] && Object.keys(directives[key]).length) {
+ completeFieldName += `@${key}(${JSON.stringify(directives[key])})`;
+ } else {
+ completeFieldName += `@${key}`;
+ }
+ });
}
- return fieldName;
+ return completeFieldName;
}
export function argumentsObjectFromField(
| diff --git a/packages/apollo-cache-inmemory/src/__tests__/readFromStore.ts b/packages/apollo-cache-inmemory/src/__tests__/readFromStore.ts
--- a/packages/apollo-cache-inmemory/src/__tests__/readFromStore.ts
+++ b/packages/apollo-cache-inmemory/src/__tests__/readFromStore.ts
@@ -165,6 +165,38 @@ describe('reading from the store', () => {
});
});
+ it('runs a basic query with custom directives', () => {
+ const query = gql`
+ query {
+ id
+ firstName @include(if: true)
+ lastName @upperCase
+ birthDate @dateFormat(format: "DD-MM-YYYY")
+ }
+ `;
+
+ const store = defaultNormalizedCacheFactory({
+ ROOT_QUERY: {
+ id: 'abcd',
+ firstName: 'James',
+ 'lastName@upperCase': 'BOND',
+ 'birthDate@dateFormat({"format":"DD-MM-YYYY"})': '20-05-1940',
+ },
+ });
+
+ const result = readQueryFromStore({
+ store,
+ query,
+ });
+
+ expect(result).toEqual({
+ id: 'abcd',
+ firstName: 'James',
+ lastName: 'BOND',
+ birthDate: '20-05-1940',
+ });
+ });
+
it('runs a basic query with default values for arguments', () => {
const query = gql`
query someBigQuery(
diff --git a/packages/apollo-cache-inmemory/src/__tests__/writeToStore.ts b/packages/apollo-cache-inmemory/src/__tests__/writeToStore.ts
--- a/packages/apollo-cache-inmemory/src/__tests__/writeToStore.ts
+++ b/packages/apollo-cache-inmemory/src/__tests__/writeToStore.ts
@@ -221,6 +221,38 @@ describe('writing to the store', () => {
});
});
+ it('properly normalizes a query with custom directives', () => {
+ const query = gql`
+ query {
+ id
+ firstName @include(if: true)
+ lastName @upperCase
+ birthDate @dateFormat(format: "DD-MM-YYYY")
+ }
+ `;
+
+ const result: any = {
+ id: 'abcd',
+ firstName: 'James',
+ lastName: 'BOND',
+ birthDate: '20-05-1940',
+ };
+
+ const normalized = writeQueryToStore({
+ result,
+ query,
+ });
+
+ expect(normalized.toObject()).toEqual({
+ ROOT_QUERY: {
+ id: 'abcd',
+ firstName: 'James',
+ 'lastName@upperCase': 'BOND',
+ 'birthDate@dateFormat({"format":"DD-MM-YYYY"})': '20-05-1940',
+ },
+ });
+ });
+
it('properly normalizes a nested object with an ID', () => {
const query = gql`
{
| Support custom directives in InMemory Cache
**Intended outcome:**
I would like to add custom directive support to "apollo-cache-inmemory". More precisely, fix it to prevent cache corruption.
If you are not familiar with custom directives, you can check out my new module: [graphql-directive](https://github.com/smooth-code/graphql-directive).
**Actual outcome:**
"apollo-cache-inmemory" relies on [`getStoreKeyName` function](https://github.com/apollographql/apollo-client/blob/master/packages/apollo-utilities/src/storeUtils.ts#L167) either for writing or for reading cache. By reading the code of this method you can see that a field `firstName @upperCase` and `firstName` will use the same cache key: "firstName". Even if the value is not the same, it corrupts cache.
**How to reproduce the issue:**
```js
const gql = require('graphql-tag')
const { InMemoryCache } = require('apollo-cache-inmemory')
const cache = new InMemoryCache()
const query = gql`
query Foo {
book(id: 10) {
__typename
name @uppercase
}
}
`
cache.writeQuery({
query,
data: {
book: {
__typename: 'BOOK',
name: 'harry',
},
},
})
console.log(cache.extract())
console.log(cache.readQuery({ query }))
```
**Version**
- apollo-cache-inmemory@1.1.4
- apollo-utilities@1.0.3
| 2017-12-10T22:19:56Z | 1.1 |
|
apollographql/apollo-client | 2,362 | apollographql__apollo-client-2362 | [
"2293"
] | 7aa8cff98ea0428e8736e7b87662000f128beb57 | diff --git a/packages/apollo-cache-inmemory/src/fragmentMatcher.ts b/packages/apollo-cache-inmemory/src/fragmentMatcher.ts
--- a/packages/apollo-cache-inmemory/src/fragmentMatcher.ts
+++ b/packages/apollo-cache-inmemory/src/fragmentMatcher.ts
@@ -30,7 +30,7 @@ export class HeuristicFragmentMatcher implements FragmentMatcherInterface {
typeCondition: string,
context: ReadStoreContext,
): boolean {
- const obj = context.store[idValue.id];
+ const obj = context.store.get(idValue.id);
if (!obj) {
return false;
@@ -116,7 +116,7 @@ export class IntrospectionFragmentMatcher implements FragmentMatcherInterface {
);
}
- const obj = context.store[idValue.id];
+ const obj = context.store.get(idValue.id);
if (!obj) {
return false;
diff --git a/packages/apollo-cache-inmemory/src/inMemoryCache.ts b/packages/apollo-cache-inmemory/src/inMemoryCache.ts
--- a/packages/apollo-cache-inmemory/src/inMemoryCache.ts
+++ b/packages/apollo-cache-inmemory/src/inMemoryCache.ts
@@ -12,14 +12,18 @@ import {
OptimisticStoreItem,
ApolloReducerConfig,
NormalizedCache,
+ NormalizedCacheObject,
} from './types';
import { writeResultToStore } from './writeToStore';
import { readQueryFromStore, diffQueryAgainstStore } from './readFromStore';
+import { defaultNormalizedCacheFactory } from './objectCache';
+import { record } from './recordingCache';
const defaultConfig: ApolloReducerConfig = {
fragmentMatcher: new HeuristicFragmentMatcher(),
dataIdFromObject: defaultDataIdFromObject,
addTypename: true,
+ storeFactory: defaultNormalizedCacheFactory,
};
export function defaultDataIdFromObject(result: any): string | null {
@@ -34,8 +38,8 @@ export function defaultDataIdFromObject(result: any): string | null {
return null;
}
-export class InMemoryCache extends ApolloCache<NormalizedCache> {
- private data: NormalizedCache = {};
+export class InMemoryCache extends ApolloCache<NormalizedCacheObject> {
+ private data: NormalizedCache;
private config: ApolloReducerConfig;
private optimistic: OptimisticStoreItem[] = [];
private watches: Cache.WatchOptions[] = [];
@@ -52,29 +56,30 @@ export class InMemoryCache extends ApolloCache<NormalizedCache> {
if ((this.config as any).customResolvers)
this.config.cacheResolvers = (this.config as any).customResolvers;
this.addTypename = this.config.addTypename ? true : false;
+ this.data = this.config.storeFactory();
}
- public restore(data: NormalizedCache): ApolloCache<NormalizedCache> {
- if (data) this.data = data;
+ public restore(data: NormalizedCacheObject): this {
+ if (data) this.data.replace(data);
return this;
}
- public extract(optimistic: boolean = false): NormalizedCache {
+ public extract(optimistic: boolean = false): NormalizedCacheObject {
if (optimistic && this.optimistic.length > 0) {
const patches = this.optimistic.map(opt => opt.data);
- return Object.assign({}, this.data, ...patches) as NormalizedCache;
+ return Object.assign(this.data.toObject(), ...patches);
}
- return this.data;
+ return this.data.toObject();
}
- public read<T>(query: Cache.ReadOptions): T {
- if (query.rootId && typeof this.data[query.rootId] === 'undefined') {
+ public read<T>(query: Cache.ReadOptions): T | null {
+ if (query.rootId && this.data.get(query.rootId) === undefined) {
return null;
}
return readQueryFromStore({
- store: this.extract(query.optimistic),
+ store: this.config.storeFactory(this.extract(query.optimistic)),
query: this.transformDocument(query.query),
variables: query.variables,
rootId: query.rootId,
@@ -100,7 +105,7 @@ export class InMemoryCache extends ApolloCache<NormalizedCache> {
public diff<T>(query: Cache.DiffOptions): Cache.DiffResult<T> {
return diffQueryAgainstStore({
- store: this.extract(query.optimistic),
+ store: this.config.storeFactory(this.extract(query.optimistic)),
query: this.transformDocument(query.query),
variables: query.variables,
returnPartialData: query.returnPartialData,
@@ -123,7 +128,7 @@ export class InMemoryCache extends ApolloCache<NormalizedCache> {
}
public reset(): Promise<void> {
- this.data = {};
+ this.data.clear();
this.broadcastWatches();
return Promise.resolve();
@@ -143,7 +148,7 @@ export class InMemoryCache extends ApolloCache<NormalizedCache> {
this.broadcastWatches();
}
- public performTransaction(transaction: Transaction<NormalizedCache>) {
+ public performTransaction(transaction: Transaction<NormalizedCacheObject>) {
// TODO: does this need to be different, or is this okay for an in-memory cache?
let alreadySilenced = this.silenceBroadcast;
@@ -161,25 +166,18 @@ export class InMemoryCache extends ApolloCache<NormalizedCache> {
}
public recordOptimisticTransaction(
- transaction: Transaction<NormalizedCache>,
+ transaction: Transaction<NormalizedCacheObject>,
id: string,
) {
this.silenceBroadcast = true;
- const before = this.extract(true);
-
- const orig = this.data;
- this.data = { ...before };
- this.performTransaction(transaction);
- const after = this.data;
- this.data = orig;
-
- const patch: any = {};
-
- Object.keys(after).forEach(key => {
- if (after[key] !== before[key]) {
- patch[key] = after[key];
- }
+ const patch = record(this.extract(true), recordingCache => {
+ // swapping data instance on 'this' is currently necessary
+ // because of the current architecture
+ const dataCache = this.data;
+ this.data = recordingCache;
+ this.performTransaction(transaction);
+ this.data = dataCache;
});
this.optimistic.push({
diff --git a/packages/apollo-cache-inmemory/src/index.ts b/packages/apollo-cache-inmemory/src/index.ts
--- a/packages/apollo-cache-inmemory/src/index.ts
+++ b/packages/apollo-cache-inmemory/src/index.ts
@@ -2,4 +2,6 @@ export { InMemoryCache, defaultDataIdFromObject } from './inMemoryCache';
export * from './readFromStore';
export * from './writeToStore';
export * from './fragmentMatcher';
+export * from './objectCache';
+export * from './recordingCache';
export * from './types';
diff --git a/packages/apollo-cache-inmemory/src/mapCache.ts b/packages/apollo-cache-inmemory/src/mapCache.ts
new file mode 100644
--- /dev/null
+++ b/packages/apollo-cache-inmemory/src/mapCache.ts
@@ -0,0 +1,47 @@
+import { NormalizedCache, NormalizedCacheObject, StoreObject } from './types';
+
+function getNormalizedDataId(dataId: string | number): string {
+ return typeof dataId === 'number' ? String(dataId) : dataId;
+}
+
+/**
+ * A Map-based implementation of the NormalizedCache.
+ * Note that you need a polyfill for Object.entries for this to work.
+ */
+export class MapCache implements NormalizedCache {
+ cache: Map<string, StoreObject>;
+ constructor(data: NormalizedCacheObject = {}) {
+ this.cache = new Map(Object.entries(data));
+ }
+ get(dataId: string): StoreObject {
+ return this.cache.get(getNormalizedDataId(dataId));
+ }
+ set(dataId: string, value: StoreObject): void {
+ this.cache.set(getNormalizedDataId(dataId), value);
+ }
+ delete(dataId: string): void {
+ this.cache.delete(getNormalizedDataId(dataId));
+ }
+ clear(): void {
+ return this.cache.clear();
+ }
+ public toObject(): NormalizedCacheObject {
+ const obj: NormalizedCacheObject = {};
+ this.cache.forEach((dataId, key) => {
+ obj[key] = dataId;
+ });
+ return obj;
+ }
+ public replace(newData: NormalizedCacheObject): void {
+ this.cache.clear();
+ Object.entries(newData).forEach(([dataId, value]) =>
+ this.cache.set(dataId, value),
+ );
+ }
+}
+
+export function mapNormalizedCacheFactory(
+ seed?: NormalizedCacheObject,
+): NormalizedCache {
+ return new MapCache(seed);
+}
diff --git a/packages/apollo-cache-inmemory/src/objectCache.ts b/packages/apollo-cache-inmemory/src/objectCache.ts
new file mode 100644
--- /dev/null
+++ b/packages/apollo-cache-inmemory/src/objectCache.ts
@@ -0,0 +1,29 @@
+import { NormalizedCache, NormalizedCacheObject, StoreObject } from './types';
+
+export class ObjectCache implements NormalizedCache {
+ constructor(private data: NormalizedCacheObject = {}) {}
+ public toObject(): NormalizedCacheObject {
+ return { ...this.data };
+ }
+ public get(dataId: string): StoreObject {
+ return this.data[dataId];
+ }
+ public set(dataId: string, value: StoreObject) {
+ this.data[dataId] = value;
+ }
+ public delete(dataId: string): void {
+ this.data[dataId] = undefined;
+ }
+ public clear(): void {
+ this.data = {};
+ }
+ public replace(newData: NormalizedCacheObject): void {
+ this.data = newData || {};
+ }
+}
+
+export function defaultNormalizedCacheFactory(
+ seed?: NormalizedCacheObject,
+): NormalizedCache {
+ return new ObjectCache(seed);
+}
diff --git a/packages/apollo-cache-inmemory/src/readFromStore.ts b/packages/apollo-cache-inmemory/src/readFromStore.ts
--- a/packages/apollo-cache-inmemory/src/readFromStore.ts
+++ b/packages/apollo-cache-inmemory/src/readFromStore.ts
@@ -65,7 +65,7 @@ const readStoreResolver: Resolver = (
assertIdValue(idValue);
const objId = idValue.id;
- const obj = context.store[objId];
+ const obj = context.store.get(objId);
const storeKeyName = getStoreKeyName(fieldName, args, directives);
let fieldValue = (obj || {})[storeKeyName];
diff --git a/packages/apollo-cache-inmemory/src/recordingCache.ts b/packages/apollo-cache-inmemory/src/recordingCache.ts
new file mode 100644
--- /dev/null
+++ b/packages/apollo-cache-inmemory/src/recordingCache.ts
@@ -0,0 +1,56 @@
+import { NormalizedCache, NormalizedCacheObject, StoreObject } from './types';
+
+export class RecordingCache implements NormalizedCache {
+ constructor(private readonly data: NormalizedCacheObject = {}) {}
+
+ private recordedData: NormalizedCacheObject = {};
+
+ public record(
+ transaction: (recordingCache: RecordingCache) => void,
+ ): NormalizedCacheObject {
+ transaction(this);
+ const recordedData = this.recordedData;
+ this.recordedData = {};
+ return recordedData;
+ }
+
+ public toObject(): NormalizedCacheObject {
+ return { ...this.data, ...this.recordedData };
+ }
+
+ public get(dataId: string): StoreObject {
+ if (this.recordedData.hasOwnProperty(dataId)) {
+ // recording always takes precedence:
+ return this.recordedData[dataId];
+ }
+ return this.data[dataId];
+ }
+
+ public set(dataId: string, value: StoreObject) {
+ if (this.get(dataId) !== value) {
+ this.recordedData[dataId] = value;
+ }
+ }
+
+ public delete(dataId: string): void {
+ this.recordedData[dataId] = undefined;
+ }
+
+ public clear(): void {
+ Object.keys(this.data).forEach(dataId => this.delete(dataId));
+ this.recordedData = {};
+ }
+
+ public replace(newData: NormalizedCacheObject): void {
+ this.clear();
+ this.recordedData = { ...newData };
+ }
+}
+
+export function record(
+ startingState: NormalizedCacheObject,
+ transaction: (recordingCache: RecordingCache) => void,
+): NormalizedCacheObject {
+ const recordingCache = new RecordingCache(startingState);
+ return recordingCache.record(transaction);
+}
diff --git a/packages/apollo-cache-inmemory/src/types.ts b/packages/apollo-cache-inmemory/src/types.ts
--- a/packages/apollo-cache-inmemory/src/types.ts
+++ b/packages/apollo-cache-inmemory/src/types.ts
@@ -1,22 +1,53 @@
import { DocumentNode } from 'graphql';
import { FragmentMatcher } from 'graphql-anywhere';
import { Transaction } from 'apollo-cache';
-import { StoreValue, IdValue } from 'apollo-utilities';
+import { IdValue, StoreValue } from 'apollo-utilities';
+import { NormalizedCacheObject, NormalizedCache } from './types';
export type IdGetter = (value: Object) => string | null | undefined;
+/**
+ * This is an interface used to access, set and remove
+ * StoreObjects from the cache
+ */
+export interface NormalizedCache {
+ get(dataId: string): StoreObject;
+ set(dataId: string, value: StoreObject): void;
+ delete(dataId: string): void;
+ clear(): void;
+
+ // non-Map elements:
+ /**
+ * returns an Object with key-value pairs matching the contents of the store
+ */
+ toObject(): NormalizedCacheObject;
+ /**
+ * replace the state of the store
+ */
+ replace(newData: NormalizedCacheObject): void;
+}
+
/**
* This is a normalized representation of the Apollo query result cache. It consists of
* a flattened representation of query result trees.
*/
-export interface NormalizedCache {
+export interface NormalizedCacheObject {
[dataId: string]: StoreObject;
}
+export interface StoreObject {
+ __typename?: string;
+ [storeFieldKey: string]: StoreValue;
+}
+
+export type NormalizedCacheFactory = (
+ seed?: NormalizedCacheObject,
+) => NormalizedCache;
+
export type OptimisticStoreItem = {
id: string;
- data: NormalizedCache;
- transaction: Transaction<NormalizedCache>;
+ data: NormalizedCacheObject;
+ transaction: Transaction<NormalizedCacheObject>;
};
export type ReadQueryOptions = {
@@ -33,16 +64,12 @@ export type DiffQueryAgainstStoreOptions = ReadQueryOptions & {
returnPartialData?: boolean;
};
-export interface StoreObject {
- __typename?: string;
- [storeFieldKey: string]: StoreValue;
-}
-
export type ApolloReducerConfig = {
dataIdFromObject?: IdGetter;
fragmentMatcher?: FragmentMatcherInterface;
addTypename?: boolean;
cacheResolvers?: CacheResolverMap;
+ storeFactory?: NormalizedCacheFactory;
};
export type ReadStoreContext = {
diff --git a/packages/apollo-cache-inmemory/src/writeToStore.ts b/packages/apollo-cache-inmemory/src/writeToStore.ts
--- a/packages/apollo-cache-inmemory/src/writeToStore.ts
+++ b/packages/apollo-cache-inmemory/src/writeToStore.ts
@@ -26,9 +26,12 @@ import {
getQueryDefinition,
} from 'apollo-utilities';
+import { defaultNormalizedCacheFactory, ObjectCache } from './objectCache';
+
import {
IdGetter,
NormalizedCache,
+ NormalizedCacheFactory,
ReadStoreContext,
StoreObject,
} from './types';
@@ -72,7 +75,8 @@ export function enhanceErrorWithDocument(error: Error, document: DocumentNode) {
export function writeQueryToStore({
result,
query,
- store = {} as NormalizedCache,
+ storeFactory = defaultNormalizedCacheFactory,
+ store = storeFactory(),
variables,
dataIdFromObject,
fragmentMap = {} as FragmentMap,
@@ -81,6 +85,7 @@ export function writeQueryToStore({
result: Object;
query: DocumentNode;
store?: NormalizedCache;
+ storeFactory?: NormalizedCacheFactory;
variables?: Object;
dataIdFromObject?: IdGetter;
fragmentMap?: FragmentMap;
@@ -97,6 +102,7 @@ export function writeQueryToStore({
selectionSet: queryDefinition.selectionSet,
context: {
store,
+ storeFactory,
processedData: {},
variables,
dataIdFromObject,
@@ -111,6 +117,7 @@ export function writeQueryToStore({
export type WriteContext = {
store: NormalizedCache;
+ storeFactory: NormalizedCacheFactory;
processedData?: { [x: string]: FieldNode[] };
variables?: any;
dataIdFromObject?: IdGetter;
@@ -122,7 +129,8 @@ export function writeResultToStore({
dataId,
result,
document,
- store = {} as NormalizedCache,
+ storeFactory = defaultNormalizedCacheFactory,
+ store = storeFactory(),
variables,
dataIdFromObject,
fragmentMatcherFunction,
@@ -131,6 +139,7 @@ export function writeResultToStore({
result: any;
document: DocumentNode;
store?: NormalizedCache;
+ storeFactory?: NormalizedCacheFactory;
variables?: Object;
dataIdFromObject?: IdGetter;
fragmentMatcherFunction?: FragmentMatcher;
@@ -149,6 +158,7 @@ export function writeResultToStore({
selectionSet,
context: {
store,
+ storeFactory,
processedData: {},
variables,
dataIdFromObject,
@@ -236,7 +246,9 @@ export function writeSelectionSetToStore({
// on the context.
const idValue: IdValue = { type: 'id', id: 'self', generated: false };
const fakeContext: ReadStoreContext = {
- store: { self: result },
+ // NOTE: fakeContext always uses ObjectCache
+ // since this is only to ensure the return value of 'matches'
+ store: new ObjectCache({ self: result }),
returnPartialData: false,
hasMissingField: false,
cacheResolvers: {},
@@ -276,8 +288,8 @@ function mergeWithGenerated(
realKey: string,
cache: NormalizedCache,
) {
- const generated = cache[generatedKey];
- const real = cache[realKey];
+ const generated = cache.get(generatedKey);
+ const real = cache.get(realKey);
Object.keys(generated).forEach(key => {
const value = generated[key];
@@ -285,8 +297,8 @@ function mergeWithGenerated(
if (isIdValue(value) && isGeneratedId(value.id) && isIdValue(realValue)) {
mergeWithGenerated(value.id, realValue.id, cache);
}
- delete cache[generatedKey];
- cache[realKey] = { ...generated, ...real } as StoreObject;
+ cache.delete(generatedKey);
+ cache.set(realKey, { ...generated, ...real } as StoreObject);
});
}
@@ -326,6 +338,7 @@ function writeFieldToStore({
const { variables, dataIdFromObject, store } = context;
let storeValue: any;
+ let storeObject: StoreObject;
const storeFieldName: string = storeKeyNameFromField(field, variables);
// specifies if we need to merge existing keys in the store
@@ -401,8 +414,9 @@ function writeFieldToStore({
// check if there was a generated id at the location where we're
// about to place this new id. If there was, we have to merge the
// data from that id with the data we're about to write in the store.
- if (store[dataId] && store[dataId][storeFieldName] !== storeValue) {
- const escapedId = store[dataId][storeFieldName] as IdValue;
+ storeObject = store.get(dataId);
+ if (storeObject && storeObject[storeFieldName] !== storeValue) {
+ const escapedId = storeObject[storeFieldName] as IdValue;
// If there is already a real id in the store and the current id we
// are dealing with is generated, we throw an error.
@@ -426,7 +440,7 @@ function writeFieldToStore({
}
const newStoreObj = {
- ...store[dataId],
+ ...store.get(dataId),
[storeFieldName]: storeValue,
} as StoreObject;
@@ -434,8 +448,9 @@ function writeFieldToStore({
mergeWithGenerated(generatedKey, (storeValue as IdValue).id, store);
}
- if (!store[dataId] || storeValue !== store[dataId][storeFieldName]) {
- store[dataId] = newStoreObj;
+ storeObject = store.get(dataId);
+ if (!storeObject || storeValue !== storeObject[storeFieldName]) {
+ store.set(dataId, newStoreObj);
}
}
diff --git a/packages/apollo-cache/src/cache.ts b/packages/apollo-cache/src/cache.ts
--- a/packages/apollo-cache/src/cache.ts
+++ b/packages/apollo-cache/src/cache.ts
@@ -30,7 +30,7 @@ export abstract class ApolloCache<TSerialized> implements DataProxy {
/**
* Exposes the cache's complete state, in a serializable format for later restoration.
*/
- public abstract extract(optimistic: boolean): TSerialized;
+ public abstract extract(optimistic?: boolean): TSerialized;
// optimistic API
public abstract removeOptimistic(id: string): void;
| diff --git a/packages/apollo-cache-inmemory/src/__tests__/diffAgainstStore.ts b/packages/apollo-cache-inmemory/src/__tests__/diffAgainstStore.ts
--- a/packages/apollo-cache-inmemory/src/__tests__/diffAgainstStore.ts
+++ b/packages/apollo-cache-inmemory/src/__tests__/diffAgainstStore.ts
@@ -95,7 +95,7 @@ describe('diffing queries against the store', () => {
});
expect(complete).toBeTruthy();
- expect(store['1']).toEqual(result.people_one);
+ expect(store.get('1')).toEqual(result.people_one);
});
it('does not swallow errors other than field errors', () => {
diff --git a/packages/apollo-cache-inmemory/src/__tests__/fragmentMatcher.ts b/packages/apollo-cache-inmemory/src/__tests__/fragmentMatcher.ts
--- a/packages/apollo-cache-inmemory/src/__tests__/fragmentMatcher.ts
+++ b/packages/apollo-cache-inmemory/src/__tests__/fragmentMatcher.ts
@@ -1,4 +1,6 @@
import { IntrospectionFragmentMatcher } from '../fragmentMatcher';
+import { defaultNormalizedCacheFactory } from '../objectCache';
+
describe('IntrospectionFragmentMatcher', () => {
it('will throw an error if match is called if it is not ready', () => {
const ifm = new IntrospectionFragmentMatcher();
@@ -27,11 +29,11 @@ describe('IntrospectionFragmentMatcher', () => {
},
});
- const store = {
+ const store = defaultNormalizedCacheFactory({
a: {
__typename: 'ItemB',
},
- };
+ });
const idValue = {
type: 'id',
diff --git a/packages/apollo-cache-inmemory/src/__tests__/mapCache.ts b/packages/apollo-cache-inmemory/src/__tests__/mapCache.ts
new file mode 100644
--- /dev/null
+++ b/packages/apollo-cache-inmemory/src/__tests__/mapCache.ts
@@ -0,0 +1,20 @@
+jest.mock('../objectCache', () => {
+ const { MapCache, mapNormalizedCacheFactory } = require('../mapCache');
+ return {
+ ObjectCache: MapCache,
+ defaultNormalizedCacheFactory: mapNormalizedCacheFactory,
+ };
+});
+
+describe('MapCache', () => {
+ // simply re-runs all the tests
+ // with the alternative implementation of the cache
+ require('./objectCache');
+ require('./cache');
+ require('./diffAgainstStore');
+ require('./fragmentMatcher');
+ require('./readFromStore');
+ require('./diffAgainstStore');
+ require('./roundtrip');
+ require('./writeToStore');
+});
diff --git a/packages/apollo-cache-inmemory/src/__tests__/objectCache.ts b/packages/apollo-cache-inmemory/src/__tests__/objectCache.ts
new file mode 100644
--- /dev/null
+++ b/packages/apollo-cache-inmemory/src/__tests__/objectCache.ts
@@ -0,0 +1,36 @@
+import { ObjectCache } from '../objectCache';
+import { NormalizedCacheObject } from '../types';
+
+describe('ObjectCache', () => {
+ it('should create an empty cache', () => {
+ const cache = new ObjectCache();
+ expect(cache.toObject()).toEqual({});
+ });
+
+ it('should create a cache based on an Object', () => {
+ const contents: NormalizedCacheObject = { a: {} };
+ const cache = new ObjectCache(contents);
+ expect(cache.toObject()).toEqual(contents);
+ });
+
+ it(`should .get() an object from the store by dataId`, () => {
+ const contents: NormalizedCacheObject = { a: {} };
+ const cache = new ObjectCache(contents);
+ expect(cache.get('a')).toBe(contents.a);
+ });
+
+ it(`should .set() an object from the store by dataId`, () => {
+ const obj = {};
+ const cache = new ObjectCache();
+ cache.set('a', obj);
+ expect(cache.get('a')).toBe(obj);
+ });
+
+ it(`should .clear() the store`, () => {
+ const obj = {};
+ const cache = new ObjectCache();
+ cache.set('a', obj);
+ cache.clear();
+ expect(cache.get('a')).toBeUndefined();
+ });
+});
diff --git a/packages/apollo-cache-inmemory/src/__tests__/readFromStore.ts b/packages/apollo-cache-inmemory/src/__tests__/readFromStore.ts
--- a/packages/apollo-cache-inmemory/src/__tests__/readFromStore.ts
+++ b/packages/apollo-cache-inmemory/src/__tests__/readFromStore.ts
@@ -4,6 +4,7 @@ import gql from 'graphql-tag';
import { NormalizedCache, StoreObject, HeuristicFragmentMatcher } from '../';
import { readQueryFromStore } from '../readFromStore';
+import { defaultNormalizedCacheFactory } from '../objectCache';
const fragmentMatcherFunction = new HeuristicFragmentMatcher().match;
import { withError } from './diffAgainstStore';
@@ -11,7 +12,7 @@ import { withError } from './diffAgainstStore';
describe('reading from the store', () => {
it('runs a nested query with proper fragment fields in arrays', () => {
withError(() => {
- const store = {
+ const store = defaultNormalizedCacheFactory({
ROOT_QUERY: {
__typename: 'Query',
nestedObj: { type: 'id', id: 'abcde', generated: false },
@@ -26,7 +27,7 @@ describe('reading from the store', () => {
id: 'abcdef',
someField: 3,
} as StoreObject,
- } as NormalizedCache;
+ });
const queryResult = readQueryFromStore({
store,
@@ -71,7 +72,7 @@ describe('reading from the store', () => {
it('rejects malformed queries', () => {
expect(() => {
readQueryFromStore({
- store: {},
+ store: defaultNormalizedCacheFactory(),
query: gql`
query {
name
@@ -86,7 +87,7 @@ describe('reading from the store', () => {
expect(() => {
readQueryFromStore({
- store: {},
+ store: defaultNormalizedCacheFactory(),
query: gql`
fragment x on y {
name
@@ -104,9 +105,9 @@ describe('reading from the store', () => {
nullField: null,
} as StoreObject;
- const store = {
+ const store = defaultNormalizedCacheFactory({
ROOT_QUERY: result,
- } as NormalizedCache;
+ });
const queryResult = readQueryFromStore({
store,
@@ -141,14 +142,14 @@ describe('reading from the store', () => {
stringArg: 'This is a string!',
};
- const store = {
+ const store = defaultNormalizedCacheFactory({
ROOT_QUERY: {
id: 'abcd',
nullField: null,
'numberField({"intArg":5,"floatArg":3.14})': 5,
'stringField({"arg":"This is a string!"})': 'Heyo',
},
- } as NormalizedCache;
+ });
const result = readQueryFromStore({
store,
@@ -182,14 +183,14 @@ describe('reading from the store', () => {
floatArg: 3.14,
};
- const store = {
+ const store = defaultNormalizedCacheFactory({
ROOT_QUERY: {
id: 'abcd',
nullField: null,
'numberField({"intArg":0,"floatArg":3.14})': 5,
'stringField({"arg":"This is a default string!"})': 'Heyo',
},
- } as NormalizedCache;
+ });
const result = readQueryFromStore({
store,
@@ -219,7 +220,7 @@ describe('reading from the store', () => {
} as StoreObject,
};
- const store = {
+ const store = defaultNormalizedCacheFactory({
ROOT_QUERY: assign({}, assign({}, omit(result, 'nestedObj')), {
nestedObj: {
type: 'id',
@@ -228,7 +229,7 @@ describe('reading from the store', () => {
},
} as StoreObject),
abcde: result.nestedObj,
- } as NormalizedCache;
+ });
const queryResult = readQueryFromStore({
store,
@@ -276,7 +277,7 @@ describe('reading from the store', () => {
__typename: 'Item',
};
- const store = {
+ const store = defaultNormalizedCacheFactory({
ROOT_QUERY: assign(
{},
assign({}, omit(result, 'nestedObj', 'deepNestedObj')),
@@ -297,7 +298,7 @@ describe('reading from the store', () => {
},
}) as StoreObject,
abcdef: result.deepNestedObj as StoreObject,
- } as NormalizedCache;
+ });
const queryResult = readQueryFromStore({
store,
@@ -372,7 +373,7 @@ describe('reading from the store', () => {
] as StoreObject[],
};
- const store = {
+ const store = defaultNormalizedCacheFactory({
ROOT_QUERY: assign({}, assign({}, omit(result, 'nestedArray')), {
nestedArray: [
{ type: 'id', generated: true, id: 'abcd.nestedArray.0' } as IdValue,
@@ -381,7 +382,7 @@ describe('reading from the store', () => {
}) as StoreObject,
'abcd.nestedArray.0': result.nestedArray[0],
'abcd.nestedArray.1': result.nestedArray[1],
- } as NormalizedCache;
+ });
const queryResult = readQueryFromStore({
store,
@@ -430,7 +431,7 @@ describe('reading from the store', () => {
] as StoreObject[],
};
- const store = {
+ const store = defaultNormalizedCacheFactory({
ROOT_QUERY: assign({}, assign({}, omit(result, 'nestedArray')), {
nestedArray: [
null,
@@ -438,7 +439,7 @@ describe('reading from the store', () => {
],
}) as StoreObject,
'abcd.nestedArray.1': result.nestedArray[1],
- } as NormalizedCache;
+ });
const queryResult = readQueryFromStore({
store,
@@ -485,12 +486,12 @@ describe('reading from the store', () => {
] as StoreObject[],
};
- const store = {
+ const store = defaultNormalizedCacheFactory({
ROOT_QUERY: assign({}, assign({}, omit(result, 'nestedArray')), {
nestedArray: [null, { type: 'id', generated: false, id: 'abcde' }],
}) as StoreObject,
abcde: result.nestedArray[1],
- } as NormalizedCache;
+ });
const queryResult = readQueryFromStore({
store,
@@ -530,7 +531,7 @@ describe('reading from the store', () => {
nullField: null,
} as StoreObject;
- const store = { ROOT_QUERY: result } as NormalizedCache;
+ const store = defaultNormalizedCacheFactory({ ROOT_QUERY: result });
expect(() => {
readQueryFromStore({
@@ -554,11 +555,11 @@ describe('reading from the store', () => {
nestedObj: null,
};
- const store = {
+ const store = defaultNormalizedCacheFactory({
ROOT_QUERY: assign({}, assign({}, omit(result, 'nestedObj')), {
nestedObj: null,
}) as StoreObject,
- } as NormalizedCache;
+ });
const queryResult = readQueryFromStore({
store,
@@ -591,14 +592,14 @@ describe('reading from the store', () => {
simpleArray: ['one', 'two', 'three'],
};
- const store = {
+ const store = defaultNormalizedCacheFactory({
ROOT_QUERY: assign({}, assign({}, omit(result, 'simpleArray')), {
simpleArray: {
type: 'json',
json: result.simpleArray,
} as JsonValue,
}) as StoreObject,
- } as NormalizedCache;
+ });
const queryResult = readQueryFromStore({
store,
@@ -628,14 +629,14 @@ describe('reading from the store', () => {
simpleArray: [null, 'two', 'three'],
};
- const store = {
+ const store = defaultNormalizedCacheFactory({
ROOT_QUERY: assign({}, assign({}, omit(result, 'simpleArray')), {
simpleArray: {
type: 'json',
json: result.simpleArray,
} as JsonValue,
}) as StoreObject,
- } as NormalizedCache;
+ });
const queryResult = readQueryFromStore({
store,
@@ -677,7 +678,7 @@ describe('reading from the store', () => {
__typename: 'Item',
};
- const store = {
+ const store = defaultNormalizedCacheFactory({
ROOT_QUERY: assign(
{},
assign({}, omit(data, 'nestedObj', 'deepNestedObj')),
@@ -698,7 +699,7 @@ describe('reading from the store', () => {
},
}) as StoreObject,
abcdef: data.deepNestedObj as StoreObject,
- } as NormalizedCache;
+ });
const queryResult1 = readQueryFromStore({
store,
@@ -748,7 +749,7 @@ describe('reading from the store', () => {
});
it('properly handles the connection directive', () => {
- const store: NormalizedCache = {
+ const store = defaultNormalizedCacheFactory({
ROOT_QUERY: {
abc: [
{
@@ -761,7 +762,7 @@ describe('reading from the store', () => {
'ROOT_QUERY.abc.0': {
name: 'efgh',
},
- };
+ });
const queryResult = readQueryFromStore({
store,
diff --git a/packages/apollo-cache-inmemory/src/__tests__/recordingCache.ts b/packages/apollo-cache-inmemory/src/__tests__/recordingCache.ts
new file mode 100644
--- /dev/null
+++ b/packages/apollo-cache-inmemory/src/__tests__/recordingCache.ts
@@ -0,0 +1,76 @@
+import { RecordingCache } from '../recordingCache';
+import { NormalizedCacheObject } from '../types';
+
+describe('RecordingCache', () => {
+ describe('returns correct values during recording', () => {
+ const data = {
+ Human: { __typename: 'Human', name: 'Mark' },
+ Animal: { __typename: 'Mouse', name: '🐭' },
+ };
+ const dataToRecord = { Human: { __typename: 'Human', name: 'John' } };
+ let cache: RecordingCache;
+
+ beforeEach(() => {
+ cache = new RecordingCache({ ...data });
+ });
+
+ it('should passthrough values if not defined in recording', () => {
+ cache.record(() => {
+ expect(cache.get('Human')).toBe(data.Human);
+ expect(cache.get('Animal')).toBe(data.Animal);
+ });
+ });
+
+ it('should return values defined during recording', () => {
+ const recording = cache.record(() => {
+ cache.set('Human', dataToRecord.Human);
+ expect(cache.get('Human')).toBe(dataToRecord.Human);
+ });
+ expect(recording.Human).toBe(dataToRecord.Human);
+ });
+
+ it('should return undefined for values deleted during recording', () => {
+ const recording = cache.record(() => {
+ expect(cache.get('Animal')).toBe(data.Animal);
+ // delete should be registered in the recording:
+ cache.delete('Animal');
+ expect(cache.get('Animal')).toBeUndefined();
+ });
+
+ expect(recording).toHaveProperty('Animal');
+ });
+ });
+
+ describe('returns correct result of a recorded transaction', () => {
+ const data = {
+ Human: { __typename: 'Human', name: 'Mark' },
+ Animal: { __typename: 'Mouse', name: '🐭' },
+ };
+ const dataToRecord = { Human: { __typename: 'Human', name: 'John' } };
+ let cache: RecordingCache;
+ let recording: NormalizedCacheObject;
+
+ beforeEach(() => {
+ cache = new RecordingCache({ ...data });
+ recording = cache.record(() => {
+ cache.set('Human', dataToRecord.Human);
+ cache.delete('Animal');
+ });
+ });
+
+ it('should contain the property indicating deletion', () => {
+ expect(recording).toHaveProperty('Animal');
+ });
+
+ it('should have recorded the changes made during recording', () => {
+ expect(recording).toEqual({
+ Human: dataToRecord.Human,
+ Animal: undefined,
+ });
+ });
+
+ it('should keep the original data unaffected', () => {
+ expect(cache.toObject()).toEqual(data);
+ });
+ });
+});
diff --git a/packages/apollo-cache-inmemory/src/__tests__/writeToStore.ts b/packages/apollo-cache-inmemory/src/__tests__/writeToStore.ts
--- a/packages/apollo-cache-inmemory/src/__tests__/writeToStore.ts
+++ b/packages/apollo-cache-inmemory/src/__tests__/writeToStore.ts
@@ -21,6 +21,8 @@ import {
writeSelectionSetToStore,
} from '../writeToStore';
+import { defaultNormalizedCacheFactory } from '../objectCache';
+
import {
HeuristicFragmentMatcher,
IntrospectionFragmentMatcher,
@@ -65,7 +67,7 @@ describe('writing to the store', () => {
writeQueryToStore({
query,
result: cloneDeep(result),
- }),
+ }).toObject(),
).toEqual({
ROOT_QUERY: result,
});
@@ -93,7 +95,7 @@ describe('writing to the store', () => {
query,
});
- expect(normalized).toEqual({
+ expect(normalized.toObject()).toEqual({
ROOT_QUERY: {
id: 'abcd',
stringField: 'This is a string!',
@@ -127,7 +129,7 @@ describe('writing to the store', () => {
query,
});
- expect(normalized).toEqual({
+ expect(normalized.toObject()).toEqual({
ROOT_QUERY: {
id: 'abcd',
'stringField({"arg":1})': 'The arg was 1!',
@@ -167,7 +169,7 @@ describe('writing to the store', () => {
variables,
});
- expect(normalized).toEqual({
+ expect(normalized.toObject()).toEqual({
ROOT_QUERY: {
id: 'abcd',
nullField: null,
@@ -209,7 +211,7 @@ describe('writing to the store', () => {
variables,
});
- expect(normalized).toEqual({
+ expect(normalized.toObject()).toEqual({
ROOT_QUERY: {
id: 'abcd',
nullField: null,
@@ -253,7 +255,7 @@ describe('writing to the store', () => {
query,
result: cloneDeep(result),
dataIdFromObject: getIdField,
- }),
+ }).toObject(),
).toEqual({
ROOT_QUERY: assign<{}>({}, assign({}, omit(result, 'nestedObj')), {
nestedObj: {
@@ -297,7 +299,7 @@ describe('writing to the store', () => {
writeQueryToStore({
query,
result: cloneDeep(result),
- }),
+ }).toObject(),
).toEqual({
ROOT_QUERY: assign({}, assign({}, omit(result, 'nestedObj')), {
nestedObj: {
@@ -341,7 +343,7 @@ describe('writing to the store', () => {
writeQueryToStore({
query,
result: cloneDeep(result),
- }),
+ }).toObject(),
).toEqual({
ROOT_QUERY: assign({}, assign({}, omit(result, 'nestedObj')), {
'nestedObj({"arg":"val"})': {
@@ -396,7 +398,7 @@ describe('writing to the store', () => {
query,
result: cloneDeep(result),
dataIdFromObject: getIdField,
- }),
+ }).toObject(),
).toEqual({
ROOT_QUERY: assign({}, assign({}, omit(result, 'nestedArray')), {
nestedArray: result.nestedArray.map((obj: any) => ({
@@ -447,7 +449,7 @@ describe('writing to the store', () => {
query,
result: cloneDeep(result),
dataIdFromObject: getIdField,
- }),
+ }).toObject(),
).toEqual({
ROOT_QUERY: assign<{}>({}, assign({}, omit(result, 'nestedArray')), {
nestedArray: [
@@ -498,7 +500,7 @@ describe('writing to the store', () => {
result: cloneDeep(result),
});
- expect(normalized).toEqual({
+ expect(normalized.toObject()).toEqual({
ROOT_QUERY: assign({}, assign({}, omit(result, 'nestedArray')), {
nestedArray: [
{ type: 'id', generated: true, id: `ROOT_QUERY.nestedArray.0` },
@@ -545,7 +547,7 @@ describe('writing to the store', () => {
result: cloneDeep(result),
});
- expect(normalized).toEqual({
+ expect(normalized.toObject()).toEqual({
ROOT_QUERY: assign({}, assign({}, omit(result, 'nestedArray')), {
nestedArray: [
null,
@@ -581,7 +583,7 @@ describe('writing to the store', () => {
dataIdFromObject: getIdField,
});
- expect(normalized).toEqual({
+ expect(normalized.toObject()).toEqual({
ROOT_QUERY: assign<{}>({}, assign({}, omit(result, 'simpleArray')), {
simpleArray: {
type: 'json',
@@ -619,7 +621,7 @@ describe('writing to the store', () => {
result: cloneDeep(result),
});
- expect(normalized).toEqual({
+ expect(normalized.toObject()).toEqual({
ROOT_QUERY: assign<{}>({}, assign({}, omit(result, 'simpleArray')), {
simpleArray: {
type: 'json',
@@ -666,7 +668,7 @@ describe('writing to the store', () => {
dataIdFromObject: getIdField,
});
- expect(normalized).toEqual({
+ expect(normalized.toObject()).toEqual({
ROOT_QUERY: {
id: 'a',
object1: {
@@ -741,7 +743,7 @@ describe('writing to the store', () => {
dataIdFromObject: getIdField,
});
- expect(normalized).toEqual({
+ expect(normalized.toObject()).toEqual({
ROOT_QUERY: {
id: 'a',
array1: [
@@ -831,7 +833,7 @@ describe('writing to the store', () => {
dataIdFromObject: getIdField,
});
- expect(normalized).toEqual({
+ expect(normalized.toObject()).toEqual({
ROOT_QUERY: {
id: 'a',
array1: [
@@ -915,7 +917,7 @@ describe('writing to the store', () => {
dataIdFromObject: getIdField,
});
- expect(store2).toEqual({
+ expect(store2.toObject()).toEqual({
ROOT_QUERY: assign({}, result, result2),
});
});
@@ -948,7 +950,7 @@ describe('writing to the store', () => {
writeQueryToStore({
query,
result: cloneDeep(result),
- }),
+ }).toObject(),
).toEqual({
ROOT_QUERY: assign({}, assign({}, omit(result, 'nestedObj')), {
nestedObj: null,
@@ -977,7 +979,7 @@ describe('writing to the store', () => {
writeQueryToStore({
query,
result: cloneDeep(result),
- }),
+ }).toObject(),
).toEqual({
ROOT_QUERY: {
'people_one({"id":"5"})': {
@@ -1146,11 +1148,12 @@ describe('writing to the store', () => {
selectionSet: def.selectionSet,
result: cloneDeep(result),
context: {
- store: {},
+ storeFactory: defaultNormalizedCacheFactory,
+ store: defaultNormalizedCacheFactory(),
variables,
dataIdFromObject: () => '5',
},
- }),
+ }).toObject(),
).toEqual({
'5': {
'some_mutation({"input":{"id":"5","arr":[1,{"a":"b"}],"obj":{"a":"b"},"num":5.5,"nil":null,"bo":true}})': {
@@ -1195,7 +1198,7 @@ describe('writing to the store', () => {
lastName: 'Smith',
},
};
- const expStore = {
+ const expStore = defaultNormalizedCacheFactory({
ROOT_QUERY: {
author: {
type: 'id',
@@ -1204,13 +1207,13 @@ describe('writing to the store', () => {
},
},
'$ROOT_QUERY.author': data.author,
- };
+ });
expect(
writeQueryToStore({
result: data,
query,
- }),
- ).toEqual(expStore);
+ }).toObject(),
+ ).toEqual(expStore.toObject());
});
it('should correctly escape real ids', () => {
@@ -1230,7 +1233,7 @@ describe('writing to the store', () => {
__typename: 'Author',
},
};
- const expStore = {
+ const expStore = defaultNormalizedCacheFactory({
ROOT_QUERY: {
author: {
type: 'id',
@@ -1243,14 +1246,14 @@ describe('writing to the store', () => {
id: data.author.id,
__typename: data.author.__typename,
},
- };
+ });
expect(
writeQueryToStore({
result: data,
query,
dataIdFromObject,
- }),
- ).toEqual(expStore);
+ }).toObject(),
+ ).toEqual(expStore.toObject());
});
it('should correctly escape json blobs', () => {
@@ -1272,7 +1275,7 @@ describe('writing to the store', () => {
__typename: 'Author',
},
};
- const expStore = {
+ const expStore = defaultNormalizedCacheFactory({
ROOT_QUERY: {
author: {
type: 'id',
@@ -1288,14 +1291,14 @@ describe('writing to the store', () => {
json: data.author.info,
},
},
- };
+ });
expect(
writeQueryToStore({
result: data,
query,
dataIdFromObject,
- }),
- ).toEqual(expStore);
+ }).toObject(),
+ ).toEqual(expStore.toObject());
});
});
@@ -1337,7 +1340,7 @@ describe('writing to the store', () => {
}
}
`;
- const expStoreWithoutId = {
+ const expStoreWithoutId = defaultNormalizedCacheFactory({
'$ROOT_QUERY.author': {
firstName: 'John',
lastName: 'Smith',
@@ -1349,8 +1352,8 @@ describe('writing to the store', () => {
generated: true,
},
},
- };
- const expStoreWithId = {
+ });
+ const expStoreWithId = defaultNormalizedCacheFactory({
Author__129: {
firstName: 'John',
lastName: 'Smith',
@@ -1364,20 +1367,20 @@ describe('writing to the store', () => {
generated: false,
},
},
- };
+ });
const storeWithoutId = writeQueryToStore({
result: dataWithoutId,
query: queryWithoutId,
dataIdFromObject,
});
- expect(storeWithoutId).toEqual(expStoreWithoutId);
+ expect(storeWithoutId.toObject()).toEqual(expStoreWithoutId.toObject());
const storeWithId = writeQueryToStore({
result: dataWithId,
query: queryWithId,
store: storeWithoutId,
dataIdFromObject,
});
- expect(storeWithId).toEqual(expStoreWithId);
+ expect(storeWithId.toObject()).toEqual(expStoreWithId.toObject());
});
it('does not swallow errors other than field errors', () => {
@@ -1422,11 +1425,11 @@ describe('writing to the store', () => {
const newStore = writeQueryToStore({
query,
result: cloneDeep(result),
- store: assign({}, store) as NormalizedCache,
+ store: defaultNormalizedCacheFactory(store.toObject()),
});
- Object.keys(store).forEach(field => {
- expect(store[field]).toEqual(newStore[field]);
+ Object.keys(store.toObject()).forEach(field => {
+ expect(store.get(field)).toEqual(newStore.get(field));
});
});
@@ -1458,7 +1461,7 @@ describe('writing to the store', () => {
dataIdFromObject: getIdField,
});
- expect(newStore['1']).toEqual(result.todos[0]);
+ expect(newStore.get('1')).toEqual(result.todos[0]);
});
it('should warn when it receives the wrong data with non-union fragments (using an heuristic matcher)', () => {
@@ -1482,7 +1485,7 @@ describe('writing to the store', () => {
fragmentMatcherFunction,
});
- expect(newStore['1']).toEqual(result.todos[0]);
+ expect(newStore.get('1')).toEqual(result.todos[0]);
}, /Missing field description/);
});
@@ -1547,7 +1550,7 @@ describe('writing to the store', () => {
fragmentMatcherFunction,
});
- expect(newStore['1']).toEqual(result.todos[0]);
+ expect(newStore.get('1')).toEqual(result.todos[0]);
}, /Missing field price/);
});
@@ -1573,7 +1576,7 @@ describe('writing to the store', () => {
fragmentMatcherFunction,
});
- expect(newStore['1']).toEqual(result.todos[0]);
+ expect(newStore.get('1')).toEqual(result.todos[0]);
}, /Missing field __typename/);
});
@@ -1589,7 +1592,7 @@ describe('writing to the store', () => {
dataIdFromObject: getIdField,
});
- expect(newStore['ROOT_QUERY']).toEqual({ todos: null });
+ expect(newStore.get('ROOT_QUERY')).toEqual({ todos: null });
});
it('should not warn if a field is defered', () => {
let originalWarn = console.warn;
@@ -1613,14 +1616,14 @@ describe('writing to the store', () => {
fragmentMatcherFunction,
});
- expect(newStore['ROOT_QUERY']).toEqual({ id: 1 });
+ expect(newStore.get('ROOT_QUERY')).toEqual({ id: 1 });
expect(console.warn).not.toBeCalled();
console.warn = originalWarn;
});
});
it('throws when trying to write an object without id that was previously queried with id', () => {
- const store = {
+ const store = defaultNormalizedCacheFactory({
ROOT_QUERY: assign(
{},
{
@@ -1640,7 +1643,7 @@ describe('writing to the store', () => {
stringField: 'This is a string!',
},
) as StoreObject,
- } as NormalizedCache;
+ });
expect(() => {
writeQueryToStore({
@@ -1685,7 +1688,7 @@ describe('writing to the store', () => {
});
it('properly handles the connection directive', () => {
- const store: NormalizedCache = {};
+ const store = defaultNormalizedCacheFactory();
writeQueryToStore({
query: gql`
@@ -1723,7 +1726,7 @@ describe('writing to the store', () => {
store,
});
- expect(store).toEqual({
+ expect(store.toObject()).toEqual({
ROOT_QUERY: {
abc: [
{
diff --git a/packages/apollo-client/src/__tests__/client.ts b/packages/apollo-client/src/__tests__/client.ts
--- a/packages/apollo-client/src/__tests__/client.ts
+++ b/packages/apollo-client/src/__tests__/client.ts
@@ -1036,7 +1036,7 @@ describe('client', () => {
typeCondition: string,
context: any,
): boolean => {
- const obj = context.store[idValue.id];
+ const obj = context.store.get(idValue.id);
if (!obj) {
return false;
| Proposal: opening up the cache-inmemory implementation [apollo-client@2.0]
Hi apollo-client devs! Such an _amazing_ job with the 2.0 release so far, kudos. 💥
We've decided to port our setup over to Apollo 💛, but our needs are such that we want to integrate it with our custom `Observable` cache for storing the normalized objects.
The main problem is the way those normalized objects are currently stored and modified. Our implementation requires us to dispatch events whenever any object changes in the store; however, that's currently impossible without actually forking the whole package.
The way objects are updated now is by modifying the `data` object directly -- and there is no way to hook into the `set` or `delete` events (without resorting to hacks like ES6 Proxies, which aren't supported everywhere yet).
The simplest solution to this is adding a Map-like API/abstraction over the [`data` property](https://github.com/apollographql/apollo-client/blob/489e4b74ea74f7d0884aaa049c909fb267086e20/packages/apollo-cache-inmemory/src/inMemoryCache.ts#L38), so that instead of working with a plain object, the cache would work through an API, like this (a sketch of the resulting interface follows the list):
- `this.data[dataId] = x` => `this.data.set(dataId, x)`
- `this.data[dataId]` => `this.data.get(dataId)`
- `delete this.data[dataId]` => `this.data.delete(dataId)`
- `this.data = {}` => `this.data.clear()`
- `this.data` => `this.data.toObject()` (extra method that flattens the contents to a plain object)
This way `data` can be a single instance throughout the whole life of the `InMemoryCache` and its underlying implementation can vary -- i.e. it can be an `Object`, as it is now, or a native `Map` (which is generally faster than `object[key]` operations).
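For concreteness, a minimal TypeScript sketch of the proposed abstraction (the method set mirrors the list above; the inline type aliases are assumptions standing in for the package's real types):

```
// Assumed shapes -- the real definitions would live in apollo-cache-inmemory's types.
type StoreObject = { [storeFieldKey: string]: any };
type NormalizedCacheObject = { [dataId: string]: StoreObject | undefined };

interface NormalizedCache {
  get(dataId: string): StoreObject | undefined;
  set(dataId: string, value: StoreObject): void;
  delete(dataId: string): void;
  clear(): void;
  toObject(): NormalizedCacheObject; // flatten the contents to a plain object
}

// Default implementation backed by a plain object, preserving today's behavior.
class ObjectCache implements NormalizedCache {
  constructor(private data: NormalizedCacheObject = {}) {}
  public get(dataId: string) { return this.data[dataId]; }
  public set(dataId: string, value: StoreObject) { this.data[dataId] = value; }
  public delete(dataId: string) { this.data[dataId] = undefined; }
  public clear() { this.data = {}; }
  public toObject() { return this.data; }
}

// An event-dispatching cache then only needs to wrap the mutating methods:
class ObservableCache extends ObjectCache {
  public set(dataId: string, value: StoreObject) {
    super.set(dataId, value);
    // dispatch({ type: 'set', dataId, value }); -- hypothetical event hook
  }
}
```

The `InMemoryCache` would then hold a single `NormalizedCache` instance instead of a bare object.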
A similar abstraction could be put in place of the `optimistic` property, currently an `Array` instance, which could get a simple, `Set`-like interface.
I have the first change ready in a fork (it's a simple change, though it touches many places), with all the tests passing, and I've also added some extra ones for the added abstraction. This feature would greatly simplify our setup, since we would not need to fork the whole package, but only provide a custom implementation of the Cache when it is constructed.
If you are happy with this proposal, I'll refactor my fork, clean up the commits and open a PR 🥇.
Also, we'd be happy to share our Observable-based implementation of the cache with the Apollo OSS community, once it's ready.
**Intended outcome:**
Able to hook into cache changes while re-using 99% of `apollo-cache-inmemory`, without creating a fork.
This requires adding two simple abstractions:
1. over the `data` property [defined here](https://github.com/apollographql/apollo-client/blob/489e4b74ea74f7d0884aaa049c909fb267086e20/packages/apollo-cache-inmemory/src/inMemoryCache.ts#L38)
2. over the `optimistic` property [defined here](https://github.com/apollographql/apollo-client/blob/489e4b74ea74f7d0884aaa049c909fb267086e20/packages/apollo-cache-inmemory/src/inMemoryCache.ts#L40)
**Actual outcome:**
A fork was required. :)
**Version**
- apollo-cache-inmemory@0.2.0-beta.4
| I'm all for this! The external API provided by Apollo Cache should be able to stay the same and the internals change as needed! Let's try it out!
@niieani @jbaxleyiii If we abstracted data's setter and getter, it would be super easy to use the normalized cache in `@ngrx/store` :ng: :+1: .
I forked Apollo's `InMemoryCache` a few days ago and created a working prototype based on it.
Would be awesome to land it before the official release :heart:
@niieani any progress on that? | 2017-10-20T17:56:43Z | 1 |
apollographql/apollo-client | 2,345 | apollographql__apollo-client-2345 | [
"2309"
] | 4de051188d04351816e4034b2d75238e1ecdb783 | diff --git a/packages/apollo-cache/src/cache.ts b/packages/apollo-cache/src/cache.ts
--- a/packages/apollo-cache/src/cache.ts
+++ b/packages/apollo-cache/src/cache.ts
@@ -48,6 +48,10 @@ export abstract class ApolloCache<TSerialized> implements DataProxy {
public transformDocument(document: DocumentNode): DocumentNode {
return document;
}
+ // experimental
+ public transformForLink(document: DocumentNode): DocumentNode {
+ return document;
+ }
// DataProxy API
/**
diff --git a/packages/apollo-client/src/core/QueryManager.ts b/packages/apollo-client/src/core/QueryManager.ts
--- a/packages/apollo-client/src/core/QueryManager.ts
+++ b/packages/apollo-client/src/core/QueryManager.ts
@@ -132,8 +132,9 @@ export class QueryManager<TStore> {
}
const mutationId = this.generateQueryId();
+ const cache = this.dataStore.getCache();
mutation = removeConnectionDirectiveFromDocument(
- this.dataStore.getCache().transformDocument(mutation),
+ cache.transformDocument(mutation),
);
variables = assign(
@@ -187,7 +188,13 @@ export class QueryManager<TStore> {
return new Promise((resolve, reject) => {
let storeResult: FetchResult<T> | null;
let error: ApolloError;
- execute(this.link, request).subscribe({
+ let newRequest = {
+ ...request,
+ query: cache.transformForLink
+ ? cache.transformForLink(request.query)
+ : request.query,
+ };
+ execute(this.link, newRequest).subscribe({
next: (result: ExecutionResult) => {
if (result.errors && errorPolicy === 'none') {
error = new ApolloError({
@@ -281,8 +288,9 @@ export class QueryManager<TStore> {
metadata = null,
fetchPolicy = 'cache-first', // cache-first is the default fetch policy.
} = options;
+ const cache = this.dataStore.getCache();
- const query = this.dataStore.getCache().transformDocument(options.query);
+ const query = cache.transformDocument(options.query);
let storeResult: any;
let needToFetch: boolean = fetchPolicy === 'network-only';
@@ -355,7 +363,9 @@ export class QueryManager<TStore> {
const networkResult = this.fetchRequest({
requestId,
queryId,
- document: query,
+ document: cache.transformForLink
+ ? cache.transformForLink(query)
+ : query,
options,
fetchMoreForQueryId,
}).catch(error => {
@@ -840,7 +850,8 @@ export class QueryManager<TStore> {
options: SubscriptionOptions,
): Observable<any> {
const { query } = options;
- let transformedDoc = this.dataStore.getCache().transformDocument(query);
+ const cache = this.dataStore.getCache();
+ let transformedDoc = cache.transformDocument(query);
const variables = assign(
{},
@@ -890,7 +901,13 @@ export class QueryManager<TStore> {
},
};
- sub = execute(this.link, request).subscribe(handler);
+ let newRequest = {
+ ...request,
+ query: cache.transformForLink
+ ? cache.transformForLink(request.query)
+ : request.query,
+ };
+ sub = execute(this.link, newRequest).subscribe(handler);
}
return () => {
| diff --git a/packages/apollo-client/src/core/__tests__/ObservableQuery.ts b/packages/apollo-client/src/core/__tests__/ObservableQuery.ts
--- a/packages/apollo-client/src/core/__tests__/ObservableQuery.ts
+++ b/packages/apollo-client/src/core/__tests__/ObservableQuery.ts
@@ -783,6 +783,69 @@ describe('ObservableQuery', () => {
}
});
});
+ it('does invalidate the currentResult data if the variables change', done => {
+ // Standard data for all these tests
+ const query = gql`
+ query UsersQuery($page: Int) {
+ users {
+ id
+ name
+ posts(page: $page) {
+ title
+ }
+ }
+ }
+ `;
+ const variables = { page: 1 };
+ const differentVariables = { page: 2 };
+ const dataOne = {
+ users: [
+ {
+ id: 1,
+ name: 'James',
+ posts: [{ title: 'GraphQL Summit' }, { title: 'Awesome' }],
+ },
+ ],
+ };
+ const dataTwo = {
+ users: [
+ {
+ id: 1,
+ name: 'James',
+ posts: [{ title: 'Old post' }],
+ },
+ ],
+ };
+
+ const observable: ObservableQuery<any> = mockWatchQuery(
+ {
+ request: { query, variables },
+ result: { data: dataOne },
+ },
+ {
+ request: { query, variables: differentVariables },
+ result: { data: dataTwo },
+ delay: 25,
+ },
+ );
+
+ subscribeAndCount(done, observable, (handleCount, result) => {
+ if (handleCount === 1) {
+ expect(result.data).toEqual(dataOne);
+ expect(observable.currentResult().data).toEqual(dataOne);
+ observable.setVariables(differentVariables);
+ expect(observable.currentResult().data).toEqual({});
+ expect(observable.currentResult().loading).toBe(true);
+ }
+ // after loading is false and data has returned
+ if (handleCount === 3) {
+ expect(result.data).toEqual(dataTwo);
+ expect(observable.currentResult().data).toEqual(dataTwo);
+ expect(observable.currentResult().loading).toBe(false);
+ done();
+ }
+ });
+ });
it('does not invalidate the currentResult errors if the variables change', done => {
const queryManager = mockQueryManager(
| Enable cache implementations to modify document before transmission to links
Per a Slack conversation with @jbaxleyiii.
As cache implementations become more sophisticated, it will be helpful to allow end users to signal cache-specific behaviors via directives in the GraphQL document. (A simple, popular example would be a `@ttl` directive to set a cache TTL on a specific field.) These directives are meant exclusively for consumption by the client, and need to be stripped before reaching the server, lest a spec-compliant server complain about an unknown directive.
The semantics of these directives are owned by the cache implementation, so it seems reasonable for the cache to be able to 'see' these directives in its various lifecycle methods (including `read` and `write`) and also be responsible for stripping the directives from the document before the document is sent to the link stack (and ultimately to a server).
Right now, the `prepareDocument` cache lifecycle method is too early to remove said directives, because the directives will then be absent from the document when it's passed to `read` and `write`. (However, `prepareDocument` serves a useful purpose for the `addTypenames` use case, where both cache and server should see the transformed document.) Ideally, there could be a `prepareDocumentForLink` lifecycle method that is called before a given document is sent onward to the network.
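The patch above lands this hook as `transformForLink` on the `ApolloCache` base class. As an illustrative sketch (the `@ttl` directive and the `stripDirective` helper are hypothetical, not a shipped API), a cache could strip client-only directives there while `read`/`write` still see them:

```
import { DocumentNode, visit } from 'graphql';
import { InMemoryCache } from 'apollo-cache-inmemory';

// Remove every occurrence of a named directive; returning null from a
// graphql-js visitor deletes the visited node.
function stripDirective(doc: DocumentNode, name: string): DocumentNode {
  return visit(doc, {
    Directive(node) {
      if (node.name.value === name) return null;
    },
  });
}

class TtlAwareCache extends InMemoryCache {
  // Called just before the document is handed to the link stack, after
  // transformDocument/read/write have already seen the @ttl directives.
  public transformForLink(document: DocumentNode): DocumentNode {
    return stripDirective(document, 'ttl');
  }
}
```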
| 2017-10-19T23:46:30Z | 0.2 |
apollographql/apollo-client | 1,801 | apollographql__apollo-client-1801 | [
"1779",
"1779"
] | 1fd04704958bc126c0c5a373d2a6f123aba64de1 | diff --git a/src/ApolloClient.ts b/src/ApolloClient.ts
--- a/src/ApolloClient.ts
+++ b/src/ApolloClient.ts
@@ -65,7 +65,7 @@ import {
} from './core/watchQueryOptions';
import {
- storeKeyNameFromFieldNameAndArgs,
+ getStoreKeyName,
} from './data/storeUtils';
import {
@@ -222,7 +222,7 @@ export default class ApolloClient implements DataProxy {
this.disableNetworkFetches = ssrMode || ssrForceFetchDelay > 0;
this.dataId = dataIdFromObject = dataIdFromObject || defaultDataIdFromObject;
this.dataIdFromObject = this.dataId;
- this.fieldWithArgs = storeKeyNameFromFieldNameAndArgs;
+ this.fieldWithArgs = getStoreKeyName;
this.queryDeduplication = queryDeduplication;
this.ssrMode = ssrMode;
diff --git a/src/core/QueryManager.ts b/src/core/QueryManager.ts
--- a/src/core/QueryManager.ts
+++ b/src/core/QueryManager.ts
@@ -1096,24 +1096,30 @@ export class QueryManager {
}
return result;
- }).then(() => {
-
+ }).then((result) => {
let resultFromStore: any;
- try {
- // ensure result is combined with data already in store
- // this will throw an error if there are missing fields in
- // the results if returnPartialData is false.
- resultFromStore = readQueryFromStore({
- store: this.getApolloState().data,
- variables,
- query: document,
- config: this.reducerConfig,
- fragmentMatcherFunction: this.fragmentMatcher.match,
- });
- // ensure multiple errors don't get thrown
- /* tslint:disable */
- } catch (e) {}
- /* tslint:enable */
+
+ if (fetchMoreForQueryId) {
+ // XXX We don't write fetchMore results to the store because this would overwrite
+ // the original result in case an @connection directive is used.
+ resultFromStore = result.data;
+ } else {
+ try {
+ // ensure result is combined with data already in store
+ // this will throw an error if there are missing fields in
+ // the results if returnPartialData is false.
+ resultFromStore = readQueryFromStore({
+ store: this.getApolloState().data,
+ variables,
+ query: document,
+ config: this.reducerConfig,
+ fragmentMatcherFunction: this.fragmentMatcher.match,
+ });
+ // ensure multiple errors don't get thrown
+ /* tslint:disable */
+ } catch (e) {}
+ /* tslint:enable */
+ }
const { reducerError } = this.getApolloState();
if (reducerError && reducerError.queryId === queryId) {
diff --git a/src/data/readFromStore.ts b/src/data/readFromStore.ts
--- a/src/data/readFromStore.ts
+++ b/src/data/readFromStore.ts
@@ -16,7 +16,7 @@ import {
} from './storeUtils';
import {
- storeKeyNameFromFieldNameAndArgs,
+ getStoreKeyName,
} from './storeUtils';
import {
@@ -132,13 +132,13 @@ const readStoreResolver: Resolver = (
idValue: IdValueWithPreviousResult,
args: any,
context: ReadStoreContext,
- { resultKey }: ExecInfo,
+ { resultKey, directives }: ExecInfo,
) => {
assertIdValue(idValue);
const objId = idValue.id;
const obj = context.store[objId];
- const storeKeyName = storeKeyNameFromFieldNameAndArgs(fieldName, args);
+ const storeKeyName = getStoreKeyName(fieldName, args, directives);
let fieldValue = (obj || {})[storeKeyName];
if (typeof fieldValue === 'undefined') {
diff --git a/src/data/store.ts b/src/data/store.ts
--- a/src/data/store.ts
+++ b/src/data/store.ts
@@ -69,7 +69,8 @@ export function data(
// Ignore results from old requests
// XXX this means that if you have a refetch interval which is shorter than your roundtrip time,
// your query will be in the loading state forever!
- if (action.requestId < queries[action.queryId].lastRequestId) {
+ // do not write to the store if this is for fetchMore
+ if (action.requestId < queries[action.queryId].lastRequestId || action.fetchMoreForQueryId) {
return previousState;
}
diff --git a/src/data/storeUtils.ts b/src/data/storeUtils.ts
--- a/src/data/storeUtils.ts
+++ b/src/data/storeUtils.ts
@@ -74,19 +74,40 @@ export function valueToObjectRepresentation(argObj: any, name: NameNode, value:
}
export function storeKeyNameFromField(field: FieldNode, variables?: Object): string {
- if (field.arguments && field.arguments.length) {
- const argObj: Object = {};
+ let directivesObj: any = null;
+ if (field.directives) {
+ directivesObj = {};
+ field.directives.forEach((directive) => {
+ directivesObj[directive.name.value] = {};
+
+ if (directive.arguments) {
+ directive.arguments.forEach((({name, value}) => valueToObjectRepresentation(
+ directivesObj[directive.name.value], name, value, variables)));
+ }
+ });
+ }
+ let argObj: any = null;
+ if (field.arguments && field.arguments.length) {
+ argObj = {};
field.arguments.forEach(({name, value}) => valueToObjectRepresentation(
argObj, name, value, variables));
-
- return storeKeyNameFromFieldNameAndArgs(field.name.value, argObj);
}
- return field.name.value;
+ return getStoreKeyName(field.name.value, argObj, directivesObj);
}
-export function storeKeyNameFromFieldNameAndArgs(fieldName: string, args?: Object): string {
+export type Directives = {
+ [directiveName: string]: {
+ [argName: string]: any;
+ };
+};
+
+export function getStoreKeyName(fieldName: string, args?: Object, directives?: Directives): string {
+ if (directives && directives['connection'] && directives['connection']['key']) {
+ return directives['connection']['key'];
+ }
+
if (args) {
const stringifiedArgs: string = JSON.stringify(args);
diff --git a/src/queries/queryTransform.ts b/src/queries/queryTransform.ts
--- a/src/queries/queryTransform.ts
+++ b/src/queries/queryTransform.ts
@@ -51,6 +51,38 @@ function addTypenameToSelectionSet(
}
}
+function removeConnectionDirectiveFromSelectionSet(selectionSet: SelectionSetNode) {
+ if (selectionSet.selections) {
+ selectionSet.selections.forEach((selection) => {
+ if (selection.kind === 'Field' && selection as FieldNode && selection.directives) {
+ selection.directives = selection.directives.filter((directive) => {
+ const willRemove = directive.name.value === 'connection';
+ if (willRemove) {
+ if (!directive.arguments || !directive.arguments.some((arg) => arg.name.value === 'key')) {
+ console.warn('Removing an @connection directive even though it does not have a key. ' +
+ 'You may want to use the key parameter to specify a store key.');
+ }
+ }
+
+ return !willRemove;
+ });
+ }
+ });
+
+ selectionSet.selections.forEach((selection) => {
+ if (selection.kind === 'Field') {
+ if (selection.selectionSet) {
+ removeConnectionDirectiveFromSelectionSet(selection.selectionSet);
+ }
+ } else if (selection.kind === 'InlineFragment') {
+ if (selection.selectionSet) {
+ removeConnectionDirectiveFromSelectionSet(selection.selectionSet);
+ }
+ }
+ });
+ }
+}
+
export function addTypenameToDocument(doc: DocumentNode) {
checkDocument(doc);
const docClone = cloneDeep(doc);
@@ -62,3 +94,14 @@ export function addTypenameToDocument(doc: DocumentNode) {
return docClone;
}
+
+export function removeConnectionDirectiveFromDocument(doc: DocumentNode) {
+ checkDocument(doc);
+ const docClone = cloneDeep(doc);
+
+ docClone.definitions.forEach((definition: DefinitionNode) => {
+ removeConnectionDirectiveFromSelectionSet((definition as OperationDefinitionNode).selectionSet);
+ });
+
+ return docClone;
+}
diff --git a/src/transport/networkInterface.ts b/src/transport/networkInterface.ts
--- a/src/transport/networkInterface.ts
+++ b/src/transport/networkInterface.ts
@@ -17,6 +17,10 @@ import {
BatchAfterwareInterface,
} from './afterware';
+import {
+ removeConnectionDirectiveFromDocument,
+} from '../queries/queryTransform';
+
/**
* This is an interface that describes an GraphQL document to be sent
* to the server.
@@ -192,6 +196,12 @@ export class HTTPFetchNetworkInterface extends BaseNetworkInterface {
return this.applyMiddlewares({
request,
options,
+ }).then((rao) => {
+ if (rao.request.query) {
+ rao.request.query = removeConnectionDirectiveFromDocument(rao.request.query);
+ }
+
+ return rao;
}).then( (rao) => this.fetchFromRemoteEndpoint.call(this, rao))
.then(response => this.applyAfterwares({
response: response as Response,
| diff --git a/test/client.ts b/test/client.ts
--- a/test/client.ts
+++ b/test/client.ts
@@ -2554,8 +2554,137 @@ describe('client', () => {
return withWarning(() => client.query({ query }), /Missing field description/);
});
+
+ it('runs a query with the connection directive and writes it to the store key defined in the directive', () => {
+ const query = gql`
+ {
+ books(skip: 0, limit: 2) @connection(key: "abc") {
+ name
+ }
+ }`;
+
+ const transformedQuery = gql`
+ {
+ books(skip: 0, limit: 2) @connection(key: "abc") {
+ name
+ __typename
+ }
+ }`;
+
+ const result = {
+ 'books': [
+ {
+ 'name': 'abcd',
+ '__typename': 'Book',
+ },
+ ],
+ };
+
+ const networkInterface = mockNetworkInterface({
+ request: { query: transformedQuery },
+ result: { data: result },
+ });
+
+ const client = new ApolloClient({
+ networkInterface,
+ });
+
+ return client.query({ query }).then((actualResult) => {
+ assert.deepEqual(actualResult.data, result);
+ });
+ });
+
+ it('should not remove the connection directive at the store level', () => {
+ const query = gql`
+ {
+ books(skip: 0, limit: 2) @connection {
+ name
+ }
+ }`;
+
+ const transformedQuery = gql`
+ {
+ books(skip: 0, limit: 2) @connection {
+ name
+ __typename
+ }
+ }`;
+
+ const result = {
+ 'books': [
+ {
+ 'name': 'abcd',
+ '__typename': 'Book',
+ },
+ ],
+ };
+
+ const networkInterface = mockNetworkInterface({
+ request: { query: transformedQuery },
+ result: { data: result },
+ });
+
+ const client = new ApolloClient({
+ networkInterface,
+ });
+
+ return client.query({ query }).then((actualResult) => {
+ assert.deepEqual(actualResult.data, result);
+ });
+ });
});
+it('should run a query with the connection directive and write the result to the store key defined in the directive', () => {
+ const query = gql`
+ {
+ books(skip: 0, limit: 2) @connection(key: "abc") {
+ name
+ }
+ }`;
+
+ const transformedQuery = gql`
+ {
+ books(skip: 0, limit: 2) @connection(key: "abc") {
+ name
+ __typename
+ }
+ }`;
+
+ const result = {
+ 'books': [
+ {
+ 'name': 'abcd',
+ '__typename': 'Book',
+ },
+ ],
+ };
+
+ const networkInterface = mockNetworkInterface({
+ request: { query: transformedQuery },
+ result: { data: result },
+ });
+
+ const client = new ApolloClient({
+ networkInterface,
+ });
+
+ return client.query({ query }).then((actualResult) => {
+ assert.deepEqual(actualResult.data, result);
+ assert.deepEqual(client.store.getState().apollo.data, {
+ 'ROOT_QUERY.abc.0': { name: 'abcd', __typename: 'Book' },
+ 'ROOT_QUERY': {
+ abc: [
+ {
+ 'generated': true,
+ 'id': 'ROOT_QUERY.abc.0',
+ 'type': 'id',
+ },
+ ],
+ },
+ });
+ });
+ });
+
function clientRoundtrip(
query: DocumentNode,
data: ExecutionResult,
diff --git a/test/networkInterface.ts b/test/networkInterface.ts
--- a/test/networkInterface.ts
+++ b/test/networkInterface.ts
@@ -56,6 +56,26 @@ describe('network interface', () => {
}
`;
+ const simpleQueryWithConnection = gql`
+ query people {
+ allPeople(first: 1) @connection(key: "people") {
+ people {
+ name
+ }
+ }
+ }
+ `;
+
+ const simpleQueryWithConnectionButNoKey = gql`
+ query people {
+ allPeople(first: 1) @connection {
+ people {
+ name
+ }
+ }
+ }
+ `;
+
const simpleResult = {
data: {
allPeople: {
@@ -483,6 +503,44 @@ describe('network interface', () => {
});
});
});
+
+ describe('transforming queries', () => {
+ it('should remove the @connection directive', () => {
+ const swapi = createNetworkInterface({ uri: swapiUrl });
+
+ const simpleRequestWithConnection = {
+ query: simpleQueryWithConnection,
+ variables: {},
+ debugName: 'People query',
+ };
+
+ return assert.eventually.deepEqual(
+ swapi.query(simpleRequestWithConnection),
+ simpleResult,
+ );
+ });
+
+ it('should remove the @connection directive even with no key but warn the user', () => {
+ const swapi = createNetworkInterface({ uri: swapiUrl });
+
+ const simpleRequestWithConnectionButNoKey = {
+ query: simpleQueryWithConnectionButNoKey,
+ variables: {},
+ debugName: 'People query',
+ };
+
+ const expected = 'Removing an @connection directive even though it does not have a ' +
+ 'key. You may want to use the key parameter to specify a store key.';
+
+ return assert.eventually.deepEqual(
+ withWarning(
+ () => swapi.query(simpleRequestWithConnectionButNoKey),
+ new RegExp(expected),
+ ),
+ simpleResult,
+ );
+ });
+ });
});
// simulate middleware by altering variables and options
diff --git a/test/readFromStore.ts b/test/readFromStore.ts
--- a/test/readFromStore.ts
+++ b/test/readFromStore.ts
@@ -812,4 +812,43 @@ describe('reading from the store', () => {
nullField: null,
});
});
+
+ it('properly handles the connection directive', () => {
+ const store: NormalizedCache = {
+ 'ROOT_QUERY': {
+ 'abc': [
+ {
+ 'generated': true,
+ 'id': 'ROOT_QUERY.abc.0',
+ 'type': 'id',
+ },
+ ],
+ },
+ 'ROOT_QUERY.abc.0': {
+ 'name': 'efgh',
+ },
+ };
+
+ const queryResult = readQueryFromStore({
+ store,
+ query: gql`
+ {
+ books(skip: 0, limit: 2) @connection(key: "abc") {
+ name
+ }
+ }
+ `,
+ });
+
+ assert.deepEqual<{}>(
+ queryResult,
+ {
+ 'books': [
+ {
+ 'name': 'efgh',
+ },
+ ],
+ },
+ );
+ });
});
diff --git a/test/util/wrap.ts b/test/util/wrap.ts
--- a/test/util/wrap.ts
+++ b/test/util/wrap.ts
@@ -17,9 +17,10 @@ export function withWarning(func: Function, regex: RegExp) {
console.warn = (m: string) => message = m;
- return Promise.resolve(func()).then(() => {
+ return Promise.resolve(func()).then((val) => {
assert.match(message, regex);
console.warn = oldWarn;
+ return val;
});
}
diff --git a/test/writeToStore.ts b/test/writeToStore.ts
--- a/test/writeToStore.ts
+++ b/test/writeToStore.ts
@@ -1311,4 +1311,62 @@ describe('writing to the store', () => {
});
}, /stringField(.|\n)*abcd/g);
});
+
+ it('properly handles the connection directive', () => {
+ const store: NormalizedCache = {};
+
+ writeQueryToStore({
+ query: gql`
+ {
+ books(skip: 0, limit: 2) @connection(key: "abc") {
+ name
+ }
+ }
+ `,
+ result: {
+ books: [
+ {
+ name: 'abcd',
+ },
+ ],
+ },
+ store,
+ });
+
+ writeQueryToStore({
+ query: gql`
+ {
+ books(skip: 2, limit: 4) @connection(key: "abc") {
+ name
+ }
+ }
+ `,
+ result: {
+ books: [
+ {
+ name: 'efgh',
+ },
+ ],
+ },
+ store,
+ });
+
+ assert.deepEqual<NormalizedCache>(
+ store,
+ {
+ 'ROOT_QUERY': {
+ 'abc': [
+ {
+ 'generated': true,
+ 'id': 'ROOT_QUERY.abc.0',
+ 'type': 'id',
+ },
+ ],
+ },
+ 'ROOT_QUERY.abc.0': {
+ 'name': 'efgh',
+ },
+ },
+ );
+ });
});
| Adjusting the store in order to squash pagination results via Relay's @connection directive.
I am using the Relay convention for pagination as described [here](http://dev.apollodata.com/react/pagination.html#relay-cursors). In Relay Modern there is a directive called `@connection`. It contains a key that Relay uses to retrieve all elements of one connection from the store. Apollo Client, on the other hand, ignores `@connection` and stores objects under keys containing variables like `after` or `count`, which are used to fetch the right amount of data with a cursor-based pagination approach.
The real problem comes into play when I execute a mutation that adds a new edge to the connection. Here the result has to be stored under the correct key in the store via `proxy.readQuery(...)`, which is impossible if you have multiple keys for lists of the same connection.
I think the problem is easier to understand with a short example. This is how a proxy store should look and how it looks right now:
**Intended outcome:**
```
Project:AAAAAAAAA
| issues -> ref to $Project:AAAAAAAAA.issues.edges
$Project:AAAAAAAAA.issues.edges
| edges
| 0 -> ref to $Project:AAAAAAAAA.issues.edges.0
| ...
$Project:AAAAAAAAA.issues.edges.0
$Project:AAAAAAAAA.issues.edges.1
$Project:AAAAAAAAA.issues.edges.2
$Project:AAAAAAAAA.issues.edges.pageInfo
```
**Actual outcome:**
```
Project:AAAAAAAAA
| issues({"first":2,"after":null}) -> ref to $Project:AAAAAAAAA.issues({"first":2,"after":null})
| issues({"first":1,"after":XYXYXYXYXY}) -> ref to $Project:AAAAAAAAA.issues({"first":1,"after":XYXYXYXYXY})
$Project:AAAAAAAAA.issues({"first":2,"after":null})
| edges
| 0 -> ref to $Project:AAAAAAAAA.issues({"first":2,"after":null}).edges.0
| 1 -> ref to $Project:AAAAAAAAA.issues({"first":2,"after":null}).edges.1
$Project:AAAAAAAAA.issues({"first":1,"after":XYXYXYXYXY})
| edges
| 0 -> ref to $Project:AAAAAAAAA.issues({"first":1,"after":XYXYXYXYXY}).edges.0
$Project:AAAAAAAAA.issues({"first":2,"after":null}).edges.0
$Project:AAAAAAAAA.issues({"first":2,"after":null}).edges.1
| cursor="XYXYXYXYXY"
$Project:AAAAAAAAA.issues({"first":2,"after":null}).edges.pageInfo
$Project:AAAAAAAAA.issues({"first":1,"after":XYXYXYXYXY}).edges.0
```
How can I tell the Apollo Client store to use a unified key for storing edges of the same connection, ignoring arguments like `after` or `cursor` that are only used for accessing the field? Is there a way to make use of the `@connection` information provided in the schema?
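(For reference, the patch attached above answers exactly this question: the store field name honors a `@connection(key: ...)` directive when present. Condensed from the `getStoreKeyName` it adds:)

```
// A field annotated with @connection(key: "issues") is stored under "issues"
// regardless of its arguments; otherwise the serialized arguments end up in
// the store key, which is what produces the duplicated entries shown above.
function getStoreKeyName(fieldName: string, args?: Object, directives?: any): string {
  if (directives && directives['connection'] && directives['connection']['key']) {
    return directives['connection']['key'];
  }
  return args ? `${fieldName}(${JSON.stringify(args)})` : fieldName;
}
```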
| You won't need to worry about that provided you are using fetchMore -> updateQuery to update your cursor or, in your case, first/after. The query will get stored in Redux under a new key name based on the variables, but the data will flow down to the original query. That said, readQuery/writeQuery is the spot that needs some work. This is what I did (in an infinite-scroll solution...):
I prefetch immediately after login so as to instantiate the cache for multiple queries:
```
await this.props.client.query({
  query: ALL_ITEMS_QUERY,
  variables: rootQueryVariables('allItems'),
});
```
Then, the key to making it work:
The code below lives in my mutate `update` function. `unshiftItemToQuery` is simply a helper (a sketch of what it boils down to follows below), but the point to note is that `rootQueryVariables` will always return the same vars as the initial `allItems` query.
```
// All Items
unshiftItemToQuery(
  proxy,
  {
    mutationResult,
    query: ALL_ITEMS_QUERY,
    variables: rootQueryVariables('allItems'),
    ownProps,
    which: 'allItems',
    self: 'assignItem',
  })
```
Since the paginated data flows down to the initial query, reading the query with the initial vars will allow you to find the result set and mutate that.
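A sketch of what a helper like `unshiftItemToQuery` can boil down to (the `which`/`self` field names come from the call above; the loose typing just keeps the sketch short):

```
function unshiftItemToQuery(proxy: any, { mutationResult, query, variables, which, self }: any) {
  // Read the connection as it was cached under the *initial* variables...
  const data = proxy.readQuery({ query, variables });
  // ...prepend the item returned by the mutation...
  data[which].unshift(mutationResult.data[self]);
  // ...and write the updated list back under the same store key.
  proxy.writeQuery({ query, variables, data });
}
```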
@jonmanzo That variables part is exactly what gives me a headache. I guess you have some sort of store for variables accessible through the `rootQueryVariables` function. I imagine it being a bit problematic if the variables are given through user input. Surely there are ways to implement `rootQueryVariables` to hack around it, but this solution still feels like a hack, as the original Apollo store implementation does not provide a solution for that case.
It is also worth mentioning that the work-around causes the problem of having links to paginated objects **twice** in the data store. Though in most cases it shouldn't be a problem, it still feels very wrong to have this redundancy in the store.
In my opinion there needs to be a way to access fields in the store by some unique id, which can be defined at the query level. Relay has a pretty nice way of doing this with the `@connection` annotation:
```
query PeopleQuery($count: Int!, $cursor: String) {
persons(first: $count, after: $cursor) @connection(key: "KEY_TO_RETRIEVE_PERSONS") {
...
}
}
```
and then accessing that connection from the store via `ConnectionHandler`.
**PS**: for now I am using another workaround, `updateQueries` in the `mutate` function (sketched below), which I am still not comfortable with, as it relies on the exact naming of the initial query. Additionally, I have to drill down the whole query tree in order to push into the affected array instead of accessing it directly from the store (in contrast to e.g. `proxy.readFragment(...)`, if that one accepted my proposal of a `@connection` key directive).
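Roughly, that `updateQueries` workaround looks like the following (the operation and field names are illustrative, not from the thread):

```
this.props.mutate({
  variables: { input },
  updateQueries: {
    // The key must match the operation name of the initial query exactly:
    PeopleQuery: (previousResult: any, { mutationResult }: any) => ({
      ...previousResult,
      persons: {
        ...previousResult.persons,
        // drill down the whole result tree just to push one edge:
        edges: [...previousResult.persons.edges, mutationResult.data.addPerson.personEdge],
      },
    }),
  },
});
```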
Agreed 100%, it is a hack in my opinion and I've had to make functional exclusions to work under this model to boot. Digging through the src, I didn't find any other options that would allow a modification of the key utilized to store the query...
Honestly, I'd even be ok with the ability to define a query cache key name. We're pushing an ES query through the args, so the potential exists for an insane number of possible argument structures. Being able to choose which arguments are used in the key name would clean this up instantly in our use case.
Oh, and we cannot solely use updateQueries, as we're moving objects from one cache to another based on user action and ran into issues where the destination query was no longer watched once its component unmounted. Found this a better solution than putting all the queries into a top-level wrapper just to keep them available to updateQueries.
We did want to have this in the original implementation for pagination, but didn't do it at first because the simpler version has worked fine for a while. I agree that being able to pick a custom store field name is a great way to do this.
We used to call this concept `quietArguments` and were thinking of an `@apolloFetchMore` directive: https://github.com/apollographql/apollo-client/pull/350
Perhaps we should bring this back and now may be the time to do it!
@zunder As @stubailo said, we think it would be a good idea to allow for an `@connection` directive. The key would determine where in the store the connection gets written to, so it would be easy to read it out with `readQuery`. The only thing that we'd have to decide is how you would tell Apollo Client when you're fetching more for a certain connection, and whether that "more" goes at the beginning, the end or somewhere in the middle.
I don't think we need to change anything about the fetchMore mechanism to add this directive - we could still use the manual reducer approach. Only difference would be having a more reasonable cache key!
Yeah, you're right, since `fetchMore` is imperative we don't need any configuration.
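i.e. fetching the next page stays an explicit call. A sketch against the `@connection`-keyed query above (field names assumed):

```
observableQuery.fetchMore({
  variables: { cursor: lastCursor },
  updateQuery: (previousResult: any, { fetchMoreResult }: any) => ({
    ...previousResult,
    persons: {
      ...previousResult.persons,
      edges: [...previousResult.persons.edges, ...fetchMoreResult.persons.edges],
      pageInfo: fetchMoreResult.persons.pageInfo,
    },
  }),
});
```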
| 2017-06-16T21:57:42Z | 1.3 |
apollographql/apollo-client | 1,664 | apollographql__apollo-client-1664 | [
"1334"
] | 8f362ae8f1f8026adf287dacc93b3ba42b57d259 | diff --git a/src/ApolloClient.ts b/src/ApolloClient.ts
--- a/src/ApolloClient.ts
+++ b/src/ApolloClient.ts
@@ -136,6 +136,7 @@ export default class ApolloClient implements DataProxy {
private devToolsHookCb: Function;
private proxy: DataProxy | undefined;
private fragmentMatcher: FragmentMatcherInterface;
+ private ssrMode: boolean;
/**
* Constructs an instance of {@link ApolloClient}.
@@ -215,6 +216,7 @@ export default class ApolloClient implements DataProxy {
this.dataId = dataIdFromObject = dataIdFromObject || defaultDataIdFromObject;
this.fieldWithArgs = storeKeyNameFromFieldNameAndArgs;
this.queryDeduplication = queryDeduplication;
+ this.ssrMode = ssrMode;
if (ssrForceFetchDelay) {
setTimeout(() => this.disableNetworkFetches = false, ssrForceFetchDelay);
@@ -522,6 +524,7 @@ export default class ApolloClient implements DataProxy {
reducerConfig: this.reducerConfig,
queryDeduplication: this.queryDeduplication,
fragmentMatcher: this.fragmentMatcher,
+ ssrMode: this.ssrMode,
});
}
diff --git a/src/core/QueryManager.ts b/src/core/QueryManager.ts
--- a/src/core/QueryManager.ts
+++ b/src/core/QueryManager.ts
@@ -132,6 +132,7 @@ export class QueryManager {
public scheduler: QueryScheduler;
public store: ApolloStore;
public networkInterface: NetworkInterface;
+ public ssrMode: boolean;
private addTypename: boolean;
private deduplicator: Deduplicator;
@@ -177,6 +178,7 @@ export class QueryManager {
fragmentMatcher,
addTypename = true,
queryDeduplication = false,
+ ssrMode = false,
}: {
networkInterface: NetworkInterface,
store: ApolloStore,
@@ -185,6 +187,7 @@ export class QueryManager {
reducerConfig?: ApolloReducerConfig,
addTypename?: boolean,
queryDeduplication?: boolean,
+ ssrMode?: boolean,
}) {
// XXX this might be the place to do introspection for inserting the `id` into the query? or
// is that the network interface?
@@ -198,6 +201,7 @@ export class QueryManager {
this.queryDocuments = {};
this.addTypename = addTypename;
this.queryDeduplication = queryDeduplication;
+ this.ssrMode = ssrMode;
// XXX This logic is duplicated in ApolloClient.ts for two reasons:
// 1. we need it in ApolloClient.ts for readQuery and readFragment of the data proxy.
diff --git a/src/scheduler/scheduler.ts b/src/scheduler/scheduler.ts
--- a/src/scheduler/scheduler.ts
+++ b/src/scheduler/scheduler.ts
@@ -80,6 +80,11 @@ export class QueryScheduler {
throw new Error('Attempted to start a polling query without a polling interval.');
}
+ if (this.queryManager.ssrMode) {
+ // Do not poll in SSR mode
+ return queryId;
+ }
+
this.registeredQueries[queryId] = options;
if (listener) {
| diff --git a/test/QueryManager.ts b/test/QueryManager.ts
--- a/test/QueryManager.ts
+++ b/test/QueryManager.ts
@@ -1711,6 +1711,7 @@ describe('QueryManager', () => {
});
describe('polling queries', () => {
+
it('allows you to poll queries', () => {
const query = gql`
query fetchLuke($id: String) {
@@ -1735,6 +1736,7 @@ describe('QueryManager', () => {
name: 'Luke Skywalker has a new name',
},
};
+
const queryManager = mockQueryManager(
{
request: { query, variables },
@@ -1759,6 +1761,81 @@ describe('QueryManager', () => {
});
+ it('does not poll during SSR', (done) => {
+ const query = gql`
+ query fetchLuke($id: String) {
+ people_one(id: $id) {
+ name
+ }
+ }
+ `;
+
+ const variables = {
+ id: '1',
+ };
+
+ const data1 = {
+ people_one: {
+ name: 'Luke Skywalker',
+ },
+ };
+
+ const data2 = {
+ people_one: {
+ name: 'Luke Skywalker has a new name',
+ },
+ };
+
+ const queryManager = new QueryManager({
+ networkInterface: mockNetworkInterface({
+ request: { query, variables },
+ result: { data: data1 },
+ },
+ {
+ request: { query, variables },
+ result: { data: data2 },
+ },
+ {
+ request: { query, variables },
+ result: { data: data2 },
+ }),
+ store: createApolloStore(),
+ reduxRootSelector: defaultReduxRootSelector,
+ addTypename: false,
+ ssrMode: true,
+ });
+
+ const observable = queryManager.watchQuery<any>({
+ query,
+ variables,
+ pollInterval: 10,
+ notifyOnNetworkStatusChange: false,
+ });
+
+ let count = 1;
+ let doneCalled = false;
+ const subHandle = observable.subscribe({
+ next: (result: any) => {
+ switch (count) {
+ case 1:
+ assert.deepEqual(result.data, data1);
+ setTimeout(() => {
+ subHandle.unsubscribe();
+ if (!doneCalled) {
+ done();
+ }
+ }, 15);
+ count++;
+ break;
+ case 2:
+ default:
+ doneCalled = true;
+ done(new Error('Only expected one result, not multiple'));
+ }
+ },
+ });
+ });
+
it('should let you handle multiple polled queries and unsubscribe from one of them', (done) => {
const query1 = gql`
query {
| Unhandled promise rejection when error in polling query on SSR
**How to reproduce the issue:**
1. Set up SSR
2. Set up query polling:
```js
export default graphql('query Samples { ... }', {
options: {
pollInterval: 5000,
forceFetch: true,
notifyOnNetworkStatusChange: true
}
})(PageLabSamples);
```
3. Throw an error in the `Samples` query's resolver.
**Intended outcome:**
Error thrown once.
**Actual outcome:**
Error thrown twice: once for SSR and again after the polling interval. The problem is that the second throw is not catchable, so it shuts down the whole server.
apollo-client 0.9.0
| The only "fix" for this I've came up is to turn off polling on SSR:
```js
const isBrowser = typeof window !== 'undefined';
export default graphql(SamplesQuery, {
options: function() {
return isBrowser
? {
pollInterval: 5000,
forceFetch: true,
notifyOnNetworkStatusChange: true
}
: {};
}
})(PageLabSamples);
```
Maybe it's valid fix for whole apollo-client? 😄
We should not be polling when server-side rendering, especially considering that the React components don’t even mount.
See https://github.com/apollographql/react-apollo/pull/493 which should fix the issue. Tell me if that change solves it for you.
@DimitryDushkin Could you make a PR that checks the `ssrMode` flag before starting to poll, and just not poll when it's `true`? That way this problem could be solved internally to Apollo Client.
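For reference, a minimal sketch of that check (the patch above takes essentially this approach, threading `ssrMode` from the `ApolloClient` constructor through the `QueryManager`):

```js
// in QueryScheduler#startPollingQuery: bail out before registering the interval
if (this.queryManager.ssrMode) {
  return queryId; // do not poll in SSR mode
}
```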
@helfer Sure, I'll try to make a PR this week. | 2017-05-05T23:31:57Z | 1.2 |
apollographql/apollo-client | 1,540 | apollographql__apollo-client-1540 | [
"1524"
] | d09e57335499d531cbc2ed741fda703d36be98ff | diff --git a/src/core/QueryManager.ts b/src/core/QueryManager.ts
--- a/src/core/QueryManager.ts
+++ b/src/core/QueryManager.ts
@@ -989,7 +989,7 @@ export class QueryManager {
if (queryOptions.reducer) {
return createStoreReducer(
queryOptions.reducer,
- queryOptions.query,
+ this.addTypename ? addTypenameToDocument(queryOptions.query) : queryOptions.query,
query.variables || {},
this.reducerConfig,
);
| diff --git a/test/mutationResults.ts b/test/mutationResults.ts
--- a/test/mutationResults.ts
+++ b/test/mutationResults.ts
@@ -13,33 +13,58 @@ import { ObservableQuery } from '../src/core/ObservableQuery';
import gql from 'graphql-tag';
describe('mutation results', () => {
+
const query = gql`
query todoList {
- __typename
todoList(id: 5) {
- __typename
id
todos {
id
- __typename
text
completed
}
filteredTodos: todos(completed: true) {
id
- __typename
text
completed
}
}
noIdList: todoList(id: 6) {
- __typename
id
todos {
+ text
+ completed
+ }
+ }
+ }
+ `;
+
+ const queryWithTypename = gql`
+ query todoList {
+ todoList(id: 5) {
+ id
+ todos {
+ id
+ text
+ completed
__typename
+ }
+ filteredTodos: todos(completed: true) {
+ id
text
completed
+ __typename
}
+ __typename
+ }
+ noIdList: todoList(id: 6) {
+ id
+ todos {
+ text
+ completed
+ __typename
+ }
+ __typename
}
}
`;
@@ -192,7 +217,7 @@ describe('mutation results', () => {
function setupObsHandle(...mockedResponses: any[]) {
networkInterface = mockNetworkInterface({
- request: { query },
+ request: { query: queryWithTypename },
result,
}, ...mockedResponses);
@@ -215,7 +240,7 @@ describe('mutation results', () => {
function setupDelayObsHandle(delay: number, ...mockedResponses: any[]) {
networkInterface = mockNetworkInterface({
- request: { query },
+ request: { query: queryWithTypename },
result,
delay,
}, ...mockedResponses);
@@ -394,6 +419,7 @@ describe('mutation results', () => {
}, {
request: { query: mutation},
result: mutationResult,
+ delay: 5,
}, {
request: { query: queryWithVars, variables: { id: 6 } },
result: result6,
@@ -439,7 +465,7 @@ describe('mutation results', () => {
subscription.unsubscribe();
// The reducer should have been called twice
- assert.equal(counter, 3);
+ assert.equal(counter, 4);
// But there should be one more todo item than before, because variables only matched once
assert.equal(newResult.data.todoList.todos.length, 4);
@@ -688,7 +714,7 @@ describe('mutation results', () => {
// The resolver doesn't actually run.
function setupReducerObsHandle(...mockedResponses: any[]) {
networkInterface = mockNetworkInterface({
- request: { query },
+ request: { query: queryWithTypename },
result,
delay: 30,
}, ...mockedResponses);
@@ -750,7 +776,7 @@ describe('mutation results', () => {
it('does not swallow errors', done => {
client = new ApolloClient({
networkInterface: mockNetworkInterface({
- request: { query },
+ request: { query: queryWithTypename },
result,
}),
});
@@ -984,7 +1010,7 @@ describe('mutation results', () => {
request: { query: mutation },
result: {errors: [new Error('mock error')]},
}, {
- request: { query },
+ request: { query: queryWithTypename },
result,
});
@@ -1435,7 +1461,7 @@ describe('mutation results', () => {
request: { query: mutation },
result: {errors: [new Error('mock error')]},
}, {
- request: { query },
+ request: { query: queryWithTypename },
result,
});
| Getting Store Errors after upgrading to v1.0.0
Since upgrading from a previous release candidate (rc6), I am now getting errors when running _**ALL**_ of my create/delete mutations; regardless of what data they mutate, I always get the same error:
```
Allergen.jsx:38 Error: Error: Store error: the application attempted to write an object with no provided id but the store already contains an id of Site:developer for this object.
at writeFieldToStore (http://localhost:3000/packages/modules.js?hash=c1dea742144f5f0b244684fb3a95fddf714f326e:49816:23)
at http://localhost:3000/packages/modules.js?hash=c1dea742144f5f0b244684fb3a95fddf714f326e:49709:17
at Array.forEach (native)
at writeSelectionSetToStore (http://localhost:3000/packages/modules.js?hash=c1dea742144f5f0b244684fb3a95fddf714f326e:49703:29)
at writeResultToStore (http://localhost:3000/packages/modules.js?hash=c1dea742144f5f0b244684fb3a95fddf714f326e:49688:12)
at http://localhost:3000/packages/modules.js?hash=c1dea742144f5f0b244684fb3a95fddf714f326e:51277:20
at http://localhost:3000/packages/modules.js?hash=c1dea742144f5f0b244684fb3a95fddf714f326e:50464:34
at Array.forEach (native)
at data (http://localhost:3000/packages/modules.js?hash=c1dea742144f5f0b244684fb3a95fddf714f326e:50463:43)
at apolloReducer (http://localhost:3000/packages/modules.js?hash=c1dea742144f5f0b244684fb3a95fddf714f326e:50675:23)
(anonymous) @ Allergen.jsx:38
(anonymous) @ meteor.js?hash=27829e9…:1105
```
In this particular instance I was attempting to delete an `Allergen`.
For reference here is the relevant client-side code:
```jsx
const Allergen = ({ ..., removeAllergen }) => {
const onDelete = () => {
bootbox.confirm(`Are you sure that you wish to delete allergen '${allergen.name}'?`,
(result) => {
if (!result) return;
removeAllergen({ id: allergen._id }).catch((methodError) => {
toastr.error('An error occurred when attempting to delete the allergen');
console.error('Error:', methodError);
});
},
);
};
...
};
const removeAllergen = gql`
mutation removeAllergen($id: String!) {
removeAllergen(id: $id)
}
`;
const AllergenContainer = compose(
graphql(removeAllergen, {
props({ mutate }) {
return {
removeAllergen({ id }) {
return mutate({
variables: { id },
});
},
};
},
}),
)(Allergen);
export default AllergenContainer;
```
and the server-side mutation:
```js
export const schema = [`
type Allergen {
_id: String!
createdAt: Date
updatedAt: Date
site: Site!
siteId: String!
name: String!
}
`];
export const mutations = `
# Deletes the specified allergen
removeAllergen(id: String!): String
`;
export const mutationResolvers = {
removeAllergen(root, { id }, context) {
if (!context.user) throw new Meteor.Error('allergens.removeAllergen.login', 'Must be logged in.');
const allergen = Allergens.findOne({ _id: id });
if (!allergen) throw new Meteor.Error('allergens.removeAllergen.notFound', 'The specified allergen does not exist');
userIsOwner(allergen, context.user);
// Remove the allergen from the database and return the id of the removed document
const docsRemoved = Allergens.remove({ _id: id });
return docsRemoved === 1 ? id : null;
},
};
```
| Rolling `apollo-client` back to `v1.0.0-rc.6` makes it all work again... I'll continue testing the versions in between to try to narrow the issue down...
However I can confirm that `v1.0.0-rc.7` breaks all my mutations. So the issue was likely introduced there.
I'm starting to think that it might be related to the `reducer` functions, so for completeness I'll add them too:
```jsx
import update from 'immutability-helper';
const dataQuery = gql`
query getAllergenInfo {
allergens {
_id
siteId
name
}
site {
_id
siteSettings {
allergenPrefix
}
}
}
`;
const AllergensPageContainer = compose(
graphql(dataQuery, {
props: ({ data }) => {
if (data.loading) return { loading: true };
if (data.error) return { error: data.error };
return {
allergens: data.allergens,
site: data.site,
};
},
options() {
return {
reducer: (previousResult, action) => {
if (action.type === 'APOLLO_MUTATION_RESULT' && action.operationName === 'insertAllergen') {
const newAllergen = action.result.data.insertAllergen;
return update(previousResult, {
allergens: {
$unshift: [newAllergen],
},
});
} else if (action.type === 'APOLLO_MUTATION_RESULT' && action.operationName === 'removeAllergen') {
const removedId = action.result.data.removeAllergen;
if (!_.isString(removedId)) return previousResult;
const index = _.findIndex(previousResult.allergens, { _id: removedId });
if (index === -1) return previousResult;
return update(previousResult, {
allergens: {
$splice: [[index, 1]],
},
});
}
return previousResult;
},
};
},
}),
)(AllergensPage);
```
@Siyfion could you provide a reproduction using react-apollo-error-template? That would help us find and fix the issue faster.
I saw a similar issue upgrading a client app to 1.0.0. I don't have time to give a good repro, but I thought I'd mention our code uses `reducer` code very similar to @Siyfion's, in case that helps. We were at rc6 when we found the issue and have set the upgrade aside for the time being.
@rdickert I'm assuming you're also using `meteor-integration`, right?
I've tried to help @Siyfion solve this issue; here are the *assumptions* that came out of our discussion (*assumptions, as my knowledge of the internals of `apollo-client` is limited*).
The error happens when updating the store after a mutation succeeded: part of the reducer output conflicts with what is already there in the store, resulting in the error `Store error: ...`.
Looking at [the code related to the error](https://github.com/apollographql/apollo-client/blob/6b6e8ded1e0f83cb134d2261a3cf7d2d9416400f/src/data/writeToStore.ts#L297-L316), we are in the case of a generated id is already in place for the new id we want to insert. The "funny thing" is that, in the case of the issue raised, the id causing trouble isn't the added/removed result, it's an id of another type already present :sweat_smile:.
The query asks for `allergens` & `site`, the mutation updates `allergens`, the error is on `site` ([screenshot of a breakpoint just before the error](https://cl.ly/2D3P460Q2K0X)):
- the `escapedId`, already present, is `Site:developer` (`Site` is the typename, `developer` is the id);
- the new `storeValue` is generated, it's `$ROOT_QUERY.site`.
> Could the problem come from an inconsistency in the way the data corresponding to `site` is requested?
The query asks for `site` on the root, but also on the allergen with `siteId`. `siteId` being resolved as:
```js
export const resolvers = {
Allergen: {
site: ({ site }) => Sites.findOne({ _id: site }),
siteId: _.property('site'),
},
};
```
I don't see anything in the changelog that could explain why the version upgrade triggers that kind of behavior. As for the `meteor-integration`, the only real thing it does is plug a new value into the headers on every request sent, so I don't exactly see how it could cause the issue.
Hope it helps!
Just to confirm: the issue still exists in `v1.0.1`, and I believe @SachaG is also suffering from it.
@xavcz so the error is that your mutation somehow also returns a site that "conflicts" with a site already in the cache?
@wmertens The mutation doesn't return anything at all to do with a `Site`; in fact, I get the error when dealing with the `removeAllergen` mutation, which only returns a simple `String`.
I can confirm I've been running into pretty clear-cut issues with mutation updates. I'm not using `meteor-integration` btw.
I hit this error so I did 2 things:
1. I added the `addTypename` attr to my ApolloClient constructor
2. I specified __typename in all my relevant queries that the mutation touched
The fact that I had to do those things seemed weird, but bigger fish to fry on my end.
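For illustration, a sketch of those two steps (the query shape is borrowed from the report above; treat the exact fields as an assumption):

```js
// 1. enable the typename transform on the client (it is the default in 1.x)
const client = new ApolloClient({ networkInterface, addTypename: true });

// 2. request __typename explicitly in queries the mutation's reducer touches
const dataQuery = gql`
  query getAllergenInfo {
    allergens {
      __typename
      _id
      siteId
      name
    }
  }
`;
```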
Hi, I have this error after migrating to v1.0.1:
```Network error: Store error: the application attempted to write an object with no provided id but the store already contains an id of UserNodeVXNlck5vZGU6MTUwNjA= for this object.```
Can someone help me?
Thank you!
Thanks all! A reproduction with react-apollo-error-template would really help here, but I'll see if I can figure out what's going on without it.
I can confirm @abhiaiyer91's fix seems to work for me using 1.0.1.
Ok, after thinking for a bit and looking at the code, I'm fairly sure I know what's going on. The issue is that `reducer` takes its query from `observableQuery.options.query`, which does not yet have the `addTypename` transform applied.
The workaround is to add `__typename` to all selection sets in queries that have a reducer. I'm not sure if `updateQueries` has the same issue (it's not trivial to check), but `update` doesn't, because I recently fixed it (I think).
The quick solution would be to apply the `addTypename` transform every time the reducer runs (it's pretty cheap).
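For illustration, a sketch of that quick fix in `QueryManager.getExtraReducers`, mirroring the patch above:

```js
if (queryOptions.reducer) {
  return createStoreReducer(
    queryOptions.reducer,
    // run the reducer against the same transformed document the store was written with
    this.addTypename ? addTypenameToDocument(queryOptions.query) : queryOptions.query,
    query.variables || {},
    this.reducerConfig,
  );
}
```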
A slightly longer term solution might be to just apply `addTypename` as soon as the document enters ApolloClient through `query` or `watchQuery`, as we do for `mutate`.
In the long term, we'd like to apply the transform only where it's really necessary, and return only the selections that the user originally requested, not including `__typename` if it wasn't asked for.
PS: It's interesting that this only showed up after we created a default `dataIdFromObject` function. I would have assumed that such a problem would show up much sooner. I guess it shows that most people weren't using dataIdFromObject.
apollographql/apollo-client | 1,661 | apollographql__apollo-client-1661 | [
"1551"
] | 2723963239e978e6fe7907fabe01a4c007fd3c82 | diff --git a/src/data/readFromStore.ts b/src/data/readFromStore.ts
--- a/src/data/readFromStore.ts
+++ b/src/data/readFromStore.ts
@@ -275,7 +275,8 @@ function addPreviousResultToIdValues (value: any, previousResult: any): any {
// using the private `ID_KEY` property that is added in `resultMapper`.
if (Array.isArray(previousResult)) {
previousResult.forEach(item => {
- if (item[ID_KEY]) {
+ // item can be null
+ if (item && item[ID_KEY]) {
idToPreviousResult[item[ID_KEY]] = item;
}
});
| diff --git a/test/QueryManager.ts b/test/QueryManager.ts
--- a/test/QueryManager.ts
+++ b/test/QueryManager.ts
@@ -1201,6 +1201,24 @@ describe('QueryManager', () => {
});
});
+ it('can handle null values in arrays (#1551)', (done) => {
+ const query = gql`{ list { value } }`;
+ const data = { list: [ null, { value: 1 } ] };
+ const queryManager = mockQueryManager({
+ request: { query },
+ result: { data },
+ });
+ const observable = queryManager.watchQuery({ query });
+
+ observable.subscribe({
+ next: (result) => {
+ assert.deepEqual(result.data, data);
+ assert.deepEqual(observable.currentResult().data, data);
+ done();
+ },
+ });
+ });
+
it('deepFreezes results in development mode', () => {
const query = gql`{ stuff }`;
const data = { stuff: 'wonderful' };
diff --git a/test/roundtrip.ts b/test/roundtrip.ts
--- a/test/roundtrip.ts
+++ b/test/roundtrip.ts
@@ -56,6 +56,21 @@ describe('roundtrip', () => {
});
});
+ it('array with null values (#1551)', () => {
+ storeRoundtrip(gql`
+ {
+ list {
+ value
+ }
+ }
+ `, {
+ list: [
+ null,
+ { value: 1 },
+ ],
+ });
+ });
+
it('enum arguments', () => {
storeRoundtrip(gql`
{
| Query result not delivered to component if array contains null values
I have an issue where my query works in GraphiQL in my browser, but won't work for me in my React Native app.
This is a condensed version of the query:
``` query getTeam($teamId: String!, $gameweek: Int!) {
team(_id: $teamId) {
_id
history {
score
}
}
```
If I don't request `history` it works for me in RN too. When I do request `history`, I don't get any data back from the server for `team`. The server is receiving the request and sending back data, according to my server console logs.
And the definition of team (condensed) is:
```
type Team {
_id: ID
history: [FixtureHistory]
}
type FixtureHistory {
lineup: [String]
subs: [String]
score: Int
finalLineup: [String]
finalSubs: [String]
autoSubs: [AutoSub]
players: [Player]
}
```
Any idea why this works fine in GraphiQL but not with Apollo Client on React Native?
| Here's a gist of the code: https://gist.github.com/elie222/cb1b594e3b91f7d860454c5a9594823e
What do you mean by "doesn't work"? Does it simply silently fail? What props does your component receive, if any? Are any errors or warnings printed?
This is the result I receive when I request `history`:
<img width="177" alt="screen shot 2017-04-05 at 23 30 42" src="https://cloud.githubusercontent.com/assets/3090527/24749043/4d944222-1aca-11e7-96a6-3fcea4a7e5be.png">
When I don't request `history` I receive all the props as expected (`team`, `players`, ...).
And the request always works fine with graphiql.
This is the graphiql query (and works fine):
```
query getTeam($teamId: String!) {
team(_id: $teamId) {
_id
name
userId
playerIds
history {
lineup
subs
score
finalLineup
finalSubs
autoSubs {
in
out
}
}
}
}
```
And the result:
```
{
"data": {
"team": {
"_id": "2rMRFYpdeeDBLT825",
"name": "sdasdad",
"userId": "oetzPCTWihKBgDqdk",
"playerIds": [
"43dqNSSs9wWgBEpkf",
"cGxPc4Rfskr3vYGHm",
"4Lawd6McmeDK8chaa",
"3uMZoMiSgn7ANrvLd",
"FqXwEiGQmsunWWyx3",
"r2qvzLRwEW5qTSuHm",
"Cm5neKi3XEpdb3cEF",
"xRQTh6WLc3RpvbeHQ",
"o6f8weMiardSknd5x",
"rdGfYHtp87CG2YWR4",
"YJnw5TTc6LxH3634h",
"5rucfgAirfgXhLWac",
"oQKt3ims4J8YHPGbY",
"JMPut748RhvkbKHGn",
"RXjwt93bBKweiuFXm"
],
"history": [
null,
null,
null,
null,
null,
null,
null,
null,
null,
null,
null,
null,
null,
null,
null,
null,
null,
null,
null,
null,
null,
null,
null,
null,
null,
{
"lineup": [
"RXjwt93bBKweiuFXm",
"4Lawd6McmeDK8chaa",
"5rucfgAirfgXhLWac",
"JMPut748RhvkbKHGn",
"3uMZoMiSgn7ANrvLd",
"43dqNSSs9wWgBEpkf",
"Cm5neKi3XEpdb3cEF",
"FqXwEiGQmsunWWyx3",
"YJnw5TTc6LxH3634h",
"o6f8weMiardSknd5x",
"rdGfYHtp87CG2YWR4"
],
"subs": [
"cGxPc4Rfskr3vYGHm",
"r2qvzLRwEW5qTSuHm",
"xRQTh6WLc3RpvbeHQ",
"oQKt3ims4J8YHPGbY"
],
"score": 32,
"finalLineup": [
"RXjwt93bBKweiuFXm",
"4Lawd6McmeDK8chaa",
"5rucfgAirfgXhLWac",
"JMPut748RhvkbKHGn",
"3uMZoMiSgn7ANrvLd",
"43dqNSSs9wWgBEpkf",
"Cm5neKi3XEpdb3cEF",
"FqXwEiGQmsunWWyx3",
"YJnw5TTc6LxH3634h",
"o6f8weMiardSknd5x",
"rdGfYHtp87CG2YWR4"
],
"finalSubs": [
"cGxPc4Rfskr3vYGHm",
"r2qvzLRwEW5qTSuHm",
"xRQTh6WLc3RpvbeHQ",
"oQKt3ims4J8YHPGbY"
],
"autoSubs": []
},
{
"lineup": [
"RXjwt93bBKweiuFXm",
"4Lawd6McmeDK8chaa",
"5rucfgAirfgXhLWac",
"JMPut748RhvkbKHGn",
"3uMZoMiSgn7ANrvLd",
"43dqNSSs9wWgBEpkf",
"Cm5neKi3XEpdb3cEF",
"FqXwEiGQmsunWWyx3",
"YJnw5TTc6LxH3634h",
"o6f8weMiardSknd5x",
"rdGfYHtp87CG2YWR4"
],
"subs": [
"cGxPc4Rfskr3vYGHm",
"r2qvzLRwEW5qTSuHm",
"xRQTh6WLc3RpvbeHQ",
"oQKt3ims4J8YHPGbY"
],
"score": 40,
"finalLineup": null,
"finalSubs": null,
"autoSubs": null
},
{
"lineup": [
"RXjwt93bBKweiuFXm",
"4Lawd6McmeDK8chaa",
"5rucfgAirfgXhLWac",
"JMPut748RhvkbKHGn",
"3uMZoMiSgn7ANrvLd",
"43dqNSSs9wWgBEpkf",
"Cm5neKi3XEpdb3cEF",
"FqXwEiGQmsunWWyx3",
"YJnw5TTc6LxH3634h",
"o6f8weMiardSknd5x",
"rdGfYHtp87CG2YWR4"
],
"subs": [
"cGxPc4Rfskr3vYGHm",
"r2qvzLRwEW5qTSuHm",
"xRQTh6WLc3RpvbeHQ",
"oQKt3ims4J8YHPGbY"
],
"score": 64,
"finalLineup": [
"RXjwt93bBKweiuFXm",
"4Lawd6McmeDK8chaa",
"5rucfgAirfgXhLWac",
"JMPut748RhvkbKHGn",
"3uMZoMiSgn7ANrvLd",
"43dqNSSs9wWgBEpkf",
"Cm5neKi3XEpdb3cEF",
"FqXwEiGQmsunWWyx3",
"YJnw5TTc6LxH3634h",
"o6f8weMiardSknd5x",
"rdGfYHtp87CG2YWR4"
],
"finalSubs": [
"cGxPc4Rfskr3vYGHm",
"r2qvzLRwEW5qTSuHm",
"xRQTh6WLc3RpvbeHQ",
"oQKt3ims4J8YHPGbY"
],
"autoSubs": []
},
{
"lineup": [
"RXjwt93bBKweiuFXm",
"4Lawd6McmeDK8chaa",
"5rucfgAirfgXhLWac",
"JMPut748RhvkbKHGn",
"3uMZoMiSgn7ANrvLd",
"43dqNSSs9wWgBEpkf",
"Cm5neKi3XEpdb3cEF",
"FqXwEiGQmsunWWyx3",
"YJnw5TTc6LxH3634h",
"o6f8weMiardSknd5x",
"rdGfYHtp87CG2YWR4"
],
"subs": [
"cGxPc4Rfskr3vYGHm",
"r2qvzLRwEW5qTSuHm",
"xRQTh6WLc3RpvbeHQ",
"oQKt3ims4J8YHPGbY"
],
"score": 12,
"finalLineup": [
"RXjwt93bBKweiuFXm",
"4Lawd6McmeDK8chaa",
"5rucfgAirfgXhLWac",
"JMPut748RhvkbKHGn",
"3uMZoMiSgn7ANrvLd",
"43dqNSSs9wWgBEpkf",
"Cm5neKi3XEpdb3cEF",
"FqXwEiGQmsunWWyx3",
"YJnw5TTc6LxH3634h",
"oQKt3ims4J8YHPGbY",
"rdGfYHtp87CG2YWR4"
],
"finalSubs": [
"cGxPc4Rfskr3vYGHm",
"r2qvzLRwEW5qTSuHm",
"xRQTh6WLc3RpvbeHQ",
"o6f8weMiardSknd5x"
],
"autoSubs": [
{
"in": "oQKt3ims4J8YHPGbY",
"out": "o6f8weMiardSknd5x"
}
]
},
{
"lineup": [
"RXjwt93bBKweiuFXm",
"43dqNSSs9wWgBEpkf",
"r2qvzLRwEW5qTSuHm",
"FqXwEiGQmsunWWyx3",
"JMPut748RhvkbKHGn",
"4Lawd6McmeDK8chaa",
"oQKt3ims4J8YHPGbY",
"xRQTh6WLc3RpvbeHQ",
"o6f8weMiardSknd5x",
"YJnw5TTc6LxH3634h",
"rdGfYHtp87CG2YWR4"
],
"subs": [
"cGxPc4Rfskr3vYGHm",
"5rucfgAirfgXhLWac",
"3uMZoMiSgn7ANrvLd",
"Cm5neKi3XEpdb3cEF"
],
"score": 42,
"finalLineup": [
"RXjwt93bBKweiuFXm",
"43dqNSSs9wWgBEpkf",
"r2qvzLRwEW5qTSuHm",
"FqXwEiGQmsunWWyx3",
"JMPut748RhvkbKHGn",
"4Lawd6McmeDK8chaa",
"oQKt3ims4J8YHPGbY",
"5rucfgAirfgXhLWac",
"o6f8weMiardSknd5x",
"YJnw5TTc6LxH3634h",
"rdGfYHtp87CG2YWR4"
],
"finalSubs": [
"cGxPc4Rfskr3vYGHm",
"xRQTh6WLc3RpvbeHQ",
"3uMZoMiSgn7ANrvLd",
"Cm5neKi3XEpdb3cEF"
],
"autoSubs": [
{
"in": "5rucfgAirfgXhLWac",
"out": "xRQTh6WLc3RpvbeHQ"
}
]
},
{
"lineup": [
"RXjwt93bBKweiuFXm",
"43dqNSSs9wWgBEpkf",
"r2qvzLRwEW5qTSuHm",
"FqXwEiGQmsunWWyx3",
"JMPut748RhvkbKHGn",
"4Lawd6McmeDK8chaa",
"oQKt3ims4J8YHPGbY",
"xRQTh6WLc3RpvbeHQ",
"o6f8weMiardSknd5x",
"YJnw5TTc6LxH3634h",
"rdGfYHtp87CG2YWR4"
],
"subs": [
"cGxPc4Rfskr3vYGHm",
"5rucfgAirfgXhLWac",
"3uMZoMiSgn7ANrvLd",
"Cm5neKi3XEpdb3cEF"
],
"score": null,
"finalLineup": null,
"finalSubs": null,
"autoSubs": null
},
{
"lineup": [
"RXjwt93bBKweiuFXm",
"43dqNSSs9wWgBEpkf",
"r2qvzLRwEW5qTSuHm",
"FqXwEiGQmsunWWyx3",
"JMPut748RhvkbKHGn",
"4Lawd6McmeDK8chaa",
"oQKt3ims4J8YHPGbY",
"xRQTh6WLc3RpvbeHQ",
"o6f8weMiardSknd5x",
"YJnw5TTc6LxH3634h",
"rdGfYHtp87CG2YWR4"
],
"subs": [
"cGxPc4Rfskr3vYGHm",
"5rucfgAirfgXhLWac",
"3uMZoMiSgn7ANrvLd",
"Cm5neKi3XEpdb3cEF"
],
"score": null,
"finalLineup": null,
"finalSubs": null,
"autoSubs": null
},
{
"lineup": [
"RXjwt93bBKweiuFXm",
"43dqNSSs9wWgBEpkf",
"r2qvzLRwEW5qTSuHm",
"FqXwEiGQmsunWWyx3",
"JMPut748RhvkbKHGn",
"4Lawd6McmeDK8chaa",
"oQKt3ims4J8YHPGbY",
"xRQTh6WLc3RpvbeHQ",
"o6f8weMiardSknd5x",
"YJnw5TTc6LxH3634h",
"rdGfYHtp87CG2YWR4"
],
"subs": [
"cGxPc4Rfskr3vYGHm",
"5rucfgAirfgXhLWac",
"3uMZoMiSgn7ANrvLd",
"Cm5neKi3XEpdb3cEF"
],
"score": null,
"finalLineup": null,
"finalSubs": null,
"autoSubs": null
},
{
"lineup": [
"RXjwt93bBKweiuFXm",
"43dqNSSs9wWgBEpkf",
"r2qvzLRwEW5qTSuHm",
"FqXwEiGQmsunWWyx3",
"JMPut748RhvkbKHGn",
"4Lawd6McmeDK8chaa",
"oQKt3ims4J8YHPGbY",
"xRQTh6WLc3RpvbeHQ",
"o6f8weMiardSknd5x",
"YJnw5TTc6LxH3634h",
"rdGfYHtp87CG2YWR4"
],
"subs": [
"cGxPc4Rfskr3vYGHm",
"5rucfgAirfgXhLWac",
"3uMZoMiSgn7ANrvLd",
"Cm5neKi3XEpdb3cEF"
],
"score": null,
"finalLineup": null,
"finalSubs": null,
"autoSubs": null
}
]
}
}
}
```
I even created a `teamHistory(teamId: $teamId)` query just now; all it does is return the history information, but again, it only works in GraphiQL and not with Apollo Client.
I've faced the same problem (in the browser, not in RN) with a query resulting in nested arrays. The client gets the JSON result back; however, the result is empty (as seen from react-apollo). No errors. If I modify the query and remove the nested arrays, it works. And the query with nested arrays works in GraphiQL.
@helfer this looks like the same bug that I still need to create a test case for :D https://github.com/apollographql/apollo-client/issues/1389
Did you find a workaround for it?
If I add an id to the nested query, would that help?
In our case, we discovered today that running the graphql HoC twice on a single component with two different queries caused this to happen.
@elie222 definitely try to add an `id` to the nested queries. I intermittently had failed queries (outlined in issue #1518) because the cache couldn't store data without an id. Its error is being swallowed as well, so you're not alerted to the issue.
Well, I think that is different. If you don't have an ID for a nested type, and you have specified `dataIdFromObject`, it will try to normalize the object under some sort of `undefined` permutation... which means it will probably try to write different data over and over at the same index.
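For illustration, a hedged `dataIdFromObject` that only normalizes objects actually carrying an id (the `_id`/`__typename` fields are assumptions based on the schema in this thread):

```js
const client = new ApolloClient({
  dataIdFromObject: (obj) => {
    if (obj._id != null && obj.__typename) {
      return `${obj.__typename}:${obj._id}`;
    }
    // returning null lets the cache fall back to a path-based key
    // instead of a broken "Type:undefined" id
    return null;
  },
});
```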
So adding an `_id` field didn't help at all unfortunately.
I'm dealing with an array of objects btw. These objects shouldn't have ids, but even when I added them, the error remains the same.
I created an error repo for the problem here:
https://github.com/elie222/react-apollo-error-template
If you don't ask for `history` in the query it works fine, but when you do request history, no document is returned to the client.
In my case the problem was related to the server response not having the exact shape of the query: sometimes objects were missing from the answer. However, the parsing errors were swallowed. This should have resulted in an error. Instead, things were loaded into the store as if everything was working, except I didn't get the data in my React graphql HoC.
BTW: I also tried implementing dataIdFromObject, but it didn't help.
I finally found the source of the error based on your comments @jagare. Note: this is specific to Apollo; everything works fine in GraphiQL and looks like it should work. It seems to be a bug in Apollo.
Apollo wasn't happy with me sending `null` in the array.
This works fine:
```
const teamData = {
_id: 1,
history: [
{},
{lineup: ['a', 'b', 'c']},
{lineup: ['a', 'b', 'c']},
]
}
```
This doesn't work:
```
const teamData = {
_id: 1,
history: [
null,
{lineup: ['a', 'b', 'c']},
{lineup: ['a', 'b', 'c']},
]
}
```
This is the schema we're talking about btw (and in the error repo I posted above):
```
const FixtureHistoryType = new GraphQLObjectType({
name: 'FixtureHistory',
fields: {
lineup: { type: new GraphQLList(GraphQLString) },
},
});
const TeamType = new GraphQLObjectType({
name: 'Team',
fields: {
_id: { type: GraphQLID },
history: { type: new GraphQLList(FixtureHistoryType) },
},
});
```
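While the client bug stood, one hedged stopgap was to strip the nulls on the server before they ever reach the cache (illustrative only; the resolver shape is assumed):

```js
const resolvers = {
  Team: {
    // drop null entries so the client never sees holes in the list
    history: (team) => (team.history || []).filter((entry) => entry != null),
  },
};
```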
A different issue I just hit with optimistic updates, again related to arrays of objects, and again a silent failure:
https://github.com/apollographql/apollo-client/issues/1566
I figured that one out at least.
Also experiencing a similar problem. I have multiple id fields which are null. It looks like apollo is automatically creating a reference hash using the null values (which breaks).
Thanks for the reproduction and detailed error report @elie222. I will look into it tomorrow and will try to fix it asap.
@elie222 I found the bug and fixed it, just have to write a test for it. I'll release it later today :tada: | 2017-05-05T21:52:27Z | 1.2 |
apollographql/apollo-client | 1,492 | apollographql__apollo-client-1492 | [
"947"
] | 8f9bb91c3145db36b9135ee3bb0048ea761ee860 | diff --git a/src/core/QueryManager.ts b/src/core/QueryManager.ts
--- a/src/core/QueryManager.ts
+++ b/src/core/QueryManager.ts
@@ -37,7 +37,10 @@ import {
import {
checkDocument,
getQueryDefinition,
+ getOperationDefinition,
getOperationName,
+ getDefaultValues,
+ getMutationDefinition,
} from '../queries/getFromAST';
import {
@@ -255,7 +258,8 @@ export class QueryManager {
mutation = addTypenameToDocument(mutation);
}
- checkDocument(mutation);
+ variables = Object.assign(getDefaultValues(getMutationDefinition(mutation)), variables);
+
const mutationString = print(mutation);
const request = {
query: mutation,
@@ -628,8 +632,15 @@ export class QueryManager {
}
- // Call just to get errors synchronously
- getQueryDefinition(options.query);
+ // get errors synchronously
+ const queryDefinition = getQueryDefinition(options.query);
+
+ // assign variable default values if supplied
+ if (queryDefinition.variableDefinitions && queryDefinition.variableDefinitions.length) {
+ const defaultValues = getDefaultValues(queryDefinition);
+
+ options.variables = Object.assign(defaultValues, options.variables);
+ }
if (typeof options.notifyOnNetworkStatusChange === 'undefined') {
options.notifyOnNetworkStatusChange = false;
@@ -818,13 +829,15 @@ export class QueryManager {
): Observable<any> {
const {
query,
- variables,
} = options;
let transformedDoc = query;
// Apply the query transformer if one has been provided.
if (this.addTypename) {
transformedDoc = addTypenameToDocument(transformedDoc);
}
+
+ const variables = Object.assign(getDefaultValues(getOperationDefinition(query)), options.variables);
+
const request: Request = {
query: transformedDoc,
variables,
@@ -854,7 +867,7 @@ export class QueryManager {
document: transformedDoc,
operationName: getOperationName(transformedDoc),
result: { data: result },
- variables: variables || {},
+ variables,
subscriptionId: subId,
extraReducers: this.getExtraReducers(),
});
diff --git a/src/data/readFromStore.ts b/src/data/readFromStore.ts
--- a/src/data/readFromStore.ts
+++ b/src/data/readFromStore.ts
@@ -20,6 +20,7 @@ import {
} from './storeUtils';
import {
+ getDefaultValues,
getQueryDefinition,
} from '../queries/getFromAST';
@@ -202,7 +203,9 @@ export function diffQueryAgainstStore({
config,
}: DiffQueryAgainstStoreOptions): DiffResult {
// Throw the right validation error by trying to find a query in the document
- getQueryDefinition(query);
+ const queryDefinition = getQueryDefinition(query);
+
+ variables = Object.assign(getDefaultValues(queryDefinition), variables);
const context: ReadStoreContext = {
// Global settings
diff --git a/src/data/storeUtils.ts b/src/data/storeUtils.ts
--- a/src/data/storeUtils.ts
+++ b/src/data/storeUtils.ts
@@ -47,7 +47,7 @@ function isEnumValue(value: ValueNode): value is EnumValueNode {
return value.kind === 'EnumValue';
}
-function valueToObjectRepresentation(argObj: any, name: NameNode, value: ValueNode, variables?: Object) {
+export function valueToObjectRepresentation(argObj: any, name: NameNode, value: ValueNode, variables?: Object) {
if (isIntValue(value) || isFloatValue(value)) {
argObj[name.value] = Number(value.value);
} else if (isBooleanValue(value) || isStringValue(value)) {
diff --git a/src/data/writeToStore.ts b/src/data/writeToStore.ts
--- a/src/data/writeToStore.ts
+++ b/src/data/writeToStore.ts
@@ -1,5 +1,6 @@
import {
+ getDefaultValues,
getOperationDefinition,
getQueryDefinition,
FragmentMap,
@@ -73,6 +74,8 @@ export function writeQueryToStore({
}): NormalizedCache {
const queryDefinition: OperationDefinitionNode = getQueryDefinition(query);
+ variables = Object.assign(getDefaultValues(queryDefinition), variables);
+
return writeSelectionSetToStore({
dataId: 'ROOT_QUERY',
result,
@@ -110,9 +113,12 @@ export function writeResultToStore({
}): NormalizedCache {
// XXX TODO REFACTOR: this is a temporary workaround until query normalization is made to work with documents.
- const selectionSet = getOperationDefinition(document).selectionSet;
+ const operationDefinition = getOperationDefinition(document);
+ const selectionSet = operationDefinition.selectionSet;
const fragmentMap = createFragmentMap(getFragmentDefinitions(document));
+ variables = Object.assign(getDefaultValues(operationDefinition), variables);
+
return writeSelectionSetToStore({
result,
dataId,
diff --git a/src/queries/getFromAST.ts b/src/queries/getFromAST.ts
--- a/src/queries/getFromAST.ts
+++ b/src/queries/getFromAST.ts
@@ -2,9 +2,14 @@ import {
DocumentNode,
OperationDefinitionNode,
FragmentDefinitionNode,
+ ValueNode,
} from 'graphql';
+import {
+ valueToObjectRepresentation,
+} from '../data/storeUtils';
+
export function getMutationDefinition(doc: DocumentNode): OperationDefinitionNode {
checkDocument(doc);
@@ -229,3 +234,24 @@ export function getFragmentQueryDocument(document: DocumentNode, fragmentName?:
return query;
}
+
+export function getDefaultValues(definition: OperationDefinitionNode): { [key: string]: any } {
+ if (definition.variableDefinitions && definition.variableDefinitions.length) {
+ const defaultValues = definition.variableDefinitions
+ .filter(({ defaultValue }) => defaultValue)
+ .map(({ variable, defaultValue }) : { [key: string]: any } => {
+ const defaultValueObj: { [key: string]: any } = {};
+ valueToObjectRepresentation(
+ defaultValueObj,
+ variable.name,
+ defaultValue as ValueNode,
+ );
+
+ return defaultValueObj;
+ });
+
+ return Object.assign({}, ...defaultValues);
+ }
+
+ return {};
+}
| diff --git a/test/ApolloClient.ts b/test/ApolloClient.ts
--- a/test/ApolloClient.ts
+++ b/test/ApolloClient.ts
@@ -103,6 +103,40 @@ describe('ApolloClient', () => {
});
});
+ it('will read some data from the store with default values', () => {
+ const client = new ApolloClient({
+ initialState: {
+ apollo: {
+ data: {
+ 'ROOT_QUERY': {
+ 'field({"literal":true,"value":-1})': 1,
+ 'field({"literal":false,"value":42})': 2,
+ },
+ },
+ },
+ },
+ });
+
+ assert.deepEqual(client.readQuery({
+ query: gql`query ($literal: Boolean, $value: Int = -1) {
+ a: field(literal: $literal, value: $value)
+ }`,
+ variables: {
+ literal: false,
+ value: 42,
+ },
+ }), { a: 2 });
+
+ assert.deepEqual(client.readQuery({
+ query: gql`query ($literal: Boolean, $value: Int = -1) {
+ a: field(literal: $literal, value: $value)
+ }`,
+ variables: {
+ literal: true,
+ },
+ }), { a: 1 });
+ });
+
describe('readFragment', () => {
it('will throw an error when there is no fragment', () => {
const client = new ApolloClient();
@@ -399,6 +433,46 @@ describe('ApolloClient', () => {
},
});
});
+
+ it('will write some data to the store with default values for variables', () => {
+ const client = new ApolloClient();
+
+ client.writeQuery({
+ data: {
+ a: 2,
+ },
+ query: gql`
+ query ($literal: Boolean, $value: Int = -1) {
+ a: field(literal: $literal, value: $value)
+ }
+ `,
+ variables: {
+ literal: true,
+ value: 42,
+ },
+ });
+
+ client.writeQuery({
+ data: {
+ a: 1,
+ },
+ query: gql`
+ query ($literal: Boolean, $value: Int = -1) {
+ a: field(literal: $literal, value: $value)
+ }
+ `,
+ variables: {
+ literal: false,
+ },
+ });
+
+ assert.deepEqual(client.store.getState().apollo.data, {
+ 'ROOT_QUERY': {
+ 'field({"literal":true,"value":42})': 2,
+ 'field({"literal":false,"value":-1})': 1,
+ },
+ });
+ });
});
describe('writeFragment', () => {
@@ -1148,3 +1222,4 @@ describe('ApolloClient', () => {
});
});
});
+
diff --git a/test/client.ts b/test/client.ts
--- a/test/client.ts
+++ b/test/client.ts
@@ -264,7 +264,123 @@ describe('client', () => {
},
};
- clientRoundrip(query, data);
+ clientRoundtrip(query, data);
+ });
+
+ it('should allow for a single query with complex default variables to take place', (done) => {
+ const query = gql`
+ query stuff($test: Input = {key1: ["value", "value2"], key2: {key3: 4}}) {
+ allStuff(test: $test) {
+ people {
+ name
+ }
+ }
+ }
+ `;
+
+ const result = {
+ allStuff: {
+ people: [
+ {
+ name: 'Luke Skywalker',
+ },
+ {
+ name: 'Jabba The Hutt',
+ },
+ ],
+ },
+ };
+
+ const variables = {test: { key1: ['value', 'value2'], key2: { key3: 4 } } };
+
+ const networkInterface = mockNetworkInterface({
+ request: { query, variables },
+ result: { data: result },
+ });
+
+ const client = new ApolloClient({
+ networkInterface,
+ addTypename: false,
+ });
+
+ const basic = client.query({ query, variables }).then((actualResult) => {
+ assert.deepEqual(actualResult.data, result);
+ });
+
+ const withDefault = client.query({ query }).then((actualResult) => {
+ assert.deepEqual(actualResult.data, result);
+ });
+
+ Promise.all([basic, withDefault]).then(res => {
+ done();
+ });
+ });
+
+ it('should allow for a single query with default values that get overridden with variables', (done) => {
+ const query = gql`
+ query people($first: Int = 1) {
+ allPeople(first: $first) {
+ people {
+ name
+ }
+ }
+ }
+ `;
+
+ const variables = { first: 1 };
+ const override = { first: 2 };
+
+ const result = {
+ allPeople: {
+ people: [
+ {
+ name: 'Luke Skywalker',
+ },
+ ],
+ },
+ };
+
+ const overriddenResult = {
+ allPeople: {
+ people: [
+ {
+ name: 'Luke Skywalker',
+ },
+ {
+ name: 'Jabba The Hutt',
+ },
+ ],
+ },
+ };
+
+ const networkInterface = mockNetworkInterface({
+ request: { query, variables },
+ result: { data: result },
+ }, {
+ request: { query, variables: override },
+ result: { data: overriddenResult },
+ });
+
+ const client = new ApolloClient({
+ networkInterface,
+ addTypename: false,
+ });
+
+ const basic = client.query({ query, variables }).then((actualResult) => {
+ assert.deepEqual(actualResult.data, result);
+ });
+
+ const withDefault = client.query({ query }).then((actualResult) => {
+ return assert.deepEqual(actualResult.data, result);
+ });
+
+ const withOverride = client.query({ query, variables: override }).then((actualResult) => {
+ return assert.deepEqual(actualResult.data, overriddenResult);
+ });
+
+ Promise.all([basic, withDefault, withOverride]).then(res => {
+ done();
+ });
});
it('should allow fragments on root query', () => {
@@ -290,7 +406,7 @@ describe('client', () => {
],
};
- clientRoundrip(query, data);
+ clientRoundtrip(query, data);
});
it('should allow for a single query with existing store', () => {
@@ -2155,7 +2271,7 @@ describe('client', () => {
});
});
-function clientRoundrip(
+function clientRoundtrip(
query: DocumentNode,
data: ExecutionResult,
variables?: any,
diff --git a/test/getFromAST.ts b/test/getFromAST.ts
--- a/test/getFromAST.ts
+++ b/test/getFromAST.ts
@@ -5,6 +5,7 @@ import {
getMutationDefinition,
createFragmentMap,
FragmentMap,
+ getDefaultValues,
getOperationName,
getFragmentQueryDocument,
} from '../src/queries/getFromAST';
@@ -332,4 +333,45 @@ describe('AST utility functions', () => {
);
});
});
+
+ describe('getDefaultValues', () => {
+ it('will create an empty variable object if no default values are provided', () => {
+ const basicQuery = gql`
+ query people($first: Int, $second: String) {
+ allPeople(first: $first) {
+ people {
+ name
+ }
+ }
+ }
+ `;
+
+ assert.deepEqual(getDefaultValues(getQueryDefinition(basicQuery)), {});
+ });
+
+ it('will create a variable object based on the definition node with default values', () => {
+ const basicQuery = gql`
+ query people($first: Int = 1, $second: String!) {
+ allPeople(first: $first) {
+ people {
+ name
+ }
+ }
+ }
+ `;
+
+ const complexMutation = gql`
+ mutation complexStuff($test: Input = {key1: ["value", "value2"], key2: {key3: 4}}) {
+ complexStuff(test: $test) {
+ people {
+ name
+ }
+ }
+ }
+ `;
+
+ assert.deepEqual(getDefaultValues(getQueryDefinition(basicQuery)), {first: 1});
+ assert.deepEqual(getDefaultValues(getMutationDefinition(complexMutation)), {test: {key1: ['value', 'value2'], key2: {key3: 4}}});
+ });
+ });
});
diff --git a/test/graphqlSubscriptions.ts b/test/graphqlSubscriptions.ts
--- a/test/graphqlSubscriptions.ts
+++ b/test/graphqlSubscriptions.ts
@@ -31,6 +31,8 @@ describe('GraphQL Subscriptions', () => {
let options: any;
let watchQueryOptions: any;
let sub2: any;
+ let defaultOptions: any;
+ let defaultSub1: any;
let commentsQuery: any;
let commentsVariables: any;
let commentsSub: any;
@@ -69,6 +71,34 @@ describe('GraphQL Subscriptions', () => {
},
};
+
+ defaultSub1 = {
+ request: {
+ query: gql`
+ subscription UserInfo($name: String = "Changping Chen") {
+ user(name: $name) {
+ name
+ }
+ }
+ `,
+ variables: {
+ name: 'Changping Chen',
+ },
+ },
+ id: 0,
+ results: [...results],
+ };
+
+ defaultOptions = {
+ query: gql`
+ subscription UserInfo($name: String = "Changping Chen") {
+ user(name: $name) {
+ name
+ }
+ }
+ `,
+ };
+
watchQueryOptions = {
query: gql`
query UserInfo($name: String) {
@@ -145,6 +175,32 @@ describe('GraphQL Subscriptions', () => {
it('should start a subscription on network interface and unsubscribe', (done) => {
+ const network = mockSubscriptionNetworkInterface([defaultSub1]);
+ // This test calls directly through Apollo Client
+ const client = new ApolloClient({
+ networkInterface: network,
+ addTypename: false,
+ });
+
+ const sub = client.subscribe(defaultOptions).subscribe({
+ next(result) {
+ assert.deepEqual(result, results[0].result);
+
+ // Test unsubscribing
+ sub.unsubscribe();
+ assert.equal(Object.keys(network.mockedSubscriptionsById).length, 0);
+
+ done();
+ },
+ });
+
+ const id = (sub as any)._networkSubscriptionId;
+ network.fireResult(id);
+
+ assert.equal(Object.keys(network.mockedSubscriptionsById).length, 1);
+ });
+
+ it('should subscribe with default values', (done) => {
const network = mockSubscriptionNetworkInterface([sub1]);
// This test calls directly through Apollo Client
const client = new ApolloClient({
diff --git a/test/mutationResults.ts b/test/mutationResults.ts
--- a/test/mutationResults.ts
+++ b/test/mutationResults.ts
@@ -1209,7 +1209,7 @@ describe('mutation results', () => {
assert.deepEqual(variables, { a: undefined, b: 2, c: 3 });
return Promise.resolve({ data: { result: 'goodbye' } });
case 3:
- assert.equal(variables, undefined);
+ assert.deepEqual(variables, {});
return Promise.resolve({ data: { result: 'moon' } });
default:
return Promise.reject(new Error('Too many network calls.'));
@@ -1253,6 +1253,61 @@ describe('mutation results', () => {
}).catch(done);
});
+ it('allows mutations with default values', done => {
+ let count = 0;
+
+ client = new ApolloClient({
+ addTypename: false,
+ networkInterface: {
+ query ({ variables }) {
+ switch (count++) {
+ case 0:
+ assert.deepEqual(variables, { a: 1, b: 'water' });
+ return Promise.resolve({ data: { result: 'hello' } });
+ case 1:
+ assert.deepEqual(variables, { a: 2, b: 'cheese', c: 3 });
+ return Promise.resolve({ data: { result: 'world' } });
+ case 2:
+ assert.deepEqual(variables, { a: 1, b: 'cheese', c: 3 });
+ return Promise.resolve({ data: { result: 'goodbye' } });
+ default:
+ return Promise.reject(new Error('Too many network calls.'));
+ }
+ },
+ },
+ });
+
+ const mutation = gql`
+ mutation ($a: Int = 1, $b: String = "cheese", $c: Int) {
+ result(a: $a, b: $b, c: $c)
+ }
+ `;
+
+ Promise.all([
+ client.mutate({
+ mutation,
+ variables: { a: 1, b: 'water' },
+ }),
+ client.mutate({
+ mutation,
+ variables: { a: 2, c: 3 },
+ }),
+ client.mutate({
+ mutation,
+ variables: { c: 3 },
+ }),
+ ]).then(() => {
+ assert.deepEqual(client.queryManager.getApolloState().data, {
+ ROOT_MUTATION: {
+ 'result({"a":1,"b":"water"})': 'hello',
+ 'result({"a":2,"b":"cheese","c":3})': 'world',
+ 'result({"a":1,"b":"cheese","c":3})': 'goodbye',
+ },
+ });
+ done();
+ }).catch(done);
+ });
+
it('will pass null to the network interface when provided', done => {
let count = 0;
diff --git a/test/readFromStore.ts b/test/readFromStore.ts
--- a/test/readFromStore.ts
+++ b/test/readFromStore.ts
@@ -111,6 +111,47 @@ describe('reading from the store', () => {
});
});
+ it('runs a basic query with default values for arguments', () => {
+ const query = gql`
+ query someBigQuery(
+ $stringArg: String = "This is a default string!",
+ $intArg: Int = 0,
+ $floatArg: Float,
+ ){
+ id,
+ stringField(arg: $stringArg),
+ numberField(intArg: $intArg, floatArg: $floatArg),
+ nullField
+ }
+ `;
+
+ const variables = {
+ floatArg: 3.14,
+ };
+
+ const store = {
+ 'ROOT_QUERY': {
+ id: 'abcd',
+ nullField: null,
+ 'numberField({"intArg":0,"floatArg":3.14})': 5,
+ 'stringField({"arg":"This is a default string!"})': 'Heyo',
+ },
+ } as NormalizedCache;
+
+ const result = readQueryFromStore({
+ store,
+ query,
+ variables,
+ });
+
+ assert.deepEqual(result, {
+ id: 'abcd',
+ nullField: null,
+ numberField: 5,
+ stringField: 'Heyo',
+ });
+ });
+
it('runs a nested query', () => {
const result: any = {
id: 'abcd',
diff --git a/test/writeToStore.ts b/test/writeToStore.ts
--- a/test/writeToStore.ts
+++ b/test/writeToStore.ts
@@ -158,6 +158,44 @@ describe('writing to the store', () => {
});
});
+ it('properly normalizes a query with default values', () => {
+ const query = gql`
+ query someBigQuery($stringArg: String = "This is a default string!", $intArg: Int, $floatArg: Float){
+ id,
+ stringField(arg: $stringArg),
+ numberField(intArg: $intArg, floatArg: $floatArg),
+ nullField
+ }
+ `;
+
+ const variables = {
+ intArg: 5,
+ floatArg: 3.14,
+ };
+
+ const result: any = {
+ id: 'abcd',
+ stringField: 'Heyo',
+ numberField: 5,
+ nullField: null,
+ };
+
+ const normalized = writeQueryToStore({
+ result,
+ query,
+ variables,
+ });
+
+ assert.deepEqual(normalized, {
+ 'ROOT_QUERY': {
+ id: 'abcd',
+ nullField: null,
+ 'numberField({"intArg":5,"floatArg":3.14})': 5,
+ 'stringField({"arg":"This is a default string!"})': 'Heyo',
+ },
+ });
+ });
+
it('properly normalizes a nested object with an ID', () => {
const query = gql`
{
| Add support for GraphQL variables default values
Spec: https://facebook.github.io/graphql/#sec-Language.Variables
More info: https://medium.com/the-graphqlhub/graphql-tour-variables-58c6abd10f56
| @iki sure, we'll implement it some time. I would give this relatively low priority at the moment though, because I think default values for variables are most useful on the server. On the client, I think it's just as convenient to provide that default value yourself in the component that issues the query. But maybe you have a compelling use-case?
One use case I've run into is with directives.
For example, if I want to reuse a query for different components, but they query a different subset of fields:
```
query user($id: Int, $withGroups: Boolean = false, $withFriends: Boolean = false) {
user(id: $id) {
id
username
friends @include(if: $withFriends) {
id
username
}
groups @include(if: $withGroups) {
id
name
messages(limit: 1) {
... MessageFragment
}
}
}
}
```
Here, it would be nice to default to false for `withGroups` and `withFriends`, and only opt-in with variables. It's a small change, but I imagine could get more complicated if you wanted to have optional filters with defaults, etc.
Maybe there's a nicer way to do this?
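With defaults supported, a component could opt in to just the fields it needs; a hedged sketch (`USER_QUERY` wraps the query above in `gql`, and `FriendsList` is a hypothetical component):

```js
// withGroups is omitted and falls back to its default of false
const FriendsListWithData = graphql(USER_QUERY, {
  options: ({ userId }) => ({
    variables: { id: userId, withFriends: true },
  }),
})(FriendsList);
```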
Also, when running the code with default values, I would receive an error, but the error message wasn't serializable so it didn't make it into the reducer, so I needed to create middleware to log it. I'm working on an RN app, so no Apollo dev tools yet. I don't know if this is a general issue for some subset of errors that could use improving, but I wanted to bring it to your attention, and I'm happy to help if so.
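For illustration, a hedged sketch of such a logging middleware (a plain Redux middleware watching Apollo's error actions; the exact action types and `action.error` shape are assumptions based on the 1.x action names):

```js
const apolloErrorLogger = () => (next) => (action) => {
  if (action.type === 'APOLLO_QUERY_ERROR' || action.type === 'APOLLO_MUTATION_ERROR') {
    // log before the non-serializable Error object is dropped
    console.error('Apollo error action:', action.error);
  }
  return next(action);
};
```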
@srtucker22 this is a feature we would be happy to merge if you wanted to implement it 😊
Looking into it. Got my apollo-client contributor env set up. Hopefully will have some free time soon to mess with it.
@calebmer another common use case is simply whenever it's more appropriate in the codebase to set up default query/mutation parameter values in gql, e.g. if they are internal parameters and the user module which attaches a given query/mutation to a Redux component should not care about those parameters at all. | 2017-03-25T11:23:46Z | 1 |
apollographql/apollo-client | 1,270 | apollographql__apollo-client-1270 | [
"1266"
] | 449a96e70f0b232272cc6d7921322dff2e5af98d | diff --git a/src/core/QueryManager.ts b/src/core/QueryManager.ts
--- a/src/core/QueryManager.ts
+++ b/src/core/QueryManager.ts
@@ -877,14 +877,16 @@ export class QueryManager {
private getExtraReducers(): ApolloReducer[] {
return Object.keys(this.observableQueries).map( obsQueryId => {
- const queryOptions = this.observableQueries[obsQueryId].observableQuery.options;
+ const query = this.observableQueries[obsQueryId].observableQuery;
+ const queryOptions = query.options;
+
if (queryOptions.reducer) {
return createStoreReducer(
queryOptions.reducer,
queryOptions.query,
- queryOptions.variables || {},
+ query.variables || {},
this.reducerConfig,
- );
+ );
}
return null as never;
}).filter( reducer => reducer !== null );
| diff --git a/test/ObservableQuery.ts b/test/ObservableQuery.ts
--- a/test/ObservableQuery.ts
+++ b/test/ObservableQuery.ts
@@ -782,6 +782,66 @@ describe('ObservableQuery', () => {
}
});
});
+
+ it('applies query reducers with correct variables', (done) => {
+ const queryManager = mockQueryManager({
+ // First we make the query
+ request: { query, variables },
+ result: { data: dataOne },
+ }, {
+ // Then we make a mutation
+ request: { query: mutation },
+ result: { data: mutationData },
+ }, {
+ // Then we make another query
+ request: { query, variables: differentVariables },
+ result: { data: dataTwo },
+ }, {
+ // Then we make another mutation
+ request: { query: mutation },
+ result: { data: mutationData },
+ });
+
+
+ let lastReducerVars: Array<Object> = [];
+ let lastReducerData: Array<Object> = [];
+ const observable = queryManager.watchQuery({
+ query,
+ variables,
+ reducer: (previous, action, reducerVars) => {
+ if (action.type === 'APOLLO_MUTATION_RESULT') {
+ // We want to track the history of the `variables` the reducer
+ // is given for the query.
+ lastReducerData.push(previous);
+ lastReducerVars.push(reducerVars);
+ }
+
+ return previous;
+ },
+ });
+
+ // Check that the variables fed into the reducer are correct.
+ function assertVariables() {
+ assert.lengthOf(lastReducerVars, 2);
+ assert.deepEqual(lastReducerVars[0], variables);
+ assert.deepEqual(lastReducerData[0], dataOne);
+ assert.deepEqual(lastReducerVars[1], differentVariables);
+ assert.deepEqual(lastReducerData[1], dataTwo);
+ done();
+ }
+
+ // Subscribe to the query, then run the mutation, then change the variables, then run another mutation.
+ let sub = observable.subscribe({});
+ queryManager.mutate({ mutation }).then(() => {
+ observable.setVariables(differentVariables);
+ queryManager.mutate({ mutation }).then(() => {
+ // We have to get out of the Promise scope here
+ // because the promises are capturing the assertion errors
+ // leading to timesouts.
+ setTimeout(assertVariables, 0);
+ });
+ });
+ });
});
});
});
| Variables in reducer for query always use the original variables
**Intended outcome:**
After setVariables, the reducers should re-run with the new variables for the query.
**Actual outcome:**
The reducers always run with the original variables.
**How to reproduce the issue:**
```
const query = apollo.watchQuery({
  query: q,
  variables: { test: "A" },
  reducer: (prev, action, variables) => {
    if (action.type === 'APOLLO_MUTATION_RESULT') {
      console.log(variables.test);
    }
    return prev;
  },
});
const sub = query.subscribe({});

apollo.mutate(/* ... */);
// console => A
query.setVariables({ test: "B" });
apollo.mutate(/* ... */);
// console => A
// >>>> EXPECTED: B
```
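For context, a minimal sketch of why the repro fails (simplified, using the names visible in the patch above; not the exact internals): `ObservableQuery` carries two copies of the variables, and the reducer machinery was reading the stale one.

```
// `options.variables` is captured when watchQuery() is called and never
// changes; `variables` on the ObservableQuery is what setVariables() updates.
const observable = apollo.watchQuery({ query: q, variables: { test: 'A' } });

observable.setVariables({ test: 'B' });

console.log(observable.options.variables); // { test: 'A' }, what the reducer used to receive
console.log(observable.variables);         // { test: 'B' }, what the fixed code reads
```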
| 2017-02-07T19:31:06Z | 0.8 |
|
apollographql/apollo-client | 1,169 | apollographql__apollo-client-1169 | [
"1168"
] | 389d87aabb6a7ddd553319ba932af09616a7b12f | diff --git a/src/ApolloClient.ts b/src/ApolloClient.ts
--- a/src/ApolloClient.ts
+++ b/src/ApolloClient.ts
@@ -44,6 +44,10 @@ import {
Observable,
} from './util/Observable';
+import {
+ isProduction,
+} from './util/environment';
+
import {
WatchQueryOptions,
SubscriptionOptions,
@@ -225,7 +229,7 @@ export default class ApolloClient {
// Attach the client instance to window to let us be found by chrome devtools, but only in
// development mode
const defaultConnectToDevTools =
- typeof process === 'undefined' || (process.env && process.env.NODE_ENV !== 'production') &&
+ !isProduction() &&
typeof window !== 'undefined' && (!(window as any).__APOLLO_CLIENT__);
if (typeof connectToDevTools === 'undefined') {
diff --git a/src/core/QueryManager.ts b/src/core/QueryManager.ts
--- a/src/core/QueryManager.ts
+++ b/src/core/QueryManager.ts
@@ -49,6 +49,10 @@ import {
createStoreReducer,
} from '../data/resultReducers';
+import {
+ isProduction,
+} from '../util/environment';
+
import maybeDeepFreeze from '../util/maybeDeepFreeze';
import {
@@ -346,7 +350,7 @@ export class QueryManager {
}
} else {
console.error('Unhandled error', apolloError, apolloError.stack);
- if (process.env.NODE_ENV !== 'production') {
+ if (!isProduction()) {
/* tslint:disable-next-line */
console.info(
'An unhandled error was thrown because no error handler is registered ' +
diff --git a/src/data/readFromStore.ts b/src/data/readFromStore.ts
--- a/src/data/readFromStore.ts
+++ b/src/data/readFromStore.ts
@@ -27,7 +27,13 @@ import {
ApolloReducerConfig,
} from '../store';
-import { isEqual } from '../util/isEqual';
+import {
+ isEqual,
+} from '../util/isEqual';
+
+import {
+ isTest,
+} from '../util/environment';
/**
* The key which the cache id for a given value is stored in the result object. This key is private
@@ -141,7 +147,7 @@ true option set in Apollo Client. Please turn on that option so that we can accu
match fragments.`);
/* istanbul ignore if */
- if (process.env.NODE_ENV !== 'test') {
+ if (!isTest()) {
// When running tests, we want to print the warning every time
haveWarned = true;
}
diff --git a/src/util/environment.ts b/src/util/environment.ts
new file mode 100644
--- /dev/null
+++ b/src/util/environment.ts
@@ -0,0 +1,24 @@
+export function getEnv(): string {
+ if (typeof process !== 'undefined' && process.env.NODE_ENV) {
+ return process.env.NODE_ENV;
+ }
+
+ // default environment
+ return 'development';
+}
+
+export function isEnv(env: string): boolean {
+ return getEnv() === env;
+}
+
+export function isProduction(): boolean {
+ return isEnv('production') === true;
+}
+
+export function isDevelopment(): boolean {
+ return isEnv('development') === true;
+}
+
+export function isTest(): boolean {
+ return isEnv('test') === true;
+}
diff --git a/src/util/maybeDeepFreeze.ts b/src/util/maybeDeepFreeze.ts
--- a/src/util/maybeDeepFreeze.ts
+++ b/src/util/maybeDeepFreeze.ts
@@ -1,3 +1,7 @@
+import {
+ isDevelopment,
+ isTest,
+} from './environment';
// taken straight from https://github.com/substack/deep-freeze to avoid import hassles with rollup
function deepFreeze (o: any) {
@@ -16,7 +20,7 @@ function deepFreeze (o: any) {
};
export default function maybeDeepFreeze(obj: any) {
- if (process.env.NODE_ENV === 'development' || process.env.NODE_ENV === 'test') {
+ if (isDevelopment() || isTest()) {
return deepFreeze(obj);
}
return obj;
| diff --git a/test/environment.ts b/test/environment.ts
new file mode 100644
--- /dev/null
+++ b/test/environment.ts
@@ -0,0 +1,77 @@
+import { assert } from 'chai';
+
+import { isEnv, isProduction, isDevelopment, isTest } from '../src/util/environment';
+
+describe('environment', () => {
+ let keepEnv: string;
+
+ beforeEach(() => {
+ // save the NODE_ENV
+ keepEnv = process.env.NODE_ENV;
+ });
+
+ afterEach(() => {
+ // restore the NODE_ENV
+ process.env.NODE_ENV = keepEnv;
+ });
+
+ describe('isEnv', () => {
+ it(`should match when there's a value`, () => {
+ [
+ 'production',
+ 'development',
+ 'test',
+ ]
+ .forEach(env => {
+ process.env.NODE_ENV = env;
+ assert.isTrue(isEnv(env));
+ });
+ });
+
+    it(`should treat a missing process.env.NODE_ENV as development`, () => {
+ delete process.env.NODE_ENV;
+ assert.isTrue(isEnv('development'));
+ });
+ });
+
+ describe('isProduction', () => {
+ it('should return true if in production', () => {
+ process.env.NODE_ENV = 'production';
+ assert.isTrue(isProduction());
+ });
+
+ it('should return false if not in production', () => {
+ process.env.NODE_ENV = 'test';
+ assert.isTrue(!isProduction());
+ });
+ });
+
+ describe('isTest', () => {
+ it('should return true if in test', () => {
+ process.env.NODE_ENV = 'test';
+ assert.isTrue(isTest());
+ });
+
+    it('should return false if not in test', () => {
+ process.env.NODE_ENV = 'development';
+ assert.isTrue(!isTest());
+ });
+ });
+
+ describe('isDevelopment', () => {
+ it('should return true if in development', () => {
+ process.env.NODE_ENV = 'development';
+ assert.isTrue(isDevelopment());
+ });
+
+    it('should return false if not in development and environment is defined', () => {
+ process.env.NODE_ENV = 'test';
+ assert.isTrue(!isDevelopment());
+ });
+
+ it('should make development as the default environment', () => {
+ delete process.env.NODE_ENV;
+ assert.isTrue(isDevelopment());
+ });
+ });
+});
diff --git a/test/tests.ts b/test/tests.ts
--- a/test/tests.ts
+++ b/test/tests.ts
@@ -53,3 +53,4 @@ import './customResolvers';
import './isEqual';
import './cloneDeep';
import './assign';
+import './environment';
| Avoid using process.env.NODE_ENV
There's no such thing in a browser; referencing `process.env.NODE_ENV` directly throws a `ReferenceError` unless a bundler injects a `process` global.
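A minimal sketch of the browser-safe check this calls for; it mirrors the `getEnv` helper added in `src/util/environment.ts` by the patch above.

```
export function getEnv(): string {
  // `typeof` is safe on an undeclared identifier, so this never throws,
  // even in a browser that has no `process` global.
  if (typeof process !== 'undefined' && process.env.NODE_ENV) {
    return process.env.NODE_ENV;
  }
  // Default when no environment was injected at build time.
  return 'development';
}

export function isProduction(): boolean {
  return getEnv() === 'production';
}
```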
| 2017-01-12T16:40:42Z | 0.7 |
|
apollographql/apollo-client | 1,095 | apollographql__apollo-client-1095 | [
"1074",
"999"
] | 735ee9b49a834e5db65e60a90148f65acd7fd82e | diff --git a/rollup.config.js b/rollup.config.js
new file mode 100644
--- /dev/null
+++ b/rollup.config.js
@@ -0,0 +1,19 @@
+export default {
+ entry: 'lib/src/index.js',
+ dest: 'lib/apollo.umd.js',
+ format: 'umd',
+ sourceMap: true,
+ moduleName: 'apollo',
+ onwarn
+};
+
+function onwarn(message) {
+ const suppressed = [
+ 'UNRESOLVED_IMPORT',
+ 'THIS_IS_UNDEFINED'
+ ];
+
+ if (!suppressed.find(code => message.code === code)) {
+ return console.warn(message.message);
+ }
+}
diff --git a/src/ApolloClient.ts b/src/ApolloClient.ts
--- a/src/ApolloClient.ts
+++ b/src/ApolloClient.ts
@@ -13,9 +13,6 @@ import {
} from 'graphql';
-import isUndefined = require('lodash/isUndefined');
-import isString = require('lodash/isString');
-
import {
createApolloStore,
ApolloStore,
@@ -30,10 +27,14 @@ import {
import {
QueryManager,
+} from './core/QueryManager';
+
+import {
ApolloQueryResult,
ResultComparator,
ResultTransformer,
-} from './core/QueryManager';
+ IdGetter,
+} from './core/types';
import {
ObservableQuery,
@@ -44,15 +45,11 @@ import {
} from './util/Observable';
import {
- DeprecatedWatchQueryOptions,
- DeprecatedSubscriptionOptions,
+ WatchQueryOptions,
+ SubscriptionOptions,
MutationOptions,
} from './core/watchQueryOptions';
-import {
- IdGetter,
-} from './data/extensions';
-
import {
MutationBehaviorReducerMap,
} from './data/mutationResults';
@@ -61,12 +58,6 @@ import {
storeKeyNameFromFieldNameAndArgs,
} from './data/storeUtils';
-import { createFragment } from './fragments';
-
-import {
- addFragmentsToDocument,
-} from './queries/getFromAST';
-
import {
version,
} from './version';
@@ -85,12 +76,6 @@ function defaultReduxRootSelector(state: any) {
return state[DEFAULT_REDUX_ROOT_KEY];
}
-// deprecation warning flags
-let haveWarnedQuery = false;
-let haveWarnedWatchQuery = false;
-let haveWarnedMutation = false;
-let haveWarnedSubscription = false;
-
/**
* This is the primary Apollo Client class. It is used to send GraphQL documents (i.e. queries
* and mutations) to a GraphQL spec-compliant server over a {@link NetworkInterface} instance,
@@ -192,7 +177,7 @@ export default class ApolloClient {
if (!reduxRootSelector && reduxRootKey) {
this.reduxRootSelector = (state: any) => state[reduxRootKey];
- } else if (isString(reduxRootSelector)) {
+ } else if (typeof reduxRootSelector === 'string') {
// for backwards compatibility, we set reduxRootKey if reduxRootSelector is a string
this.reduxRootKey = reduxRootSelector as string;
this.reduxRootSelector = (state: any) => state[reduxRootSelector as string];
@@ -207,6 +192,13 @@ export default class ApolloClient {
this.networkInterface = networkInterface ? networkInterface :
createNetworkInterface({ uri: '/graphql' });
this.addTypename = addTypename;
+ if (resultTransformer) {
+ console.warn(
+ '"resultTransformer" is being considered for deprecation in an upcoming version. ' +
+ 'If you are using it, please file an issue on apollostack/apollo-client ' +
+ 'with a description of your use-case',
+ );
+ }
this.resultTransformer = resultTransformer;
this.resultComparator = resultComparator;
this.shouldForceFetch = !(ssrMode || ssrForceFetchDelay > 0);
@@ -265,44 +257,17 @@ export default class ApolloClient {
* a description of store reactivity.
*
*/
- public watchQuery(options: DeprecatedWatchQueryOptions): ObservableQuery {
+ public watchQuery<T>(options: WatchQueryOptions): ObservableQuery<T> {
this.initStore();
if (!this.shouldForceFetch && options.forceFetch) {
options = {
...options,
forceFetch: false,
- } as DeprecatedWatchQueryOptions;
+ } as WatchQueryOptions;
}
- if (options.fragments && !haveWarnedWatchQuery && process.env.NODE_ENV !== 'production') {
- console.warn(
- '"fragments" option is deprecated and will be removed in the upcoming versions, ' +
- 'please refer to the documentation for how to define fragments: ' +
- 'http://dev.apollodata.com/react/fragments.html.',
- );
- /* istanbul ignore if */
- if (process.env.NODE_ENV !== 'test') {
- // When running tests, we want to print the warning every time
- haveWarnedWatchQuery = true;
- }
- }
-
- // Register each of the fragments present in the query document. The point
- // is to prevent fragment name collisions with fragments that are in the query
- // document itself.
- createFragment(options.query, undefined, true);
-
- // We add the fragments to the document to pass only the document around internally.
- const fullDocument = addFragmentsToDocument(options.query, options.fragments);
-
- const realOptions = {
- ...options,
- query: fullDocument,
- };
- delete realOptions.fragments;
-
- return this.queryManager.watchQuery(realOptions);
+ return this.queryManager.watchQuery<T>(options);
};
/**
@@ -314,7 +279,7 @@ export default class ApolloClient {
* how this query should be treated e.g. whether it is a polling query, whether it should hit the
* server at all or just resolve from the cache, etc.
*/
- public query(options: DeprecatedWatchQueryOptions): Promise<ApolloQueryResult> {
+ public query<T>(options: WatchQueryOptions): Promise<ApolloQueryResult<T>> {
this.initStore();
// XXX what if I pass pollInterval? Will it just keep running?
@@ -324,37 +289,10 @@ export default class ApolloClient {
options = {
...options,
forceFetch: false,
- } as DeprecatedWatchQueryOptions;
+ } as WatchQueryOptions;
}
- if (options.fragments && !haveWarnedQuery && process.env.NODE_ENV !== 'production') {
- console.warn(
- '"fragments" option is deprecated and will be removed in the upcoming versions, ' +
- 'please refer to the documentation for how to define fragments: ' +
- 'http://dev.apollodata.com/react/fragments.html.',
- );
- /* istanbul ignore if */
- if (process.env.NODE_ENV !== 'test') {
- // When running tests, we want to print the warning every time
- haveWarnedQuery = true;
- }
- }
-
- // Register each of the fragments present in the query document. The point
- // is to prevent fragment name collisions with fragments that are in the query
- // document itself.
- createFragment(options.query, undefined, true);
-
- // We add the fragments to the document to pass only the document around internally.
- const fullDocument = addFragmentsToDocument(options.query, options.fragments);
-
- const realOptions = {
- ...options,
- query: fullDocument,
- };
- delete realOptions.fragments;
-
- return this.queryManager.query(realOptions);
+ return this.queryManager.query<T>(options);
};
/**
@@ -370,9 +308,6 @@ export default class ApolloClient {
* @param options.variables An object that maps from the name of a variable as used in the mutation
* GraphQL document to that variable's value.
*
- * @param options.fragments A list of fragments as returned by {@link createFragment}. These fragments
- * can be referenced from within the GraphQL mutation document.
- *
* @param options.optimisticResponse An object that represents the result of this mutation that will be
* optimistically stored before the server has actually returned a result. This is most often
* used for optimistic UI, where we want to be able to see the result of a mutation immediately,
@@ -388,58 +323,19 @@ export default class ApolloClient {
* for this, you can simply refetch the queries that will be affected and achieve a consistent
* store once these queries return.
*/
- public mutate(options: MutationOptions): Promise<ApolloQueryResult> {
+ public mutate<T>(options: MutationOptions): Promise<ApolloQueryResult<T>> {
this.initStore();
- if (options.fragments && !haveWarnedMutation && process.env.NODE_ENV !== 'production') {
- console.warn(
- '"fragments" option is deprecated and will be removed in the upcoming versions, ' +
- 'please refer to the documentation for how to define fragments: ' +
- 'http://dev.apollodata.com/react/fragments.html.',
- );
- /* istanbul ignore if */
- if (process.env.NODE_ENV !== 'test') {
- // When running tests, we want to print the warning every time
- haveWarnedMutation = true;
- }
- }
-
- // We add the fragments to the document to pass only the document around internally.
- const fullDocument = addFragmentsToDocument(options.mutation, options.fragments);
-
- const realOptions = {
- ...options,
- mutation: fullDocument,
- };
- delete realOptions.fragments;
-
- return this.queryManager.mutate(realOptions);
+ return this.queryManager.mutate<T>(options);
};
- public subscribe(options: DeprecatedSubscriptionOptions): Observable<any> {
+ public subscribe(options: SubscriptionOptions): Observable<any> {
this.initStore();
- if (options.fragments && !haveWarnedSubscription && process.env.NODE_ENV !== 'production') {
- console.warn(
- '"fragments" option is deprecated and will be removed in the upcoming versions, ' +
- 'please refer to the documentation for how to define fragments: ' +
- 'http://dev.apollodata.com/react/fragments.html.',
- );
- /* istanbul ignore if */
- if (process.env.NODE_ENV !== 'test') {
- // When running tests, we want to print the warning every time
- haveWarnedSubscription = true;
- }
- }
-
- // We add the fragments to the document to pass only the document around internally.
- const fullDocument = addFragmentsToDocument(options.query, options.fragments);
-
const realOptions = {
...options,
- document: fullDocument,
+ document: options.query,
};
- delete realOptions.fragments;
delete realOptions.query;
return this.queryManager.startGraphQLSubscription(realOptions);
@@ -461,8 +357,13 @@ export default class ApolloClient {
this.setStore(store);
return (next: any) => (action: any) => {
+ const previousApolloState = this.queryManager.selectApolloState(store);
const returnValue = next(action);
- this.queryManager.broadcastNewStore(store.getState());
+ const newApolloState = this.queryManager.selectApolloState(store);
+
+ if (newApolloState !== previousApolloState) {
+ this.queryManager.broadcastNewStore(store.getState());
+ }
if (this.devToolsHookCb) {
this.devToolsHookCb({
@@ -538,7 +439,7 @@ export default class ApolloClient {
}
// ensure existing store has apolloReducer
- if (isUndefined(reduxRootSelector(store.getState()))) {
+ if (typeof reduxRootSelector(store.getState()) === 'undefined') {
throw new Error(
'Existing store does not use apolloReducer. Please make sure the store ' +
'is properly configured and "reduxRootSelector" is correctly specified.',
diff --git a/src/core/ObservableQuery.ts b/src/core/ObservableQuery.ts
--- a/src/core/ObservableQuery.ts
+++ b/src/core/ObservableQuery.ts
@@ -17,20 +17,21 @@ import {
import {
QueryManager,
+} from './QueryManager';
+
+import {
ApolloQueryResult,
FetchType,
-} from './QueryManager';
+} from './types';
import { tryFunctionOrLogError } from '../util/errorHandling';
-import { NetworkStatus } from '../queries/store';
-
-import { addFragmentsToDocument } from '../queries/getFromAST';
+import { isEqual } from '../util/isEqual';
-import isEqual = require('lodash/isEqual');
+import { NetworkStatus } from '../queries/store';
-export type ApolloCurrentResult = {
- data: any;
+export type ApolloCurrentResult<T> = {
+ data: T | {};
loading: boolean;
networkStatus: NetworkStatus;
error?: ApolloError;
@@ -47,7 +48,7 @@ export interface UpdateQueryOptions {
variables: Object;
}
-export class ObservableQuery extends Observable<ApolloQueryResult> {
+export class ObservableQuery<T> extends Observable<ApolloQueryResult<T>> {
public options: WatchQueryOptions;
public queryId: string;
/**
@@ -60,10 +61,10 @@ export class ObservableQuery extends Observable<ApolloQueryResult> {
private shouldSubscribe: boolean;
private scheduler: QueryScheduler;
private queryManager: QueryManager;
- private observers: Observer<ApolloQueryResult>[];
+ private observers: Observer<ApolloQueryResult<T>>[];
private subscriptionHandles: Subscription[];
- private lastResult: ApolloQueryResult;
+ private lastResult: ApolloQueryResult<T>;
private lastError: ApolloError;
constructor({
@@ -78,7 +79,7 @@ export class ObservableQuery extends Observable<ApolloQueryResult> {
const queryManager = scheduler.queryManager;
const queryId = queryManager.generateQueryId();
- const subscriberFunction = (observer: Observer<ApolloQueryResult>) => {
+ const subscriberFunction = (observer: Observer<ApolloQueryResult<T>>) => {
return this.onSubscribe(observer);
};
@@ -95,7 +96,7 @@ export class ObservableQuery extends Observable<ApolloQueryResult> {
this.subscriptionHandles = [];
}
- public result(): Promise<ApolloQueryResult> {
+ public result(): Promise<ApolloQueryResult<T>> {
return new Promise((resolve, reject) => {
const subscription = this.subscribe({
next(result) {
@@ -111,7 +112,7 @@ export class ObservableQuery extends Observable<ApolloQueryResult> {
});
}
- public currentResult(): ApolloCurrentResult {
+ public currentResult(): ApolloCurrentResult<T> {
const { data, partial } = this.queryManager.getCurrentQueryResult(this, true);
const queryStoreValue = this.queryManager.getApolloState().queries[this.queryId];
@@ -147,7 +148,13 @@ export class ObservableQuery extends Observable<ApolloQueryResult> {
return { data, loading, networkStatus };
}
- public refetch(variables?: any): Promise<ApolloQueryResult> {
+ // Returns the last result that observer.next was called with. This is not the same as
+ // currentResult! If you're not sure which you need, then you probably need currentResult.
+ public getLastResult(): ApolloQueryResult<T> {
+ return this.lastResult;
+ }
+
+ public refetch(variables?: any): Promise<ApolloQueryResult<T>> {
this.variables = {
...this.variables,
...variables,
@@ -175,7 +182,7 @@ export class ObservableQuery extends Observable<ApolloQueryResult> {
public fetchMore(
fetchMoreOptions: FetchMoreQueryOptions & FetchMoreOptions,
- ): Promise<ApolloQueryResult> {
+ ): Promise<ApolloQueryResult<T>> {
return Promise.resolve()
.then(() => {
const qid = this.queryManager.generateQueryId();
@@ -198,12 +205,9 @@ export class ObservableQuery extends Observable<ApolloQueryResult> {
};
}
- // We add the fragments to the document to pass only the document around internally.
- const fullQuery = addFragmentsToDocument(combinedOptions.query, combinedOptions.fragments);
-
combinedOptions = {
...combinedOptions,
- query: fullQuery,
+ query: combinedOptions.query,
forceFetch: true,
} as WatchQueryOptions;
return this.queryManager.fetchQuery(qid, combinedOptions);
@@ -271,7 +275,7 @@ export class ObservableQuery extends Observable<ApolloQueryResult> {
};
}
- public setOptions(opts: ModifiableWatchQueryOptions): Promise<ApolloQueryResult> {
+ public setOptions(opts: ModifiableWatchQueryOptions): Promise<ApolloQueryResult<T>> {
const oldOptions = this.options;
this.options = {
...this.options,
@@ -303,7 +307,7 @@ export class ObservableQuery extends Observable<ApolloQueryResult> {
* @param variables: The new set of variables. If there are missing variables,
* the previous values of those variables will be used.
*/
- public setVariables(variables: any): Promise<ApolloQueryResult> {
+ public setVariables(variables: any): Promise<ApolloQueryResult<T>> {
const newVariables = {
...this.variables,
...variables,
@@ -365,7 +369,7 @@ export class ObservableQuery extends Observable<ApolloQueryResult> {
this.scheduler.startPollingQuery(this.options, this.queryId);
}
- private onSubscribe(observer: Observer<ApolloQueryResult>) {
+ private onSubscribe(observer: Observer<ApolloQueryResult<T>>) {
this.observers.push(observer);
// Deliver initial result
@@ -401,7 +405,7 @@ export class ObservableQuery extends Observable<ApolloQueryResult> {
private setUpQuery() {
if (this.shouldSubscribe) {
- this.queryManager.addObservableQuery(this.queryId, this);
+ this.queryManager.addObservableQuery<T>(this.queryId, this);
}
if (!!this.options.pollInterval) {
@@ -410,21 +414,20 @@ export class ObservableQuery extends Observable<ApolloQueryResult> {
}
this.isCurrentlyPolling = true;
- this.scheduler.startPollingQuery(
+ this.scheduler.startPollingQuery<T>(
this.options,
this.queryId,
);
}
- const observer: Observer<ApolloQueryResult> = {
- next: (result: ApolloQueryResult) => {
+ const observer: Observer<ApolloQueryResult<T>> = {
+ next: (result: ApolloQueryResult<T>) => {
+ this.lastResult = result;
this.observers.forEach((obs) => {
if (obs.next) {
obs.next(result);
}
});
-
- this.lastResult = result;
},
error: (error: ApolloError) => {
this.observers.forEach((obs) => {
@@ -439,7 +442,7 @@ export class ObservableQuery extends Observable<ApolloQueryResult> {
},
};
- this.queryManager.startQuery(
+ this.queryManager.startQuery<T>(
this.queryId,
this.options,
this.queryManager.queryListenerForObserver(this.queryId, this.options, observer),
diff --git a/src/core/QueryManager.ts b/src/core/QueryManager.ts
--- a/src/core/QueryManager.ts
+++ b/src/core/QueryManager.ts
@@ -8,8 +8,21 @@ import {
Deduplicator,
} from '../transport/Deduplicator';
-import forOwn = require('lodash/forOwn');
-import isEqual = require('lodash/isEqual');
+import { isEqual } from '../util/isEqual';
+
+import {
+ ResultTransformer,
+ ResultComparator,
+ QueryListener,
+ ApolloQueryResult,
+ FetchType,
+ SubscriptionOptions,
+} from './types';
+
+import {
+ QueryStoreValue,
+ NetworkStatus,
+} from '../queries/store';
import {
ApolloStore,
@@ -18,10 +31,6 @@ import {
ApolloReducerConfig,
} from '../store';
-import {
- QueryStoreValue,
-} from '../queries/store';
-
import {
checkDocument,
getQueryDefinition,
@@ -40,6 +49,8 @@ import {
createStoreReducer,
} from '../data/resultReducers';
+import maybeDeepFreeze from '../util/maybeDeepFreeze';
+
import {
ExecutionResult,
DocumentNode,
@@ -82,10 +93,6 @@ import {
Observable,
} from '../util/Observable';
-import {
- NetworkStatus,
-} from '../queries/store';
-
import { tryFunctionOrLogError } from '../util/errorHandling';
import {
@@ -97,44 +104,6 @@ import { WatchQueryOptions } from './watchQueryOptions';
import { ObservableQuery } from './ObservableQuery';
-export type QueryListener = (queryStoreValue: QueryStoreValue) => void;
-
-export interface SubscriptionOptions {
- document: DocumentNode;
- variables?: { [key: string]: any };
-};
-
-export type ApolloQueryResult = {
- data: any;
- loading: boolean;
- networkStatus: NetworkStatus;
-
- // This type is different from the ExecutionResult type because it doesn't include errors.
- // Those are thrown via the standard promise/observer catch mechanism.
-};
-
-// A result transformer is given the data that is to be returned from the store from a query or
-// mutation, and can modify or observe it before the value is provided to your application.
-//
-// For watched queries, the transformer is only called when the data retrieved from the server is
-// different from previous.
-//
-// If the transformer wants to mutate results (say, by setting the prototype of result data), it
-// will likely need to be paired with a custom resultComparator. By default, Apollo performs a
-// deep equality comparsion on results, and skips those that are considered equal - reducing
-// re-renders.
-export type ResultTransformer = (resultData: ApolloQueryResult) => ApolloQueryResult;
-
-// Controls how Apollo compares two query results and considers their equality. Two equal results
-// will not trigger re-renders.
-export type ResultComparator = (result1: ApolloQueryResult, result2: ApolloQueryResult) => boolean;
-
-export enum FetchType {
- normal = 1,
- refetch = 2,
- poll = 3,
-}
-
export class QueryManager {
public pollingTimers: {[queryId: string]: NodeJS.Timer | any}; //oddity in Typescript
public scheduler: QueryScheduler;
@@ -161,8 +130,8 @@ export class QueryManager {
// track of queries that are inflight and reject them in case some
// destabalizing action occurs (e.g. reset of the Apollo store).
private fetchQueryPromises: { [requestId: string]: {
- promise: Promise<ApolloQueryResult>;
- resolve: (result: ApolloQueryResult) => void;
+ promise: Promise<ApolloQueryResult<any>>;
+ resolve: (result: ApolloQueryResult<any>) => void;
reject: (error: Error) => void;
} };
@@ -170,7 +139,7 @@ export class QueryManager {
// these to keep track of queries that are inflight and error on the observers associated
// with them in case of some destabalizing action (e.g. reset of the Apollo store).
private observableQueries: { [queryId: string]: {
- observableQuery: ObservableQuery;
+ observableQuery: ObservableQuery<any>;
} };
// A map going from the name of a query to an observer issued for it by watchQuery. This is
@@ -241,7 +210,7 @@ export class QueryManager {
this.broadcastQueries();
}
- public mutate({
+ public mutate<T>({
mutation,
variables,
resultBehaviors = [],
@@ -255,7 +224,7 @@ export class QueryManager {
optimisticResponse?: Object,
updateQueries?: MutationQueryReducersMap,
refetchQueries?: string[],
- }): Promise<ApolloQueryResult> {
+ }): Promise<ApolloQueryResult<T>> {
const mutationId = this.generateQueryId();
if (this.addTypename) {
@@ -317,7 +286,7 @@ export class QueryManager {
refetchQueries.forEach((name) => { this.refetchQueryByName(name); });
delete this.queryDocuments[mutationId];
- resolve(this.transformResult(<ApolloQueryResult>result));
+ resolve(this.transformResult(<ApolloQueryResult<T>>result));
})
.catch((err) => {
this.store.dispatch({
@@ -336,12 +305,12 @@ export class QueryManager {
// Returns a query listener that will update the given observer based on the
// results (or lack thereof) for a particular query.
- public queryListenerForObserver(
+ public queryListenerForObserver<T>(
queryId: string,
options: WatchQueryOptions,
- observer: Observer<ApolloQueryResult>,
+ observer: Observer<ApolloQueryResult<T>>,
): QueryListener {
- let lastResult: ApolloQueryResult;
+ let lastResult: ApolloQueryResult<T>;
return (queryStoreValue: QueryStoreValue) => {
// The query store value can be undefined in the event of a store
// reset.
@@ -388,22 +357,36 @@ export class QueryManager {
} else {
try {
const resultFromStore = {
- data: readQueryFromStore({
+ data: readQueryFromStore<T>({
store: this.getDataWithOptimisticResults(),
query: this.queryDocuments[queryId],
variables: queryStoreValue.previousVariables || queryStoreValue.variables,
returnPartialData: options.returnPartialData || noFetch,
config: this.reducerConfig,
+ previousResult: lastResult && lastResult.data,
}),
loading: queryStoreValue.loading,
networkStatus: queryStoreValue.networkStatus,
};
if (observer.next) {
- if (this.isDifferentResult(lastResult, resultFromStore)) {
+ const isDifferentResult =
+ this.resultComparator ? !this.resultComparator(lastResult, resultFromStore) : !(
+ lastResult &&
+ resultFromStore &&
+ lastResult.loading === resultFromStore.loading &&
+ lastResult.networkStatus === resultFromStore.networkStatus &&
+
+ // We can do a strict equality check here because we include a `previousResult`
+ // with `readQueryFromStore`. So if the results are the same they will be
+ // referentially equal.
+ lastResult.data === resultFromStore.data
+ );
+
+ if (isDifferentResult) {
lastResult = resultFromStore;
try {
- observer.next(this.transformResult(resultFromStore));
+ observer.next(maybeDeepFreeze(this.transformResult(resultFromStore)));
} catch (e) {
console.error(`Error in observer.next \n${e.stack}`);
}
@@ -429,7 +412,7 @@ export class QueryManager {
// supposed to be refetched in the event of a store reset. Once we unify error handling for
// network errors and non-network errors, the shouldSubscribe option will go away.
- public watchQuery(options: WatchQueryOptions, shouldSubscribe = true): ObservableQuery {
+ public watchQuery<T>(options: WatchQueryOptions, shouldSubscribe = true): ObservableQuery<T> {
// Call just to get errors synchronously
getQueryDefinition(options.query);
@@ -438,7 +421,7 @@ export class QueryManager {
transformedOptions.query = addTypenameToDocument(transformedOptions.query);
}
- let observableQuery = new ObservableQuery({
+ let observableQuery = new ObservableQuery<T>({
scheduler: this.scheduler,
options: transformedOptions,
shouldSubscribe: shouldSubscribe,
@@ -447,7 +430,7 @@ export class QueryManager {
return observableQuery;
}
- public query(options: WatchQueryOptions): Promise<ApolloQueryResult> {
+ public query<T>(options: WatchQueryOptions): Promise<ApolloQueryResult<T>> {
if (options.returnPartialData) {
throw new Error('returnPartialData option only supported on watchQuery.');
}
@@ -458,9 +441,9 @@ export class QueryManager {
const requestId = this.idCounter;
const resPromise = new Promise((resolve, reject) => {
- this.addFetchQueryPromise(requestId, resPromise, resolve, reject);
+ this.addFetchQueryPromise<T>(requestId, resPromise, resolve, reject);
- return this.watchQuery(options, false).result().then((result) => {
+ return this.watchQuery<T>(options, false).result().then((result) => {
this.removeFetchQueryPromise(requestId);
resolve(result);
}).catch((error) => {
@@ -472,7 +455,7 @@ export class QueryManager {
return resPromise;
}
- public fetchQuery(queryId: string, options: WatchQueryOptions, fetchType?: FetchType): Promise<ApolloQueryResult> {
+ public fetchQuery<T>(queryId: string, options: WatchQueryOptions, fetchType?: FetchType): Promise<ApolloQueryResult<T>> {
const {
variables = {},
forceFetch = false,
@@ -574,6 +557,10 @@ export class QueryManager {
return this.reduxRootSelector(this.store.getState());
}
+ public selectApolloState(store: any) {
+ return this.reduxRootSelector(store.getState());
+ }
+
public getInitialState(): { data: Object } {
return { data: this.getApolloState().data };
}
@@ -588,20 +575,19 @@ export class QueryManager {
}
// Adds a promise to this.fetchQueryPromises for a given request ID.
- public addFetchQueryPromise(requestId: number, promise: Promise<ApolloQueryResult>,
- resolve: (result: ApolloQueryResult) => void,
+ public addFetchQueryPromise<T>(requestId: number, promise: Promise<ApolloQueryResult<T>>,
+ resolve: (result: ApolloQueryResult<T>) => void,
reject: (error: Error) => void) {
this.fetchQueryPromises[requestId.toString()] = { promise, resolve, reject };
}
-
// Removes the promise in this.fetchQueryPromises for a particular request ID.
public removeFetchQueryPromise(requestId: number) {
delete this.fetchQueryPromises[requestId.toString()];
}
// Adds an ObservableQuery to this.observableQueries and to this.observableQueriesByName.
- public addObservableQuery(queryId: string, observableQuery: ObservableQuery) {
+ public addObservableQuery<T>(queryId: string, observableQuery: ObservableQuery<T>) {
this.observableQueries[queryId] = { observableQuery };
// Insert the ObservableQuery into this.observableQueriesByName if the query has a name
@@ -659,10 +645,10 @@ export class QueryManager {
});
}
- public startQuery(queryId: string, options: WatchQueryOptions, listener: QueryListener) {
+ public startQuery<T>(queryId: string, options: WatchQueryOptions, listener: QueryListener) {
this.addQueryListener(queryId, listener);
- this.fetchQuery(queryId, options)
+ this.fetchQuery<T>(queryId, options)
// `fetchQuery` returns a Promise. In case of a failure it should be caucht or else the
// console will show an `Uncaught (in promise)` message. Ignore the error for now.
.catch((error: Error) => undefined);
@@ -749,11 +735,13 @@ export class QueryManager {
this.stopQueryInStore(queryId);
}
- public getCurrentQueryResult(observableQuery: ObservableQuery, isOptimistic = false) {
+ public getCurrentQueryResult<T>(observableQuery: ObservableQuery<T>, isOptimistic = false) {
const {
variables,
document } = this.getQueryParts(observableQuery);
+ const lastResult = observableQuery.getLastResult();
+
const queryOptions = observableQuery.options;
const readOptions: ReadQueryOptions = {
// In case of an optimistic change, apply reducer on top of the
@@ -764,12 +752,13 @@ export class QueryManager {
variables,
returnPartialData: false,
config: this.reducerConfig,
+ previousResult: lastResult ? lastResult.data : undefined,
};
try {
// first try reading the full result from the store
const data = readQueryFromStore(readOptions);
- return { data, partial: false };
+ return maybeDeepFreeze({ data, partial: false });
} catch (e) {
// next, try reading partial results, if we want them
if (queryOptions.returnPartialData || queryOptions.noFetch) {
@@ -782,12 +771,12 @@ export class QueryManager {
}
}
- return { data: {}, partial: true };
+ return maybeDeepFreeze({ data: {}, partial: true });
}
}
- public getQueryWithPreviousResult(queryIdOrObservable: string | ObservableQuery, isOptimistic = false) {
- let observableQuery: ObservableQuery;
+ public getQueryWithPreviousResult<T>(queryIdOrObservable: string | ObservableQuery<T>, isOptimistic = false) {
+ let observableQuery: ObservableQuery<T>;
if (typeof queryIdOrObservable === 'string') {
if (!this.observableQueries[queryIdOrObservable]) {
throw new Error(`ObservableQuery with this id doesn't exist: ${queryIdOrObservable}`);
@@ -812,7 +801,7 @@ export class QueryManager {
}
// Give the result transformer a chance to observe or modify result data before it is passed on.
- public transformResult(result: ApolloQueryResult): ApolloQueryResult {
+ public transformResult<T>(result: ApolloQueryResult<T>): ApolloQueryResult<T> {
if (!this.resultTransformer) {
return result;
} else {
@@ -822,7 +811,7 @@ export class QueryManager {
// XXX: I think we just store this on the observable query at creation time
// TODO LATER: rename this function. Its main role is to apply the transform, nothing else!
- private getQueryParts(observableQuery: ObservableQuery) {
+ private getQueryParts<T>(observableQuery: ObservableQuery<T>) {
const queryOptions = observableQuery.options;
let transformedDoc = observableQuery.options.query;
@@ -919,7 +908,7 @@ export class QueryManager {
// Takes a request id, query id, a query document and information associated with the query
// and send it to the network interface. Returns
// a promise for the result associated with that request.
- private fetchRequest({
+ private fetchRequest<T>({
requestId,
queryId,
document,
@@ -941,8 +930,8 @@ export class QueryManager {
operationName: getOperationName(document),
};
- const retPromise = new Promise<ApolloQueryResult>((resolve, reject) => {
- this.addFetchQueryPromise(requestId, retPromise, resolve, reject);
+ const retPromise = new Promise<ApolloQueryResult<T>>((resolve, reject) => {
+ this.addFetchQueryPromise<T>(requestId, retPromise, resolve, reject);
this.deduplicator.query(request, this.queryDeduplication)
.then((result: ExecutionResult) => {
@@ -1037,15 +1026,10 @@ export class QueryManager {
}
}
- // check to see if two results are the same, given our resultComparator
- private isDifferentResult(lastResult: ApolloQueryResult, newResult: ApolloQueryResult): boolean {
- const comparator = this.resultComparator || isEqual;
- return !comparator(lastResult, newResult);
- }
-
private broadcastQueries() {
const queries = this.getApolloState().queries;
- forOwn(this.queryListeners, (listeners: QueryListener[], queryId: string) => {
+ Object.keys(this.queryListeners).forEach((queryId: string) => {
+ const listeners = this.queryListeners[queryId];
// XXX due to an unknown race condition listeners can sometimes be undefined here.
// this prevents a crash but doesn't solve the root cause
// see: https://github.com/apollostack/apollo-client/issues/833
diff --git a/src/core/types.ts b/src/core/types.ts
new file mode 100644
--- /dev/null
+++ b/src/core/types.ts
@@ -0,0 +1,45 @@
+import { DocumentNode } from 'graphql';
+import {
+ QueryStoreValue,
+ NetworkStatus,
+} from '../queries/store';
+
+export interface SubscriptionOptions {
+ document: DocumentNode;
+ variables?: { [key: string]: any };
+};
+
+export type QueryListener = (queryStoreValue: QueryStoreValue) => void;
+
+export type ApolloQueryResult<T> = {
+ data: T;
+ loading: boolean;
+ networkStatus: NetworkStatus;
+
+ // This type is different from the GraphQLResult type because it doesn't include errors.
+ // Those are thrown via the standard promise/observer catch mechanism.
+};
+
+// A result transformer is given the data that is to be returned from the store from a query or
+// mutation, and can modify or observe it before the value is provided to your application.
+//
+// For watched queries, the transformer is only called when the data retrieved from the server is
+// different from previous.
+//
+// If the transformer wants to mutate results (say, by setting the prototype of result data), it
+// will likely need to be paired with a custom resultComparator. By default, Apollo performs a
+// deep equality comparsion on results, and skips those that are considered equal - reducing
+// re-renders.
+export type ResultTransformer = (resultData: ApolloQueryResult<any>) => ApolloQueryResult<any>;
+
+// Controls how Apollo compares two query results and considers their equality. Two equal results
+// will not trigger re-renders.
+export type ResultComparator = (result1: ApolloQueryResult<any>, result2: ApolloQueryResult<any>) => boolean;
+
+export enum FetchType {
+ normal = 1,
+ refetch = 2,
+ poll = 3,
+}
+
+export type IdGetter = (value: Object) => string;
diff --git a/src/core/watchQueryOptions.ts b/src/core/watchQueryOptions.ts
--- a/src/core/watchQueryOptions.ts
+++ b/src/core/watchQueryOptions.ts
@@ -73,31 +73,9 @@ export interface WatchQueryOptions extends ModifiableWatchQueryOptions {
metadata?: any;
}
-// This interface is deprecated because we no longer pass around fragments separately in the core.
-export interface DeprecatedWatchQueryOptions extends ModifiableWatchQueryOptions {
- /**
- * A GraphQL document that consists of a single query to be sent down to the
- * server.
- */
- query: DocumentNode;
-
- /**
- * A list of fragments that are returned by {@link createFragment} which can be
- * referenced from the query document.
- */
- fragments?: FragmentDefinitionNode[];
-
- /**
- * Arbitrary metadata stored in Redux with this query. Designed for debugging,
- * developer tools, etc.
- */
- metadata?: any;
-}
-
export interface FetchMoreQueryOptions {
query?: DocumentNode;
variables?: { [key: string]: any };
- fragments?: FragmentDefinitionNode[];
}
export type SubscribeToMoreOptions = {
@@ -110,17 +88,15 @@ export type SubscribeToMoreOptions = {
onError?: (error: Error) => void;
};
-export interface DeprecatedSubscriptionOptions {
+export interface SubscriptionOptions {
query: DocumentNode;
variables?: { [key: string]: any };
- fragments?: FragmentDefinitionNode[];
};
export interface MutationOptions {
mutation: DocumentNode;
variables?: Object;
resultBehaviors?: MutationBehavior[];
- fragments?: FragmentDefinitionNode[];
optimisticResponse?: Object;
updateQueries?: MutationQueryReducersMap;
refetchQueries?: string[];
diff --git a/src/data/debug.ts b/src/data/debug.ts
--- a/src/data/debug.ts
+++ b/src/data/debug.ts
@@ -1,22 +1,23 @@
// For development only!
-import isObject = require('lodash/isObject');
-import omit = require('lodash/omit');
-import mapValues = require('lodash/mapValues');
export function stripLoc(obj: Object) {
if (Array.isArray(obj)) {
return obj.map(stripLoc);
}
- if (! isObject(obj)) {
+ if (obj === null || typeof obj !== 'object') {
return obj;
}
- const omitted: Object = omit(obj, ['loc']);
+ const nextObj = {};
- return mapValues(omitted, (value) => {
- return stripLoc(value);
+ Object.keys(obj).forEach(key => {
+ if (key !== 'loc') {
+ nextObj[key] = stripLoc(obj[key]);
+ }
});
+
+ return nextObj;
}
export function printAST(fragAst: Object) {
diff --git a/src/data/extensions.ts b/src/data/extensions.ts
deleted file mode 100644
--- a/src/data/extensions.ts
+++ /dev/null
@@ -1,3 +0,0 @@
-export type IdGetter = (value: Object) => string;
-
-export const getIdField = (data: { id: any }) => data.id;
diff --git a/src/data/mutationResults.ts b/src/data/mutationResults.ts
--- a/src/data/mutationResults.ts
+++ b/src/data/mutationResults.ts
@@ -8,8 +8,7 @@ import {
ExecutionResult,
} from 'graphql';
-import mapValues = require('lodash/mapValues');
-import cloneDeep = require('lodash/cloneDeep');
+import { cloneDeep } from '../util/cloneDeep';
import { replaceQueryResults } from './replaceQueryResults';
@@ -183,8 +182,11 @@ function mutationResultDeleteReducer(state: NormalizedCache, {
delete state[dataId];
// Now we need to go through the whole store and remove all references
- const newState = mapValues(state, (storeObj: StoreObject) => {
- return removeRefsFromStoreObj(storeObj, dataId);
+ const newState: NormalizedCache = {};
+
+ Object.keys(state).forEach(key => {
+ const storeObj = state[key];
+ newState[key] = removeRefsFromStoreObj(storeObj, dataId);
});
return newState;
@@ -193,10 +195,15 @@ function mutationResultDeleteReducer(state: NormalizedCache, {
function removeRefsFromStoreObj(storeObj: any, dataId: any) {
let affected = false;
- const cleanedObj = mapValues(storeObj, (value: any) => {
+ const cleanedObj: any = {};
+
+ Object.keys(storeObj).forEach(key => {
+ const value = storeObj[key];
+
if (value && value.id === dataId) {
affected = true;
- return null;
+ cleanedObj[key] = null;
+ return;
}
if (Array.isArray(value)) {
@@ -204,12 +211,13 @@ function removeRefsFromStoreObj(storeObj: any, dataId: any) {
if (filteredArray !== value) {
affected = true;
- return filteredArray;
+ cleanedObj[key] = filteredArray;
+ return;
}
}
- // If not modified, return the original value
- return value;
+ // If not modified, set the original value
+ cleanedObj[key] = value;
});
if (affected) {
diff --git a/src/data/readFromStore.ts b/src/data/readFromStore.ts
--- a/src/data/readFromStore.ts
+++ b/src/data/readFromStore.ts
@@ -5,6 +5,7 @@ import {
import graphqlAnywhere, {
Resolver,
FragmentMatcher,
+ ExecInfo,
} from 'graphql-anywhere';
import {
@@ -26,6 +27,18 @@ import {
ApolloReducerConfig,
} from '../store';
+import { isEqual } from '../util/isEqual';
+
+/**
+ * The key which the cache id for a given value is stored in the result object. This key is private
+ * and should not be used by Apollo client users.
+ *
+ * Uses a symbol if available in the environment.
+ *
+ * @private
+ */
+export const ID_KEY = typeof Symbol !== 'undefined' ? Symbol('id') : '@@id';
+
export type DiffResult = {
result?: any;
isMissing?: boolean;
@@ -36,6 +49,7 @@ export type ReadQueryOptions = {
query: DocumentNode,
variables?: Object,
returnPartialData?: boolean,
+ previousResult?: any,
config?: ApolloReducerConfig,
};
@@ -47,37 +61,50 @@ export type CustomResolverMap = {
},
};
+/**
+ * This code needs an optional `previousResult` property on `IdValue` so that when the results
+ * returned from the store are the same, we can just return the `previousResult` and not a new
+ * value thus preserving referential equality.
+ *
+ * The `previousResult` property is added to our `IdValue`s in the `graphql-anywhere` resolver so
+ * that they can be in the right position for `resultMapper` to test equality and return whichever
+ * result is appropriate.
+ *
+ * `resultMapper` takes the `previousResult`s and performs a shallow referential equality check. If
+ * that passes then instead of returning the object created by `graphql-anywhere` the
+ * `resultMapper` function will instead return the `previousResult`. This process is bottom-up so
+ * we start at the leaf results and swap them for `previousResult`s all the way up until we get to
+ * the root object.
+ */
+interface IdValueWithPreviousResult extends IdValue {
+ previousResult?: any;
+}
+
/**
* Resolves the result of a query solely from the store (i.e. never hits the server).
*
- * @param store The {@link NormalizedCache} used by Apollo for the `data` portion of the store.
+ * @param {Store} store The {@link NormalizedCache} used by Apollo for the `data` portion of the
+ * store.
*
- * @param query The query document to resolve from the data available in the store.
+ * @param {DocumentNode} query The query document to resolve from the data available in the store.
*
- * @param variables A map from the name of a variable to its value. These variables can be
- * referenced by the query document.
+ * @param {Object} [variables] A map from the name of a variable to its value. These variables can
+ * be referenced by the query document.
*
- * @param returnPartialData If set to true, the query will be resolved even if all of the data
- * needed to resolve the query is not found in the store. The data keys that are not found will not
- * be present in the returned object. If set to false, an error will be thrown if there are fields
- * that cannot be resolved from the store.
+ * @param {boolean} [returnPartialData] If set to true, the query will be resolved even if all of
+ * the data needed to resolve the query is not found in the store. The data keys that are not found
+ * will not be present in the returned object. If set to false, an error will be thrown if there
+ * are fields that cannot be resolved from the store.
+ *
+ * @param {any} previousResult The previous result returned by this function for the same query.
+ * If nothing in the store changed since that previous result then values from the previous result
+ * will be returned to preserve referential equality.
*/
-export function readQueryFromStore({
- store,
- query,
- variables,
- returnPartialData = false,
- config,
-}: ReadQueryOptions): Object {
- const { result } = diffQueryAgainstStore({
- query,
- store,
+export function readQueryFromStore<QueryType>({ returnPartialData = false, ...options }: ReadQueryOptions): QueryType {
+ return diffQueryAgainstStore({
+ ...options,
returnPartialData,
- variables,
- config,
- });
-
- return result;
+ }).result;
}
type ReadStoreContext = {
@@ -134,16 +161,17 @@ match fragments.`);
const readStoreResolver: Resolver = (
fieldName: string,
- idValue: IdValue,
+ idValue: IdValueWithPreviousResult,
args: any,
context: ReadStoreContext,
+ { resultKey }: ExecInfo,
) => {
assertIdValue(idValue);
const objId = idValue.id;
const obj = context.store[objId];
const storeKeyName = storeKeyNameFromFieldNameAndArgs(fieldName, args);
- const fieldValue = (obj || {})[storeKeyName];
+ let fieldValue = (obj || {})[storeKeyName];
if (typeof fieldValue === 'undefined') {
if (context.customResolvers && obj && (obj.__typename || objId === 'ROOT_QUERY')) {
@@ -170,11 +198,26 @@ Perhaps you want to use the \`returnPartialData\` option?`);
return fieldValue;
}
+ // if this is an object scalar, it must be a json blob and we have to unescape it
if (isJsonValue(fieldValue)) {
- // if this is an object scalar, it must be a json blob and we have to unescape it
+ // If the JSON blob is the same now as in the previous result, return the previous result to
+ // maintain referential equality.
+ //
+ // `isEqual` will first perform a referential equality check (with `===`) in case the JSON
+ // value has not changed in the store, and then a deep equality check if that fails in case a
+ // new JSON object was returned by the API but that object may still be the same.
+ if (idValue.previousResult && isEqual(idValue.previousResult[resultKey], fieldValue.json)) {
+ return idValue.previousResult[resultKey];
+ }
return fieldValue.json;
}
+ // If we had a previous result, try adding that previous result value for this field to our field
+ // value. This will create a new value without mutating the old one.
+ if (idValue.previousResult) {
+ fieldValue = addPreviousResultToIdValues(fieldValue, idValue.previousResult[resultKey]);
+ }
+
return fieldValue;
};
@@ -184,6 +227,7 @@ Perhaps you want to use the \`returnPartialData\` option?`);
* @param {DocumentNode} query A parsed GraphQL query document
* @param {Store} store The Apollo Client store object
* @param {boolean} [returnPartialData] Whether to throw an error if any fields are missing
+ * @param {any} previousResult The previous result returned by this function for the same query
* @return {result: Object, isMissing: [boolean]}
*/
export function diffQueryAgainstStore({
@@ -191,6 +235,7 @@ export function diffQueryAgainstStore({
query,
variables,
returnPartialData = true,
+ previousResult,
config,
}: ReadQueryOptions): DiffResult {
// Throw the right validation error by trying to find a query in the document
@@ -209,10 +254,12 @@ export function diffQueryAgainstStore({
const rootIdValue = {
type: 'id',
id: 'ROOT_QUERY',
+ previousResult,
};
const result = graphqlAnywhere(readStoreResolver, query, rootIdValue, context, variables, {
fragmentMatcher,
+ resultMapper,
});
return {
@@ -228,3 +275,126 @@ an object reference. This should never happen during normal use unless you have
that is directly manipulating the store; please file an issue.`);
}
}
+
+/**
+ * Adds a previous result value to id values in a nested array. For a single id value and a single
+ * previous result then the previous value is added directly.
+ *
+ * For arrays we put all of the ids from the previous result array in a map and add them to id
+ * values with the same id.
+ *
+ * This function does not mutate. Instead it returns new instances of modified values.
+ *
+ * @private
+ */
+function addPreviousResultToIdValues (value: any, previousResult: any): any {
+ // If the value is an `IdValue`, add the previous result to it whether or not that
+ // `previousResult` is undefined.
+ //
+ // If the value is an array, recurse over each item trying to add the `previousResult` for that
+ // item.
+ if (isIdValue(value)) {
+ return {
+ ...value,
+ previousResult,
+ };
+ } else if (Array.isArray(value)) {
+ const idToPreviousResult: { [id: string]: any } = {};
+
+ // If the previous result was an array, we want to build up our map of ids to previous results
+ // using the private `ID_KEY` property that is added in `resultMapper`.
+ if (Array.isArray(previousResult)) {
+ previousResult.forEach(item => {
+ if (item[ID_KEY]) {
+ idToPreviousResult[item[ID_KEY]] = item;
+ }
+ });
+ }
+
+ // For every value we want to add the previous result.
+ return value.map((item, i) => {
+ // By default the previous result for this item will be in the same array position as this
+ // item.
+ let itemPreviousResult = previousResult && previousResult[i];
+
+ // If the item is an id value, we should check to see if there is a previous result for this
+ // specific id. If there is, that will be the value for `itemPreviousResult`.
+ if (isIdValue(item)) {
+ itemPreviousResult = idToPreviousResult[item.id] || itemPreviousResult;
+ }
+
+ return addPreviousResultToIdValues(item, itemPreviousResult);
+ });
+ }
+ // Return the value, nothing changed.
+ return value;
+}
+
+/**
+ * Maps a result from `graphql-anywhere` to a final result value.
+ *
+ * If the result and the previous result from the `idValue` pass a shallow equality test, we just
+ * return the `previousResult` to maintain referential equality.
+ *
+ * We also add a private id property to the result that we can use later on.
+ *
+ * @private
+ */
+function resultMapper (resultFields: any, idValue: IdValueWithPreviousResult) {
+ // If we had a previous result, we may be able to return that and preserve referential equality
+ if (idValue.previousResult) {
+ const currentResultKeys = Object.keys(resultFields);
+
+ const sameAsPreviousResult =
+ // Confirm that we have the same keys in both the current result and the previous result.
+ Object.keys(idValue.previousResult)
+ .reduce((sameKeys, key) => sameKeys && currentResultKeys.indexOf(key) > -1, true) &&
+
+ // Perform a shallow comparison of the result fields with the previous result. If all of
+ // the shallow fields are referentially equal to the fields of the previous result we can
+ // just return the previous result.
+ //
+      // While we do a shallow comparison of objects, we do a deep comparison of arrays.
+ currentResultKeys.reduce((same, key) => (
+ same && areNestedArrayItemsStrictlyEqual(resultFields[key], idValue.previousResult[key])
+ ), true);
+
+ if (sameAsPreviousResult) {
+ return idValue.previousResult;
+ }
+ }
+
+ // Add the id to the result fields. It should be non-enumerable so users can’t see it without
+ // trying very hard.
+ Object.defineProperty(resultFields, ID_KEY, {
+ enumerable: false,
+ configurable: false,
+ writable: false,
+ value: idValue.id,
+ });
+
+ return resultFields;
+}
+
+type NestedArray<T> = T | Array<T | Array<T | Array<T>>>;
+
+/**
+ * Compare all the items to see if they are all referentially equal in two arrays no matter how
+ * deeply nested the arrays are.
+ *
+ * @private
+ */
+function areNestedArrayItemsStrictlyEqual (a: NestedArray<any>, b: NestedArray<any>): boolean {
+ // If `a` and `b` are referentially equal, return true.
+ if (a === b) {
+ return true;
+ }
+ // If either `a` or `b` are not an array or not of the same length return false. `a` and `b` are
+ // known to not be equal here, we checked above.
+ if (!Array.isArray(a) || !Array.isArray(b) || a.length !== b.length) {
+ return false;
+ }
+ // Otherwise let us compare all of the array items (which are potentially nested arrays!) to see
+ // if they are equal.
+ return a.reduce((same, item, i) => same && areNestedArrayItemsStrictlyEqual(item, b[i]), true);
+}
diff --git a/src/data/scopeQuery.ts b/src/data/scopeQuery.ts
--- a/src/data/scopeQuery.ts
+++ b/src/data/scopeQuery.ts
@@ -13,8 +13,6 @@ import {
resultKeyNameFromField,
} from './storeUtils';
-import isNumber = require('lodash/isNumber');
-
// The type of a path
export type StorePath = (string|number)[];
@@ -51,7 +49,7 @@ export function scopeSelectionSetToResultPath({
path
// Arrays are not represented in GraphQL AST
- .filter((pathSegment) => !isNumber(pathSegment))
+ .filter((pathSegment) => typeof pathSegment !== 'number')
.forEach((pathSegment) => {
currSelSet = followOnePathSegment(currSelSet, pathSegment as string, fragmentMap);
});
diff --git a/src/data/store.ts b/src/data/store.ts
--- a/src/data/store.ts
+++ b/src/data/store.ts
@@ -101,7 +101,7 @@ export function data(
// write to "the" store?
let newState = writeResultToStore({
result: action.result.data,
- dataId: 'ROOT_QUERY', // TODO: is this correct? what am I doing here? What is dataId for??
+ dataId: 'ROOT_SUBSCRIPTION',
document: action.document,
variables: action.variables,
store: clonedState,
diff --git a/src/data/storeUtils.ts b/src/data/storeUtils.ts
--- a/src/data/storeUtils.ts
+++ b/src/data/storeUtils.ts
@@ -15,9 +15,8 @@ import {
NameNode,
} from 'graphql';
-import isObject = require('lodash/isObject');
-
function isStringValue(value: ValueNode): value is StringValueNode {
+
return value.kind === 'StringValue';
}
@@ -146,7 +145,11 @@ export interface JsonValue {
export type StoreValue = number | string | string[] | IdValue | JsonValue | void;
export function isIdValue(idObject: StoreValue): idObject is IdValue {
- return (isObject(idObject) && (idObject as (IdValue | JsonValue)).type === 'id');
+ return (
+ idObject != null &&
+ typeof idObject === 'object' &&
+ (idObject as (IdValue | JsonValue)).type === 'id'
+ );
}
export function toIdValue(id: string, generated = false): IdValue {
@@ -158,5 +161,9 @@ export function toIdValue(id: string, generated = false): IdValue {
}
export function isJsonValue(jsonObject: StoreValue): jsonObject is JsonValue {
- return (isObject(jsonObject) && (jsonObject as (IdValue | JsonValue)).type === 'json');
+ return (
+ jsonObject != null &&
+ typeof jsonObject === 'object' &&
+ (jsonObject as (IdValue | JsonValue)).type === 'json'
+ );
}
diff --git a/src/data/writeToStore.ts b/src/data/writeToStore.ts
--- a/src/data/writeToStore.ts
+++ b/src/data/writeToStore.ts
@@ -1,6 +1,3 @@
-import isNull = require('lodash/isNull');
-import isUndefined = require('lodash/isUndefined');
-import isObject = require('lodash/isObject');
import {
getOperationDefinition,
@@ -35,7 +32,7 @@ import {
import {
IdGetter,
-} from './extensions';
+} from '../core/types';
import {
shouldInclude,
@@ -149,7 +146,7 @@ export function writeSelectionSetToStore({
const resultFieldKey: string = resultKeyNameFromField(selection);
const value: any = result[resultFieldKey];
- if (!isUndefined(value)) {
+ if (value !== undefined) {
writeFieldToStore({
dataId,
value,
@@ -241,16 +238,15 @@ function writeFieldToStore({
// If we merge, this will be the generatedKey
let generatedKey: string;
- // If it's a scalar that's not a JSON blob, just store it in the store
- if ((!field.selectionSet || isNull(value)) && !isObject(value)) {
- storeValue = value;
- } else if ((!field.selectionSet || isNull(value)) && isObject(value)) {
- // If it is a scalar that's a JSON blob, we have to "escape" it so it can't
- // pretend to be an id
- storeValue = {
- type: 'json',
- json: value,
- };
+ // If this is a scalar value...
+ if (!field.selectionSet || value === null) {
+ storeValue =
+ value != null && typeof value === 'object'
+ // If the scalar value is a JSON blob, we have to "escape" it so it can’t pretend to be
+ // an id.
+ ? { type: 'json', json: value }
+ // Otherwise, just store the scalar directly in the store.
+ : value;
} else if (Array.isArray(value)) {
const generatedId = `${dataId}.${storeFieldName}`;
@@ -340,7 +336,7 @@ function processArrayValue(
context: WriteContext,
): any[] {
return value.map((item: any, index: any) => {
- if (isNull(item)) {
+ if (item === null) {
return null;
}
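
The consolidated scalar branch above stores primitives directly and wraps object-valued scalars so a JSON blob can never be mistaken for an id reference. A hedged sketch of just that branch (the selection-set cases are handled elsewhere in the real function):

function storeScalar(value: any): any {
  return value != null && typeof value === 'object'
    // A JSON blob: "escape" it so it can't pretend to be an id value.
    ? { type: 'json', json: value }
    // A plain scalar (or null/undefined): store it directly.
    : value;
}

console.log(storeScalar(42));                 // 42
console.log(storeScalar(null));               // null
console.log(storeScalar({ lat: 1, lng: 2 })); // { type: 'json', json: { lat: 1, lng: 2 } }
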
diff --git a/src/fragments.ts b/src/fragments.ts
deleted file mode 100644
--- a/src/fragments.ts
+++ /dev/null
@@ -1,89 +0,0 @@
-import {
- DocumentNode,
- FragmentDefinitionNode,
-} from 'graphql';
-
-import flatten = require('lodash/flatten');
-
-import {
- getFragmentDefinitions,
-} from './queries/getFromAST';
-
-// A map going from the name of a fragment to that fragment's definition.
-// The point is to keep track of fragments that exist and print a warning if we encounter two
-// fragments that have the same name, i.e. the values *should* be of arrays of length 1.
-// Note: this variable is exported solely for unit testing purposes. It should not be touched
-// directly by application code.
-export let fragmentDefinitionsMap: { [fragmentName: string]: FragmentDefinitionNode[] } = {};
-
-// Specifies whether or not we should print warnings about conflicting fragment names.
-let printFragmentWarnings = true;
-
-// Takes a document, extracts the FragmentDefinitions from it and puts
-// them in this.fragmentDefinitions. The second argument specifies the fragments
-// that the fragment in the document depends on. The fragment definition array from the document
-// is concatenated with the fragment definition array passed as the second argument and this
-// concatenated array is returned.
-let haveWarned = false;
-
-export function createFragment(
- doc: DocumentNode,
- fragments: (FragmentDefinitionNode[] | FragmentDefinitionNode[][]) = [],
- internalUse = false,
-): FragmentDefinitionNode[] {
-
- if (!internalUse) {
- if (! haveWarned) {
- if (process.env.NODE_ENV !== 'production') {
- console.warn(
- '"createFragment" is deprecated and will be removed in version 0.6, ' +
- 'please refer to the documentation for how to define fragments: ' +
- 'http://dev.apollodata.com/react/fragments.html.',
- );
- }
- /* istanbul ignore if */
- if (process.env.NODE_ENV !== 'test') {
- // When running tests, we want to print the warning every time
- haveWarned = true;
- }
- }
- }
-
- fragments = flatten(fragments) as FragmentDefinitionNode[];
- const fragmentDefinitions = getFragmentDefinitions(doc);
- fragmentDefinitions.forEach((fragmentDefinition: FragmentDefinitionNode) => {
- const fragmentName = fragmentDefinition.name.value;
- if (fragmentDefinitionsMap.hasOwnProperty(fragmentName) &&
- fragmentDefinitionsMap[fragmentName].indexOf(fragmentDefinition) === -1) {
- // this is a problem because the app developer is trying to register another fragment with
- // the same name as one previously registered. So, we tell them about it.
- if (printFragmentWarnings) {
- console.warn(`Warning: fragment with name ${fragmentDefinition.name.value} already exists.
-Apollo Client enforces all fragment names across your application to be unique; read more about
-this in the docs: http://docs.apollostack.com/`);
- }
-
- fragmentDefinitionsMap[fragmentName].push(fragmentDefinition);
- } else if (!fragmentDefinitionsMap.hasOwnProperty(fragmentName)) {
- fragmentDefinitionsMap[fragmentName] = [fragmentDefinition];
- }
- });
-
- return fragments.concat(fragmentDefinitions);
-}
-
-// This function disables the warnings printed about fragment names. One place where this chould be
-// called is within writing unit tests that depend on Apollo Client and use named fragments that may
-// have the same name across different unit tests.
-export function disableFragmentWarnings() {
- printFragmentWarnings = false;
-}
-
-export function enableFragmentWarnings() {
- printFragmentWarnings = true;
-}
-
-// This function is used to be empty the namespace of fragment definitions. Used for unit tests.
-export function clearFragmentDefinitions() {
- fragmentDefinitionsMap = {};
-}
diff --git a/src/index.ts b/src/index.ts
--- a/src/index.ts
+++ b/src/index.ts
@@ -30,7 +30,7 @@ import {
import {
WatchQueryOptions,
MutationOptions,
- DeprecatedSubscriptionOptions,
+ SubscriptionOptions,
} from './core/watchQueryOptions';
import {
@@ -62,16 +62,10 @@ import {
} from './errors/ApolloError';
import ApolloClient from './ApolloClient';
-import {
- createFragment,
- clearFragmentDefinitions,
- disableFragmentWarnings,
- enableFragmentWarnings,
-} from './fragments';
import {
ApolloQueryResult,
-} from './core/QueryManager';
+} from './core/types';
import {
toIdValue,
@@ -91,12 +85,6 @@ export {
NetworkStatus,
ApolloError,
- // fragment stuff
- createFragment,
- clearFragmentDefinitions,
- disableFragmentWarnings,
- enableFragmentWarnings,
-
getQueryDefinition,
getFragmentDefinitions,
FragmentMap,
@@ -116,7 +104,7 @@ export {
MutationBehavior,
MutationQueryReducersMap,
Subscription,
- DeprecatedSubscriptionOptions as SubscriptionOptions,
+ SubscriptionOptions,
ApolloStore,
ApolloClient
};
diff --git a/src/optimistic-data/store.ts b/src/optimistic-data/store.ts
--- a/src/optimistic-data/store.ts
+++ b/src/optimistic-data/store.ts
@@ -14,11 +14,10 @@ import {
} from '../data/storeUtils';
import {
- getDataWithOptimisticResults,
Store,
} from '../store';
-import pick = require('lodash/pick');
+import { assign } from '../util/assign';
// a stack of patches of new or changed documents
export type OptimisticStore = {
@@ -28,6 +27,14 @@ export type OptimisticStore = {
const optimisticDefaultState: any[] = [];
+export function getDataWithOptimisticResults(store: Store): NormalizedCache {
+ if (store.optimistic.length === 0) {
+ return store.data;
+ }
+ const patches = store.optimistic.map(opt => opt.data);
+ return assign({}, store.data, ...patches) as NormalizedCache;
+}
+
export function optimistic(
previousState = optimisticDefaultState,
action: any,
@@ -49,7 +56,7 @@ export function optimistic(
const fakeStore = {
...store,
optimistic: previousState,
- } as Store;
+ };
const optimisticData = getDataWithOptimisticResults(fakeStore);
const fakeDataResultState = data(
optimisticData,
@@ -61,9 +68,13 @@ export function optimistic(
// TODO: apply extra reducers and resultBehaviors to optimistic store?
- const changedKeys = Object.keys(fakeDataResultState).filter(
- key => optimisticData[key] !== fakeDataResultState[key]);
- const patch = pick(fakeDataResultState, changedKeys);
+ const patch: any = {};
+
+ Object.keys(fakeDataResultState).forEach(key => {
+ if (optimisticData[key] !== fakeDataResultState[key]) {
+ patch[key] = fakeDataResultState[key];
+ }
+ });
const optimisticState = {
data: patch,
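
Conceptually, each optimistic entry is a shallow patch over the base store, and the reducer above records only the keys whose values actually changed. A minimal sketch of both halves (names are hypothetical):

type Cache = { [dataId: string]: any };

// Overlay optimistic patches on top of the base data; the last write wins.
function withOptimistic(base: Cache, patches: Cache[]): Cache {
  return patches.length === 0 ? base : Object.assign({}, base, ...patches);
}

// Keep only the keys that differ, replacing the old lodash `pick` call.
function changedKeys(before: Cache, after: Cache): Cache {
  const patch: Cache = {};
  Object.keys(after).forEach(key => {
    if (before[key] !== after[key]) patch[key] = after[key];
  });
  return patch;
}

const base = { 'Todo:1': { done: false } };
const optimistic = { 'Todo:1': { done: true } };
console.log(withOptimistic(base, [optimistic]));                    // { 'Todo:1': { done: true } }
console.log(changedKeys(base, withOptimistic(base, [optimistic]))); // only 'Todo:1'
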
diff --git a/src/queries/getFromAST.ts b/src/queries/getFromAST.ts
--- a/src/queries/getFromAST.ts
+++ b/src/queries/getFromAST.ts
@@ -4,9 +4,6 @@ import {
FragmentDefinitionNode,
} from 'graphql';
-import countBy = require('lodash/countBy');
-import identity = require('lodash/identity');
-import uniq = require('lodash/uniq');
export function getMutationDefinition(doc: DocumentNode): OperationDefinitionNode {
checkDocument(doc);
@@ -33,19 +30,26 @@ export function checkDocument(doc: DocumentNode) {
string in a "gql" tag? http://docs.apollostack.com/apollo-client/core.html#gql`);
}
- const definitionTypes = doc.definitions.map((definition) => {
- if (definition.kind !== 'OperationDefinition' && definition.kind !== 'FragmentDefinition') {
- throw new Error(`Schema type definitions not allowed in queries. Found: "${definition.kind}"`);
- }
+ let foundOperation = false;
- return definition.kind;
+ doc.definitions.forEach((definition) => {
+ switch (definition.kind) {
+      // If this is a fragment, that’s fine.
+ case 'FragmentDefinition':
+ break;
+      // Only one operation definition is allowed per document, so the first one we find is
+      // fine. If we encounter a second operation definition, we throw an error.
+ case 'OperationDefinition':
+ if (foundOperation) {
+ throw new Error('Queries must have exactly one operation definition.');
+ }
+ foundOperation = true;
+ break;
+      // If this is any other definition kind, throw an error.
+ default:
+ throw new Error(`Schema type definitions not allowed in queries. Found: "${definition.kind}"`);
+ }
});
- const typeCounts = countBy(definitionTypes, identity);
-
- // can't have more than one operation definition per query
- if (typeCounts['OperationDefinition'] > 1) {
- throw new Error('Queries must have exactly one operation definition.');
- }
}
export function getOperationName(doc: DocumentNode): string {
@@ -144,17 +148,3 @@ export function createFragmentMap(fragments: FragmentDefinitionNode[] = []): Fra
return symTable;
}
-
-// Utility function that takes a list of fragment definitions and adds them to a particular
-// document.
-export function addFragmentsToDocument(queryDoc: DocumentNode,
- fragments: FragmentDefinitionNode[]): DocumentNode {
- if (!fragments) {
- return queryDoc;
- }
- checkDocument(queryDoc);
- return ({
- ...queryDoc,
- definitions: uniq(queryDoc.definitions.concat(fragments)),
- }) as DocumentNode;
-}
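
The rewritten checkDocument enforces the same two rules as before in one pass, with no lodash: at most one operation definition, and nothing but operations and fragments. A standalone sketch of the rule (using graphql's parse; names are illustrative):

import { parse, DefinitionNode } from 'graphql';

function assertValidDefinitions(definitions: ReadonlyArray<DefinitionNode>): void {
  let foundOperation = false;
  definitions.forEach(definition => {
    switch (definition.kind) {
      case 'FragmentDefinition':
        break; // fragments are always fine
      case 'OperationDefinition':
        if (foundOperation) {
          throw new Error('Queries must have exactly one operation definition.');
        }
        foundOperation = true;
        break;
      default:
        throw new Error(`Schema type definitions not allowed in queries. Found: "${definition.kind}"`);
    }
  });
}

assertValidDefinitions(parse('{ a } fragment f on T { b }').definitions); // ok
// assertValidDefinitions(parse('{ a } { b }').definitions); // throws: two operations
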
diff --git a/src/queries/queryTransform.ts b/src/queries/queryTransform.ts
--- a/src/queries/queryTransform.ts
+++ b/src/queries/queryTransform.ts
@@ -11,7 +11,7 @@ import {
checkDocument,
} from './getFromAST';
-import cloneDeep = require('lodash/cloneDeep');
+import { cloneDeep } from '../util/cloneDeep';
const TYPENAME_FIELD: FieldNode = {
kind: 'Field',
diff --git a/src/queries/store.ts b/src/queries/store.ts
--- a/src/queries/store.ts
+++ b/src/queries/store.ts
@@ -18,7 +18,7 @@ import {
GraphQLError,
} from 'graphql';
-import isEqual = require('lodash/isEqual');
+import { isEqual } from '../util/isEqual';
export interface QueryStore {
[queryId: string]: QueryStoreValue;
diff --git a/src/scheduler/scheduler.ts b/src/scheduler/scheduler.ts
--- a/src/scheduler/scheduler.ts
+++ b/src/scheduler/scheduler.ts
@@ -10,10 +10,13 @@
import {
QueryManager,
- QueryListener,
- FetchType,
} from '../core/QueryManager';
+import {
+ FetchType,
+ QueryListener,
+} from '../core/types';
+
import { ObservableQuery } from '../core/ObservableQuery';
import { WatchQueryOptions } from '../core/watchQueryOptions';
@@ -58,9 +61,9 @@ export class QueryScheduler {
return queries[queryId] && queries[queryId].networkStatus !== NetworkStatus.ready;
}
- public fetchQuery(queryId: string, options: WatchQueryOptions, fetchType: FetchType) {
+ public fetchQuery<T>(queryId: string, options: WatchQueryOptions, fetchType: FetchType) {
return new Promise((resolve, reject) => {
- this.queryManager.fetchQuery(queryId, options, fetchType).then((result) => {
+ this.queryManager.fetchQuery<T>(queryId, options, fetchType).then((result) => {
resolve(result);
}).catch((error) => {
reject(error);
@@ -68,7 +71,7 @@ export class QueryScheduler {
});
}
- public startPollingQuery(
+ public startPollingQuery<T>(
options: WatchQueryOptions,
queryId?: string,
listener?: QueryListener,
@@ -82,7 +85,7 @@ export class QueryScheduler {
if (listener) {
this.queryManager.addQueryListener(queryId, listener);
}
- this.addQueryOnInterval(queryId, options);
+ this.addQueryOnInterval<T>(queryId, options);
return queryId;
}
@@ -94,7 +97,7 @@ export class QueryScheduler {
}
 // Fires all of the queries on a particular interval. Called on a setInterval.
- public fetchQueriesOnInterval(interval: number) {
+ public fetchQueriesOnInterval<T>(interval: number) {
// XXX this "filter" here is nasty, because it does two things at the same time.
// 1. remove queries that have stopped polling
// 2. call fetchQueries for queries that are polling and not in flight.
@@ -116,7 +119,7 @@ export class QueryScheduler {
const queryOptions = this.registeredQueries[queryId];
const pollingOptions = { ...queryOptions } as WatchQueryOptions;
pollingOptions.forceFetch = true;
- this.fetchQuery(queryId, pollingOptions, FetchType.poll);
+ this.fetchQuery<T>(queryId, pollingOptions, FetchType.poll);
return true;
});
@@ -129,7 +132,7 @@ export class QueryScheduler {
// Adds a query on a particular interval to this.intervalQueries and then fires
// that query with all the other queries executing on that interval. Note that the query id
// and query options must have been added to this.registeredQueries before this function is called.
- public addQueryOnInterval(queryId: string, queryOptions: WatchQueryOptions) {
+ public addQueryOnInterval<T>(queryId: string, queryOptions: WatchQueryOptions) {
const interval = queryOptions.pollInterval;
// If there are other queries on this interval, this query will just fire with those
@@ -140,17 +143,17 @@ export class QueryScheduler {
this.intervalQueries[interval] = [queryId];
// set up the timer for the function that will handle this interval
this.pollingTimers[interval] = setInterval(() => {
- this.fetchQueriesOnInterval(interval);
+ this.fetchQueriesOnInterval<T>(interval);
}, interval);
}
}
// Used only for unit testing.
- public registerPollingQuery(queryOptions: WatchQueryOptions): ObservableQuery {
+ public registerPollingQuery<T>(queryOptions: WatchQueryOptions): ObservableQuery<T> {
if (!queryOptions.pollInterval) {
throw new Error('Attempted to register a non-polling query with the scheduler.');
}
- return new ObservableQuery({
+ return new ObservableQuery<T>({
scheduler: this,
options: queryOptions,
});
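
Threading the `<T>` parameter through the scheduler is purely a typing change: it lets the result type flow from the call site into the ObservableQuery instead of being erased to `any`. A toy illustration (shapes are hypothetical, not the real classes):

interface Result<T> { data: T; }

class TypedObservable<T> {
  constructor(private fetcher: () => Result<T>) {}
  current(): Result<T> { return this.fetcher(); }
}

function registerTypedPollingQuery<T>(fetcher: () => Result<T>): TypedObservable<T> {
  return new TypedObservable<T>(fetcher);
}

const obs = registerTypedPollingQuery(() => ({ data: { people_one: { name: 'Luke' } } }));
console.log(obs.current().data.people_one.name); // 'Luke' — no `any` casts needed downstream
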
diff --git a/src/store.ts b/src/store.ts
--- a/src/store.ts
+++ b/src/store.ts
@@ -27,7 +27,9 @@ import {
import {
optimistic,
OptimisticStore,
+ getDataWithOptimisticResults,
} from './optimistic-data/store';
+export { getDataWithOptimisticResults };
import {
ApolloAction,
@@ -35,7 +37,7 @@ import {
import {
IdGetter,
-} from './data/extensions';
+} from './core/types';
import {
MutationBehaviorReducerMap,
@@ -45,7 +47,7 @@ import {
CustomResolverMap,
} from './data/readFromStore';
-import assign = require('lodash/assign');
+import { assign } from './util/assign';
export interface Store {
data: NormalizedCache;
@@ -109,6 +111,14 @@ export function createApolloReducer(config: ApolloReducerConfig): Function {
config,
);
+ if (state.data === newState.data &&
+ state.mutations === newState.mutations &&
+ state.queries === newState.queries &&
+ state.optimistic === newState.optimistic &&
+ state.reducerError === newState.reducerError) {
+ return state;
+ }
+
return newState;
} catch (reducerError) {
return {
@@ -176,17 +186,8 @@ export function createApolloStore({
);
}
-
export type ApolloReducerConfig = {
dataIdFromObject?: IdGetter;
mutationBehaviorReducers?: MutationBehaviorReducerMap;
customResolvers?: CustomResolverMap;
};
-
-export function getDataWithOptimisticResults(store: Store): NormalizedCache {
- if (store.optimistic.length === 0) {
- return store.data;
- }
- const patches = store.optimistic.map(opt => opt.data);
- return assign({}, store.data, ...patches) as NormalizedCache;
-}
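
The new identity check in createApolloReducer means an action that touches none of the Apollo slices returns the previous state object itself, so strict-equality subscribers (exercised by the broadcastNewStore test below) can skip work. An illustrative sketch:

interface State {
  data: object; queries: object; mutations: object;
  optimistic: any[]; reducerError: Error | null;
}

function preserveIdentity(prev: State, next: State): State {
  const unchanged =
    prev.data === next.data &&
    prev.mutations === next.mutations &&
    prev.queries === next.queries &&
    prev.optimistic === next.optimistic &&
    prev.reducerError === next.reducerError;
  return unchanged ? prev : next;
}

const prev: State = { data: {}, queries: {}, mutations: {}, optimistic: [], reducerError: null };
console.log(preserveIdentity(prev, { ...prev }) === prev); // true: same slices, old identity kept
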
diff --git a/src/transport/batchedNetworkInterface.ts b/src/transport/batchedNetworkInterface.ts
--- a/src/transport/batchedNetworkInterface.ts
+++ b/src/transport/batchedNetworkInterface.ts
@@ -2,11 +2,6 @@ import {
ExecutionResult,
} from 'graphql';
-import 'whatwg-fetch';
-
-import assign = require('lodash/assign');
-import isNumber = require('lodash/isNumber');
-
import {
HTTPFetchNetworkInterface,
HTTPNetworkInterface,
@@ -19,6 +14,8 @@ import {
QueryBatcher,
} from './batching';
+import { assign } from '../util/assign';
+
// An implementation of the network interface that operates over HTTP and batches
// together requests over the HTTP transport. Note that this implementation will only work correctly
// for GraphQL server implementations that support batching. If such a server is not available, you
@@ -31,7 +28,7 @@ export class HTTPBatchedNetworkInterface extends HTTPFetchNetworkInterface {
constructor(uri: string, pollInterval: number, fetchOpts: RequestInit) {
super(uri, fetchOpts);
- if (!isNumber(pollInterval)) {
+ if (typeof pollInterval !== 'number') {
throw new Error(`pollInterval must be a number, got ${pollInterval}`);
}
@@ -50,7 +47,7 @@ export class HTTPBatchedNetworkInterface extends HTTPFetchNetworkInterface {
// made public for testing only
public batchQuery(requests: Request[]): Promise<ExecutionResult[]> {
- const options = assign({}, this._opts);
+ const options = { ...this._opts };
// Apply the middlewares to each of the requests
const middlewarePromises: Promise<RequestAndOptions>[] = [];
@@ -118,15 +115,17 @@ export class HTTPBatchedNetworkInterface extends HTTPFetchNetworkInterface {
return printRequest(request);
});
- return fetch(this._uri, assign({}, this._opts, {
+ return fetch(this._uri, {
+ ...this._opts,
body: JSON.stringify(printedRequests),
method: 'POST',
- }, options, {
- headers: assign({}, {
+ ...options,
+ headers: {
Accept: '*/*',
'Content-Type': 'application/json',
- }, options.headers),
- }));
+ ...(options.headers as { [headerName: string]: string }),
+ },
+ });
};
}
diff --git a/src/transport/networkInterface.ts b/src/transport/networkInterface.ts
--- a/src/transport/networkInterface.ts
+++ b/src/transport/networkInterface.ts
@@ -1,8 +1,3 @@
-import isString = require('lodash/isString');
-import assign = require('lodash/assign');
-import mapValues = require('lodash/mapValues');
-import 'whatwg-fetch';
-
import {
ExecutionResult,
DocumentNode,
@@ -77,9 +72,10 @@ export interface ResponseAndOptions {
}
export function printRequest(request: Request): PrintedRequest {
- return mapValues(request, (val: any, key: any) => {
- return key === 'query' ? print(val) : val;
- }) as any as PrintedRequest;
+ return {
+ ...request,
+ query: print(request.query),
+ };
}
// TODO: refactor
@@ -95,12 +91,12 @@ export class HTTPFetchNetworkInterface implements NetworkInterface {
throw new Error('A remote enpdoint is required for a network layer');
}
- if (!isString(uri)) {
+ if (typeof uri !== 'string') {
throw new Error('Remote endpoint must be a string');
}
this._uri = uri;
- this._opts = assign({}, opts);
+ this._opts = { ...opts };
this._middlewares = [];
this._afterwares = [];
}
@@ -159,19 +155,21 @@ export class HTTPFetchNetworkInterface implements NetworkInterface {
request,
options,
}: RequestAndOptions): Promise<IResponse> {
- return fetch(this._uri, assign({}, this._opts, {
+ return fetch(this._uri, {
+ ...this._opts,
body: JSON.stringify(printRequest(request)),
method: 'POST',
- }, options, {
- headers: assign({}, {
+ ...options,
+ headers: {
Accept: '*/*',
'Content-Type': 'application/json',
- }, options.headers),
- }));
+ ...(options.headers as { [headerName: string]: string }),
+ },
+ });
};
public query(request: Request): Promise<ExecutionResult> {
- const options = assign({}, this._opts);
+ const options = { ...this._opts };
return this.applyMiddlewares({
request,
@@ -234,7 +232,7 @@ export function createNetworkInterface(
// We want to change the API in the future so that you just pass all of the options as one
// argument, so even though the internals work with two arguments we're warning here.
- if (isString(uriOrInterfaceOpts)) {
+ if (typeof uriOrInterfaceOpts === 'string') {
console.warn(`Passing the URI as the first argument to createNetworkInterface is deprecated \
as of Apollo Client 0.5. Please pass it as the "uri" property of the network interface options.`);
opts = secondArgOpts;
@@ -244,5 +242,15 @@ as of Apollo Client 0.5. Please pass it as the "uri" property of the network int
uri = (uriOrInterfaceOpts as NetworkInterfaceOptions).uri;
}
+ // Warn if there is no global `fetch` implementation.
+ if (typeof fetch === 'undefined') {
+ console.warn([
+      '[apollo-client]: An implementation of the fetch browser API could not be found. Apollo',
+      'Client requires fetch to execute GraphQL queries against your API server. Please include',
+      'a global fetch implementation such as [whatwg-fetch](http://npmjs.com/whatwg-fetch) so',
+      'that Apollo Client can run in this environment.',
+ ].join(' '));
+ }
+
return new HTTPFetchNetworkInterface(uri, opts);
}
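
The fetch call now builds its options with object spread instead of nested assign calls. Later spreads win, and headers are merged explicitly so middleware-supplied headers override the defaults. A sketch of the precedence (values are made up):

const defaults: RequestInit = { credentials: 'include' };
const middleware: RequestInit = { headers: { Authorization: 'Bearer abc' } };

const fetchOptions: RequestInit = {
  ...defaults,
  body: JSON.stringify({ query: '{ a }' }),
  method: 'POST',
  ...middleware,
  headers: {
    Accept: '*/*',
    'Content-Type': 'application/json',
    ...(middleware.headers as { [name: string]: string }),
  },
};

console.log(fetchOptions.method);                         // 'POST'
console.log((fetchOptions.headers as any).Authorization); // 'Bearer abc'
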
diff --git a/src/util/assign.ts b/src/util/assign.ts
new file mode 100644
--- /dev/null
+++ b/src/util/assign.ts
@@ -0,0 +1,20 @@
+/**
+ * Adds the properties of one or more source objects to a target object. Works exactly like
+ * `Object.assign`, but as a utility to maintain support for IE 11.
+ *
+ * @see https://github.com/apollostack/apollo-client/pull/1009
+ */
+export function assign <A, B>(a: A, b: B): A & B;
+export function assign <A, B, C>(a: A, b: B, c: C): A & B & C;
+export function assign <A, B, C, D>(a: A, b: B, c: C, d: D): A & B & C & D;
+export function assign <A, B, C, D, E>(a: A, b: B, c: C, d: D, e: E): A & B & C & D & E;
+export function assign (target: any, ...sources: Array<any>): any;
+export function assign (
+ target: { [key: string]: any },
+  ...sources: Array<{ [key: string]: any }>
+): { [key: string]: any } {
+ sources.forEach(source => Object.keys(source).forEach(key => {
+ target[key] = source[key];
+ }));
+ return target;
+}
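
The overload signatures exist so call sites keep precise intersection types instead of collapsing to `any`; usage is the same as Object.assign. Illustrative only (import path assumed relative to src):

import { assign } from './util/assign';

const merged = assign({ a: 1 }, { b: 'two' }); // inferred as { a: number } & { b: string }
console.log(merged.a + merged.b.length); // 4 — both properties are statically known
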
diff --git a/src/util/cloneDeep.ts b/src/util/cloneDeep.ts
new file mode 100644
--- /dev/null
+++ b/src/util/cloneDeep.ts
@@ -0,0 +1,23 @@
+/**
+ * Deeply clones a value to create a new instance.
+ */
+export function cloneDeep <T>(value: T): T {
+ // If the value is an array, create a new array where every item has been cloned.
+ if (Array.isArray(value)) {
+ return value.map(item => cloneDeep(item)) as any;
+ }
+ // If the value is an object, go through all of the object’s properties and add them to a new
+ // object.
+ if (value !== null && typeof value === 'object') {
+ const nextValue: any = {};
+ for (const key in value) {
+ if (value.hasOwnProperty(key)) {
+ nextValue[key] = cloneDeep(value[key]);
+ }
+ }
+ return nextValue;
+ }
+  // Otherwise this is some primitive value; it is immutable, so we can just return it
+  // directly.
+ return value;
+}
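
Usage note (illustrative, not from the patch): the clone shares no object or array references with its source, so mutating one never leaks into the other. Import path assumed relative to src:

import { cloneDeep } from './util/cloneDeep';

const original = { tags: ['a', 'b'], meta: { count: 2 } };
const copy = cloneDeep(original);

copy.meta.count = 99;
console.log(original.meta.count);          // 2 — untouched
console.log(copy.tags === original.tags);  // false — arrays are cloned too
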
diff --git a/src/util/isEqual.ts b/src/util/isEqual.ts
new file mode 100644
--- /dev/null
+++ b/src/util/isEqual.ts
@@ -0,0 +1,35 @@
+/**
+ * Performs a deep equality check on two JavaScript values.
+ */
+export function isEqual (a: any, b: any): boolean {
+ // If the two values are strictly equal, we are good.
+ if (a === b) {
+ return true;
+ }
+ // If a and b are both objects, we will compare their properties. This will compare arrays as
+ // well.
+ if (a != null && typeof a === 'object' && b != null && typeof b === 'object') {
+    // Compare all of the keys in `a`. If one of the keys has a different value, or that key
+    // does not exist in `b`, return false immediately.
+ for (const key in a) {
+ if (a.hasOwnProperty(key)) {
+ if (!b.hasOwnProperty(key)) {
+ return false;
+ }
+ if (!isEqual(a[key], b[key])) {
+ return false;
+ }
+ }
+ }
+ // Look through all the keys in `b`. If `b` has a key that `a` does not, return false.
+ for (const key in b) {
+ if (!a.hasOwnProperty(key)) {
+ return false;
+ }
+ }
+    // If we made it this far, the objects are equal!
+ return true;
+ }
+ // Otherwise the values are not equal.
+ return false;
+}
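
Usage note (illustrative): unlike a reference check, this compares structure, and key presence matters in both directions. Import path assumed relative to src:

import { isEqual } from './util/isEqual';

console.log(isEqual({ a: [1, 2] }, { a: [1, 2] }));     // true: deep structural equality
console.log(isEqual({ a: 1 }, { a: 1, b: undefined })); // false: `b` has an extra own key
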
diff --git a/src/util/maybeDeepFreeze.ts b/src/util/maybeDeepFreeze.ts
new file mode 100644
--- /dev/null
+++ b/src/util/maybeDeepFreeze.ts
@@ -0,0 +1,22 @@
+// taken straight from https://github.com/substack/deep-freeze to avoid import hassles with rollup
+function deepFreeze (o: any) {
+ Object.freeze(o);
+
+ Object.getOwnPropertyNames(o).forEach(function (prop) {
+ if (o.hasOwnProperty(prop)
+ && o[prop] !== null
+ && (typeof o[prop] === 'object' || typeof o[prop] === 'function')
+ && !Object.isFrozen(o[prop])) {
+ deepFreeze(o[prop]);
+ }
+ });
+
+ return o;
+};
+
+export default function maybeDeepFreeze(obj: any) {
+ if (process.env.NODE_ENV === 'development' || process.env.NODE_ENV === 'test') {
+ return deepFreeze(obj);
+ }
+ return obj;
+}
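
An illustrative sketch of the intent: in development and test, results are frozen so accidental mutation fails loudly (the `deepFreezes results in development mode` test below relies on exactly this).

const result = Object.freeze({ stuff: 'wonderful' });
try {
  (result as any).stuff = 'awful'; // throws TypeError in strict mode
} catch (e) {
  console.log('mutation rejected:', (e as Error).message);
}
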
| diff --git a/test/ObservableQuery.ts b/test/ObservableQuery.ts
--- a/test/ObservableQuery.ts
+++ b/test/ObservableQuery.ts
@@ -215,7 +215,7 @@ describe('ObservableQuery', () => {
const variables2 = { first: 1 };
- const observable: ObservableQuery = mockWatchQuery({
+ const observable: ObservableQuery<any> = mockWatchQuery({
request: { query: queryWithVars, variables: variables1 },
result: { data },
}, {
@@ -236,7 +236,7 @@ describe('ObservableQuery', () => {
});
it('does a network request if forceFetch becomes true', (done) => {
- const observable: ObservableQuery = mockWatchQuery({
+ const observable: ObservableQuery<any> = mockWatchQuery({
request: { query, variables },
result: { data: dataOne },
}, {
@@ -257,7 +257,7 @@ describe('ObservableQuery', () => {
it('does a network request if noFetch becomes true then store is reset then noFetch becomes false', (done) => {
let queryManager: QueryManager = null;
- let observable: ObservableQuery = null;
+ let observable: ObservableQuery<any> = null;
const testQuery = gql`
query {
author {
@@ -310,7 +310,7 @@ describe('ObservableQuery', () => {
it('does a network request if noFetch becomes false', (done) => {
let queryManager: QueryManager = null;
- let observable: ObservableQuery = null;
+ let observable: ObservableQuery<any> = null;
const testQuery = gql`
query {
author {
@@ -355,7 +355,7 @@ describe('ObservableQuery', () => {
describe('setVariables', () => {
it('reruns query if the variables change', (done) => {
- const observable: ObservableQuery = mockWatchQuery({
+ const observable: ObservableQuery<any> = mockWatchQuery({
request: { query, variables },
result: { data: dataOne },
}, {
@@ -449,7 +449,7 @@ describe('ObservableQuery', () => {
});
it('reruns observer callback if the variables change but data does not', (done) => {
- const observable: ObservableQuery = mockWatchQuery({
+ const observable: ObservableQuery<any> = mockWatchQuery({
request: { query, variables },
result: { data: dataOne },
}, {
@@ -482,7 +482,7 @@ describe('ObservableQuery', () => {
manager.query({ query, variables: differentVariables })
.then(() => {
- const observable: ObservableQuery = manager.watchQuery({ query, variables });
+ const observable: ObservableQuery<any> = manager.watchQuery({ query, variables });
let errored = false;
subscribeAndCount(done, observable, (handleCount, result) => {
@@ -501,7 +501,7 @@ describe('ObservableQuery', () => {
});
it('does not rerun query if variables do not change', (done) => {
- const observable: ObservableQuery = mockWatchQuery({
+ const observable: ObservableQuery<any> = mockWatchQuery({
request: { query, variables },
result: { data: dataOne },
}, {
@@ -528,7 +528,7 @@ describe('ObservableQuery', () => {
// The expected behavior is that the original variables are forgotten
// and the query stays in loading state until the result for the new variables
// has returned.
- const observable: ObservableQuery = mockWatchQuery({
+ const observable: ObservableQuery<any> = mockWatchQuery({
request: { query, variables },
result: { data: dataOne },
delay: 20,
@@ -553,7 +553,7 @@ describe('ObservableQuery', () => {
describe('currentResult', () => {
it('returns the current query status immediately', (done) => {
- const observable: ObservableQuery = mockWatchQuery({
+ const observable: ObservableQuery<any> = mockWatchQuery({
request: { query, variables },
result: { data: dataOne },
delay: 100,
diff --git a/test/QueryManager.ts b/test/QueryManager.ts
--- a/test/QueryManager.ts
+++ b/test/QueryManager.ts
@@ -15,10 +15,6 @@ import {
ApolloStore,
} from '../src/store';
-import {
- getIdField,
-} from '../src/data/extensions';
-
import gql from 'graphql-tag';
import {
@@ -36,13 +32,13 @@ import ApolloClient, {
import {
ApolloQueryResult,
-} from '../src/core/QueryManager';
+} from '../src/core/types';
import { createStore, combineReducers, applyMiddleware } from 'redux';
import * as Rx from 'rxjs';
-import assign = require('lodash/assign');
+import { assign } from 'lodash';
import mockNetworkInterface, {
ParsedRequest,
@@ -62,7 +58,7 @@ import {
import { NetworkStatus } from '../src/queries/store';
-import wrap from './util/wrap';
+import wrap, { withWarning } from './util/wrap';
import observableToPromise, {
observableToPromiseAndSubscription,
@@ -122,7 +118,7 @@ describe('QueryManager', () => {
error?: Error,
result?: ExecutionResult,
delay?: number,
- observer: Observer<ApolloQueryResult>,
+ observer: Observer<ApolloQueryResult<any>>,
}) => {
const queryManager = mockQueryManager({
request: { query, variables },
@@ -131,7 +127,7 @@ describe('QueryManager', () => {
delay,
});
const finalOptions = assign({ query, variables }, queryOptions) as WatchQueryOptions;
- return queryManager.watchQuery(finalOptions).subscribe({
+ return queryManager.watchQuery<any>(finalOptions).subscribe({
next: wrap(done, observer.next),
error: observer.error,
});
@@ -208,12 +204,14 @@ describe('QueryManager', () => {
request,
firstResult,
secondResult,
+ thirdResult,
}: {
request: ParsedRequest,
firstResult: ExecutionResult,
secondResult: ExecutionResult,
+ thirdResult?: ExecutionResult,
}) => {
- return mockQueryManager(
+ const args = [
{
request,
result: firstResult,
@@ -222,7 +220,13 @@ describe('QueryManager', () => {
request,
result: secondResult,
},
- );
+ ];
+
+ if (thirdResult) {
+ args.push({ request, result: thirdResult });
+ }
+
+ return mockQueryManager(...args);
};
it('properly roundtrips through a Redux store', (done) => {
@@ -582,9 +586,9 @@ describe('QueryManager', () => {
let subOneCount = 0;
// pre populate data to avoid contention
- queryManager.query(request)
+ queryManager.query<any>(request)
.then(() => {
- const handle = queryManager.watchQuery(request);
+ const handle = queryManager.watchQuery<any>(request);
const subOne = handle.subscribe({
next(result) {
@@ -658,7 +662,7 @@ describe('QueryManager', () => {
secondResult: { data: data2 },
});
- const observable = queryManager.watchQuery(request);
+ const observable = queryManager.watchQuery<any>(request);
return observableToPromise({ observable },
(result) => {
assert.deepEqual(result.data, data1);
@@ -668,6 +672,123 @@ describe('QueryManager', () => {
);
});
+ it('will return referentially equivalent data if nothing changed in a refetch', done => {
+ const request = {
+ query: gql`
+ {
+ a
+ b { c }
+ d { e f { g } }
+ }
+ `,
+ };
+
+ const data1 = {
+ a: 1,
+ b: { c: 2 },
+ d: { e: 3, f: { g: 4 } },
+ };
+
+ const data2 = {
+ a: 1,
+ b: { c: 2 },
+ d: { e: 30, f: { g: 4 } },
+ };
+
+ const data3 = {
+ a: 1,
+ b: { c: 2 },
+ d: { e: 3, f: { g: 4 } },
+ };
+
+ const queryManager = mockRefetch({
+ request,
+ firstResult: { data: data1 },
+ secondResult: { data: data2 },
+ thirdResult: { data: data3 },
+ });
+
+ const observable = queryManager.watchQuery<any>(request);
+
+ let count = 0;
+ let firstResultData: any;
+
+ observable.subscribe({
+ next: result => {
+ try {
+ switch (count++) {
+ case 0:
+ assert.deepEqual(result.data, data1);
+ firstResultData = result.data;
+ observable.refetch();
+ break;
+ case 1:
+ assert.deepEqual(result.data, data2);
+ assert.notStrictEqual(result.data, firstResultData);
+ assert.strictEqual(result.data.b, firstResultData.b);
+ assert.notStrictEqual(result.data.d, firstResultData.d);
+ assert.strictEqual(result.data.d.f, firstResultData.d.f);
+ observable.refetch();
+ break;
+ case 2:
+ assert.deepEqual(result.data, data3);
+ assert.notStrictEqual(result.data, firstResultData);
+ assert.strictEqual(result.data.b, firstResultData.b);
+ assert.notStrictEqual(result.data.d, firstResultData.d);
+ assert.strictEqual(result.data.d.f, firstResultData.d.f);
+ done();
+ break;
+ default:
+              throw new Error('next was called too many times.');
+ }
+ } catch (error) {
+ done(error);
+ }
+ },
+ error: error =>
+ done(error),
+ });
+ });
+
+ it('will return referentially equivalent data in getCurrentResult if nothing changed', done => {
+ const request = {
+ query: gql`
+ {
+ a
+ b { c }
+ d { e f { g } }
+ }
+ `,
+ };
+
+ const data1 = {
+ a: 1,
+ b: { c: 2 },
+ d: { e: 3, f: { g: 4 } },
+ };
+
+ const queryManager = mockQueryManager({
+ request,
+ result: { data: data1 },
+ });
+
+ const observable = queryManager.watchQuery<any>(request);
+
+ observable.subscribe({
+ next: result => {
+ try {
+ assert.deepEqual(result.data, data1);
+ assert.strictEqual(result.data, observable.currentResult().data);
+ done();
+ } catch (error) {
+ done(error);
+ }
+ },
+ error: error =>
+ done(error),
+ });
+ });
+
it('sets networkStatus to `refetch` when refetching', () => {
const request = {
query: gql`
@@ -699,7 +820,7 @@ describe('QueryManager', () => {
secondResult: { data: data2 },
});
- const observable = queryManager.watchQuery(request);
+ const observable = queryManager.watchQuery<any>(request);
return observableToPromise({ observable },
(result) => {
assert.deepEqual(result.data, data1);
@@ -740,7 +861,7 @@ describe('QueryManager', () => {
secondResult: { data: data2 },
});
- const handle = queryManager.watchQuery(request);
+ const handle = queryManager.watchQuery<any>(request);
handle.subscribe({});
return handle.refetch().then(
@@ -808,7 +929,7 @@ describe('QueryManager', () => {
},
);
- const observable = queryManager.watchQuery({ query });
+ const observable = queryManager.watchQuery<any>({ query });
return observableToPromise({ observable },
(result) => {
assert.deepEqual(result.data, data1);
@@ -868,7 +989,7 @@ describe('QueryManager', () => {
},
);
- const observable = queryManager.watchQuery({ query });
+ const observable = queryManager.watchQuery<any>({ query });
const originalOptions = assign({}, observable.options);
return observableToPromise({ observable },
(result) => {
@@ -927,7 +1048,7 @@ describe('QueryManager', () => {
},
);
- const observable = queryManager.watchQuery({
+ const observable = queryManager.watchQuery<any>({
query,
pollInterval: 200,
});
@@ -988,7 +1109,7 @@ describe('QueryManager', () => {
},
);
- const observable = queryManager.watchQuery({
+ const observable = queryManager.watchQuery<any>({
query,
pollInterval: 30,
notifyOnNetworkStatusChange: true,
@@ -1063,10 +1184,10 @@ describe('QueryManager', () => {
);
// First, prime the store so that query diffing removes the query
- return queryManager.query({
+ return queryManager.query<any>({
query: primeQuery,
}).then(() => {
- const handle = queryManager.watchQuery({
+ const handle = queryManager.watchQuery<any>({
query: complexQuery,
returnPartialData: true,
});
@@ -1078,6 +1199,21 @@ describe('QueryManager', () => {
});
});
+ it('deepFreezes results in development mode', () => {
+ const query = gql`{ stuff }`;
+ const data = { stuff: 'wonderful' };
+ const queryManager = mockQueryManager({
+ request: { query },
+ result: { data },
+ });
+
+ return queryManager.query({ query })
+ .then(result => {
+ assert.deepEqual(result.data, data);
+ assert.throws( () => (result.data as any).stuff = 'awful' );
+ });
+ });
+
it('should error if we pass noFetch on a polling query', (done) => {
assert.throw(() => {
assertWithObserver({
@@ -1134,10 +1270,10 @@ describe('QueryManager', () => {
);
// First, prime the cache
- return queryManager.query({
+ return queryManager.query<any>({
query: primeQuery,
}).then(() => {
- const handle = queryManager.watchQuery({
+ const handle = queryManager.watchQuery<any>({
query: complexQuery,
noFetch: true,
});
@@ -1170,6 +1306,8 @@ describe('QueryManager', () => {
});
});
+ const getIdField = ({id}: {id: string}) => id;
+
it('runs a mutation with object parameters and puts the result in the store', () => {
const data = {
makeListPrivate: {
@@ -1342,6 +1480,83 @@ describe('QueryManager', () => {
);
});
+ it('does not call broadcastNewStore when Apollo state is not affected by an action', () => {
+ const query = gql`
+ query fetchLuke($id: String) {
+ people_one(id: $id) {
+ name
+ }
+ }
+ `;
+
+ const variables = {
+ id: '1',
+ };
+
+ const data1 = {
+ people_one: {
+ name: 'Luke Skywalker',
+ },
+ };
+
+ const data2 = {
+ people_one: {
+ name: 'Luke Skywalker has a new name',
+ },
+ };
+
+ function testReducer (state = false, action: any): boolean {
+ if (action.type === 'TOGGLE') {
+ return true;
+ }
+ return state;
+ }
+ const client = new ApolloClient();
+ const store = createStore(
+ combineReducers({
+ test: testReducer,
+ apollo: client.reducer() as any, // XXX see why this type fails
+ }),
+ applyMiddleware(client.middleware()),
+ );
+ const qm = createQueryManager({
+ networkInterface: mockNetworkInterface(
+ {
+ request: { query, variables },
+ result: { data: data1 },
+ },
+ {
+ request: { query, variables },
+ result: { data: data2 },
+ },
+ ),
+ store: store,
+ });
+
+ const observable = qm.watchQuery({ query, variables });
+
+ return observableToPromise({ observable },
+ (result) => {
+ assert.deepEqual(result.data, data1);
+ observable.refetch();
+ },
+ (result) => {
+ assert.deepEqual(result.data, data2);
+
+ // here's the actual test. Everything else is just setup.
+ let called = false;
+ client.queryManager.broadcastNewStore = (s: any) => {
+ called = true;
+ };
+ store.dispatch({
+ type: 'TOGGLE',
+ });
+ assert.equal((store.getState() as any).test, true, 'test state should have been updated');
+ assert.equal(called, false, 'broadcastNewStore should not have been called');
+ },
+ );
+ });
+
it(`doesn't return data while query is loading`, () => {
const query1 = gql`
{
@@ -1383,8 +1598,8 @@ describe('QueryManager', () => {
},
);
- const observable1 = queryManager.watchQuery({ query: query1 });
- const observable2 = queryManager.watchQuery({ query: query2 });
+ const observable1 = queryManager.watchQuery<any>({ query: query1 });
+ const observable2 = queryManager.watchQuery<any>({ query: query2 });
return Promise.all([
observableToPromise({ observable: observable1 },
@@ -1441,11 +1656,11 @@ describe('QueryManager', () => {
},
);
- const observable = queryManager.watchQuery({ query: query1 });
+ const observable = queryManager.watchQuery<any>({ query: query1 });
return observableToPromise({ observable },
(result) => {
assert.deepEqual(result.data, data1);
- queryManager.query({ query: query2 });
+ queryManager.query<any>({ query: query2 });
},
// 3 because the query init action for the second query causes a callback
(result) => assert.deepEqual(result.data, {
@@ -1492,7 +1707,7 @@ describe('QueryManager', () => {
result: { data: data2 },
},
);
- const observable = queryManager.watchQuery({
+ const observable = queryManager.watchQuery<any>({
query,
variables,
pollInterval: 50,
@@ -1502,6 +1717,7 @@ describe('QueryManager', () => {
(result) => assert.deepEqual(result.data, data1),
(result) => assert.deepEqual(result.data, data2),
);
+
});
it('should let you handle multiple polled queries and unsubscribe from one of them', (done) => {
@@ -1711,7 +1927,7 @@ describe('QueryManager', () => {
},
);
- const observable = queryManager.watchQuery({
+ const observable = queryManager.watchQuery<any>({
query,
variables,
pollInterval: 50,
@@ -1769,7 +1985,7 @@ describe('QueryManager', () => {
},
);
- const observable = queryManager.watchQuery({ query, variables });
+ const observable = queryManager.watchQuery<any>({ query, variables });
observable.startPolling(50);
return observableToPromise({ observable },
@@ -1813,7 +2029,7 @@ describe('QueryManager', () => {
result: { data: data2 },
},
);
- const observable = queryManager.watchQuery({
+ const observable = queryManager.watchQuery<any>({
query,
variables,
pollInterval: 50,
@@ -1890,7 +2106,7 @@ describe('QueryManager', () => {
it('warns if you forget the template literal tag', () => {
const queryManager = mockQueryManager();
assert.throws(() => {
- queryManager.query({
+ queryManager.query<any>({
// Bamboozle TypeScript into letting us do this
query: 'string' as any as DocumentNode,
});
@@ -1904,7 +2120,7 @@ describe('QueryManager', () => {
}, /wrap the query string in a "gql" tag/);
assert.throws(() => {
- queryManager.watchQuery({
+ queryManager.watchQuery<any>({
// Bamboozle TypeScript into letting us do this
query: 'string' as any as DocumentNode,
});
@@ -2054,7 +2270,7 @@ describe('QueryManager', () => {
},
};
queryManager = createQueryManager({ networkInterface });
- const observable = queryManager.watchQuery({ query });
+ const observable = queryManager.watchQuery<any>({ query });
// wait just to make sure the observable doesn't fire again
return observableToPromise({ observable, wait: 0 },
@@ -2066,7 +2282,7 @@ describe('QueryManager', () => {
it('should not refetch toredown queries', (done) => {
let queryManager: QueryManager = null;
- let observable: ObservableQuery = null;
+ let observable: ObservableQuery<any> = null;
const query = gql`
query {
author {
@@ -2138,7 +2354,7 @@ describe('QueryManager', () => {
},
};
queryManager = createQueryManager({ networkInterface });
- const observable = queryManager.watchQuery({ query });
+ const observable = queryManager.watchQuery<any>({ query });
// wait to make sure store reset happened
return observableToPromise({ observable, wait: 20 },
@@ -2186,7 +2402,8 @@ describe('QueryManager', () => {
}
}`;
const queryManager = mockQueryManager();
- const mockObservableQuery: ObservableQuery = {
+
+ const mockObservableQuery: ObservableQuery<any> = {
refetch(variables: any): Promise<ExecutionResult> {
done();
return null;
@@ -2195,10 +2412,10 @@ describe('QueryManager', () => {
query: query,
},
scheduler: queryManager.scheduler,
- } as any as ObservableQuery;
+ } as any as ObservableQuery<any>;
const queryId = 'super-fake-id';
- queryManager.addObservableQuery(queryId, mockObservableQuery);
+ queryManager.addObservableQuery<any>(queryId, mockObservableQuery);
queryManager.resetStore();
});
@@ -2215,7 +2432,7 @@ describe('QueryManager', () => {
options.noFetch = true;
options.query = query;
let refetchCount = 0;
- const mockObservableQuery: ObservableQuery = {
+ const mockObservableQuery: ObservableQuery<any> = {
refetch(variables: any): Promise<ExecutionResult> {
refetchCount ++;
done();
@@ -2223,10 +2440,10 @@ describe('QueryManager', () => {
},
options,
queryManager: queryManager,
- } as any as ObservableQuery;
+ } as any as ObservableQuery<any>;
const queryId = 'super-fake-id';
- queryManager.addObservableQuery(queryId, mockObservableQuery);
+ queryManager.addObservableQuery<any>(queryId, mockObservableQuery);
queryManager.resetStore();
setTimeout(() => {
assert.equal(refetchCount, 0);
@@ -2260,7 +2477,7 @@ describe('QueryManager', () => {
};
queryManager = createQueryManager({ networkInterface });
- queryManager.query({ query }).then((result) => {
+ queryManager.query<any>({ query }).then((result) => {
done(new Error('query() gave results on a store reset'));
}).catch((error) => {
done();
@@ -2374,10 +2591,10 @@ describe('QueryManager', () => {
error: new Error('Network error ocurred'),
},
);
- queryManager.query({ query }).then((result) => {
+ queryManager.query<any>({ query }).then((result) => {
assert.deepEqual(result.data, data);
- queryManager.query({ query, forceFetch: true }).then(() => {
+ queryManager.query<any>({ query, forceFetch: true }).then(() => {
done(new Error('Returned a result when it was not supposed to.'));
}).catch((error) => {
// make that the error thrown doesn't empty the state
@@ -2445,7 +2662,7 @@ describe('QueryManager', () => {
error: new Error('Network error occurred.'),
},
);
- const observable = queryManager.watchQuery({ query, pollInterval: 20 });
+ const observable = queryManager.watchQuery<any>({ query, pollInterval: 20 });
return observableToPromise({
observable,
@@ -2495,7 +2712,7 @@ describe('QueryManager', () => {
},
);
- const observable = queryManager.watchQuery({ query });
+ const observable = queryManager.watchQuery<any>({ query });
return Promise.all<any[] | void>([
// we wait for a little bit to ensure the result of the second query
// don't trigger another subscription event
@@ -2504,7 +2721,7 @@ describe('QueryManager', () => {
assert.deepEqual(result.data, data);
},
),
- queryManager.query({ query }).then((result) => {
+ queryManager.query<any>({ query }).then((result) => {
assert.deepEqual(result.data, data);
}),
]);
@@ -2607,8 +2824,8 @@ describe('QueryManager', () => {
store,
});
- const observable1 = queryManager.watchQuery({ query: query1 });
- const observable2 = queryManager.watchQuery({ query: query2 });
+ const observable1 = queryManager.watchQuery<any>({ query: query1 });
+ const observable2 = queryManager.watchQuery<any>({ query: query2 });
// I'm not sure the waiting 60 here really is required, but the test used to do it
return Promise.all([
@@ -2678,8 +2895,8 @@ describe('QueryManager', () => {
store,
});
- const observableWithId = queryManager.watchQuery({ query: queryWithId });
- const observableWithoutId = queryManager.watchQuery({ query: queryWithoutId });
+ const observableWithId = queryManager.watchQuery<any>({ query: queryWithId });
+ const observableWithoutId = queryManager.watchQuery<any>({ query: queryWithoutId });
// I'm not sure the waiting 60 here really is required, but the test used to do it
return Promise.all([
@@ -2766,8 +2983,8 @@ describe('QueryManager', () => {
store,
});
- const observableWithId = queryManager.watchQuery({ query: queryWithId });
- const observableWithoutId = queryManager.watchQuery({ query: queryWithoutId });
+ const observableWithId = queryManager.watchQuery<any>({ query: queryWithId });
+ const observableWithoutId = queryManager.watchQuery<any>({ query: queryWithoutId });
// I'm not sure the waiting 60 here really is required, but the test used to do it
return Promise.all([
@@ -2829,8 +3046,8 @@ describe('QueryManager', () => {
},
);
- return queryManager.query({ query: primeQuery }).then((primeResult) => {
- const observable = queryManager.watchQuery({ query, returnPartialData: true });
+ return queryManager.query<any>({ query: primeQuery }).then((primeResult) => {
+ const observable = queryManager.watchQuery<any>({ query, returnPartialData: true });
return observableToPromise({ observable },
(result) => {
@@ -2937,7 +3154,7 @@ describe('QueryManager', () => {
result: { data: mutationData },
},
);
- const observable = queryManager.watchQuery({ query });
+ const observable = queryManager.watchQuery<any>({ query });
return observableToPromise({ observable },
(result) => {
assert.deepEqual(result.data, data);
@@ -2994,7 +3211,7 @@ describe('QueryManager', () => {
result: { data: mutationData },
},
);
- const observable = queryManager.watchQuery({ query });
+ const observable = queryManager.watchQuery<any>({ query });
return observableToPromise({ observable },
(result) => {
assert.deepEqual(result.data, data);
@@ -3056,7 +3273,7 @@ describe('QueryManager', () => {
},
);
- const observable = queryManager.watchQuery({ query });
+ const observable = queryManager.watchQuery<any>({ query });
return observableToPromise({ observable },
(result) => {
assert.deepEqual(result.data, data);
@@ -3082,31 +3299,33 @@ describe('QueryManager', () => {
let transformCount: number;
beforeEach(() => {
- transformCount = 0;
+ withWarning( () => {
+ transformCount = 0;
- const networkInterface: NetworkInterface = {
- query(request: Request): Promise<ExecutionResult> {
- return Promise.resolve(response);
- },
- };
-
- client = new ApolloClient({
- networkInterface,
- resultTransformer(result: ExecutionResult) {
- transformCount++;
- return {
- data: assign({}, result.data, {transformCount}),
- loading: false,
- networkStatus: NetworkStatus.ready,
- };
- },
- });
+ const networkInterface: NetworkInterface = {
+ query(request: Request): Promise<ExecutionResult> {
+ return Promise.resolve(response);
+ },
+ };
+
+ client = new ApolloClient({
+ networkInterface,
+ resultTransformer(result: ExecutionResult) {
+ transformCount++;
+ return {
+ data: assign({}, result.data, {transformCount}),
+ loading: false,
+ networkStatus: NetworkStatus.ready,
+ };
+ },
+ });
+ }, /resultTransformer/);
});
it('transforms query() results', () => {
response = {data: {foo: 123}};
return client.query({query: gql`{ foo }`})
- .then((result: ApolloQueryResult) => {
+ .then((result: ApolloQueryResult<any>) => {
assert.deepEqual(result.data, {foo: 123, transformCount: 1});
});
});
@@ -3147,7 +3366,7 @@ describe('QueryManager', () => {
it('transforms mutate() results', () => {
response = {data: {foo: 123}};
return client.mutate({mutation: gql`mutation makeChanges { foo }`})
- .then((result: ApolloQueryResult) => {
+ .then((result: ApolloQueryResult<any>) => {
assert.deepEqual(result.data, {foo: 123, transformCount: 1});
});
});
@@ -3162,25 +3381,27 @@ describe('QueryManager', () => {
let response: any;
beforeEach(() => {
- const networkInterface: NetworkInterface = {
- query(request: Request): Promise<ExecutionResult> {
- return Promise.resolve(response);
- },
- };
+ withWarning( () => {
+ const networkInterface: NetworkInterface = {
+ query(request: Request): Promise<ExecutionResult> {
+ return Promise.resolve(response);
+ },
+ };
- client = new ApolloClient({
- networkInterface,
- resultTransformer(result: ApolloQueryResult) {
- result.data.__proto__ = Model.prototype;
- return result;
- },
- resultComparator(result1: ApolloQueryResult, result2: ApolloQueryResult) {
- // A real example would, say, deep compare the two while ignoring prototypes.
- const foo1 = result1 && result1.data && result1.data.foo;
- const foo2 = result2 && result2.data && result2.data.foo;
- return foo1 === foo2;
- },
- });
+ client = new ApolloClient({
+ networkInterface,
+ resultTransformer(result: ApolloQueryResult<any>) {
+ result.data.__proto__ = Model.prototype;
+ return result;
+ },
+ resultComparator(result1: ApolloQueryResult<any>, result2: ApolloQueryResult<any>) {
+ // A real example would, say, deep compare the two while ignoring prototypes.
+ const foo1 = result1 && result1.data && result1.data.foo;
+ const foo2 = result2 && result2.data && result2.data.foo;
+ return foo1 === foo2;
+ },
+ });
+ }, /resultTransformer/);
});
it('does not transform identical watchQuery() results, according to the comparator', () => {
@@ -3233,7 +3454,7 @@ describe('QueryManager', () => {
const queryManager = mockRefetch({ request, firstResult, secondResult });
- const handle = queryManager.watchQuery(request);
+ const handle = queryManager.watchQuery<any>(request);
handle.subscribe({
error: () => { /* nothing */ },
diff --git a/test/assign.ts b/test/assign.ts
new file mode 100644
--- /dev/null
+++ b/test/assign.ts
@@ -0,0 +1,30 @@
+import { assign } from '../src/util/assign';
+import { assert } from 'chai';
+
+describe('assign', () => {
+ it('will merge many objects together', () => {
+ assert.deepEqual(assign({ a: 1 }, { b: 2 }), { a: 1, b: 2 });
+ assert.deepEqual(assign({ a: 1 }, { b: 2 }, { c: 3 }), { a: 1, b: 2, c: 3 });
+ assert.deepEqual(assign({ a: 1 }, { b: 2 }, { c: 3 }, { d: 4 }), { a: 1, b: 2, c: 3, d: 4 });
+ });
+
+ it('will merge many objects together shallowly', () => {
+ assert.deepEqual(assign({ x: { a: 1 } }, { x: { b: 2 } }), { x: { b: 2 } });
+ assert.deepEqual(assign({ x: { a: 1 } }, { x: { b: 2 } }, { x: { c: 3 } }), { x: { c: 3 } });
+ assert.deepEqual(assign({ x: { a: 1 } }, { x: { b: 2 } }, { x: { c: 3 } }, { x: { d: 4 } }), { x: { d: 4 } });
+ });
+
+ it('will mutate and return the source objects', () => {
+ const source1 = { a: 1 };
+ const source2 = { a: 1 };
+ const source3 = { a: 1 };
+
+ assert.strictEqual(assign(source1, { b: 2 }), source1);
+ assert.strictEqual(assign(source2, { b: 2 }, { c: 3 }), source2);
+ assert.strictEqual(assign(source3, { b: 2 }, { c: 3 }, { d: 4 }), source3);
+
+ assert.deepEqual(source1, { a: 1, b: 2 });
+ assert.deepEqual(source2, { a: 1, b: 2, c: 3 });
+ assert.deepEqual(source3, { a: 1, b: 2, c: 3, d: 4 });
+ });
+});
diff --git a/test/batchedNetworkInterface.ts b/test/batchedNetworkInterface.ts
--- a/test/batchedNetworkInterface.ts
+++ b/test/batchedNetworkInterface.ts
@@ -1,6 +1,6 @@
import { assert } from 'chai';
-import merge = require('lodash/merge');
+import { merge } from 'lodash';
import { HTTPBatchedNetworkInterface } from '../src/transport/batchedNetworkInterface';
@@ -19,8 +19,6 @@ import { AfterwareInterface } from '../src/transport/afterware';
import { ExecutionResult } from 'graphql';
-import 'whatwg-fetch';
-
import gql from 'graphql-tag';
describe('HTTPBatchedNetworkInterface', () => {
diff --git a/test/client.ts b/test/client.ts
--- a/test/client.ts
+++ b/test/client.ts
@@ -3,19 +3,13 @@ const { assert } = chai;
import * as sinon from 'sinon';
import ApolloClient, {
- createFragment,
- clearFragmentDefinitions,
- disableFragmentWarnings,
printAST,
- enableFragmentWarnings,
} from '../src';
import {
disableFragmentWarnings as graphqlTagDisableFragmentWarnings,
} from 'graphql-tag';
-import { fragmentDefinitionsMap } from '../src/fragments';
-
import {
GraphQLError,
ExecutionResult,
@@ -80,15 +74,12 @@ import { withWarning } from './util/wrap';
import observableToPromise from './util/observableToPromise';
-import cloneDeep = require('lodash/cloneDeep');
-
-import assign = require('lodash/assign');
+import { cloneDeep, assign } from 'lodash';
// make it easy to assert with promises
chai.use(chaiAsPromised);
// Turn off warnings for repeated fragment names
-disableFragmentWarnings();
graphqlTagDisableFragmentWarnings();
describe('client', () => {
@@ -1310,461 +1301,6 @@ it('should not let errors in observer.next reach the store', (done) => {
assert.equal(printAST(query), print(query));
});
- describe('fragment referencing', () => {
- afterEach(() => {
- // after each test, we have to empty out fragmentDefinitionsMap since that is
- // global state that will be held across all client instances.
- clearFragmentDefinitions();
- });
-
- it('should return a fragment def with a unique name', () => {
- const fragment = gql`
- fragment authorDetails on Author {
- author {
- firstName
- lastName
- }
- }
- `;
- const fragmentDefs = createFragment(fragment);
- assert.equal(fragmentDefs.length, 1);
- assert.equal(print(fragmentDefs[0]), print(getFragmentDefinitions(fragment)[0]));
- });
-
- it('should correctly return multiple fragments from a single document', () => {
- const fragmentDoc = gql`
- fragment authorDetails on Author {
- firstName
- lastName
- }
- fragment personDetails on Person {
- name
- }
- `;
- const fragmentDefs = createFragment(fragmentDoc);
- assert.equal(fragmentDefs.length, 2);
- const expFragmentDefs = getFragmentDefinitions(fragmentDoc);
- assert.equal(print(fragmentDefs[0]), print(expFragmentDefs[0]));
- assert.equal(print(fragmentDefs[1]), print(expFragmentDefs[1]));
- });
-
- it('should correctly return fragment defs with one fragment depending on another', () => {
- const fragmentDoc = gql`
- fragment authorDetails on Author {
- firstName
- lastName
- ...otherAuthorDetails
- }`;
- const otherFragmentDoc = gql`
- fragment otherFragmentDoc on Author {
- address
- }`;
- const fragmentDefs = createFragment(fragmentDoc, getFragmentDefinitions(otherFragmentDoc));
- assert.equal(fragmentDefs.length, 2);
- const expFragmentDefs = getFragmentDefinitions(otherFragmentDoc)
- .concat(getFragmentDefinitions(fragmentDoc));
- assert.deepEqual(fragmentDefs.map(print), expFragmentDefs.map(print));
- });
-
- it('should return fragment defs with a multiple fragments depending on other fragments', () => {
- const fragmentDoc = gql`
- fragment authorDetails on Author {
- firstName
- lastName
- ...otherAuthorDetails
- }
-
- fragment onlineAuthorDetails on Author {
- email
- ...otherAuthorDetails
- }`;
- const otherFragmentDoc = gql`
- fragment otherAuthorDetails on Author {
- address
- }`;
- const fragmentDefs = createFragment(fragmentDoc, getFragmentDefinitions(otherFragmentDoc));
- assert.equal(fragmentDefs.length, 3);
-
- const expFragmentDefs = getFragmentDefinitions(otherFragmentDoc)
- .concat(getFragmentDefinitions(fragmentDoc));
- assert.deepEqual(fragmentDefs.map(print), expFragmentDefs.map(print));
- });
-
- it('should always return a flat array of fragment defs', () => {
- const fragmentDoc1 = gql`
- fragment authorDetails on Author {
- firstName
- lastName
- ...otherAuthorDetails
- }`;
- const fragmentDoc2 = gql`
- fragment otherAuthorDetails on Author {
- address
- }`;
- const fragmentDoc3 = gql`
- fragment personDetails on Person {
- personDetails
- }`;
- const fragments1 = createFragment(fragmentDoc1);
- const fragments2 = createFragment(fragmentDoc2);
- const fragments3 = createFragment(fragmentDoc3, [fragments1, fragments2]);
- assert.equal(fragments1.length, 1);
- assert.equal(fragments2.length, 1);
- assert.equal(fragments3.length, 3);
- });
-
- it('should add a fragment to the fragmentDefinitionsMap', () => {
- const fragmentDoc = gql`
- fragment authorDetails on Author {
- firstName
- lastName
- }`;
- assert.equal(Object.keys(fragmentDefinitionsMap).length, 0);
- createFragment(fragmentDoc);
- assert.equal(Object.keys(fragmentDefinitionsMap).length, 1);
- assert(fragmentDefinitionsMap.hasOwnProperty('authorDetails'));
- assert.equal(fragmentDefinitionsMap['authorDetails'].length, 1);
- assert.equal(print(fragmentDefinitionsMap['authorDetails']), print(getFragmentDefinitions(fragmentDoc)[0]));
- });
-
- it('should add fragments with the same name to fragmentDefinitionsMap + print warning', () => {
- const fragmentDoc = gql`
- fragment authorDetails on Author {
- firstName
- lastName
- }
- fragment authorDetails on Author {
- address
- }`;
-
- // hacky solution that allows us to test whether the warning is printed
- const oldWarn = console.warn;
- console.warn = (str: string) => {
- if (!str.match(/deprecated/)) {
- assert.include(str, 'Warning: fragment with name');
- }
- };
-
- createFragment(fragmentDoc);
- assert.equal(Object.keys(fragmentDefinitionsMap).length, 1);
- assert.equal(fragmentDefinitionsMap['authorDetails'].length, 2);
- console.warn = oldWarn;
- });
-
- it('should issue a warning if we try query with a conflicting fragment name', () => {
- enableFragmentWarnings();
-
- const client = new ApolloClient({
- networkInterface: mockNetworkInterface(),
- addTypename: false,
- });
- const fragmentDoc = gql`
- fragment authorDetails on Author {
- firstName
- lastName
- }`;
- const queryDoc = gql`
- query {
- author {
- firstName
- lastName
- }
- }
- fragment authorDetails on Author {
- firstName
- lastName
- }`;
- createFragment(fragmentDoc);
-
- withWarning(() => {
- client.query({ query: queryDoc });
- }, /Warning: fragment with name/);
-
- disableFragmentWarnings();
- });
-
- it('should issue a warning if we try to watchQuery with a conflicting fragment name', () => {
- enableFragmentWarnings();
-
- const client = new ApolloClient({
- networkInterface: mockNetworkInterface(),
- addTypename: false,
- });
- const fragmentDoc = gql`
- fragment authorDetails on Author {
- firstName
- lastName
- }`;
- const queryDoc = gql`
- query {
- author {
- firstName
- lastName
- }
- }
- fragment authorDetails on Author {
- firstName
- lastName
- }`;
- createFragment(fragmentDoc);
-
- withWarning(() => {
- client.watchQuery({ query: queryDoc });
- }, /Warning: fragment with name/);
-
- disableFragmentWarnings();
- });
-
- it('should allow passing fragments to query', () => {
- const queryDoc = gql`
- query {
- author {
- __typename
- ...authorDetails
- }
- }`;
- const composedQuery = gql`
- query {
- author {
- __typename
- ...authorDetails
- }
- }
- fragment authorDetails on Author {
- firstName
- lastName
- }`;
- const data = {
- author: {
- __typename: 'Author',
- firstName: 'John',
- lastName: 'Smith',
- },
- };
- const networkInterface = mockNetworkInterface({
- request: { query: composedQuery },
- result: { data },
- });
- const client = new ApolloClient({
- networkInterface,
- addTypename: false,
- });
- const fragmentDefs = createFragment(gql`
- fragment authorDetails on Author {
- firstName
- lastName
- }`);
-
- return client.query({ query: queryDoc, fragments: fragmentDefs }).then((result) => {
- assert.deepEqual(result.data, data);
- });
- });
-
- it('should allow passing fragments to mutate', () => {
- const mutationDoc = gql`
- mutation createAuthor {
- createAuthor {
- __typename
- ...authorDetails
- }
- }`;
- const composedMutation = gql`
- mutation createAuthor {
- createAuthor {
- __typename
- ...authorDetails
- }
- }
- fragment authorDetails on Author {
- firstName
- lastName
- }`;
- const data = {
- createAuthor: {
- __typename: 'Author',
- firstName: 'John',
- lastName: 'Smith',
- },
- };
- const networkInterface = mockNetworkInterface({
- request: { query: composedMutation },
- result: { data },
- });
- const client = new ApolloClient({
- networkInterface,
- addTypename: false,
- });
- const fragmentDefs = createFragment(gql`
- fragment authorDetails on Author {
- firstName
- lastName
- }`);
-
- return client.mutate({ mutation: mutationDoc, fragments: fragmentDefs }).then((result) => {
- assert.deepEqual(result, { data });
- });
- });
-
- it('should allow passing fragments to watchQuery', () => {
- const queryDoc = gql`
- query {
- author {
- __typename
- ...authorDetails
- }
- }`;
- const composedQuery = gql`
- query {
- author {
- __typename
- ...authorDetails
- }
- }
- fragment authorDetails on Author {
- firstName
- lastName
- }`;
- const data = {
- author: {
- __typename: 'Author',
- firstName: 'John',
- lastName: 'Smith',
- },
- };
- const networkInterface = mockNetworkInterface({
- request: { query: composedQuery },
- result: { data },
- });
- const client = new ApolloClient({
- networkInterface,
- addTypename: false,
- });
- const fragmentDefs = createFragment(gql`
- fragment authorDetails on Author {
- firstName
- lastName
- }`);
-
- const observable = client.watchQuery({ query: queryDoc, fragments: fragmentDefs });
-
- return observableToPromise({ observable }, (result) => {
- assert.deepEqual(result.data, data);
- });
- });
-
- it('should allow passing fragments in polling queries', () => {
- const queryDoc = gql`
- query {
- author {
- __typename
- ...authorDetails
- }
- }`;
- const composedQuery = gql`
- query {
- author {
- __typename
- ...authorDetails
- }
- }
- fragment authorDetails on Author {
- firstName
- lastName
- }`;
- const data = {
- author: {
- __typename: 'Author',
- firstName: 'John',
- lastName: 'Smith',
- },
- };
- const networkInterface = mockNetworkInterface({
- request: { query: composedQuery },
- result: { data },
- });
- const client = new ApolloClient({
- networkInterface,
- addTypename: false,
- });
- const fragmentDefs = createFragment(gql`
- fragment authorDetails on Author {
- firstName
- lastName
- }`);
-
- const observable = client.watchQuery({
- query: queryDoc,
- pollInterval: 30,
- fragments: fragmentDefs,
- });
-
- return observableToPromise({ observable }, (result) => {
- assert.deepEqual(result.data, data);
- });
- });
-
- it('should not print a warning if we call disableFragmentWarnings', (done) => {
- const oldWarn = console.warn;
- console.warn = (str: string) => {
- if (!str.match(/deprecated/)) {
- done(new Error('Returned a warning despite calling disableFragmentWarnings'));
- }
- };
- disableFragmentWarnings();
- createFragment(gql`
- fragment authorDetails on Author {
- firstName
- }
- `);
- createFragment(gql`
- fragment authorDetails on Author {
- lastName
- }`);
-
- // createFragment operates synchronously so if it returns and doesn't call
- // console.warn, we are done.
- setTimeout(() => {
- console.warn = oldWarn;
- done();
- }, 100);
- });
-
- it('should not add multiple instances of the same fragment to fragmentDefinitionsMap', () => {
- createFragment(gql`
- fragment authorDetails on Author {
- author {
- firstName
- lastName
- }
- }`);
- createFragment(gql`
- fragment authorDetails on Author {
- author {
- firstName
- lastName
- }
- }`);
- assert(fragmentDefinitionsMap.hasOwnProperty('authorDetails'));
- assert.equal(fragmentDefinitionsMap['authorDetails'].length, 1);
- });
-
- it('should not mutate the input document when querying', () => {
- const client = new ApolloClient();
-
- const fragments = createFragment(gql`
- fragment authorDetails on Author {
- author {
- firstName
- lastName
- }
- }`);
- const query = gql`{ author { ...authorDetails } }`;
- const initialDefinitions = query.definitions;
- client.query({query, fragments});
- assert.equal(query.definitions, initialDefinitions);
- });
- });
-
it('should pass a network error correctly on a mutation', (done) => {
const mutation = gql`
mutation {
@@ -2126,13 +1662,33 @@ it('should not let errors in observer.next reach the store', (done) => {
});
});
});
+
+ it('should propagate errors from network interface to observers', (done) => {
+
+ const networkInterface = {
+ query: () => Promise.reject(new Error('Uh oh!')),
+ };
+
+ const client = new ApolloClient({
+ networkInterface,
+ addTypename: false,
+ });
+
+ const handle = client.watchQuery({ query: gql`query { a b c }` });
+
+ handle.subscribe({
+ error(error) {
+ assert.equal(error.message, 'Network error: Uh oh!');
+ done();
+ },
+ });
+ });
});
function clientRoundrip(
query: DocumentNode,
data: ExecutionResult,
variables?: any,
- fragments?: FragmentDefinitionNode[],
) {
const networkInterface = mockNetworkInterface({
request: { query: cloneDeep(query) },
@@ -2143,7 +1699,7 @@ function clientRoundrip(
networkInterface,
});
- return client.query({ query, variables, fragments })
+ return client.query({ query, variables })
.then((result) => {
assert.deepEqual(result.data, data);
});
diff --git a/test/cloneDeep.ts b/test/cloneDeep.ts
new file mode 100644
--- /dev/null
+++ b/test/cloneDeep.ts
@@ -0,0 +1,56 @@
+import { cloneDeep } from '../src/util/cloneDeep';
+import { assert } from 'chai';
+
+describe('cloneDeep', () => {
+ it('will clone primitive values', () => {
+ assert.equal(cloneDeep(undefined), undefined);
+ assert.equal(cloneDeep(null), null);
+ assert.equal(cloneDeep(true), true);
+ assert.equal(cloneDeep(false), false);
+ assert.equal(cloneDeep(-1), -1);
+ assert.equal(cloneDeep(+1), +1);
+ assert.equal(cloneDeep(0.5), 0.5);
+ assert.equal(cloneDeep('hello'), 'hello');
+ assert.equal(cloneDeep('world'), 'world');
+ });
+
+ it('will clone objects', () => {
+ const value1 = {};
+ const value2 = { a: 1, b: 2, c: 3 };
+ const value3 = { x: { a: 1, b: 2, c: 3 }, y: { a: 1, b: 2, c: 3 } };
+
+ const clonedValue1 = cloneDeep(value1);
+ const clonedValue2 = cloneDeep(value2);
+ const clonedValue3 = cloneDeep(value3);
+
+ assert.deepEqual(clonedValue1, value1);
+ assert.deepEqual(clonedValue2, value2);
+ assert.deepEqual(clonedValue3, value3);
+
+ assert.notStrictEqual(clonedValue1, value1);
+ assert.notStrictEqual(clonedValue2, value2);
+ assert.notStrictEqual(clonedValue3, value3);
+ assert.notStrictEqual(clonedValue3.x, value3.x);
+ assert.notStrictEqual(clonedValue3.y, value3.y);
+ });
+
+ it('will clone arrays', () => {
+ const value1: Array<number> = [];
+ const value2 = [1, 2, 3];
+ const value3 = [[1, 2, 3], [1, 2, 3]];
+
+ const clonedValue1 = cloneDeep(value1);
+ const clonedValue2 = cloneDeep(value2);
+ const clonedValue3 = cloneDeep(value3);
+
+ assert.deepEqual(clonedValue1, value1);
+ assert.deepEqual(clonedValue2, value2);
+ assert.deepEqual(clonedValue3, value3);
+
+ assert.notStrictEqual(clonedValue1, value1);
+ assert.notStrictEqual(clonedValue2, value2);
+ assert.notStrictEqual(clonedValue3, value3);
+ assert.notStrictEqual(clonedValue3[0], value3[0]);
+ assert.notStrictEqual(clonedValue3[1], value3[1]);
+ });
+});
diff --git a/test/diffAgainstStore.ts b/test/diffAgainstStore.ts
--- a/test/diffAgainstStore.ts
+++ b/test/diffAgainstStore.ts
@@ -2,14 +2,11 @@ import { assert } from 'chai';
import {
diffQueryAgainstStore,
+ ID_KEY,
} from '../src/data/readFromStore';
import { writeQueryToStore } from '../src/data/writeToStore';
-import {
- getIdField,
-} from '../src/data/extensions';
-
import gql from 'graphql-tag';
describe('diffing queries against the store', () => {
@@ -58,6 +55,8 @@ describe('diffing queries against the store', () => {
},
};
+ const getIdField = ({id}: {id: string}) => id;
+
const store = writeQueryToStore({
result,
query: firstQuery,
@@ -348,4 +347,378 @@ describe('diffing queries against the store', () => {
});
});
});
+
+ it('will add a private id property', () => {
+ const query = gql`
+ query {
+ a { id b }
+ c { d e { id f } g { h } }
+ }
+ `;
+
+ const queryResult = {
+ a: [
+ { id: 'a:1', b: 1.1 },
+ { id: 'a:2', b: 1.2 },
+ { id: 'a:3', b: 1.3 },
+ ],
+ c: {
+ d: 2,
+ e: [
+ { id: 'e:1', f: 3.1 },
+ { id: 'e:2', f: 3.2 },
+ { id: 'e:3', f: 3.3 },
+ { id: 'e:4', f: 3.4 },
+ { id: 'e:5', f: 3.5 },
+ ],
+ g: { h: 4 },
+ },
+ };
+
+ const store = writeQueryToStore({
+ query,
+ result: queryResult,
+ dataIdFromObject: ({ id }: { id: string }) => id,
+ });
+
+ const { result } = diffQueryAgainstStore({
+ store,
+ query,
+ });
+
+ assert.deepEqual(result, queryResult);
+ assert.equal(result[ID_KEY], 'ROOT_QUERY');
+ assert.equal(result.a[0][ID_KEY], 'a:1');
+ assert.equal(result.a[1][ID_KEY], 'a:2');
+ assert.equal(result.a[2][ID_KEY], 'a:3');
+ assert.equal(result.c[ID_KEY], '$ROOT_QUERY.c');
+ assert.equal(result.c.e[0][ID_KEY], 'e:1');
+ assert.equal(result.c.e[1][ID_KEY], 'e:2');
+ assert.equal(result.c.e[2][ID_KEY], 'e:3');
+ assert.equal(result.c.e[3][ID_KEY], 'e:4');
+ assert.equal(result.c.e[4][ID_KEY], 'e:5');
+ assert.equal(result.c.g[ID_KEY], '$ROOT_QUERY.c.g');
+ });
+
+ describe('referential equality preservation', () => {
+ it('will return the previous result if there are no changes', () => {
+ const query = gql`
+ query {
+ a { b }
+ c { d e { f } }
+ }
+ `;
+
+ const queryResult = {
+ a: { b: 1 },
+ c: { d: 2, e: { f: 3 } },
+ };
+
+ const store = writeQueryToStore({
+ query,
+ result: queryResult,
+ });
+
+ const previousResult = {
+ a: { b: 1 },
+ c: { d: 2, e: { f: 3 } },
+ };
+
+ const { result } = diffQueryAgainstStore({
+ store,
+ query,
+ previousResult,
+ });
+
+ assert.deepEqual(result, queryResult);
+ assert.strictEqual(result, previousResult);
+ });
+
+ it('will return parts of the previous result that changed', () => {
+ const query = gql`
+ query {
+ a { b }
+ c { d e { f } }
+ }
+ `;
+
+ const queryResult = {
+ a: { b: 1 },
+ c: { d: 2, e: { f: 3 } },
+ };
+
+ const store = writeQueryToStore({
+ query,
+ result: queryResult,
+ });
+
+ const previousResult = {
+ a: { b: 1 },
+ c: { d: 20, e: { f: 3 } },
+ };
+
+ const { result } = diffQueryAgainstStore({
+ store,
+ query,
+ previousResult,
+ });
+
+ assert.deepEqual(result, queryResult);
+ assert.notStrictEqual(result, previousResult);
+ assert.strictEqual(result.a, previousResult.a);
+ assert.notStrictEqual(result.c, previousResult.c);
+ assert.strictEqual(result.c.e, previousResult.c.e);
+ });
+
+ it('will return the previous result if there are no changes in child arrays', () => {
+ const query = gql`
+ query {
+ a { b }
+ c { d e { f } }
+ }
+ `;
+
+ const queryResult = {
+ a: [{ b: 1.1 }, { b: 1.2 }, { b: 1.3 }],
+ c: { d: 2, e: [{ f: 3.1 }, { f: 3.2 }, { f: 3.3 }, { f: 3.4 }, { f: 3.5 }] },
+ };
+
+ const store = writeQueryToStore({
+ query,
+ result: queryResult,
+ });
+
+ const previousResult = {
+ a: [{ b: 1.1 }, { b: 1.2 }, { b: 1.3 }],
+ c: { d: 2, e: [{ f: 3.1 }, { f: 3.2 }, { f: 3.3 }, { f: 3.4 }, { f: 3.5 }] },
+ };
+
+ const { result } = diffQueryAgainstStore({
+ store,
+ query,
+ previousResult,
+ });
+
+ assert.deepEqual(result, queryResult);
+ assert.strictEqual(result, previousResult);
+ });
+
+ it('will not add zombie items when previousResult starts with the same items', () => {
+ const query = gql`
+ query {
+ a { b }
+ }
+ `;
+
+ const queryResult = {
+ a: [{ b: 1.1 }, { b: 1.2 }],
+ };
+
+ const store = writeQueryToStore({
+ query,
+ result: queryResult,
+ });
+
+ const previousResult = {
+ a: [{ b: 1.1 }, { b: 1.2 }, { b: 1.3 }],
+ };
+
+ const { result } = diffQueryAgainstStore({
+ store,
+ query,
+ previousResult,
+ });
+
+ assert.deepEqual(result, queryResult);
+ assert.strictEqual(result.a[0], previousResult.a[0]);
+ assert.strictEqual(result.a[1], previousResult.a[1]);
+ });
+
+ it('will return the previous result if there are no changes in nested child arrays', () => {
+ const query = gql`
+ query {
+ a { b }
+ c { d e { f } }
+ }
+ `;
+
+ const queryResult = {
+ a: [[[[[{ b: 1.1 }, { b: 1.2 }, { b: 1.3 }]]]]],
+ c: { d: 2, e: [[{ f: 3.1 }, { f: 3.2 }, { f: 3.3 }], [{ f: 3.4 }, { f: 3.5 }]] },
+ };
+
+ const store = writeQueryToStore({
+ query,
+ result: queryResult,
+ });
+
+ const previousResult = {
+ a: [[[[[{ b: 1.1 }, { b: 1.2 }, { b: 1.3 }]]]]],
+ c: { d: 2, e: [[{ f: 3.1 }, { f: 3.2 }, { f: 3.3 }], [{ f: 3.4 }, { f: 3.5 }]] },
+ };
+
+ const { result } = diffQueryAgainstStore({
+ store,
+ query,
+ previousResult,
+ });
+
+ assert.deepEqual(result, queryResult);
+ assert.strictEqual(result, previousResult);
+ });
+
+ it('will return parts of the previous result if there are changes in child arrays', () => {
+ const query = gql`
+ query {
+ a { b }
+ c { d e { f } }
+ }
+ `;
+
+ const queryResult = {
+ a: [{ b: 1.1 }, { b: 1.2 }, { b: 1.3 }],
+ c: { d: 2, e: [{ f: 3.1 }, { f: 3.2 }, { f: 3.3 }, { f: 3.4 }, { f: 3.5 }] },
+ };
+
+ const store = writeQueryToStore({
+ query,
+ result: queryResult,
+ });
+
+ const previousResult = {
+ a: [{ b: 1.1 }, { b: -1.2 }, { b: 1.3 }],
+ c: { d: 20, e: [{ f: 3.1 }, { f: 3.2 }, { f: 3.3 }, { f: 3.4 }, { f: 3.5 }] },
+ };
+
+ const { result } = diffQueryAgainstStore({
+ store,
+ query,
+ previousResult,
+ });
+
+ assert.deepEqual(result, queryResult);
+ assert.notStrictEqual(result, previousResult);
+ assert.notStrictEqual(result.a, previousResult.a);
+ assert.strictEqual(result.a[0], previousResult.a[0]);
+ assert.notStrictEqual(result.a[1], previousResult.a[1]);
+ assert.strictEqual(result.a[2], previousResult.a[2]);
+ assert.notStrictEqual(result.c, previousResult.c);
+ assert.notStrictEqual(result.c.e, previousResult.c.e);
+ assert.strictEqual(result.c.e[0], previousResult.c.e[0]);
+ assert.strictEqual(result.c.e[1], previousResult.c.e[1]);
+ assert.strictEqual(result.c.e[2], previousResult.c.e[2]);
+ assert.strictEqual(result.c.e[3], previousResult.c.e[3]);
+ assert.strictEqual(result.c.e[4], previousResult.c.e[4]);
+ });
+
+ it('will return the same items in a different order with `dataIdFromObject`', () => {
+ const query = gql`
+ query {
+ a { id b }
+ c { d e { id f } g { h } }
+ }
+ `;
+
+ const queryResult = {
+ a: [
+ { id: 'a:1', b: 1.1 },
+ { id: 'a:2', b: 1.2 },
+ { id: 'a:3', b: 1.3 },
+ ],
+ c: {
+ d: 2,
+ e: [
+ { id: 'e:1', f: 3.1 },
+ { id: 'e:2', f: 3.2 },
+ { id: 'e:3', f: 3.3 },
+ { id: 'e:4', f: 3.4 },
+ { id: 'e:5', f: 3.5 },
+ ],
+ g: { h: 4 },
+ },
+ };
+
+ const store = writeQueryToStore({
+ query,
+ result: queryResult,
+ dataIdFromObject: ({ id }: { id: string }) => id,
+ });
+
+ const previousResult = {
+ a: [
+ { id: 'a:3', b: 1.3, [ID_KEY]: 'a:3' },
+ { id: 'a:2', b: 1.2, [ID_KEY]: 'a:2' },
+ { id: 'a:1', b: 1.1, [ID_KEY]: 'a:1' },
+ ],
+ c: {
+ d: 2,
+ e: [
+ { id: 'e:4', f: 3.4, [ID_KEY]: 'e:4' },
+ { id: 'e:2', f: 3.2, [ID_KEY]: 'e:2' },
+ { id: 'e:5', f: 3.5, [ID_KEY]: 'e:5' },
+ { id: 'e:3', f: 3.3, [ID_KEY]: 'e:3' },
+ { id: 'e:1', f: 3.1, [ID_KEY]: 'e:1' },
+ ],
+ g: { h: 4 },
+ },
+ };
+
+ const { result } = diffQueryAgainstStore({
+ store,
+ query,
+ previousResult,
+ });
+
+ assert.deepEqual(result, queryResult);
+ assert.notStrictEqual(result, previousResult);
+ assert.notStrictEqual(result.a, previousResult.a);
+ assert.strictEqual(result.a[0], previousResult.a[2]);
+ assert.strictEqual(result.a[1], previousResult.a[1]);
+ assert.strictEqual(result.a[2], previousResult.a[0]);
+ assert.notStrictEqual(result.c, previousResult.c);
+ assert.notStrictEqual(result.c.e, previousResult.c.e);
+ assert.strictEqual(result.c.e[0], previousResult.c.e[4]);
+ assert.strictEqual(result.c.e[1], previousResult.c.e[1]);
+ assert.strictEqual(result.c.e[2], previousResult.c.e[3]);
+ assert.strictEqual(result.c.e[3], previousResult.c.e[0]);
+ assert.strictEqual(result.c.e[4], previousResult.c.e[2]);
+ assert.strictEqual(result.c.g, previousResult.c.g);
+ });
+
+ it('will return the same JSON scalar field object', () => {
+ const query = gql`
+ {
+ a { b c }
+ d { e f }
+ }
+ `;
+
+ const queryResult = {
+ a: { b: 1, c: { x: 2, y: 3, z: 4 } },
+ d: { e: 5, f: { x: 6, y: 7, z: 8 } },
+ };
+
+ const store = writeQueryToStore({
+ query,
+ result: queryResult,
+ });
+
+ const previousResult = {
+ a: { b: 1, c: { x: 2, y: 3, z: 4 } },
+ d: { e: 50, f: { x: 6, y: 7, z: 8 } },
+ };
+
+ const { result } = diffQueryAgainstStore({
+ store,
+ query,
+ previousResult,
+ });
+
+ assert.deepEqual(result, queryResult);
+ assert.notStrictEqual(result, previousResult);
+ assert.strictEqual(result.a, previousResult.a);
+ assert.notStrictEqual(result.d, previousResult.d);
+ assert.strictEqual(result.d.f, previousResult.d.f);
+ });
+ });
});
diff --git a/test/directives.ts b/test/directives.ts
--- a/test/directives.ts
+++ b/test/directives.ts
@@ -11,7 +11,7 @@ import {
import gql from 'graphql-tag';
-import cloneDeep = require('lodash/cloneDeep');
+import { cloneDeep } from 'lodash';
describe('query directives', () => {
it('should not include a skipped field', () => {
diff --git a/test/fetchMore.ts b/test/fetchMore.ts
--- a/test/fetchMore.ts
+++ b/test/fetchMore.ts
@@ -5,17 +5,10 @@ import mockNetworkInterface from './mocks/mockNetworkInterface';
import ApolloClient from '../src';
import { ObservableQuery } from '../src/core/ObservableQuery';
-import assign = require('lodash/assign');
-import clonedeep = require('lodash/cloneDeep');
+import { assign, cloneDeep } from 'lodash';
import gql from 'graphql-tag';
-import { addFragmentsToDocument } from '../src/queries/getFromAST';
-
-import {
- createFragment,
-} from '../src/index';
-
describe('updateQuery on a simple query', () => {
const query = gql`
query thing {
@@ -60,10 +53,10 @@ describe('updateQuery on a simple query', () => {
return new Promise((resolve) => setTimeout(resolve))
.then(() => obsHandle)
- .then((watchedQuery: ObservableQuery) => {
+ .then((watchedQuery: ObservableQuery<any>) => {
assert.equal(latestResult.data.entry.value, 1);
watchedQuery.updateQuery((prevResult: any) => {
- const res = clonedeep(prevResult);
+ const res = cloneDeep(prevResult);
res.entry.value = 2;
return res;
});
@@ -115,7 +108,7 @@ describe('fetchMore on an observable query', () => {
},
},
};
- const resultMore = clonedeep(result);
+ const resultMore = cloneDeep(result);
const result2: any = {
data: {
__typename: 'Query',
@@ -150,7 +143,7 @@ describe('fetchMore on an observable query', () => {
addTypename: true,
});
- const obsHandle = client.watchQuery({
+ const obsHandle = client.watchQuery<any>({
query,
variables,
});
@@ -181,7 +174,7 @@ describe('fetchMore on an observable query', () => {
return watchedQuery.fetchMore({
variables: { start: 10 }, // rely on the fact that the original variables had limit: 10
updateQuery: (prev, options) => {
- const state = clonedeep(prev) as any;
+ const state = cloneDeep(prev) as any;
state.entry.comments = [...state.entry.comments, ...(options.fetchMoreResult as any).data.entry.comments];
return state;
},
@@ -211,54 +204,7 @@ describe('fetchMore on an observable query', () => {
query: query2,
variables: variables2,
updateQuery: (prev, options) => {
- const state = clonedeep(prev) as any;
- state.entry.comments = [...state.entry.comments, ...(options.fetchMoreResult as any).data.comments];
- return state;
- },
- });
- }).then(() => {
- const comments = latestResult.data.entry.comments;
- assert.lengthOf(comments, 20);
- for (let i = 1; i <= 10; i++) {
- assert.equal(comments[i - 1].text, `comment ${i}`);
- }
- for (let i = 11; i <= 20; i++) {
- assert.equal(comments[i - 1].text, `new comment ${i}`);
- }
- unsetup();
- });
- });
-
- it('fetching more with fragments', () => {
- latestResult = null;
- // identical to query2, but with a fragment
- const query3 = gql`
- query NewComments($start: Int!, $limit: Int!) {
- comments(start: $start, limit: $limit) {
- ...textFragment
- __typename
- }
- }
- `;
- const fragment = createFragment(gql`
- fragment textFragment on Comment {
- text
- __typename
- }
- `);
- return setup({
- request: {
- query: addFragmentsToDocument(query3, fragment),
- variables: variables2,
- },
- result: result2,
- }).then((watchedQuery) => {
- return watchedQuery.fetchMore({
- query: query3,
- variables: variables2,
- fragments: fragment,
- updateQuery: (prev, options) => {
- const state = clonedeep(prev) as any;
+ const state = cloneDeep(prev) as any;
state.entry.comments = [...state.entry.comments, ...(options.fetchMoreResult as any).data.comments];
return state;
},
diff --git a/test/fixtures/redux-todomvc/reducers.ts b/test/fixtures/redux-todomvc/reducers.ts
--- a/test/fixtures/redux-todomvc/reducers.ts
+++ b/test/fixtures/redux-todomvc/reducers.ts
@@ -1,5 +1,5 @@
import { combineReducers } from 'redux';
-import assign = require('lodash/assign');
+import { assign } from 'lodash';
import {
ADD_TODO,
diff --git a/test/getFromAST.ts b/test/getFromAST.ts
--- a/test/getFromAST.ts
+++ b/test/getFromAST.ts
@@ -6,7 +6,6 @@ import {
createFragmentMap,
FragmentMap,
getOperationName,
- addFragmentsToDocument,
} from '../src/queries/getFromAST';
import {
@@ -14,10 +13,6 @@ import {
OperationDefinitionNode,
} from 'graphql';
-import {
- createFragment,
-} from '../src';
-
import { print } from 'graphql-tag/printer';
import gql from 'graphql-tag';
import { assert } from 'chai';
@@ -234,136 +229,4 @@ describe('AST utility functions', () => {
getQueryDefinition(queryWithTypeDefination);
}, 'Schema type definitions not allowed in queries. Found: "InputObjectTypeDefinition"');
});
-
- it('should attach fragments properly', () => {
- const subjectInfo = createFragment(gql`
- fragment subjectInfo on Subject {
- id
- name
- }`,
- );
-
- const businessAreaInfo = createFragment(gql`
- fragment businessAreaInfo on BusinessArea {
- id
- name
- subjects {
- ...subjectInfo
- }
- }`,
- [subjectInfo],
- );
-
- const query = gql`
- query {
- businessAreas {
- ...businessAreaInfo
- }
- }
- `;
-
- const fullDoc = addFragmentsToDocument(query, businessAreaInfo);
-
- assert.equal(print(fullDoc), `{
- businessAreas {
- ...businessAreaInfo
- }
-}
-
-fragment subjectInfo on Subject {
- id
- name
-}
-
-fragment businessAreaInfo on BusinessArea {
- id
- name
- subjects {
- ...subjectInfo
- }
-}
-`);
- });
-
- it('should only attach distinct fragments', () => {
- const subjectInfo = createFragment(gql`
- fragment subjectInfo on Subject {
- id
- name
- }`,
- );
-
- const businessAreaInfo = createFragment(gql`
- fragment businessAreaInfo on BusinessArea {
- id
- name
- subjects {
- ...subjectInfo
- }
- }`,
- [subjectInfo, subjectInfo],
- );
-
- // to test that depending on the same fragment twice in different fragments won't add it twice.
- const whateverAreaInfo = createFragment(gql`
- fragment whateverAreaInfo on WhateverArea {
- subject {
- ...subjectInfo
- }
- }`,
- [subjectInfo],
- );
-
- // XXX we don't attach a fragment here, because if we do, it won't be === to
- // the same fragment created with createFragment.
- const query = gql`
- query {
- businessAreas {
- ...businessAreaInfo
- }
- whateverAreas {
- ...whateverAreaInfo
- }
- }
-
- #fragment subjectInfo on Subject {
- # id
- # name
- #}
- `;
-
- let fullDoc = addFragmentsToDocument(query, businessAreaInfo);
- fullDoc = addFragmentsToDocument(fullDoc, whateverAreaInfo);
- // tests to make sure we can't add subjectInfo twice, even in separate call
- fullDoc = addFragmentsToDocument(fullDoc, subjectInfo);
-
- assert.equal(print(fullDoc), `{
- businessAreas {
- ...businessAreaInfo
- }
- whateverAreas {
- ...whateverAreaInfo
- }
-}
-
-fragment subjectInfo on Subject {
- id
- name
-}
-
-fragment businessAreaInfo on BusinessArea {
- id
- name
- subjects {
- ...subjectInfo
- }
-}
-
-fragment whateverAreaInfo on WhateverArea {
- subject {
- ...subjectInfo
- }
-}
-`);
- });
});
diff --git a/test/graphqlSubscriptions.ts b/test/graphqlSubscriptions.ts
--- a/test/graphqlSubscriptions.ts
+++ b/test/graphqlSubscriptions.ts
@@ -6,7 +6,7 @@ import {
assert,
} from 'chai';
-import clonedeep = require('lodash/cloneDeep');
+import { cloneDeep } from 'lodash';
import { isSubscriptionResultAction } from '../src/actions';
@@ -282,7 +282,7 @@ describe('GraphQL Subscriptions', () => {
reducer: (previousResult, action) => {
counter++;
if (isSubscriptionResultAction(action)) {
- const newResult = clonedeep(previousResult) as any;
+ const newResult = cloneDeep(previousResult) as any;
newResult.number++;
return newResult;
}
@@ -312,52 +312,4 @@ describe('GraphQL Subscriptions', () => {
network.fireResult(id);
}
});
-
- // it('should work with an observable query', (done) => {
- // const network = mockSubscriptionNetworkInterface([sub2], {
- // request: {
- // query: commentsQuery,
- // variables: commentsVariables,
- // },
- // result: commentsResult, // list of 10 comments
- // });
- // const client = new ApolloClient({
- // networkInterface: network,
- // });
- // client.query({
- // query: commentsQuery,
- // variables: commentsVariables,
- // }).then(() => {
- // const graphQLSubscriptionOptions = {
- // subscription: commentsSub,
- // variables: commentsVariables,
- // updateQuery: (prev, updateOptions) => {
- // const state = clonedeep(prev) as any;
- // // prev is that data field of the query result
- // // updateOptions.subscriptionResult is the result entry from the subscription result
- // state.entry.comments = [...state.entry.comments, ...(updateOptions.subscriptionResult as any).entry.comments];
- // return state;
- // },
- // };
- // const obsHandle = client.watchQuery(commentsWatchQueryOptions);
-
- // obsHandle.subscribe({
- // next(result) {
- // let expectedComments = [];
- // for (let i = 1; i <= 11; i++) {
- // expectedComments.push({ text: `comment ${i}` });
- // }
- // assert.equal(result.data.entry.comments.length, 11);
- // assert.deepEqual(result.data.entry.comments, expectedComments);
- // done();
- // },
- // });
-
- // const id = obsHandle.startGraphQLSubscription(graphQLSubscriptionOptions);
- // network.fireResult(id);
- // });
- // });
-
- // TODO: test that we can make two subscriptions on one watchQuery.
-
});
diff --git a/test/isEqual.ts b/test/isEqual.ts
new file mode 100644
--- /dev/null
+++ b/test/isEqual.ts
@@ -0,0 +1,58 @@
+import { isEqual } from '../src/util/isEqual';
+import { assert } from 'chai';
+
+describe('isEqual', () => {
+ it('should return true for equal primitive values', () => {
+ assert(isEqual(undefined, undefined));
+ assert(isEqual(null, null));
+ assert(isEqual(true, true));
+ assert(isEqual(false, false));
+ assert(isEqual(-1, -1));
+ assert(isEqual(+1, +1));
+ assert(isEqual(42, 42));
+ assert(isEqual(0, 0));
+ assert(isEqual(0.5, 0.5));
+ assert(isEqual('hello', 'hello'));
+ assert(isEqual('world', 'world'));
+ });
+
+ it('should return false for not equal primitive values', () => {
+ assert(!isEqual(undefined, null));
+ assert(!isEqual(null, undefined));
+ assert(!isEqual(true, false));
+ assert(!isEqual(false, true));
+ assert(!isEqual(-1, +1));
+ assert(!isEqual(+1, -1));
+ assert(!isEqual(42, 42.00000000000001));
+ assert(!isEqual(0, 0.5));
+ assert(!isEqual('hello', 'world'));
+ assert(!isEqual('world', 'hello'));
+ });
+
+ it('should return false when comparing primitives with objects', () => {
+ assert(!isEqual({}, null));
+ assert(!isEqual(null, {}));
+ assert(!isEqual({}, true));
+ assert(!isEqual(true, {}));
+ assert(!isEqual({}, 42));
+ assert(!isEqual(42, {}));
+ assert(!isEqual({}, 'hello'));
+ assert(!isEqual('hello', {}));
+ });
+
+ it('should correctly compare shallow objects', () => {
+ assert(isEqual({}, {}));
+ assert(isEqual({ a: 1, b: 2, c: 3 }, { a: 1, b: 2, c: 3 }));
+ assert(!isEqual({ a: 1, b: 2, c: 3 }, { a: 3, b: 2, c: 1 }));
+ assert(!isEqual({ a: 1, b: 2, c: 3 }, { a: 1, b: 2 }));
+ assert(!isEqual({ a: 1, b: 2 }, { a: 1, b: 2, c: 3 }));
+ });
+
+ it('should correctly compare deep objects', () => {
+ assert(isEqual({ x: {} }, { x: {} }));
+ assert(isEqual({ x: { a: 1, b: 2, c: 3 } }, { x: { a: 1, b: 2, c: 3 } }));
+ assert(!isEqual({ x: { a: 1, b: 2, c: 3 } }, { x: { a: 3, b: 2, c: 1 } }));
+ assert(!isEqual({ x: { a: 1, b: 2, c: 3 } }, { x: { a: 1, b: 2 } }));
+ assert(!isEqual({ x: { a: 1, b: 2 } }, { x: { a: 1, b: 2, c: 3 } }));
+ });
+});
diff --git a/test/mockNetworkInterface.ts b/test/mockNetworkInterface.ts
--- a/test/mockNetworkInterface.ts
+++ b/test/mockNetworkInterface.ts
@@ -7,7 +7,7 @@ import {
MockedSubscription,
} from './mocks/mockNetworkInterface';
-import omit = require('lodash/omit');
+import { omit } from 'lodash';
import gql from 'graphql-tag';
diff --git a/test/mocks/mockFetch.ts b/test/mocks/mockFetch.ts
--- a/test/mocks/mockFetch.ts
+++ b/test/mocks/mockFetch.ts
@@ -1,5 +1,3 @@
-import 'whatwg-fetch';
-
// This is an implementation of a mocked window.fetch implementation similar in
// structure to the MockedNetworkInterface.
diff --git a/test/mutationResults.ts b/test/mutationResults.ts
--- a/test/mutationResults.ts
+++ b/test/mutationResults.ts
@@ -7,8 +7,7 @@ import { isMutationResultAction, isQueryResultAction } from '../src/actions';
import { Subscription } from '../src/util/Observable';
-import assign = require('lodash/assign');
-import clonedeep = require('lodash/cloneDeep');
+import { assign, cloneDeep } from 'lodash';
import { ObservableQuery } from '../src/core/ObservableQuery';
@@ -743,7 +742,7 @@ describe('mutation results', () => {
reducer: (previousResult, action) => {
counter++;
if (isMutationResultAction(action)) {
- const newResult = clonedeep(previousResult) as any;
+ const newResult = cloneDeep(previousResult) as any;
newResult.todoList.todos.unshift(action.result.data['createTodo']);
return newResult;
}
@@ -776,7 +775,7 @@ describe('mutation results', () => {
it('passes variables', () => {
let counter = 0;
- let observableQuery: ObservableQuery;
+ let observableQuery: ObservableQuery<any>;
let subscription: any;
return setup({
@@ -799,8 +798,8 @@ describe('mutation results', () => {
reducer: (previousResult, action, variables: any) => {
counter++;
if (isMutationResultAction(action) && variables['id'] === 5) {
- const newResult = clonedeep(previousResult) as any;
- newResult.todoList.todos.unshift((action.result.data as any).createTodo);
+ const newResult = cloneDeep(previousResult) as any;
+ newResult.todoList.todos.unshift(action.result.data['createTodo']);
return newResult;
}
return previousResult;
@@ -854,7 +853,7 @@ describe('mutation results', () => {
reducer: (previousResult, action) => {
if (isMutationResultAction(action) && action.operationName === 'createTodo') {
counter++;
- const newResult = clonedeep(previousResult) as any;
+ const newResult = cloneDeep(previousResult) as any;
newResult.todoList.todos.unshift(action.result.data['createTodo']);
return newResult;
}
@@ -871,7 +870,7 @@ describe('mutation results', () => {
reducer: (previousResult, action) => {
if (isMutationResultAction(action) && action.operationName === 'wrongName') {
counter++; // shouldn't be called, so counter shouldn't increase.
- const newResult = clonedeep(previousResult) as any;
+ const newResult = cloneDeep(previousResult) as any;
newResult.todoList.todos.unshift(action.result.data['createTodo']);
return newResult;
}
@@ -919,7 +918,7 @@ describe('mutation results', () => {
reducer: (previousResult, action) => {
counter++;
if (isQueryResultAction(action)) {
- const newResult = clonedeep(previousResult) as any;
+ const newResult = cloneDeep(previousResult) as any;
newResult.todoList.todos.unshift(action.result.data['newTodos'][0]);
return newResult;
}
@@ -1003,7 +1002,7 @@ describe('mutation results', () => {
reducer: (previousResult, action) => {
counter++;
if (isMutationResultAction(action)) {
- const newResult = clonedeep(previousResult) as any;
+ const newResult = cloneDeep(previousResult) as any;
newResult.todoList.todos.unshift(action.result.data['createTodo']);
return newResult;
}
@@ -1018,7 +1017,7 @@ describe('mutation results', () => {
reducer: (previousResult, action) => {
counter2++;
if (isMutationResultAction(action) && action.result.data['createTodo'].completed) {
- const newResult = clonedeep(previousResult) as any;
+ const newResult = cloneDeep(previousResult) as any;
newResult.todoList.filteredTodos.unshift(action.result.data['createTodo']);
return newResult;
}
@@ -1117,6 +1116,31 @@ describe('mutation results', () => {
});
});
+ it('does not swallow errors', done => {
+ client = new ApolloClient({
+ networkInterface: mockNetworkInterface({
+ request: { query },
+ result,
+ }),
+ });
+
+ const observable = client.watchQuery({
+ query,
+ reducer: () => {
+ throw new Error('Don’t swallow me right up!');
+ },
+ });
+
+ observable.subscribe({
+ next: () => {
+ done(new Error('`next` should not be called.'));
+ },
+ error: error => {
+ assert(/swallow/.test(error.message));
+ done();
+ },
+ });
+ });
});
@@ -1170,7 +1194,7 @@ describe('mutation results', () => {
assert.equal(mResult.data.createTodo.id, '99');
assert.equal(mResult.data.createTodo.text, 'This one was created with a mutation.');
- const state = clonedeep(prev) as any;
+ const state = cloneDeep(prev) as any;
state.todoList.todos.unshift(mResult.data.createTodo);
return state;
},
@@ -1209,7 +1233,7 @@ describe('mutation results', () => {
assert.equal(mResult.data.createTodo.id, '99');
assert.equal(mResult.data.createTodo.text, 'This one was created with a mutation.');
- const state = clonedeep(prev) as any;
+ const state = cloneDeep(prev) as any;
state.todoList.todos.unshift(mResult.data.createTodo);
return state;
},
@@ -1233,7 +1257,7 @@ describe('mutation results', () => {
updateQueries: {
todoList: (prev, options) => {
const mResult = options.mutationResult as any;
- const state = clonedeep(prev) as any;
+ const state = cloneDeep(prev) as any;
// It's unfortunate that this function is called at all, but we are removing
// the updateQueries API soon so it won't matter.
state.todoList.todos.unshift(mResult.data && mResult.data.createTodo);
@@ -1248,7 +1272,7 @@ describe('mutation results', () => {
updateQueries: {
todoList: (prev, options) => {
const mResult = options.mutationResult as any;
- const state = clonedeep(prev) as any;
+ const state = cloneDeep(prev) as any;
state.todoList.todos.unshift(mResult.data.createTodo);
return state;
},
diff --git a/test/networkInterface.ts b/test/networkInterface.ts
--- a/test/networkInterface.ts
+++ b/test/networkInterface.ts
@@ -1,8 +1,7 @@
import * as chai from 'chai';
import * as chaiAsPromised from 'chai-as-promised';
-import assign = require('lodash/assign');
-import isequal = require('lodash/isEqual');
+import { assign, isEqual } from 'lodash';
import * as fetchMock from 'fetch-mock';
// make it easy to assert with promises
@@ -116,12 +115,12 @@ describe('network interface', () => {
}
if (query === print(simpleQueryWithVar)
- && isequal(variables, { personNum: 1 })) {
+ && isEqual(variables, { personNum: 1 })) {
return simpleResult;
}
if (query === print(complexQueryWithTwoVars)
- && isequal(variables, { personNum: 1, filmNum: 1 })) {
+ && isEqual(variables, { personNum: 1, filmNum: 1 })) {
return complexResult;
}
@@ -155,6 +154,29 @@ describe('network interface', () => {
}, /Passing the URI as the first argument to createNetworkInterface is deprecated/);
});
+ it('will warn if there is no global fetch implementation', () => {
+ const origWarn = console.warn;
+ const origFetch = (global as any).fetch;
+
+ const warnCalls: Array<Array<any>> = [];
+
+ console.warn = (...args: Array<any>) => warnCalls.push(args);
+
+ delete (global as any).fetch;
+
+ assert.equal(warnCalls.length, 0);
+
+ createNetworkInterface({ uri: '/graphql' });
+
+ assert.equal(warnCalls.length, 1);
+ assert.equal(warnCalls[0].length, 1);
+ assert(/the fetch browser API could not be found/.test(warnCalls[0][0]));
+
+ // Put everything back the way it was.
+ console.warn = origWarn;
+ (global as any).fetch = origFetch;
+ });
+
it('should create an instance with a given uri', () => {
const networkInterface = createNetworkInterface({ uri: '/graphql' });
assert.equal(networkInterface._uri, '/graphql');
diff --git a/test/optimistic.ts b/test/optimistic.ts
--- a/test/optimistic.ts
+++ b/test/optimistic.ts
@@ -2,13 +2,11 @@ import * as chai from 'chai';
const { assert } = chai;
import mockNetworkInterface from './mocks/mockNetworkInterface';
-import ApolloClient, { createFragment } from '../src';
+import ApolloClient from '../src';
import { MutationBehaviorReducerArgs, MutationBehavior, MutationQueryReducersMap } from '../src/data/mutationResults';
import { NormalizedCache, StoreObject } from '../src/data/storeUtils';
-import { addFragmentsToDocument } from '../src/queries/getFromAST';
-import assign = require('lodash/assign');
-import clonedeep = require('lodash/cloneDeep');
+import { assign, cloneDeep } from 'lodash';
import { Subscription } from '../src/util/Observable';
@@ -722,7 +720,7 @@ describe('optimistic mutation results', () => {
const mResult = options.mutationResult as any;
assert.equal(mResult.data.createTodo.id, '99');
- const state = clonedeep(prev) as any;
+ const state = cloneDeep(prev) as any;
state.todoList.todos.unshift(mResult.data.createTodo);
return state;
},
@@ -772,7 +770,7 @@ describe('optimistic mutation results', () => {
todoList: (prev, options) => {
const mResult = options.mutationResult as any;
- const state = clonedeep(prev) as any;
+ const state = cloneDeep(prev) as any;
state.todoList.todos.unshift(mResult.data.createTodo);
return state;
},
@@ -846,7 +844,7 @@ describe('optimistic mutation results', () => {
todoList: (prev, options) => {
const mResult = options.mutationResult as any;
- const state = clonedeep(prev) as any;
+ const state = cloneDeep(prev) as any;
state.todoList.todos.unshift(mResult.data.createTodo);
return state;
},
@@ -951,7 +949,7 @@ describe('optimistic mutation results', () => {
reducer: (previousResult, action) => {
counter++;
if (isMutationResultAction(action)) {
- const newResult = clonedeep(previousResult) as any;
+ const newResult = cloneDeep(previousResult) as any;
newResult.todoList.todos.unshift(action.result.data.createTodo);
return newResult;
}
@@ -979,7 +977,7 @@ describe('optimistic mutation results', () => {
reducer: (previousResult, action) => {
counter++;
if (isMutationResultAction(action)) {
- const newResult = clonedeep(previousResult) as any;
+ const newResult = cloneDeep(previousResult) as any;
newResult.todoList.todos.unshift(action.result.data['createTodo']);
return newResult;
}
@@ -1028,24 +1026,6 @@ describe('optimistic mutation - githunt comments', () => {
}
}
`;
- const fragment = createFragment(gql`
- fragment authorFields on User {
- postedBy {
- login
- html_url
- }
- }
- `);
- const fragmentWithTypenames = createFragment(gql`
- fragment authorFields on User {
- postedBy {
- login
- html_url
- __typename
- }
- __typename
- }
- `);
const queryWithFragment = gql`
query Comment($repoName: String!) {
entry(repoFullName: $repoName) {
@@ -1054,6 +1034,13 @@ describe('optimistic mutation - githunt comments', () => {
}
}
}
+
+ fragment authorFields on User {
+ postedBy {
+ login
+ html_url
+ }
+ }
`;
const variables = {
repoName: 'org/repo',
@@ -1091,7 +1078,7 @@ describe('optimistic mutation - githunt comments', () => {
result,
}, {
request: {
- query: addFragmentsToDocument(addTypenameToDocument(queryWithFragment), fragment),
+ query: addTypenameToDocument(queryWithFragment),
variables,
},
result,
@@ -1146,7 +1133,7 @@ describe('optimistic mutation - githunt comments', () => {
const updateQueries = {
Comment: (prev, { mutationResult: mutationResultArg }) => {
const newComment = (mutationResultArg as any).data.submitComment;
- const state = clonedeep(prev);
+ const state = cloneDeep(prev);
(state as any).entry.comments.unshift(newComment);
return state;
},
@@ -1196,49 +1183,6 @@ describe('optimistic mutation - githunt comments', () => {
assert.equal(newResult.data.entry.comments.length, 2);
});
});
-
- it('can post a new comment (with fragments)', () => {
- const mutationVariables = {
- repoFullName: 'org/repo',
- commentContent: 'New Comment',
- };
-
- let subscriptionHandle: Subscription;
- return setup({
- request: {
- query: addFragmentsToDocument(addTypenameToDocument(mutationWithFragment), fragmentWithTypenames),
- variables: mutationVariables,
- },
- result: mutationResult,
- })
- .then(() => {
- // we have to actually subscribe to the query to be able to update it
- return new Promise( (resolve, reject) => {
- const handle = client.watchQuery({
- query: queryWithFragment,
- variables,
- fragments: fragment,
- });
- subscriptionHandle = handle.subscribe({
- next(res) { resolve(res); },
- });
- });
- })
- .then(() => {
- return client.mutate({
- mutation: mutationWithFragment,
- optimisticResponse,
- variables: mutationVariables,
- updateQueries,
- fragments: fragment,
- });
- }).then(() => {
- return client.query({ query: queryWithFragment, variables, fragments: fragment });
- }).then((newResult: any) => {
- subscriptionHandle.unsubscribe();
- assert.equal(newResult.data.entry.comments.length, 2);
- });
- });
});
function realIdValue(id: string) {
diff --git a/test/readFromStore.ts b/test/readFromStore.ts
--- a/test/readFromStore.ts
+++ b/test/readFromStore.ts
@@ -1,6 +1,5 @@
import { assert } from 'chai';
-import assign = require('lodash/assign');
-import omit = require('lodash/omit');
+import { assign, omit } from 'lodash';
import {
readQueryFromStore,
diff --git a/test/scheduler.ts b/test/scheduler.ts
--- a/test/scheduler.ts
+++ b/test/scheduler.ts
@@ -324,7 +324,7 @@ describe('QueryScheduler', () => {
queryManager,
});
const queryId = 'fake-id';
- scheduler.addQueryOnInterval(queryId, queryOptions);
+ scheduler.addQueryOnInterval<any>(queryId, queryOptions);
assert.equal(Object.keys(scheduler.intervalQueries).length, 1);
assert.equal(Object.keys(scheduler.intervalQueries)[0], queryOptions.pollInterval.toString());
const queries = (<any>scheduler.intervalQueries)[queryOptions.pollInterval.toString()];
diff --git a/test/subscribeToMore.ts b/test/subscribeToMore.ts
--- a/test/subscribeToMore.ts
+++ b/test/subscribeToMore.ts
@@ -4,10 +4,8 @@ const { assert } = chai;
import {
mockSubscriptionNetworkInterface,
} from './mocks/mockNetworkInterface';
-import ApolloClient from '../src';
-// import assign = require('lodash/assign');
-// import clonedeep = require('lodash/cloneDeep');
+import ApolloClient from '../src';
import gql from 'graphql-tag';
diff --git a/test/tests.ts b/test/tests.ts
--- a/test/tests.ts
+++ b/test/tests.ts
@@ -18,7 +18,7 @@ require('source-map-support').install();
console.warn = console.error = (...messages: string[]) => {
console.log(`==> Error in test: Tried to log warning or error with message:
`, ...messages);
- if (!process.env.CI && !messages[0].match(/deprecated/)) {
+ if (!process.env.CI) {
process.exit(1);
}
};
@@ -50,3 +50,6 @@ import './batchedNetworkInterface';
import './ObservableQuery';
import './subscribeToMore';
import './customResolvers';
+import './isEqual';
+import './cloneDeep';
+import './assign';
diff --git a/test/util/observableToPromise.ts b/test/util/observableToPromise.ts
--- a/test/util/observableToPromise.ts
+++ b/test/util/observableToPromise.ts
@@ -1,5 +1,5 @@
import { ObservableQuery } from '../../src/core/ObservableQuery';
-import { ApolloQueryResult } from '../../src/core/QueryManager';
+import { ApolloQueryResult } from '../../src/core/types';
import { Subscription } from '../../src/util/Observable';
/**
@@ -12,13 +12,13 @@ import { Subscription } from '../../src/util/Observable';
* @param errorCallbacks an expected set of errors
*/
export type Options = {
- observable: ObservableQuery,
+ observable: ObservableQuery<any>,
shouldResolve?: boolean,
wait?: number,
errorCallbacks?: ((error: Error) => any)[],
};
-export type ResultCallback = ((result: ApolloQueryResult) => any);
+export type ResultCallback = ((result: ApolloQueryResult<any>) => any);
// Take an observable and N callbacks, and observe the observable,
// ensuring it is called exactly N times, resolving once it has done so.
diff --git a/test/util/subscribeAndCount.ts b/test/util/subscribeAndCount.ts
--- a/test/util/subscribeAndCount.ts
+++ b/test/util/subscribeAndCount.ts
@@ -1,11 +1,11 @@
import { ObservableQuery } from '../../src/core/ObservableQuery';
-import { ApolloQueryResult } from '../../src/core/QueryManager';
+import { ApolloQueryResult } from '../../src/core/types';
import { Subscription } from '../../src/util/Observable';
import wrap from './wrap';
-export default function(done: MochaDone, observable: ObservableQuery,
- cb: (handleCount: number, result: ApolloQueryResult) => any): Subscription {
+export default function(done: MochaDone, observable: ObservableQuery<any>,
+ cb: (handleCount: number, result: ApolloQueryResult<any>) => any): Subscription {
let handleCount = 0;
return observable.subscribe({
next: wrap(done, result => {
diff --git a/test/writeToStore.ts b/test/writeToStore.ts
--- a/test/writeToStore.ts
+++ b/test/writeToStore.ts
@@ -1,7 +1,5 @@
import { assert } from 'chai';
-import cloneDeep = require('lodash/cloneDeep');
-import assign = require('lodash/assign');
-import omit = require('lodash/omit');
+import { cloneDeep, assign, omit } from 'lodash';
import {
writeQueryToStore,
@@ -12,10 +10,6 @@ import {
storeKeyNameFromField,
} from '../src/data/storeUtils';
-import {
- getIdField,
-} from '../src/data/extensions';
-
import {
NormalizedCache,
} from '../src/data/storeUtils';
@@ -30,6 +24,8 @@ import {
import gql from 'graphql-tag';
+const getIdField = ({id}: {id: string}) => id;
+
describe('writing to the store', () => {
it('properly normalizes a trivial item', () => {
const query = gql`
diff --git a/tsconfig.test.json b/tsconfig.test.json
new file mode 100644
--- /dev/null
+++ b/tsconfig.test.json
@@ -0,0 +1,6 @@
+{
+ "extends": "./tsconfig",
+ "compilerOptions": {
+ "module": "commonjs"
+ }
+}
| Circular dependencies
```
store.js -> optimistic-data/store.js -> store.js
core/ObservableQuery.js -> core/QueryManager.js -> scheduler/scheduler.js -> core/ObservableQuery.js
core/QueryManager.js -> scheduler/scheduler.js -> core/QueryManager.js
optimistic-data/store.js -> store.js -> optimistic-data/store.js
scheduler/scheduler.js -> core/QueryManager.js -> scheduler/scheduler.js
```
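
One common way to break cycles like these is to move the shared declarations into a "leaf" module that imports nothing from the modules in the cycle, so every remaining edge points one way. The patches in this entry take that approach, extracting shared types such as `ApolloQueryResult` and `FetchType` into `src/core/types.ts`. A minimal sketch of the pattern, with simplified module and type names that are illustrative rather than the real apollo-client sources:

```ts
// types.ts — a leaf module: it imports nothing from store/scheduler,
// so any module may import it without creating a cycle.
export type Listener = (queryId: string) => void;

// store.ts — depends only on the leaf module.
import { Listener } from './types';
export const listeners: Listener[] = [];

// scheduler.ts — depends on types.ts and store.ts; since store.ts no
// longer needs to import scheduler.ts, the import graph stays acyclic.
import { Listener } from './types';
import { listeners } from './store';
export function register(listener: Listener) {
  listeners.push(listener);
}
```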
@DxCx
Write subscription results under the `ROOT_SUBSCRIPTION` ID instead of `ROOT_QUERY`
This will help show in the Apollo dev tools whether data came from a subscription or from a query.
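A rough sketch of what that separation could look like in a normalized store — the keys and values below are hypothetical and simplified for illustration, not the exact cache format:

```ts
// Hypothetical store snapshot, for illustration only.
const store = {
  ROOT_QUERY: {
    // fields written by ordinary query results
    'entry({"repoFullName":"org/repo"})': 'Entry:1', // id of a normalized entity
  },
  ROOT_SUBSCRIPTION: {
    // fields written by subscription results; keeping them under a
    // separate root ID lets dev tools show where each field came from
    'commentAdded({"repoFullName":"org/repo"})': 'Comment:99',
  },
};
```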
| 2016-12-25T04:03:07Z | 0.6 |
|
apollographql/apollo-client | 1,069 | apollographql__apollo-client-1069 | [
"1074"
] | 6f679c392d421039c2c95ae226fd83d25a84a59a | diff --git a/rollup.config.js b/rollup.config.js
new file mode 100644
--- /dev/null
+++ b/rollup.config.js
@@ -0,0 +1,15 @@
+function globals(mod) {
+ if (mod.indexOf('lodash/') === 0) return '_';
+}
+
+export default {
+ entry: 'lib/src/index.js',
+ dest: 'lib/apollo.umd.js',
+ format: 'umd',
+ sourceMap: true,
+ moduleName: 'apollo',
+ external: [
+ 'lodash'
+ ],
+ globals
+};
diff --git a/src/ApolloClient.ts b/src/ApolloClient.ts
--- a/src/ApolloClient.ts
+++ b/src/ApolloClient.ts
@@ -13,8 +13,8 @@ import {
} from 'graphql';
-import isUndefined = require('lodash/isUndefined');
-import isString = require('lodash/isString');
+import isUndefined from 'lodash/isUndefined';
+import isString from 'lodash/isString';
import {
createApolloStore,
@@ -30,10 +30,13 @@ import {
import {
QueryManager,
+} from './core/QueryManager';
+
+import {
ApolloQueryResult,
ResultComparator,
ResultTransformer,
-} from './core/QueryManager';
+} from './core/types';
import {
ObservableQuery,
diff --git a/src/core/ObservableQuery.ts b/src/core/ObservableQuery.ts
--- a/src/core/ObservableQuery.ts
+++ b/src/core/ObservableQuery.ts
@@ -17,15 +17,18 @@ import {
import {
QueryManager,
+} from './QueryManager';
+
+import {
ApolloQueryResult,
FetchType,
-} from './QueryManager';
+} from './types';
import { tryFunctionOrLogError } from '../util/errorHandling';
import { NetworkStatus } from '../queries/store';
-import isEqual = require('lodash/isEqual');
+import isEqual from 'lodash/isEqual';
export type ApolloCurrentResult = {
data: any;
diff --git a/src/core/QueryManager.ts b/src/core/QueryManager.ts
--- a/src/core/QueryManager.ts
+++ b/src/core/QueryManager.ts
@@ -8,8 +8,22 @@ import {
Deduplicator,
} from '../transport/Deduplicator';
-import forOwn = require('lodash/forOwn');
-import isEqual = require('lodash/isEqual');
+import forOwn from 'lodash/forOwn';
+import isEqual from 'lodash/isEqual';
+
+import {
+ ResultTransformer,
+ ResultComparator,
+ QueryListener,
+ ApolloQueryResult,
+ FetchType,
+ SubscriptionOptions,
+} from './types';
+
+import {
+ QueryStoreValue,
+ NetworkStatus,
+} from '../queries/store';
import {
ApolloStore,
@@ -18,10 +32,6 @@ import {
ApolloReducerConfig,
} from '../store';
-import {
- QueryStoreValue,
-} from '../queries/store';
-
import {
checkDocument,
getQueryDefinition,
@@ -82,10 +92,6 @@ import {
Observable,
} from '../util/Observable';
-import {
- NetworkStatus,
-} from '../queries/store';
-
import { tryFunctionOrLogError } from '../util/errorHandling';
import {
@@ -97,44 +103,6 @@ import { WatchQueryOptions } from './watchQueryOptions';
import { ObservableQuery } from './ObservableQuery';
-export type QueryListener = (queryStoreValue: QueryStoreValue) => void;
-
-export interface SubscriptionOptions {
- document: Document;
- variables?: { [key: string]: any };
-};
-
-export type ApolloQueryResult = {
- data: any;
- loading: boolean;
- networkStatus: NetworkStatus;
-
- // This type is different from the GraphQLResult type because it doesn't include errors.
- // Those are thrown via the standard promise/observer catch mechanism.
-};
-
-// A result transformer is given the data that is to be returned from the store from a query or
-// mutation, and can modify or observe it before the value is provided to your application.
-//
-// For watched queries, the transformer is only called when the data retrieved from the server is
-// different from previous.
-//
-// If the transformer wants to mutate results (say, by setting the prototype of result data), it
-// will likely need to be paired with a custom resultComparator. By default, Apollo performs a
-// deep equality comparison on results, and skips those that are considered equal - reducing
-// re-renders.
-export type ResultTransformer = (resultData: ApolloQueryResult) => ApolloQueryResult;
-
-// Controls how Apollo compares two query results and considers their equality. Two equal results
-// will not trigger re-renders.
-export type ResultComparator = (result1: ApolloQueryResult, result2: ApolloQueryResult) => boolean;
-
-export enum FetchType {
- normal = 1,
- refetch = 2,
- poll = 3,
-}
-
export class QueryManager {
public pollingTimers: {[queryId: string]: NodeJS.Timer | any}; //oddity in Typescript
public scheduler: QueryScheduler;
@@ -598,7 +566,6 @@ export class QueryManager {
this.fetchQueryPromises[requestId.toString()] = { promise, resolve, reject };
}
-
// Removes the promise in this.fetchQueryPromises for a particular request ID.
public removeFetchQueryPromise(requestId: number) {
delete this.fetchQueryPromises[requestId.toString()];
diff --git a/src/core/types.ts b/src/core/types.ts
new file mode 100644
--- /dev/null
+++ b/src/core/types.ts
@@ -0,0 +1,43 @@
+import { Document } from 'graphql';
+import {
+ QueryStoreValue,
+ NetworkStatus,
+} from '../queries/store';
+
+export interface SubscriptionOptions {
+ document: Document;
+ variables?: { [key: string]: any };
+};
+
+export type QueryListener = (queryStoreValue: QueryStoreValue) => void;
+
+export type ApolloQueryResult = {
+ data: any;
+ loading: boolean;
+ networkStatus: NetworkStatus;
+
+ // This type is different from the GraphQLResult type because it doesn't include errors.
+ // Those are thrown via the standard promise/observer catch mechanism.
+};
+
+// A result transformer is given the data that is to be returned from the store from a query or
+// mutation, and can modify or observe it before the value is provided to your application.
+//
+// For watched queries, the transformer is only called when the data retrieved from the server is
+// different from previous.
+//
+// If the transformer wants to mutate results (say, by setting the prototype of result data), it
+// will likely need to be paired with a custom resultComparator. By default, Apollo performs a
+// deep equality comparison on results, and skips those that are considered equal - reducing
+// re-renders.
+export type ResultTransformer = (resultData: ApolloQueryResult) => ApolloQueryResult;
+
+// Controls how Apollo compares two query results and considers their equality. Two equal results
+// will not trigger re-renders.
+export type ResultComparator = (result1: ApolloQueryResult, result2: ApolloQueryResult) => boolean;
+
+export enum FetchType {
+ normal = 1,
+ refetch = 2,
+ poll = 3,
+}
diff --git a/src/data/debug.ts b/src/data/debug.ts
--- a/src/data/debug.ts
+++ b/src/data/debug.ts
@@ -1,7 +1,7 @@
// For development only!
-import isObject = require('lodash/isObject');
-import omit = require('lodash/omit');
-import mapValues = require('lodash/mapValues');
+import isObject from 'lodash/isObject';
+import omit from 'lodash/omit';
+import mapValues from 'lodash/mapValues';
export function stripLoc(obj: Object) {
if (Array.isArray(obj)) {
diff --git a/src/data/mutationResults.ts b/src/data/mutationResults.ts
--- a/src/data/mutationResults.ts
+++ b/src/data/mutationResults.ts
@@ -8,8 +8,8 @@ import {
GraphQLResult,
} from 'graphql';
-import mapValues = require('lodash/mapValues');
-import cloneDeep = require('lodash/cloneDeep');
+import mapValues from 'lodash/mapValues';
+import cloneDeep from 'lodash/cloneDeep';
import { replaceQueryResults } from './replaceQueryResults';
diff --git a/src/data/scopeQuery.ts b/src/data/scopeQuery.ts
--- a/src/data/scopeQuery.ts
+++ b/src/data/scopeQuery.ts
@@ -13,7 +13,7 @@ import {
resultKeyNameFromField,
} from './storeUtils';
-import isNumber = require('lodash/isNumber');
+import isNumber from 'lodash/isNumber';
// The type of a path
export type StorePath = (string|number)[];
diff --git a/src/data/storeUtils.ts b/src/data/storeUtils.ts
--- a/src/data/storeUtils.ts
+++ b/src/data/storeUtils.ts
@@ -15,7 +15,7 @@ import {
Name,
} from 'graphql';
-import isObject = require('lodash/isObject');
+import isObject from 'lodash/isObject';
function isStringValue(value: Value): value is StringValue {
return value.kind === 'StringValue';
diff --git a/src/data/writeToStore.ts b/src/data/writeToStore.ts
--- a/src/data/writeToStore.ts
+++ b/src/data/writeToStore.ts
@@ -1,6 +1,6 @@
-import isNull = require('lodash/isNull');
-import isUndefined = require('lodash/isUndefined');
-import isObject = require('lodash/isObject');
+import isNull from 'lodash/isNull';
+import isUndefined from 'lodash/isUndefined';
+import isObject from 'lodash/isObject';
import {
getOperationDefinition,
diff --git a/src/index.ts b/src/index.ts
--- a/src/index.ts
+++ b/src/index.ts
@@ -65,7 +65,7 @@ import ApolloClient from './ApolloClient';
import {
ApolloQueryResult,
-} from './core/QueryManager';
+} from './core/types';
import {
toIdValue,
diff --git a/src/optimistic-data/store.ts b/src/optimistic-data/store.ts
--- a/src/optimistic-data/store.ts
+++ b/src/optimistic-data/store.ts
@@ -14,11 +14,11 @@ import {
} from '../data/storeUtils';
import {
- getDataWithOptimisticResults,
Store,
} from '../store';
-import pick = require('lodash/pick');
+import assign from 'lodash/assign';
+import pick from 'lodash/pick';
// a stack of patches of new or changed documents
export type OptimisticStore = {
@@ -28,6 +28,14 @@ export type OptimisticStore = {
const optimisticDefaultState: any[] = [];
+export function getDataWithOptimisticResults(store: Store): NormalizedCache {
+ if (store.optimistic.length === 0) {
+ return store.data;
+ }
+ const patches = store.optimistic.map(opt => opt.data);
+ return assign({}, store.data, ...patches) as NormalizedCache;
+}
+
export function optimistic(
previousState = optimisticDefaultState,
action: any,
@@ -49,7 +57,7 @@ export function optimistic(
const fakeStore = {
...store,
optimistic: previousState,
- } as Store;
+ };
const optimisticData = getDataWithOptimisticResults(fakeStore);
const fakeDataResultState = data(
optimisticData,
diff --git a/src/queries/getFromAST.ts b/src/queries/getFromAST.ts
--- a/src/queries/getFromAST.ts
+++ b/src/queries/getFromAST.ts
@@ -4,9 +4,9 @@ import {
FragmentDefinition,
} from 'graphql';
-import countBy = require('lodash/countBy');
-import identity = require('lodash/identity');
-import uniq = require('lodash/uniq');
+import countBy from 'lodash/countBy';
+import identity from 'lodash/identity';
+import uniq from 'lodash/uniq';
export function getMutationDefinition(doc: Document): OperationDefinition {
checkDocument(doc);
diff --git a/src/queries/queryTransform.ts b/src/queries/queryTransform.ts
--- a/src/queries/queryTransform.ts
+++ b/src/queries/queryTransform.ts
@@ -11,7 +11,7 @@ import {
checkDocument,
} from './getFromAST';
-import cloneDeep = require('lodash/cloneDeep');
+import cloneDeep from 'lodash/cloneDeep';
const TYPENAME_FIELD: Field = {
kind: 'Field',
diff --git a/src/queries/store.ts b/src/queries/store.ts
--- a/src/queries/store.ts
+++ b/src/queries/store.ts
@@ -18,7 +18,7 @@ import {
GraphQLError,
} from 'graphql';
-import isEqual = require('lodash/isEqual');
+import isEqual from 'lodash/isEqual';
export interface QueryStore {
[queryId: string]: QueryStoreValue;
diff --git a/src/scheduler/scheduler.ts b/src/scheduler/scheduler.ts
--- a/src/scheduler/scheduler.ts
+++ b/src/scheduler/scheduler.ts
@@ -10,10 +10,13 @@
import {
QueryManager,
- QueryListener,
- FetchType,
} from '../core/QueryManager';
+import {
+ FetchType,
+ QueryListener,
+} from '../core/types';
+
import { ObservableQuery } from '../core/ObservableQuery';
import { WatchQueryOptions } from '../core/watchQueryOptions';
diff --git a/src/store.ts b/src/store.ts
--- a/src/store.ts
+++ b/src/store.ts
@@ -27,7 +27,9 @@ import {
import {
optimistic,
OptimisticStore,
+ getDataWithOptimisticResults,
} from './optimistic-data/store';
+export { getDataWithOptimisticResults };
import {
ApolloAction,
@@ -45,7 +47,7 @@ import {
CustomResolverMap,
} from './data/readFromStore';
-import assign = require('lodash/assign');
+import assign from 'lodash/assign';
export interface Store {
data: NormalizedCache;
@@ -184,17 +186,8 @@ export function createApolloStore({
);
}
-
export type ApolloReducerConfig = {
dataIdFromObject?: IdGetter;
mutationBehaviorReducers?: MutationBehaviorReducerMap;
customResolvers?: CustomResolverMap;
};
-
-export function getDataWithOptimisticResults(store: Store): NormalizedCache {
- if (store.optimistic.length === 0) {
- return store.data;
- }
- const patches = store.optimistic.map(opt => opt.data);
- return assign({}, store.data, ...patches) as NormalizedCache;
-}
diff --git a/src/transport/batchedNetworkInterface.ts b/src/transport/batchedNetworkInterface.ts
--- a/src/transport/batchedNetworkInterface.ts
+++ b/src/transport/batchedNetworkInterface.ts
@@ -4,8 +4,8 @@ import {
import 'whatwg-fetch';
-import assign = require('lodash/assign');
-import isNumber = require('lodash/isNumber');
+import assign from 'lodash/assign';
+import isNumber from 'lodash/isNumber';
import {
HTTPFetchNetworkInterface,
diff --git a/src/transport/networkInterface.ts b/src/transport/networkInterface.ts
--- a/src/transport/networkInterface.ts
+++ b/src/transport/networkInterface.ts
@@ -1,6 +1,7 @@
-import isString = require('lodash/isString');
-import assign = require('lodash/assign');
-import mapValues = require('lodash/mapValues');
+import isString from 'lodash/isString';
+import assign from 'lodash/assign';
+import mapValues from 'lodash/mapValues';
+
import 'whatwg-fetch';
import {
| diff --git a/test/QueryManager.ts b/test/QueryManager.ts
--- a/test/QueryManager.ts
+++ b/test/QueryManager.ts
@@ -36,13 +36,13 @@ import ApolloClient, {
import {
ApolloQueryResult,
-} from '../src/core/QueryManager';
+} from '../src/core/types';
import { createStore, combineReducers, applyMiddleware } from 'redux';
import * as Rx from 'rxjs';
-import assign = require('lodash/assign');
+import { assign } from 'lodash';
import mockNetworkInterface, {
ParsedRequest,
diff --git a/test/batchedNetworkInterface.ts b/test/batchedNetworkInterface.ts
--- a/test/batchedNetworkInterface.ts
+++ b/test/batchedNetworkInterface.ts
@@ -1,6 +1,6 @@
import { assert } from 'chai';
-import merge = require('lodash/merge');
+import { merge } from 'lodash';
import { HTTPBatchedNetworkInterface } from '../src/transport/batchedNetworkInterface';
diff --git a/test/client.ts b/test/client.ts
--- a/test/client.ts
+++ b/test/client.ts
@@ -66,7 +66,7 @@ import {
createMockedIResponse,
} from './mocks/mockFetch';
-import * as chaiAsPromised from 'chai-as-promised';
+import chaiAsPromised from 'chai-as-promised';
import { ApolloError } from '../src/errors/ApolloError';
@@ -74,9 +74,7 @@ import { withWarning } from './util/wrap';
import observableToPromise from './util/observableToPromise';
-import cloneDeep = require('lodash/cloneDeep');
-
-import assign = require('lodash/assign');
+import { cloneDeep, assign } from 'lodash';
// make it easy to assert with promises
chai.use(chaiAsPromised);
diff --git a/test/directives.ts b/test/directives.ts
--- a/test/directives.ts
+++ b/test/directives.ts
@@ -11,7 +11,7 @@ import {
import gql from 'graphql-tag';
-import cloneDeep = require('lodash/cloneDeep');
+import { cloneDeep } from 'lodash';
describe('query directives', () => {
it('should should not include a skipped field', () => {
diff --git a/test/fetchMore.ts b/test/fetchMore.ts
--- a/test/fetchMore.ts
+++ b/test/fetchMore.ts
@@ -5,8 +5,7 @@ import mockNetworkInterface from './mocks/mockNetworkInterface';
import ApolloClient from '../src';
import { ObservableQuery } from '../src/core/ObservableQuery';
-import assign = require('lodash/assign');
-import clonedeep = require('lodash/cloneDeep');
+import { assign, cloneDeep } from 'lodash';
import gql from 'graphql-tag';
@@ -57,7 +56,7 @@ describe('updateQuery on a simple query', () => {
.then((watchedQuery: ObservableQuery) => {
assert.equal(latestResult.data.entry.value, 1);
watchedQuery.updateQuery((prevResult: any) => {
- const res = clonedeep(prevResult);
+ const res = cloneDeep(prevResult);
res.entry.value = 2;
return res;
});
@@ -109,7 +108,7 @@ describe('fetchMore on an observable query', () => {
},
},
};
- const resultMore = clonedeep(result);
+ const resultMore = cloneDeep(result);
const result2: any = {
data: {
__typename: 'Query',
@@ -175,7 +174,7 @@ describe('fetchMore on an observable query', () => {
return watchedQuery.fetchMore({
variables: { start: 10 }, // rely on the fact that the original variables had limit: 10
updateQuery: (prev, options) => {
- const state = clonedeep(prev) as any;
+ const state = cloneDeep(prev) as any;
state.entry.comments = [...state.entry.comments, ...(options.fetchMoreResult as any).data.entry.comments];
return state;
},
@@ -205,7 +204,7 @@ describe('fetchMore on an observable query', () => {
query: query2,
variables: variables2,
updateQuery: (prev, options) => {
- const state = clonedeep(prev) as any;
+ const state = cloneDeep(prev) as any;
state.entry.comments = [...state.entry.comments, ...(options.fetchMoreResult as any).data.comments];
return state;
},
diff --git a/test/fixtures/redux-todomvc/reducers.ts b/test/fixtures/redux-todomvc/reducers.ts
--- a/test/fixtures/redux-todomvc/reducers.ts
+++ b/test/fixtures/redux-todomvc/reducers.ts
@@ -1,5 +1,5 @@
import { combineReducers } from 'redux';
-import assign = require('lodash/assign');
+import { assign } from 'lodash';
import {
ADD_TODO,
diff --git a/test/graphqlSubscriptions.ts b/test/graphqlSubscriptions.ts
--- a/test/graphqlSubscriptions.ts
+++ b/test/graphqlSubscriptions.ts
@@ -6,7 +6,7 @@ import {
assert,
} from 'chai';
-import clonedeep = require('lodash/cloneDeep');
+import { cloneDeep } from 'lodash';
import { isSubscriptionResultAction } from '../src/actions';
@@ -282,7 +282,7 @@ describe('GraphQL Subscriptions', () => {
reducer: (previousResult, action) => {
counter++;
if (isSubscriptionResultAction(action)) {
- const newResult = clonedeep(previousResult) as any;
+ const newResult = cloneDeep(previousResult) as any;
newResult.number++;
return newResult;
}
diff --git a/test/mockNetworkInterface.ts b/test/mockNetworkInterface.ts
--- a/test/mockNetworkInterface.ts
+++ b/test/mockNetworkInterface.ts
@@ -7,7 +7,7 @@ import {
MockedSubscription,
} from './mocks/mockNetworkInterface';
-import omit = require('lodash/omit');
+import { omit } from 'lodash';
import gql from 'graphql-tag';
diff --git a/test/mutationResults.ts b/test/mutationResults.ts
--- a/test/mutationResults.ts
+++ b/test/mutationResults.ts
@@ -7,8 +7,7 @@ import { isMutationResultAction, isQueryResultAction } from '../src/actions';
import { Subscription } from '../src/util/Observable';
-import assign = require('lodash/assign');
-import clonedeep = require('lodash/cloneDeep');
+import { assign, cloneDeep } from 'lodash';
import { ObservableQuery } from '../src/core/ObservableQuery';
@@ -743,7 +742,7 @@ describe('mutation results', () => {
reducer: (previousResult, action) => {
counter++;
if (isMutationResultAction(action)) {
- const newResult = clonedeep(previousResult) as any;
+ const newResult = cloneDeep(previousResult) as any;
newResult.todoList.todos.unshift(action.result.data.createTodo);
return newResult;
}
@@ -799,7 +798,7 @@ describe('mutation results', () => {
reducer: (previousResult, action, variables: any) => {
counter++;
if (isMutationResultAction(action) && variables['id'] === 5) {
- const newResult = clonedeep(previousResult) as any;
+ const newResult = cloneDeep(previousResult) as any;
newResult.todoList.todos.unshift(action.result.data.createTodo);
return newResult;
}
@@ -854,7 +853,7 @@ describe('mutation results', () => {
reducer: (previousResult, action) => {
if (isMutationResultAction(action) && action.operationName === 'createTodo') {
counter++;
- const newResult = clonedeep(previousResult) as any;
+ const newResult = cloneDeep(previousResult) as any;
newResult.todoList.todos.unshift(action.result.data.createTodo);
return newResult;
}
@@ -871,7 +870,7 @@ describe('mutation results', () => {
reducer: (previousResult, action) => {
if (isMutationResultAction(action) && action.operationName === 'wrongName') {
counter++; // shouldn't be called, so counter shouldn't increase.
- const newResult = clonedeep(previousResult) as any;
+ const newResult = cloneDeep(previousResult) as any;
newResult.todoList.todos.unshift(action.result.data.createTodo);
return newResult;
}
@@ -919,7 +918,7 @@ describe('mutation results', () => {
reducer: (previousResult, action) => {
counter++;
if (isQueryResultAction(action)) {
- const newResult = clonedeep(previousResult) as any;
+ const newResult = cloneDeep(previousResult) as any;
newResult.todoList.todos.unshift(action.result.data.newTodos[0]);
return newResult;
}
@@ -1003,7 +1002,7 @@ describe('mutation results', () => {
reducer: (previousResult, action) => {
counter++;
if (isMutationResultAction(action)) {
- const newResult = clonedeep(previousResult) as any;
+ const newResult = cloneDeep(previousResult) as any;
newResult.todoList.todos.unshift(action.result.data.createTodo);
return newResult;
}
@@ -1018,7 +1017,7 @@ describe('mutation results', () => {
reducer: (previousResult, action) => {
counter2++;
if (isMutationResultAction(action) && action.result.data.createTodo.completed) {
- const newResult = clonedeep(previousResult) as any;
+ const newResult = cloneDeep(previousResult) as any;
newResult.todoList.filteredTodos.unshift(action.result.data.createTodo);
return newResult;
}
@@ -1170,7 +1169,7 @@ describe('mutation results', () => {
assert.equal(mResult.data.createTodo.id, '99');
assert.equal(mResult.data.createTodo.text, 'This one was created with a mutation.');
- const state = clonedeep(prev) as any;
+ const state = cloneDeep(prev) as any;
state.todoList.todos.unshift(mResult.data.createTodo);
return state;
},
@@ -1209,7 +1208,7 @@ describe('mutation results', () => {
assert.equal(mResult.data.createTodo.id, '99');
assert.equal(mResult.data.createTodo.text, 'This one was created with a mutation.');
- const state = clonedeep(prev) as any;
+ const state = cloneDeep(prev) as any;
state.todoList.todos.unshift(mResult.data.createTodo);
return state;
},
@@ -1233,7 +1232,7 @@ describe('mutation results', () => {
updateQueries: {
todoList: (prev, options) => {
const mResult = options.mutationResult as any;
- const state = clonedeep(prev) as any;
+ const state = cloneDeep(prev) as any;
// It's unfortunate that this function is called at all, but we are removing
// the updateQueries API soon so it won't matter.
state.todoList.todos.unshift(mResult.data && mResult.data.createTodo);
@@ -1248,7 +1247,7 @@ describe('mutation results', () => {
updateQueries: {
todoList: (prev, options) => {
const mResult = options.mutationResult as any;
- const state = clonedeep(prev) as any;
+ const state = cloneDeep(prev) as any;
state.todoList.todos.unshift(mResult.data.createTodo);
return state;
},
diff --git a/test/networkInterface.ts b/test/networkInterface.ts
--- a/test/networkInterface.ts
+++ b/test/networkInterface.ts
@@ -1,9 +1,8 @@
import * as chai from 'chai';
-import * as chaiAsPromised from 'chai-as-promised';
+import chaiAsPromised from 'chai-as-promised';
-import assign = require('lodash/assign');
-import isequal = require('lodash/isEqual');
-import * as fetchMock from 'fetch-mock';
+import { assign, isEqual } from 'lodash';
+import fetchMock from 'fetch-mock';
// make it easy to assert with promises
chai.use(chaiAsPromised);
@@ -116,12 +115,12 @@ describe('network interface', () => {
}
if (query === print(simpleQueryWithVar)
- && isequal(variables, { personNum: 1 })) {
+ && isEqual(variables, { personNum: 1 })) {
return simpleResult;
}
if (query === print(complexQueryWithTwoVars)
- && isequal(variables, { personNum: 1, filmNum: 1 })) {
+ && isEqual(variables, { personNum: 1, filmNum: 1 })) {
return complexResult;
}
diff --git a/test/optimistic.ts b/test/optimistic.ts
--- a/test/optimistic.ts
+++ b/test/optimistic.ts
@@ -6,8 +6,7 @@ import ApolloClient from '../src';
import { MutationBehaviorReducerArgs, MutationBehavior, MutationQueryReducersMap } from '../src/data/mutationResults';
import { NormalizedCache, StoreObject } from '../src/data/storeUtils';
-import assign = require('lodash/assign');
-import clonedeep = require('lodash/cloneDeep');
+import { assign, cloneDeep} from 'lodash';
import { Subscription } from '../src/util/Observable';
@@ -721,7 +720,7 @@ describe('optimistic mutation results', () => {
const mResult = options.mutationResult as any;
assert.equal(mResult.data.createTodo.id, '99');
- const state = clonedeep(prev) as any;
+ const state = cloneDeep(prev) as any;
state.todoList.todos.unshift(mResult.data.createTodo);
return state;
},
@@ -771,7 +770,7 @@ describe('optimistic mutation results', () => {
todoList: (prev, options) => {
const mResult = options.mutationResult as any;
- const state = clonedeep(prev) as any;
+ const state = cloneDeep(prev) as any;
state.todoList.todos.unshift(mResult.data.createTodo);
return state;
},
@@ -845,7 +844,7 @@ describe('optimistic mutation results', () => {
todoList: (prev, options) => {
const mResult = options.mutationResult as any;
- const state = clonedeep(prev) as any;
+ const state = cloneDeep(prev) as any;
state.todoList.todos.unshift(mResult.data.createTodo);
return state;
},
@@ -950,7 +949,7 @@ describe('optimistic mutation results', () => {
reducer: (previousResult, action) => {
counter++;
if (isMutationResultAction(action)) {
- const newResult = clonedeep(previousResult) as any;
+ const newResult = cloneDeep(previousResult) as any;
newResult.todoList.todos.unshift(action.result.data.createTodo);
return newResult;
}
@@ -978,7 +977,7 @@ describe('optimistic mutation results', () => {
reducer: (previousResult, action) => {
counter++;
if (isMutationResultAction(action)) {
- const newResult = clonedeep(previousResult) as any;
+ const newResult = cloneDeep(previousResult) as any;
newResult.todoList.todos.unshift(action.result.data.createTodo);
return newResult;
}
@@ -1134,7 +1133,7 @@ describe('optimistic mutation - githunt comments', () => {
const updateQueries = {
Comment: (prev, { mutationResult: mutationResultArg }) => {
const newComment = (mutationResultArg as any).data.submitComment;
- const state = clonedeep(prev);
+ const state = cloneDeep(prev);
(state as any).entry.comments.unshift(newComment);
return state;
},
diff --git a/test/readFromStore.ts b/test/readFromStore.ts
--- a/test/readFromStore.ts
+++ b/test/readFromStore.ts
@@ -1,6 +1,5 @@
import { assert } from 'chai';
-import assign = require('lodash/assign');
-import omit = require('lodash/omit');
+import { assign, omit } from 'lodash';
import {
readQueryFromStore,
diff --git a/test/subscribeToMore.ts b/test/subscribeToMore.ts
--- a/test/subscribeToMore.ts
+++ b/test/subscribeToMore.ts
@@ -4,10 +4,8 @@ const { assert } = chai;
import {
mockSubscriptionNetworkInterface,
} from './mocks/mockNetworkInterface';
-import ApolloClient from '../src';
-// import assign = require('lodash/assign');
-// import clonedeep = require('lodash/cloneDeep');
+import ApolloClient from '../src';
import gql from 'graphql-tag';
diff --git a/test/util/observableToPromise.ts b/test/util/observableToPromise.ts
--- a/test/util/observableToPromise.ts
+++ b/test/util/observableToPromise.ts
@@ -1,5 +1,5 @@
import { ObservableQuery } from '../../src/core/ObservableQuery';
-import { ApolloQueryResult } from '../../src/core/QueryManager';
+import { ApolloQueryResult } from '../../src/core/types';
import { Subscription } from '../../src/util/Observable';
/**
diff --git a/test/util/subscribeAndCount.ts b/test/util/subscribeAndCount.ts
--- a/test/util/subscribeAndCount.ts
+++ b/test/util/subscribeAndCount.ts
@@ -1,5 +1,5 @@
import { ObservableQuery } from '../../src/core/ObservableQuery';
-import { ApolloQueryResult } from '../../src/core/QueryManager';
+import { ApolloQueryResult } from '../../src/core/types';
import { Subscription } from '../../src/util/Observable';
import wrap from './wrap';
diff --git a/test/writeToStore.ts b/test/writeToStore.ts
--- a/test/writeToStore.ts
+++ b/test/writeToStore.ts
@@ -1,7 +1,5 @@
import { assert } from 'chai';
-import cloneDeep = require('lodash/cloneDeep');
-import assign = require('lodash/assign');
-import omit = require('lodash/omit');
+import { cloneDeep, assign, omit } from 'lodash';
import {
writeQueryToStore,
| Circular dependencies
```
store.js -> optimistic-data/store.js -> store.js
core/ObservableQuery.js -> core/QueryManager.js -> scheduler/scheduler.js -> core/ObservableQuery.js
core/QueryManager.js -> scheduler/scheduler.js -> core/QueryManager.js
optimistic-data/store.js -> store.js -> optimistic-data/store.js
scheduler/scheduler.js -> core/QueryManager.js -> scheduler/scheduler.js
```
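The patch above breaks every one of these cycles the same way: shared declarations are hoisted into a leaf module (the new `src/core/types.ts`), and `getDataWithOptimisticResults` moves next to the optimistic data it reads, with a re-export preserving the old import path. A condensed sketch of the `store.ts` ↔ `optimistic-data/store.ts` case (two files shown in one block; see the diff for the full change):
```
// src/optimistic-data/store.ts: the helper now lives here, next to the
// optimistic patches it reads.
import assign from 'lodash/assign';
import { NormalizedCache } from '../data/storeUtils';
import { Store } from '../store'; // used only as a type, so it is erased
                                  // from the emitted JS and cannot create
                                  // a require() cycle

export function getDataWithOptimisticResults(store: Store): NormalizedCache {
  if (store.optimistic.length === 0) {
    return store.data;
  }
  const patches = store.optimistic.map(opt => opt.data);
  return assign({}, store.data, ...patches) as NormalizedCache;
}

// src/store.ts: re-export from the old location so call sites keep working.
export { getDataWithOptimisticResults } from './optimistic-data/store';
```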
@DxCx
| 2016-12-20T14:11:30Z | 0.5 |
|
apollographql/apollo-client | 1,054 | apollographql__apollo-client-1054 | [
"902"
] | 0f7d80268db1d4e566df624e73c5902a51e389af | diff --git a/src/core/ObservableQuery.ts b/src/core/ObservableQuery.ts
--- a/src/core/ObservableQuery.ts
+++ b/src/core/ObservableQuery.ts
@@ -366,6 +366,11 @@ export class ObservableQuery extends Observable<ApolloQueryResult> {
const retQuerySubscription = {
unsubscribe: () => {
+ if (this.observers.findIndex(el => el === observer) < 0 ) {
+ // XXX can't unsubscribe if you've already unsubscribed...
+ // for some reason unsubscribe gets called multiple times by some of the tests
+ return;
+ }
this.observers = this.observers.filter((obs) => obs !== observer);
if (this.observers.length === 0) {
@@ -435,6 +440,9 @@ export class ObservableQuery extends Observable<ApolloQueryResult> {
this.subscriptionHandles = [];
this.queryManager.stopQuery(this.queryId);
+ if (this.shouldSubscribe) {
+ this.queryManager.removeObservableQuery(this.queryId);
+ }
this.observers = [];
}
}
diff --git a/src/core/QueryManager.ts b/src/core/QueryManager.ts
--- a/src/core/QueryManager.ts
+++ b/src/core/QueryManager.ts
@@ -150,7 +150,7 @@ export class QueryManager {
private queryListeners: { [queryId: string]: QueryListener[] };
private queryDocuments: { [queryId: string]: Document };
- private idCounter = 0;
+ private idCounter = 1; // XXX let's not start at zero to avoid pain with bad checks
// A map going from a requestId to a promise that has not yet been resolved. We use this to keep
// track of queries that are inflight and reject them in case some
@@ -271,19 +271,6 @@ export class QueryManager {
this.queryDocuments[mutationId] = mutation;
- const extraReducers = Object.keys(this.observableQueries).map( queryId => {
- const queryOptions = this.observableQueries[queryId].observableQuery.options;
- if (queryOptions.reducer) {
- return createStoreReducer(
- queryOptions.reducer,
- queryOptions.query,
- queryOptions.variables,
- this.reducerConfig,
- );
- }
- return null;
- }).filter( reducer => reducer !== null );
-
this.store.dispatch({
type: 'APOLLO_MUTATION_INIT',
mutationString,
@@ -293,7 +280,7 @@ export class QueryManager {
mutationId,
optimisticResponse,
resultBehaviors: [...resultBehaviors, ...updateQueriesResultBehaviors],
- extraReducers,
+ extraReducers: this.getExtraReducers(),
});
return new Promise((resolve, reject) => {
@@ -315,7 +302,7 @@ export class QueryManager {
...resultBehaviors,
...this.collectResultBehaviorsFromUpdateQueries(updateQueries, result),
],
- extraReducers,
+ extraReducers: this.getExtraReducers(),
});
refetchQueries.forEach((name) => { this.refetchQueryByName(name); });
@@ -607,7 +594,7 @@ export class QueryManager {
// Insert the ObservableQuery into this.observableQueriesByName if the query has a name
const queryDef = getQueryDefinition(observableQuery.options.query);
if (queryDef.name && queryDef.name.value) {
- const queryName = getQueryDefinition(observableQuery.options.query).name.value;
+ const queryName = queryDef.name.value;
// XXX we may we want to warn the user about query name conflicts in the future
this.queryIdsByName[queryName] = this.queryIdsByName[queryName] || [];
@@ -617,11 +604,14 @@ export class QueryManager {
public removeObservableQuery(queryId: string) {
const observableQuery = this.observableQueries[queryId].observableQuery;
- const queryName = getQueryDefinition(observableQuery.options.query).name.value;
+ const definition = getQueryDefinition(observableQuery.options.query);
+ const queryName = definition.name ? definition.name.value : null;
delete this.observableQueries[queryId];
- this.queryIdsByName[queryName] = this.queryIdsByName[queryName].filter((val) => {
- return !(observableQuery.queryId === val);
- });
+ if (queryName) {
+ this.queryIdsByName[queryName] = this.queryIdsByName[queryName].filter((val) => {
+ return !(observableQuery.queryId === val);
+ });
+ }
}
public resetStore(): void {
@@ -650,9 +640,7 @@ export class QueryManager {
Object.keys(this.observableQueries).forEach((queryId) => {
const storeQuery = this.reduxRootSelector(this.store.getState()).queries[queryId];
- if (! this.observableQueries[queryId].observableQuery.options.noFetch &&
- ! (storeQuery && storeQuery.stopped)
- ) {
+ if (!this.observableQueries[queryId].observableQuery.options.noFetch) {
this.observableQueries[queryId].observableQuery.refetch();
}
});
diff --git a/src/queries/store.ts b/src/queries/store.ts
--- a/src/queries/store.ts
+++ b/src/queries/store.ts
@@ -39,7 +39,6 @@ export type QueryStoreValue = {
queryString: string;
variables: Object;
previousVariables: Object;
- stopped: boolean;
loading: boolean;
networkStatus: NetworkStatus;
networkError: Error;
@@ -108,7 +107,6 @@ export function queries(
queryString: action.queryString,
variables: action.variables,
previousVariables,
- stopped: false,
loading: true,
networkError: null,
graphQLErrors: null,
@@ -182,12 +180,7 @@ export function queries(
} else if (isQueryStopAction(action)) {
const newState = assign({}, previousState) as QueryStore;
- newState[action.queryId] = assign({}, previousState[action.queryId], {
- loading: false,
- stopped: true,
- networkStatus: NetworkStatus.ready,
- }) as QueryStoreValue;
-
+ delete newState[action.queryId];
return newState;
} else if (isStoreResetAction(action)) {
return resetQueryState(previousState, action);
| diff --git a/test/QueryManager.ts b/test/QueryManager.ts
--- a/test/QueryManager.ts
+++ b/test/QueryManager.ts
@@ -535,7 +535,7 @@ describe('QueryManager', () => {
});
});
- it('allows you to subscribe twice to the one query', (done) => {
+ it('allows you to subscribe twice to one query', (done) => {
const request = {
query: gql`
query fetchLuke($id: String) {
diff --git a/test/client.ts b/test/client.ts
--- a/test/client.ts
+++ b/test/client.ts
@@ -484,17 +484,16 @@ describe('client', () => {
const finalState = { apollo: assign({}, initialState.apollo, {
queries: {
- '0': {
+ '1': {
queryString: print(query),
variables: undefined,
loading: false,
networkStatus: NetworkStatus.ready,
- stopped: false,
networkError: null,
graphQLErrors: null,
forceFetch: false,
returnPartialData: false,
- lastRequestId: 1,
+ lastRequestId: 2,
previousVariables: null,
metadata: null,
},
diff --git a/test/mutationResults.ts b/test/mutationResults.ts
--- a/test/mutationResults.ts
+++ b/test/mutationResults.ts
@@ -5,6 +5,8 @@ import { MutationBehaviorReducerArgs, MutationBehavior, cleanArray } from '../sr
import { NormalizedCache, StoreObject } from '../src/data/storeUtils';
import { isMutationResultAction, isQueryResultAction } from '../src/actions';
+import { Subscription } from '../src/util/Observable';
+
import assign = require('lodash/assign');
import clonedeep = require('lodash/cloneDeep');
@@ -993,10 +995,20 @@ describe('mutation results', () => {
};
it('analogous of ARRAY_INSERT', () => {
+ let subscriptionHandle: Subscription;
return setup({
request: { query: mutation },
result: mutationResult,
})
+ .then(() => {
+ // we have to actually subscribe to the query to be able to update it
+ return new Promise( (resolve, reject) => {
+ const handle = client.watchQuery({ query });
+ subscriptionHandle = handle.subscribe({
+ next(res) { resolve(res); },
+ });
+ });
+ })
.then(() => {
return client.mutate({
mutation,
@@ -1017,6 +1029,8 @@ describe('mutation results', () => {
return client.query({ query });
})
.then((newResult: any) => {
+ subscriptionHandle.unsubscribe();
+
// There should be one more todo item than before
assert.equal(newResult.data.todoList.todos.length, 4);
@@ -1105,10 +1119,20 @@ describe('mutation results', () => {
errors.push(msg);
};
+ let subscriptionHandle: Subscription;
return setup({
request: { query: mutation },
result: mutationResult,
})
+ .then(() => {
+ // we have to actually subscribe to the query to be able to update it
+ return new Promise( (resolve, reject) => {
+ const handle = client.watchQuery({ query });
+ subscriptionHandle = handle.subscribe({
+ next(res) { resolve(res); },
+ });
+ });
+ })
.then(() => {
return client.mutate({
mutation,
@@ -1120,6 +1144,7 @@ describe('mutation results', () => {
});
})
.then(() => {
+ subscriptionHandle.unsubscribe();
assert.lengthOf(errors, 1);
assert.equal(errors[0].message, `Hello... It's me.`);
console.error = oldError;
diff --git a/test/optimistic.ts b/test/optimistic.ts
--- a/test/optimistic.ts
+++ b/test/optimistic.ts
@@ -10,6 +10,8 @@ import { addFragmentsToDocument } from '../src/queries/getFromAST';
import assign = require('lodash/assign');
import clonedeep = require('lodash/cloneDeep');
+import { Subscription } from '../src/util/Observable';
+
import gql from 'graphql-tag';
import {
@@ -608,8 +610,8 @@ describe('optimistic mutation results', () => {
}).then((res) => {
checkBothMutationsAreApplied('This one was created with a mutation.', 'Optimistically generated 2');
const mutationsState = client.store.getState().apollo.mutations;
- assert.equal(mutationsState[2].loading, false);
- assert.equal(mutationsState[3].loading, true);
+ assert.equal(mutationsState['3'].loading, false);
+ assert.equal(mutationsState['4'].loading, true);
return res;
});
@@ -621,15 +623,15 @@ describe('optimistic mutation results', () => {
}).then((res) => {
checkBothMutationsAreApplied('This one was created with a mutation.', 'Second mutation.');
const mutationsState = client.store.getState().apollo.mutations;
- assert.equal(mutationsState[2].loading, false);
assert.equal(mutationsState[3].loading, false);
+ assert.equal(mutationsState[4].loading, false);
return res;
});
const mutationsState = client.store.getState().apollo.mutations;
- assert.equal(mutationsState[2].loading, true);
assert.equal(mutationsState[3].loading, true);
+ assert.equal(mutationsState[4].loading, true);
checkBothMutationsAreApplied('Optimistically generated', 'Optimistically generated 2');
@@ -697,10 +699,20 @@ describe('optimistic mutation results', () => {
};
it('analogous of ARRAY_INSERT', () => {
+ let subscriptionHandle: Subscription;
return setup({
request: { query: mutation },
result: mutationResult,
})
+ .then(() => {
+ // we have to actually subscribe to the query to be able to update it
+ return new Promise( (resolve, reject) => {
+ const handle = client.watchQuery({ query });
+ subscriptionHandle = handle.subscribe({
+ next(res) { resolve(res); },
+ });
+ });
+ })
.then(() => {
const promise = client.mutate({
mutation,
@@ -727,6 +739,7 @@ describe('optimistic mutation results', () => {
return client.query({ query });
})
.then((newResult: any) => {
+ subscriptionHandle.unsubscribe();
// There should be one more todo item than before
assert.equal(newResult.data.todoList.todos.length, 4);
@@ -736,6 +749,7 @@ describe('optimistic mutation results', () => {
});
it('two ARRAY_INSERT like mutations', () => {
+ let subscriptionHandle: Subscription;
return setup({
request: { query: mutation },
result: mutationResult,
@@ -744,6 +758,15 @@ describe('optimistic mutation results', () => {
result: mutationResult2,
delay: 50,
})
+ .then(() => {
+ // we have to actually subscribe to the query to be able to update it
+ return new Promise( (resolve, reject) => {
+ const handle = client.watchQuery({ query });
+ subscriptionHandle = handle.subscribe({
+ next(res) { resolve(res); },
+ });
+ });
+ })
.then(() => {
const updateQueries = {
todoList: (prev, options) => {
@@ -783,6 +806,7 @@ describe('optimistic mutation results', () => {
return client.query({ query });
})
.then((newResult: any) => {
+ subscriptionHandle.unsubscribe();
// There should be one more todo item than before
assert.equal(newResult.data.todoList.todos.length, 5);
@@ -793,12 +817,29 @@ describe('optimistic mutation results', () => {
});
it('two mutations, one fails', () => {
+ let subscriptionHandle: Subscription;
return setup({
request: { query: mutation },
error: new Error('forbidden (test error)'),
+ delay: 20,
}, {
request: { query: mutation },
result: mutationResult2,
+ // XXX this test will uncover a flaw in the design of optimistic responses combined with
+ // updateQueries or result reducers if you un-comment the line below. The issue is that
+ // optimistic updates are not commutative but are treated as such. When undoing an
+ // optimistic update, other optimistic updates should be rolled back and re-applied in the
+ // same order as before, otherwise the store can end up in an inconsistent state.
+ // delay: 50,
+ })
+ .then(() => {
+ // we have to actually subscribe to the query to be able to update it
+ return new Promise( (resolve, reject) => {
+ const handle = client.watchQuery({ query });
+ subscriptionHandle = handle.subscribe({
+ next(res) { resolve(res); },
+ });
+ });
})
.then(() => {
const updateQueries = {
@@ -835,6 +876,7 @@ describe('optimistic mutation results', () => {
return Promise.all([promise, promise2]);
})
.then(() => {
+ subscriptionHandle.unsubscribe();
const dataInStore = client.queryManager.getDataWithOptimisticResults();
assert.equal((dataInStore['TodoList5'] as any).todos.length, 4);
assert.notProperty(dataInStore, 'Todo99');
@@ -1123,13 +1165,24 @@ describe('optimistic mutation - githunt comments', () => {
commentContent: 'New Comment',
};
+ let subscriptionHandle: Subscription;
return setup({
request: {
query: addTypenameToDocument(mutation),
variables: mutationVariables,
},
result: mutationResult,
- }).then(() => {
+ })
+ .then(() => {
+ // we have to actually subscribe to the query to be able to update it
+ return new Promise( (resolve, reject) => {
+ const handle = client.watchQuery({ query, variables });
+ subscriptionHandle = handle.subscribe({
+ next(res) { resolve(res); },
+ });
+ });
+ })
+ .then(() => {
return client.mutate({
mutation,
optimisticResponse,
@@ -1139,6 +1192,7 @@ describe('optimistic mutation - githunt comments', () => {
}).then(() => {
return client.query({ query, variables });
}).then((newResult: any) => {
+ subscriptionHandle.unsubscribe();
assert.equal(newResult.data.entry.comments.length, 2);
});
});
@@ -1149,13 +1203,28 @@ describe('optimistic mutation - githunt comments', () => {
commentContent: 'New Comment',
};
+ let subscriptionHandle: Subscription;
return setup({
request: {
query: addFragmentsToDocument(addTypenameToDocument(mutationWithFragment), fragmentWithTypenames),
variables: mutationVariables,
},
result: mutationResult,
- }).then(() => {
+ })
+ .then(() => {
+ // we have to actually subscribe to the query to be able to update it
+ return new Promise( (resolve, reject) => {
+ const handle = client.watchQuery({
+ query: queryWithFragment,
+ variables,
+ fragments: fragment,
+ });
+ subscriptionHandle = handle.subscribe({
+ next(res) { resolve(res); },
+ });
+ });
+ })
+ .then(() => {
return client.mutate({
mutation: mutationWithFragment,
optimisticResponse,
@@ -1166,6 +1235,7 @@ describe('optimistic mutation - githunt comments', () => {
}).then(() => {
return client.query({ query: queryWithFragment, variables, fragments: fragment });
}).then((newResult: any) => {
+ subscriptionHandle.unsubscribe();
assert.equal(newResult.data.entry.comments.length, 2);
});
});
diff --git a/test/store.ts b/test/store.ts
--- a/test/store.ts
+++ b/test/store.ts
@@ -157,7 +157,6 @@ describe('createApolloStore', () => {
'previousVariables': undefined as any,
'queryString': '',
'returnPartialData': false,
- 'stopped': false,
'variables': {},
'metadata': null,
},
| Question about stopped queries and potential bug
I have two views in my app, and each view is wrapped in an Apollo component. Every time I switch between these two views, a new query is created for the component I switch to, and the query for the component being switched away from is stopped. So after switching 20 times between these two views I end up with 20 queries in the apollo store, but only the last one is in a running state. The question I have is: why do we keep creating a new query every time instead of reusing the old one by just reinitializing it? We could have a mechanism to find it in the store by name and then reuse it by flipping its state back to running. If for some reason we need to keep all these old queries in the store, then I think we have a bug in `QueryManager.collectResultBehaviorsFromUpdateQueries`, where the `updateQueries` reducer is called repeatedly for all stopped queries; it gets exactly the same result every time, and exactly the same state is regenerated in the store by constantly mutating it.
If I keep my app open for a day and switch views hundreds of times I will end up with hundreds of dead queries which will, nevertheless, participate in constant state mutation without any effect.
Is there something obvious that I'm missing?
Thank you
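A minimal repro sketch of the behavior described, assuming the 0.5-era API (`client.store` exposes the underlying Redux store, as the tests above use it):
```
import ApolloClient from 'apollo-client';
import gql from 'graphql-tag';

const client = new ApolloClient();
const query = gql`query viewData { author { firstName } }`;

// Simulate switching views 20 times: each mount subscribes, each
// unmount unsubscribes (which dispatches QUERY_STOP).
for (let i = 0; i < 20; i++) {
  const sub = client.watchQuery({ query }).subscribe({ next() {} });
  sub.unsubscribe();
}

// Before the fix, apollo.queries still holds 20 entries, 19 of them dead.
console.log(Object.keys(client.store.getState()['apollo'].queries).length);
```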
| @yurigenin If the queries are stopped, they should be removed from the `queries` part of the store. If that's not the case, then I think it's a bug and we should fix it. You could make a PR if you want!
Just to confirm: you are switching components and seeing more and more queries in your store with the Redux devtools?
edit: what I said is wrong. Stopped queries aren't removed from the store any more. That change was made to prevent the client from crashing in some race conditions (I think).
@helfer yes, the redux devtools show lots of these queries whose 'stop' property is false.
@yurigenin could you provide a reproduction or better yet a PR with a failing test? Then we should be able to fix it pretty easily.
@helfer I am experiencing this as well.
Edit: I help maintain the repo below, and my code is based off of its Apollo branch.
https://github.com/ctrlplusb/react-universally/tree/apollo
It should suffice as an example repo (although a little overkill) if a test case or a PR cannot be created.

Ok, I did a bit of digging and it turns out we no longer delete queries since [this PR](https://github.com/apollostack/apollo-client/pull/696). It's normal that they're in the store, but as you correctly point out they shouldn't get updated with `updateQueries`.
I think we can put in a simple fix in [this loop](https://github.com/apollostack/apollo-client/blob/ba0849bb88ecfe35f6c0a007b38007a994dd4bb7/src/core/QueryManager.ts#L832).
@yurigenin do you think you could make a PR to fix this? I'd be happy to walk you through it if you need help getting started! 🙂
@helfer Sure, I can take a stab at it. I'm finishing something for my day job today and will be happy to jump on it tomorrow. Just a little disclosure: I have never done GitHub PRs, so I would need a tutorial on how to manage a subproject/forked repo on GitHub. Thanks!
@helfer I have a question about that, just for my own curiosity. What is the benefit of keeping them in the store? I have a rather large application, and as a user navigates around, wouldn't that state just keep growing and growing with stopped queries? Is that a result of a flaw in my application design, or do I need to intervene and clean up the state when a user changes to a different route? Thanks in advance.
What is the definition of a "stopped" query? I.e., when would a query be removed from the cache?
Since I am going to use updateQueries to tell components using a query that they need to update their displays, the only way that this could become "no longer needed" is when a component using the query unmounts. Is that right?
@GreenAsJade @carsonperrotti The QUERY_STOP action is dispatched whenever the last observer unsubscribes from a watchQuery, which usually happens when the component unmounts. As the code is right now, queries remain in the cache in a stopped state indefinitely, but they shouldn't run or update any more. We have plans to refactor the queries logic, at which point we may also be able to clear stopped queries from the store. In the meantime it should be possible to clean up stopped queries manually.
PS: I don't think having stopped queries in the store is a big issue for now, because they use relatively little storage space and the client isn't designed to be run for days without resetting the store.
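The lifecycle described here, in code (a hedged sketch; the teardown comment reflects the patch at the top of this entry):
```
const observable = client.watchQuery({ query });

const subscription = observable.subscribe({
  next(result) { /* re-render the component */ },
});

// When the last observer unsubscribes, ObservableQuery tears the query
// down: QUERY_STOP is dispatched and, with the fix above, the entry is
// deleted from the store and the query is unregistered via
// removeObservableQuery.
subscription.unsubscribe();
```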
I agree. I don't see any particular issue here, now that I understand it.
@helfer I'm ready to tackle this (I set up my dev env and forked the apollo-client repo). Now to the hard part: I still do not know what the solution would be. Currently, as soon as the component with a query unmounts, its query is stopped (marked `stopped: true` in the store). When a different component (usually a dialog) sends a mutation, `QueryManager.collectResultBehaviorsFromUpdateQueries` is called; it iterates through all keys passed to `mutate` in the `updateQueries` object, retrieves their last results, and passes them to the query reducers. So, in this case, all queries are marked as stopped, and if we somehow 'fix' it so that these queries are ignored, then no reducer will ever be called.
The problem now is that every time the component with a query is remounted, a new query is created. If you switch between two components with queries 100 times, you will have 100 query entries (50 for each component) in the store under the `queries` key, as the screenshot showed. And the reducers will be called 50 times per component, always passing and returning the same data (mutated on every call).
The bottom line: if you ignore stopped queries, no reducer will ever be called; if you keep doing what we are doing now, you end up with multiple redundant calls doing exactly nothing.
I'm ready for any ideas...
@helfer Thinking out loud. I think one solution would be, on creating a new query, to always check whether there is another query with the same name and variables already in the store under the `queries` key. If there is, then dispatch a new action `APOLLO_QUERY_REMOVE` and only then create the new query. This, I think, would work: even components not currently mounted would be able to 'reduce', and only one query at a time would exist in the store under the `queries` key.
What do you think?
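A sketch of that proposal, purely hypothetical (`APOLLO_QUERY_REMOVE` was never implemented; the merged fix instead deletes the entry when QUERY_STOP is handled, as the `queries/store.ts` hunk above shows):
```
import isEqual from 'lodash/isEqual';

// Hypothetical: before registering a new watched query, evict any stale
// entry that has the same query string and variables.
function evictStaleQuery(store: any, queryString: string, variables: Object) {
  const queries = store.getState().apollo.queries;
  Object.keys(queries).forEach(queryId => {
    const entry = queries[queryId];
    if (entry.stopped &&
        entry.queryString === queryString &&
        isEqual(entry.variables, variables)) {
      store.dispatch({ type: 'APOLLO_QUERY_REMOVE', queryId }); // hypothetical action
    }
  });
}
```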
No, AFAIK. One query entry per component is enough. Every time a component mounts it creates a new query entry. So duplicate ones serve no purpose.
> On Nov 18, 2016, at 4:13 PM, GreenAsJade wrote:
>
> Are the duplicate queries "connected" (in some way) to the specific components that they feed? Just wildly speculating :) If they are, then maybe each duplicate has to remain there to cause the re-render of the wrapped component that it is responsible for?
Copied from #903 as some of it is related.
-----------------------------------
Once a component mounts, its query is added to a map (`QueryManager.addObservableQuery`) and never, ever gets removed from it. There is a method that is supposed to be called to remove it (`QueryManager.removeObservableQuery`), but it is not called from any path in the app. So every time you unmount and remount any component with a query, it adds a new query structure to that map (`QueryManager.observableQueries` and `QueryManager.queryIdsByName`). I used the GitHunt-React app and was able to reproduce it easily: just switch between the Feed view and the Submit view. Every time you click on the Feed view, a new query is added to the map. I clicked 100 times and got 100 queries. If you register a mutation with `updateQueries` for the Feed query, it will be called exactly 100 times, no kidding. Imagine 20 HOCs with queries and 20 HOCs with mutations. It could get ugly fast. Resetting the store does not solve this problem, as `observableQueries` and `queryIdsByName` are never cleaned up.
I think the reason the queries are not removed is that they are tied to how the updateQueries and reducer logic works for mutations. If you switch to another view but still want the updateQueries and reducers to be called, then you cannot drop these queries, so they hang around forever.
This must be redesigned to decouple queries and query reducers. Those should ideally live independently, be registered once as the app starts up, and be shared among multiple queries if need be. But given where the code is now, that would require a major overhaul.
The problem with parameterized queries is that they are repurposed every time you change a variable value (the opposite of the problem above). Let's say you have a view with a dropdown with 3 choices (Open, InProgress, Closed) and you want to filter data in the view based on one of these values. So you create a query with one variable (status). Every time the user picks a different choice from the dropdown, you send a new query. Let's say you chose all of them, and now you have three different results in the cache. So far so good. Now you want to add several items to the list via a mutation. But since it is the same query, only the last one is actually registered with observableQueries and queryIdsByName in the QueryManager. That means updateQueries and the reducer will only be called once, with the variable value set to the last one used. For example, you last viewed items with an Open state. In the mutation you added some items with a Closed state. updateQueries/the reducer is only called for the query results with an Open status. Now if you go to see your list filtered by a Closed state, you will not find the newly added items there, since a reducer was not called for those query results. It's interesting that GitHunt-React 'solves' this problem by setting the forceFetch flag on the query so it always ignores the cache, which defeats the whole purpose, right?
One more major issue I found (I probably need to submit it separately, but I want to mention it here since it is related) is that the current design appears to result in memory leaks. You can add a reducer function on any query HOC; it is a function attached to the query options object. But when you switch between different HOCs, they are unmounted and remounted, resulting in new objects being constructed. Unmounted HOCs that have reducers attached cannot be garbage collected, since they keep being referenced by observableQueries, which never gets cleaned up for the reason I described above. That is why reducers should probably be registered globally, so that they do not prevent unmounted components from being garbage collected.
Another minor (?) issue is that the queries in the `queries` sub-store of the apollo store never get deleted, just marked as `stopped: true`. I could not find a good reason for them to be there once they are stopped, but I might have overlooked something. So, as components get remounted, this sub-store grows and grows, and you can only trim it by resetting the entire store.
So the dilemma now is that the ideas behind apollo-client are great (normalized cache, optimistic responses, paging, etc.), but the implementation is still not up to the enterprise level, where you can have lots of HOCs with queries and mutations and need to be sure that the integrity of the data in the cache is not compromised, that there is no unbounded growth of internal objects, and that memory leaks are not an issue. | 2016-12-17T14:31:53Z | 0.5 |
apollographql/apollo-client | 683 | apollographql__apollo-client-683 | [
"678"
] | 3f5e15f58551478988150b9600589719dba5b7ba | diff --git a/src/data/diffAgainstStore.ts b/src/data/diffAgainstStore.ts
--- a/src/data/diffAgainstStore.ts
+++ b/src/data/diffAgainstStore.ts
@@ -2,7 +2,7 @@ import isArray = require('lodash.isarray');
import isNull = require('lodash.isnull');
import isObject = require('lodash.isobject');
import has = require('lodash.has');
-import deepAssign = require('deep-assign');
+import merge = require('lodash.merge');
import {
storeKeyNameFromField,
@@ -221,7 +221,7 @@ export function diffSelectionSetAgainstStore({
}
if (isObject(fieldResult)) {
- deepAssign(result, fieldResult);
+ merge(result, fieldResult);
}
if (!fragmentErrors[typename]) {
fragmentErrors[typename] = null;
@@ -260,7 +260,7 @@ export function diffSelectionSetAgainstStore({
pushMissingField(selection);
}
if (isObject(fieldResult)) {
- deepAssign(result, fieldResult);
+ merge(result, fieldResult);
}
if (!fragmentErrors[typename]) {
| diff --git a/test/readFromStore.ts b/test/readFromStore.ts
--- a/test/readFromStore.ts
+++ b/test/readFromStore.ts
@@ -172,7 +172,9 @@ describe('reading from the store', () => {
deepNestedObj: {
stringField: 'This is a deep string',
numberField: 7,
+ nullField: null,
} as StoreObject,
+ nullObject: null,
};
const store = {
@@ -180,6 +182,7 @@ describe('reading from the store', () => {
nestedObj: {
type: 'id',
id: 'abcde',
+ nullField: null,
generated: false,
},
}) as StoreObject,
@@ -188,6 +191,7 @@ describe('reading from the store', () => {
type: 'id',
id: 'abcdef',
generated: false,
+ nullField: null,
},
}) as StoreObject,
abcdef: result.deepNestedObj as StoreObject,
@@ -199,22 +203,30 @@ describe('reading from the store', () => {
fragment FragmentName on Item {
stringField,
numberField,
+ nullField,
...on Item {
nestedObj {
stringField
+ nullField
deepNestedObj {
stringField
+ nullField
}
}
}
...on Item {
nestedObj {
numberField
+ nullField
deepNestedObj {
numberField
+ nullField
}
}
}
+ ... on Item {
+ nullObject
+ }
}
`,
rootId: 'abcd',
@@ -224,14 +236,18 @@ describe('reading from the store', () => {
assert.deepEqual(queryResult, {
stringField: 'This is a string!',
numberField: 5,
+ nullField: null,
nestedObj: {
stringField: 'This is a string too!',
numberField: 6,
+ nullField: null,
deepNestedObj: {
stringField: 'This is a deep string',
numberField: 7,
+ nullField: null,
},
},
+ nullObject: null,
});
});
| Getting "Can't find field" errors on null fields (even if the field is nullable)
Since 0.4.16.
Once a mutation comes back and its results are reinjected into the store via `updateQueries`, the following error is thrown:
```
Can't find field cursor on result object $ROOT_QUERY.connection({"id":"xxx"}).nodeChildren({"id":"root","limit":40,"cursor":null,"sortBy":null}).
Error
at new ApolloError (webpack:///./~/apollo-client/lib/src/errors.js?:14:22)
at eval (webpack:///./~/apollo-client/lib/src/data/writeToStore.js?:59:23)
at Array.forEach (native)
at writeSelectionSetToStore (webpack:///./~/apollo-client/lib/src/data/writeToStore.js?:53:29)
at eval (webpack:///./~/apollo-client/lib/src/data/writeToStore.js?:121:21)
at Array.forEach (native)
at writeSelectionSetToStore (webpack:///./~/apollo-client/lib/src/data/writeToStore.js?:53:29)
at writeFieldToStore (webpack:///./~/apollo-client/lib/src/data/writeToStore.js?:226:9)
at eval (webpack:///./~/apollo-client/lib/src/data/writeToStore.js?:75:17)
at Array.forEach (native)
```
| This error should happen on `master` - can you submit a failing test as a PR?
Working on it 😉 !
Harder to reproduce than expected. It seems like I have to throw fragments into the mix!
OK, maybe it's a result of that deep-assign PR for fragments then? That's the only relevant change I can think of.
Urr, I'm on an older version and can't get it to fail; trying with the **latest** master now!
But, yes, I think this PR, even if it'll help us **a lot**, may be the one responsible.
Okay, maybe I'm not in a good setting to write the test, but I see how it's happening in our app: basically, the deepMerge seems to "transform" null fields into undefined.
Wow that would be really silly.
Yes, I agree; that's why I want to get the test first, because it seems crazy.
Okay, I need to go for today; we'll freeze for now, and I'll try to get the test ASAP. I think I'm not writing it in the right place: any advice on where I should write it? For now I tried in `mutationResults`; I don't really see where else to put it, but I tried to make it fail there by adding an `undefined` (which we want to see fail) and it didn't.
Tried it here: https://github.com/apollostack/apollo-client/compare/fix-678?expand=1
@rricard is that last link for a test that successfully reproduces the bug, or was it just an (unsuccessful) attempt?
Guys, I don't even think we need to write a fancy test for this; deep-assign doesn't copy null values:
https://github.com/sindresorhus/deep-assign/blob/699948936d466e2ea738610fcf7cc80aa21babf7/test.js#L13
```
test('do not assign null values', t => {
t.deepEqual(fn({}, {foo: null}), {});
});
```
I'll find some other deep-copy thing we can use, or I'll write one myself if there's nothing decent out there.
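The fix that eventually landed (see the diff at the top of this entry) swaps `deep-assign` for `lodash.merge`, which does copy explicit `null` values; it only skips `undefined`. A quick demonstration of the difference, assuming lodash 4.x and the import style used in the patch:
```
import merge = require('lodash.merge');

const target = { nestedObj: { stringField: 'kept' } };
const source = { nestedObj: { nullField: null } };

// lodash.merge only skips `undefined` source values; explicit nulls are
// copied, so nullable fields survive the deep merge that
// diffSelectionSetAgainstStore performs when combining fragment results.
merge(target, source);
console.log(target); // { nestedObj: { stringField: 'kept', nullField: null } }
```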
| 2016-09-20T20:59:37Z | 0.4 |
apollographql/apollo-client | 581 | apollographql__apollo-client-581 | [
"567"
] | 919340626a40eb1a96b9b4a1e035694d8cac2302 | diff --git a/src/index.ts b/src/index.ts
--- a/src/index.ts
+++ b/src/index.ts
@@ -299,6 +299,10 @@ export default class ApolloClient {
}));
};
+ public resetStore() {
+ this.queryManager.resetStore();
+ };
+
private setStore(store: ApolloStore) {
// ensure existing store has apolloReducer
if (isUndefined(store.getState()[this.reduxRootKey])) {
| diff --git a/test/client.ts b/test/client.ts
--- a/test/client.ts
+++ b/test/client.ts
@@ -1594,4 +1594,14 @@ describe('client', () => {
done();
});
});
+
+ it('has a resetStore method which calls QueryManager', (done) => {
+ const client = new ApolloClient();
+ client.queryManager = {
+ resetStore: () => {
+ done();
+ },
+ } as QueryManager;
+ client.resetStore();
+ });
});
| resetStore method disappeared
There used to be a `resetStore` method on `ApolloClient`, but it seems to have disappeared altogether due to some merge.
This should be an easy fix: the `resetStore` method on `ApolloClient` should just call the `resetStore` method on `this.queryManager`, probably with a test to go along with it.
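For reference, that delegation is exactly what the diff above adds; a typical (assumed) call site would be cache invalidation, e.g. on logout:
```
import ApolloClient from 'apollo-client';

// Assumed usage sketch: once the delegation is restored, app code can
// clear the normalized cache again when the user logs out.
function onLogout(client: ApolloClient) {
  client.resetStore();
}
```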
| Whoever does this should also figure out when it disappeared because that will help us prevent similar "accidents" in the future. Also curious to know why this wasn't caught by a test.
I would guess we have a test for the `QueryManager` version but not the `ApolloClient` version.
Was it definitely called `resetStore`? I just did a search back through every commit, and the only `resetStore` I could find is on `QueryManager`:
```
git grep resetStore $(git rev-list --all)
```
grepping for `reset` returned a lot more results, but I didn't see anything interesting.
Maybe a rebase/force push somewhere is to blame?
@johnthepink agreed, even the PR which added it is just on the manager https://github.com/apollostack/apollo-client/pull/314/files#diff-748b449db1e130c8d777fa1ffa1e0725R462
| 2016-08-24T01:43:15Z | 0.4 |
apollographql/apollo-client | 493 | apollographql__apollo-client-493 | [
"490"
] | 0c91cb1555e637c12dbfcdcae31f553da7421784 | diff --git a/src/batching/queryMerging.ts b/src/batching/queryMerging.ts
--- a/src/batching/queryMerging.ts
+++ b/src/batching/queryMerging.ts
@@ -49,6 +49,10 @@ import {
Request,
} from '../networkInterface';
+import {
+ resultKeyNameFromField,
+} from '../data/storeUtils';
+
import assign = require('lodash.assign');
import cloneDeep = require('lodash.clonedeep');
@@ -96,7 +100,7 @@ export function unpackMergedResult(result: GraphQLResult,
const childRequestIndex = mergeInfo.requestIndex;
const fieldMap = fieldMaps[childRequestIndex];
const field = fieldMap[mergeInfo.fieldIndex];
- data[field.name.value] = result.data[dataKey];
+ data[resultKeyNameFromField(field)] = result.data[dataKey];
if (resultArray[childRequestIndex]) {
assign(resultArray[childRequestIndex].data, data);
| diff --git a/test/queryMerging.ts b/test/queryMerging.ts
--- a/test/queryMerging.ts
+++ b/test/queryMerging.ts
@@ -788,4 +788,54 @@ describe('Query merging', () => {
const mergedRequest = mergeRequests([{query: query1}, {query: query2}]);
assert.equal(print(mergedRequest.query), print(expQuery));
});
+
+ it('should be able to correctly merge queries with aliases', () => {
+ const query = gql`
+ query firstQuery {
+ someAlias: author {
+ firstName
+ lastName
+ }
+ }`;
+ const secondQuery = gql`
+ query secondQuery {
+ person {
+ name
+ }
+ }`;
+ const expQuery = gql`
+ query ___composed {
+ ___firstQuery___requestIndex_0___fieldIndex_0: author {
+ firstName
+ lastName
+ }
+
+ ___secondQuery___requestIndex_1___fieldIndex_0: person {
+ name
+ }
+ }`;
+ const firstResult = {
+ someAlias: {
+ firstName: 'John',
+ lastName: 'Smith',
+ },
+ };
+ const secondResult = {
+ person: {
+ name: 'Jane Smith',
+ },
+ };
+ const composedResult = {
+ ___firstQuery___requestIndex_0___fieldIndex_0: firstResult.someAlias,
+ ___secondQuery___requestIndex_1___fieldIndex_0: secondResult.person,
+ };
+ const requests = [{ query }, { query: secondQuery }];
+ const mergedRequest = mergeRequests(requests);
+ assert.equal(print(mergedRequest.query), print(expQuery));
+
+ const unpackedResults = unpackMergedResult({ data: composedResult }, requests);
+ assert.equal(unpackedResults.length, 2);
+ assert.deepEqual(unpackedResults[0], { data: firstResult });
+ assert.deepEqual(unpackedResults[1], { data: secondResult });
+ });
});
| Renaming queries not working with batching
If I send the following query in a request that is batched with other queries...
```
query getActivity {
activities: lookActivities(skip: 0, limit: 10) {
_id
type
owner {
_id
name
}
}
}
```
...it will give me the following error:

However, if it isn't batched with other queries, it will work fine. It will also work fine if I don't rename the query:
```
query getActivity {
lookActivities(skip: 0, limit: 10) {
_id
type
owner {
_id
name
}
}
}
```
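The cause lines up with the patch above: when unpacking a merged result, the field's schema name was used as the data key even when the server keyed the data by the alias. A sketch of the alias-aware lookup (`field` is a standard graphql-js Field AST node):
``` js
// For `activities: lookActivities(...)`:
// field.name.value === 'lookActivities' (the schema field name)
// field.alias.value === 'activities' (the key the result is actually stored under)
function resultKeyNameFromField(field) {
  return field.alias ? field.alias.value : field.name.value;
}
```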
| 2016-08-01T20:54:41Z | 0.4 |
|
apollographql/apollo-client | 465 | apollographql__apollo-client-465 | [
"458"
] | f33998bb05c2349eb563403027fb0f1166b9a9f8 | diff --git a/src/QueryManager.ts b/src/QueryManager.ts
--- a/src/QueryManager.ts
+++ b/src/QueryManager.ts
@@ -237,29 +237,39 @@ export class QueryManager {
resultBehaviors: [...resultBehaviors, ...updateQueriesResultBehaviors],
});
- return this.networkInterface.query(request)
- .then((result) => {
- this.store.dispatch({
- type: 'APOLLO_MUTATION_RESULT',
- result,
- mutationId,
- resultBehaviors: [
- ...resultBehaviors,
- ...this.collectResultBehaviorsFromUpdateQueries(updateQueries, result),
- ],
- });
+ return new Promise((resolve, reject) => {
+ this.networkInterface.query(request)
+ .then((result) => {
+ if (result.errors) {
+ reject(new ApolloError({
+ graphQLErrors: result.errors,
+ }));
+ }
- return result;
- })
- .catch((err) => {
- this.store.dispatch({
- type: 'APOLLO_MUTATION_ERROR',
- error: err,
- mutationId,
- });
+ this.store.dispatch({
+ type: 'APOLLO_MUTATION_RESULT',
+ result,
+ mutationId,
+ resultBehaviors: [
+ ...resultBehaviors,
+ ...this.collectResultBehaviorsFromUpdateQueries(updateQueries, result),
+ ],
+ });
- return Promise.reject(err);
- });
+ resolve(result);
+ })
+ .catch((err) => {
+ this.store.dispatch({
+ type: 'APOLLO_MUTATION_ERROR',
+ error: err,
+ mutationId,
+ });
+
+ reject(new ApolloError({
+ networkError: err,
+ }));
+ });
+ });
}
// Returns a query listener that will update the given observer based on the
| diff --git a/test/client.ts b/test/client.ts
--- a/test/client.ts
+++ b/test/client.ts
@@ -1462,4 +1462,71 @@ describe('client', () => {
assert.equal(query.definitions, initialDefinitions);
});
});
+
+ it('should pass a network error correctly on a mutation', (done) => {
+ const mutation = gql`
+ mutation {
+ person {
+ firstName
+ lastName
+ }
+ }`;
+ const data = {
+ person: {
+ firstName: 'John',
+ lastName: 'Smith',
+ },
+ };
+ const networkError = new Error('Some kind of network error.');
+ const client = new ApolloClient({
+ networkInterface: mockNetworkInterface({
+ request: { query: mutation },
+ result: { data },
+ error: networkError,
+ }),
+ });
+
+ client.mutate({ mutation }).then((result) => {
+ done(new Error('Returned a result when it should not have.'));
+ }).catch((error) => {
+ const apolloError = error as ApolloError;
+ assert(apolloError.networkError);
+ assert.equal(apolloError.networkError.message, networkError.message);
+ done();
+ });
+ });
+
+ it('should pass a GraphQL error correctly on a mutation', (done) => {
+ const mutation = gql`
+ mutation {
+ newPerson {
+ person {
+ firstName
+ lastName
+ }
+ }
+ }`;
+ const data = {
+ person: {
+ firstName: 'John',
+ lastName: 'Smith',
+ },
+ };
+ const errors = [ new Error('Some kind of GraphQL error.') ];
+ const client = new ApolloClient({
+ networkInterface: mockNetworkInterface({
+ request: { query: mutation },
+ result: { data, errors },
+ }),
+ });
+ client.mutate({ mutation }).then((result) => {
+ done(new Error('Returned a result when it should not have.'));
+ }).catch((error) => {
+ const apolloError = error as ApolloError;
+ assert(apolloError.graphQLErrors);
+ assert.equal(apolloError.graphQLErrors.length, 1);
+ assert.equal(apolloError.graphQLErrors[0].message, errors[0].message);
+ done();
+ });
+ });
});
diff --git a/test/optimistic.ts b/test/optimistic.ts
--- a/test/optimistic.ts
+++ b/test/optimistic.ts
@@ -503,7 +503,7 @@ describe('optimistic mutation results', () => {
})
.catch((err) => {
assert.instanceOf(err, Error);
- assert.equal(err.message, 'forbidden (test error)');
+ assert.equal(err.message, 'Network error: forbidden (test error)');
const dataInStore = client.queryManager.getDataWithOptimisticResults();
assert.equal((dataInStore['TodoList5'] as any).todos.length, 3);
@@ -539,7 +539,7 @@ describe('optimistic mutation results', () => {
}).catch((err) => {
// it is ok to fail here
assert.instanceOf(err, Error);
- assert.equal(err.message, 'forbidden (test error)');
+ assert.equal(err.message, 'Network error: forbidden (test error)');
return null;
});
@@ -814,7 +814,7 @@ describe('optimistic mutation results', () => {
}).catch((err) => {
// it is ok to fail here
assert.instanceOf(err, Error);
- assert.equal(err.message, 'forbidden (test error)');
+ assert.equal(err.message, 'Network error: forbidden (test error)');
return null;
});
| ApolloError not thrown when receiving mutation result with GraphQL errors
ApolloError is not thrown when the result of a mutation contains errors. I haven't tested with queries yet, so I don't know if this is only a problem for mutations.
Example:
``` js
client.mutate({
mutation: gql`
mutation update($input: ProfileUpdateType) {
updateUser(input: $input) {
_id
email
name
picture
}
}
`,
variables: {
input: {
invalidField: 'value'
}
}
})
.then(({ errors }) => {
if (errors) {
console.log('ApolloError not thrown')
}
})
.catch(error => console.log('ApolloError: ', error.message));
```
Results in `ApolloError not thrown` being logged to the console.
Example of result with errors:

| 2016-07-25T20:54:41Z | 0.4 |
|
apollographql/apollo-client | 445 | apollographql__apollo-client-445 | [
"434",
"445"
] | f3c1306a9b22e84b5ed995a9f69d7d4fbe541a6d | diff --git a/src/QueryManager.ts b/src/QueryManager.ts
--- a/src/QueryManager.ts
+++ b/src/QueryManager.ts
@@ -699,11 +699,6 @@ export class QueryManager {
});
this.removeFetchQueryPromise(requestId);
- if (result.errors) {
- reject(new ApolloError({
- graphQLErrors: result.errors,
- }));
- }
return result;
}).then(() => {
@@ -738,9 +733,6 @@ export class QueryManager {
this.removeFetchQueryPromise(requestId);
- reject(new ApolloError({
- networkError: error,
- }));
});
});
return retPromise;
diff --git a/src/errors.ts b/src/errors.ts
--- a/src/errors.ts
+++ b/src/errors.ts
@@ -21,6 +21,9 @@ export class ApolloError extends Error {
this.graphQLErrors = graphQLErrors;
this.networkError = networkError;
+ // set up the stack trace
+ this.stack = new Error().stack;
+
if (!errorMessage) {
this.generateErrorMessage();
} else {
| diff --git a/test/QueryManager.ts b/test/QueryManager.ts
--- a/test/QueryManager.ts
+++ b/test/QueryManager.ts
@@ -2611,7 +2611,7 @@ describe('QueryManager', () => {
});
});
- it('should reject a fetchQuery promise given a network error', (done) => {
+ it('should reject a query promise given a network error', (done) => {
const query = gql`
query {
author {
@@ -2629,7 +2629,7 @@ describe('QueryManager', () => {
store: createApolloStore(),
reduxRootKey: 'apollo',
});
- queryManager.fetchQuery('fake-id', { query }).then((result) => {
+ queryManager.query({ query }).then((result) => {
done(new Error('Returned result on an errored fetchQuery'));
}).catch((error) => {
const apolloError = error as ApolloError;
@@ -2685,7 +2685,7 @@ describe('QueryManager', () => {
});
});
- it('should reject a fetchQuery promise given a GraphQL error', (done) => {
+ it('should reject a query promise given a GraphQL error', (done) => {
const query = gql`
query {
author {
@@ -2703,7 +2703,7 @@ describe('QueryManager', () => {
store: createApolloStore(),
reduxRootKey: 'apollo',
});
- queryManager.fetchQuery('fake-id', { query }).then((result) => {
+ queryManager.query({ query }).then((result) => {
done(new Error('Returned result on an errored fetchQuery'));
}).catch((error) => {
const apolloError = error as ApolloError;
diff --git a/test/errors.ts b/test/errors.ts
--- a/test/errors.ts
+++ b/test/errors.ts
@@ -68,4 +68,14 @@ describe('ApolloError', () => {
assert.include(messages[1], 'Network error');
assert.include(messages[1], 'network error message');
});
+
+ it('should contain a stack trace', () => {
+ const graphQLErrors = [ new Error('graphql error message') ];
+ const networkError = new Error('network error message');
+ const apolloError = new ApolloError({
+ graphQLErrors,
+ networkError,
+ });
+ assert(apolloError.stack, 'Does not contain a stack trace.');
+ });
});
| Several error reporting issues
I think there may be several issues here; this was prompted by [a discussion with @Poincare in Slack](https://apollostack.slack.com/archives/general/p1469029558000617). These may be separate issues; let me know if you'd prefer separate open issues for each.
# Issues
1. `render` error reported as a `networkError`
2. `ApolloError` object does not include stack traces
3. `crashReporter` from `store.ts` not included in `client.middleware()`
# Background
I'm using `react-apollo` and `apollo-client` with an existing Redux store, and had a situation where a component (hooked up using `connect`) was throwing an error because I was trying to access a property of `undefined` (see line 39 below). While this is a reduced example, tracking down the actual error in my app was fairly difficult.
## 1) `render` error
The code below will report the 'cannot read property' error as a `networkError`(!) property of the ApolloError object, even though the error is in a `render` method. I'm not sure why that would be.
## 2) No stack trace on `ApolloError`
The ApolloError object itself doesn't have a stack trace—I have to go into the `networkError` property directly to access it.
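The patch above fixes (2) by capturing a stack in the constructor. A reduced sketch of the pattern (the real class takes more options than shown here):
``` javascript
class ApolloError extends Error {
  constructor({ graphQLErrors, networkError }) {
    super();
    this.graphQLErrors = graphQLErrors;
    this.networkError = networkError;
    // Subclassing Error doesn't reliably populate `this.stack`,
    // so capture one explicitly, as the patch does.
    this.stack = new Error().stack;
  }
}
```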
## 3) `crashReporter`
While trying to create the reduced test case below, I noticed that if I let Apollo manage the Redux store, I got a separate error message with the proper stack trace, from [this line](https://github.com/apollostack/apollo-client/blob/061180efcb45a8c043af3fc38d71d7642c1300da/src/store.ts#L63). This doesn't happen if you are using an existing Redux store.
# Code
``` javascript
import React from 'react';
import ReactDOM from 'react-dom';
import ApolloClient from 'apollo-client';
import {ApolloProvider, connect} from 'react-apollo';
import gql from 'graphql-tag';
import { createStore, combineReducers, applyMiddleware } from 'redux';
const client = new ApolloClient({
networkInterface: {
query: function() {
return new Promise(function(resolve, reject) {
return resolve({
data: {
test: {
id: 1
}
}
});
});
}
}
});
const store = createStore(
combineReducers({
apollo: client.reducer(),
}),
applyMiddleware(client.middleware())
);
const Example = function(props) {
if (props.myQuery.loading) {
return <div>Loading...</div>;
}
if (props.myQuery.errors) {
console.error(props.myQuery.errors.stack); // undefined
console.error(props.myQuery.errors.networkError.stack); // correct source-mapped stack trace
return <div>Errors!</div>;
};
return <h1>Hello, world!{props.myQuery.doesnt_exist.completely_undefined}</h1>; // throws an error
};
var MyComponent = connect({
mapQueriesToProps({ownProps, state}) {
return {
myQuery: {
query: gql` {
test {id}
}`
}
}
}
})(Example);
ReactDOM.render(
<ApolloProvider client={client} store={store}>
<MyComponent />
</ApolloProvider>,
document.getElementById('body')
);
```
There is currently an [example at http://apolloerror-example.dahjelle.c9users.io/](http://apolloerror-example.dahjelle.c9users.io/), but I have no idea how long c9.io will let it run. :-)
Fix error issues
Fixes #434 and #423.
TODO:
- [x] Update CHANGELOG.md with your change
- [x] Make sure all of the significant new logic is covered by tests
- [x] Rebase your changes on master so that they can be merged easily
- [x] Make sure all tests and linter rules pass
| @dahjelle is this example something we can clone and run locally?
I put it in [this GitHub repo](https://github.com/dahjelle/apolloerror-example). Should be fairly straightforward to get it going (but, then again, it's the stack I'm used to :-) ).
Nice, I dig the mocked network interface! This could be a great way to distribute simple examples :]
I was pretty pumped when I realized you'd set things up to enable mocking a whole GraphQL server. 👍
That's how we run all of the apollo client tests! https://github.com/apollostack/apollo-client/blob/master/test/mocks/mockNetworkInterface.ts
I think (1) may be tied to one of the current issues with `react-apollo`, (2) is definitely fixable from `apollo-client` (I'll work on it) and I'll have to look more into (3) to figure out what is going on.
| 2016-07-21T18:50:26Z | 0.4 |
apollographql/apollo-client | 313 | apollographql__apollo-client-313 | [
"223"
] | 7577a3deece753e235bdf5360757b84dda6ba1cc | diff --git a/gql.js b/gql.js
--- a/gql.js
+++ b/gql.js
@@ -1,4 +1,2 @@
-/* We are placing this file in the root to enable npm-link development
- * Currently, gql resides in a submodule and is not able to be imported when linked
- */
-module.exports = require('./lib/src/gql');
+// This is here for backcompat, even though the right way is to use the other package
+module.exports = require('graphql-tag');
diff --git a/src/gql.ts b/src/gql.ts
deleted file mode 100644
--- a/src/gql.ts
+++ /dev/null
@@ -1,47 +0,0 @@
-import { parse } from 'graphql/language/parser';
-
-import {
- Document,
-} from 'graphql';
-
-const cache: {[queryString: string]: Document} = {};
-
-function parseDocument(doc: string): Document {
- if (cache[doc]) {
- return cache[doc];
- }
-
- const parsed = parse(doc);
-
- if (!parsed || parsed.kind !== 'Document') {
- throw new Error('Not a valid GraphQL document.');
- }
-
- cache[doc] = parsed;
-
- return parsed as Document;
-}
-
-// XXX This should eventually disallow arbitrary string interpolation, like Relay does
-export default function gql(literals, ...substitutions): Document {
- let result = '';
-
- // run the loop only for the substitution count
- for (let i = 0; i < substitutions.length; i++) {
- result += literals[i];
- result += substitutions[i];
- }
-
- // add the last literal
- result += literals[literals.length - 1];
-
- return parseDocument(result);
-}
-
-export function registerGqlTag() {
- if (typeof window !== 'undefined') {
- window['gql'] = gql;
- } else if (typeof global !== 'undefined') {
- global['gql'] = gql;
- }
-}
| diff --git a/test/QueryManager.ts b/test/QueryManager.ts
--- a/test/QueryManager.ts
+++ b/test/QueryManager.ts
@@ -15,7 +15,7 @@ import {
addTypenameToSelectionSet,
} from '../src/queries/queryTransform';
-import gql from '../src/gql';
+import gql from 'graphql-tag';
import {
assert,
diff --git a/test/batching.ts b/test/batching.ts
--- a/test/batching.ts
+++ b/test/batching.ts
@@ -5,7 +5,7 @@ import { assert } from 'chai';
import mockNetworkInterface, {
mockBatchedNetworkInterface,
} from './mocks/mockNetworkInterface';
-import gql from '../src/gql';
+import gql from 'graphql-tag';
import { GraphQLResult } from 'graphql';
const networkInterface = mockNetworkInterface();
diff --git a/test/client.ts b/test/client.ts
--- a/test/client.ts
+++ b/test/client.ts
@@ -18,7 +18,7 @@ import {
Store,
} from '../src/store';
-import gql from '../src/gql';
+import gql from 'graphql-tag';
import {
createStore,
diff --git a/test/diffAgainstStore.ts b/test/diffAgainstStore.ts
--- a/test/diffAgainstStore.ts
+++ b/test/diffAgainstStore.ts
@@ -12,7 +12,7 @@ import {
NormalizedCache,
} from '../src/data/store';
-import gql from '../src/gql';
+import gql from 'graphql-tag';
describe('diffing queries against the store', () => {
it('returns nothing when the store is enough', () => {
diff --git a/test/directives.ts b/test/directives.ts
--- a/test/directives.ts
+++ b/test/directives.ts
@@ -9,7 +9,7 @@ import {
getQueryDefinition,
} from '../src/queries/getFromAST';
-import gql from '../src/gql';
+import gql from 'graphql-tag';
import cloneDeep = require('lodash.clonedeep');
diff --git a/test/getFromAST.ts b/test/getFromAST.ts
--- a/test/getFromAST.ts
+++ b/test/getFromAST.ts
@@ -14,7 +14,7 @@ import {
OperationDefinition,
} from 'graphql';
import { print } from 'graphql/language/printer';
-import gql from '../src/gql';
+import gql from 'graphql-tag';
import { assert } from 'chai';
describe('AST utility functions', () => {
diff --git a/test/gql.ts b/test/gql.ts
deleted file mode 100644
--- a/test/gql.ts
+++ /dev/null
@@ -1,13 +0,0 @@
-import { assert } from 'chai';
-
-import gql from '../src/gql';
-
-describe('gql', () => {
- it('parses queries', () => {
- assert.equal(gql`{ testQuery }`.kind, 'Document');
- });
-
- it('returns the same object for the same query', () => {
- assert.isTrue(gql`{ sameQuery }` === gql`{ sameQuery }`);
- });
-});
diff --git a/test/networkInterface.ts b/test/networkInterface.ts
--- a/test/networkInterface.ts
+++ b/test/networkInterface.ts
@@ -19,7 +19,7 @@ import {
MiddlewareRequest,
} from '../src/middleware';
-import gql from '../src/gql';
+import gql from 'graphql-tag';
import { print } from 'graphql/language/printer';
diff --git a/test/queryMerging.ts b/test/queryMerging.ts
--- a/test/queryMerging.ts
+++ b/test/queryMerging.ts
@@ -25,7 +25,7 @@ import {
OperationDefinition,
} from 'graphql';
-import gql from '../src/gql';
+import gql from 'graphql-tag';
import { assert } from 'chai';
import cloneDeep = require('lodash.clonedeep');
diff --git a/test/queryTransform.ts b/test/queryTransform.ts
--- a/test/queryTransform.ts
+++ b/test/queryTransform.ts
@@ -8,7 +8,7 @@ import {
getMutationDefinition,
} from '../src/queries/getFromAST';
import { print } from 'graphql/language/printer';
-import gql from '../src/gql';
+import gql from 'graphql-tag';
import { assert } from 'chai';
describe('query transforms', () => {
diff --git a/test/readFromStore.ts b/test/readFromStore.ts
--- a/test/readFromStore.ts
+++ b/test/readFromStore.ts
@@ -10,7 +10,7 @@ import {
StoreObject,
} from '../src/data/store';
-import gql from '../src/gql';
+import gql from 'graphql-tag';
describe('reading from the store', () => {
it('rejects malformed queries', () => {
diff --git a/test/roundtrip.ts b/test/roundtrip.ts
--- a/test/roundtrip.ts
+++ b/test/roundtrip.ts
@@ -7,7 +7,7 @@ import {
Document,
} from 'graphql';
-import gql from '../src/gql';
+import gql from 'graphql-tag';
describe('roundtrip', () => {
it('real graphql result', () => {
diff --git a/test/scheduler.ts b/test/scheduler.ts
--- a/test/scheduler.ts
+++ b/test/scheduler.ts
@@ -8,7 +8,7 @@ import {
createApolloStore,
} from '../src/store';
import mockNetworkInterface from './mocks/mockNetworkInterface';
-import gql from '../src/gql';
+import gql from 'graphql-tag';
describe('QueryScheduler', () => {
it('should throw an error if we try to register a non-polling query', () => {
diff --git a/test/tests.ts b/test/tests.ts
--- a/test/tests.ts
+++ b/test/tests.ts
@@ -16,7 +16,6 @@ import './networkInterface';
import './QueryManager';
import './client';
import './store';
-import './gql';
import './queryTransform';
import './getFromAST';
import './directives';
diff --git a/test/writeToStore.ts b/test/writeToStore.ts
--- a/test/writeToStore.ts
+++ b/test/writeToStore.ts
@@ -23,7 +23,7 @@ import {
Node,
} from 'graphql';
-import gql from '../src/gql';
+import gql from 'graphql-tag';
describe('writing to the store', () => {
it('properly normalizes a trivial item', () => {
| Decide on a more elegant solution to make `gql` work with npm link
To fix the current `npm-link` issue, where the module `apollo-client/gql` cannot be found, we will place `gql.js` in the root of the repo and redirect it to `lib/src/gql.js`.
After a deep discussion about separate modules or not, we decided to go ahead with the current strategy while we think of something better to do.
This is that issue.
| I also ran into the same issue when importing from `writeToStore` and `readFromStore` in relation to #180. Hopefully not nearly as important long-term as `gql`, but I thought it worth mentioning anyway.
@stubailo when do you think we can get the hacky version in? Again, if you just wanna write down what you want, I'm sure we can do it.
PR [#243](https://github.com/apollostack/apollo-client/pull/243) will place gql in root. Circle back later for elegant fix
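A sketch of what the root-level shim preserves for consumers (import paths as in the patch above):
``` js
// The old deep import keeps resolving, even under npm link,
// because the root gql.js now just re-exports graphql-tag:
const gqlCompat = require('apollo-client/gql');

// The preferred import going forward:
const gql = require('graphql-tag');
```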
| 2016-06-24T21:40:37Z | 0.3 |
apollographql/apollo-client | 201 | apollographql__apollo-client-201 | [
"193"
] | 021ee1feb3858266233edd0566f216ba739122a0 | diff --git a/src/QueryManager.ts b/src/QueryManager.ts
--- a/src/QueryManager.ts
+++ b/src/QueryManager.ts
@@ -195,6 +195,7 @@ export class QueryManager {
rootId: queryStoreValue.query.id,
selectionSet: queryStoreValue.query.selectionSet,
variables: queryStoreValue.variables,
+ returnPartialData: options.returnPartialData,
});
if (observer.next) {
diff --git a/src/data/readFromStore.ts b/src/data/readFromStore.ts
--- a/src/data/readFromStore.ts
+++ b/src/data/readFromStore.ts
@@ -65,11 +65,13 @@ export function readSelectionSetFromStore({
rootId,
selectionSet,
variables,
+ returnPartialData = false,
}: {
store: NormalizedCache,
rootId: string,
selectionSet: SelectionSet,
variables: Object,
+ returnPartialData?: boolean,
}): Object {
const {
result,
@@ -77,7 +79,7 @@ export function readSelectionSetFromStore({
selectionSet,
rootId,
store,
- throwOnMissingField: true,
+ throwOnMissingField: !returnPartialData,
variables,
});
| diff --git a/test/QueryManager.ts b/test/QueryManager.ts
--- a/test/QueryManager.ts
+++ b/test/QueryManager.ts
@@ -689,6 +689,80 @@ describe('QueryManager', () => {
});
});
+ it('supports returnPartialData #193', () => {
+ const primeQuery = gql`
+ query primeQuery {
+ people_one(id: 1) {
+ name
+ }
+ }
+ `;
+
+ const complexQuery = gql`
+ query complexQuery {
+ luke: people_one(id: 1) {
+ name
+ }
+ vader: people_one(id: 4) {
+ name
+ }
+ }
+ `;
+
+ const diffedQuery = gql`
+ query complexQuery {
+ vader: people_one(id: 4) {
+ name
+ }
+ }
+ `;
+
+ const data1 = {
+ people_one: {
+ name: 'Luke Skywalker',
+ },
+ };
+
+ const data2 = {
+ vader: {
+ name: 'Darth Vader',
+ },
+ };
+
+ const networkInterface = mockNetworkInterface(
+ {
+ request: { query: primeQuery },
+ result: { data: data1 },
+ },
+ {
+ request: { query: diffedQuery },
+ result: { data: data2 },
+ delay: 5,
+ }
+ );
+
+ const queryManager = new QueryManager({
+ networkInterface,
+ store: createApolloStore(),
+ reduxRootKey: 'apollo',
+ });
+
+ // First, prime the store so that query diffing removes the query
+ queryManager.query({
+ query: primeQuery,
+ }).then(() => {
+ const handle = queryManager.watchQuery({
+ query: complexQuery,
+ returnPartialData: true,
+ });
+
+ return handle.result().then((result) => {
+ assert.equal(result.data['luke'].name, 'Luke Skywalker');
+ assert.notProperty(result.data, 'vader');
+ });
+ });
+ });
+
it('runs a mutation', () => {
const mutation = gql`
mutation makeListPrivate {
| Partial data return prematurely fails (watchQuery)
Hey folks,
Getting the following error when using `returnPartialData` with `watchQuery` (it looks like it's trying to read the data from the store before it's available):
```
store.js:11Caught an exception! Error: Can't find field posts on object [object Object].(…)
```
| This is a pretty bad bug! Thanks for the report, we should get it fixed ASAP.
Oops, looks like we have exactly 0 unit tests for `returnPartialData`. That explains why that broke...
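The patch above threads the flag down to the store read; the key change is the inversion of the throw flag (a sketch of the call inside `readSelectionSetFromStore`):
``` js
diffSelectionSetAgainstStore({
  selectionSet,
  rootId,
  store,
  variables,
  // a partial read just stops throwing on missing fields:
  throwOnMissingField: !returnPartialData,
});
```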
| 2016-05-11T02:44:49Z | 0.3 |
apollographql/apollo-client | 200 | apollographql__apollo-client-200 | [
"199"
] | a50182d2ef9c7bb5c8bc6dc942b58527447541a4 | diff --git a/src/gql.ts b/src/gql.ts
--- a/src/gql.ts
+++ b/src/gql.ts
@@ -4,13 +4,21 @@ import {
Document,
} from 'graphql';
+const cache: {[queryString: string]: Document} = {};
+
function parseDocument(doc: string): Document {
+ if (cache[doc]) {
+ return cache[doc];
+ }
+
const parsed = parse(doc);
if (!parsed || parsed.kind !== 'Document') {
throw new Error('Not a valid GraphQL document.');
}
+ cache[doc] = parsed;
+
return parsed as Document;
}
| diff --git a/test/gql.ts b/test/gql.ts
new file mode 100644
--- /dev/null
+++ b/test/gql.ts
@@ -0,0 +1,13 @@
+import { assert } from 'chai';
+
+import gql from '../src/gql';
+
+describe('gql', () => {
+ it('parses queries', () => {
+ assert.equal(gql`{ testQuery }`.kind, 'Document');
+ });
+
+ it('returns the same object for the same query', () => {
+ assert.isTrue(gql`{ sameQuery }` === gql`{ sameQuery }`);
+ });
+});
diff --git a/test/tests.ts b/test/tests.ts
--- a/test/tests.ts
+++ b/test/tests.ts
@@ -17,3 +17,4 @@ import './QueryManager';
import './client';
import './middleware';
import './store';
+import './gql';
| Memoize gql
Since `gql` now creates a new object every time it parses, that breaks `react-apollo` change detection. We should make the same queries `===` each other, like the original strings do.
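A quick illustration of the invariant this introduces (the import path is illustrative):
``` js
import gql from 'apollo-client/gql';

const a = gql`{ sameQuery }`;
const b = gql`{ sameQuery }`;

// Identical source strings now share one parsed Document,
// so reference-equality change detection works:
console.log(a === b); // true
```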
| 2016-05-11T02:34:02Z | 0.3 |
|
apollographql/apollo-client | 140 | apollographql__apollo-client-140 | [
"137"
] | a9ea8a2a1128ba569b96846fb18fa51d24f33005 | diff --git a/Gruntfile.js b/Gruntfile.js
new file mode 100644
--- /dev/null
+++ b/Gruntfile.js
@@ -0,0 +1,21 @@
+'use strict';
+
+module.exports = function (grunt) {
+ grunt.initConfig({
+ tslint: {
+ options: {
+ // can be a configuration object or a filepath to tslint.json
+ configuration: grunt.file.readJSON('tslint.json')
+ },
+ files: {
+ src: [
+ 'src/**/*.ts',
+ 'test/**/*.ts',
+ '!test/fixtures/**/*.ts'
+ ]
+ }
+ }
+ })
+
+ grunt.loadNpmTasks('grunt-tslint');
+}
diff --git a/src/networkInterface.ts b/src/networkInterface.ts
--- a/src/networkInterface.ts
+++ b/src/networkInterface.ts
@@ -12,10 +12,13 @@ export interface Request {
}
export interface NetworkInterface {
+ query(request: Request): Promise<GraphQLResult>;
+}
+
+export interface HTTPNetworkInterface extends NetworkInterface {
_uri: string;
_opts: RequestInit;
_middlewares: MiddlewareInterface[];
- query(request: Request): Promise<GraphQLResult>;
use(middlewares: MiddlewareInterface[]);
}
@@ -24,7 +27,7 @@ export interface RequestAndOptions {
options: RequestInit;
}
-export function createNetworkInterface(uri: string, opts: RequestInit = {}): NetworkInterface {
+export function createNetworkInterface(uri: string, opts: RequestInit = {}): HTTPNetworkInterface {
if (!uri) {
throw new Error('A remote enpdoint is required for a network layer');
}
diff --git a/src/util/Observable.ts b/src/util/Observable.ts
--- a/src/util/Observable.ts
+++ b/src/util/Observable.ts
@@ -9,7 +9,10 @@ function isSubscription(subscription: Function | Subscription): subscription is
}
export class Observable<T> {
- constructor(private subscriberFunction: SubscriberFunction<T>) {
+ private subscriberFunction: SubscriberFunction<T>;
+
+ constructor(subscriberFunction: SubscriberFunction<T>) {
+ this.subscriberFunction = subscriberFunction;
}
public subscribe(observer: Observer<T>): Subscription {
@@ -19,7 +22,7 @@ export class Observable<T> {
return subscriptionOrCleanupFunction;
} else {
return {
- unsubscribe: <CleanupFunction>subscriptionOrCleanupFunction,
+ unsubscribe: subscriptionOrCleanupFunction,
};
}
}
@@ -32,5 +35,5 @@ export interface Observer<T> {
}
export interface Subscription {
- unsubscribe: CleanupFunction
+ unsubscribe: CleanupFunction;
}
| diff --git a/test/client.ts b/test/client.ts
--- a/test/client.ts
+++ b/test/client.ts
@@ -24,7 +24,7 @@ import {
import {
createNetworkInterface,
- NetworkInterface,
+ HTTPNetworkInterface,
} from '../src/networkInterface';
import mockNetworkInterface from './mocks/mockNetworkInterface';
@@ -67,12 +67,12 @@ describe('client', () => {
it('can allow passing in a network interface', () => {
- const networkInterface: NetworkInterface = createNetworkInterface('swapi');
+ const networkInterface = createNetworkInterface('swapi');
const client = new ApolloClient({
networkInterface,
});
- assert.equal(client.networkInterface._uri, networkInterface._uri);
+ assert.equal((client.networkInterface as HTTPNetworkInterface)._uri, networkInterface._uri);
});
it('can allow passing in a store', () => {
diff --git a/test/mocks/mockNetworkInterface.ts b/test/mocks/mockNetworkInterface.ts
--- a/test/mocks/mockNetworkInterface.ts
+++ b/test/mocks/mockNetworkInterface.ts
@@ -13,7 +13,9 @@ import {
// making multiple queries to the server
export default function mockNetworkInterface(
...mockedResponses: MockedResponse[]
-): NetworkInterface { return new MockNetworkInterface(...mockedResponses) as any }
+): NetworkInterface {
+ return new MockNetworkInterface(...mockedResponses);
+}
export interface MockedResponse {
request: Request;
@@ -22,8 +24,8 @@ export interface MockedResponse {
delay?: number;
}
-export class MockNetworkInterface {
- private mockedResponsesByKey: { [key:string]: MockedResponse[] } = {};
+export class MockNetworkInterface implements NetworkInterface {
+ private mockedResponsesByKey: { [key: string]: MockedResponse[] } = {};
constructor(...mockedResponses: MockedResponse[]) {
mockedResponses.forEach((mockedResponse) => {
diff --git a/test/networkInterface.ts b/test/networkInterface.ts
--- a/test/networkInterface.ts
+++ b/test/networkInterface.ts
@@ -10,7 +10,6 @@ const { assert, expect } = chai;
import {
createNetworkInterface,
- NetworkInterface,
} from '../src/networkInterface';
import {
@@ -68,7 +67,7 @@ describe('network interface', () => {
});
it('should create an instance with a given uri', () => {
- const networkInterface: NetworkInterface = createNetworkInterface('/graphql');
+ const networkInterface = createNetworkInterface('/graphql');
assert.equal(networkInterface._uri, '/graphql');
});
| Linter doesn't lint subdirs
| 2016-04-21T19:18:44Z | 0.1 |
|
apollographql/apollo-client | 111 | apollographql__apollo-client-111 | [
"109"
] | f40e87e634663f7c79b510c13cf733d913e4729b | diff --git a/src/QueryManager.ts b/src/QueryManager.ts
--- a/src/QueryManager.ts
+++ b/src/QueryManager.ts
@@ -39,10 +39,15 @@ import {
printQueryFromDefinition,
} from './queryPrinting';
+import {
+ IdGetter,
+} from './data/extensions';
+
export class QueryManager {
private networkInterface: NetworkInterface;
private store: ApolloStore;
private reduxRootKey: string;
+ private dataIdFromObject: IdGetter;
private resultCallbacks: { [queryId: number]: QueryResultCallback[] };
@@ -52,16 +57,19 @@ export class QueryManager {
networkInterface,
store,
reduxRootKey,
+ dataIdFromObject,
}: {
networkInterface: NetworkInterface,
store: ApolloStore,
reduxRootKey: string,
+ dataIdFromObject?: IdGetter,
}) {
// XXX this might be the place to do introspection for inserting the `id` into the query? or
// is that the network interface?
this.networkInterface = networkInterface;
this.store = store;
this.reduxRootKey = reduxRootKey;
+ this.dataIdFromObject = dataIdFromObject;
this.resultCallbacks = {};
@@ -119,7 +127,7 @@ export class QueryManager {
public watchQuery({
query,
variables,
- forceFetch = true,
+ forceFetch = false,
returnPartialData = false,
}: WatchQueryOptions): WatchedQueryHandle {
// Generate a query ID
@@ -130,13 +138,14 @@ export class QueryManager {
this.resultCallbacks[queryId] = [];
const queryString = query;
+ const queryDef = parseQuery(query);
// Parse the query passed in -- this could also be done by a build plugin or tagged
// template string
const querySS = {
id: 'ROOT_QUERY',
typeName: 'Query',
- selectionSet: parseQuery(query).selectionSet,
+ selectionSet: queryDef.selectionSet,
} as SelectionSetWithRoot;
// If we don't use diffing, then these will be the same as the original query
@@ -155,12 +164,17 @@ export class QueryManager {
throwOnMissingField: false,
rootId: querySS.id,
variables,
+ dataIdFromObject: this.dataIdFromObject,
});
initialResult = result;
if (missingSelectionSets.length) {
- const diffedQueryDef = queryDefinition(missingSelectionSets);
+ const diffedQueryDef = queryDefinition({
+ missingSelectionSets,
+ variableDefinitions: queryDef.variableDefinitions,
+ name: queryDef.name,
+ });
minimizedQuery = {
id: 'ROOT_QUERY',
diff --git a/src/data/diffAgainstStore.ts b/src/data/diffAgainstStore.ts
--- a/src/data/diffAgainstStore.ts
+++ b/src/data/diffAgainstStore.ts
@@ -23,6 +23,10 @@ import {
SelectionSetWithRoot,
} from '../queries/store';
+import {
+ IdGetter,
+} from './extensions';
+
import {
SelectionSet,
Field,
@@ -31,16 +35,19 @@ import {
export interface QueryDiffResult {
result: any;
missingSelectionSets: SelectionSetWithRoot[];
+ mergeUp: boolean;
}
export function diffQueryAgainstStore({
store,
query,
variables,
+ dataIdFromObject,
}: {
store: NormalizedCache,
query: string
variables?: Object,
+ dataIdFromObject?: IdGetter,
}): QueryDiffResult {
const queryDef = parseQuery(query);
@@ -50,6 +57,7 @@ export function diffQueryAgainstStore({
selectionSet: queryDef.selectionSet,
throwOnMissingField: false,
variables,
+ dataIdFromObject,
});
}
@@ -58,11 +66,13 @@ export function diffFragmentAgainstStore({
fragment,
rootId,
variables,
+ dataIdFromObject,
}: {
store: NormalizedCache,
fragment: string,
rootId: string,
variables?: Object,
+ dataIdFromObject?: IdGetter,
}): QueryDiffResult {
const fragmentDef = parseFragment(fragment);
@@ -72,6 +82,7 @@ export function diffFragmentAgainstStore({
selectionSet: fragmentDef.selectionSet,
throwOnMissingField: false,
variables,
+ dataIdFromObject,
});
}
@@ -92,23 +103,22 @@ export function diffSelectionSetAgainstStore({
rootId,
throwOnMissingField = false,
variables,
+ dataIdFromObject,
}: {
selectionSet: SelectionSet,
store: NormalizedCache,
rootId: string,
throwOnMissingField: Boolean,
variables: Object,
+ dataIdFromObject?: IdGetter,
}): QueryDiffResult {
if (selectionSet.kind !== 'SelectionSet') {
throw new Error('Must be a selection set.');
}
const result = {};
-
const missingSelectionSets: SelectionSetWithRoot[] = [];
-
- const missingSelections: Field[] = [];
-
+ const missingFields: Field[] = [];
const storeObj = store[rootId] || {};
selectionSet.selections.forEach((selection) => {
@@ -121,12 +131,21 @@ export function diffSelectionSetAgainstStore({
const storeFieldKey = storeKeyNameFromField(field, variables);
const resultFieldKey = resultKeyNameFromField(field);
+ // Don't push more than one missing field per field in the query
+ let missingFieldPushed = false;
+ function pushMissingField(missingField: Field) {
+ if (!missingFieldPushed) {
+ missingFields.push(missingField);
+ missingFieldPushed = true;
+ }
+ }
+
if (! has(storeObj, storeFieldKey)) {
if (throwOnMissingField) {
throw new Error(`Can't find field ${storeFieldKey} on object ${storeObj}.`);
}
- missingSelections.push(field);
+ missingFields.push(field);
return;
}
@@ -157,10 +176,17 @@ export function diffSelectionSetAgainstStore({
rootId: id,
selectionSet: field.selectionSet,
variables,
+ dataIdFromObject,
});
- itemDiffResult.missingSelectionSets.forEach(
- itemSelectionSet => missingSelectionSets.push(itemSelectionSet));
+ if (! itemDiffResult.mergeUp) {
+ itemDiffResult.missingSelectionSets.forEach(
+ itemSelectionSet => missingSelectionSets.push(itemSelectionSet));
+ } else {
+ // XXX merge all of the missing selections from the children to get a more minimal result
+ pushMissingField(field);
+ }
+
return itemDiffResult.result;
});
return;
@@ -173,11 +199,16 @@ export function diffSelectionSetAgainstStore({
rootId: storeValue,
selectionSet: field.selectionSet,
variables,
+ dataIdFromObject,
});
- // This is a nested query
- subObjDiffResult.missingSelectionSets.forEach(
- subObjSelectionSet => missingSelectionSets.push(subObjSelectionSet));
+ if (! subObjDiffResult.mergeUp) {
+ subObjDiffResult.missingSelectionSets.forEach(
+ subObjSelectionSet => missingSelectionSets.push(subObjSelectionSet));
+ } else {
+ // XXX merge all of the missing selections from the children to get a more minimal result
+ pushMissingField(field);
+ }
result[resultFieldKey] = subObjDiffResult.result;
return;
@@ -186,39 +217,71 @@ export function diffSelectionSetAgainstStore({
throw new Error('Unexpected number value in the store where the query had a subselection.');
});
+ // Set this to true if we don't have enough information at this level to generate a refetch
+ // query, so we need to merge the selection set with the parent, rather than appending
+ let mergeUp = false;
+
// If we weren't able to resolve some selections from the store, construct them into
// a query we can fetch from the server
- if (missingSelections.length) {
- const id = storeObj['id'];
- if (typeof id !== 'string' && rootId !== 'ROOT_QUERY') {
- throw new Error(
- `Can't generate query to refetch object ${rootId}, since it doesn't have a string id.`);
- }
+ if (missingFields.length) {
+ if (dataIdFromObject) {
+ // We have a semantic understanding of IDs
+ const id = dataIdFromObject(storeObj);
+
+ if (typeof id !== 'string' && rootId !== 'ROOT_QUERY') {
+ throw new Error(
+ `Can't generate query to refetch object ${rootId}, since it doesn't have a string id.`);
+ }
+
+ let typeName: string;
- let typeName: string;
+ if (rootId === 'ROOT_QUERY') {
+ // We don't need to do anything interesting to fetch root queries, like have an ID
+ typeName = 'Query';
+ } else if (! storeObj.__typename) {
+ throw new Error(
+ `Can't generate query to refetch object ${rootId}, since __typename wasn't in the store.`);
+ } else {
+ typeName = storeObj.__typename;
+ }
- if (rootId === 'ROOT_QUERY') {
- // We don't need to do anything interesting to fetch root queries, like have an ID
- typeName = 'Query';
- } else if (! storeObj.__typename) {
- throw new Error(
- `Can't generate query to refetch object ${rootId}, since __typename wasn't in the store.`);
+ missingSelectionSets.push({
+ id: rootId,
+ typeName,
+ selectionSet: {
+ kind: 'SelectionSet',
+ selections: missingFields,
+ },
+ });
+ } else if (rootId === 'ROOT_QUERY') {
+ const typeName = 'Query';
+
+ missingSelectionSets.push({
+ id: rootId,
+ typeName,
+ selectionSet: {
+ kind: 'SelectionSet',
+ selections: missingFields,
+ },
+ });
} else {
- typeName = storeObj.__typename;
+ mergeUp = true;
+
+ missingSelectionSets.push({
+ // Sentinel values, all we need is the selection set
+ id: 'CANNOT_REFETCH',
+ typeName: 'CANNOT_REFETCH',
+ selectionSet: {
+ kind: 'SelectionSet',
+ selections: missingFields,
+ },
+ });
}
-
- missingSelectionSets.push({
- id: rootId,
- typeName,
- selectionSet: {
- kind: 'SelectionSet',
- selections: missingSelections,
- },
- });
}
return {
result,
missingSelectionSets,
+ mergeUp,
};
}
diff --git a/src/queryPrinting.ts b/src/queryPrinting.ts
--- a/src/queryPrinting.ts
+++ b/src/queryPrinting.ts
@@ -2,14 +2,16 @@ import {
print,
SelectionSet,
OperationDefinition,
+ VariableDefinition,
+ Name,
} from 'graphql';
import {
SelectionSetWithRoot,
} from './queries/store';
-export function printQueryForMissingData(missingSelectionSets: SelectionSetWithRoot[]) {
- return printQueryFromDefinition(queryDefinition(missingSelectionSets));
+export function printQueryForMissingData(options: QueryDefinitionOptions) {
+ return printQueryFromDefinition(queryDefinition(options));
}
const idField = {
@@ -32,9 +34,16 @@ export function printQueryFromDefinition(queryDef: OperationDefinition) {
return print(queryDocumentAst);
}
-export function queryDefinition(
- missingSelectionSets: SelectionSetWithRoot[]): OperationDefinition {
+export function queryDefinition({
+ missingSelectionSets,
+ variableDefinitions = null,
+ name = null,
+}: QueryDefinitionOptions): OperationDefinition {
const selections = missingSelectionSets.map((missingSelectionSet: SelectionSetWithRoot, ii) => {
+ if (missingSelectionSet.id === 'CANNOT_REFETCH') {
+ throw new Error('diffAgainstStore did not merge selection sets correctly');
+ }
+
if (missingSelectionSet.id === 'ROOT_QUERY') {
if (missingSelectionSet.selectionSet.selections.length > 1) {
throw new Error('Multiple root queries, cannot print that yet.');
@@ -54,8 +63,8 @@ export function queryDefinition(
return {
kind: 'OperationDefinition',
operation: 'query',
- name: null,
- variableDefinitions: null,
+ name,
+ variableDefinitions,
directives: [],
selectionSet: {
kind: 'SelectionSet',
@@ -131,3 +140,9 @@ function inlineFragmentSelection({
selectionSet,
};
}
+
+export type QueryDefinitionOptions = {
+ missingSelectionSets: SelectionSetWithRoot[];
+ variableDefinitions?: VariableDefinition[];
+ name?: Name;
+}
| diff --git a/test/QueryManager.ts b/test/QueryManager.ts
--- a/test/QueryManager.ts
+++ b/test/QueryManager.ts
@@ -12,6 +12,7 @@ import {
} from '../src/store';
import {
+ IdGetter,
getIdField,
} from '../src/data/extensions';
@@ -456,7 +457,97 @@ describe('QueryManager', () => {
},
variables: {},
},
- ], done);
+ ], {
+ dataIdFromObject: getIdField,
+ }, done);
+ });
+
+ it('diffs queries, preserving variable declarations', (done) => {
+ testDiffing([
+ {
+ query: `
+ {
+ people_one(id: "1") {
+ __typename,
+ id,
+ name
+ }
+ }
+ `,
+ diffedQuery: `
+ {
+ people_one(id: "1") {
+ __typename,
+ id,
+ name
+ }
+ }
+ `,
+ diffedQueryResponse: {
+ people_one: {
+ __typename: 'Person',
+ id: '1',
+ name: 'Luke Skywalker',
+ },
+ },
+ fullResponse: {
+ people_one: {
+ __typename: 'Person',
+ id: '1',
+ name: 'Luke Skywalker',
+ },
+ },
+ variables: {},
+ },
+ {
+ query: `
+ query getSeveralPeople($lukeId: String!, $vaderId: String!) {
+ luke: people_one(id: $lukeId) {
+ __typename
+ id
+ name
+ }
+ vader: people_one(id: $vaderId) {
+ __typename
+ id
+ name
+ }
+ }
+ `,
+ diffedQuery: `
+ query getSeveralPeople($lukeId: String!, $vaderId: String!) {
+ vader: people_one(id: $vaderId) {
+ __typename
+ id
+ name
+ }
+ }
+ `,
+ diffedQueryResponse: {
+ vader: {
+ __typename: 'Person',
+ id: '4',
+ name: 'Darth Vader',
+ },
+ },
+ fullResponse: {
+ luke: {
+ __typename: 'Person',
+ id: '1',
+ name: 'Luke Skywalker',
+ },
+ vader: {
+ __typename: 'Person',
+ id: '4',
+ name: 'Darth Vader',
+ },
+ },
+ variables: {
+ lukeId: '1',
+ vaderId: '4',
+ },
+ },
+ ], {}, done);
});
it(`doesn't return data while query is loading`, (done) => {
@@ -689,6 +780,9 @@ function testDiffing(
// Variables to use in all queries
variables?: Object,
}[],
+ config: {
+ dataIdFromObject?: IdGetter,
+ },
done: () => void
) {
const networkInterface = mockNetworkInterface(queryArray.map(({
@@ -708,6 +802,7 @@ function testDiffing(
config: { dataIdFromObject: getIdField },
}),
reduxRootKey: 'apollo',
+ dataIdFromObject: config.dataIdFromObject,
});
const steps = queryArray.map(({ query, fullResponse, variables }) => {
diff --git a/test/diffAgainstStore.ts b/test/diffAgainstStore.ts
--- a/test/diffAgainstStore.ts
+++ b/test/diffAgainstStore.ts
@@ -36,7 +36,7 @@ describe('diffing queries against the store', () => {
}).missingSelectionSets, []);
});
- it('returns correct selection set when the store is missing one field', () => {
+ it('when the store is missing one field and knows about IDs', () => {
const firstQuery = `
{
people_one(id: "1") {
@@ -73,6 +73,7 @@ describe('diffing queries against the store', () => {
assert.deepEqual(stripLoc(diffQueryAgainstStore({
store,
query: secondQuery,
+ dataIdFromObject: getIdField,
}).missingSelectionSets), [
{
id: 'lukeId',
@@ -97,7 +98,7 @@ describe('diffing queries against the store', () => {
]);
});
- it('generates the right query when the store is missing one field', () => {
+ it('when the store is missing one field and knows about IDs', () => {
const firstQuery = `
{
people_one(id: "1") {
@@ -134,9 +135,12 @@ describe('diffing queries against the store', () => {
const { missingSelectionSets } = diffQueryAgainstStore({
store,
query: secondQuery,
+ dataIdFromObject: getIdField,
});
- assert.equal(printQueryForMissingData(missingSelectionSets), `{
+ assert.equal(printQueryForMissingData({
+ missingSelectionSets,
+ }), `{
__node_0: node(id: "lukeId") {
id
... on Person {
@@ -147,7 +151,58 @@ describe('diffing queries against the store', () => {
`);
});
- it('generates the right queries when the store is missing multiple nodes', () => {
+ it('when the store is missing one field and doesn\'t know IDs', () => {
+ const firstQuery = `
+ {
+ people_one(id: "1") {
+ __typename
+ id
+ name
+ }
+ }
+ `;
+
+ const result = {
+ people_one: {
+ __typename: 'Person',
+ id: 'lukeId',
+ name: 'Luke Skywalker',
+ },
+ };
+
+ const store = writeQueryToStore({
+ result,
+ query: firstQuery,
+ });
+
+ const secondQuery = `
+ {
+ people_one(id: "1") {
+ name
+ age
+ }
+ }
+ `;
+
+ const { missingSelectionSets } = diffQueryAgainstStore({
+ store,
+ query: secondQuery,
+ });
+
+ // XXX a more efficient diffing algorithm would actually only fetch `age` here. Something to
+ // implement next
+ assert.equal(printQueryForMissingData({
+ missingSelectionSets,
+ }), `{
+ people_one(id: "1") {
+ name
+ age
+ }
+}
+`);
+ });
+
+ it('when the store is missing multiple nodes', () => {
const firstQuery = `
{
people_one(id: "1") {
@@ -188,9 +243,12 @@ describe('diffing queries against the store', () => {
const { missingSelectionSets } = diffQueryAgainstStore({
store,
query: secondQuery,
+ dataIdFromObject: getIdField,
});
- assert.equal(printQueryForMissingData(missingSelectionSets), `{
+ assert.equal(printQueryForMissingData({
+ missingSelectionSets,
+ }), `{
__node_0: node(id: "lukeId") {
id
... on Person {
@@ -294,7 +352,9 @@ describe('diffing queries against the store', () => {
query: secondQuery,
});
- assert.equal(printQueryForMissingData(missingSelectionSets), `{
+ assert.equal(printQueryForMissingData({
+ missingSelectionSets,
+ }), `{
people_one(id: "2") {
__typename
id
diff --git a/test/queryPrinting.ts b/test/queryPrinting.ts
--- a/test/queryPrinting.ts
+++ b/test/queryPrinting.ts
@@ -23,13 +23,15 @@ describe('printing queries', () => {
};
// Note - the indentation inside template strings is meaningful!
- assert.equal(printQueryForMissingData([
- {
- id,
- typeName,
- selectionSet,
- },
- ]), `{
+ assert.equal(printQueryForMissingData({
+ missingSelectionSets: [
+ {
+ id,
+ typeName,
+ selectionSet,
+ },
+ ],
+ }), `{
__node_0: node(id: "lukeId") {
id
... on Person {
| Query diffing with variables
It looks like the `minimizedQueryString` in QueryManager doesn't account for named queries, and therefore variables:
``` js
const query = {
query,
variables,
forceFetch: false,
}
apolloClient.query(query)
.then((result) => {
// yada yada
});
```
If the original query was:
```
query getThing($thingId: Int!) {
thing(id: $thingId) {
title
}
}
```
Assuming none of the data is current in the store, the minimized query string generated currently is:
```
thing(id: $thingId) {
title
}
```
Which throws an error on the GraphQL server, since the variable isn't defined.
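Since the store was empty in this example, a correct diff should keep the operation's name and variable definitions intact, i.e. produce:
```
query getThing($thingId: Int!) {
  thing(id: $thingId) {
    title
  }
}
```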
| Ooh, that's awkward. OK, maybe we need to pass around variable type definitions wherever we have `SelectionSetWithRoot` instances.
BTW - we want to do an initial open source launch next Tuesday, so keep the bugs coming and I'll try to fix them ASAP!
@stubailo will do. Thanks!
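That is what the patch above does: `queryDefinition` now accepts the operation's signature alongside the missing selection sets (a sketch of the call from `QueryManager`):
``` js
const diffedQueryDef = queryDefinition({
  missingSelectionSets,
  variableDefinitions: queryDef.variableDefinitions, // carried over from the original
  name: queryDef.name,
});
```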
| 2016-04-14T23:02:39Z | 0 |
apollographql/apollo-client | 133 | apollographql__apollo-client-133 | [
"131"
] | 4252eb1ded1de4f3c2e0cf06e09ad946be510b70 | diff --git a/src/QueryManager.ts b/src/QueryManager.ts
--- a/src/QueryManager.ts
+++ b/src/QueryManager.ts
@@ -67,7 +67,7 @@ export class ObservableQuery extends Observable<GraphQLResult> {
}
export interface QuerySubscription extends Subscription {
- refetch();
+ refetch(variables?: any): void;
}
export interface WatchQueryOptions {
@@ -197,8 +197,8 @@ export class QueryManager {
unsubscribe: () => {
this.stopQuery(queryId);
},
- refetch: () => {
- this.fetchQuery(queryId, assign(options, { forceFetch: true }) as WatchQueryOptions);
+ refetch: (variables: any): void => {
+ this.fetchQuery(queryId, assign(options, { forceFetch: true, variables }) as WatchQueryOptions);
},
};
});
| diff --git a/test/QueryManager.ts b/test/QueryManager.ts
--- a/test/QueryManager.ts
+++ b/test/QueryManager.ts
@@ -301,6 +301,82 @@ describe('QueryManager', () => {
});
});
+ it('allows you to refetch queries with new variables', (done) => {
+ const query = `
+ {
+ people_one(id: 1) {
+ name
+ }
+ }
+ `;
+
+ const data1 = {
+ people_one: {
+ name: 'Luke Skywalker',
+ },
+ };
+
+ const data2 = {
+ people_one: {
+ name: 'Luke Skywalker has a new name',
+ },
+ };
+
+ const data3 = {
+ people_one: {
+ name: 'Luke Skywalker has a new name',
+ },
+ };
+
+ const variables = {
+ test: 'I am your father',
+ };
+
+ const networkInterface = mockNetworkInterface(
+ {
+ request: { query: query },
+ result: { data: data1 },
+ },
+ {
+ request: { query: query },
+ result: { data: data2 },
+ },
+ {
+ request: { query: query, variables },
+ result: { data: data2 },
+ }
+ );
+
+ const queryManager = new QueryManager({
+ networkInterface,
+ store: createApolloStore(),
+ reduxRootKey: 'apollo',
+ });
+
+ let handleCount = 0;
+
+ const handle = queryManager.watchQuery({
+ query: query,
+ });
+
+ const subscription = handle.subscribe({
+ next(result) {
+ handleCount++;
+
+ if (handleCount === 1) {
+ assert.deepEqual(result.data, data1);
+ subscription.refetch();
+ } else if (handleCount === 2) {
+ assert.deepEqual(result.data, data2);
+ subscription.refetch(variables);
+ } else if (handleCount === 3) {
+ assert.deepEqual(result.data, data3);
+ done();
+ }
+ },
+ });
+ });
+
it('doesn\'t explode if you refetch before first fetch is done with query diffing', (done) => {
const primeQuery = `
{
| Refetch query with new variables at runtime
Hey @stubailo @jbaxleyiii
Relay provides an option in the component called [setvariables](https://facebook.github.io/relay/docs/api-reference-relay-container.html#setvariables) to change the variables at runtime. Does apollo client support this yet? I see the `refetch` function, but it works like a refresh button, which wouldn't be helpful if someone wants to change the variables and then refetch the query. I think this option could be super useful for things like infinite-scroll pagination or filtering: one can set the variables at runtime and fetch the updated data into the store.
| If you want to pass new variables you can just unsubscribe from the previous query and start a new one.
`react-apollo` makes this really convenient: You can easily pass in a prop from outside, or put your state in Redux. The `mapQueriesToProps` option takes in `ownProps` and `state`, which can be used as variables in your query.
See this example (this is using data from `Meteor.userId()` but you could put other stuff in there):
https://github.com/apollostack/meteor-starter-kit/blob/3950b23ee39be144168a3f4d9f56ebeafb912533/imports/ui/App.js#L21-L44
However since we already have the `refetch` helper method, it would be pretty simple to have that take in a new set of variables.
Hi @stubailo I did the same thing for passing variables in this demo app: https://github.com/gauravtiwari/apollo_on_rails/blob/master/app/assets/javascripts/components/posts/show.es6.js#L41, but I think using `refetch()` with new variables would be much clearer and easier to use.
For example, in infinite-scroll pagination we could just change the number of records to fetch and the page offset, and it would fetch the new data:
``` javascript
variables: {
first: 20,
}
```
Later for refetching:
``` javascript
this.refetch({
variables: {first: 40, page: 2}
});
```
@johnthepink would your pagination for the app do something similar if present?
Based on how refetch works right now I think it would be trivial to build!
@stubailo thoughts on passing just an object of variables vs passing a QueryOptions object?
Hmm, let's keep variables only for now, until someone has a need for the other stuff?
@jbaxleyiii I think for a component the query mostly revolves around variables (if any), so we could mutate just the variables object and have some kind of observer detect that the variables changed and refetch (I guess that's how refetch works right now, but with variables).
@jbaxleyiii it totally could. I'm just updating redux and letting it unsubscribe and start over (mixed with `shouldComponentUpdate`). I think I like the refetch idea better though, because the unsubscribe currently removes the data from the store and then adds it back. Which, if you are trying to render components based off the store is a little wonky.
Cool, I'll see about knocking this out now
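For reference, the shape that landed in the patch above, from the subscriber's side (a sketch):
``` javascript
const subscription = handle.subscribe({
  next(result) { /* render result.data */ },
});

subscription.refetch();                       // force-fetch with the original query
subscription.refetch({ first: 40, page: 2 }); // force-fetch with new variables
```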
| 2016-04-20T16:34:22Z | 0.1 |
eclipse-vertx/vert.x | 5,347 | eclipse-vertx__vert.x-5347 | [
"5290"
] | 99cb7e32a65f2d860af6a37fa66568ac388fd838 | diff --git a/vertx-core/src/main/java/io/vertx/core/http/impl/HttpUtils.java b/vertx-core/src/main/java/io/vertx/core/http/impl/HttpUtils.java
--- a/vertx-core/src/main/java/io/vertx/core/http/impl/HttpUtils.java
+++ b/vertx-core/src/main/java/io/vertx/core/http/impl/HttpUtils.java
@@ -40,6 +40,8 @@
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.RandomAccessFile;
+import java.net.InetSocketAddress;
+import java.net.SocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.Charset;
@@ -819,4 +821,20 @@ public static boolean canUpgradeToWebSocket(HttpServerRequest req) {
}
return false;
}
+
+ /**
+ * Convert a {@link SocketAddress} to a {@link HostAndPort}.
+ * If the socket address is an {@link InetSocketAddress}, the hostString and port are used.
+ * Otherwise {@code null} is returned.
+ *
+ * @param socketAddress The socket address to convert
+ * @return The converted instance or {@code null} if not applicable.
+ */
+ public static HostAndPort socketAddressToHostAndPort(SocketAddress socketAddress) {
+ if (socketAddress instanceof InetSocketAddress) {
+ InetSocketAddress inetSocketAddress = (InetSocketAddress) socketAddress;
+ return new HostAndPortImpl(inetSocketAddress.getHostString(), inetSocketAddress.getPort());
+ }
+ return null;
+ }
}
diff --git a/vertx-core/src/main/java/io/vertx/core/internal/net/SslChannelProvider.java b/vertx-core/src/main/java/io/vertx/core/internal/net/SslChannelProvider.java
--- a/vertx-core/src/main/java/io/vertx/core/internal/net/SslChannelProvider.java
+++ b/vertx-core/src/main/java/io/vertx/core/internal/net/SslChannelProvider.java
@@ -18,8 +18,10 @@
import io.netty.util.concurrent.ImmediateExecutor;
import io.vertx.core.internal.VertxInternal;
import io.vertx.core.internal.tls.SslContextProvider;
+import io.vertx.core.net.HostAndPort;
import io.vertx.core.net.SocketAddress;
+import java.net.InetSocketAddress;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
@@ -59,25 +61,30 @@ public SslHandler createClientSslHandler(SocketAddress peerAddress, String serve
return sslHandler;
}
- public ChannelHandler createServerHandler(boolean useAlpn, long sslHandshakeTimeout, TimeUnit sslHandshakeTimeoutUnit) {
+ public ChannelHandler createServerHandler(boolean useAlpn, long sslHandshakeTimeout, TimeUnit sslHandshakeTimeoutUnit, HostAndPort remoteAddress) {
if (sni) {
- return createSniHandler(useAlpn, sslHandshakeTimeout, sslHandshakeTimeoutUnit);
+ return createSniHandler(useAlpn, sslHandshakeTimeout, sslHandshakeTimeoutUnit, remoteAddress);
} else {
- return createServerSslHandler(useAlpn, sslHandshakeTimeout, sslHandshakeTimeoutUnit);
+ return createServerSslHandler(useAlpn, sslHandshakeTimeout, sslHandshakeTimeoutUnit, remoteAddress);
}
}
- private SslHandler createServerSslHandler(boolean useAlpn, long sslHandshakeTimeout, TimeUnit sslHandshakeTimeoutUnit) {
+ private SslHandler createServerSslHandler(boolean useAlpn, long sslHandshakeTimeout, TimeUnit sslHandshakeTimeoutUnit, HostAndPort remoteAddress) {
SslContext sslContext = sslContextProvider.sslServerContext(useAlpn);
Executor delegatedTaskExec = sslContextProvider.useWorkerPool() ? workerPool : ImmediateExecutor.INSTANCE;
- SslHandler sslHandler = sslContext.newHandler(ByteBufAllocator.DEFAULT, delegatedTaskExec);
+ SslHandler sslHandler;
+ if (remoteAddress != null) {
+ sslHandler = sslContext.newHandler(ByteBufAllocator.DEFAULT, remoteAddress.host(), remoteAddress.port(), delegatedTaskExec);
+ } else {
+ sslHandler = sslContext.newHandler(ByteBufAllocator.DEFAULT, delegatedTaskExec);
+ }
sslHandler.setHandshakeTimeout(sslHandshakeTimeout, sslHandshakeTimeoutUnit);
return sslHandler;
}
- private SniHandler createSniHandler(boolean useAlpn, long sslHandshakeTimeout, TimeUnit sslHandshakeTimeoutUnit) {
+ private SniHandler createSniHandler(boolean useAlpn, long sslHandshakeTimeout, TimeUnit sslHandshakeTimeoutUnit, HostAndPort remoteAddress) {
Executor delegatedTaskExec = sslContextProvider.useWorkerPool() ? workerPool : ImmediateExecutor.INSTANCE;
- return new VertxSniHandler(sslContextProvider.serverNameMapping(delegatedTaskExec, useAlpn), sslHandshakeTimeoutUnit.toMillis(sslHandshakeTimeout), delegatedTaskExec);
+ return new VertxSniHandler(sslContextProvider.serverNameMapping(delegatedTaskExec, useAlpn), sslHandshakeTimeoutUnit.toMillis(sslHandshakeTimeout), delegatedTaskExec, remoteAddress);
}
}
diff --git a/vertx-core/src/main/java/io/vertx/core/internal/net/VertxSniHandler.java b/vertx-core/src/main/java/io/vertx/core/internal/net/VertxSniHandler.java
--- a/vertx-core/src/main/java/io/vertx/core/internal/net/VertxSniHandler.java
+++ b/vertx-core/src/main/java/io/vertx/core/internal/net/VertxSniHandler.java
@@ -15,6 +15,7 @@
import io.netty.handler.ssl.SslContext;
import io.netty.handler.ssl.SslHandler;
import io.netty.util.AsyncMapping;
+import io.vertx.core.net.HostAndPort;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
@@ -27,16 +28,24 @@
class VertxSniHandler extends SniHandler {
private final Executor delegatedTaskExec;
+ private final HostAndPort remoteAddress;
- public VertxSniHandler(AsyncMapping<? super String, ? extends SslContext> mapping, long handshakeTimeoutMillis, Executor delegatedTaskExec) {
+ public VertxSniHandler(AsyncMapping<? super String, ? extends SslContext> mapping, long handshakeTimeoutMillis, Executor delegatedTaskExec,
+ HostAndPort remoteAddress) {
super(mapping, handshakeTimeoutMillis);
this.delegatedTaskExec = delegatedTaskExec;
+ this.remoteAddress = remoteAddress;
}
@Override
protected SslHandler newSslHandler(SslContext context, ByteBufAllocator allocator) {
- SslHandler sslHandler = context.newHandler(allocator, delegatedTaskExec);
+ SslHandler sslHandler;
+ if (remoteAddress != null) {
+ sslHandler = context.newHandler(allocator, remoteAddress.host(), remoteAddress.port(), delegatedTaskExec);
+ } else {
+ sslHandler = context.newHandler(allocator, delegatedTaskExec);
+ }
sslHandler.setHandshakeTimeout(handshakeTimeoutMillis, TimeUnit.MILLISECONDS);
return sslHandler;
}
diff --git a/vertx-core/src/main/java/io/vertx/core/net/impl/NetServerImpl.java b/vertx-core/src/main/java/io/vertx/core/net/impl/NetServerImpl.java
--- a/vertx-core/src/main/java/io/vertx/core/net/impl/NetServerImpl.java
+++ b/vertx-core/src/main/java/io/vertx/core/net/impl/NetServerImpl.java
@@ -26,6 +26,7 @@
import io.vertx.core.buffer.impl.PartialPooledByteBufAllocator;
import io.vertx.core.http.ClientAuth;
import io.vertx.core.http.HttpServerOptions;
+import io.vertx.core.http.impl.HttpUtils;
import io.vertx.core.internal.CloseSequence;
import io.vertx.core.impl.HostnameResolver;
import io.vertx.core.internal.ContextInternal;
@@ -221,7 +222,8 @@ public void accept(Channel ch, SslContextProvider sslChannelProvider, SslContext
private void configurePipeline(Channel ch, SslContextProvider sslContextProvider, SslContextManager sslContextManager, ServerSSLOptions sslOptions) {
if (options.isSsl()) {
SslChannelProvider sslChannelProvider = new SslChannelProvider(vertx, sslContextProvider, sslOptions.isSni());
- ch.pipeline().addLast("ssl", sslChannelProvider.createServerHandler(options.isUseAlpn(), options.getSslHandshakeTimeout(), options.getSslHandshakeTimeoutUnit()));
+ ch.pipeline().addLast("ssl", sslChannelProvider.createServerHandler(options.isUseAlpn(), options.getSslHandshakeTimeout(),
+ options.getSslHandshakeTimeoutUnit(), HttpUtils.socketAddressToHostAndPort(ch.remoteAddress())));
ChannelPromise p = ch.newPromise();
ch.pipeline().addLast("handshaker", new SslHandshakeCompletionHandler(p));
p.addListener(future -> {
diff --git a/vertx-core/src/main/java/io/vertx/core/net/impl/NetSocketImpl.java b/vertx-core/src/main/java/io/vertx/core/net/impl/NetSocketImpl.java
--- a/vertx-core/src/main/java/io/vertx/core/net/impl/NetSocketImpl.java
+++ b/vertx-core/src/main/java/io/vertx/core/net/impl/NetSocketImpl.java
@@ -27,6 +27,7 @@
import io.vertx.core.eventbus.Message;
import io.vertx.core.eventbus.MessageConsumer;
import io.vertx.core.http.ClientAuth;
+import io.vertx.core.http.impl.HttpUtils;
import io.vertx.core.internal.ContextInternal;
import io.vertx.core.internal.PromiseInternal;
import io.vertx.core.internal.concurrent.InboundMessageQueue;
@@ -323,7 +324,8 @@ private Future<Void> sslUpgrade(String serverName, SSLOptions sslOptions, ByteBu
ClientSSLOptions clientSSLOptions = (ClientSSLOptions) sslOptions;
sslHandler = provider.createClientSslHandler(remoteAddress, serverName, sslOptions.isUseAlpn(), clientSSLOptions.getSslHandshakeTimeout(), clientSSLOptions.getSslHandshakeTimeoutUnit());
} else {
- sslHandler = provider.createServerHandler(sslOptions.isUseAlpn(), sslOptions.getSslHandshakeTimeout(), sslOptions.getSslHandshakeTimeoutUnit());
+ sslHandler = provider.createServerHandler(sslOptions.isUseAlpn(), sslOptions.getSslHandshakeTimeout(),
+ sslOptions.getSslHandshakeTimeoutUnit(), HttpUtils.socketAddressToHostAndPort(chctx.channel().remoteAddress()));
}
chctx.pipeline().addFirst("ssl", sslHandler);
channelPromise.addListener(p);
| diff --git a/vertx-core/src/test/java/io/vertx/tests/net/NetTest.java b/vertx-core/src/test/java/io/vertx/tests/net/NetTest.java
--- a/vertx-core/src/test/java/io/vertx/tests/net/NetTest.java
+++ b/vertx-core/src/test/java/io/vertx/tests/net/NetTest.java
@@ -91,6 +91,7 @@
import static io.vertx.test.http.HttpTestBase.DEFAULT_HTTPS_HOST;
import static io.vertx.test.http.HttpTestBase.DEFAULT_HTTPS_PORT;
import static io.vertx.test.core.TestUtils.*;
+import static io.vertx.tests.tls.HttpTLSTest.testPeerHostServerCert;
import static org.hamcrest.CoreMatchers.*;
/**
@@ -4602,4 +4603,54 @@ public void testConnectToServerShutdown() throws Exception {
assertWaitUntil(closed::get);
fut.await();
}
+
+ /**
+ * Test that for NetServer, the peer host and port info is available in the SSLEngine
+ * when the X509ExtendedKeyManager.chooseEngineServerAlias is called.
+ *
+ * @throws Exception if an error occurs
+ */
+ @Test
+ public void testTLSServerSSLEnginePeerHost() throws Exception {
+ testTLSServerSSLEnginePeerHostImpl(false);
+ }
+
+ /**
+ * Test that for NetServer with start TLS, the peer host and port info is available
+ * in the SSLEngine when the X509ExtendedKeyManager.chooseEngineServerAlias is called.
+ *
+ * @throws Exception if an error occurs
+ */
+ @Test
+ public void testStartTLSServerSSLEnginePeerHost() throws Exception {
+ testTLSServerSSLEnginePeerHostImpl(true);
+ }
+
+ private void testTLSServerSSLEnginePeerHostImpl(boolean startTLS) throws Exception {
+ AtomicBoolean called = new AtomicBoolean(false);
+ testTLS(Cert.NONE, Trust.SERVER_JKS, testPeerHostServerCert(Cert.SERVER_JKS, called), Trust.NONE,
+ false, false, true, startTLS);
+ assertTrue("X509ExtendedKeyManager.chooseEngineServerAlias is not called", called.get());
+ }
+
+ /**
+ * Test that for NetServer with SNI, the peer host and port info is available
+ * in the SSLEngine when the X509ExtendedKeyManager.chooseEngineServerAlias is called.
+ *
+ * @throws Exception if an error occurs
+ */
+ @Test
+ public void testSNIServerSSLEnginePeerHost() throws Exception {
+ AtomicBoolean called = new AtomicBoolean(false);
+ TLSTest test = new TLSTest()
+ .clientTrust(Trust.SNI_JKS_HOST2)
+ .address(SocketAddress.inetSocketAddress(DEFAULT_HTTPS_PORT, "host2.com"))
+ .serverCert(testPeerHostServerCert(Cert.SNI_JKS, called))
+ .sni(true);
+ test.run(true);
+ await();
+ assertEquals("host2.com", cnOf(test.clientPeerCert()));
+ assertEquals("host2.com", test.indicatedServerName);
+ assertTrue("X509ExtendedKeyManager.chooseEngineServerAlias is not called", called.get());
+ }
}
diff --git a/vertx-core/src/test/java/io/vertx/tests/tls/HttpTLSTest.java b/vertx-core/src/test/java/io/vertx/tests/tls/HttpTLSTest.java
--- a/vertx-core/src/test/java/io/vertx/tests/tls/HttpTLSTest.java
+++ b/vertx-core/src/test/java/io/vertx/tests/tls/HttpTLSTest.java
@@ -28,8 +28,10 @@
import java.security.interfaces.RSAPrivateKey;
import java.util.*;
import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.BiConsumer;
import java.util.function.Function;
import java.util.function.Supplier;
@@ -2118,4 +2120,193 @@ public PrivateKey getPrivateKey(String alias) {
// It is fine using worker threads in this case
}
}
+
+ /**
+ * Test that for HttpServer, the peer host and port info is available in the SSLEngine
+ * when the X509ExtendedKeyManager.chooseEngineServerAlias is called.
+ *
+ * @throws Exception if an error occurs
+ */
+ @Test
+ public void testTLSServerSSLEnginePeerHost() throws Exception {
+ AtomicBoolean called = new AtomicBoolean(false);
+ testTLS(Cert.NONE, Trust.SERVER_JKS, testPeerHostServerCert(Cert.SERVER_JKS, called), Trust.NONE).pass();
+ assertTrue("X509ExtendedKeyManager.chooseEngineServerAlias is not called", called.get());
+ }
+
+ /**
+ * Test that for HttpServer with SNI, the peer host and port info is available in the SSLEngine
+ * when the X509ExtendedKeyManager.chooseEngineServerAlias is called.
+ *
+ * @throws Exception if an error occurs
+ */
+ @Test
+ public void testSNIServerSSLEnginePeerHost() throws Exception {
+ AtomicBoolean called = new AtomicBoolean(false);
+ TLSTest test = testTLS(Cert.NONE, Trust.SNI_JKS_HOST2, testPeerHostServerCert(Cert.SNI_JKS, called), Trust.NONE)
+ .serverSni()
+ .requestOptions(new RequestOptions().setSsl(true).setPort(DEFAULT_HTTPS_PORT).setHost("host2.com"))
+ .pass();
+ assertEquals("host2.com", TestUtils.cnOf(test.clientPeerCert()));
+ assertEquals("host2.com", test.indicatedServerName);
+ assertTrue("X509ExtendedKeyManager.chooseEngineServerAlias is not called", called.get());
+ }
+
+ /**
+ * Create a {@link Cert} that will verify the peer host is not null and port is not -1 in the {@link SSLEngine}
+ * when the {@link X509ExtendedKeyManager#chooseEngineServerAlias(String, Principal[], SSLEngine)}
+ * is called.
+ *
+ * @param delegate The delegated Cert
+ * @param chooseEngineServerAliasCalled Will be set to true when the
+ * X509ExtendedKeyManager.chooseEngineServerAlias is called
+ * @return The {@link Cert}
+ */
+ public static Cert<KeyCertOptions> testPeerHostServerCert(Cert<? extends KeyCertOptions> delegate, AtomicBoolean chooseEngineServerAliasCalled) {
+ return testPeerHostServerCert(delegate, (peerHost, peerPort) -> {
+ chooseEngineServerAliasCalled.set(true);
+ if (peerHost == null || peerPort == -1) {
+ throw new RuntimeException("Missing peer host/port");
+ }
+ });
+ }
+
+ /**
+ * Create a {@link Cert} that will verify the peer host and port in the {@link SSLEngine}
+ * when the {@link X509ExtendedKeyManager#chooseEngineServerAlias(String, Principal[], SSLEngine)}
+ * is called.
+ *
+ * @param delegate The delegated Cert
+ * @param peerHostVerifier The consumer to verify the peer host and port when the
+ * X509ExtendedKeyManager.chooseEngineServerAlias is called
+ * @return The {@link Cert}
+ */
+ public static Cert<KeyCertOptions> testPeerHostServerCert(Cert<? extends KeyCertOptions> delegate, BiConsumer<String, Integer> peerHostVerifier) {
+ return () -> new VerifyServerPeerHostKeyCertOptions(delegate.get(), peerHostVerifier);
+ }
+
+ private static class VerifyServerPeerHostKeyCertOptions implements KeyCertOptions {
+ private final KeyCertOptions delegate;
+ private final BiConsumer<String, Integer> peerHostVerifier;
+
+ VerifyServerPeerHostKeyCertOptions(KeyCertOptions delegate, BiConsumer<String, Integer> peerHostVerifier) {
+ this.delegate = delegate;
+ this.peerHostVerifier = peerHostVerifier;
+ }
+
+ @Override
+ public KeyCertOptions copy() {
+ return new VerifyServerPeerHostKeyCertOptions(delegate.copy(), peerHostVerifier);
+ }
+
+ @Override
+ public KeyManagerFactory getKeyManagerFactory(Vertx vertx) throws Exception {
+ return new VerifyServerPeerHostKeyManagerFactory(delegate.getKeyManagerFactory(vertx), peerHostVerifier);
+ }
+
+ @Override
+ public Function<String, KeyManagerFactory> keyManagerFactoryMapper(Vertx vertx) throws Exception {
+ Function<String, KeyManagerFactory> mapper = delegate.keyManagerFactoryMapper(vertx);
+ return serverName -> new VerifyServerPeerHostKeyManagerFactory(mapper.apply(serverName), peerHostVerifier);
+ }
+ }
+
+ private static class VerifyServerPeerHostKeyManagerFactory extends KeyManagerFactory {
+ VerifyServerPeerHostKeyManagerFactory(KeyManagerFactory delegate, BiConsumer<String, Integer> peerHostVerifier) {
+ super(new KeyManagerFactorySpiWrapper(delegate, peerHostVerifier), delegate.getProvider(), delegate.getAlgorithm());
+ }
+
+ private static class KeyManagerFactorySpiWrapper extends KeyManagerFactorySpi {
+ private final KeyManagerFactory delegate;
+ private final BiConsumer<String, Integer> peerHostVerifier;
+
+ KeyManagerFactorySpiWrapper(KeyManagerFactory delegate, BiConsumer<String, Integer> peerHostVerifier) {
+ super();
+ this.delegate = delegate;
+ this.peerHostVerifier = peerHostVerifier;
+ }
+
+ @Override
+ protected void engineInit(KeyStore keyStore, char[] chars) throws KeyStoreException, NoSuchAlgorithmException, UnrecoverableKeyException {
+ delegate.init(keyStore, chars);
+ }
+
+ @Override
+ protected void engineInit(ManagerFactoryParameters managerFactoryParameters) throws InvalidAlgorithmParameterException {
+ delegate.init(managerFactoryParameters);
+ }
+
+ @Override
+ protected KeyManager[] engineGetKeyManagers() {
+ KeyManager[] keyManagers = delegate.getKeyManagers().clone();
+ for (int i = 0; i < keyManagers.length; ++i) {
+ KeyManager km = keyManagers[i];
+ if (km instanceof X509KeyManager) {
+ keyManagers[i] = new VerifyServerPeerHostKeyManager((X509KeyManager) km, peerHostVerifier);
+ }
+ }
+
+ return keyManagers;
+ }
+ }
+ }
+
+ private static class VerifyServerPeerHostKeyManager extends X509ExtendedKeyManager {
+ private final X509KeyManager delegate;
+ private final BiConsumer<String, Integer> peerHostVerifier;
+
+ VerifyServerPeerHostKeyManager(X509KeyManager delegate, BiConsumer<String, Integer> peerHostVerifier) {
+ this.delegate = delegate;
+ this.peerHostVerifier = peerHostVerifier;
+ }
+
+ @Override
+ public String chooseEngineClientAlias(String[] keyType, Principal[] issuers, SSLEngine engine) {
+ if (delegate instanceof X509ExtendedKeyManager) {
+ return ((X509ExtendedKeyManager) delegate).chooseEngineClientAlias(keyType, issuers, engine);
+ } else {
+ return delegate.chooseClientAlias(keyType, issuers, null);
+ }
+ }
+
+ @Override
+ public String chooseEngineServerAlias(String keyType, Principal[] issuers, SSLEngine engine) {
+ peerHostVerifier.accept(engine.getPeerHost(), engine.getPeerPort());
+ if (delegate instanceof X509ExtendedKeyManager) {
+ return ((X509ExtendedKeyManager) delegate).chooseEngineServerAlias(keyType, issuers, engine);
+ } else {
+ return delegate.chooseServerAlias(keyType, issuers, null);
+ }
+ }
+
+ @Override
+ public String chooseClientAlias(String[] keyType, Principal[] issuers, Socket socket) {
+ return delegate.chooseClientAlias(keyType, issuers, socket);
+ }
+
+ @Override
+ public String chooseServerAlias(String keyType, Principal[] issuers, Socket socket) {
+ return delegate.chooseServerAlias(keyType, issuers, socket);
+ }
+
+ @Override
+ public String[] getClientAliases(String s, Principal[] principals) {
+ return delegate.getClientAliases(s, principals);
+ }
+
+ @Override
+ public String[] getServerAliases(String s, Principal[] principals) {
+ return delegate.getServerAliases(s, principals);
+ }
+
+ @Override
+ public X509Certificate[] getCertificateChain(String s) {
+ return delegate.getCertificateChain(s);
+ }
+
+ @Override
+ public PrivateKey getPrivateKey(String s) {
+ return delegate.getPrivateKey(s);
+ }
+ }
}
| Missing peer host and port info in SSLEngine for server SslHandler
### Version
4.4.9
### Context
We have a customized key manager extending `X509ExtendedKeyManager` that wants to override the `public String chooseEngineServerAlias(String keyType, Principal[] issuers, SSLEngine engine)` method to choose the server alias based partly on the peer host address.
However, the `engine.getPeerHost()` always returns `null`.
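For illustration, here is a minimal sketch of the kind of key manager involved; the class name, alias names, and the peer-host rule are hypothetical, and everything else simply delegates to a wrapped key manager:
```java
import java.net.Socket;
import java.security.Principal;
import java.security.PrivateKey;
import java.security.cert.X509Certificate;
import javax.net.ssl.SSLEngine;
import javax.net.ssl.X509ExtendedKeyManager;

// Sketch only: choose the server alias based on the advisory peer host, if present.
public class PeerAwareKeyManager extends X509ExtendedKeyManager {

  private final X509ExtendedKeyManager delegate;

  public PeerAwareKeyManager(X509ExtendedKeyManager delegate) {
    this.delegate = delegate;
  }

  @Override
  public String chooseEngineServerAlias(String keyType, Principal[] issuers, SSLEngine engine) {
    // With the bug described here, getPeerHost() is always null on the server side.
    String peerHost = engine.getPeerHost();
    if (peerHost != null && peerHost.startsWith("10.")) {
      return "internal-cert"; // hypothetical alias for internal peers
    }
    return delegate.chooseEngineServerAlias(keyType, issuers, engine);
  }

  @Override
  public String chooseEngineClientAlias(String[] keyType, Principal[] issuers, SSLEngine engine) {
    return delegate.chooseEngineClientAlias(keyType, issuers, engine);
  }

  @Override
  public String chooseClientAlias(String[] keyType, Principal[] issuers, Socket socket) {
    return delegate.chooseClientAlias(keyType, issuers, socket);
  }

  @Override
  public String chooseServerAlias(String keyType, Principal[] issuers, Socket socket) {
    return delegate.chooseServerAlias(keyType, issuers, socket);
  }

  @Override
  public String[] getClientAliases(String keyType, Principal[] issuers) {
    return delegate.getClientAliases(keyType, issuers);
  }

  @Override
  public String[] getServerAliases(String keyType, Principal[] issuers) {
    return delegate.getServerAliases(keyType, issuers);
  }

  @Override
  public X509Certificate[] getCertificateChain(String alias) {
    return delegate.getCertificateChain(alias);
  }

  @Override
  public PrivateKey getPrivateKey(String alias) {
    return delegate.getPrivateKey(alias);
  }
}
```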
After reading the related code, I found that Netty's `SslContext.newHandler` does support passing in advisory peer information (the peer host and port).
However, in Vert.x, when creating the server `SslHandler` in `SslChannelProvider`, the peer host and port info is not passed to `SslContext.newHandler`, resulting in `null` for `engine.getPeerHost()` in `X509ExtendedKeyManager.chooseEngineServerAlias`.
(The `SslChannelProvider` does provide the peer host and port info when creating the client `SslHandler`.)
I tried to pass the peer host and port info from `HttpServerWorker` to `SslChannelProvider.createServerHandler` and found that the peer host and port are available in the `SSLEngine` in `X509ExtendedKeyManager.chooseEngineServerAlias`:
```diff
diff --git a/src/main/java/io/vertx/core/http/impl/HttpServerWorker.java b/src/main/java/io/vertx/core/http/impl/HttpServerWorker.java
index cf37c4e8b..82402c858 100644
--- a/src/main/java/io/vertx/core/http/impl/HttpServerWorker.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpServerWorker.java
@@ -35,6 +35,8 @@ import io.vertx.core.impl.VertxInternal;
import io.vertx.core.net.impl.*;
import io.vertx.core.spi.metrics.HttpServerMetrics;
+import java.net.InetSocketAddress;
+import java.net.SocketAddress;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.function.BiConsumer;
@@ -131,7 +133,12 @@ public class HttpServerWorker implements BiConsumer<Channel, SslChannelProvider>
private void configurePipeline(Channel ch, SslChannelProvider sslChannelProvider) {
ChannelPipeline pipeline = ch.pipeline();
if (options.isSsl()) {
- pipeline.addLast("ssl", sslChannelProvider.createServerHandler());
+ SocketAddress remoteAddress = ch.remoteAddress();
+ if (remoteAddress instanceof InetSocketAddress) {
+ pipeline.addLast("ssl", sslChannelProvider.createServerHandler(((InetSocketAddress) remoteAddress).getHostString(), ((InetSocketAddress) remoteAddress).getPort()));
+ } else {
+ pipeline.addLast("ssl", sslChannelProvider.createServerHandler());
+ }
ChannelPromise p = ch.newPromise();
pipeline.addLast("handshaker", new SslHandshakeCompletionHandler(p));
p.addListener(future -> {
diff --git a/src/main/java/io/vertx/core/net/impl/SslChannelProvider.java b/src/main/java/io/vertx/core/net/impl/SslChannelProvider.java
index 290bf8c23..cb5aba5d1 100644
--- a/src/main/java/io/vertx/core/net/impl/SslChannelProvider.java
+++ b/src/main/java/io/vertx/core/net/impl/SslChannelProvider.java
@@ -144,17 +144,21 @@ public class SslChannelProvider {
}
public ChannelHandler createServerHandler() {
+ return createServerHandler(null, -1);
+ }
+
+ public ChannelHandler createServerHandler(String peerHost, int peerPort) {
if (sni) {
return createSniHandler();
} else {
- return createServerSslHandler(useAlpn);
+ return createServerSslHandler(useAlpn, peerHost, peerPort);
}
}
- private SslHandler createServerSslHandler(boolean useAlpn) {
+ private SslHandler createServerSslHandler(boolean useAlpn, String peerHost, int peerPort) {
SslContext sslContext = sslServerContext(useAlpn);
Executor delegatedTaskExec = useWorkerPool ? workerPool : ImmediateExecutor.INSTANCE;
- SslHandler sslHandler = sslContext.newHandler(ByteBufAllocator.DEFAULT, delegatedTaskExec);
+ SslHandler sslHandler = sslContext.newHandler(ByteBufAllocator.DEFAULT, peerHost, peerPort, delegatedTaskExec);
sslHandler.setHandshakeTimeout(sslHandshakeTimeout, sslHandshakeTimeoutUnit);
return sslHandler;
}
```
There are a few other places calling `SslChannelProvider.createServerHandler`, so although this change works for my use case, a more complete fix may be needed.
| @ben1222 can you provide a reproducer for this using the vert.x tests so we are covered? That would help
@vietj I tried to create a unit test under `Http1xTLSTest`:
```java
public class Http1xTLSTest extends HttpTLSTest {
private static final Logger LOG = LogManager.getLogger(Http1xTLSTest.class);
@Test
public void testTLSServerSSLEnginePeerHost() throws Exception {
testTLS(Cert.NONE, Trust.SERVER_JKS, () -> {
try {
return KeyCertOptions.wrap(new MyKeyManager((X509KeyManager) Cert.SERVER_JKS.get().getKeyManagerFactory(vertx).getKeyManagers()[0]));
} catch (Exception e) {
throw new RuntimeException(e);
}
}, Trust.NONE).pass();
}
private static class MyKeyManager extends X509ExtendedKeyManager {
private final X509KeyManager wrapped;
MyKeyManager(X509KeyManager wrapped) {
this.wrapped = wrapped;
}
@Override
public String chooseEngineClientAlias(String[] keyType, Principal[] issuers, SSLEngine engine) {
throw new UnsupportedOperationException();
}
@Override
public String chooseEngineServerAlias(String keyType, Principal[] issuers, SSLEngine engine) {
LOG.info("In chooseEngineServerAlias, keyType: {}, issuers: {}, peer host: {}, peer port: {}",
keyType, issuers, engine.getPeerHost(), engine.getPeerPort());
if (engine.getPeerHost() == null || engine.getPeerPort() == -1) {
throw new RuntimeException("Missing peer host/port");
}
return wrapped.chooseServerAlias(keyType, issuers, null);
}
@Override
public String chooseClientAlias(String[] keyType, Principal[] issuers, Socket socket) {
throw new UnsupportedOperationException();
}
@Override
public String chooseServerAlias(String keyType, Principal[] issuers, Socket socket) {
throw new UnsupportedOperationException();
}
@Override
public String[] getClientAliases(String s, Principal[] principals) {
throw new UnsupportedOperationException();
}
@Override
public String[] getServerAliases(String s, Principal[] principals) {
return wrapped.getServerAliases(s, principals);
}
@Override
public X509Certificate[] getCertificateChain(String s) {
LOG.info("In getCertificateChain, s: {}", s);
return wrapped.getCertificateChain(s);
}
@Override
public PrivateKey getPrivateKey(String s) {
LOG.info("In getPrivateKey, s: {}", s);
return wrapped.getPrivateKey(s);
}
}
//...
}
```
Currently it will fail with:
```
Starting test: Http1xTLSTest#testTLSServerSSLEnginePeerHost
2024-08-30 00:02:25,606 [vert.x-eventloop-thread-2] INFO io.vertx.core.http.Http1xTLSTest Http1xTLSTest$MyKeyManager.chooseEngineServerAlias:296 null - In chooseEngineServerAlias, keyType: EC, issuers: null, peer host: null, peer port: -1
java.lang.RuntimeException: Missing peer host/port
at io.vertx.core.http.Http1xTLSTest$MyKeyManager.chooseEngineServerAlias(Http1xTLSTest.java:299)
at java.base/sun.security.ssl.X509Authentication$X509PossessionGenerator.createServerPossession(X509Authentication.java:293)
at java.base/sun.security.ssl.X509Authentication$X509PossessionGenerator.createPossession(X509Authentication.java:214)
at java.base/sun.security.ssl.X509Authentication.createPossession(X509Authentication.java:90)
at java.base/sun.security.ssl.CertificateMessage$T13CertificateProducer.choosePossession(CertificateMessage.java:1081)
at java.base/sun.security.ssl.CertificateMessage$T13CertificateProducer.onProduceCertificate(CertificateMessage.java:970)
at java.base/sun.security.ssl.CertificateMessage$T13CertificateProducer.produce(CertificateMessage.java:961)
at java.base/sun.security.ssl.SSLHandshake.produce(SSLHandshake.java:436)
...
java.lang.AssertionError: Should not fail Failed to create SSL connection
at org.junit.Assert.fail(Assert.java:89)
at org.junit.Assert.assertTrue(Assert.java:42)
at org.junit.Assert.assertFalse(Assert.java:65)
at io.vertx.test.core.AsyncTestBase.assertFalse(AsyncTestBase.java:259)
at io.vertx.core.http.HttpTLSTest.access$300(HttpTLSTest.java:68)
at io.vertx.core.http.HttpTLSTest$TLSTest.lambda$run$10(HttpTLSTest.java:1312)
at io.vertx.core.impl.future.FutureImpl$2.onFailure(FutureImpl.java:117)
...
```
With the changes in `HttpServerWorker` and `SslChannelProvider`, it succeeds:
```
Starting test: Http1xTLSTest#testTLSServerSSLEnginePeerHost
2024-08-30 00:10:35,108 [vert.x-eventloop-thread-2] INFO io.vertx.core.http.Http1xTLSTest Http1xTLSTest$MyKeyManager.chooseEngineServerAlias:294 null - In chooseEngineServerAlias, keyType: EC, issuers: null, peer host: 127.0.0.1, peer port: 48470
2024-08-30 00:10:35,111 [vert.x-eventloop-thread-2] INFO io.vertx.core.http.Http1xTLSTest Http1xTLSTest$MyKeyManager.chooseEngineServerAlias:294 null - In chooseEngineServerAlias, keyType: EC, issuers: null, peer host: 127.0.0.1, peer port: 48470
2024-08-30 00:10:35,112 [vert.x-eventloop-thread-2] INFO io.vertx.core.http.Http1xTLSTest Http1xTLSTest$MyKeyManager.chooseEngineServerAlias:294 null - In chooseEngineServerAlias, keyType: EC, issuers: null, peer host: 127.0.0.1, peer port: 48470
2024-08-30 00:10:35,112 [vert.x-eventloop-thread-2] INFO io.vertx.core.http.Http1xTLSTest Http1xTLSTest$MyKeyManager.chooseEngineServerAlias:294 null - In chooseEngineServerAlias, keyType: RSA, issuers: null, peer host: 127.0.0.1, peer port: 48470
2024-08-30 00:10:35,112 [vert.x-eventloop-thread-2] INFO io.vertx.core.http.Http1xTLSTest Http1xTLSTest$MyKeyManager.getPrivateKey:330 null - In getPrivateKey, s: test-store
2024-08-30 00:10:35,113 [vert.x-eventloop-thread-2] INFO io.vertx.core.http.Http1xTLSTest Http1xTLSTest$MyKeyManager.getCertificateChain:324 null - In getCertificateChain, s: test-store
```
do you mind contributing a pull request to the 4.x branch and master branch ?
@vietj I can have a try. Do I need to go through some process before sending the pull request? I see the contributing guidelines mention signing the ECA?
Since there are a few other places (`NetServerImpl`, `NetSocketImpl`) calling `SslChannelProvider.createServerHandler`, I'll try to also update them to pass the peer host info to `SslChannelProvider.createServerHandler`; is that ok?
you should sign the Eclipse Agreement indeed
everything should be updated and tested in master and 4.x branches
@vietj I signed the ECA and opened pull requests on the 4.x branch (#5346) and the master branch (#5347), please review. | 2024-10-08T06:17:37Z | 4.4 |
eclipse-vertx/vert.x | 5,273 | eclipse-vertx__vert.x-5273 | [
"5272"
] | d837b49bc346fb37d30dd2f72cf0b0d5976df6a1 | diff --git a/vertx-core/src/main/java/io/vertx/core/impl/HAManager.java b/vertx-core/src/main/java/io/vertx/core/impl/HAManager.java
--- a/vertx-core/src/main/java/io/vertx/core/impl/HAManager.java
+++ b/vertx-core/src/main/java/io/vertx/core/impl/HAManager.java
@@ -373,6 +373,8 @@ private void checkQuorum() {
if (group.equals(this.group)) {
count++;
}
+ } else if (!attainedQuorum) {
+ checkQuorumWhenAdded(node, System.currentTimeMillis());
}
}
boolean attained = count >= quorumSize;
| diff --git a/vertx-core/src/test/java/io/vertx/tests/ha/HAQuorumTest.java b/vertx-core/src/test/java/io/vertx/tests/ha/HAQuorumTest.java
new file mode 100644
--- /dev/null
+++ b/vertx-core/src/test/java/io/vertx/tests/ha/HAQuorumTest.java
@@ -0,0 +1,90 @@
+package io.vertx.tests.ha;
+
+import io.vertx.core.DeploymentOptions;
+import io.vertx.core.Vertx;
+import io.vertx.core.VertxOptions;
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.spi.cluster.ClusterManager;
+import io.vertx.core.spi.cluster.NodeListener;
+import io.vertx.test.core.VertxTestBase;
+import io.vertx.test.fakecluster.FakeClusterManager;
+import org.junit.Test;
+
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+public final class HAQuorumTest extends VertxTestBase {
+ protected ClusterManager getClusterManager() {
+ return new FakeClusterManager() {
+ @Override
+ public void nodeListener(NodeListener listener) {
+ //do nothing
+ }
+ };
+ }
+
+ @Test
+ public void quorumIsObtainedOnNodeInfoPutThatDoneLaterThanClusterWasCreated() throws Exception {
+ //given
+ final Vertx vertx1 = startVertx(2);
+ final DeploymentOptions options = new DeploymentOptions().setHa(true);
+ final JsonObject config = new JsonObject().put("foo", "bar");
+ options.setConfig(config);
+
+ final Map<String, String> haInfoMap = getClusterManager().getSyncMap("__vertx.haInfo");
+ assertEquals(1, haInfoMap.size());
+ final Map.Entry<String, String> vertx1HaInfo = haInfoMap.entrySet().iterator().next();
+ haInfoMap.remove(vertx1HaInfo.getKey());
+
+ final Vertx vertx2 = startVertx(2);
+
+ vertx2.deployVerticle("java:" + HAVerticle2.class.getName(), options).onComplete(onSuccess(id -> {
+ assertTrue(vertx2.deploymentIDs().contains(id));
+ testComplete();
+ }));
+
+ assertWaitUntil(() -> vertx2.deploymentIDs().isEmpty());
+
+ //when
+ haInfoMap.put(vertx1HaInfo.getKey(), vertx1HaInfo.getValue());
+
+ // then
+ await();
+
+ closeVertices(vertx1, vertx2);
+ }
+
+ private Vertx startVertx(int quorumSize) throws Exception {
+ final VertxOptions options = new VertxOptions().setHAEnabled(true);
+ options.getEventBusOptions().setHost("localhost");
+ options.setQuorumSize(quorumSize);
+ final CountDownLatch latch = new CountDownLatch(1);
+ final AtomicReference<Vertx> vertxRef = new AtomicReference<>();
+ Vertx.builder()
+ .with(options)
+ .withClusterManager(getClusterManager())
+ .buildClustered()
+ .onComplete(onSuccess(vertx -> {
+ vertxRef.set(vertx);
+ latch.countDown();
+ }));
+ latch.await(2, TimeUnit.MINUTES);
+ return vertxRef.get();
+ }
+
+ private void closeVertices(Vertx... vertices) throws Exception {
+ CountDownLatch latch = new CountDownLatch(vertices.length);
+ for (Vertx vertex : vertices) {
+ if (vertex != null) {
+ vertex.close().onComplete(onSuccess(res -> {
+ latch.countDown();
+ }));
+ } else {
+ latch.countDown();
+ }
+ }
+ awaitLatch(latch, 2, TimeUnit.MINUTES);
+ }
+}
| HAManager couldn't see the quorum if, during its initialization, the cluster manager already had all nodes joined
### Version
4.5.7
### Context
My app is based on Vert.x with an embedded Hazelcast cluster manager. When I run several instances of the app with HA mode enabled, some instances cannot deploy HA verticles, and the logs show that they have not attained the quorum: `Quorum not attained. Deployment of verticle will be delayed until there's a quorum.`
But actually they have.
The problem is that the Hazelcast cluster can be in a state where all nodes have already joined at the moment `HAManager` initialization starts, so `HAManager`'s `nodeAdded` method will never be called. Also, the `checkQuorum` method called on `init` sees all nodes from the `clusterManager`, but `clusterMap` does not yet contain information from all of them. They will put it there a few milliseconds later, of course, but by then the `checkQuorum` method has already completed and will never be called again.
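In essence, the fix in the patch above makes `checkQuorum` treat such a node as "check again later" instead of ignoring it forever. A condensed sketch of the patched loop, adapted from the diff and relying on `HAManager`'s existing fields (so not standalone):
```java
// Sketch of the fixed quorum check: a node that is visible in the cluster manager
// but has not yet published its HA info triggers a deferred re-check instead of
// being silently skipped forever.
private void checkQuorum() {
  int count = 0;
  for (String node : clusterManager.getNodes()) {
    String json = clusterMap.get(node);   // HA info each node publishes after joining
    if (json != null) {
      String group = new JsonObject(json).getString("group");
      if (this.group.equals(group)) {
        count++;                          // fully initialised node in our HA group
      }
    } else if (!attainedQuorum) {
      // The node joined before this HAManager started listening, so nodeAdded()
      // will never fire for it; poll again until its HA info shows up.
      checkQuorumWhenAdded(node, System.currentTimeMillis());
    }
  }
  attainedQuorum = count >= quorumSize;
}
```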
### Steps to reproduce
It just occurs in some cases.
| 2024-08-03T10:06:36Z | 4.4 |
|
eclipse-vertx/vert.x | 5,137 | eclipse-vertx__vert.x-5137 | [
"5136"
] | 582d864dc61f566844d7d7150da0c9dc52ac7f01 | diff --git a/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java b/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java
--- a/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java
@@ -467,7 +467,10 @@ HttpClientRequest createRequest(HttpConnection connection, HttpClientStream stre
request.redirectHandler(resp -> {
Future<RequestOptions> fut_ = rHandler.apply(resp);
if (fut_ != null) {
- return fut_.compose(this::request);
+ return fut_.compose(o -> {
+ o.setProxyOptions(options.getProxyOptions());
+ return this.request(o);
+ });
} else {
return null;
}
| diff --git a/src/test/java/io/vertx/core/http/HttpTest.java b/src/test/java/io/vertx/core/http/HttpTest.java
--- a/src/test/java/io/vertx/core/http/HttpTest.java
+++ b/src/test/java/io/vertx/core/http/HttpTest.java
@@ -60,8 +60,10 @@
import java.util.function.*;
import java.util.stream.IntStream;
+import static io.vertx.core.http.HttpMethod.GET;
import static io.vertx.core.http.HttpMethod.PUT;
import static io.vertx.test.core.TestUtils.*;
+import static org.hamcrest.CoreMatchers.instanceOf;
/**
* @author <a href="mailto:julien@julienviet.com">Julien Viet</a>
@@ -3726,6 +3728,55 @@ public void testFollowRedirectPutOn308() throws Exception {
testFollowRedirect(HttpMethod.PUT, HttpMethod.PUT, 308, 308, 1, "http://" + DEFAULT_HTTP_HOST_AND_PORT + "/redirected", "http://" + DEFAULT_HTTP_HOST_AND_PORT + "/somepath");
}
+
+ @Test
+ public void testFollowRedirectsWithProxy() throws Exception {
+ Assume.assumeThat("Proxy is only supported with HTTP/1", this, instanceOf(Http1xTest.class));
+ waitFor(2);
+ String location = "http://" + DEFAULT_HTTP_HOST + ":" + DEFAULT_HTTP_PORT + "/ok";
+ server.requestHandler(req -> {
+ if (!req.headers().contains("foo", "bar", true)) {
+ fail("Missing expected header");
+ return;
+ }
+ assertEquals(Collections.singletonList("bar"), req.headers().getAll("foo"));
+ if (req.path().equals("/redirect")) {
+ req.response().setStatusCode(301).putHeader("Location", location).end();
+ } else {
+ req.response().end(req.path());
+ complete();
+ }
+ });
+
+ startServer();
+ startProxy(null, ProxyType.HTTP);
+ client.request(
+ new RequestOptions(requestOptions)
+ .setServer(null)
+ .setMethod(GET)
+ .setURI("/redirect")
+ .setProxyOptions(new ProxyOptions().setPort(proxy.port()))
+ )
+ .compose(req -> req
+ .putHeader("foo", "bar")
+ .setFollowRedirects(true)
+ .send()
+ .compose(resp -> {
+ assertEquals(200, resp.statusCode());
+ assertEquals(location, proxy.getLastUri());
+ return resp.body().compose(body -> {
+ if (resp.statusCode() == 200) {
+ assertEquals(Buffer.buffer("/ok"), body);
+ } else {
+ assertEquals(Buffer.buffer(), body);
+ }
+ return Future.succeededFuture();
+ });
+ })
+ ).onSuccess(v -> testComplete());
+ await();
+ }
+
private void testFollowRedirect(
HttpMethod method,
HttpMethod expectedMethod,
diff --git a/src/test/java/io/vertx/test/proxy/HttpProxy.java b/src/test/java/io/vertx/test/proxy/HttpProxy.java
--- a/src/test/java/io/vertx/test/proxy/HttpProxy.java
+++ b/src/test/java/io/vertx/test/proxy/HttpProxy.java
@@ -168,7 +168,7 @@ public HttpProxy start(Vertx vertx) throws Exception {
}
resp.body().onComplete(ar2 -> {
if (ar2.succeeded()) {
- request.response().end(ar2.result());
+ request.response().setStatusCode(resp.statusCode()).end(ar2.result());
} else {
request.response().setStatusCode(500).end(ar2.cause().toString() + " on client request");
}
| HttpClient drops original proxy on redirect
### Version
In which version(s) did you encounter this bug?
`>= 4.x`
### Context
`RequestOptions` allows us to set a proxy per request, but upon redirect the original proxy gets lost.
It also affects `WebClient`.
### Do you have a reproducer?
I have added tests for it in the PR; just run `HttpTest`.
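For context, a minimal usage sketch of the affected combination: a per-request proxy together with `setFollowRedirects(true)`. The URL and proxy host/port here are hypothetical; before this fix, the request to the redirect target was sent without the proxy.
```java
import io.vertx.core.Vertx;
import io.vertx.core.http.HttpClient;
import io.vertx.core.http.HttpMethod;
import io.vertx.core.http.RequestOptions;
import io.vertx.core.net.ProxyOptions;

public class ProxyRedirectExample {
  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();
    HttpClient client = vertx.createHttpClient();
    RequestOptions options = new RequestOptions()
      .setMethod(HttpMethod.GET)
      .setAbsoluteURI("http://example.com/redirect")  // answers with a 301 + Location
      .setProxyOptions(new ProxyOptions().setHost("localhost").setPort(3128));
    client.request(options)
      .compose(req -> req.setFollowRedirects(true).send())
      // The redirected request should go through the same proxy as the original one.
      .onSuccess(resp -> System.out.println("status: " + resp.statusCode()))
      .onFailure(Throwable::printStackTrace);
  }
}
```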
| 2024-02-28T07:35:54Z | 4.4 |
|
eclipse-vertx/vert.x | 4,904 | eclipse-vertx__vert.x-4904 | [
"4900"
] | 9cdc623860749f1b22482120f392ac34829e61b5 | diff --git a/src/main/java/io/vertx/core/impl/TaskQueue.java b/src/main/java/io/vertx/core/impl/TaskQueue.java
--- a/src/main/java/io/vertx/core/impl/TaskQueue.java
+++ b/src/main/java/io/vertx/core/impl/TaskQueue.java
@@ -16,6 +16,7 @@
import java.util.LinkedList;
import java.util.concurrent.Executor;
+import java.util.concurrent.RejectedExecutionException;
import java.util.function.Consumer;
/**
@@ -124,11 +125,27 @@ public Consumer<Runnable> unschedule() {
*/
public void execute(Runnable task, Executor executor) {
synchronized (tasks) {
- tasks.add(new ExecuteTask(task, executor));
- if (this.currentExecutor == null) {
- this.currentExecutor = executor;
- executor.execute(runner);
+ if (currentExecutor == null) {
+ currentExecutor = executor;
+ try {
+ executor.execute(runner);
+ } catch (RejectedExecutionException e) {
+ currentExecutor = null;
+ throw e;
+ }
}
+ // Add the task after the runner has been accepted to the executor
+ // to cover the case of a rejected execution exception.
+ tasks.add(new ExecuteTask(task, executor));
+ }
+ }
+
+ /**
+ * Test if the task queue is empty and no current executor is running anymore.
+ */
+ public boolean isEmpty() {
+ synchronized (tasks) {
+ return tasks.isEmpty() && currentExecutor == null;
}
}
| diff --git a/src/test/java/io/vertx/core/impl/TaskQueueTest.java b/src/test/java/io/vertx/core/impl/TaskQueueTest.java
new file mode 100644
--- /dev/null
+++ b/src/test/java/io/vertx/core/impl/TaskQueueTest.java
@@ -0,0 +1,34 @@
+package io.vertx.core.impl;
+
+import org.assertj.core.api.Assertions;
+import org.junit.Test;
+
+import java.util.concurrent.Executor;
+import java.util.concurrent.RejectedExecutionException;
+
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+/**
+ * @author Alexander Schwartz
+ */
+public class TaskQueueTest {
+
+ Executor executorThatAlwaysThrowsRejectedExceptions = new Executor() {
+ @Override
+ public void execute(Runnable command) {
+ throw new RejectedExecutionException();
+ }
+ };
+
+ TaskQueue taskQueue = new TaskQueue();
+
+ @Test
+ public void shouldNotHaveTaskInQueueWhenTaskHasBeenRejected() {
+ assertThatThrownBy(
+ () -> taskQueue.execute(new Thread(), executorThatAlwaysThrowsRejectedExceptions)
+ ).isInstanceOf(RejectedExecutionException.class);
+
+ Assertions.assertThat(taskQueue.isEmpty()).isTrue();
+ }
+
+}
| TaskQueue doesn't remove the entry from tasks on RejectedExecutionException
### Version
`master` branch.
### Context
I had a look at the code while analyzing a problem in 4.4.x, after I had configured a thread pool with a limited backlog that would eventually reject executions.
I found that the code in `master` throws the exception saying the task was rejected, but the task is still in the `taskQueue`, so it will eventually be executed when another task is later submitted successfully.
So for the caller it is both rejected and accepted, which is confusing and IMHO wrong.
I'll create a different issue for that, as it doesn't handle the RejectedExecutionException at all.
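For clarity, this is the essence of the fix in the patch above: hand the runner to the executor first, and only enqueue the task once it has been accepted (sketch using the names from `TaskQueue`):
```java
// Sketch of the fixed TaskQueue.execute: a rejected execution leaves neither a
// queued task nor a stale currentExecutor behind, so the queue stays usable.
public void execute(Runnable task, Executor executor) {
  synchronized (tasks) {
    if (currentExecutor == null) {
      currentExecutor = executor;
      try {
        executor.execute(runner);
      } catch (RejectedExecutionException e) {
        currentExecutor = null;  // roll back so later submissions can retry
        throw e;                 // the caller sees a clean rejection
      }
    }
    // Add the task only after the runner has been accepted by the executor.
    tasks.add(new ExecuteTask(task, executor));
  }
}
```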
### Do you have a reproducer?
Here is a unit test as a reproducer. Note that (at least for the reproducer) I made the `tasks` attribute public so I can use it in the test. See https://github.com/ahus1/vert.x/tree/poc-show-how-taskqueue-fails-in-main
```java
TaskQueue taskQueue = new TaskQueue();
Executor executorThatAlwaysThrowsRejectedExceptions = new Executor() {
@Override
public void execute(Runnable command) {
throw new RejectedExecutionException();
}
};
assertThatThrownBy(() -> {
taskQueue.execute(new Thread(), executorThatAlwaysThrowsRejectedExceptions);
}).isInstanceOf(RejectedExecutionException.class);
// this fails in main
Assertions.assertThat(taskQueue.tasks).isEmpty();
```
### Steps to reproduce
See test.
### Extra
I can start writing a test, but I'm not sure how to test this.
| 2023-10-12T07:58:56Z | 4.4 |
|
eclipse-vertx/vert.x | 5,346 | eclipse-vertx__vert.x-5346 | [
"5290"
] | ba4c372ba8f3259033ba2b0156d1d1acc7a12bc3 | diff --git a/src/main/java/io/vertx/core/http/impl/HttpServerWorker.java b/src/main/java/io/vertx/core/http/impl/HttpServerWorker.java
--- a/src/main/java/io/vertx/core/http/impl/HttpServerWorker.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpServerWorker.java
@@ -129,7 +129,7 @@ public void accept(Channel ch, SslChannelProvider sslChannelProvider) {
private void configurePipeline(Channel ch, SslChannelProvider sslChannelProvider) {
ChannelPipeline pipeline = ch.pipeline();
if (options.isSsl()) {
- pipeline.addLast("ssl", sslChannelProvider.createServerHandler());
+ pipeline.addLast("ssl", sslChannelProvider.createServerHandler(HttpUtils.socketAddressToHostAndPort(ch.remoteAddress())));
ChannelPromise p = ch.newPromise();
pipeline.addLast("handshaker", new SslHandshakeCompletionHandler(p));
p.addListener(future -> {
diff --git a/src/main/java/io/vertx/core/http/impl/HttpUtils.java b/src/main/java/io/vertx/core/http/impl/HttpUtils.java
--- a/src/main/java/io/vertx/core/http/impl/HttpUtils.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpUtils.java
@@ -40,6 +40,8 @@
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.RandomAccessFile;
+import java.net.InetSocketAddress;
+import java.net.SocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.Charset;
@@ -1078,4 +1080,20 @@ public static boolean canUpgradeToWebSocket(HttpServerRequest req) {
}
return false;
}
+
+ /**
+ * Convert a {@link SocketAddress} to a {@link HostAndPort}.
+ * If the socket address is an {@link InetSocketAddress}, the hostString and port are used.
+ * Otherwise {@code null} is returned.
+ *
+ * @param socketAddress The socket address to convert
+ * @return The converted instance or {@code null} if not applicable.
+ */
+ public static HostAndPort socketAddressToHostAndPort(SocketAddress socketAddress) {
+ if (socketAddress instanceof InetSocketAddress) {
+ InetSocketAddress inetSocketAddress = (InetSocketAddress) socketAddress;
+ return new HostAndPortImpl(inetSocketAddress.getHostString(), inetSocketAddress.getPort());
+ }
+ return null;
+ }
}
diff --git a/src/main/java/io/vertx/core/net/impl/NetServerImpl.java b/src/main/java/io/vertx/core/net/impl/NetServerImpl.java
--- a/src/main/java/io/vertx/core/net/impl/NetServerImpl.java
+++ b/src/main/java/io/vertx/core/net/impl/NetServerImpl.java
@@ -14,7 +14,6 @@
import io.netty.channel.Channel;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.ChannelPromise;
-import io.netty.channel.EventLoopGroup;
import io.netty.handler.codec.haproxy.HAProxyMessageDecoder;
import io.netty.handler.logging.LoggingHandler;
import io.netty.handler.stream.ChunkedWriteHandler;
@@ -26,6 +25,7 @@
import io.vertx.core.Future;
import io.vertx.core.Handler;
import io.vertx.core.Promise;
+import io.vertx.core.http.impl.HttpUtils;
import io.vertx.core.impl.ContextInternal;
import io.vertx.core.impl.VertxInternal;
import io.vertx.core.impl.logging.Logger;
@@ -34,7 +34,6 @@
import io.vertx.core.net.NetServerOptions;
import io.vertx.core.net.NetSocket;
import io.vertx.core.net.SocketAddress;
-import io.vertx.core.net.TrafficShapingOptions;
import io.vertx.core.spi.metrics.MetricsProvider;
import io.vertx.core.spi.metrics.TCPMetrics;
import io.vertx.core.spi.metrics.VertxMetrics;
@@ -223,7 +222,7 @@ public void accept(Channel ch, SslChannelProvider sslChannelProvider) {
private void configurePipeline(Channel ch, SslChannelProvider sslChannelProvider) {
if (options.isSsl()) {
- ch.pipeline().addLast("ssl", sslChannelProvider.createServerHandler());
+ ch.pipeline().addLast("ssl", sslChannelProvider.createServerHandler(HttpUtils.socketAddressToHostAndPort(ch.remoteAddress())));
ChannelPromise p = ch.newPromise();
ch.pipeline().addLast("handshaker", new SslHandshakeCompletionHandler(p));
p.addListener(future -> {
diff --git a/src/main/java/io/vertx/core/net/impl/NetSocketImpl.java b/src/main/java/io/vertx/core/net/impl/NetSocketImpl.java
--- a/src/main/java/io/vertx/core/net/impl/NetSocketImpl.java
+++ b/src/main/java/io/vertx/core/net/impl/NetSocketImpl.java
@@ -26,6 +26,7 @@
import io.vertx.core.buffer.Buffer;
import io.vertx.core.eventbus.Message;
import io.vertx.core.eventbus.MessageConsumer;
+import io.vertx.core.http.impl.HttpUtils;
import io.vertx.core.impl.ContextInternal;
import io.vertx.core.impl.future.PromiseInternal;
import io.vertx.core.impl.logging.Logger;
@@ -337,7 +338,7 @@ public Future<Void> upgradeToSsl(String serverName) {
if (remoteAddress != null) {
sslHandler = sslChannelProvider.createClientSslHandler(remoteAddress, serverName, false);
} else {
- sslHandler = sslChannelProvider.createServerHandler();
+ sslHandler = sslChannelProvider.createServerHandler(HttpUtils.socketAddressToHostAndPort(chctx.channel().remoteAddress()));
}
chctx.pipeline().addFirst("ssl", sslHandler);
} else {
diff --git a/src/main/java/io/vertx/core/net/impl/SslChannelProvider.java b/src/main/java/io/vertx/core/net/impl/SslChannelProvider.java
--- a/src/main/java/io/vertx/core/net/impl/SslChannelProvider.java
+++ b/src/main/java/io/vertx/core/net/impl/SslChannelProvider.java
@@ -18,6 +18,7 @@
import io.netty.util.AsyncMapping;
import io.netty.util.concurrent.ImmediateExecutor;
import io.vertx.core.VertxException;
+import io.vertx.core.net.HostAndPort;
import io.vertx.core.net.SocketAddress;
import javax.net.ssl.KeyManagerFactory;
@@ -143,25 +144,30 @@ public SslHandler createClientSslHandler(SocketAddress remoteAddress, String ser
return sslHandler;
}
- public ChannelHandler createServerHandler() {
+ public ChannelHandler createServerHandler(HostAndPort remoteAddress) {
if (sni) {
- return createSniHandler();
+ return createSniHandler(remoteAddress);
} else {
- return createServerSslHandler(useAlpn);
+ return createServerSslHandler(useAlpn, remoteAddress);
}
}
- private SslHandler createServerSslHandler(boolean useAlpn) {
+ private SslHandler createServerSslHandler(boolean useAlpn, HostAndPort remoteAddress) {
SslContext sslContext = sslServerContext(useAlpn);
Executor delegatedTaskExec = useWorkerPool ? workerPool : ImmediateExecutor.INSTANCE;
- SslHandler sslHandler = sslContext.newHandler(ByteBufAllocator.DEFAULT, delegatedTaskExec);
+ SslHandler sslHandler;
+ if (remoteAddress != null) {
+ sslHandler = sslContext.newHandler(ByteBufAllocator.DEFAULT, remoteAddress.host(), remoteAddress.port(), delegatedTaskExec);
+ } else {
+ sslHandler = sslContext.newHandler(ByteBufAllocator.DEFAULT, delegatedTaskExec);
+ }
sslHandler.setHandshakeTimeout(sslHandshakeTimeout, sslHandshakeTimeoutUnit);
return sslHandler;
}
- private SniHandler createSniHandler() {
+ private SniHandler createSniHandler(HostAndPort remoteAddress) {
Executor delegatedTaskExec = useWorkerPool ? workerPool : ImmediateExecutor.INSTANCE;
- return new VertxSniHandler(serverNameMapping(), sslHandshakeTimeoutUnit.toMillis(sslHandshakeTimeout), delegatedTaskExec);
+ return new VertxSniHandler(serverNameMapping(), sslHandshakeTimeoutUnit.toMillis(sslHandshakeTimeout), delegatedTaskExec, remoteAddress);
}
private static int idx(boolean useAlpn) {
diff --git a/src/main/java/io/vertx/core/net/impl/VertxSniHandler.java b/src/main/java/io/vertx/core/net/impl/VertxSniHandler.java
--- a/src/main/java/io/vertx/core/net/impl/VertxSniHandler.java
+++ b/src/main/java/io/vertx/core/net/impl/VertxSniHandler.java
@@ -15,6 +15,7 @@
import io.netty.handler.ssl.SslContext;
import io.netty.handler.ssl.SslHandler;
import io.netty.util.AsyncMapping;
+import io.vertx.core.net.HostAndPort;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
@@ -27,16 +28,24 @@
class VertxSniHandler extends SniHandler {
private final Executor delegatedTaskExec;
+ private final HostAndPort remoteAddress;
- public VertxSniHandler(AsyncMapping<? super String, ? extends SslContext> mapping, long handshakeTimeoutMillis, Executor delegatedTaskExec) {
+ public VertxSniHandler(AsyncMapping<? super String, ? extends SslContext> mapping, long handshakeTimeoutMillis, Executor delegatedTaskExec,
+ HostAndPort remoteAddress) {
super(mapping, handshakeTimeoutMillis);
this.delegatedTaskExec = delegatedTaskExec;
+ this.remoteAddress = remoteAddress;
}
@Override
protected SslHandler newSslHandler(SslContext context, ByteBufAllocator allocator) {
- SslHandler sslHandler = context.newHandler(allocator, delegatedTaskExec);
+ SslHandler sslHandler;
+ if (remoteAddress != null) {
+ sslHandler = context.newHandler(allocator, remoteAddress.host(), remoteAddress.port(), delegatedTaskExec);
+ } else {
+ sslHandler = context.newHandler(allocator, delegatedTaskExec);
+ }
sslHandler.setHandshakeTimeout(handshakeTimeoutMillis, TimeUnit.MILLISECONDS);
return sslHandler;
}
| diff --git a/src/test/java/io/vertx/core/http/HttpTLSTest.java b/src/test/java/io/vertx/core/http/HttpTLSTest.java
--- a/src/test/java/io/vertx/core/http/HttpTLSTest.java
+++ b/src/test/java/io/vertx/core/http/HttpTLSTest.java
@@ -27,8 +27,10 @@
import java.security.interfaces.RSAPrivateKey;
import java.util.*;
import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.BiConsumer;
import java.util.function.Function;
import java.util.function.Supplier;
@@ -36,7 +38,6 @@
import io.vertx.core.*;
import io.vertx.core.impl.VertxThread;
-import io.vertx.core.net.SSLOptions;
import io.vertx.core.net.impl.KeyStoreHelper;
import org.junit.Assume;
import org.junit.Rule;
@@ -2105,4 +2106,193 @@ public PrivateKey getPrivateKey(String alias) {
// It is fine using worker threads in this case
}
}
+
+ /**
+ * Test that for HttpServer, the peer host and port info is available in the SSLEngine
+ * when the X509ExtendedKeyManager.chooseEngineServerAlias is called.
+ *
+ * @throws Exception if an error occurs
+ */
+ @Test
+ public void testTLSServerSSLEnginePeerHost() throws Exception {
+ AtomicBoolean called = new AtomicBoolean(false);
+ testTLS(Cert.NONE, Trust.SERVER_JKS, testPeerHostServerCert(Cert.SERVER_JKS, called), Trust.NONE).pass();
+ assertTrue("X509ExtendedKeyManager.chooseEngineServerAlias is not called", called.get());
+ }
+
+ /**
+ * Test that for HttpServer with SNI, the peer host and port info is available in the SSLEngine
+ * when the X509ExtendedKeyManager.chooseEngineServerAlias is called.
+ *
+ * @throws Exception if an error occurs
+ */
+ @Test
+ public void testSNIServerSSLEnginePeerHost() throws Exception {
+ AtomicBoolean called = new AtomicBoolean(false);
+ TLSTest test = testTLS(Cert.NONE, Trust.SNI_JKS_HOST2, testPeerHostServerCert(Cert.SNI_JKS, called), Trust.NONE)
+ .serverSni()
+ .requestOptions(new RequestOptions().setSsl(true).setPort(DEFAULT_HTTPS_PORT).setHost("host2.com"))
+ .pass();
+ assertEquals("host2.com", TestUtils.cnOf(test.clientPeerCert()));
+ assertEquals("host2.com", test.indicatedServerName);
+ assertTrue("X509ExtendedKeyManager.chooseEngineServerAlias is not called", called.get());
+ }
+
+ /**
+ * Create a {@link Cert} that will verify the peer host is not null and port is not -1 in the {@link SSLEngine}
+ * when the {@link X509ExtendedKeyManager#chooseEngineServerAlias(String, Principal[], SSLEngine)}
+ * is called.
+ *
+ * @param delegate The delegated Cert
+ * @param chooseEngineServerAliasCalled Will be set to true when the
+ * X509ExtendedKeyManager.chooseEngineServerAlias is called
+ * @return The {@link Cert}
+ */
+ public static Cert<KeyCertOptions> testPeerHostServerCert(Cert<? extends KeyCertOptions> delegate, AtomicBoolean chooseEngineServerAliasCalled) {
+ return testPeerHostServerCert(delegate, (peerHost, peerPort) -> {
+ chooseEngineServerAliasCalled.set(true);
+ if (peerHost == null || peerPort == -1) {
+ throw new RuntimeException("Missing peer host/port");
+ }
+ });
+ }
+
+ /**
+ * Create a {@link Cert} that will verify the peer host and port in the {@link SSLEngine}
+ * when the {@link X509ExtendedKeyManager#chooseEngineServerAlias(String, Principal[], SSLEngine)}
+ * is called.
+ *
+ * @param delegate The delegated Cert
+ * @param peerHostVerifier The consumer to verify the peer host and port when the
+ * X509ExtendedKeyManager.chooseEngineServerAlias is called
+ * @return The {@link Cert}
+ */
+ public static Cert<KeyCertOptions> testPeerHostServerCert(Cert<? extends KeyCertOptions> delegate, BiConsumer<String, Integer> peerHostVerifier) {
+ return () -> new VerifyServerPeerHostKeyCertOptions(delegate.get(), peerHostVerifier);
+ }
+
+ private static class VerifyServerPeerHostKeyCertOptions implements KeyCertOptions {
+ private final KeyCertOptions delegate;
+ private final BiConsumer<String, Integer> peerHostVerifier;
+
+ VerifyServerPeerHostKeyCertOptions(KeyCertOptions delegate, BiConsumer<String, Integer> peerHostVerifier) {
+ this.delegate = delegate;
+ this.peerHostVerifier = peerHostVerifier;
+ }
+
+ @Override
+ public KeyCertOptions copy() {
+ return new VerifyServerPeerHostKeyCertOptions(delegate.copy(), peerHostVerifier);
+ }
+
+ @Override
+ public KeyManagerFactory getKeyManagerFactory(Vertx vertx) throws Exception {
+ return new VerifyServerPeerHostKeyManagerFactory(delegate.getKeyManagerFactory(vertx), peerHostVerifier);
+ }
+
+ @Override
+ public Function<String, KeyManagerFactory> keyManagerFactoryMapper(Vertx vertx) throws Exception {
+ Function<String, KeyManagerFactory> mapper = delegate.keyManagerFactoryMapper(vertx);
+ return serverName -> new VerifyServerPeerHostKeyManagerFactory(mapper.apply(serverName), peerHostVerifier);
+ }
+ }
+
+ private static class VerifyServerPeerHostKeyManagerFactory extends KeyManagerFactory {
+ VerifyServerPeerHostKeyManagerFactory(KeyManagerFactory delegate, BiConsumer<String, Integer> peerHostVerifier) {
+ super(new KeyManagerFactorySpiWrapper(delegate, peerHostVerifier), delegate.getProvider(), delegate.getAlgorithm());
+ }
+
+ private static class KeyManagerFactorySpiWrapper extends KeyManagerFactorySpi {
+ private final KeyManagerFactory delegate;
+ private final BiConsumer<String, Integer> peerHostVerifier;
+
+ KeyManagerFactorySpiWrapper(KeyManagerFactory delegate, BiConsumer<String, Integer> peerHostVerifier) {
+ super();
+ this.delegate = delegate;
+ this.peerHostVerifier = peerHostVerifier;
+ }
+
+ @Override
+ protected void engineInit(KeyStore keyStore, char[] chars) throws KeyStoreException, NoSuchAlgorithmException, UnrecoverableKeyException {
+ delegate.init(keyStore, chars);
+ }
+
+ @Override
+ protected void engineInit(ManagerFactoryParameters managerFactoryParameters) throws InvalidAlgorithmParameterException {
+ delegate.init(managerFactoryParameters);
+ }
+
+ @Override
+ protected KeyManager[] engineGetKeyManagers() {
+ KeyManager[] keyManagers = delegate.getKeyManagers().clone();
+ for (int i = 0; i < keyManagers.length; ++i) {
+ KeyManager km = keyManagers[i];
+ if (km instanceof X509KeyManager) {
+ keyManagers[i] = new VerifyServerPeerHostKeyManager((X509KeyManager) km, peerHostVerifier);
+ }
+ }
+
+ return keyManagers;
+ }
+ }
+ }
+
+ private static class VerifyServerPeerHostKeyManager extends X509ExtendedKeyManager {
+ private final X509KeyManager delegate;
+ private final BiConsumer<String, Integer> peerHostVerifier;
+
+ VerifyServerPeerHostKeyManager(X509KeyManager delegate, BiConsumer<String, Integer> peerHostVerifier) {
+ this.delegate = delegate;
+ this.peerHostVerifier = peerHostVerifier;
+ }
+
+ @Override
+ public String chooseEngineClientAlias(String[] keyType, Principal[] issuers, SSLEngine engine) {
+ if (delegate instanceof X509ExtendedKeyManager) {
+ return ((X509ExtendedKeyManager) delegate).chooseEngineClientAlias(keyType, issuers, engine);
+ } else {
+ return delegate.chooseClientAlias(keyType, issuers, null);
+ }
+ }
+
+ @Override
+ public String chooseEngineServerAlias(String keyType, Principal[] issuers, SSLEngine engine) {
+ peerHostVerifier.accept(engine.getPeerHost(), engine.getPeerPort());
+ if (delegate instanceof X509ExtendedKeyManager) {
+ return ((X509ExtendedKeyManager) delegate).chooseEngineServerAlias(keyType, issuers, engine);
+ } else {
+ return delegate.chooseServerAlias(keyType, issuers, null);
+ }
+ }
+
+ @Override
+ public String chooseClientAlias(String[] keyType, Principal[] issuers, Socket socket) {
+ return delegate.chooseClientAlias(keyType, issuers, socket);
+ }
+
+ @Override
+ public String chooseServerAlias(String keyType, Principal[] issuers, Socket socket) {
+ return delegate.chooseServerAlias(keyType, issuers, socket);
+ }
+
+ @Override
+ public String[] getClientAliases(String s, Principal[] principals) {
+ return delegate.getClientAliases(s, principals);
+ }
+
+ @Override
+ public String[] getServerAliases(String s, Principal[] principals) {
+ return delegate.getServerAliases(s, principals);
+ }
+
+ @Override
+ public X509Certificate[] getCertificateChain(String s) {
+ return delegate.getCertificateChain(s);
+ }
+
+ @Override
+ public PrivateKey getPrivateKey(String s) {
+ return delegate.getPrivateKey(s);
+ }
+ }
}
diff --git a/src/test/java/io/vertx/core/net/NetTest.java b/src/test/java/io/vertx/core/net/NetTest.java
--- a/src/test/java/io/vertx/core/net/NetTest.java
+++ b/src/test/java/io/vertx/core/net/NetTest.java
@@ -106,6 +106,7 @@
import java.util.function.Consumer;
import java.util.function.Supplier;
+import static io.vertx.core.http.HttpTLSTest.testPeerHostServerCert;
import static io.vertx.core.http.HttpTestBase.DEFAULT_HTTPS_HOST;
import static io.vertx.core.http.HttpTestBase.DEFAULT_HTTPS_PORT;
import static io.vertx.test.core.TestUtils.*;
@@ -4598,4 +4599,54 @@ public void testInvalidPort() {
} catch (IllegalArgumentException ignore) {
}
}
+
+ /**
+ * Test that for NetServer, the peer host and port info is available in the SSLEngine
+ * when the X509ExtendedKeyManager.chooseEngineServerAlias is called.
+ *
+ * @throws Exception if an error occurs
+ */
+ @Test
+ public void testTLSServerSSLEnginePeerHost() throws Exception {
+ testTLSServerSSLEnginePeerHostImpl(false);
+ }
+
+ /**
+ * Test that for NetServer with start TLS, the peer host and port info is available
+ * in the SSLEngine when the X509ExtendedKeyManager.chooseEngineServerAlias is called.
+ *
+ * @throws Exception if an error occurs
+ */
+ @Test
+ public void testStartTLSServerSSLEnginePeerHost() throws Exception {
+ testTLSServerSSLEnginePeerHostImpl(true);
+ }
+
+ private void testTLSServerSSLEnginePeerHostImpl(boolean startTLS) throws Exception {
+ AtomicBoolean called = new AtomicBoolean(false);
+ testTLS(Cert.NONE, Trust.SERVER_JKS, testPeerHostServerCert(Cert.SERVER_JKS, called), Trust.NONE,
+ false, false, true, startTLS);
+ assertTrue("X509ExtendedKeyManager.chooseEngineServerAlias is not called", called.get());
+ }
+
+ /**
+ * Test that for NetServer with SNI, the peer host and port info is available
+ * in the SSLEngine when the X509ExtendedKeyManager.chooseEngineServerAlias is called.
+ *
+ * @throws Exception if an error occurs
+ */
+ @Test
+ public void testSNIServerSSLEnginePeerHost() throws Exception {
+ AtomicBoolean called = new AtomicBoolean(false);
+ TLSTest test = new TLSTest()
+ .clientTrust(Trust.SNI_JKS_HOST2)
+ .address(SocketAddress.inetSocketAddress(DEFAULT_HTTPS_PORT, "host2.com"))
+ .serverCert(testPeerHostServerCert(Cert.SNI_JKS, called))
+ .sni(true);
+ test.run(true);
+ await();
+ assertEquals("host2.com", cnOf(test.clientPeerCert()));
+ assertEquals("host2.com", test.indicatedServerName);
+ assertTrue("X509ExtendedKeyManager.chooseEngineServerAlias is not called", called.get());
+ }
}
| Missing peer host and port info in SSLEngine for server SslHandler
### Version
4.4.9
### Context
We have a customized key manager extending `X509ExtendedKeyManager` that wants to override the `public String chooseEngineServerAlias(String keyType, Principal[] issuers, SSLEngine engine)` method to choose the server alias partly depending on the peer host address.
However, `engine.getPeerHost()` always returns `null`.
After reading the related code, I found that Netty's `SslContext.newHandler` does support passing in advisory peer information (peer host and port).
However, in vert.x, when creating the server `SslHandler` in `SslChannelProvider`, the peer host and port info is not passed to `SslContext.newHandler`, resulting in `null` from `engine.getPeerHost()` in `X509ExtendedKeyManager.chooseEngineServerAlias`.
(`SslChannelProvider` does provide the peer host and port info when creating the client `SslHandler`.)
I tried to pass the peer host and port info from `HttpServerWorker` to `SslChannelProvider.createServerHandler` and found that the peer host and port are then available in the `SSLEngine` in `X509ExtendedKeyManager.chooseEngineServerAlias`:
```diff
diff --git a/src/main/java/io/vertx/core/http/impl/HttpServerWorker.java b/src/main/java/io/vertx/core/http/impl/HttpServerWorker.java
index cf37c4e8b..82402c858 100644
--- a/src/main/java/io/vertx/core/http/impl/HttpServerWorker.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpServerWorker.java
@@ -35,6 +35,8 @@ import io.vertx.core.impl.VertxInternal;
import io.vertx.core.net.impl.*;
import io.vertx.core.spi.metrics.HttpServerMetrics;
+import java.net.InetSocketAddress;
+import java.net.SocketAddress;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.function.BiConsumer;
@@ -131,7 +133,12 @@ public class HttpServerWorker implements BiConsumer<Channel, SslChannelProvider>
private void configurePipeline(Channel ch, SslChannelProvider sslChannelProvider) {
ChannelPipeline pipeline = ch.pipeline();
if (options.isSsl()) {
- pipeline.addLast("ssl", sslChannelProvider.createServerHandler());
+ SocketAddress remoteAddress = ch.remoteAddress();
+ if (remoteAddress instanceof InetSocketAddress) {
+ pipeline.addLast("ssl", sslChannelProvider.createServerHandler(((InetSocketAddress) remoteAddress).getHostString(), ((InetSocketAddress) remoteAddress).getPort()));
+ } else {
+ pipeline.addLast("ssl", sslChannelProvider.createServerHandler());
+ }
ChannelPromise p = ch.newPromise();
pipeline.addLast("handshaker", new SslHandshakeCompletionHandler(p));
p.addListener(future -> {
diff --git a/src/main/java/io/vertx/core/net/impl/SslChannelProvider.java b/src/main/java/io/vertx/core/net/impl/SslChannelProvider.java
index 290bf8c23..cb5aba5d1 100644
--- a/src/main/java/io/vertx/core/net/impl/SslChannelProvider.java
+++ b/src/main/java/io/vertx/core/net/impl/SslChannelProvider.java
@@ -144,17 +144,21 @@ public class SslChannelProvider {
}
public ChannelHandler createServerHandler() {
+ return createServerHandler(null, -1);
+ }
+
+ public ChannelHandler createServerHandler(String peerHost, int peerPort) {
if (sni) {
return createSniHandler();
} else {
- return createServerSslHandler(useAlpn);
+ return createServerSslHandler(useAlpn, peerHost, peerPort);
}
}
- private SslHandler createServerSslHandler(boolean useAlpn) {
+ private SslHandler createServerSslHandler(boolean useAlpn, String peerHost, int peerPort) {
SslContext sslContext = sslServerContext(useAlpn);
Executor delegatedTaskExec = useWorkerPool ? workerPool : ImmediateExecutor.INSTANCE;
- SslHandler sslHandler = sslContext.newHandler(ByteBufAllocator.DEFAULT, delegatedTaskExec);
+ SslHandler sslHandler = sslContext.newHandler(ByteBufAllocator.DEFAULT, peerHost, peerPort, delegatedTaskExec);
sslHandler.setHandshakeTimeout(sslHandshakeTimeout, sslHandshakeTimeoutUnit);
return sslHandler;
}
```
There are a few other places calling `SslChannelProvider.createServerHandler`, so although this change works in my use case, a more complete fix may be needed; a sketch of what those call sites could share follows.
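To illustrate what that more complete fix could share, here is a minimal sketch of a peer-address extraction helper that the other call sites (e.g. `NetServerImpl`, `NetSocketImpl`) could route through. The class and method names are hypothetical, not vert.x API; only the two `createServerHandler` overloads from the diff above are assumed.
```java
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandler;
import io.vertx.core.net.impl.SslChannelProvider;

import java.net.InetSocketAddress;
import java.net.SocketAddress;

// Hypothetical helper, not actual vert.x code.
final class ServerSslHandlerFactory {

  private ServerSslHandlerFactory() {
  }

  /**
   * Derives advisory peer info from the channel's remote address and delegates
   * to the createServerHandler(String, int) overload sketched in the diff above.
   */
  static ChannelHandler createServerHandler(SslChannelProvider provider, Channel ch) {
    SocketAddress remoteAddress = ch.remoteAddress();
    if (remoteAddress instanceof InetSocketAddress) {
      InetSocketAddress inet = (InetSocketAddress) remoteAddress;
      return provider.createServerHandler(inet.getHostString(), inet.getPort());
    }
    // No inet peer info available (e.g. domain sockets): keep the old behavior.
    return provider.createServerHandler();
  }
}
```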
| @ben1222 can you provide a reproducer for this using the vertx tests so we are covered? That would help.
@vietj I tried to create a unit test under `Http1xTLSTest`:
```java
public class Http1xTLSTest extends HttpTLSTest {
private static final Logger LOG = LogManager.getLogger(Http1xTLSTest.class);
@Test
public void testTLSServerSSLEnginePeerHost() throws Exception {
testTLS(Cert.NONE, Trust.SERVER_JKS, () -> {
try {
return KeyCertOptions.wrap(new MyKeyManager((X509KeyManager) Cert.SERVER_JKS.get().getKeyManagerFactory(vertx).getKeyManagers()[0]));
} catch (Exception e) {
throw new RuntimeException(e);
}
}, Trust.NONE).pass();
}
private static class MyKeyManager extends X509ExtendedKeyManager {
private final X509KeyManager wrapped;
MyKeyManager(X509KeyManager wrapped) {
this.wrapped = wrapped;
}
@Override
public String chooseEngineClientAlias(String[] keyType, Principal[] issuers, SSLEngine engine) {
throw new UnsupportedOperationException();
}
@Override
public String chooseEngineServerAlias(String keyType, Principal[] issuers, SSLEngine engine) {
LOG.info("In chooseEngineServerAlias, keyType: {}, issuers: {}, peer host: {}, peer port: {}",
keyType, issuers, engine.getPeerHost(), engine.getPeerPort());
if (engine.getPeerHost() == null || engine.getPeerPort() == -1) {
throw new RuntimeException("Missing peer host/port");
}
return wrapped.chooseServerAlias(keyType, issuers, null);
}
@Override
public String chooseClientAlias(String[] keyType, Principal[] issuers, Socket socket) {
throw new UnsupportedOperationException();
}
@Override
public String chooseServerAlias(String keyType, Principal[] issuers, Socket socket) {
throw new UnsupportedOperationException();
}
@Override
public String[] getClientAliases(String s, Principal[] principals) {
throw new UnsupportedOperationException();
}
@Override
public String[] getServerAliases(String s, Principal[] principals) {
return wrapped.getServerAliases(s, principals);
}
@Override
public X509Certificate[] getCertificateChain(String s) {
LOG.info("In getCertificateChain, s: {}", s);
return wrapped.getCertificateChain(s);
}
@Override
public PrivateKey getPrivateKey(String s) {
LOG.info("In getPrivateKey, s: {}", s);
return wrapped.getPrivateKey(s);
}
}
//...
}
```
Currently it will fail with:
```
Starting test: Http1xTLSTest#testTLSServerSSLEnginePeerHost
2024-08-30 00:02:25,606 [vert.x-eventloop-thread-2] INFO io.vertx.core.http.Http1xTLSTest Http1xTLSTest$MyKeyManager.chooseEngineServerAlias:296 null - In chooseEngineServerAlias, keyType: EC, issuers: null, peer host: null, peer port: -1
java.lang.RuntimeException: Missing peer host/port
at io.vertx.core.http.Http1xTLSTest$MyKeyManager.chooseEngineServerAlias(Http1xTLSTest.java:299)
at java.base/sun.security.ssl.X509Authentication$X509PossessionGenerator.createServerPossession(X509Authentication.java:293)
at java.base/sun.security.ssl.X509Authentication$X509PossessionGenerator.createPossession(X509Authentication.java:214)
at java.base/sun.security.ssl.X509Authentication.createPossession(X509Authentication.java:90)
at java.base/sun.security.ssl.CertificateMessage$T13CertificateProducer.choosePossession(CertificateMessage.java:1081)
at java.base/sun.security.ssl.CertificateMessage$T13CertificateProducer.onProduceCertificate(CertificateMessage.java:970)
at java.base/sun.security.ssl.CertificateMessage$T13CertificateProducer.produce(CertificateMessage.java:961)
at java.base/sun.security.ssl.SSLHandshake.produce(SSLHandshake.java:436)
...
java.lang.AssertionError: Should not fail Failed to create SSL connection
at org.junit.Assert.fail(Assert.java:89)
at org.junit.Assert.assertTrue(Assert.java:42)
at org.junit.Assert.assertFalse(Assert.java:65)
at io.vertx.test.core.AsyncTestBase.assertFalse(AsyncTestBase.java:259)
at io.vertx.core.http.HttpTLSTest.access$300(HttpTLSTest.java:68)
at io.vertx.core.http.HttpTLSTest$TLSTest.lambda$run$10(HttpTLSTest.java:1312)
at io.vertx.core.impl.future.FutureImpl$2.onFailure(FutureImpl.java:117)
...
```
With the changes in `HttpServerWorker` and `SslChannelProvider`, it succeeds:
```
Starting test: Http1xTLSTest#testTLSServerSSLEnginePeerHost
2024-08-30 00:10:35,108 [vert.x-eventloop-thread-2] INFO io.vertx.core.http.Http1xTLSTest Http1xTLSTest$MyKeyManager.chooseEngineServerAlias:294 null - In chooseEngineServerAlias, keyType: EC, issuers: null, peer host: 127.0.0.1, peer port: 48470
2024-08-30 00:10:35,111 [vert.x-eventloop-thread-2] INFO io.vertx.core.http.Http1xTLSTest Http1xTLSTest$MyKeyManager.chooseEngineServerAlias:294 null - In chooseEngineServerAlias, keyType: EC, issuers: null, peer host: 127.0.0.1, peer port: 48470
2024-08-30 00:10:35,112 [vert.x-eventloop-thread-2] INFO io.vertx.core.http.Http1xTLSTest Http1xTLSTest$MyKeyManager.chooseEngineServerAlias:294 null - In chooseEngineServerAlias, keyType: EC, issuers: null, peer host: 127.0.0.1, peer port: 48470
2024-08-30 00:10:35,112 [vert.x-eventloop-thread-2] INFO io.vertx.core.http.Http1xTLSTest Http1xTLSTest$MyKeyManager.chooseEngineServerAlias:294 null - In chooseEngineServerAlias, keyType: RSA, issuers: null, peer host: 127.0.0.1, peer port: 48470
2024-08-30 00:10:35,112 [vert.x-eventloop-thread-2] INFO io.vertx.core.http.Http1xTLSTest Http1xTLSTest$MyKeyManager.getPrivateKey:330 null - In getPrivateKey, s: test-store
2024-08-30 00:10:35,113 [vert.x-eventloop-thread-2] INFO io.vertx.core.http.Http1xTLSTest Http1xTLSTest$MyKeyManager.getCertificateChain:324 null - In getCertificateChain, s: test-store
```
do you mind contributing a pull request to the 4.x and master branches?
@vietj I can give it a try. Do I need to go through some process before sending the pull request? I see the contributing guidelines mention signing the ECA?
Since there are a few other places (`NetServerImpl`, `NetSocketImpl`) calling `SslChannelProvider.createServerHandler`, I'll try to update them as well to pass the peer host info; is that OK?
you should sign the Eclipse Contributor Agreement (ECA) indeed
everything should be updated and tested in master and 4.x branches
@vietj I signed the ECA and opened pull requests on the 4.x branch (#5346) and the master branch (#5347); please review. | 2024-10-08T06:16:25Z | 4.5 |
eclipse-vertx/vert.x | 4616 | eclipse-vertx__vert.x-4616 | [
"4610"
] | 0e77586f55f258dfd4e24f7ffd6d59bcb5b25406 | diff --git a/src/main/java/io/vertx/core/parsetools/impl/JsonParserImpl.java b/src/main/java/io/vertx/core/parsetools/impl/JsonParserImpl.java
--- a/src/main/java/io/vertx/core/parsetools/impl/JsonParserImpl.java
+++ b/src/main/java/io/vertx/core/parsetools/impl/JsonParserImpl.java
@@ -288,6 +288,7 @@ private void checkPending() {
}
if (ended) {
if (pending.isEmpty()) {
+ checkExceptions();
Handler<Void> handler = endHandler;
endHandler = null;
if (handler != null) {
| diff --git a/src/test/java/io/vertx/core/parsetools/JsonParserTest.java b/src/test/java/io/vertx/core/parsetools/JsonParserTest.java
--- a/src/test/java/io/vertx/core/parsetools/JsonParserTest.java
+++ b/src/test/java/io/vertx/core/parsetools/JsonParserTest.java
@@ -96,21 +96,30 @@ public void testParseEmptyArray() {
}
@Test
- public void parseUnfinished() {
- Buffer data = Buffer.buffer("{\"un\":\"finished\"");
+ public void parseUnfinishedThrowingException() {
+ StringBuilder events = new StringBuilder();
+ JsonParser parser = JsonParser.newParser();
+ parser.handler(e -> events.append("json,"));
+ parser.endHandler(v -> events.append("end,"));
+ parser.handle(Buffer.buffer("{\"un\":\"finished\""));
try {
- JsonParser parser = JsonParser.newParser();
- parser.handle(data);
parser.end();
fail();
} catch (DecodeException expected) {
}
+ assertEquals("json,json,", events.toString());
+ }
+
+ @Test
+ public void parseUnfinishedExceptionHandler() {
+ StringBuilder events = new StringBuilder();
JsonParser parser = JsonParser.newParser();
- List<Throwable> errors = new ArrayList<>();
- parser.exceptionHandler(errors::add);
- parser.handle(data);
+ parser.handler(e -> events.append("json,"));
+ parser.endHandler(v -> events.append("end,"));
+ parser.exceptionHandler(e -> events.append("exception,"));
+ parser.handle(Buffer.buffer("{\"un\":\"finished\""));
parser.end();
- assertEquals(1, errors.size());
+ assertEquals("json,json,exception,end,", events.toString());
}
@Test
| JsonParser: endHandler called before exceptionHandler
### Version
Vert.x 4.3.8
### Context
JsonParser may now report errors with a call to exceptionHandler after endHandler is called.
In 4.3.7 and earlier releases, endHandler was guaranteed to be the last callback if the document was valid.
With the new behavior, it is impossible to determine whether a document is valid except by waiting for events to propagate.
Related:
#4596
#4597
### Do you have a reproducer?
```java
@Test
// 4.3.8 [end][exception]
// 4.3.7 [exception]
public void testErrorDoc(TestContext context) {
var async = context.async();
StringBuilder s = new StringBuilder();
JsonParser jsonParser = JsonParser.newParser();
jsonParser.endHandler(x -> s.append("[end]"));
jsonParser.exceptionHandler(x -> s.append("[exception]"));
jsonParser.write(Buffer.buffer("{"));
jsonParser.end();
Vertx.vertx().setTimer(10, x -> {
context.assertEquals("[end][exception]", s.toString());
async.complete();
});
}
```
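As an aside, a minimal consumer-side sketch of coping with this until it is fixed: record failures in the exceptionHandler and inspect the flag only after `end()` has returned. This assumes the parser is fed synchronously, as in the reproducer above, so that both callbacks have fired by the time `end()` returns; the class and handler bodies are illustrative, not part of the reported issue.
```java
import io.vertx.core.buffer.Buffer;
import io.vertx.core.parsetools.JsonParser;

import java.util.concurrent.atomic.AtomicBoolean;

public class ValidityCheckSketch {
  public static void main(String[] args) {
    AtomicBoolean failed = new AtomicBoolean();
    JsonParser parser = JsonParser.newParser();
    parser.exceptionHandler(t -> failed.set(true));
    parser.endHandler(v -> {
      // In 4.3.8 the document may still turn out to be invalid at this point,
      // because the exception callback can arrive after this one.
    });
    parser.write(Buffer.buffer("{"));
    parser.end();
    // Only trust validity once end() has returned and no failure was recorded.
    System.out.println(failed.get() ? "invalid document" : "valid document");
  }
}
```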
| 2023-02-20T15:03:11Z | 4.3 |
|
eclipse-vertx/vert.x | 4597 | eclipse-vertx__vert.x-4597 | [
"4338"
] | 7f87fab4de9a967eb57d5dff44c3f4b425ffd3f6 | diff --git a/src/main/java/io/vertx/core/parsetools/impl/JsonParserImpl.java b/src/main/java/io/vertx/core/parsetools/impl/JsonParserImpl.java
--- a/src/main/java/io/vertx/core/parsetools/impl/JsonParserImpl.java
+++ b/src/main/java/io/vertx/core/parsetools/impl/JsonParserImpl.java
@@ -12,6 +12,7 @@
package io.vertx.core.parsetools.impl;
import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonLocation;
import com.fasterxml.jackson.core.JsonToken;
import com.fasterxml.jackson.core.ObjectCodec;
import com.fasterxml.jackson.core.base.ParserBase;
@@ -31,10 +32,7 @@
import io.vertx.core.streams.ReadStream;
import java.io.IOException;
-import java.util.ArrayDeque;
-import java.util.Deque;
-import java.util.List;
-import java.util.Map;
+import java.util.*;
/**
* @author <a href="mailto:julien@julienviet.com">Julien Viet</a>
@@ -54,6 +52,7 @@ public class JsonParserImpl implements JsonParser {
private final ReadStream<Buffer> stream;
private boolean emitting;
private final Deque<JsonEventImpl> pending = new ArrayDeque<>();
+ private List<IOException> collectedExceptions;
public JsonParserImpl(ReadStream<Buffer> stream) {
this.stream = stream;
@@ -143,21 +142,24 @@ private void handleEvent(JsonEventImpl event) {
}
}
+ private void handle(IOException ioe) {
+ if (collectedExceptions == null) {
+ collectedExceptions = new ArrayList<>();
+ }
+ collectedExceptions.add(ioe);
+ }
+
@Override
public void handle(Buffer data) {
byte[] bytes = data.getBytes();
try {
parser.feedInput(bytes, 0, bytes.length);
- checkTokens();
} catch (IOException e) {
- if (exceptionHandler != null) {
- exceptionHandler.handle(e);
- return;
- } else {
- throw new DecodeException(e.getMessage(), e);
- }
+ handle(e);
}
+ checkTokens();
checkPending();
+ checkExceptions();
}
@Override
@@ -167,25 +169,33 @@ public void end() {
}
ended = true;
parser.endOfInput();
- try {
- checkTokens();
- } catch (IOException e) {
- if (exceptionHandler != null) {
- exceptionHandler.handle(e);
- return;
- } else {
- throw new DecodeException(e.getMessage(), e);
- }
- }
+ checkTokens();
checkPending();
+ checkExceptions();
}
- private void checkTokens() throws IOException {
+ private void checkTokens() {
+ JsonLocation prevLocation = null;
while (true) {
- JsonToken token = parser.nextToken();
+ JsonToken token;
+ try {
+ token = parser.nextToken();
+ } catch (IOException e) {
+ JsonLocation location = parser.currentLocation();
+ if (prevLocation != null) {
+ if (location.equals(prevLocation)) {
+ // If we haven't done any progress, give up
+ return;
+ }
+ }
+ prevLocation = location;
+ handle(e);
+ continue;
+ }
if (token == null || token == JsonToken.NOT_AVAILABLE) {
break;
}
+ prevLocation = null;
String field = currentField;
currentField = null;
JsonEventImpl event;
@@ -199,11 +209,20 @@ private void checkTokens() throws IOException {
break;
}
case FIELD_NAME: {
- currentField = parser.getCurrentName();
+ try {
+ currentField = parser.getCurrentName();
+ } catch (IOException e) {
+ handle(e);
+ }
continue;
}
case VALUE_STRING: {
- event = new JsonEventImpl(token, JsonEventType.VALUE, field, parser.getText());
+ try {
+ event = new JsonEventImpl(token, JsonEventType.VALUE, field, parser.getText());
+ } catch (IOException e) {
+ handle(e);
+ continue;
+ }
break;
}
case VALUE_TRUE: {
@@ -219,11 +238,21 @@ private void checkTokens() throws IOException {
break;
}
case VALUE_NUMBER_INT: {
- event = new JsonEventImpl(token, JsonEventType.VALUE, field, parser.getLongValue());
+ try {
+ event = new JsonEventImpl(token, JsonEventType.VALUE, field, parser.getLongValue());
+ } catch (IOException e) {
+ handle(e);
+ continue;
+ }
break;
}
case VALUE_NUMBER_FLOAT: {
- event = new JsonEventImpl(token, JsonEventType.VALUE, field, parser.getDoubleValue());
+ try {
+ event = new JsonEventImpl(token, JsonEventType.VALUE, field, parser.getDoubleValue());
+ } catch (IOException e) {
+ handle(e);
+ continue;
+ }
break;
}
case END_OBJECT: {
@@ -288,6 +317,21 @@ private void checkPending() {
}
}
+ private void checkExceptions() {
+ List<IOException> exceptions = collectedExceptions;
+ collectedExceptions = null;
+ if (exceptions != null && exceptions.size() > 0) {
+ if (exceptionHandler != null) {
+ for (IOException ioe : exceptions) {
+ exceptionHandler.handle(ioe);
+ }
+ } else {
+ IOException ioe = exceptions.get(0);
+ throw new DecodeException(ioe.getMessage(), ioe);
+ }
+ }
+ }
+
@Override
public JsonParser objectEventMode() {
objectValueMode = false;
| diff --git a/src/test/java/io/vertx/core/parsetools/JsonParserTest.java b/src/test/java/io/vertx/core/parsetools/JsonParserTest.java
--- a/src/test/java/io/vertx/core/parsetools/JsonParserTest.java
+++ b/src/test/java/io/vertx/core/parsetools/JsonParserTest.java
@@ -113,6 +113,35 @@ public void parseUnfinished() {
assertEquals(1, errors.size());
}
+ @Test
+ public void testParseWithErrors() {
+ Buffer data = Buffer.buffer("{\"foo\":\"foo_value\"},{\"bar\":\"bar_value\"},{\"juu\":\"juu_value\"}");
+ JsonParser parser = JsonParser.newParser();
+ List<JsonObject> objects = new ArrayList<>();
+ List<Throwable> errors = new ArrayList<>();
+ AtomicInteger endCount = new AtomicInteger();
+ parser.objectValueMode()
+ .handler(event -> objects.add(event.objectValue()))
+ .exceptionHandler(errors::add)
+ .endHandler(v -> endCount.incrementAndGet());
+ parser.write(data);
+ assertEquals(3, objects.size());
+ List<JsonObject> expected = Arrays.asList(
+ new JsonObject().put("foo", "foo_value"),
+ new JsonObject().put("bar", "bar_value"),
+ new JsonObject().put("juu", "juu_value")
+ );
+ assertEquals(expected, objects);
+ assertEquals(2, errors.size());
+ assertEquals(0, endCount.get());
+ objects.clear();
+ errors.clear();
+ parser.end();
+ assertEquals(Collections.emptyList(), objects);
+ assertEquals(Collections.emptyList(), errors);
+ assertEquals(1, endCount.get());
+ }
+
@Test
public void parseNumberFormatException() {
Buffer data = Buffer.buffer(Long.MAX_VALUE + "0");
| Inconsistent error handling with JsonParser
### Version
4.2.6
### Context
I use the JsonParser class to handle a stream of JSON coming from a client request (code follows). When a single error is present in the JSON (wrong integer value, additional comma, etc.), everything works as expected: the exception handler is called, the code flow continues, and the event handler is called with the JSON minus the unparseable token (which is fine in my case but maybe weird in others; is it expected/configurable?). When there is more than one error in the JSON, though, the exception handler is called only twice, and neither the JSON event handler nor the end handler is called...
### Do you have a reproducer?
```java
router.post("/test")
.handler(ctx -> JsonParser.newParser(ctx.request()).objectValueMode()
.handler(event -> System.out.println("event received"))
.exceptionHandler(ex -> System.out.println(ex.getMessage()))
.endHandler(ignored -> { System.out.println("done"); ctx.response().end("done\n"); }));
```
### Steps to reproduce
1. Run the reproducer in a verticle
2. Call the route with a payload like `{"test":"value"}{"test1":"value1"}{"test2":"value2"}` and observe the expected behaviour
3. Call the route with a payload like `{"test":"value"},{"test1":"value1"}{"test2":"value2"}` and observe the expected behaviour
4. Call the route with a payload like `{"test":"value"},{"test1":"value1"},{"test2":"value2"}` and observe the unexpected behaviour
### Extra
jdk11
PS:
I found a workaround in the meantime. I could open a PR, but really, I'm not sure it's a good solution; here it [is](https://github.com/MYDIH/vert.x/commit/efcb5d3805ee7f41da1a09caaf8c721f13e9990a)
It introduces some differences between the case where an exception handler is set and the case where it's not, and it isn't tested with JSON input exceeding a single buffer...
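To make the reported sequence concrete, here is a sketch that records which handlers fire for the payload from step 4; the event-recording handler bodies are illustrative assumptions, not code from the original report:
```java
import io.vertx.core.buffer.Buffer;
import io.vertx.core.parsetools.JsonParser;

import java.util.ArrayList;
import java.util.List;

public class HandlerSequenceSketch {
  public static void main(String[] args) {
    List<String> events = new ArrayList<>();
    JsonParser parser = JsonParser.newParser();
    parser.objectValueMode()
      .handler(e -> events.add("json"))
      .exceptionHandler(t -> events.add("exception"))
      .endHandler(v -> events.add("end"));
    // Payload from step 4: a stray comma between each pair of objects.
    parser.write(Buffer.buffer("{\"test\":\"value\"},{\"test1\":\"value1\"},{\"test2\":\"value2\"}"));
    parser.end();
    // As reported against 4.2.6: only [exception, exception] is recorded,
    // with no json or end events following.
    System.out.println(events);
  }
}
```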
| 2023-02-01T15:27:10Z | 4.3 |
|
eclipse-vertx/vert.x | 4485 | eclipse-vertx__vert.x-4485 | [
"4484"
] | e3100d67bcbe85665f7147bcc48ce6a4720367db | diff --git a/src/main/java/io/vertx/core/http/impl/EndpointKey.java b/src/main/java/io/vertx/core/http/impl/EndpointKey.java
--- a/src/main/java/io/vertx/core/http/impl/EndpointKey.java
+++ b/src/main/java/io/vertx/core/http/impl/EndpointKey.java
@@ -13,6 +13,8 @@
import io.vertx.core.net.ProxyOptions;
import io.vertx.core.net.SocketAddress;
+import java.util.Objects;
+
final class EndpointKey {
final boolean ssl;
@@ -38,7 +40,7 @@ public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
EndpointKey that = (EndpointKey) o;
- return ssl == that.ssl && serverAddr.equals(that.serverAddr) && peerAddr.equals(that.peerAddr);
+ return ssl == that.ssl && serverAddr.equals(that.serverAddr) && peerAddr.equals(that.peerAddr) && equals(proxyOptions, that.proxyOptions);
}
@Override
@@ -46,6 +48,31 @@ public int hashCode() {
int result = ssl ? 1 : 0;
result = 31 * result + peerAddr.hashCode();
result = 31 * result + serverAddr.hashCode();
+ if (proxyOptions != null) {
+ result = 31 * result + hashCode(proxyOptions);
+ }
return result;
}
+
+ private static boolean equals(ProxyOptions options1, ProxyOptions options2) {
+ if (options1 == options2) {
+ return true;
+ }
+ if (options1 != null && options2 != null) {
+      return Objects.equals(options1.getHost(), options2.getHost()) &&
+        options1.getPort() == options2.getPort() &&
+        options1.getType() == options2.getType() &&
+        Objects.equals(options1.getUsername(), options2.getUsername()) &&
+        Objects.equals(options1.getPassword(), options2.getPassword());
+ }
+ return false;
+ }
+
+ private static int hashCode(ProxyOptions options) {
+ if (options.getUsername() != null && options.getPassword() != null) {
+ return Objects.hash(options.getHost(), options.getPort(), options.getType(), options.getUsername(), options.getPassword());
+ } else {
+      // Without credentials, still hash the remaining fields instead of collapsing to 0.
+      return Objects.hash(options.getHost(), options.getPort(), options.getType());
+    }
+ }
}
diff --git a/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java b/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java
--- a/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java
@@ -25,6 +25,7 @@
import io.vertx.core.net.impl.pool.ConnectionManager;
import io.vertx.core.net.impl.pool.ConnectionPool;
import io.vertx.core.net.impl.pool.Endpoint;
+import io.vertx.core.net.impl.pool.EndpointProvider;
import io.vertx.core.net.impl.pool.Lease;
import io.vertx.core.spi.metrics.ClientMetrics;
import io.vertx.core.spi.metrics.HttpClientMetrics;
@@ -216,28 +217,11 @@ private void checkExpired(Handler<Long> checker) {
}
private ConnectionManager<EndpointKey, Lease<HttpClientConnection>> httpConnectionManager() {
- int maxPoolSize = Math.max(options.getMaxPoolSize(), options.getHttp2MaxPoolSize());
- return new ConnectionManager<>((key, ctx, dispose) -> {
- ClientMetrics metrics = this.metrics != null ? this.metrics.createEndpointMetrics(key.serverAddr, maxPoolSize) : null;
- HttpChannelConnector connector = new HttpChannelConnector(this, netClient, key.proxyOptions, metrics, options.getProtocolVersion(), key.ssl, options.isUseAlpn(), key.peerAddr, key.serverAddr);
- return new SharedClientHttpStreamEndpoint(
- this,
- metrics,
- options.getMaxWaitQueueSize(),
- options.getMaxPoolSize(),
- options.getHttp2MaxPoolSize(),
- connector,
- dispose);
- });
+ return new ConnectionManager<>();
}
private ConnectionManager<EndpointKey, HttpClientConnection> webSocketConnectionManager() {
- int maxPoolSize = options.getMaxWebSockets();
- return new ConnectionManager<>((key, ctx, dispose) -> {
- ClientMetrics metrics = this.metrics != null ? this.metrics.createEndpointMetrics(key.serverAddr, maxPoolSize) : null;
- HttpChannelConnector connector = new HttpChannelConnector(this, netClient, key.proxyOptions, metrics, HttpVersion.HTTP_1_1, key.ssl, false, key.peerAddr, key.serverAddr);
- return new WebSocketEndpoint(null, maxPoolSize, connector, dispose);
- });
+ return new ConnectionManager<>();
}
Function<ContextInternal, EventLoopContext> contextProvider() {
@@ -275,6 +259,16 @@ private String getHost(RequestOptions request) {
return options.getDefaultHost();
}
+ private ProxyOptions resolveProxyOptions(ProxyOptions proxyOptions, SocketAddress addr) {
+ proxyOptions = getProxyOptions(proxyOptions);
+ if (proxyFilter != null) {
+ if (!proxyFilter.test(addr)) {
+ proxyOptions = null;
+ }
+ }
+ return proxyOptions;
+ }
+
HttpClientMetrics metrics() {
return metrics;
}
@@ -303,15 +297,10 @@ public void webSocket(WebSocketConnectOptions connectOptions, Handler<AsyncResul
}
private void webSocket(WebSocketConnectOptions connectOptions, PromiseInternal<WebSocket> promise) {
- ProxyOptions proxyOptions = getProxyOptions(connectOptions.getProxyOptions());
int port = getPort(connectOptions);
String host = getHost(connectOptions);
SocketAddress addr = SocketAddress.inetSocketAddress(port, host);
- if (proxyFilter != null) {
- if (!proxyFilter.test(addr)) {
- proxyOptions = null;
- }
- }
+ ProxyOptions proxyOptions = resolveProxyOptions(connectOptions.getProxyOptions(), addr);
EndpointKey key = new EndpointKey(connectOptions.isSsl() != null ? connectOptions.isSsl() : options.isSsl(), proxyOptions, addr, addr);
ContextInternal ctx = promise.context();
EventLoopContext eventLoopContext;
@@ -320,9 +309,19 @@ private void webSocket(WebSocketConnectOptions connectOptions, PromiseInternal<W
} else {
eventLoopContext = vertx.createEventLoopContext(ctx.nettyEventLoop(), ctx.workerPool(), ctx.classLoader());
}
+ EndpointProvider<HttpClientConnection> provider = new EndpointProvider<HttpClientConnection>() {
+ @Override
+ public Endpoint<HttpClientConnection> create(ContextInternal ctx, Runnable dispose) {
+ int maxPoolSize = options.getMaxWebSockets();
+ ClientMetrics metrics = HttpClientImpl.this.metrics != null ? HttpClientImpl.this.metrics.createEndpointMetrics(key.serverAddr, maxPoolSize) : null;
+ HttpChannelConnector connector = new HttpChannelConnector(HttpClientImpl.this, netClient, proxyOptions, metrics, HttpVersion.HTTP_1_1, key.ssl, false, key.peerAddr, key.serverAddr);
+ return new WebSocketEndpoint(null, maxPoolSize, connector, dispose);
+ }
+ };
webSocketCM.getConnection(
eventLoopContext,
key,
+ provider,
ar -> {
if (ar.succeeded()) {
Http1xClientConnection conn = (Http1xClientConnection) ar.result();
@@ -550,7 +549,6 @@ private void doRequest(RequestOptions request, PromiseInternal<HttpClientRequest
if (server == null) {
server = SocketAddress.inetSocketAddress(port, host);
}
- ProxyOptions proxyOptions = getProxyOptions(request.getProxyOptions());
HttpMethod method = request.getMethod();
String requestURI = request.getURI();
Boolean ssl = request.isSsl();
@@ -566,37 +564,36 @@ private void doRequest(RequestOptions request, PromiseInternal<HttpClientRequest
throw new IllegalArgumentException("Must enable ALPN when using H2");
}
checkClosed();
- if (proxyFilter != null) {
- if (!proxyFilter.test(server)) {
- proxyOptions = null;
- }
- }
- if (proxyOptions != null) {
- if (!useSSL && proxyOptions.getType() == ProxyType.HTTP) {
- // If the requestURI is as not absolute URI then we do not recompute one for the proxy
- if (!ABS_URI_START_PATTERN.matcher(requestURI).find()) {
- int defaultPort = 80;
- String addPort = (port != -1 && port != defaultPort) ? (":" + port) : "";
- requestURI = (ssl == Boolean.TRUE ? "https://" : "http://") + host + addPort + requestURI;
- }
- if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
- if (headers == null) {
- headers = HttpHeaders.headers();
- }
- headers.add("Proxy-Authorization", "Basic " + Base64.getEncoder()
- .encodeToString((proxyOptions.getUsername() + ":" + proxyOptions.getPassword()).getBytes()));
- }
- server = SocketAddress.inetSocketAddress(proxyOptions.getPort(), proxyOptions.getHost());
- proxyOptions = null;
- }
- }
+ ProxyOptions proxyOptions = resolveProxyOptions(request.getProxyOptions(), server);
String peerHost = host;
if (peerHost.endsWith(".")) {
peerHost = peerHost.substring(0, peerHost.length() - 1);
}
SocketAddress peerAddress = SocketAddress.inetSocketAddress(port, peerHost);
- doRequest(method, peerAddress, server, host, port, useSSL, requestURI, headers, request.getTraceOperation(), timeout, followRedirects, proxyOptions, promise);
+
+ EndpointKey key;
+ if (proxyOptions != null && !useSSL && proxyOptions.getType() == ProxyType.HTTP) {
+ // If the requestURI is as not absolute URI then we do not recompute one for the proxy
+ if (!ABS_URI_START_PATTERN.matcher(requestURI).find()) {
+ int defaultPort = 80;
+ String addPort = (port != -1 && port != defaultPort) ? (":" + port) : "";
+ requestURI = (ssl == Boolean.TRUE ? "https://" : "http://") + host + addPort + requestURI;
+ }
+ if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
+ if (headers == null) {
+ headers = HttpHeaders.headers();
+ }
+ headers.add("Proxy-Authorization", "Basic " + Base64.getEncoder()
+ .encodeToString((proxyOptions.getUsername() + ":" + proxyOptions.getPassword()).getBytes()));
+ }
+ server = SocketAddress.inetSocketAddress(proxyOptions.getPort(), proxyOptions.getHost());
+ key = new EndpointKey(useSSL, proxyOptions, server, peerAddress);
+ proxyOptions = null;
+ } else {
+ key = new EndpointKey(useSSL, proxyOptions, server, peerAddress);
+ }
+ doRequest(method, peerAddress, server, host, port, useSSL, requestURI, headers, request.getTraceOperation(), timeout, followRedirects, proxyOptions, key, promise);
}
private void doRequest(
@@ -612,10 +609,26 @@ private void doRequest(
long timeout,
Boolean followRedirects,
ProxyOptions proxyOptions,
+ EndpointKey key,
PromiseInternal<HttpClientRequest> requestPromise) {
ContextInternal ctx = requestPromise.context();
- EndpointKey key = new EndpointKey(useSSL, proxyOptions, server, peerAddress);
- httpCM.getConnection(ctx, key, timeout, ar1 -> {
+ EndpointProvider<Lease<HttpClientConnection>> provider = new EndpointProvider<Lease<HttpClientConnection>>() {
+ @Override
+ public Endpoint<Lease<HttpClientConnection>> create(ContextInternal ctx, Runnable dispose) {
+ int maxPoolSize = Math.max(options.getMaxPoolSize(), options.getHttp2MaxPoolSize());
+ ClientMetrics metrics = HttpClientImpl.this.metrics != null ? HttpClientImpl.this.metrics.createEndpointMetrics(key.serverAddr, maxPoolSize) : null;
+ HttpChannelConnector connector = new HttpChannelConnector(HttpClientImpl.this, netClient, proxyOptions, metrics, options.getProtocolVersion(), key.ssl, options.isUseAlpn(), key.peerAddr, key.serverAddr);
+ return new SharedClientHttpStreamEndpoint(
+ HttpClientImpl.this,
+ metrics,
+ options.getMaxWaitQueueSize(),
+ options.getMaxPoolSize(),
+ options.getHttp2MaxPoolSize(),
+ connector,
+ dispose);
+ }
+ };
+ httpCM.getConnection(ctx, key, provider, timeout, ar1 -> {
if (ar1.succeeded()) {
Lease<HttpClientConnection> lease = ar1.result();
HttpClientConnection conn = lease.get();
diff --git a/src/main/java/io/vertx/core/net/impl/pool/ConnectionManager.java b/src/main/java/io/vertx/core/net/impl/pool/ConnectionManager.java
--- a/src/main/java/io/vertx/core/net/impl/pool/ConnectionManager.java
+++ b/src/main/java/io/vertx/core/net/impl/pool/ConnectionManager.java
@@ -14,7 +14,6 @@
import io.vertx.core.AsyncResult;
import io.vertx.core.Handler;
import io.vertx.core.impl.ContextInternal;
-import io.vertx.core.impl.EventLoopContext;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
@@ -28,30 +27,27 @@
*/
public class ConnectionManager<K, C> {
- private final EndpointProvider<K, C> endpointProvider;
private final Map<K, Endpoint<C>> endpointMap = new ConcurrentHashMap<>();
- public ConnectionManager(EndpointProvider<K, C> endpointProvider) {
- this.endpointProvider = endpointProvider;
- }
-
public void forEach(Consumer<Endpoint<C>> consumer) {
endpointMap.values().forEach(consumer);
}
public void getConnection(ContextInternal ctx,
K key,
+ EndpointProvider<C> provider,
Handler<AsyncResult<C>> handler) {
- getConnection(ctx, key, 0, handler);
+ getConnection(ctx, key, provider, 0, handler);
}
public void getConnection(ContextInternal ctx,
K key,
+ EndpointProvider<C> provider,
long timeout,
Handler<AsyncResult<C>> handler) {
Runnable dispose = () -> endpointMap.remove(key);
while (true) {
- Endpoint<C> endpoint = endpointMap.computeIfAbsent(key, k -> endpointProvider.create(key, ctx, dispose));
+ Endpoint<C> endpoint = endpointMap.computeIfAbsent(key, k -> provider.create(ctx, dispose));
if (endpoint.getConnection(ctx, timeout, handler)) {
break;
}
diff --git a/src/main/java/io/vertx/core/net/impl/pool/EndpointProvider.java b/src/main/java/io/vertx/core/net/impl/pool/EndpointProvider.java
--- a/src/main/java/io/vertx/core/net/impl/pool/EndpointProvider.java
+++ b/src/main/java/io/vertx/core/net/impl/pool/EndpointProvider.java
@@ -17,15 +17,15 @@
*
* @author <a href="mailto:julien@julienviet.com">Julien Viet</a>
*/
-public interface EndpointProvider<K, C> {
+public interface EndpointProvider<C> {
/**
* Create an endpoint tracked by the {@link ConnectionManager}.
*
+ * @param ctx the creating context
* @param dispose the callback to signal this endpoint should be destroyed
- * @param ctx the creating context
* @return the created endpoint
*/
- Endpoint<C> create(K key, ContextInternal ctx, Runnable dispose);
+ Endpoint<C> create(ContextInternal ctx, Runnable dispose);
}
| diff --git a/src/test/java/io/vertx/core/http/Http1xProxyTest.java b/src/test/java/io/vertx/core/http/Http1xProxyTest.java
--- a/src/test/java/io/vertx/core/http/Http1xProxyTest.java
+++ b/src/test/java/io/vertx/core/http/Http1xProxyTest.java
@@ -17,13 +17,19 @@
import io.vertx.core.net.ProxyOptions;
import io.vertx.core.net.ProxyType;
import io.vertx.core.net.SocketAddress;
+import io.vertx.test.proxy.HttpProxy;
+import io.vertx.test.proxy.SocksProxy;
+import io.vertx.test.proxy.TestProxyBase;
import io.vertx.test.tls.Cert;
import org.junit.Test;
+import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
public class Http1xProxyTest extends HttpTestBase {
@@ -46,7 +52,7 @@ public void testHttpProxyRequest() throws Exception {
startProxy(null, ProxyType.HTTP);
client.close();
client = vertx.createHttpClient(new HttpClientOptions()
- .setProxyOptions(new ProxyOptions().setType(ProxyType.HTTP).setHost("localhost").setPort(proxy.getPort())));
+ .setProxyOptions(new ProxyOptions().setType(ProxyType.HTTP).setHost("localhost").setPort(proxy.port())));
testHttpProxyRequest(() -> client.request(new RequestOptions()
.setHost(DEFAULT_HTTP_HOST)
.setPort(DEFAULT_HTTP_PORT)
@@ -62,7 +68,7 @@ public void testHttpProxyRequest() throws Exception {
public void testHttpProxyRequest2() throws Exception {
startProxy(null, ProxyType.HTTP);
testHttpProxyRequest(() -> client.request(new RequestOptions()
- .setProxyOptions(new ProxyOptions().setType(ProxyType.HTTP).setHost("localhost").setPort(proxy.getPort()))
+ .setProxyOptions(new ProxyOptions().setType(ProxyType.HTTP).setHost("localhost").setPort(proxy.port()))
.setHost(DEFAULT_HTTP_HOST)
.setPort(DEFAULT_HTTP_PORT)
.setURI("/")
@@ -87,7 +93,7 @@ private void testFilter(boolean accept) throws Exception {
startProxy(null, ProxyType.HTTP);
client.close();
client = vertx.createHttpClient(new HttpClientOptions()
- .setProxyOptions(new ProxyOptions().setType(ProxyType.HTTP).setHost("localhost").setPort(proxy.getPort())));
+ .setProxyOptions(new ProxyOptions().setType(ProxyType.HTTP).setHost("localhost").setPort(proxy.port())));
Set<SocketAddress> filtered = Collections.synchronizedSet(new HashSet<>());
((HttpClientImpl)client).proxyFilter(so -> {
filtered.add(so);
@@ -132,7 +138,7 @@ private void testNonProxyHosts(List<String> nonProxyHosts, String host, boolean
client.close();
client = vertx.createHttpClient(new HttpClientOptions()
.setNonProxyHosts(nonProxyHosts)
- .setProxyOptions(new ProxyOptions().setType(ProxyType.HTTP).setHost("localhost").setPort(proxy.getPort())));
+ .setProxyOptions(new ProxyOptions().setType(ProxyType.HTTP).setHost("localhost").setPort(proxy.port())));
testHttpProxyRequest(() -> client.request(new RequestOptions()
.setHost(host)
.setPort(DEFAULT_HTTP_PORT)
@@ -151,7 +157,7 @@ public void testHttpProxyRequestOverrideClientSsl() throws Exception {
startProxy(null, ProxyType.HTTP);
client.close();
client = vertx.createHttpClient(new HttpClientOptions()
- .setSsl(true).setProxyOptions(new ProxyOptions().setType(ProxyType.HTTP).setHost("localhost").setPort(proxy.getPort())));
+ .setSsl(true).setProxyOptions(new ProxyOptions().setType(ProxyType.HTTP).setHost("localhost").setPort(proxy.port())));
testHttpProxyRequest(() -> client
.request(new RequestOptions().setSsl(false).setHost("localhost").setPort(8080))
.compose(HttpClientRequest::send)).onComplete(onSuccess(v -> {
@@ -189,7 +195,7 @@ public void testHttpProxyRequestAuth() throws Exception {
client.close();
client = vertx.createHttpClient(new HttpClientOptions()
- .setProxyOptions(new ProxyOptions().setType(ProxyType.HTTP).setHost("localhost").setPort(proxy.getPort())
+ .setProxyOptions(new ProxyOptions().setType(ProxyType.HTTP).setHost("localhost").setPort(proxy.port())
.setUsername("user").setPassword("user")));
server.requestHandler(req -> {
@@ -218,7 +224,7 @@ public void testHttpProxyFtpRequest() throws Exception {
startProxy(null, ProxyType.HTTP);
client.close();
client = vertx.createHttpClient(new HttpClientOptions()
- .setProxyOptions(new ProxyOptions().setType(ProxyType.HTTP).setHost("localhost").setPort(proxy.getPort())));
+ .setProxyOptions(new ProxyOptions().setType(ProxyType.HTTP).setHost("localhost").setPort(proxy.port())));
final String url = "ftp://ftp.gnu.org/gnu/";
proxy.setForceUri("http://localhost:8080/");
server.requestHandler(req -> {
@@ -244,7 +250,7 @@ public void testHttpSocksProxyRequest() throws Exception {
client.close();
client = vertx.createHttpClient(new HttpClientOptions()
- .setProxyOptions(new ProxyOptions().setType(ProxyType.SOCKS5).setHost("localhost").setPort(proxy.getPort())));
+ .setProxyOptions(new ProxyOptions().setType(ProxyType.SOCKS5).setHost("localhost").setPort(proxy.port())));
server.requestHandler(req -> req.response().end());
@@ -269,7 +275,7 @@ public void testHttpSocksProxyRequestAuth() throws Exception {
client.close();
client = vertx.createHttpClient(new HttpClientOptions()
- .setProxyOptions(new ProxyOptions().setType(ProxyType.SOCKS5).setHost("localhost").setPort(proxy.getPort())
+ .setProxyOptions(new ProxyOptions().setType(ProxyType.SOCKS5).setHost("localhost").setPort(proxy.port())
.setUsername("user").setPassword("user")));
server.requestHandler(req -> {
@@ -291,6 +297,178 @@ public void testHttpSocksProxyRequestAuth() throws Exception {
await();
}
+ @Test
+ public void testHttpProxyPooling() throws Exception {
+ HttpProxy proxy1 = new HttpProxy().port(HttpProxy.DEFAULT_PORT);
+ HttpProxy proxy2 = new HttpProxy().port(HttpProxy.DEFAULT_PORT + 1);
+ ProxyOptions req1 = new ProxyOptions()
+ .setType(ProxyType.HTTP)
+ .setHost("localhost")
+ .setPort(proxy1.port());
+ ProxyOptions req2 = new ProxyOptions()
+ .setType(ProxyType.HTTP)
+ .setHost("localhost")
+ .setPort(proxy2.port());
+ List<String> res = testPooling(req1, req2, proxy1, proxy2);
+ assertEquals(Arrays.asList(proxy1.lastLocalAddress(), proxy2.lastLocalAddress()), res);
+ }
+
+ @Test
+ public void testHttpProxyPooling2() throws Exception {
+ HttpProxy proxy = new HttpProxy().port(HttpProxy.DEFAULT_PORT);
+ ProxyOptions req = new ProxyOptions()
+ .setType(ProxyType.HTTP)
+ .setHost("localhost")
+ .setPort(proxy.port());
+ List<String> res = testPooling(req, req, proxy);
+ assertEquals(Arrays.asList(proxy.lastLocalAddress(), proxy.lastLocalAddress()), res);
+ }
+
+ @Test
+ public void testHttpProxyAuthPooling1() throws Exception {
+ HttpProxy proxy = new HttpProxy().port(SocksProxy.DEFAULT_PORT).username(Arrays.asList("user1", "user2"));
+ ProxyOptions req1 = new ProxyOptions()
+ .setUsername("user1")
+ .setPassword("user1")
+ .setType(ProxyType.HTTP)
+ .setHost("localhost")
+ .setPort(proxy.port());
+ ProxyOptions req2 = new ProxyOptions()
+ .setUsername("user2")
+ .setPassword("user2")
+ .setType(ProxyType.HTTP)
+ .setHost("localhost")
+ .setPort(proxy.port());
+ List<String> res = testPooling(req1, req2, proxy);
+ assertEquals(proxy.localAddresses(), res);
+ }
+
+ @Test
+ public void testHttpProxyAuthPooling2() throws Exception {
+ HttpProxy proxy = new HttpProxy().port(SocksProxy.DEFAULT_PORT).username(Arrays.asList("user1"));
+ ProxyOptions req1 = new ProxyOptions()
+ .setUsername("user1")
+ .setPassword("user1")
+ .setType(ProxyType.HTTP)
+ .setHost("localhost")
+ .setPort(proxy.port());
+ ProxyOptions req2 = new ProxyOptions()
+ .setUsername("user1")
+ .setPassword("user1")
+ .setType(ProxyType.HTTP)
+ .setHost("localhost")
+ .setPort(proxy.port());
+ List<String> res = testPooling(req1, req2, proxy);
+ assertEquals(1, proxy.localAddresses().size());
+ assertEquals(Arrays.asList(proxy.localAddresses().get(0), proxy.localAddresses().get(0)), res);
+ }
+
+ @Test
+ public void testSocksProxyPooling1() throws Exception {
+ SocksProxy proxy1 = new SocksProxy().port(SocksProxy.DEFAULT_PORT);
+ SocksProxy proxy2 = new SocksProxy().port(SocksProxy.DEFAULT_PORT + 1);
+ ProxyOptions req1 = new ProxyOptions()
+ .setType(ProxyType.SOCKS5)
+ .setHost("localhost")
+ .setPort(proxy1.port());
+ ProxyOptions req2 = new ProxyOptions()
+ .setType(ProxyType.SOCKS5)
+ .setHost("localhost")
+ .setPort(proxy2.port());
+ List<String> res = testPooling(req1, req2, proxy1, proxy2);
+ assertEquals(Arrays.asList(proxy1.lastLocalAddress(), proxy2.lastLocalAddress()), res);
+ }
+
+ @Test
+ public void testSocksProxyPooling2() throws Exception {
+ SocksProxy proxy = new SocksProxy().port(SocksProxy.DEFAULT_PORT);
+ ProxyOptions req = new ProxyOptions()
+ .setType(ProxyType.SOCKS5)
+ .setHost("localhost")
+ .setPort(proxy.port());
+ List<String> res = testPooling(req, req, proxy);
+ assertEquals(Arrays.asList(proxy.lastLocalAddress(), proxy.lastLocalAddress()), res);
+ }
+
+ @Test
+ public void testSocksProxyAuthPooling1() throws Exception {
+ SocksProxy proxy = new SocksProxy().port(SocksProxy.DEFAULT_PORT).username(Arrays.asList("user1", "user2"));
+ ProxyOptions req1 = new ProxyOptions()
+ .setUsername("user1")
+ .setPassword("user1")
+ .setType(ProxyType.SOCKS5)
+ .setHost("localhost")
+ .setPort(proxy.port());
+ ProxyOptions req2 = new ProxyOptions()
+ .setUsername("user2")
+ .setPassword("user2")
+ .setType(ProxyType.SOCKS5)
+ .setHost("localhost")
+ .setPort(proxy.port());
+ List<String> res = testPooling(req1, req2, proxy);
+ assertEquals(proxy.localAddresses(), res);
+ }
+
+ @Test
+ public void testSocksProxyAuthPooling2() throws Exception {
+ SocksProxy proxy = new SocksProxy().port(SocksProxy.DEFAULT_PORT).username(Arrays.asList("user1"));
+ ProxyOptions req1 = new ProxyOptions()
+ .setUsername("user1")
+ .setPassword("user1")
+ .setType(ProxyType.SOCKS5)
+ .setHost("localhost")
+ .setPort(proxy.port());
+ ProxyOptions req2 = new ProxyOptions()
+ .setUsername("user1")
+ .setPassword("user1")
+ .setType(ProxyType.SOCKS5)
+ .setHost("localhost")
+ .setPort(proxy.port());
+ List<String> res = testPooling(req1, req2, proxy);
+ assertEquals(1, proxy.localAddresses().size());
+ assertEquals(Arrays.asList(proxy.localAddresses().get(0), proxy.localAddresses().get(0)), res);
+ }
+
+ public List<String> testPooling(ProxyOptions request1, ProxyOptions request2, TestProxyBase... proxies) throws Exception {
+ for (TestProxyBase proxy : proxies) {
+ proxy.start(vertx);
+ }
+
+ client.close();
+ client = vertx.createHttpClient(new HttpClientOptions().setMaxPoolSize(1).setKeepAlive(true));
+
+ CompletableFuture<List<String>> ret = new CompletableFuture<>();
+
+ try {
+ server.requestHandler(req -> {
+ SocketAddress addr = req.connection().remoteAddress();
+ req.response().end("" + addr);
+ }).listen(onSuccess(s -> {
+ RequestOptions baseOptions = new RequestOptions()
+ .setHost(DEFAULT_HTTP_HOST)
+ .setPort(DEFAULT_HTTP_PORT)
+ .setURI("/");
+ client.request(new RequestOptions(baseOptions).setProxyOptions(request1))
+ .compose(HttpClientRequest::send)
+ .compose(HttpClientResponse::body)
+ .onComplete(onSuccess(res1 -> {
+ client.request(new RequestOptions(baseOptions).setProxyOptions(request2))
+ .compose(HttpClientRequest::send)
+ .compose(HttpClientResponse::body)
+ .onComplete(onSuccess(res2 -> {
+ ret.complete(Arrays.asList(res1.toString(), res2.toString()));
+ }));
+ }));
+ }));
+
+ return ret.get(40, TimeUnit.SECONDS);
+ } finally {
+ for (TestProxyBase proxy : proxies) {
+ proxy.stop();
+ }
+ }
+ }
+
@Test
public void testWssHttpProxy() throws Exception {
startProxy(null, ProxyType.HTTP);
@@ -301,7 +479,7 @@ public void testWssHttpProxy() throws Exception {
.setProxyOptions(new ProxyOptions()
.setType(ProxyType.HTTP)
.setHost(DEFAULT_HTTP_HOST)
- .setPort(proxy.getPort())), true);
+ .setPort(proxy.port())), true);
}
@Test
@@ -311,7 +489,7 @@ public void testWsHttpProxy() throws Exception {
.setProxyOptions(new ProxyOptions()
.setType(ProxyType.HTTP)
.setHost(DEFAULT_HTTP_HOST)
- .setPort(proxy.getPort())), true);
+ .setPort(proxy.port())), true);
}
@Test
@@ -324,7 +502,7 @@ public void testWssSocks5Proxy() throws Exception {
.setProxyOptions(new ProxyOptions()
.setType(ProxyType.SOCKS5)
.setHost(DEFAULT_HTTP_HOST)
- .setPort(proxy.getPort())), true);
+ .setPort(proxy.port())), true);
}
@Test
@@ -334,7 +512,7 @@ public void testWsSocks5Proxy() throws Exception {
.setProxyOptions(new ProxyOptions()
.setType(ProxyType.SOCKS5)
.setHost(DEFAULT_HTTP_HOST)
- .setPort(proxy.getPort())), true);
+ .setPort(proxy.port())), true);
}
@Test
@@ -345,7 +523,7 @@ public void testWsNonProxyHosts() throws Exception {
.setProxyOptions(new ProxyOptions()
.setType(ProxyType.HTTP)
.setHost(DEFAULT_HTTP_HOST)
- .setPort(proxy.getPort())), false);
+ .setPort(proxy.port())), false);
}
private void testWebSocket(HttpServerOptions serverOptions, HttpClientOptions clientOptions, boolean proxied) throws Exception {
diff --git a/src/test/java/io/vertx/core/http/HttpTestBase.java b/src/test/java/io/vertx/core/http/HttpTestBase.java
--- a/src/test/java/io/vertx/core/http/HttpTestBase.java
+++ b/src/test/java/io/vertx/core/http/HttpTestBase.java
@@ -138,10 +138,11 @@ protected void startServer(SocketAddress bindAddress, Context context, HttpServe
protected void startProxy(String username, ProxyType proxyType) throws Exception {
if (proxyType == ProxyType.HTTP) {
- proxy = new HttpProxy(username);
+ proxy = new HttpProxy();
} else {
- proxy = new SocksProxy(username);
+ proxy = new SocksProxy();
}
+ proxy.username(username);
proxy.start(vertx);
}
}
diff --git a/src/test/java/io/vertx/core/net/NetTest.java b/src/test/java/io/vertx/core/net/NetTest.java
--- a/src/test/java/io/vertx/core/net/NetTest.java
+++ b/src/test/java/io/vertx/core/net/NetTest.java
@@ -2991,7 +2991,7 @@ public void testWithSocks5Proxy() throws Exception {
server.connectHandler(sock -> {
});
- proxy = new SocksProxy(null);
+ proxy = new SocksProxy();
proxy.start(vertx);
server.listen(1234, "localhost", ar -> {
assertTrue(ar.succeeded());
@@ -3020,7 +3020,7 @@ public void testWithSocks5ProxyAuth() throws Exception {
server.connectHandler(sock -> {
});
- proxy = new SocksProxy("username");
+ proxy = new SocksProxy().username("username");
proxy.start(vertx);
server.listen(1234, "localhost", ar -> {
assertTrue(ar.succeeded());
@@ -3054,7 +3054,7 @@ public void testConnectSSLWithSocks5Proxy() throws Exception {
server.connectHandler(sock -> {
});
- proxy = new SocksProxy(null);
+ proxy = new SocksProxy();
proxy.start(vertx);
server.listen(ar -> {
assertTrue(ar.succeeded());
@@ -3088,7 +3088,7 @@ public void testUpgradeSSLWithSocks5Proxy() throws Exception {
server.connectHandler(sock -> {
});
- proxy = new SocksProxy(null);
+ proxy = new SocksProxy();
proxy.start(vertx);
server.listen(ar -> {
assertTrue(ar.succeeded());
@@ -3116,7 +3116,7 @@ public void testWithHttpConnectProxy() throws Exception {
server.connectHandler(sock -> {
});
- proxy = new HttpProxy(null);
+ proxy = new HttpProxy();
proxy.start(vertx);
server.listen(1234, "localhost", ar -> {
assertTrue(ar.succeeded());
@@ -3144,7 +3144,7 @@ public void testWithSocks4aProxy() throws Exception {
server.connectHandler(sock -> {
});
- proxy = new Socks4Proxy(null);
+ proxy = new Socks4Proxy();
proxy.start(vertx);
server.listen(1234, "localhost", ar -> {
assertTrue(ar.succeeded());
@@ -3173,7 +3173,7 @@ public void testWithSocks4aProxyAuth() throws Exception {
server.connectHandler(sock -> {
});
- proxy = new Socks4Proxy("username");
+ proxy = new Socks4Proxy().username("username");
proxy.start(vertx);
server.listen(1234, "localhost", ar -> {
assertTrue(ar.succeeded());
@@ -3201,7 +3201,7 @@ public void testWithSocks4LocalResolver() throws Exception {
server.connectHandler(sock -> {
});
- proxy = new Socks4Proxy(null).start(vertx);
+ proxy = new Socks4Proxy().start(vertx);
server.listen(1234, "localhost", ar -> {
assertTrue(ar.succeeded());
client.connect(1234, "127.0.0.1", ar2 -> {
@@ -3226,7 +3226,7 @@ public void testNonProxyHosts() throws Exception {
server.connectHandler(sock -> {
});
- proxy = new HttpProxy(null);
+ proxy = new HttpProxy();
proxy.start(vertx);
server.listen(1234, "localhost", onSuccess(s -> {
client.connect(1234, "example.com", onSuccess(so -> {
diff --git a/src/test/java/io/vertx/core/net/ProxyErrorTest.java b/src/test/java/io/vertx/core/net/ProxyErrorTest.java
--- a/src/test/java/io/vertx/core/net/ProxyErrorTest.java
+++ b/src/test/java/io/vertx/core/net/ProxyErrorTest.java
@@ -69,7 +69,7 @@ protected VertxOptions getOptions() {
// we don't start http/https servers, due to the error, they will not be queried
private void startProxy(int error, String username) throws Exception {
- proxy = new HttpProxy(username);
+ proxy = new HttpProxy().username(username);
proxy.setError(error);
proxy.start(vertx);
}
@@ -128,7 +128,7 @@ private void proxyTest(int error, String username, String url, Handler<AsyncResu
.setProxyOptions(new ProxyOptions()
.setType(ProxyType.HTTP)
.setHost("localhost")
- .setPort(proxy.getPort()));
+ .setPort(proxy.port()));
HttpClient client = vertx.createHttpClient(options);
client.request(new RequestOptions().setAbsoluteURI(url), ar -> {
diff --git a/src/test/java/io/vertx/core/net/impl/pool/ConnectionManagerTest.java b/src/test/java/io/vertx/core/net/impl/pool/ConnectionManagerTest.java
--- a/src/test/java/io/vertx/core/net/impl/pool/ConnectionManagerTest.java
+++ b/src/test/java/io/vertx/core/net/impl/pool/ConnectionManagerTest.java
@@ -44,9 +44,9 @@ private void testGetConnection(boolean success) {
EventLoopContext ctx = (EventLoopContext) vertx.getOrCreateContext();
Connection result = new Connection();
Throwable failure = new Throwable();
- ConnectionManager<Object, Connection> mgr = new ConnectionManager<>(new EndpointProvider<Object, Connection>() {
+ EndpointProvider<Connection> provider = new EndpointProvider<Connection>() {
@Override
- public Endpoint<Connection> create(Object key, ContextInternal ctx, Runnable dispose) {
+ public Endpoint<Connection> create(ContextInternal ctx, Runnable dispose) {
return new Endpoint<Connection>(dispose) {
@Override
public void requestConnection(ContextInternal ctx, long timeout, Handler<AsyncResult<Connection>> handler) {
@@ -59,8 +59,9 @@ public void requestConnection(ContextInternal ctx, long timeout, Handler<AsyncRe
}
};
}
- });
- mgr.getConnection(ctx, TEST_KEY, ar -> {
+ };
+ ConnectionManager<Object, Connection> mgr = new ConnectionManager<>();
+ mgr.getConnection(ctx, TEST_KEY, provider, ar -> {
if (ar.succeeded()) {
assertTrue(success);
assertSame(result, ar.result());
@@ -87,9 +88,9 @@ private void testDispose(boolean closeConnectionAfterCallback) {
EventLoopContext ctx = (EventLoopContext) vertx.getOrCreateContext();
Connection expected = new Connection();
boolean[] disposed = new boolean[1];
- ConnectionManager<Object, Connection> mgr = new ConnectionManager<>(new EndpointProvider<Object, Connection>() {
+ EndpointProvider<Connection> provider = new EndpointProvider<Connection>() {
@Override
- public Endpoint<Connection> create(Object key, ContextInternal ctx, Runnable dispose) {
+ public Endpoint<Connection> create(ContextInternal ctx, Runnable dispose) {
return new Endpoint<Connection>(dispose) {
@Override
public void requestConnection(ContextInternal ctx, long timeout, Handler<AsyncResult<Connection>> handler) {
@@ -106,14 +107,16 @@ public void requestConnection(ContextInternal ctx, long timeout, Handler<AsyncRe
assertTrue(disposed[0]);
}
}
+
@Override
protected void dispose() {
disposed[0] = true;
}
};
}
- });
- mgr.getConnection(ctx, TEST_KEY, onSuccess(conn -> {
+ };
+ ConnectionManager<Object, Connection> mgr = new ConnectionManager<>();
+ mgr.getConnection(ctx, TEST_KEY, provider, onSuccess(conn -> {
assertEquals(expected, conn);
}));
waitUntil(() -> disposed[0]);
@@ -124,19 +127,21 @@ public void testCloseManager() throws Exception {
EventLoopContext ctx = (EventLoopContext) vertx.getOrCreateContext();
Connection expected = new Connection();
boolean[] disposed = new boolean[1];
- ConnectionManager<Object, Connection> mgr = new ConnectionManager<>(new EndpointProvider<Object, Connection>() {
+ EndpointProvider<Connection> provider = new EndpointProvider<Connection>() {
@Override
- public Endpoint<Connection> create(Object key, ContextInternal ctx, Runnable dispose) {
+ public Endpoint<Connection> create(ContextInternal ctx, Runnable dispose) {
return new Endpoint<Connection>(dispose) {
@Override
public void requestConnection(ContextInternal ctx, long timeout, Handler<AsyncResult<Connection>> handler) {
incRefCount();
handler.handle(Future.succeededFuture(expected));
}
+
@Override
protected void dispose() {
disposed[0] = true;
}
+
@Override
protected void close() {
super.close();
@@ -144,9 +149,10 @@ protected void close() {
}
};
}
- });
+ };
+ ConnectionManager<Object, Connection> mgr = new ConnectionManager<>();
CountDownLatch latch = new CountDownLatch(1);
- mgr.getConnection(ctx, TEST_KEY, onSuccess(conn -> {
+ mgr.getConnection(ctx, TEST_KEY, provider, onSuccess(conn -> {
assertEquals(expected, conn);
latch.countDown();
}));
@@ -162,9 +168,9 @@ public void testCloseManagerImmediately() {
Connection expected = new Connection();
boolean[] disposed = new boolean[1];
AtomicReference<Runnable> adder = new AtomicReference<>();
- ConnectionManager<Object, Connection> mgr = new ConnectionManager<>(new EndpointProvider<Object, Connection>() {
+ EndpointProvider<Connection> provider = new EndpointProvider<Connection>() {
@Override
- public Endpoint<Connection> create(Object key, ContextInternal ctx, Runnable dispose) {
+ public Endpoint<Connection> create(ContextInternal ctx, Runnable dispose) {
return new Endpoint<Connection>(dispose) {
@Override
public void requestConnection(ContextInternal ctx, long timeout, Handler<AsyncResult<Connection>> handler) {
@@ -174,8 +180,9 @@ public void requestConnection(ContextInternal ctx, long timeout, Handler<AsyncRe
}
};
}
- });
- mgr.getConnection(ctx, TEST_KEY, onSuccess(conn -> {
+ };
+ ConnectionManager<Object, Connection> mgr = new ConnectionManager<>();
+ mgr.getConnection(ctx, TEST_KEY, provider, onSuccess(conn -> {
}));
waitUntil(() -> adder.get() != null);
mgr.close();
@@ -186,9 +193,9 @@ public void requestConnection(ContextInternal ctx, long timeout, Handler<AsyncRe
public void testConcurrentDispose() throws Exception {
EventLoopContext ctx = (EventLoopContext) vertx.getOrCreateContext();
ConcurrentLinkedQueue<AtomicBoolean> disposals = new ConcurrentLinkedQueue<>();
- ConnectionManager<Object, Connection> mgr = new ConnectionManager<>(new EndpointProvider<Object, Connection>() {
+ EndpointProvider<Connection> provider = new EndpointProvider<Connection>() {
@Override
- public Endpoint<Connection> create(Object key, ContextInternal ctx, Runnable dispose) {
+ public Endpoint<Connection> create(ContextInternal ctx, Runnable dispose) {
AtomicBoolean disposed = new AtomicBoolean();
disposals.add(disposed);
return new Endpoint<Connection>(dispose) {
@@ -204,13 +211,15 @@ public void requestConnection(ContextInternal ctx, long timeout, Handler<AsyncRe
decRefCount();
}
}
+
@Override
protected void dispose() {
disposed.set(true);
}
};
}
- });
+ };
+ ConnectionManager<Object, Connection> mgr = new ConnectionManager<>();
int num = 100000;
int concurrency = 4;
CountDownLatch[] latches = new CountDownLatch[concurrency];
@@ -219,7 +228,7 @@ protected void dispose() {
latches[i] = cc;
new Thread(() -> {
for (int j = 0;j < num;j++) {
- mgr.getConnection(ctx, TEST_KEY, onSuccess(conn -> {
+ mgr.getConnection(ctx, TEST_KEY, provider, onSuccess(conn -> {
cc.countDown();
}));
}
diff --git a/src/test/java/io/vertx/test/proxy/HttpProxy.java b/src/test/java/io/vertx/test/proxy/HttpProxy.java
--- a/src/test/java/io/vertx/test/proxy/HttpProxy.java
+++ b/src/test/java/io/vertx/test/proxy/HttpProxy.java
@@ -13,7 +13,9 @@
import java.net.UnknownHostException;
import java.util.Base64;
+import java.util.Map;
import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import io.vertx.core.MultiMap;
@@ -44,21 +46,23 @@
* <p>
* @author <a href="http://oss.lehmann.cx/">Alexander Lehmann</a>
*/
-public class HttpProxy extends TestProxyBase {
+public class HttpProxy extends TestProxyBase<HttpProxy> {
- private static final int PORT = 13128;
+ public static final int DEFAULT_PORT = 13128;
private static final Logger log = LoggerFactory.getLogger(HttpProxy.class);
private HttpServer server;
+ private Map<HttpConnection, HttpClient> clientMap = new ConcurrentHashMap<>();
private int error = 0;
private MultiMap lastRequestHeaders = null;
private HttpMethod lastMethod;
- public HttpProxy(String username) {
- super(username);
+ @Override
+ public int defaultPort() {
+ return DEFAULT_PORT;
}
/**
@@ -70,11 +74,12 @@ public HttpProxy(String username) {
@Override
public HttpProxy start(Vertx vertx) throws Exception {
HttpServerOptions options = new HttpServerOptions();
- options.setHost("localhost").setPort(PORT);
+ options.setHost("localhost").setPort(port);
server = vertx.createHttpServer(options);
server.requestHandler(request -> {
HttpMethod method = request.method();
String uri = request.uri();
+ String username = nextUserName();
if (username != null) {
String auth = request.getHeader("Proxy-Authorization");
String expected = "Basic " + Base64.getEncoder().encodeToString((username + ":" + username).getBytes());
@@ -113,6 +118,7 @@ public HttpProxy start(Vertx vertx) throws Exception {
NetClient netClient = vertx.createNetClient(netOptions);
netClient.connect(port, host, ar1 -> {
if (ar1.succeeded()) {
+ localAddresses.add(ar1.result().localAddress().toString());
request.toNetSocket().onComplete(ar2 -> {
if (ar2.succeeded()) {
NetSocket serverSocket = ar2.result();
@@ -136,9 +142,16 @@ public HttpProxy start(Vertx vertx) throws Exception {
if (forceUri != null) {
uri = forceUri;
}
- HttpClient client = vertx.createHttpClient();
RequestOptions opts = new RequestOptions();
opts.setAbsoluteURI(uri);
+ HttpConnection serverConn = request.connection();
+ HttpClient client = clientMap.get(serverConn);
+ if (client == null) {
+ client = vertx.createHttpClient(new HttpClientOptions().setMaxPoolSize(1));
+ client.connectionHandler(conn -> localAddresses.add(conn.localAddress().toString()));
+ clientMap.put(serverConn, client);
+ serverConn.closeHandler(v -> clientMap.remove(serverConn));
+ }
client.request(opts).compose(req -> {
for (String name : request.headers().names()) {
if (!name.equals("Proxy-Authorization")) {
@@ -200,11 +213,6 @@ public void stop() {
}
}
- @Override
- public int getPort() {
- return PORT;
- }
-
@Override
public HttpMethod getLastMethod() {
return lastMethod;
diff --git a/src/test/java/io/vertx/test/proxy/Socks4Proxy.java b/src/test/java/io/vertx/test/proxy/Socks4Proxy.java
--- a/src/test/java/io/vertx/test/proxy/Socks4Proxy.java
+++ b/src/test/java/io/vertx/test/proxy/Socks4Proxy.java
@@ -37,7 +37,7 @@
*
* @author <a href="http://oss.lehmann.cx/">Alexander Lehmann</a>
*/
-public class Socks4Proxy extends TestProxyBase {
+public class Socks4Proxy extends TestProxyBase<Socks4Proxy> {
private static final Logger log = LoggerFactory.getLogger(Socks4Proxy.class);
@@ -45,12 +45,13 @@ public class Socks4Proxy extends TestProxyBase {
private static final Buffer connectResponse = Buffer.buffer(new byte[] { 0, 90, 0, 0, 0, 0, 0, 0 });
private static final Buffer errorResponse = Buffer.buffer(new byte[] { 0, 91, 0, 0, 0, 0, 0, 0 });
- private static final int PORT = 11080;
+ public static final int DEFAULT_PORT = 11080;
private NetServer server;
- public Socks4Proxy(String username) {
- super(username);
+ @Override
+ public int defaultPort() {
+ return DEFAULT_PORT;
}
/**
@@ -62,7 +63,7 @@ public Socks4Proxy(String username) {
@Override
public Socks4Proxy start(Vertx vertx) throws Exception {
NetServerOptions options = new NetServerOptions();
- options.setHost("localhost").setPort(PORT);
+ options.setHost("localhost").setPort(port);
server = vertx.createNetServer(options);
server.connectHandler(socket -> {
socket.handler(buffer -> {
@@ -75,7 +76,7 @@ public Socks4Proxy start(Vertx vertx) throws Exception {
String ip = getByte4(buffer.getBuffer(4, 8));
String authUsername = getString(buffer.getBuffer(8, buffer.length()));
-
+ String username = nextUserName();
if (username != null && !authUsername.equals(username)) {
log.debug("auth failed");
log.debug("writing: " + toHex(errorResponse));
@@ -101,6 +102,7 @@ public Socks4Proxy start(Vertx vertx) throws Exception {
NetClient netClient = vertx.createNetClient(new NetClientOptions());
netClient.connect(port, host, result -> {
if (result.succeeded()) {
+ localAddresses.add(result.result().localAddress().toString());
log.debug("writing: " + toHex(connectResponse));
socket.write(connectResponse);
log.debug("connected, starting pump");
@@ -162,9 +164,4 @@ public void stop() {
server = null;
}
}
-
- @Override
- public int getPort() {
- return PORT;
- }
}
diff --git a/src/test/java/io/vertx/test/proxy/SocksProxy.java b/src/test/java/io/vertx/test/proxy/SocksProxy.java
--- a/src/test/java/io/vertx/test/proxy/SocksProxy.java
+++ b/src/test/java/io/vertx/test/proxy/SocksProxy.java
@@ -38,7 +38,7 @@
*
* @author <a href="http://oss.lehmann.cx/">Alexander Lehmann</a>
*/
-public class SocksProxy extends TestProxyBase {
+public class SocksProxy extends TestProxyBase<SocksProxy> {
private static final Logger log = LoggerFactory.getLogger(SocksProxy.class);
@@ -53,12 +53,13 @@ public class SocksProxy extends TestProxyBase {
private static final Buffer authSuccess = Buffer.buffer(new byte[] { 1, 0 });
private static final Buffer authFailed = Buffer.buffer(new byte[] { 1, 1 });
- private static final int PORT = 11080;
+ public static final int DEFAULT_PORT = 11080;
private NetServer server;
- public SocksProxy(String username) {
- super(username);
+ @Override
+ public int defaultPort() {
+ return DEFAULT_PORT;
}
/**
@@ -70,10 +71,11 @@ public SocksProxy(String username) {
@Override
public SocksProxy start(Vertx vertx) throws Exception {
NetServerOptions options = new NetServerOptions();
- options.setHost("localhost").setPort(PORT);
+ options.setHost("localhost").setPort(port);
server = vertx.createNetServer(options);
server.connectHandler(socket -> {
socket.handler(buffer -> {
+ String username = nextUserName();
Buffer expectedInit = username == null ? clientInit : clientInitAuth;
if (!buffer.equals(expectedInit)) {
throw new IllegalStateException("expected " + toHex(expectedInit) + ", got " + toHex(buffer));
@@ -121,6 +123,7 @@ public SocksProxy start(Vertx vertx) throws Exception {
NetClient netClient = vertx.createNetClient(new NetClientOptions());
netClient.connect(port, host, result -> {
if (result.succeeded()) {
+ localAddresses.add(result.result().localAddress().toString());
log.debug("writing: " + toHex(connectResponse));
socket.write(connectResponse);
log.debug("connected, starting pump");
@@ -201,9 +204,4 @@ public void stop() {
server = null;
}
}
-
- @Override
- public int getPort() {
- return PORT;
- }
}
diff --git a/src/test/java/io/vertx/test/proxy/TestProxyBase.java b/src/test/java/io/vertx/test/proxy/TestProxyBase.java
--- a/src/test/java/io/vertx/test/proxy/TestProxyBase.java
+++ b/src/test/java/io/vertx/test/proxy/TestProxyBase.java
@@ -14,19 +14,69 @@
import io.vertx.core.MultiMap;
import io.vertx.core.Vertx;
import io.vertx.core.http.HttpMethod;
+import io.vertx.core.net.SocketAddress;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.function.Supplier;
/**
* @author <a href="http://oss.lehmann.cx/">Alexander Lehmann</a>
*
*/
-public abstract class TestProxyBase {
+public abstract class TestProxyBase<P extends TestProxyBase<P>> {
- protected final String username;
+ private Supplier<String> username;
+ protected int port;
protected String lastUri;
protected String forceUri;
+ protected List<String> localAddresses = Collections.synchronizedList(new ArrayList<>());
+
+ public TestProxyBase() {
+ port = defaultPort();
+ }
+
+ public P username(String username) {
+ this.username = () -> username;
+ return (P) this;
+ }
- public TestProxyBase(String username) {
+ public P username(Supplier<String> username) {
this.username = username;
+ return (P) this;
+ }
+
+ public P username(Collection<String> username) {
+ Iterator<String> it = username.iterator();
+ this.username = () -> it.hasNext() ? it.next() : null;
+ return (P) this;
+ }
+
+ public String nextUserName() {
+ return username != null ? username.get() : null;
+ }
+
+ public P port(int port) {
+ this.port = port;
+ return (P)this;
+ }
+
+ public int port() {
+ return port;
+ }
+
+ public abstract int defaultPort();
+
+ public String lastLocalAddress() {
+ int idx = localAddresses.size();
+ return idx == 0 ? null : localAddresses.get(idx - 1);
+ }
+
+ public List<String> localAddresses() {
+ return localAddresses;
}
/**
@@ -59,8 +109,6 @@ public MultiMap getLastRequestHeaders() {
throw new UnsupportedOperationException();
}
- public abstract int getPort();
-
public abstract TestProxyBase start(Vertx vertx) throws Exception;
public abstract void stop();
| HTTP client pool proxy aware
The HTTP client pool discriminates connections by their host (and SSL) because the proxy used to be configured at the client level. Since we now allow proxy settings on a request, we should handle proxy settings in the client pool, providing dynamic proxy configuration for environments that use rolling proxies.
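For illustration, here is a minimal sketch of the per-request proxy usage this change has to support. It assumes `RequestOptions#setProxyOptions` as the per-request entry point mentioned above; the proxy endpoints are hypothetical:

```java
import io.vertx.core.Vertx;
import io.vertx.core.http.HttpClient;
import io.vertx.core.http.RequestOptions;
import io.vertx.core.net.ProxyOptions;
import io.vertx.core.net.ProxyType;

public class RollingProxySketch {
  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();
    HttpClient client = vertx.createHttpClient();
    // Two requests to the same origin, each routed through a different proxy.
    // With a pool keyed only on host (and SSL), both requests would share
    // connections, so the pool must also discriminate by proxy settings.
    for (String proxyHost : new String[]{"proxy-a.internal", "proxy-b.internal"}) {
      RequestOptions options = new RequestOptions()
        .setAbsoluteURI("http://example.com/")
        .setProxyOptions(new ProxyOptions()
          .setType(ProxyType.HTTP)
          .setHost(proxyHost) // hypothetical rolling proxy endpoint
          .setPort(3128));
      client.request(options, ar -> {
        if (ar.succeeded()) {
          ar.result().send();
        }
      });
    }
  }
}
```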
| 2022-09-15T14:23:04Z | 4.3 |
|
eclipse-vertx/vert.x | 4,423 | eclipse-vertx__vert.x-4423 | [
"4422"
] | 0bdaecf62dbb14421d0277fbfe3c90a47812f538 | diff --git a/src/main/java/io/vertx/core/eventbus/impl/EventBusImpl.java b/src/main/java/io/vertx/core/eventbus/impl/EventBusImpl.java
--- a/src/main/java/io/vertx/core/eventbus/impl/EventBusImpl.java
+++ b/src/main/java/io/vertx/core/eventbus/impl/EventBusImpl.java
@@ -57,25 +57,25 @@ public EventBusImpl(VertxInternal vertx) {
@Override
public <T> EventBus addOutboundInterceptor(Handler<DeliveryContext<T>> interceptor) {
- addInterceptor(OUTBOUND_INTERCEPTORS_UPDATER, interceptor);
+ addInterceptor(OUTBOUND_INTERCEPTORS_UPDATER, Objects.requireNonNull(interceptor));
return this;
}
@Override
public <T> EventBus addInboundInterceptor(Handler<DeliveryContext<T>> interceptor) {
- addInterceptor(INBOUND_INTERCEPTORS_UPDATER, interceptor);
+ addInterceptor(INBOUND_INTERCEPTORS_UPDATER, Objects.requireNonNull(interceptor));
return this;
}
@Override
public <T> EventBus removeOutboundInterceptor(Handler<DeliveryContext<T>> interceptor) {
- removeInterceptor(OUTBOUND_INTERCEPTORS_UPDATER, interceptor);
+ removeInterceptor(OUTBOUND_INTERCEPTORS_UPDATER, Objects.requireNonNull(interceptor));
return this;
}
@Override
public <T> EventBus removeInboundInterceptor(Handler<DeliveryContext<T>> interceptor) {
- removeInterceptor(OUTBOUND_INTERCEPTORS_UPDATER, interceptor);
+ removeInterceptor(INBOUND_INTERCEPTORS_UPDATER, Objects.requireNonNull(interceptor));
return this;
}
@@ -455,7 +455,7 @@ private void removeInterceptor(AtomicReferenceFieldUpdater<EventBusImpl, Handler
Handler[] interceptors = updater.get(this);
int idx = -1;
for (int i = 0;i < interceptors.length;i++) {
- if (interceptors[i] == interceptor) {
+ if (interceptors[i].equals(interceptor)) {
idx = i;
break;
}
| diff --git a/src/test/java/io/vertx/core/eventbus/EventBusInterceptorTest.java b/src/test/java/io/vertx/core/eventbus/EventBusInterceptorTest.java
--- a/src/test/java/io/vertx/core/eventbus/EventBusInterceptorTest.java
+++ b/src/test/java/io/vertx/core/eventbus/EventBusInterceptorTest.java
@@ -147,7 +147,7 @@ public void testMultipleOutboundInterceptors() {
}
@Test
- public void testRemoveOutboundInterceptor() {
+ public void testRemoveInterceptor() {
AtomicInteger cnt1 = new AtomicInteger();
AtomicInteger cnt2 = new AtomicInteger();
@@ -168,25 +168,28 @@ public void testRemoveOutboundInterceptor() {
sc.next();
};
- eb.addOutboundInterceptor(eb1).addOutboundInterceptor(eb2).addOutboundInterceptor(eb3);
+ eb
+ .addInboundInterceptor(eb1).addOutboundInterceptor(eb1)
+ .addInboundInterceptor(eb2).addOutboundInterceptor(eb2)
+ .addInboundInterceptor(eb3).addOutboundInterceptor(eb3);
eb.consumer("some-address", msg -> {
if (msg.body().equals("armadillo")) {
- assertEquals(1, cnt1.get());
- assertEquals(1, cnt2.get());
- assertEquals(1, cnt3.get());
- eb.removeOutboundInterceptor(eb2);
- eb.send("some-address", "aardvark");
- } else if (msg.body().equals("aardvark")) {
assertEquals(2, cnt1.get());
- assertEquals(1, cnt2.get());
+ assertEquals(2, cnt2.get());
assertEquals(2, cnt3.get());
- eb.removeOutboundInterceptor(eb3);
+ eb.removeInboundInterceptor(eb2).removeOutboundInterceptor(eb2);
+ eb.send("some-address", "aardvark");
+ } else if (msg.body().equals("aardvark")) {
+ assertEquals(4, cnt1.get());
+ assertEquals(2, cnt2.get());
+ assertEquals(4, cnt3.get());
+ eb.removeInboundInterceptor(eb3).removeOutboundInterceptor(eb3);
eb.send("some-address", "anteater");
} else if (msg.body().equals("anteater")) {
- assertEquals(3, cnt1.get());
- assertEquals(1, cnt2.get());
- assertEquals(2, cnt3.get());
+ assertEquals(6, cnt1.get());
+ assertEquals(2, cnt2.get());
+ assertEquals(4, cnt3.get());
testComplete();
} else {
fail("wrong body");
| EventBus inbound interceptors are never removed
Since `4.3.0`, `EventBus` interceptors are managed with `AtomicReferenceFieldUpdater`.
But `io.vertx.core.eventbus.impl.EventBusImpl#removeInboundInterceptor` uses the wrong field updater instance (the outbound one):
https://github.com/eclipse-vertx/vert.x/blob/a8d9a164df16c4f2f5eed5f176a5570c46f30d41/src/main/java/io/vertx/core/eventbus/impl/EventBusImpl.java#L76-L80
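For illustration, a minimal reproducer sketch (not taken from the project's test suite); on affected versions the last send is still intercepted, because removal goes through the outbound updater:

```java
import io.vertx.core.Handler;
import io.vertx.core.Vertx;
import io.vertx.core.eventbus.DeliveryContext;
import io.vertx.core.eventbus.EventBus;

public class InterceptorRemovalRepro {
  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();
    EventBus eb = vertx.eventBus();
    Handler<DeliveryContext<Object>> interceptor = dc -> {
      System.out.println("inbound: " + dc.message().address());
      dc.next(); // let delivery proceed
    };
    eb.addInboundInterceptor(interceptor);
    eb.consumer("greetings", msg -> {});
    eb.send("greetings", "hello");          // interceptor fires, as expected
    eb.removeInboundInterceptor(interceptor);
    eb.send("greetings", "hello again");    // bug: interceptor still fires
  }
}
```

The fix routes `removeInboundInterceptor` through the inbound updater and compares interceptors with `equals` instead of identity.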
| 2022-06-28T14:15:56Z | 4.3 |
|
eclipse-vertx/vert.x | 4,413 | eclipse-vertx__vert.x-4413 | [
"4405"
] | 428f2fd0966057eb9f0e83f70ada8dc08c092a0a | diff --git a/src/main/java/io/vertx/core/http/impl/WebSocketImplBase.java b/src/main/java/io/vertx/core/http/impl/WebSocketImplBase.java
--- a/src/main/java/io/vertx/core/http/impl/WebSocketImplBase.java
+++ b/src/main/java/io/vertx/core/http/impl/WebSocketImplBase.java
@@ -563,7 +563,7 @@ void closeConnection() {
*/
void initiateConnectionCloseTimeout(long timeoutMillis) {
synchronized (conn) {
- closeTimeoutID = context.owner().setTimer(timeoutMillis, id -> {
+ closeTimeoutID = context.setTimer(timeoutMillis, id -> {
synchronized (conn) {
closeTimeoutID = -1L;
}
diff --git a/src/main/java/io/vertx/core/impl/AbstractContext.java b/src/main/java/io/vertx/core/impl/AbstractContext.java
--- a/src/main/java/io/vertx/core/impl/AbstractContext.java
+++ b/src/main/java/io/vertx/core/impl/AbstractContext.java
@@ -85,13 +85,13 @@ public final void endDispatch(ContextInternal previous) {
@Override
public long setPeriodic(long delay, Handler<Long> handler) {
VertxImpl owner = (VertxImpl) owner();
- return owner.scheduleTimeout(this, true, delay, TimeUnit.MILLISECONDS, handler);
+ return owner.scheduleTimeout(this, true, delay, TimeUnit.MILLISECONDS, false, handler);
}
@Override
public long setTimer(long delay, Handler<Long> handler) {
VertxImpl owner = (VertxImpl) owner();
- return owner.scheduleTimeout(this, false, delay, TimeUnit.MILLISECONDS,handler);
+ return owner.scheduleTimeout(this, false, delay, TimeUnit.MILLISECONDS, false, handler);
}
@Override
diff --git a/src/main/java/io/vertx/core/impl/ContextInternal.java b/src/main/java/io/vertx/core/impl/ContextInternal.java
--- a/src/main/java/io/vertx/core/impl/ContextInternal.java
+++ b/src/main/java/io/vertx/core/impl/ContextInternal.java
@@ -277,12 +277,14 @@ default void runOnContext(Handler<Void> action) {
ContextInternal duplicate();
/**
- * Like {@link Vertx#setPeriodic(long, Handler)} except the periodic timer will fire on this context.
+ * Like {@link Vertx#setPeriodic(long, Handler)} except the periodic timer will fire on this context and the
+ * timer will not be associated with the context close hook.
*/
long setPeriodic(long delay, Handler<Long> handler);
/**
- * Like {@link Vertx#setTimer(long, Handler)} except the timer will fire on this context.
+ * Like {@link Vertx#setTimer(long, Handler)} except the timer will fire on this context and the timer
+ * will not be associated with the context close hook.
*/
long setTimer(long delay, Handler<Long> handler);
diff --git a/src/main/java/io/vertx/core/impl/VertxImpl.java b/src/main/java/io/vertx/core/impl/VertxImpl.java
--- a/src/main/java/io/vertx/core/impl/VertxImpl.java
+++ b/src/main/java/io/vertx/core/impl/VertxImpl.java
@@ -358,7 +358,8 @@ public EventBus eventBus() {
}
public long setPeriodic(long delay, Handler<Long> handler) {
- return scheduleTimeout(getOrCreateContext(), true, delay, TimeUnit.MILLISECONDS, handler);
+ ContextInternal ctx = getOrCreateContext();
+ return scheduleTimeout(ctx, true, delay, TimeUnit.MILLISECONDS, ctx.isDeployment(), handler);
}
@Override
@@ -367,7 +368,8 @@ public TimeoutStream periodicStream(long delay) {
}
public long setTimer(long delay, Handler<Long> handler) {
- return scheduleTimeout(getOrCreateContext(), false, delay, TimeUnit.MILLISECONDS, handler);
+ ContextInternal ctx = getOrCreateContext();
+ return scheduleTimeout(ctx, false, delay, TimeUnit.MILLISECONDS, ctx.isDeployment(), handler);
}
@Override
@@ -510,14 +512,19 @@ public DnsClient createDnsClient(DnsClientOptions options) {
return new DnsClientImpl(this, options);
}
- public long scheduleTimeout(ContextInternal context, boolean periodic, long delay, TimeUnit timeUnit, Handler<Long> handler) {
+ public long scheduleTimeout(ContextInternal context,
+ boolean periodic,
+ long delay,
+ TimeUnit timeUnit,
+ boolean addCloseHook,
+ Handler<Long> handler) {
if (delay < 1) {
throw new IllegalArgumentException("Cannot schedule a timer with delay < 1 ms");
}
long timerId = timeoutCounter.getAndIncrement();
InternalTimerHandler task = new InternalTimerHandler(timerId, handler, periodic, context);
timeouts.put(timerId, task);
- if (context.isDeployment()) {
+ if (addCloseHook) {
context.addCloseHook(task);
}
EventLoop el = context.nettyEventLoop();
@@ -526,7 +533,7 @@ public long scheduleTimeout(ContextInternal context, boolean periodic, long dela
} else {
task.future = el.schedule(task, delay, timeUnit);
}
- return timerId;
+ return task.id;
}
public AbstractContext getContext() {
@@ -869,18 +876,18 @@ public HAManager haManager() {
* This class does not rely on the internal {@link #future} for the termination to handle the worker case
* since the actual timer {@link #handler} execution is scheduled when the {@link #future} executes.
*/
- private class InternalTimerHandler implements Handler<Void>, Closeable, Runnable {
+ class InternalTimerHandler implements Handler<Void>, Closeable, Runnable {
private final Handler<Long> handler;
private final boolean periodic;
- private final long timerID;
+ private final long id;
private final ContextInternal context;
private final AtomicBoolean disposed = new AtomicBoolean();
private volatile java.util.concurrent.Future<?> future;
- InternalTimerHandler(long timerID, Handler<Long> runnable, boolean periodic, ContextInternal context) {
+ InternalTimerHandler(long id, Handler<Long> runnable, boolean periodic, ContextInternal context) {
this.context = context;
- this.timerID = timerID;
+ this.id = id;
this.handler = runnable;
this.periodic = periodic;
}
@@ -893,12 +900,12 @@ public void run() {
public void handle(Void v) {
if (periodic) {
if (!disposed.get()) {
- handler.handle(timerID);
+ handler.handle(id);
}
} else if (disposed.compareAndSet(false, true)) {
- timeouts.remove(timerID);
+ timeouts.remove(id);
try {
- handler.handle(timerID);
+ handler.handle(id);
} finally {
// Clean up after it's fired
context.removeCloseHook(this);
@@ -918,7 +925,7 @@ private boolean cancel() {
private boolean tryCancel() {
if (disposed.compareAndSet(false, true)) {
- timeouts.remove(timerID);
+ timeouts.remove(id);
future.cancel(false);
return true;
} else {
@@ -999,8 +1006,9 @@ public synchronized TimeoutStream handler(Handler<Long> handler) {
if (id != null) {
throw new IllegalStateException();
}
+ ContextInternal ctx = getOrCreateContext();
this.handler = handler;
- id = scheduleTimeout(getOrCreateContext(), periodic, delay, TimeUnit.MILLISECONDS, this);
+ this.id = scheduleTimeout(ctx, periodic, delay, TimeUnit.MILLISECONDS, ctx.isDeployment(), this);
} else {
cancel();
}
| diff --git a/src/test/java/io/vertx/core/TimerTest.java b/src/test/java/io/vertx/core/TimerTest.java
--- a/src/test/java/io/vertx/core/TimerTest.java
+++ b/src/test/java/io/vertx/core/TimerTest.java
@@ -460,7 +460,7 @@ public void testRaceWhenTimerCreatedOutsideEventLoop() {
for (int i = 0;i < numThreads;i++) {
Thread th = new Thread(() -> {
// We need something more aggressive than a millisecond for this test
- ((VertxImpl)vertx).scheduleTimeout(((VertxImpl) vertx).getOrCreateContext(), false, 1, TimeUnit.NANOSECONDS, ignore -> {
+ ((VertxImpl)vertx).scheduleTimeout(((VertxImpl) vertx).getOrCreateContext(), false, 1, TimeUnit.NANOSECONDS, false, ignore -> {
count.decrementAndGet();
});
});
@@ -469,4 +469,25 @@ public void testRaceWhenTimerCreatedOutsideEventLoop() {
}
waitUntil(() -> count.get() == 0);
}
+
+ @Test
+ public void testContextTimer() {
+ waitFor(2);
+ vertx.deployVerticle(new AbstractVerticle() {
+ @Override
+ public void start() throws Exception {
+ ((ContextInternal)context).setTimer(1000, id -> {
+ complete();
+ });
+ context.runOnContext(v -> {
+ vertx.undeploy(context.deploymentID(), onSuccess(ar -> {
+ ((ContextInternal)context).setTimer(1, id -> {
+ complete();
+ });
+ }));
+ });
+ }
+ });
+ await();
+ }
}
| IllegalStateException on vertx.close: easy to reproduce
### Questions
Vert.x raises an unhandled IllegalStateException on every vertx.close.
```
java.lang.IllegalStateException: null
at io.vertx.core.impl.CloseFuture.add(CloseFuture.java:56)
at io.vertx.core.impl.ContextInternal.addCloseHook(ContextInternal.java:305)
at io.vertx.core.impl.VertxImpl.scheduleTimeout(VertxImpl.java:521)
at io.vertx.core.impl.VertxImpl.setTimer(VertxImpl.java:370)
at io.vertx.core.http.impl.WebSocketImplBase.initiateConnectionCloseTimeout(WebSocketImplBase.java:566)
at io.vertx.core.http.impl.ServerWebSocketImpl.lambda$close$0(ServerWebSocketImpl.java:149)
at io.vertx.core.impl.future.FutureImpl$3.onSuccess(FutureImpl.java:141)
at io.vertx.core.impl.future.FutureBase.lambda$emitSuccess$0(FutureBase.java:54)
at io.vertx.core.impl.EventLoopContext.execute(EventLoopContext.java:87)
at io.vertx.core.impl.DuplicatedContext.execute(DuplicatedContext.java:179)
at io.vertx.core.impl.future.FutureBase.emitSuccess(FutureBase.java:51)
at io.vertx.core.impl.future.FutureImpl.addListener(FutureImpl.java:196)
at io.vertx.core.impl.future.PromiseImpl.addListener(PromiseImpl.java:23)
at io.vertx.core.impl.future.FutureImpl.onComplete(FutureImpl.java:164)
at io.vertx.core.impl.future.PromiseImpl.onComplete(PromiseImpl.java:23)
at io.vertx.core.http.impl.ServerWebSocketImpl.close(ServerWebSocketImpl.java:145)
at io.vertx.core.http.impl.WebSocketImplBase.close(WebSocketImplBase.java:142)
at io.vertx.core.http.impl.Http1xConnectionBase.close(Http1xConnectionBase.java:95)
at io.vertx.core.http.impl.Http1xServerConnection.close(Http1xServerConnection.java:72)
at io.vertx.core.net.impl.ConnectionBase.close(ConnectionBase.java:131)
at io.vertx.core.net.impl.VertxHandler.close(VertxHandler.java:158)
at io.netty.channel.AbstractChannelHandlerContext.invokeClose(AbstractChannelHandlerContext.java:622)
at io.netty.channel.AbstractChannelHandlerContext.access$1200(AbstractChannelHandlerContext.java:61)
at io.netty.channel.AbstractChannelHandlerContext$11.run(AbstractChannelHandlerContext.java:611)
at io.netty.util.concurrent.AbstractEventExecutor.runTask(AbstractEventExecutor.java:174)
at io.netty.util.concurrent.AbstractEventExecutor.safeExecute(AbstractEventExecutor.java:167)
at io.netty.util.concurrent.SingleThreadEventExecutor.runAllTasks(SingleThreadEventExecutor.java:470)
at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:503)
at io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:995)
at io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
at io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
at java.base/java.lang.Thread.run(Thread.java:829)
```
### Version
4.3.1
### Context
Always reproduces.
### Do you have a reproducer?
Yes.
### Steps to reproduce
Execute the following Kotlin code. During the Thread.sleep you will see the mentioned exception.
```kotlin
val vertx = Vertx.vertx()
vertx.deployVerticle(object : AbstractVerticle() {
override fun start(startPromise: Promise<Void>) {
vertx.createHttpServer().webSocketHandler {
it.accept()
}.listen(8080)
startPromise.complete()
}
})
try {
val client = HttpClient.newHttpClient()
val listener = object : WebSocket.Listener {}
client.newWebSocketBuilder().buildAsync(URI("ws://127.0.0.1:8080/"), listener).join()
} finally {
vertx.close()
}
Thread.sleep(1000)
```
Waiting for callbacks to finish doesn't change the exception.
| 2022-06-20T13:16:23Z | 4.3 |
|
eclipse-vertx/vert.x | 4,377 | eclipse-vertx__vert.x-4377 | [
"4376"
] | 9037e3f8d5db4bcc2f0e3e89c84fe9ee48b36bce | diff --git a/src/main/java/io/vertx/core/impl/AbstractContext.java b/src/main/java/io/vertx/core/impl/AbstractContext.java
--- a/src/main/java/io/vertx/core/impl/AbstractContext.java
+++ b/src/main/java/io/vertx/core/impl/AbstractContext.java
@@ -18,6 +18,7 @@
import io.vertx.core.impl.launcher.VertxCommandLauncher;
import java.util.List;
+import java.util.concurrent.TimeUnit;
/**
* A context implementation that does not hold any specific state.
@@ -84,13 +85,13 @@ public final void endDispatch(ContextInternal previous) {
@Override
public long setPeriodic(long delay, Handler<Long> handler) {
VertxImpl owner = (VertxImpl) owner();
- return owner.scheduleTimeout(this, handler, delay, true);
+ return owner.scheduleTimeout(this, true, delay, TimeUnit.MILLISECONDS, handler);
}
@Override
public long setTimer(long delay, Handler<Long> handler) {
VertxImpl owner = (VertxImpl) owner();
- return owner.scheduleTimeout(this, handler, delay, false);
+ return owner.scheduleTimeout(this, false, delay, TimeUnit.MILLISECONDS,handler);
}
@Override
diff --git a/src/main/java/io/vertx/core/impl/VertxImpl.java b/src/main/java/io/vertx/core/impl/VertxImpl.java
--- a/src/main/java/io/vertx/core/impl/VertxImpl.java
+++ b/src/main/java/io/vertx/core/impl/VertxImpl.java
@@ -77,6 +77,7 @@
import java.util.Map;
import java.util.Set;
import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;
@@ -357,7 +358,7 @@ public EventBus eventBus() {
}
public long setPeriodic(long delay, Handler<Long> handler) {
- return scheduleTimeout(getOrCreateContext(), handler, delay, true);
+ return scheduleTimeout(getOrCreateContext(), true, delay, TimeUnit.MILLISECONDS, handler);
}
@Override
@@ -366,7 +367,7 @@ public TimeoutStream periodicStream(long delay) {
}
public long setTimer(long delay, Handler<Long> handler) {
- return scheduleTimeout(getOrCreateContext(), handler, delay, false);
+ return scheduleTimeout(getOrCreateContext(), false, delay, TimeUnit.MILLISECONDS, handler);
}
@Override
@@ -451,10 +452,9 @@ public Metrics getMetrics() {
}
public boolean cancelTimer(long id) {
- InternalTimerHandler handler = timeouts.remove(id);
+ InternalTimerHandler handler = timeouts.get(id);
if (handler != null) {
- handler.cancel();
- return true;
+ return handler.cancel();
} else {
return false;
}
@@ -510,16 +510,22 @@ public DnsClient createDnsClient(DnsClientOptions options) {
return new DnsClientImpl(this, options);
}
- public long scheduleTimeout(ContextInternal context, Handler<Long> handler, long delay, boolean periodic) {
+ public long scheduleTimeout(ContextInternal context, boolean periodic, long delay, TimeUnit timeUnit, Handler<Long> handler) {
if (delay < 1) {
throw new IllegalArgumentException("Cannot schedule a timer with delay < 1 ms");
}
long timerId = timeoutCounter.getAndIncrement();
- InternalTimerHandler task = new InternalTimerHandler(timerId, handler, periodic, delay, context);
+ InternalTimerHandler task = new InternalTimerHandler(timerId, handler, periodic, context);
timeouts.put(timerId, task);
if (context.isDeployment()) {
context.addCloseHook(task);
}
+ EventLoop el = context.nettyEventLoop();
+ if (periodic) {
+ task.future = el.scheduleAtFixedRate(task, delay, delay, timeUnit);
+ } else {
+ task.future = el.schedule(task, delay, timeUnit);
+ }
return timerId;
}
@@ -869,19 +875,14 @@ private class InternalTimerHandler implements Handler<Void>, Closeable, Runnable
private final boolean periodic;
private final long timerID;
private final ContextInternal context;
- private final java.util.concurrent.Future<?> future;
+ private final AtomicBoolean disposed = new AtomicBoolean();
+ private volatile java.util.concurrent.Future<?> future;
- InternalTimerHandler(long timerID, Handler<Long> runnable, boolean periodic, long delay, ContextInternal context) {
+ InternalTimerHandler(long timerID, Handler<Long> runnable, boolean periodic, ContextInternal context) {
this.context = context;
this.timerID = timerID;
this.handler = runnable;
this.periodic = periodic;
- EventLoop el = context.nettyEventLoop();
- if (periodic) {
- future = el.scheduleAtFixedRate(this, delay, delay, TimeUnit.MILLISECONDS);
- } else {
- future = el.schedule(this, delay, TimeUnit.MILLISECONDS);
- }
}
@Override
@@ -891,10 +892,11 @@ public void run() {
public void handle(Void v) {
if (periodic) {
- if (timeouts.containsKey(timerID)) {
+ if (!disposed.get()) {
handler.handle(timerID);
}
- } else if (timeouts.remove(timerID) != null) {
+ } else if (disposed.compareAndSet(false, true)) {
+ timeouts.remove(timerID);
try {
handler.handle(timerID);
} finally {
@@ -904,18 +906,29 @@ public void handle(Void v) {
}
}
- private void cancel() {
- future.cancel(false);
- if (context.isDeployment()) {
- context.removeCloseHook(this);
+ private boolean cancel() {
+ boolean cancelled = tryCancel();
+ if (cancelled) {
+ if (context.isDeployment()) {
+ context.removeCloseHook(this);
+ }
+ }
+ return cancelled;
+ }
+
+ private boolean tryCancel() {
+ if (disposed.compareAndSet(false, true)) {
+ timeouts.remove(timerID);
+ future.cancel(false);
+ return true;
+ } else {
+ return false;
}
}
// Called via Context close hook when Verticle is undeployed
public void close(Promise<Void> completion) {
- if (timeouts.remove(timerID) != null) {
- future.cancel(false);
- }
+ tryCancel();
completion.complete();
}
}
@@ -987,7 +1000,7 @@ public synchronized TimeoutStream handler(Handler<Long> handler) {
throw new IllegalStateException();
}
this.handler = handler;
- id = scheduleTimeout(getOrCreateContext(), this, delay, periodic);
+ id = scheduleTimeout(getOrCreateContext(), periodic, delay, TimeUnit.MILLISECONDS, this);
} else {
cancel();
}
| diff --git a/src/test/java/io/vertx/core/TimerTest.java b/src/test/java/io/vertx/core/TimerTest.java
--- a/src/test/java/io/vertx/core/TimerTest.java
+++ b/src/test/java/io/vertx/core/TimerTest.java
@@ -12,11 +12,14 @@
package io.vertx.core;
import io.vertx.core.impl.ContextInternal;
+import io.vertx.core.impl.VertxImpl;
import io.vertx.core.impl.VertxInternal;
import io.vertx.core.streams.ReadStream;
+import io.vertx.test.core.Repeat;
import io.vertx.test.core.VertxTestBase;
import org.junit.Test;
+import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
@@ -446,4 +449,24 @@ public void handle(Long l) {
});
await();
}
+
+ @Repeat(times = 100)
+ @Test
+ public void testRaceWhenTimerCreatedOutsideEventLoop() {
+ int numThreads = 1000;
+ int numIter = 1;
+ Thread[] threads = new Thread[numThreads];
+ AtomicInteger count = new AtomicInteger(numIter * numThreads);
+ for (int i = 0;i < numThreads;i++) {
+ Thread th = new Thread(() -> {
+ // We need something more aggressive than a millisecond for this test
+ ((VertxImpl)vertx).scheduleTimeout(((VertxImpl) vertx).getOrCreateContext(), false, 1, TimeUnit.NANOSECONDS, ignore -> {
+ count.decrementAndGet();
+ });
+ });
+ th.start();
+ threads[i] = th;
+ }
+ waitUntil(() -> count.get() == 0);
+ }
}
| Timers created outside event loop might not fire
When a timer is created from a non-Vert.x thread, the timer task can race with its registration: the scheduled task may execute before the internal timer handler has been added to the Vert.x timeout map. When that happens, the timer never fires, because the map lookup (intended to detect whether the task was cancelled by another thread) finds no entry and treats the timer as cancelled.
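A hedged sketch of the race follows. The project's regression test drives this through an internal nanosecond-resolution scheduling API; this version uses the public `setTimer` with its 1 ms minimum delay, so it may reproduce only rarely:

```java
import io.vertx.core.Vertx;
import java.util.concurrent.atomic.AtomicInteger;

public class TimerRaceSketch {
  public static void main(String[] args) throws Exception {
    Vertx vertx = Vertx.vertx();
    int n = 1000;
    AtomicInteger fired = new AtomicInteger();
    for (int i = 0; i < n; i++) {
      // Scheduling from a plain thread is the precondition for the race
      new Thread(() -> vertx.setTimer(1, id -> fired.incrementAndGet())).start();
    }
    Thread.sleep(2_000);
    // On affected versions some timers may be silently lost: fired.get() < n
    System.out.println("fired " + fired.get() + " of " + n);
    vertx.close();
  }
}
```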
| 2022-05-17T08:18:37Z | 4.3 |
|
eclipse-vertx/vert.x | 4,311 | eclipse-vertx__vert.x-4311 | [
"3856"
] | d39c319c92ea6b24a00256efba65253cf6e4c366 | diff --git a/src/main/java/io/vertx/core/file/AsyncFile.java b/src/main/java/io/vertx/core/file/AsyncFile.java
--- a/src/main/java/io/vertx/core/file/AsyncFile.java
+++ b/src/main/java/io/vertx/core/file/AsyncFile.java
@@ -12,6 +12,7 @@
package io.vertx.core.file;
import io.vertx.codegen.annotations.Fluent;
+import io.vertx.codegen.annotations.Nullable;
import io.vertx.codegen.annotations.VertxGen;
import io.vertx.core.AsyncResult;
import io.vertx.core.Future;
@@ -203,4 +204,48 @@ default void size(Handler<AsyncResult<Long>> handler) {
* @return the size of the file
*/
Future<Long> size();
+
+ /**
+ * Try to acquire a non-shared lock on the entire file.
+ *
+ * @return the lock if it can be acquired immediately, otherwise {@code null}
+ */
+ @Nullable AsyncFileLock tryLock();
+
+ /**
+ * Try to acquire a lock on a portion of this file.
+ *
+ * @param position where the region starts
+ * @param size the size of the region
+ * @param shared whether the lock should be shared
+ * @return the lock if it can be acquired immediately, otherwise {@code null}
+ */
+ @Nullable AsyncFileLock tryLock(long position, long size, boolean shared);
+
+ /**
+ * Acquire a non-shared lock on the entire file.
+ *
+ * @return a future indicating the completion of this operation
+ */
+ Future<AsyncFileLock> lock();
+
+ /**
+ * Like {@link #lock()} but the {@code handler} will be called when the operation is complete or if an error occurs.
+ */
+ void lock(Handler<AsyncResult<AsyncFileLock>> handler);
+
+ /**
+ * Acquire a lock on a portion of this file.
+ *
+ * @param position where the region starts
+ * @param size the size of the region
+ * @param shared whether the lock should be shared
+ * @return a future indicating the completion of this operation
+ */
+ Future<AsyncFileLock> lock(long position, long size, boolean shared);
+
+ /**
+ * Like {@link #lock(long, long, boolean)} but the {@code handler} will be called when the operation is complete or if an error occurs.
+ */
+ void lock(long position, long size, boolean shared, Handler<AsyncResult<AsyncFileLock>> handler);
}
diff --git a/src/main/java/io/vertx/core/file/AsyncFileLock.java b/src/main/java/io/vertx/core/file/AsyncFileLock.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/io/vertx/core/file/AsyncFileLock.java
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2011-2022 Contributors to the Eclipse Foundation
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+ * which is available at https://www.apache.org/licenses/LICENSE-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+ */
+
+package io.vertx.core.file;
+
+import io.vertx.codegen.annotations.VertxGen;
+import io.vertx.core.AsyncResult;
+import io.vertx.core.Future;
+import io.vertx.core.Handler;
+
+/**
+ * A lock on a region of an {@link AsyncFile}.
+ */
+@VertxGen
+public interface AsyncFileLock {
+
+ /**
+ * @return the position of the first byte of the locked region
+ */
+ long position();
+
+ /**
+ * @return the size in bytes of the locked region
+ */
+ long size();
+
+ /**
+ * @return {@code true} if this lock is shared, otherwise {@code false}
+ */
+ boolean isShared();
+
+ /**
+ * @return {@code true} if this lock overlaps with the range described by {@code position} and {@code size}, otherwise {@code false}
+ */
+ boolean overlaps(long position, long size);
+
+ /**
+ * Like {@link #isValid()} but blocking.
+ *
+ * @throws FileSystemException if an error occurs
+ */
+ boolean isValidBlocking();
+
+ /**
+ * A lock remains valid until it is released or the file corresponding {@link AsyncFile} is closed.
+ */
+ Future<Boolean> isValid();
+
+ /**
+ * Like {@link #isValid()} but the {@code handler} will be called when the operation completes or if an error occurs.
+ */
+ void isValid(Handler<AsyncResult<Boolean>> handler);
+
+ /**
+ * Like {@link #release()} but blocking.
+ *
+ * @throws FileSystemException if an error occurs
+ */
+ void releaseBlocking();
+
+ /**
+ * Releases this lock;
+ */
+ Future<Void> release();
+
+ /**
+ * Like {@link #release()} but the {@code handler} will be called when the operation completes or if an error occurs.
+ */
+ void release(Handler<AsyncResult<Void>> handler);
+}
diff --git a/src/main/java/io/vertx/core/file/impl/AsyncFileImpl.java b/src/main/java/io/vertx/core/file/impl/AsyncFileImpl.java
--- a/src/main/java/io/vertx/core/file/impl/AsyncFileImpl.java
+++ b/src/main/java/io/vertx/core/file/impl/AsyncFileImpl.java
@@ -18,11 +18,13 @@
import io.vertx.core.Promise;
import io.vertx.core.buffer.Buffer;
import io.vertx.core.file.AsyncFile;
+import io.vertx.core.file.AsyncFileLock;
import io.vertx.core.file.FileSystemException;
import io.vertx.core.file.OpenOptions;
import io.vertx.core.impl.Arguments;
import io.vertx.core.impl.ContextInternal;
import io.vertx.core.impl.VertxInternal;
+import io.vertx.core.impl.future.PromiseInternal;
import io.vertx.core.impl.logging.Logger;
import io.vertx.core.impl.logging.LoggerFactory;
import io.vertx.core.streams.impl.InboundBuffer;
@@ -30,6 +32,8 @@
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.AsynchronousFileChannel;
+import java.nio.channels.CompletionHandler;
+import java.nio.channels.FileLock;
import java.nio.file.OpenOption;
import java.nio.file.Path;
import java.nio.file.Paths;
@@ -590,4 +594,69 @@ public Future<Long> size() {
prom.complete(sizeBlocking());
});
}
+
+ @Override
+ public AsyncFileLock tryLock() {
+ try {
+ return new AsyncFileLockImpl(vertx, ch.tryLock());
+ } catch (IOException e) {
+ throw new FileSystemException(e);
+ }
+ }
+
+ @Override
+ public AsyncFileLock tryLock(long position, long size, boolean shared) {
+ try {
+ return new AsyncFileLockImpl(vertx, ch.tryLock(position, size, shared));
+ } catch (IOException e) {
+ throw new FileSystemException(e);
+ }
+ }
+
+ @Override
+ public Future<AsyncFileLock> lock() {
+ return lock(0, Long.MAX_VALUE, false);
+ }
+
+ @Override
+ public void lock(Handler<AsyncResult<AsyncFileLock>> handler) {
+ Future<AsyncFileLock> future = lock();
+ if (handler != null) {
+ future.onComplete(handler);
+ }
+ }
+
+ private static CompletionHandler<FileLock, PromiseInternal<AsyncFileLock>> LOCK_COMPLETION = new CompletionHandler<FileLock, PromiseInternal<AsyncFileLock>>() {
+ @Override
+ public void completed(FileLock result, PromiseInternal<AsyncFileLock> p) {
+ p.complete(new AsyncFileLockImpl(p.context().owner(), result));
+ }
+
+ @Override
+ public void failed(Throwable t, PromiseInternal<AsyncFileLock> p) {
+ p.fail(new FileSystemException(t));
+ }
+ };
+
+ @Override
+ public Future<AsyncFileLock> lock(long position, long size, boolean shared) {
+ PromiseInternal<AsyncFileLock> promise = vertx.promise();
+ vertx.executeBlockingInternal(prom -> {
+ ch.lock(position, size, shared, promise, LOCK_COMPLETION);
+ }, ar -> {
+ if (ar.failed()) {
+ // Happens only if ch.lock throws a RuntimeException
+ promise.fail(new FileSystemException(ar.cause()));
+ }
+ });
+ return promise.future();
+ }
+
+ @Override
+ public void lock(long position, long size, boolean shared, Handler<AsyncResult<AsyncFileLock>> handler) {
+ Future<AsyncFileLock> future = lock(position, size, shared);
+ if (handler != null) {
+ future.onComplete(handler);
+ }
+ }
}
diff --git a/src/main/java/io/vertx/core/file/impl/AsyncFileLockImpl.java b/src/main/java/io/vertx/core/file/impl/AsyncFileLockImpl.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/io/vertx/core/file/impl/AsyncFileLockImpl.java
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2011-2022 Contributors to the Eclipse Foundation
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+ * which is available at https://www.apache.org/licenses/LICENSE-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+ */
+
+package io.vertx.core.file.impl;
+
+import io.vertx.core.AsyncResult;
+import io.vertx.core.Future;
+import io.vertx.core.Handler;
+import io.vertx.core.file.AsyncFileLock;
+import io.vertx.core.file.FileSystemException;
+import io.vertx.core.impl.VertxInternal;
+
+import java.io.IOException;
+import java.nio.channels.FileLock;
+import java.util.Objects;
+
+public class AsyncFileLockImpl implements AsyncFileLock {
+
+ private final VertxInternal vertx;
+ private final FileLock fileLock;
+
+ public AsyncFileLockImpl(VertxInternal vertx, FileLock fileLock) {
+ this.vertx = Objects.requireNonNull(vertx, "vertx is null");
+ this.fileLock = Objects.requireNonNull(fileLock, "fileLock is null");
+ }
+
+ @Override
+ public long position() {
+ return fileLock.position();
+ }
+
+ @Override
+ public long size() {
+ return fileLock.size();
+ }
+
+ @Override
+ public boolean isShared() {
+ return fileLock.isShared();
+ }
+
+ @Override
+ public boolean overlaps(long position, long size) {
+ return fileLock.overlaps(position, size);
+ }
+
+ @Override
+ public boolean isValidBlocking() {
+ return fileLock.isValid();
+ }
+
+ @Override
+ public Future<Boolean> isValid() {
+ return vertx.getOrCreateContext().executeBlockingInternal(prom -> {
+ prom.complete(isValidBlocking());
+ });
+ }
+
+ @Override
+ public void isValid(Handler<AsyncResult<Boolean>> handler) {
+ Future<Boolean> future = isValid();
+ if (handler != null) {
+ future.onComplete(handler);
+ }
+ }
+
+ @Override
+ public void releaseBlocking() {
+ try {
+ fileLock.release();
+ } catch (IOException e) {
+ throw new FileSystemException(e);
+ }
+ }
+
+ @Override
+ public Future<Void> release() {
+ return vertx.getOrCreateContext().executeBlockingInternal(prom -> {
+ try {
+ fileLock.release();
+ prom.complete();
+ } catch (IOException e) {
+ prom.fail(new FileSystemException(e));
+ }
+ });
+ }
+
+ @Override
+ public void release(Handler<AsyncResult<Void>> handler) {
+ Future<Void> future = release();
+ if (handler != null) {
+ future.onComplete(handler);
+ }
+ }
+}
| diff --git a/src/test/java/io/vertx/core/file/FileSystemTest.java b/src/test/java/io/vertx/core/file/FileSystemTest.java
--- a/src/test/java/io/vertx/core/file/FileSystemTest.java
+++ b/src/test/java/io/vertx/core/file/FileSystemTest.java
@@ -31,6 +31,7 @@
import java.io.File;
import java.io.IOException;
+import java.nio.channels.OverlappingFileLockException;
import java.nio.file.*;
import java.nio.file.attribute.*;
import java.util.EnumSet;
@@ -2336,4 +2337,34 @@ public void testFileSizeBlocking() throws Exception {
file.close(onSuccess(v -> testComplete()));
await();
}
+
+ @Test
+ public void testFileLocking() throws Exception {
+ Assume.assumeFalse(Utils.isWindows());
+ String path = tmpFile(".lock").getAbsolutePath();
+ FileSystem fs = vertx.fileSystem();
+ fs.writeFileBlocking(path, Buffer.buffer("HelloLocks"));
+
+ AsyncFile file1 = fs.openBlocking(path, new OpenOptions());
+ AsyncFile file2 = fs.openBlocking(path, new OpenOptions());
+
+ file1.lock(0, "Hello".length(), false, onSuccess(lock1 -> {
+ file2.lock(onFailure(t -> {
+ assertTrue(t instanceof FileSystemException);
+ assertTrue(t.getCause() instanceof OverlappingFileLockException);
+ file2.lock("Hello".length(), "Locks".length(), false, onSuccess(lock2 -> {
+ lock1.release(onSuccess(r1 -> {
+ lock2.release(onSuccess(r2 -> {
+ testComplete();
+ }));
+ }));
+ }));
+ }));
+ }));
+
+ await();
+
+ file1.close();
+ file2.close();
+ }
}
| filesystem api: expose file locking operations
The `lock` and `tryLock` methods of `AsynchronousFileChannel` should be made available for `AsyncFile` wrappers, so one can perform proper file locking operations.
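Based on the `AsyncFile#lock` and `AsyncFileLock#release` signatures introduced by the patch above, usage could look like the following sketch (the file name and locked region are arbitrary):

```java
import io.vertx.core.Vertx;
import io.vertx.core.file.AsyncFile;
import io.vertx.core.file.FileSystem;
import io.vertx.core.file.OpenOptions;

public class FileLockSketch {
  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();
    FileSystem fs = vertx.fileSystem();
    AsyncFile file = fs.openBlocking("data.lock",
      new OpenOptions().setCreate(true).setWrite(true));
    // Acquire an exclusive lock on the first five bytes of the file
    file.lock(0, 5, false, ar -> {
      if (ar.succeeded()) {
        // ... critical section: read/write the locked region ...
        ar.result().release(done -> file.close());
      } else {
        ar.cause().printStackTrace();
      }
    });
  }
}
```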
| Hi @panchmp , @vietj is this still open for the grab ?
yes it is.
Hi @vietj @tsegismont is this still available?
Yes | 2022-03-15T18:23:03Z | 4.2 |
eclipse-vertx/vert.x | 4,307 | eclipse-vertx__vert.x-4307 | [
"4304"
] | 56eb506bf9d1380c863b955cac18976aea58f94b | diff --git a/src/main/java/io/vertx/core/eventbus/EventBusOptions.java b/src/main/java/io/vertx/core/eventbus/EventBusOptions.java
--- a/src/main/java/io/vertx/core/eventbus/EventBusOptions.java
+++ b/src/main/java/io/vertx/core/eventbus/EventBusOptions.java
@@ -173,6 +173,10 @@ public EventBusOptions(JsonObject json) {
public JsonObject toJson() {
JsonObject json = new JsonObject();
EventBusOptionsConverter.toJson(this, json);
+ final String clusterPublicPortName = "clusterPublicPort";
+ if (json.containsKey(clusterPublicPortName) && json.getInteger(clusterPublicPortName) == DEFAULT_CLUSTER_PUBLIC_PORT) {
+ json.remove(clusterPublicPortName);
+ }
return json;
}
| diff --git a/src/test/java/io/vertx/core/VertxOptionsTest.java b/src/test/java/io/vertx/core/VertxOptionsTest.java
--- a/src/test/java/io/vertx/core/VertxOptionsTest.java
+++ b/src/test/java/io/vertx/core/VertxOptionsTest.java
@@ -314,6 +314,14 @@ public void testDefaultJsonOptions() {
assertEquals(def.getBlockedThreadCheckIntervalUnit(), json.getBlockedThreadCheckIntervalUnit());
}
+ @Test
+ public void testDefaultJsonVertxOptions() {
+ VertxOptions vertxOptions1 = new VertxOptions();
+ JsonObject json = vertxOptions1.toJson();
+ VertxOptions vertxOptions2 = new VertxOptions(json);
+ assertEquals(json, vertxOptions2.toJson());
+ }
+
@Test
public void testJsonOptions() {
VertxOptions options = new VertxOptions(new JsonObject());
| clusterPublicPort in EventBusOptions should be ignored in toJson() result if it is the default value
### Questions
`-1` is the default value of `clusterPublicPort` in `EventBusOptions`, but feeding this default value back through the JSON constructor fails:
```java
@Test
public void testEventBusOptions() {
VertxOptions vertxOptions1 = new VertxOptions();
JsonObject json = vertxOptions1.toJson();
VertxOptions vertxOptions2 = new VertxOptions(json); // <== this line leads to exception
assertEquals(json, vertxOptions2.toJson());
}
```
The exception is:
```java
java.lang.IllegalArgumentException: clusterPublicPort p must be in range 0 <= p <= 65535
at io.vertx.core.eventbus.EventBusOptions.setClusterPublicPort(EventBusOptions.java:651)
at io.vertx.core.eventbus.EventBusOptionsConverter.fromJson(EventBusOptionsConverter.java:55)
at io.vertx.core.eventbus.EventBusOptions.<init>(EventBusOptions.java:165)
at io.vertx.core.VertxOptionsConverter.fromJson(VertxOptionsConverter.java:45)
at io.vertx.core.VertxOptions.<init>(VertxOptions.java:181)
at io.vertx.core.VertxOptionsTest.testEventBusOptions(VertxOptionsTest.java:321)
```
This does not make sense, as we cannot reconstruct a VertxOptions instance from the JsonObject exported by a default VertxOptions instance.
### Version
latest `master` branch
### Do you have a reproducer?
Please refer to https://gist.github.com/gaol/7593cebcb2d1328d4ed50ece5ee1ac70 for more detail.
### Contribution
I will provide a PR for the fix
| 2022-03-14T14:08:19Z | 4.2 |
|
eclipse-vertx/vert.x | 4,225 | eclipse-vertx__vert.x-4225 | [
"4224"
] | 73c58bed36ee4a8892a397e789ddbd592bfc489f | diff --git a/src/main/java/io/vertx/core/net/impl/pool/PoolWaiter.java b/src/main/java/io/vertx/core/net/impl/pool/PoolWaiter.java
--- a/src/main/java/io/vertx/core/net/impl/pool/PoolWaiter.java
+++ b/src/main/java/io/vertx/core/net/impl/pool/PoolWaiter.java
@@ -53,6 +53,7 @@ default void onConnect(PoolWaiter<C> waiter) {
PoolWaiter<C> prev;
PoolWaiter<C> next;
boolean disposed;
+ boolean queued;
PoolWaiter(PoolWaiter.Listener<C> listener, ContextInternal context, final int capacity, Handler<AsyncResult<Lease<C>>> handler) {
this.listener = listener;
diff --git a/src/main/java/io/vertx/core/net/impl/pool/SimpleConnectionPool.java b/src/main/java/io/vertx/core/net/impl/pool/SimpleConnectionPool.java
--- a/src/main/java/io/vertx/core/net/impl/pool/SimpleConnectionPool.java
+++ b/src/main/java/io/vertx/core/net/impl/pool/SimpleConnectionPool.java
@@ -654,8 +654,9 @@ public void run() {
}
};
}
- if (pool.waiters.removeFirst(waiter)) {
+ if (pool.waiters.remove(waiter)) {
cancelled = true;
+ waiter.disposed = true;
} else if (!waiter.disposed) {
waiter.disposed = true;
cancelled = true;
@@ -710,8 +711,8 @@ public Recycle(Slot<C> slot) {
@Override
public Task execute(SimpleConnectionPool<C> pool) {
if (!pool.closed && slot.connection != null) {
- if (pool.waiters.size() > 0) {
- PoolWaiter<C> waiter = pool.waiters.poll();
+ PoolWaiter<C> waiter = pool.waiters.poll();
+ if (waiter != null) {
LeaseImpl<C> lease = new LeaseImpl<>(slot, waiter.handler);
return new Task() {
@Override
@@ -810,11 +811,15 @@ PoolWaiter<C> poll() {
return null;
}
PoolWaiter<C> node = head.next;
- removeFirst(node);
+ remove(node);
return node;
}
void addLast(PoolWaiter<C> node) {
+ if (node.queued) {
+ throw new IllegalStateException();
+ }
+ node.queued = true;
node.prev = head.prev;
node.next = head;
head.prev.next = node;
@@ -823,6 +828,10 @@ void addLast(PoolWaiter<C> node) {
}
void addFirst(PoolWaiter<C> node) {
+ if (node.queued) {
+ throw new IllegalStateException();
+ }
+ node.queued = true;
node.prev = head;
node.next = head.prev;
head.next.prev = node;
@@ -830,12 +839,14 @@ void addFirst(PoolWaiter<C> node) {
size++;
}
- boolean removeFirst(PoolWaiter<C> node) {
- if (node.next == null) {
+ boolean remove(PoolWaiter<C> node) {
+ if (!node.queued) {
return false;
}
node.next.prev = node.prev;
node.prev.next = node.next;
+ node.next = node.prev = null;
+ node.queued = false;
size--;
return true;
}
| diff --git a/src/test/java/io/vertx/core/net/impl/pool/ConnectionPoolTest.java b/src/test/java/io/vertx/core/net/impl/pool/ConnectionPoolTest.java
--- a/src/test/java/io/vertx/core/net/impl/pool/ConnectionPoolTest.java
+++ b/src/test/java/io/vertx/core/net/impl/pool/ConnectionPoolTest.java
@@ -632,10 +632,15 @@ public void onEnqueue(PoolWaiter<Connection> waiter) {
w.complete(waiter);
}
}, 0, ar -> fail());
- w.get(10, TimeUnit.SECONDS);
- pool.cancel(w.get(10, TimeUnit.SECONDS), onSuccess(removed -> {
- assertTrue(removed);
- testComplete();
+ PoolWaiter<Connection> waiter = w.get(10, TimeUnit.SECONDS);
+ pool.cancel(waiter, onSuccess(removed1 -> {
+ assertTrue(removed1);
+ assertEquals(0, pool.waiters());
+ pool.cancel(waiter, onSuccess(removed2 -> {
+ assertFalse(removed2);
+ assertEquals(0, pool.waiters());
+ testComplete();
+ }));
}));
await();
}
Pool waiter list removal does not correctly check whether the waiter is in the list
The pool implementation incorrectly checks whether the waiter is enqueued in the list, leading to an incorrect waiter list size.
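For illustration, a minimal sketch (not the Vert.x source itself) of the technique the patch applies: list membership is tracked with an explicit `queued` flag, so removing a waiter that was already polled or cancelled becomes a no-op instead of decrementing the size counter a second time.
```java
// Intrusive doubly-linked list with a circular sentinel head, mirroring the fix.
final class WaiterList {
  static final class Node {
    Node prev, next;
    boolean queued;
  }

  private final Node head = new Node();
  private int size;

  WaiterList() {
    head.prev = head.next = head; // sentinel points at itself when empty
  }

  void addLast(Node node) {
    if (node.queued) {
      throw new IllegalStateException("already queued");
    }
    node.queued = true;
    node.prev = head.prev;
    node.next = head;
    head.prev.next = node;
    head.prev = node;
    size++;
  }

  boolean remove(Node node) {
    if (!node.queued) {
      return false; // not in the list: nothing to unlink, size stays correct
    }
    node.next.prev = node.prev;
    node.prev.next = node.next;
    node.next = node.prev = null; // fully detach
    node.queued = false;
    size--;
    return true;
  }

  int size() {
    return size;
  }
}
```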
| 2022-01-03T21:04:55Z | 4.2 |
|
eclipse-vertx/vert.x | 4,191 | eclipse-vertx__vert.x-4191 | [
"4187"
] | 774940f67a148d790174fe538505541ea1171382 | diff --git a/src/main/java/io/vertx/core/http/impl/CookieImpl.java b/src/main/java/io/vertx/core/http/impl/CookieImpl.java
--- a/src/main/java/io/vertx/core/http/impl/CookieImpl.java
+++ b/src/main/java/io/vertx/core/http/impl/CookieImpl.java
@@ -26,17 +26,26 @@
public class CookieImpl implements ServerCookie {
private final io.netty.handler.codec.http.cookie.Cookie nettyCookie;
+ // denotes if a cookie has been created from an HTTP request (true) or during the
+ // application/response life cycle (false)
+ private final boolean fromUserAgent;
+
private boolean changed;
- private boolean fromUserAgent;
- // extension features
+ // extension feature(s)
private CookieSameSite sameSite;
public CookieImpl(String name, String value) {
this.nettyCookie = new DefaultCookie(name, value);
+ fromUserAgent = false;
this.changed = true;
}
- public CookieImpl(io.netty.handler.codec.http.cookie.Cookie nettyCookie) {
+ /**
+ * Internal constructor, only used by the CookieJar.
+ *
+ * @param nettyCookie the underlying cookie object
+ */
+ CookieImpl(io.netty.handler.codec.http.cookie.Cookie nettyCookie) {
this.nettyCookie = nettyCookie;
fromUserAgent = true;
}
diff --git a/src/main/java/io/vertx/core/http/impl/CookieJar.java b/src/main/java/io/vertx/core/http/impl/CookieJar.java
--- a/src/main/java/io/vertx/core/http/impl/CookieJar.java
+++ b/src/main/java/io/vertx/core/http/impl/CookieJar.java
@@ -143,35 +143,47 @@ private static int cookieUniqueIdComparator(ServerCookie cookie, String name, St
Objects.requireNonNull(name);
int v = cookie.getName().compareTo(name);
- if (v != 0) {
- return v;
- }
- if (cookie.getPath() == null) {
- if (path != null) {
- return -1;
- }
- } else if (path == null) {
- return 1;
+ if (cookie.isFromUserAgent()) {
+ // user-agent cookies never include a path or domain, so we must assume equality
+ // just by comparing the name
+ return v;
} else {
- v = cookie.getPath().compareTo(path);
+ // perform the tuple check:
+
+ // 1. name comparison (on equals check the next parameter)
if (v != 0) {
return v;
}
- }
- if (cookie.getDomain() == null) {
- if (domain != null) {
- return -1;
+ // 2. path comparison (on equals check the next parameter)
+ if (cookie.getPath() == null) {
+ if (path != null) {
+ return -1;
+ }
+ } else if (path == null) {
+ return 1;
+ } else {
+ v = cookie.getPath().compareTo(path);
+ if (v != 0) {
+ return v;
+ }
+ }
+
+ // 3. domain comparison (on equals terminate with 0)
+ if (cookie.getDomain() == null) {
+ if (domain != null) {
+ return -1;
+ }
+ } else if (domain == null) {
+ return 1;
+ } else {
+ v = cookie.getDomain().compareToIgnoreCase(domain);
+ return v;
}
- } else if (domain == null) {
- return 1;
- } else {
- v = cookie.getDomain().compareToIgnoreCase(domain);
- return v;
- }
- return 0;
+ return 0;
+ }
}
| diff --git a/src/test/java/io/vertx/core/http/HttpTest.java b/src/test/java/io/vertx/core/http/HttpTest.java
--- a/src/test/java/io/vertx/core/http/HttpTest.java
+++ b/src/test/java/io/vertx/core/http/HttpTest.java
@@ -23,6 +23,7 @@
import io.vertx.core.file.AsyncFile;
import io.vertx.core.http.impl.CookieImpl;
import io.vertx.core.http.impl.HttpServerRequestInternal;
+import io.vertx.core.http.impl.ServerCookie;
import io.vertx.core.impl.Utils;
import io.vertx.core.net.*;
import io.vertx.core.net.impl.HAProxyMessageCompletionHandler;
@@ -6035,6 +6036,24 @@ public void testNoCookiesAddCookie() throws Exception {
});
}
+ @Test
+ public void testReplaceCookie() throws Exception {
+ testCookies("XSRF-TOKEN=c359b44aef83415", req -> {
+ assertEquals(1, req.cookieCount());
+ req.response().addCookie(Cookie.cookie("XSRF-TOKEN", "88533580000c314").setPath("/"));
+ Map<String, Cookie> deprecatedMap = req.cookieMap();
+ assertFalse(((ServerCookie) deprecatedMap.get("XSRF-TOKEN")).isFromUserAgent());
+ assertEquals("/", deprecatedMap.get("XSRF-TOKEN").getPath());
+ req.response().end();
+ }, resp -> {
+ List<String> cookies = resp.headers().getAll("set-cookie");
+ // the expired cookie must be sent back
+ assertEquals(1, cookies.size());
+ // ensure that the cookie jar was updated correctly
+ assertEquals("XSRF-TOKEN=88533580000c314; Path=/", cookies.get(0));
+ });
+ }
+
private void testCookies(String cookieHeader, Consumer<HttpServerRequest> serverChecker, Consumer<HttpClientResponse> clientChecker) throws Exception {
server.requestHandler(serverChecker::accept);
startServer(testAddress);
| addCookie method does not replace existing cookie (from browser) and fails when calling RoutingContext.cookieMap()
### Version
4.2.1
### Context
We were using (in version 3.9.9) HttpServerResponse.addCookie to set a cookie irrespective of whether the cookie was already present (i.e. came from the browser), and when we iterated over cookies using RoutingContext.cookieMap() we would get a single cookie with that name (i.e. the cookie info was updated).
After upgrading to 4.2.1, calling RoutingContext.cookieMap() fails with an exception (stack trace in the discussion below).
This method assumes cookies are unique by name. However, the implementation of addCookie() always adds a cookie to the collection (this seems to be a feature request).
```
default Map<String, Cookie> cookieMap() {
return cookies()
.stream()
.collect(Collectors.toMap(Cookie::getName, cookie -> cookie));
}
```
### Do you have a reproducer?
No.
### Steps to reproduce
Create a sample web app that renders a simple form and calls addCookie("test", "somevalue"). On form submit, call addCookie("test", "anothervalue") to modify the cookie value without removing it first. Then invoke RoutingContext.cookieMap() to get all cookies.
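A minimal sketch of these steps with the core API (the handler body is illustrative; it assumes the browser already sends `test=somevalue` back, and setting a path on the replacement is what triggers the duplicate, as the discussion below narrows down):
```java
import io.vertx.core.Vertx;
import io.vertx.core.http.Cookie;

public class Repro {
  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();
    vertx.createHttpServer().requestHandler(req -> {
      // overwrite the cookie the user agent sent, using a different path
      req.response().addCookie(Cookie.cookie("test", "anothervalue").setPath("/"));
      req.cookieMap(); // IllegalStateException: Duplicate key test
      req.response().end();
    }).listen(8080);
  }
}
```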
### Extra
| It would be good to have a setCookie or replaceCookie method with the previous semantics (add or replace by cookie name), for convenience
@narras-oss I'm not following the error:
```java
@Test
public void testReplaceCookie() throws Exception {
// receive a HTTP request with a cookie header with value `test=somevalue`
testCookies("test=somevalue", req -> {
// OK
assertEquals(1, req.cookieCount());
// is it supposed to fail here? It currently (master) doesn't
req.response().addCookie(Cookie.cookie("test", "othervalue"));
Map<String, Cookie> deprecatedMap = req.cookieMap();
req.response().end();
}, resp -> {
// received 1 cookie back (the new one replaced the one received)
// test=othervalue
// ?? what was expected?
List<String> cookies = resp.headers().getAll("set-cookie");
});
}
```
This is the code i am specifically using
```
Cookie cookie = Cookie.cookie("test", "somestring")
cookie.setSecure(true);
cookie.setPath("/");
routingContext.addCookie(cookie);
```
The stack trace of the exception thrown when calling routingContext.cookieMap() after that is below.
```
Caused by: java.lang.IllegalStateException: Duplicate key XSRF-TOKEN (attempted merging values io.vertx.core.http.impl.CookieImpl@434aa077 and io.vertx.core.http.impl.CookieImpl@4dd9a0b1)
at java.base/java.util.stream.Collectors.duplicateKeyException(Unknown Source)
at java.base/java.util.stream.Collectors.lambda$uniqKeysMapAccumulator$1(Unknown Source)
at java.base/java.util.stream.ReduceOps$3ReducingSink.accept(Unknown Source)
at java.base/java.util.ArrayList$Itr.forEachRemaining(Unknown Source)
at java.base/java.util.Spliterators$IteratorSpliterator.forEachRemaining(Unknown Source)
at java.base/java.util.stream.AbstractPipeline.copyInto(Unknown Source)
at java.base/java.util.stream.AbstractPipeline.wrapAndCopyInto(Unknown Source)
at java.base/java.util.stream.ReduceOps$ReduceOp.evaluateSequential(Unknown Source)
at java.base/java.util.stream.AbstractPipeline.evaluate(Unknown Source)
at java.base/java.util.stream.ReferencePipeline.collect(Unknown Source)
at io.vertx.core.http.HttpServerRequest.cookieMap(HttpServerRequest.java:518)
at io.vertx.ext.web.impl.RoutingContextImpl.cookieMap(RoutingContextImpl.java:282)
at io.vertx.ext.web.impl.RoutingContextDecorator.cookieMap(RoutingContextDecorator.java:84)
```
Updated steps for my specific scenario:
Specifically this is the original value of that comes from browser
XSRF-TOKEN=c359b44aef83415
Then, when you try to add a different value for the "XSRF-TOKEN" cookie with the path set to "/", as shown below
XSRF-TOKEN=88533580000c314, path=/
it is added again instead of being replaced. The bug seems to be here: cookieUniqueIdComparator returns < 0 for XSRF-TOKEN since it also compares the path. This is a recent change (probably intentional), but it breaks the deprecated method request.cookieMap().
```
public boolean add(ServerCookie cookie) {
if (cookie == null) {
throw new NullPointerException("cookie cannot be null");
}
for (int i = 0; i < list.size(); i++) {
int cmp = cookieUniqueIdComparator(list.get(i), cookie.getName(), cookie.getDomain(), cookie.getPath());
if (cmp > 0) {
// insert
list.add(i, cookie);
return true;
}
if (cmp == 0) {
// replace
list.set(i, cookie);
return true;
}
}
// reached the end
list.add(cookie);
``` | 2021-12-08T11:05:33Z | 4.2 |
eclipse-vertx/vert.x | 4,172 | eclipse-vertx__vert.x-4172 | [
"4175",
"3361"
] | ace996489c931bdc0a48ed76b2e676d7159551eb | diff --git a/src/main/generated/io/vertx/core/http/HttpClientOptionsConverter.java b/src/main/generated/io/vertx/core/http/HttpClientOptionsConverter.java
--- a/src/main/generated/io/vertx/core/http/HttpClientOptionsConverter.java
+++ b/src/main/generated/io/vertx/core/http/HttpClientOptionsConverter.java
@@ -150,6 +150,11 @@ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, HttpCli
obj.setPoolCleanerPeriod(((Number)member.getValue()).intValue());
}
break;
+ case "poolEventLoopSize":
+ if (member.getValue() instanceof Number) {
+ obj.setPoolEventLoopSize(((Number)member.getValue()).intValue());
+ }
+ break;
case "protocolVersion":
if (member.getValue() instanceof String) {
obj.setProtocolVersion(io.vertx.core.http.HttpVersion.valueOf((String)member.getValue()));
@@ -249,6 +254,7 @@ static void toJson(HttpClientOptions obj, java.util.Map<String, Object> json) {
json.put("pipelining", obj.isPipelining());
json.put("pipeliningLimit", obj.getPipeliningLimit());
json.put("poolCleanerPeriod", obj.getPoolCleanerPeriod());
+ json.put("poolEventLoopSize", obj.getPoolEventLoopSize());
if (obj.getProtocolVersion() != null) {
json.put("protocolVersion", obj.getProtocolVersion().name());
}
diff --git a/src/main/java/examples/HTTPExamples.java b/src/main/java/examples/HTTPExamples.java
--- a/src/main/java/examples/HTTPExamples.java
+++ b/src/main/java/examples/HTTPExamples.java
@@ -11,7 +11,9 @@
package examples;
+import io.vertx.core.AbstractVerticle;
import io.vertx.core.AsyncResult;
+import io.vertx.core.DeploymentOptions;
import io.vertx.core.Future;
import io.vertx.core.Handler;
import io.vertx.core.MultiMap;
@@ -1163,4 +1165,38 @@ public static void setIdentityContentEncodingHeader(HttpServerRequest request) {
.putHeader(HttpHeaders.CONTENT_ENCODING, HttpHeaders.IDENTITY)
.sendFile("/path/to/image.jpg");
}
+
+ public static void httpClientSharing1(Vertx vertx) {
+ HttpClient client = vertx.createSharedHttpClient();
+ vertx.deployVerticle(() -> new AbstractVerticle() {
+ @Override
+ public void start() throws Exception {
+ // Use the client
+ }
+ }, new DeploymentOptions().setInstances(4));
+ }
+
+ public static void httpClientSharing2(Vertx vertx) {
+ vertx.deployVerticle(() -> new AbstractVerticle() {
+ HttpClient client;
+ @Override
+ public void start() {
+ // Get or create a shared client
+ // this actually creates a lease to the client
+ // when the verticle is undeployed, the lease will be released automatically
+ client = vertx.createSharedHttpClient("my-client");
+ }
+ }, new DeploymentOptions().setInstances(4));
+ }
+
+ public static void httpClientSharing3(Vertx vertx) {
+ vertx.deployVerticle(() -> new AbstractVerticle() {
+ HttpClient client;
+ @Override
+ public void start() {
+ // The client creates and use two event-loops for 4 instances
+ client = vertx.createSharedHttpClient("my-client", new HttpClientOptions().setPoolEventLoopSize(2));
+ }
+ }, new DeploymentOptions().setInstances(4));
+ }
}
diff --git a/src/main/java/io/vertx/core/Vertx.java b/src/main/java/io/vertx/core/Vertx.java
--- a/src/main/java/io/vertx/core/Vertx.java
+++ b/src/main/java/io/vertx/core/Vertx.java
@@ -176,6 +176,30 @@ static Future<Vertx> clusteredVertx(VertxOptions options) {
*/
HttpClient createHttpClient(HttpClientOptions options);
+ /**
+ * Like {@link #createSharedHttpClient(HttpClientOptions)}, using default options.
+ */
+ HttpClient createSharedHttpClient();
+
+ /**
+ * Like {@link #createSharedHttpClient(String, HttpClientOptions)}, using the default shared client name.
+ */
+ HttpClient createSharedHttpClient(HttpClientOptions options);
+
+ /**
+ * Like {@link #createSharedHttpClient(String, HttpClientOptions)}, using default options.
+ */
+ HttpClient createSharedHttpClient(String name);
+
+ /**
+ * Create a HTTP/HTTPS client using the specified name and options.
+ *
+ * @param name the shared client name
+ * @param options the options to use
+ * @return the client
+ */
+ HttpClient createSharedHttpClient(String name, HttpClientOptions options);
+
/**
* Create a HTTP/HTTPS client using default options
*
diff --git a/src/main/java/io/vertx/core/http/HttpClientOptions.java b/src/main/java/io/vertx/core/http/HttpClientOptions.java
--- a/src/main/java/io/vertx/core/http/HttpClientOptions.java
+++ b/src/main/java/io/vertx/core/http/HttpClientOptions.java
@@ -198,6 +198,11 @@ public class HttpClientOptions extends ClientOptionsBase {
*/
public static final int DEFAULT_POOL_CLEANER_PERIOD = 1000;
+ /**
+ * Default pool event loop size = 0 (reuse current event-loop)
+ */
+ public static final int DEFAULT_POOL_EVENT_LOOP_SIZE = 0;
+
/**
* Default WebSocket closing timeout = 10 second
*/
@@ -219,6 +224,7 @@ public class HttpClientOptions extends ClientOptionsBase {
private int http2ConnectionWindowSize;
private int http2KeepAliveTimeout;
private int poolCleanerPeriod;
+ private int poolEventLoopSize;
private boolean tryUseCompression;
private int maxWebSocketFrameSize;
@@ -292,6 +298,7 @@ public HttpClientOptions(HttpClientOptions other) {
this.forceSni = other.forceSni;
this.decoderInitialBufferSize = other.getDecoderInitialBufferSize();
this.poolCleanerPeriod = other.getPoolCleanerPeriod();
+ this.poolEventLoopSize = other.getPoolEventLoopSize();
this.tryUsePerFrameWebSocketCompression = other.tryUsePerFrameWebSocketCompression;
this.tryUsePerMessageWebSocketCompression = other.tryUsePerMessageWebSocketCompression;
this.webSocketAllowClientNoContext = other.webSocketAllowClientNoContext;
@@ -359,6 +366,7 @@ private void init() {
webSocketRequestServerNoContext = DEFAULT_WEBSOCKET_REQUEST_SERVER_NO_CONTEXT;
webSocketClosingTimeout = DEFAULT_WEBSOCKET_CLOSING_TIMEOUT;
poolCleanerPeriod = DEFAULT_POOL_CLEANER_PERIOD;
+ poolEventLoopSize = DEFAULT_POOL_EVENT_LOOP_SIZE;
tracingPolicy = DEFAULT_TRACING_POLICY;
}
@@ -1325,6 +1333,33 @@ public HttpClientOptions setPoolCleanerPeriod(int poolCleanerPeriod) {
return this;
}
+ /**
+ * @return the max number of event-loops a pool will use; the default value is {@code 0}, which implies
+ * to reuse the current event-loop
+ */
+ public int getPoolEventLoopSize() {
+ return poolEventLoopSize;
+ }
+
+ /**
+ * Set the number of event-loops the pool uses.
+ *
+ * <ul>
+ * <li>when the size is {@code 0}, the client pool will use the current event-loop</li>
+ * <li>otherwise the client will create and use its own event loop</li>
+ * </ul>
+ *
+ * The default size is {@code 0}.
+ *
+ * @param poolEventLoopSize the new size
+ * @return a reference to this, so the API can be used fluently
+ */
+ public HttpClientOptions setPoolEventLoopSize(int poolEventLoopSize) {
+ Arguments.require(poolEventLoopSize >= 0, "poolEventLoopSize must be >= 0");
+ this.poolEventLoopSize = poolEventLoopSize;
+ return this;
+ }
+
/**
* @return the tracing policy
*/
diff --git a/src/main/java/io/vertx/core/http/impl/HttpClientHolder.java b/src/main/java/io/vertx/core/http/impl/HttpClientHolder.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/io/vertx/core/http/impl/HttpClientHolder.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2011-2021 Contributors to the Eclipse Foundation
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+ * which is available at https://www.apache.org/licenses/LICENSE-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+ */
+
+package io.vertx.core.http.impl;
+
+import io.vertx.core.CompositeFuture;
+import io.vertx.core.Future;
+import io.vertx.core.http.HttpClient;
+import io.vertx.core.http.HttpClientOptions;
+import io.vertx.core.impl.CloseFuture;
+import io.vertx.core.impl.VertxInternal;
+import io.vertx.core.shareddata.Shareable;
+
+public class HttpClientHolder implements Shareable {
+
+ private final int count;
+ private final HttpClient client;
+ private final CloseFuture closeFuture;
+
+ public HttpClientHolder() {
+ count = 1;
+ client = null;
+ closeFuture = null;
+ }
+
+ private HttpClientHolder(int count, HttpClient client, CloseFuture closeFuture) {
+ this.count = count;
+ this.client = client;
+ this.closeFuture = closeFuture;
+ }
+
+ public HttpClient get() {
+ return client;
+ }
+
+ public HttpClientHolder increment() {
+ return client == null ? null : new HttpClientHolder(count + 1, client, closeFuture);
+ }
+
+ public HttpClientHolder init(VertxInternal vertx, HttpClientOptions options) {
+ CloseFuture closeFuture = new CloseFuture();
+ HttpClient client = new HttpClientImpl(vertx, options, closeFuture);
+ return new HttpClientHolder(count, client, closeFuture);
+ }
+
+ public HttpClientHolder decrement() {
+ return count == 1 ? null : new HttpClientHolder(count - 1, client, closeFuture);
+ }
+
+ public Future<Void> close() {
+ return CompositeFuture.all(client.close(), closeFuture.close()).mapEmpty();
+ }
+
+ @Override
+ public String toString() {
+ return "HttpClientHolder{" +
+ "count=" + count +
+ '}';
+ }
+}
diff --git a/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java b/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java
--- a/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java
@@ -29,6 +29,7 @@
import io.vertx.core.net.ProxyOptions;
import io.vertx.core.net.ProxyType;
import io.vertx.core.net.SocketAddress;
+import io.vertx.core.net.impl.pool.ConnectionPool;
import io.vertx.core.net.impl.pool.Endpoint;
import io.vertx.core.net.impl.pool.Lease;
import io.vertx.core.spi.metrics.ClientMetrics;
@@ -44,6 +45,7 @@
import java.util.Collections;
import java.util.List;
import java.util.Objects;
+import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Predicate;
@@ -132,6 +134,7 @@ public class HttpClientImpl implements HttpClient, MetricsProvider, Closeable {
private Predicate<SocketAddress> proxyFilter;
private volatile Handler<HttpConnection> connectionHandler;
private volatile Function<HttpClientResponse, Future<RequestOptions>> redirectHandler = DEFAULT_HANDLER;
+ private final Function<ContextInternal, EventLoopContext> contextProvider;
public HttpClientImpl(VertxInternal vertx, HttpClientOptions options, CloseFuture closeFuture) {
this.vertx = vertx;
@@ -169,7 +172,22 @@ public HttpClientImpl(VertxInternal vertx, HttpClientOptions options, CloseFutur
httpCM = httpConnectionManager();
if (options.getPoolCleanerPeriod() > 0 && (options.getKeepAliveTimeout() > 0L || options.getHttp2KeepAliveTimeout() > 0L)) {
PoolChecker checker = new PoolChecker(this);
- timerID = vertx.setTimer(options.getPoolCleanerPeriod(), checker);
+ ContextInternal timerContext = vertx.createEventLoopContext();
+ timerID = timerContext.setTimer(options.getPoolCleanerPeriod(), checker);
+ }
+ int eventLoopSize = options.getPoolEventLoopSize();
+ if (eventLoopSize > 0) {
+ EventLoopContext[] eventLoops = new EventLoopContext[eventLoopSize];
+ for (int i = 0;i < eventLoopSize;i++) {
+ eventLoops[i] = vertx.createEventLoopContext();
+ }
+ AtomicInteger idx = new AtomicInteger();
+ contextProvider = ctx -> {
+ int i = idx.getAndIncrement();
+ return eventLoops[i % eventLoopSize];
+ };
+ } else {
+ contextProvider = ConnectionPool.EVENT_LOOP_CONTEXT_PROVIDER;
}
closeFuture.add(netClient);
@@ -230,6 +248,10 @@ private ConnectionManager<EndpointKey, HttpClientConnection> webSocketConnection
});
}
+ Function<ContextInternal, EventLoopContext> contextProvider() {
+ return contextProvider;
+ }
+
private int getPort(RequestOptions request) {
Integer port = request.getPort();
if (port != null) {
diff --git a/src/main/java/io/vertx/core/http/impl/SharedClientHttpStreamEndpoint.java b/src/main/java/io/vertx/core/http/impl/SharedClientHttpStreamEndpoint.java
--- a/src/main/java/io/vertx/core/http/impl/SharedClientHttpStreamEndpoint.java
+++ b/src/main/java/io/vertx/core/http/impl/SharedClientHttpStreamEndpoint.java
@@ -68,10 +68,13 @@ public SharedClientHttpStreamEndpoint(HttpClientImpl client,
HttpChannelConnector connector,
Runnable dispose) {
super(metrics, dispose);
+
+ ConnectionPool<HttpClientConnection> pool = ConnectionPool.pool(this, new int[]{http1MaxSize, http2MaxSize}, queueMaxSize)
+ .connectionSelector(LIFO_SELECTOR).contextProvider(client.contextProvider());
+
this.client = client;
this.connector = connector;
- this.pool = ConnectionPool.pool(this, new int[] { http1MaxSize, http2MaxSize }, queueMaxSize)
- .connectionSelector(LIFO_SELECTOR);
+ this.pool = pool;
}
@Override
diff --git a/src/main/java/io/vertx/core/http/impl/SharedHttpClient.java b/src/main/java/io/vertx/core/http/impl/SharedHttpClient.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/io/vertx/core/http/impl/SharedHttpClient.java
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2011-2021 Contributors to the Eclipse Foundation
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+ * which is available at https://www.apache.org/licenses/LICENSE-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+ */
+
+package io.vertx.core.http.impl;
+
+import io.vertx.core.*;
+import io.vertx.core.http.*;
+import io.vertx.core.impl.VertxInternal;
+import io.vertx.core.shareddata.LocalMap;
+
+import java.util.List;
+import java.util.function.Function;
+
+public class SharedHttpClient implements HttpClient, Closeable {
+
+ private static final String MAP_NAME = "__vertx.shared.httpClients";
+ public static final String DEFAULT_CLIENT_NAME = "SharedHttpClient.DEFAULT";
+
+ private final VertxInternal vertx;
+ private final String name;
+ private final HttpClient delegate;
+
+ private SharedHttpClient(VertxInternal vertx, String name, HttpClient delegate) {
+ this.vertx = vertx;
+ this.name = name;
+ this.delegate = delegate;
+ }
+
+ public static SharedHttpClient create(VertxInternal vertx, String name, HttpClientOptions options) {
+ LocalMap<String, HttpClientHolder> localMap = vertx.sharedData().getLocalMap(MAP_NAME);
+ HttpClient client;
+ HttpClientHolder current, candidate;
+ for (; ; ) {
+ current = localMap.get(name);
+ if (current != null) {
+ candidate = current.increment();
+ if (candidate != null && localMap.replaceIfPresent(name, current, candidate)) {
+ client = candidate.get();
+ break;
+ }
+ } else {
+ candidate = new HttpClientHolder();
+ if (localMap.putIfAbsent(name, candidate) == null) {
+ candidate = candidate.init(vertx, options);
+ client = candidate.get();
+ localMap.put(name, candidate);
+ break;
+ }
+ }
+ }
+ return new SharedHttpClient(vertx, name, client);
+ }
+
+ @Override
+ public void close(Handler<AsyncResult<Void>> handler) {
+ Future<Void> future = close();
+ if (handler != null) {
+ future.onComplete(handler);
+ }
+ }
+
+ @Override
+ public Future<Void> close() {
+ Promise<Void> promise = vertx.promise();
+ LocalMap<String, HttpClientHolder> localMap = vertx.sharedData().getLocalMap(MAP_NAME);
+ HttpClientHolder current, candidate;
+ for (; ; ) {
+ current = localMap.get(name);
+ candidate = current.decrement();
+ if (candidate == null) {
+ if (localMap.removeIfPresent(name, current)) {
+ current.close().onComplete(promise);
+ break;
+ }
+ } else if (localMap.replace(name, current, candidate)) {
+ promise.complete();
+ break;
+ }
+ }
+ return promise.future();
+ }
+
+ @Override
+ public void close(Promise<Void> completion) {
+ close().onComplete(completion);
+ }
+
+ @Override
+ protected void finalize() throws Throwable {
+ // Make sure the shared client count gets decreased if there are no more references to this instance
+ close();
+ super.finalize();
+ }
+
+ @Override
+ public void request(RequestOptions options, Handler<AsyncResult<HttpClientRequest>> handler) {
+ delegate.request(options, handler);
+ }
+
+ @Override
+ public Future<HttpClientRequest> request(RequestOptions options) {
+ return delegate.request(options);
+ }
+
+ @Override
+ public void request(HttpMethod method, int port, String host, String requestURI, Handler<AsyncResult<HttpClientRequest>> handler) {
+ delegate.request(method, port, host, requestURI, handler);
+ }
+
+ @Override
+ public Future<HttpClientRequest> request(HttpMethod method, int port, String host, String requestURI) {
+ return delegate.request(method, port, host, requestURI);
+ }
+
+ @Override
+ public void request(HttpMethod method, String host, String requestURI, Handler<AsyncResult<HttpClientRequest>> handler) {
+ delegate.request(method, host, requestURI, handler);
+ }
+
+ @Override
+ public Future<HttpClientRequest> request(HttpMethod method, String host, String requestURI) {
+ return delegate.request(method, host, requestURI);
+ }
+
+ @Override
+ public void request(HttpMethod method, String requestURI, Handler<AsyncResult<HttpClientRequest>> handler) {
+ delegate.request(method, requestURI, handler);
+ }
+
+ @Override
+ public Future<HttpClientRequest> request(HttpMethod method, String requestURI) {
+ return delegate.request(method, requestURI);
+ }
+
+ @Override
+ public void webSocket(int port, String host, String requestURI, Handler<AsyncResult<WebSocket>> handler) {
+ delegate.webSocket(port, host, requestURI, handler);
+ }
+
+ @Override
+ public Future<WebSocket> webSocket(int port, String host, String requestURI) {
+ return delegate.webSocket(port, host, requestURI);
+ }
+
+ @Override
+ public void webSocket(String host, String requestURI, Handler<AsyncResult<WebSocket>> handler) {
+ delegate.webSocket(host, requestURI, handler);
+ }
+
+ @Override
+ public Future<WebSocket> webSocket(String host, String requestURI) {
+ return delegate.webSocket(host, requestURI);
+ }
+
+ @Override
+ public void webSocket(String requestURI, Handler<AsyncResult<WebSocket>> handler) {
+ delegate.webSocket(requestURI, handler);
+ }
+
+ @Override
+ public Future<WebSocket> webSocket(String requestURI) {
+ return delegate.webSocket(requestURI);
+ }
+
+ @Override
+ public void webSocket(WebSocketConnectOptions options, Handler<AsyncResult<WebSocket>> handler) {
+ delegate.webSocket(options, handler);
+ }
+
+ @Override
+ public Future<WebSocket> webSocket(WebSocketConnectOptions options) {
+ return delegate.webSocket(options);
+ }
+
+ @Override
+ public void webSocketAbs(String url, MultiMap headers, WebsocketVersion version, List<String> subProtocols, Handler<AsyncResult<WebSocket>> handler) {
+ delegate.webSocketAbs(url, headers, version, subProtocols, handler);
+ }
+
+ @Override
+ public Future<WebSocket> webSocketAbs(String url, MultiMap headers, WebsocketVersion version, List<String> subProtocols) {
+ return delegate.webSocketAbs(url, headers, version, subProtocols);
+ }
+
+ @Override
+ public HttpClient connectionHandler(Handler<HttpConnection> handler) {
+ return delegate.connectionHandler(handler);
+ }
+
+ @Override
+ public HttpClient redirectHandler(Function<HttpClientResponse, Future<RequestOptions>> handler) {
+ return delegate.redirectHandler(handler);
+ }
+
+ @Override
+ public Function<HttpClientResponse, Future<RequestOptions>> redirectHandler() {
+ return delegate.redirectHandler();
+ }
+}
diff --git a/src/main/java/io/vertx/core/impl/VertxImpl.java b/src/main/java/io/vertx/core/impl/VertxImpl.java
--- a/src/main/java/io/vertx/core/impl/VertxImpl.java
+++ b/src/main/java/io/vertx/core/impl/VertxImpl.java
@@ -43,6 +43,7 @@
import io.vertx.core.http.HttpServerOptions;
import io.vertx.core.http.impl.HttpClientImpl;
import io.vertx.core.http.impl.HttpServerImpl;
+import io.vertx.core.http.impl.SharedHttpClient;
import io.vertx.core.impl.future.PromiseInternal;
import io.vertx.core.impl.logging.Logger;
import io.vertx.core.impl.logging.LoggerFactory;
@@ -74,7 +75,10 @@
import java.lang.ref.WeakReference;
import java.net.InetAddress;
import java.net.InetSocketAddress;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
@@ -361,6 +365,31 @@ public HttpClient createHttpClient(HttpClientOptions options) {
return client;
}
+ @Override
+ public HttpClient createSharedHttpClient() {
+ return createSharedHttpClient(SharedHttpClient.DEFAULT_CLIENT_NAME, new HttpClientOptions());
+ }
+
+ @Override
+ public HttpClient createSharedHttpClient(HttpClientOptions options) {
+ return createSharedHttpClient(SharedHttpClient.DEFAULT_CLIENT_NAME, options);
+ }
+
+ @Override
+ public HttpClient createSharedHttpClient(String name) {
+ return createSharedHttpClient(name, new HttpClientOptions());
+ }
+
+ @Override
+ public HttpClient createSharedHttpClient(String name, HttpClientOptions options) {
+ CloseFuture closeFuture = new CloseFuture(log);
+ SharedHttpClient client = SharedHttpClient.create(this, name, options);
+ closeFuture.add(client);
+ CloseFuture parentCloseFuture = resolveCloseFuture();
+ parentCloseFuture.add(closeFuture);
+ return client;
+ }
+
@Override
public HttpClient createHttpClient() {
return createHttpClient(new HttpClientOptions());
| diff --git a/src/test/java/io/vertx/core/http/Http1xTest.java b/src/test/java/io/vertx/core/http/Http1xTest.java
--- a/src/test/java/io/vertx/core/http/Http1xTest.java
+++ b/src/test/java/io/vertx/core/http/Http1xTest.java
@@ -13,6 +13,7 @@
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandler;
+import io.netty.channel.EventLoop;
import io.netty.handler.codec.TooLongFrameException;
import io.vertx.core.*;
import io.vertx.core.Future;
@@ -5181,4 +5182,34 @@ private void testNetSocketUpgradeSuccess(Buffer payload) {
}));
await();
}
+
+ @Test
+ public void testClientEventLoopSize() throws Exception {
+ Assume.assumeTrue("Domain socket don't pass this test", testAddress.isInetSocket());
+ server.requestHandler(req -> {
+ req.response().end();
+ });
+ startServer();
+ int size = 4;
+ int maxPoolSize = size + 2;
+ client.close();
+ client = vertx.createHttpClient(new HttpClientOptions()
+ .setMaxPoolSize(maxPoolSize)
+ .setPoolEventLoopSize(size));
+ List<EventLoop> eventLoops = Collections.synchronizedList(new ArrayList<>());
+ client.connectionHandler(conn -> eventLoops.add(((ContextInternal)Vertx.currentContext()).nettyEventLoop()));
+ List<Future> futures = new ArrayList<>();
+ for (int i = 0;i < size * 2;i++) {
+ futures.add(client
+ .request(requestOptions)
+ .compose(HttpClientRequest::send)
+ .compose(HttpClientResponse::body));
+ }
+ CompositeFuture.all(futures).onComplete(onSuccess(v -> {
+ assertEquals(maxPoolSize, eventLoops.size());
+ assertEquals(size, new HashSet<>(eventLoops).size());
+ testComplete();
+ }));
+ await();
+ }
}
diff --git a/src/test/java/io/vertx/core/http/SharedHttpClientTest.java b/src/test/java/io/vertx/core/http/SharedHttpClientTest.java
new file mode 100644
--- /dev/null
+++ b/src/test/java/io/vertx/core/http/SharedHttpClientTest.java
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2011-2021 Contributors to the Eclipse Foundation
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+ * which is available at https://www.apache.org/licenses/LICENSE-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+ */
+
+package io.vertx.core.http;
+
+import io.vertx.core.*;
+import io.vertx.core.eventbus.Message;
+import io.vertx.core.impl.VertxInternal;
+import io.vertx.core.json.JsonObject;
+import io.vertx.test.core.VertxTestBase;
+import org.junit.Test;
+
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Consumer;
+import java.util.function.Supplier;
+
+import static io.vertx.core.http.HttpMethod.GET;
+
+public class SharedHttpClientTest extends VertxTestBase {
+
+ int sharedPoolSize = 7;
+ int clientVerticleInstances = 8;
+
+ int requestsPerVerticle = 50 * sharedPoolSize;
+ int requestsTotal = clientVerticleInstances * requestsPerVerticle;
+
+ @Test
+ public void testVerticlesUseSamePool() throws Exception {
+ CountDownLatch receivedLatch = new CountDownLatch(requestsTotal);
+ ServerVerticle serverVerticle = new ServerVerticle();
+
+ vertx.deployVerticle(serverVerticle, onSuccess(serverId -> {
+
+ HttpClientOptions clientOptions = httpClientOptions(serverVerticle, sharedPoolSize);
+ DeploymentOptions deploymentOptions = deploymentOptions(clientVerticleInstances, clientOptions);
+
+ Supplier<Verticle> verticleSupplier = () -> new ClientVerticle(clientVerticle -> {
+ // Verify the reply context is the same as of the deployment
+ // We can't compare to the verticle context because the reply context is a DuplicatedContext
+ assertEquals(clientVerticle.context.deploymentID(), Vertx.currentContext().deploymentID());
+ receivedLatch.countDown();
+ });
+
+ vertx.deployVerticle(verticleSupplier, deploymentOptions, onSuccess(clientId -> {
+ vertx.eventBus().publish(ClientVerticle.TRIGGER_ADDRESS, requestsPerVerticle);
+ }));
+ }));
+
+ waitUntil(() -> serverVerticle.connections.size() == sharedPoolSize);
+ serverVerticle.replyLatch.complete();
+ awaitLatch(receivedLatch);
+ assertEquals(serverVerticle.maxConnections, sharedPoolSize);
+ }
+
+ @Test
+ public void testSharedPoolClosedAutomatically() throws Exception {
+ CountDownLatch receivedLatch = new CountDownLatch(requestsTotal);
+ ServerVerticle serverVerticle = new ServerVerticle();
+ AtomicReference<String> clientDeploymentId = new AtomicReference<>();
+
+ vertx.deployVerticle(serverVerticle, onSuccess(serverId -> {
+
+ HttpClientOptions clientOptions = httpClientOptions(serverVerticle, sharedPoolSize)
+ // Make sure connections stay alive for the duration of the test if the server is not closed
+ .setKeepAliveTimeout(3600);
+ DeploymentOptions deploymentOptions = deploymentOptions(clientVerticleInstances, clientOptions);
+
+ Supplier<Verticle> verticleSupplier = () -> new ClientVerticle(clientVerticle -> receivedLatch.countDown());
+
+ vertx.deployVerticle(verticleSupplier, deploymentOptions, onSuccess(clientId -> {
+ clientDeploymentId.set(clientId);
+ vertx.eventBus().publish(ClientVerticle.TRIGGER_ADDRESS, requestsPerVerticle);
+ }));
+ }));
+
+ waitUntil(() -> serverVerticle.connections.size() == sharedPoolSize);
+ serverVerticle.replyLatch.complete();
+ awaitLatch(receivedLatch);
+
+ CountDownLatch undeployLatch = new CountDownLatch(1);
+ vertx.undeploy(clientDeploymentId.get(), onSuccess(v -> {
+ undeployLatch.countDown();
+ }));
+
+ awaitLatch(undeployLatch);
+ assertWaitUntil(() -> serverVerticle.connections.size() == 0);
+ }
+
+ @Test
+ public void testSharedPoolRetainedByOtherDeployment() throws Exception {
+ int keepAliveTimeoutSeconds = 3;
+
+ CountDownLatch receivedLatch = new CountDownLatch(requestsTotal);
+ ServerVerticle serverVerticle = new ServerVerticle();
+ AtomicReference<String> clientDeploymentId = new AtomicReference<>();
+
+ vertx.deployVerticle(serverVerticle, onSuccess(serverId -> {
+
+ HttpClientOptions clientOptions = httpClientOptions(serverVerticle, sharedPoolSize)
+ .setKeepAliveTimeout(keepAliveTimeoutSeconds);
+ DeploymentOptions deploymentOptions = deploymentOptions(clientVerticleInstances, clientOptions);
+
+ Supplier<Verticle> verticleSupplier = () -> new ClientVerticle(clientVerticle -> receivedLatch.countDown());
+
+ vertx.deployVerticle(verticleSupplier, deploymentOptions, onSuccess(clientId -> {
+ clientDeploymentId.set(clientId);
+ vertx.eventBus().publish(ClientVerticle.TRIGGER_ADDRESS, requestsPerVerticle);
+ }));
+ }));
+
+ waitUntil(() -> serverVerticle.connections.size() == sharedPoolSize);
+
+ CountDownLatch deployLatch = new CountDownLatch(1);
+ vertx.deployVerticle(new AbstractVerticle() {
+ @Override
+ public void start() throws Exception {
+ vertx.createSharedHttpClient(ClientVerticle.SHARED_CLIENT_NAME, new HttpClientOptions());
+ }
+ }, onSuccess(v -> {
+ deployLatch.countDown();
+ }));
+ awaitLatch(deployLatch);
+
+ serverVerticle.replyLatch.complete();
+ awaitLatch(receivedLatch);
+
+ CountDownLatch undeployLatch = new CountDownLatch(1);
+ vertx.undeploy(clientDeploymentId.get(), onSuccess(v -> {
+ undeployLatch.countDown();
+ }));
+
+ awaitLatch(undeployLatch);
+
+ waitFor(2);
+ vertx.setTimer((1000 * keepAliveTimeoutSeconds) / 2, l -> {
+ assertTrue(serverVerticle.connections.size() > 0);
+ complete();
+ });
+ vertx.setTimer(2 * 1000 * keepAliveTimeoutSeconds, l -> {
+ assertTrue(serverVerticle.connections.size() == 0);
+ complete();
+ });
+ await();
+ }
+
+ private static class ClientVerticle extends AbstractVerticle implements Handler<Message<Integer>> {
+
+ static final String TRIGGER_ADDRESS = UUID.randomUUID().toString();
+ static final String SHARED_CLIENT_NAME = UUID.randomUUID().toString();
+
+ final Consumer<ClientVerticle> onResponseReceived;
+
+ volatile Context context;
+ HttpClient client;
+
+ ClientVerticle(Consumer<ClientVerticle> onResponseReceived) {
+ this.onResponseReceived = onResponseReceived;
+ }
+
+ @Override
+ public void start(Promise<Void> startPromise) throws Exception {
+ context = super.context;
+ client = vertx.createSharedHttpClient(SHARED_CLIENT_NAME, new HttpClientOptions(config().getJsonObject("httpClientOptions")));
+ vertx.eventBus().consumer(TRIGGER_ADDRESS, this).completionHandler(startPromise);
+ }
+
+ @Override
+ public void handle(Message<Integer> message) {
+ for (int i = 0; i < message.body(); i++) {
+ client.request(GET, "/").compose(HttpClientRequest::send).onComplete(ar -> onResponseReceived.accept(this));
+ }
+ }
+ }
+
+ private static class ServerVerticle extends AbstractVerticle implements Handler<HttpServerRequest> {
+
+ volatile Promise<Void> replyLatch;
+ volatile int port;
+ Set<HttpConnection> connections = Collections.synchronizedSet(new HashSet<>());
+ volatile int maxConnections;
+
+ @Override
+ public void start(Promise<Void> startPromise) throws Exception {
+ replyLatch = ((VertxInternal) vertx).promise();
+ vertx.createHttpServer()
+ .requestHandler(this)
+ .listen(0)
+ .onSuccess(server -> port = server.actualPort())
+ .<Void>mapEmpty()
+ .onComplete(startPromise);
+ }
+
+ @Override
+ public void handle(HttpServerRequest req) {
+ HttpConnection connection = req.connection();
+ connections.add(connection);
+ connection.closeHandler(v -> connections.remove(connection));
+ maxConnections = Math.max(maxConnections, connections.size());
+ replyLatch.future().onComplete(ar -> req.response().end());
+ }
+ }
+
+ private static HttpClientOptions httpClientOptions(ServerVerticle serverVerticle, int sharedPoolSize) {
+ return new HttpClientOptions()
+ .setDefaultPort(serverVerticle.port)
+ .setMaxPoolSize(sharedPoolSize);
+ }
+
+ private static DeploymentOptions deploymentOptions(int instances, HttpClientOptions options) {
+ return new DeploymentOptions()
+ .setInstances(instances)
+ .setConfig(new JsonObject()
+ .put("httpClientOptions", options.toJson()));
+ }
+}
| HTTP client fixed event loop pool
HTTP client uses event loops provided by actual requests to assign connection event-loops. In some cases it is desirable to have the client use its own event loops as a pool, such as when it is shared between verticles or used statically.
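A usage sketch of the option this change introduces (sizes are illustrative): `0`, the default, keeps the existing behavior of borrowing the requesting context's event loop, while a positive value makes the client create and own that many loops for its pool.
```java
import io.vertx.core.Vertx;
import io.vertx.core.http.HttpClient;
import io.vertx.core.http.HttpClientOptions;

public class FixedEventLoopPool {
  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();
    // the pool spreads its connections over 2 event loops it owns
    HttpClient client = vertx.createHttpClient(new HttpClientOptions()
      .setMaxPoolSize(8)
      .setPoolEventLoopSize(2));
  }
}
```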
Shared HttpClient support
A few Vert.x clients (e.g. JDBC, Cassandra, Mongo) have `createShared` methods. The purpose of these creation methods is that, when you deploy multiple verticle instances, a single resource (JDBC datasource, Mongo client, Cassandra session) is created and reused.
While this was an obvious requirement for Vert.x clients wrapping existing libraries, most built-in clients don't have such methods. So if a user has to open at most `C` connections to an HTTP backend, and deploys `V` verticle instances, it is advised to create an `HttpClient` with `C/V` as max pool size.
On the upside, the user will get maximum performance because the HttpClient will be operated on the verticle context. On the downside, the configuration process is not straightforward.
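For instance, with `C = 12` and `V = 4`, each instance configures `C/V = 3` (a sketch of that advice):
```java
import io.vertx.core.AbstractVerticle;
import io.vertx.core.DeploymentOptions;
import io.vertx.core.Vertx;
import io.vertx.core.http.HttpClient;
import io.vertx.core.http.HttpClientOptions;

public class PerVerticleClients {
  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();
    vertx.deployVerticle(() -> new AbstractVerticle() {
      HttpClient client;
      @Override
      public void start() {
        // 12 connections overall / 4 instances = 3 per instance
        client = vertx.createHttpClient(new HttpClientOptions().setMaxPoolSize(3));
      }
    }, new DeploymentOptions().setInstances(4));
  }
}
```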
For Vert.x 4, HttpClient (and some other built-in clients too) have been made safe to use on different contexts. So it is possible to add `createShared` methods now.
The benefit is simplified configuration. The drawback is that this will require an extra hop between the context that actually created the `HttpClient` and the context using it. This should be acceptable to most apps though (the user pays at least this price when sending messages over the event bus).
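With the shared client, the same budget is configured once (a sketch based on the examples added in the patch; the name `my-client` is illustrative):
```java
import io.vertx.core.AbstractVerticle;
import io.vertx.core.DeploymentOptions;
import io.vertx.core.Vertx;
import io.vertx.core.http.HttpClient;
import io.vertx.core.http.HttpClientOptions;

public class SharedClient {
  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();
    vertx.deployVerticle(() -> new AbstractVerticle() {
      HttpClient client;
      @Override
      public void start() {
        // all 4 instances lease one named client with a single 12-connection pool;
        // the lease is released automatically when the verticle is undeployed
        client = vertx.createSharedHttpClient("my-client",
          new HttpClientOptions().setMaxPoolSize(12));
      }
    }, new DeploymentOptions().setInstances(4));
  }
}
```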
|
keep in mind this is not in the 4.0 priority list and I want us to focus on 4.0 priority tasks. | 2021-11-18T10:51:32Z | 4.2 |
eclipse-vertx/vert.x | 4,164 | eclipse-vertx__vert.x-4164 | [
"4163"
] | 0e6526c5eb4355a35014eca2162591453cff4111 | diff --git a/src/main/java/io/vertx/core/Vertx.java b/src/main/java/io/vertx/core/Vertx.java
--- a/src/main/java/io/vertx/core/Vertx.java
+++ b/src/main/java/io/vertx/core/Vertx.java
@@ -660,6 +660,12 @@ default <T> Future<T> executeBlocking(Handler<Promise<T>> blockingCodeHandler) {
@CacheReturn
boolean isNativeTransportEnabled();
+ /**
+ * @return the error (if any) that cause the unavailability of native transport when {@link #isNativeTransportEnabled()} returns {@code false}.
+ */
+ @CacheReturn
+ Throwable unavailableNativeTransportCause();
+
/**
* Set a default exception handler for {@link Context}, set on {@link Context#exceptionHandler(Handler)} at creation.
*
diff --git a/src/main/java/io/vertx/core/http/impl/Http1xUpgradeToH2CHandler.java b/src/main/java/io/vertx/core/http/impl/Http1xUpgradeToH2CHandler.java
--- a/src/main/java/io/vertx/core/http/impl/Http1xUpgradeToH2CHandler.java
+++ b/src/main/java/io/vertx/core/http/impl/Http1xUpgradeToH2CHandler.java
@@ -78,14 +78,16 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception
if (settings != null) {
if (initializer.context.isEventLoopContext()) {
ChannelPipeline pipeline = ctx.pipeline();
+ if (pipeline.get("chunkedWriter") != null) {
+ pipeline.remove("chunkedWriter");
+ }
DefaultFullHttpResponse res = new DefaultFullHttpResponse(HTTP_1_1, SWITCHING_PROTOCOLS, Unpooled.EMPTY_BUFFER, false);
res.headers().add(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE);
res.headers().add(HttpHeaderNames.UPGRADE, Http2CodecUtil.HTTP_UPGRADE_PROTOCOL_NAME);
- ctx.writeAndFlush(res);
+ ctx.write(res);
pipeline.remove("httpEncoder");
if (isCompressionSupported) {
pipeline.remove("deflater");
- pipeline.remove("chunkedWriter");
}
if (isDecompressionSupported) {
pipeline.remove("inflater");
diff --git a/src/main/java/io/vertx/core/http/impl/HttpServerWorker.java b/src/main/java/io/vertx/core/http/impl/HttpServerWorker.java
--- a/src/main/java/io/vertx/core/http/impl/HttpServerWorker.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpServerWorker.java
@@ -270,7 +270,7 @@ private void configureHttp1OrH2C(ChannelPipeline pipeline, SslChannelProvider ss
if (options.isCompressionSupported()) {
pipeline.addLast("deflater", new HttpChunkContentCompressor(compressionOptions));
}
- if (options.isSsl() || options.isCompressionSupported()) {
+ if (options.isSsl() || options.isCompressionSupported() || !vertx.transport().supportFileRegion()) {
// only add ChunkedWriteHandler when SSL is enabled otherwise it is not needed as FileRegion is used.
pipeline.addLast("chunkedWriter", new ChunkedWriteHandler()); // For large file / sendfile support
}
diff --git a/src/main/java/io/vertx/core/impl/VertxImpl.java b/src/main/java/io/vertx/core/impl/VertxImpl.java
--- a/src/main/java/io/vertx/core/impl/VertxImpl.java
+++ b/src/main/java/io/vertx/core/impl/VertxImpl.java
@@ -326,6 +326,14 @@ public boolean isNativeTransportEnabled() {
return transport != Transport.JDK;
}
+ @Override
+ public Throwable unavailableNativeTransportCause() {
+ if (isNativeTransportEnabled()) {
+ return null;
+ }
+ return transport.unavailabilityCause();
+ }
+
public FileSystem fileSystem() {
return fileSystem;
}
diff --git a/src/main/java/io/vertx/core/net/impl/ConnectionBase.java b/src/main/java/io/vertx/core/net/impl/ConnectionBase.java
--- a/src/main/java/io/vertx/core/net/impl/ConnectionBase.java
+++ b/src/main/java/io/vertx/core/net/impl/ConnectionBase.java
@@ -14,7 +14,7 @@
import io.netty.buffer.Unpooled;
import io.netty.channel.*;
import io.netty.handler.ssl.SslHandler;
-import io.netty.handler.stream.ChunkedFile;
+import io.netty.handler.stream.ChunkedNioFile;
import io.netty.handler.timeout.IdleStateEvent;
import io.netty.util.AttributeKey;
import io.netty.util.ReferenceCountUtil;
@@ -409,7 +409,7 @@ protected void handleIdle(IdleStateEvent event) {
protected abstract void handleInterestedOpsChanged();
protected boolean supportsFileRegion() {
- return !isSsl();
+ return vertx.transport().supportFileRegion() && !isSsl();
}
protected void reportBytesRead(Object msg) {
@@ -505,7 +505,7 @@ public final ChannelFuture sendFile(RandomAccessFile raf, long offset, long leng
ChannelPromise writeFuture = chctx.newPromise();
if (!supportsFileRegion()) {
// Cannot use zero-copy
- writeToChannel(new ChunkedFile(raf, offset, length, 8192), writeFuture);
+ writeToChannel(new ChunkedNioFile(raf.getChannel(), offset, length, 8192), writeFuture);
} else {
// No encryption - use zero-copy.
sendFileRegion(raf, offset, length, writeFuture);
diff --git a/src/main/java/io/vertx/core/net/impl/NetClientImpl.java b/src/main/java/io/vertx/core/net/impl/NetClientImpl.java
--- a/src/main/java/io/vertx/core/net/impl/NetClientImpl.java
+++ b/src/main/java/io/vertx/core/net/impl/NetClientImpl.java
@@ -95,7 +95,7 @@ protected void initChannel(ChannelPipeline pipeline) {
if (logEnabled) {
pipeline.addLast("logging", new LoggingHandler(options.getActivityLogDataFormat()));
}
- if (options.isSsl()) {
+ if (options.isSsl() || !vertx.transport().supportFileRegion()) {
// only add ChunkedWriteHandler when SSL is enabled otherwise it is not needed as FileRegion is used.
pipeline.addLast("chunkedWriter", new ChunkedWriteHandler()); // For large file / sendfile support
}
diff --git a/src/main/java/io/vertx/core/net/impl/NetServerImpl.java b/src/main/java/io/vertx/core/net/impl/NetServerImpl.java
--- a/src/main/java/io/vertx/core/net/impl/NetServerImpl.java
+++ b/src/main/java/io/vertx/core/net/impl/NetServerImpl.java
@@ -255,8 +255,8 @@ protected void initChannel(ChannelPipeline pipeline, boolean ssl) {
if (options.getLogActivity()) {
pipeline.addLast("logging", new LoggingHandler(options.getActivityLogDataFormat()));
}
- if (ssl) {
- // only add ChunkedWriteHandler when SSL is enabled otherwise it is not needed as FileRegion is used.
+ if (ssl || !vertx.transport().supportFileRegion()) {
+ // only add ChunkedWriteHandler when SSL is enabled or FileRegion isn't supported
pipeline.addLast("chunkedWriter", new ChunkedWriteHandler()); // For large file / sendfile support
}
int idleTimeout = options.getIdleTimeout();
diff --git a/src/main/java/io/vertx/core/net/impl/transport/EpollTransport.java b/src/main/java/io/vertx/core/net/impl/transport/EpollTransport.java
--- a/src/main/java/io/vertx/core/net/impl/transport/EpollTransport.java
+++ b/src/main/java/io/vertx/core/net/impl/transport/EpollTransport.java
@@ -62,6 +62,7 @@ public static void setPendingFastOpenRequestsThreshold(int value) {
}
EpollTransport() {
+ super(true);
}
@Override
diff --git a/src/main/java/io/vertx/core/net/impl/transport/IOUringTransport.java b/src/main/java/io/vertx/core/net/impl/transport/IOUringTransport.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/io/vertx/core/net/impl/transport/IOUringTransport.java
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2011-2019 Contributors to the Eclipse Foundation
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+ * which is available at https://www.apache.org/licenses/LICENSE-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+ */
+package io.vertx.core.net.impl.transport;
+
+import java.net.SocketAddress;
+import java.util.concurrent.ThreadFactory;
+
+import io.netty.bootstrap.Bootstrap;
+import io.netty.bootstrap.ServerBootstrap;
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelFactory;
+import io.netty.channel.EventLoopGroup;
+import io.netty.channel.ServerChannel;
+import io.netty.channel.socket.DatagramChannel;
+import io.netty.channel.socket.InternetProtocolFamily;
+import io.netty.channel.unix.DomainSocketAddress;
+import io.netty.incubator.channel.uring.IOUring;
+import io.netty.incubator.channel.uring.IOUringChannelOption;
+import io.netty.incubator.channel.uring.IOUringDatagramChannel;
+import io.netty.incubator.channel.uring.IOUringEventLoopGroup;
+import io.netty.incubator.channel.uring.IOUringServerSocketChannel;
+import io.netty.incubator.channel.uring.IOUringSocketChannel;
+import io.vertx.core.datagram.DatagramSocketOptions;
+import io.vertx.core.net.ClientOptionsBase;
+import io.vertx.core.net.NetServerOptions;
+import io.vertx.core.net.impl.SocketAddressImpl;
+
+public class IOUringTransport extends Transport {
+
+ private static volatile int pendingFastOpenRequestsThreshold = 256;
+
+ /**
+ * Return the number of pending TFO connections in SYN-RCVD state for TCP_FASTOPEN.
+ * <p>
+ * {@see #setPendingFastOpenRequestsThreshold}
+ */
+ public static int getPendingFastOpenRequestsThreshold() {
+ return pendingFastOpenRequestsThreshold;
+ }
+
+ /**
+ * Set the number of pending TFO connections in SYN-RCVD state for TCP_FASTOPEN
+ * <p/>
+ * If this value goes over a certain limit the server disables all TFO connections.
+ */
+ public static void setPendingFastOpenRequestsThreshold(int value) {
+ if (value < 0) {
+ throw new IllegalArgumentException("Invalid " + value);
+ }
+ pendingFastOpenRequestsThreshold = value;
+ }
+
+ IOUringTransport() {
+ super(false);
+ }
+
+ @Override
+ public boolean supportFileRegion() {
+ return false;
+ }
+
+ @Override
+ public SocketAddress convert(io.vertx.core.net.SocketAddress address) {
+ if (address.isDomainSocket()) {
+ throw new IllegalArgumentException("Domain socket not supported by IOUring transport");
+ }
+ return super.convert(address);
+ }
+
+ @Override
+ public io.vertx.core.net.SocketAddress convert(SocketAddress address) {
+ if (address instanceof DomainSocketAddress) {
+ return new SocketAddressImpl(((DomainSocketAddress) address).path());
+ }
+ return super.convert(address);
+ }
+
+ @Override
+ public boolean isAvailable() {
+ return IOUring.isAvailable();
+ }
+
+ @Override
+ public Throwable unavailabilityCause() {
+ return IOUring.unavailabilityCause();
+ }
+
+ @Override
+ public EventLoopGroup eventLoopGroup(int type, int nThreads, ThreadFactory threadFactory, int ignoredIoRatio) {
+ IOUringEventLoopGroup eventLoopGroup = new IOUringEventLoopGroup(nThreads, threadFactory);
+ return eventLoopGroup;
+ }
+
+ @Override
+ public DatagramChannel datagramChannel() {
+ return new IOUringDatagramChannel();
+ }
+
+ @Override
+ public DatagramChannel datagramChannel(InternetProtocolFamily family) {
+ return new IOUringDatagramChannel();
+ }
+
+ @Override
+ public ChannelFactory<? extends Channel> channelFactory(boolean domainSocket) {
+ if (domainSocket) {
+ throw new IllegalArgumentException();
+ }
+ return IOUringSocketChannel::new;
+ }
+
+ @Override
+ public ChannelFactory<? extends ServerChannel> serverChannelFactory(boolean domainSocket) {
+ if (domainSocket) {
+ throw new IllegalArgumentException();
+ }
+ return IOUringServerSocketChannel::new;
+ }
+
+ @Override
+ public void configure(DatagramChannel channel, DatagramSocketOptions options) {
+ channel.config().setOption(IOUringChannelOption.SO_REUSEPORT, options.isReusePort());
+ super.configure(channel, options);
+ }
+
+ @Override
+ public void configure(NetServerOptions options, boolean domainSocket, ServerBootstrap bootstrap) {
+ if (domainSocket) {
+ throw new IllegalArgumentException();
+ }
+ bootstrap.option(IOUringChannelOption.SO_REUSEPORT, options.isReusePort());
+ if (options.isTcpFastOpen()) {
+ bootstrap.option(IOUringChannelOption.TCP_FASTOPEN, options.isTcpFastOpen() ? pendingFastOpenRequestsThreshold : 0);
+ }
+ bootstrap.childOption(IOUringChannelOption.TCP_QUICKACK, options.isTcpQuickAck());
+ bootstrap.childOption(IOUringChannelOption.TCP_CORK, options.isTcpCork());
+ super.configure(options, false, bootstrap);
+ }
+
+ @Override
+ public void configure(ClientOptionsBase options, boolean domainSocket, Bootstrap bootstrap) {
+ if (domainSocket) {
+ throw new IllegalArgumentException();
+ }
+ if (options.isTcpFastOpen()) {
+ bootstrap.option(IOUringChannelOption.TCP_FASTOPEN_CONNECT, options.isTcpFastOpen());
+ }
+ bootstrap.option(IOUringChannelOption.TCP_QUICKACK, options.isTcpQuickAck());
+ bootstrap.option(IOUringChannelOption.TCP_CORK, options.isTcpCork());
+ super.configure(options, false, bootstrap);
+ }
+}
diff --git a/src/main/java/io/vertx/core/net/impl/transport/KQueueTransport.java b/src/main/java/io/vertx/core/net/impl/transport/KQueueTransport.java
--- a/src/main/java/io/vertx/core/net/impl/transport/KQueueTransport.java
+++ b/src/main/java/io/vertx/core/net/impl/transport/KQueueTransport.java
@@ -34,6 +34,7 @@
class KQueueTransport extends Transport {
KQueueTransport() {
+ super(true);
}
@Override
diff --git a/src/main/java/io/vertx/core/net/impl/transport/Transport.java b/src/main/java/io/vertx/core/net/impl/transport/Transport.java
--- a/src/main/java/io/vertx/core/net/impl/transport/Transport.java
+++ b/src/main/java/io/vertx/core/net/impl/transport/Transport.java
@@ -47,7 +47,11 @@ public class Transport {
/**
* The JDK transport, always there.
*/
- public static Transport JDK = new Transport();
+ public static Transport JDK = new Transport(false);
+
+ public boolean supportFileRegion() {
+ return true;
+ }
/**
* The native transport, it may be {@code null} or failed.
@@ -64,6 +68,16 @@ public static Transport nativeTransport() {
} catch (Throwable ignore) {
// Jar not here
}
+ try {
+ Transport ioUring = new IOUringTransport();
+ if (ioUring.isAvailable()) {
+ return ioUring;
+ } else {
+ transport = ioUring;
+ }
+ } catch (Throwable ignore) {
+ // Jar not here
+ }
try {
Transport kqueue = new KQueueTransport();
if (kqueue.isAvailable()) {
@@ -90,7 +104,18 @@ public static Transport transport(boolean preferNative) {
}
}
+ private final boolean supportsDomainSockets;
+
protected Transport() {
+ this(false);
+ }
+
+ protected Transport(boolean supportsDomainSockets) {
+ this.supportsDomainSockets = supportsDomainSockets;
+ }
+
+ public boolean supportsDomainSockets() {
+ return supportsDomainSockets;
}
/**
| diff --git a/src/test/java/io/vertx/core/datagram/DatagramTest.java b/src/test/java/io/vertx/core/datagram/DatagramTest.java
--- a/src/test/java/io/vertx/core/datagram/DatagramTest.java
+++ b/src/test/java/io/vertx/core/datagram/DatagramTest.java
@@ -27,6 +27,7 @@
import io.vertx.test.core.TestUtils;
import io.vertx.test.core.VertxTestBase;
import io.vertx.test.netty.TestLoggerFactory;
+import org.junit.Assert;
import org.junit.Assume;
import org.junit.Test;
@@ -177,33 +178,42 @@ public void testPauseResume() {
peer2 = vertx.createDatagramSocket(new DatagramSocketOptions());
peer2.exceptionHandler(t -> fail(t.getMessage()));
peer2.listen(1234, "127.0.0.1", ar -> {
- Buffer buffer = TestUtils.randomBuffer(128);
- AtomicBoolean received = new AtomicBoolean();
- peer2.handler(packet -> received.set(true));
+ final AtomicBoolean suspendedReceive = new AtomicBoolean();
+ peer2.handler(packet -> suspendedReceive.set(true));
peer2.pause();
+ Buffer buffer = TestUtils.randomBuffer(128);
peer1.send(buffer, 1234, "127.0.0.1", ar2 -> {
assertTrue(ar2.succeeded());
});
- vertx.setTimer(1000, l -> {
- AtomicInteger count = new AtomicInteger();
+ final int MAX_FAILED_ATTEMPTS = 10;
+ vertx.setTimer(1000, ignore -> {
+ Assert.assertFalse(suspendedReceive.get());
+ AtomicBoolean resumedReceive = new AtomicBoolean();
peer2.handler(packet -> {
- switch (count.getAndIncrement()) {
- case 0:
- assertEquals(buffer, packet.data());
- peer1.send(buffer, 1234, "127.0.0.1", ar2 -> {
- assertTrue(ar2.succeeded());
- });
- break;
- case 1:
- assertFalse(received.get());
- assertEquals(buffer, packet.data());
- testComplete();
- break;
- default:
- fail();
+ Assert.assertEquals(buffer, packet.data());
+ if (resumedReceive.compareAndSet(false, true)) {
+ testComplete();
}
});
peer2.resume();
+ peer1.send(buffer, 1234, "127.0.0.1", ar2 -> {
+ assertTrue(ar2.succeeded());
+ });
+ AtomicInteger failedAttempts = new AtomicInteger();
+ vertx.setPeriodic(1000, l -> {
+ if (resumedReceive.get()) {
+ vertx.cancelTimer(l.longValue());
+ return;
+ }
+ if (failedAttempts.incrementAndGet() == MAX_FAILED_ATTEMPTS) {
+ vertx.cancelTimer(l.longValue());
+ fail("failed to receive any packet while resumed: retried " + MAX_FAILED_ATTEMPTS + " times");
+ return;
+ }
+ peer1.send(buffer, 1234, "127.0.0.1", ar2 -> {
+ assertTrue(ar2.succeeded());
+ });
+ });
});
});
await();
diff --git a/src/test/java/io/vertx/core/http/HttpTest.java b/src/test/java/io/vertx/core/http/HttpTest.java
--- a/src/test/java/io/vertx/core/http/HttpTest.java
+++ b/src/test/java/io/vertx/core/http/HttpTest.java
@@ -39,6 +39,7 @@
import io.vertx.core.http.impl.ServerCookie;
import io.vertx.core.http.impl.headers.HeadersMultiMap;
import io.vertx.core.impl.Utils;
+import io.vertx.core.impl.VertxInternal;
import io.vertx.core.net.NetClient;
import io.vertx.core.net.NetClientOptions;
import io.vertx.core.net.NetServerOptions;
@@ -176,6 +177,7 @@ public void testListenSocketAddress() {
public void testListenDomainSocketAddress() throws Exception {
Vertx vx = Vertx.vertx(new VertxOptions().setPreferNativeTransport(true));
Assume.assumeTrue("Native transport must be enabled", vx.isNativeTransportEnabled());
+ Assume.assumeTrue("Transport must support domain sockets", ((VertxInternal) vx).transport().supportsDomainSockets());
int len = 3;
waitFor(len * len);
List<SocketAddress> addresses = new ArrayList<>();
diff --git a/src/test/java/io/vertx/core/net/NetTest.java b/src/test/java/io/vertx/core/net/NetTest.java
--- a/src/test/java/io/vertx/core/net/NetTest.java
+++ b/src/test/java/io/vertx/core/net/NetTest.java
@@ -1945,8 +1945,8 @@ void run(boolean shouldPass) {
@Test
public void testListenDomainSocketAddress() throws Exception {
- Vertx vx = Vertx.vertx(new VertxOptions().setPreferNativeTransport(true));
- Assume.assumeTrue("Native transport must be enabled", vx.isNativeTransportEnabled());
+ VertxInternal vx = (VertxInternal) Vertx.vertx(new VertxOptions().setPreferNativeTransport(true));
+ Assume.assumeTrue("Transport must support domain sockets", vx.transport().supportsDomainSockets());
int len = 3;
waitFor(len * len);
List<SocketAddress> addresses = new ArrayList<>();
diff --git a/src/test/java/io/vertx/it/TransportTest.java b/src/test/java/io/vertx/it/TransportTest.java
--- a/src/test/java/io/vertx/it/TransportTest.java
+++ b/src/test/java/io/vertx/it/TransportTest.java
@@ -50,6 +50,12 @@ public void testNoNative() {
} catch (ClassNotFoundException ignore) {
// Expected
}
+ try {
+ classLoader.loadClass("io.netty.incubator.channel.uring.IOUring");
+ fail("Was not expected to load IOUring class");
+ } catch (ClassNotFoundException ignore) {
+ // Expected
+ }
testNetServer(new VertxOptions());
assertFalse(vertx.isNativeTransportEnabled());
}
| Support IO_URING transport
I'm implementing support for https://github.com/netty/netty-incubator-transport-io_uring on the Vert.x `Transport`, see https://github.com/franz1981/vert.x/tree/4.2.1_iouring for an initial experiment.
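For context, a minimal sketch (not part of the PR) of how an application opts into the new transport once the io_uring incubator jar is on the classpath; it reuses the `setPreferNativeTransport` / `isNativeTransportEnabled` API exercised by the tests above:

```java
import io.vertx.core.Vertx;
import io.vertx.core.VertxOptions;

public class IoUringOptInSketch {
  public static void main(String[] args) {
    // Native transports are opt-in: per the Transport.nativeTransport() change,
    // io_uring is tried after epoll and before kqueue, and the JDK transport
    // remains the fallback when no native transport is available.
    Vertx vertx = Vertx.vertx(new VertxOptions().setPreferNativeTransport(true));
    System.out.println("Native transport enabled: " + vertx.isNativeTransportEnabled());
    vertx.close();
  }
}
```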
| 2021-11-09T16:52:53Z | 4.3 |
|
eclipse-vertx/vert.x | 4,160 | eclipse-vertx__vert.x-4160 | [
"4158"
] | 0a5f619617468ada84ffb7bd2379ace4b519ec5b | diff --git a/src/main/java/io/vertx/core/http/impl/Http1xClientConnection.java b/src/main/java/io/vertx/core/http/impl/Http1xClientConnection.java
--- a/src/main/java/io/vertx/core/http/impl/Http1xClientConnection.java
+++ b/src/main/java/io/vertx/core/http/impl/Http1xClientConnection.java
@@ -1040,17 +1040,6 @@ public void handleInterestedOpsChanged() {
context.execute(writable, handler);
}
- /**
- * @return a list of all pending streams
- */
- private Iterable<Stream> pendingStreams() {
- // There might be duplicate between the requets list and the responses list
- LinkedHashSet<Stream> list = new LinkedHashSet<>();
- list.addAll(requests);
- list.addAll(responses);
- return list;
- }
-
protected void handleClosed() {
super.handleClosed();
long timerID = shutdownTimerID;
@@ -1071,15 +1060,21 @@ protected void handleClosed() {
}
WebSocketImpl ws;
VertxTracer tracer = context.tracer();
- Iterable<Stream> streams;
+ List<Stream> allocatedStreams;
+ List<Stream> sentStreams;
synchronized (this) {
ws = webSocket;
- streams = pendingStreams();
+ sentStreams = new ArrayList<>(responses);
+ allocatedStreams = new ArrayList<>(requests);
+ allocatedStreams.removeAll(responses);
}
if (ws != null) {
ws.handleConnectionClosed();
}
- for (Stream stream : streams) {
+ for (Stream stream : allocatedStreams) {
+ stream.context.execute(null, v -> stream.handleClosed());
+ }
+ for (Stream stream : sentStreams) {
if (metrics != null) {
metrics.requestReset(stream.metric);
}
@@ -1104,15 +1099,16 @@ protected void handleIdle() {
protected void handleException(Throwable e) {
super.handleException(e);
WebSocketImpl ws;
- Iterable<Stream> streams;
+ LinkedHashSet<Stream> allStreams = new LinkedHashSet<>();
synchronized (this) {
ws = webSocket;
- streams = pendingStreams();
+ allStreams.addAll(requests);
+ allStreams.addAll(responses);
}
if (ws != null) {
ws.handleException(e);
}
- for (Stream stream : streams) {
+ for (Stream stream : allStreams) {
stream.handleException(e);
}
}
| diff --git a/src/test/java/io/vertx/core/http/Http1xMetricsTest.java b/src/test/java/io/vertx/core/http/Http1xMetricsTest.java
--- a/src/test/java/io/vertx/core/http/Http1xMetricsTest.java
+++ b/src/test/java/io/vertx/core/http/Http1xMetricsTest.java
@@ -10,10 +10,31 @@
*/
package io.vertx.core.http;
+import io.vertx.test.fakemetrics.FakeHttpClientMetrics;
+import org.junit.Test;
+
+import java.util.concurrent.CountDownLatch;
+
public class Http1xMetricsTest extends HttpMetricsTestBase {
public Http1xMetricsTest() {
super(HttpVersion.HTTP_1_1);
}
+ @Test
+ public void testAllocatedStreamResetShouldNotCallMetricsLifecycle() throws Exception {
+ server.requestHandler(req -> {
+ fail();
+ });
+ startServer();
+ CountDownLatch latch = new CountDownLatch(1);
+ client = vertx.createHttpClient(createBaseClientOptions().setIdleTimeout(2));
+ client.request(requestOptions).onComplete(onSuccess(req -> {
+ req.exceptionHandler(err -> {
+ latch.countDown();
+ });
+ req.connection().close();
+ }));
+ awaitLatch(latch);
+ }
}
diff --git a/src/test/java/io/vertx/test/fakemetrics/FakeHttpClientMetrics.java b/src/test/java/io/vertx/test/fakemetrics/FakeHttpClientMetrics.java
--- a/src/test/java/io/vertx/test/fakemetrics/FakeHttpClientMetrics.java
+++ b/src/test/java/io/vertx/test/fakemetrics/FakeHttpClientMetrics.java
@@ -155,6 +155,7 @@ public static void sanityCheck() {
if (err != null) {
AssertionFailedError afe = new AssertionFailedError();
afe.initCause(err);
+ unexpectedError = null;
throw afe;
}
}
 | HTTP/1.1 client metric is reset for allocated but unsent requests
The client calls the HTTP client metrics SPI for HTTP/1.1 requests that have been allocated but not yet sent: every request created by `HttpClient#request` that has not yet been sent calls back the SPI, when it should not.
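A condensed sketch of the failing scenario, modelled on the regression test added above (host and port are illustrative): the request is allocated but never written, so closing the connection should fail the stream without reaching the metrics SPI.

```java
import io.vertx.core.Vertx;
import io.vertx.core.http.HttpClient;
import io.vertx.core.http.HttpMethod;

public class AllocatedRequestSketch {
  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();
    HttpClient client = vertx.createHttpClient();
    client.request(HttpMethod.GET, 8080, "localhost", "/")
      .onSuccess(req -> {
        // The request object exists but nothing has been sent yet.
        req.exceptionHandler(err -> System.out.println("stream failed: " + err.getMessage()));
        // Closing the connection must fail the allocated stream without
        // invoking HttpClientMetrics#requestReset for it.
        req.connection().close();
      });
  }
}
```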
| 2021-11-03T07:48:40Z | 4.1 |
|
eclipse-vertx/vert.x | 4,134 | eclipse-vertx__vert.x-4134 | [
"4132"
] | ec94331134e7f1d14979fb9bbbaa5ef8a5f0f116 | diff --git a/src/main/generated/io/vertx/core/eventbus/EventBusOptionsConverter.java b/src/main/generated/io/vertx/core/eventbus/EventBusOptionsConverter.java
--- a/src/main/generated/io/vertx/core/eventbus/EventBusOptionsConverter.java
+++ b/src/main/generated/io/vertx/core/eventbus/EventBusOptionsConverter.java
@@ -150,6 +150,11 @@ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, EventBu
obj.setPort(((Number)member.getValue()).intValue());
}
break;
+ case "readIdleTimeout":
+ if (member.getValue() instanceof Number) {
+ obj.setReadIdleTimeout(((Number)member.getValue()).intValue());
+ }
+ break;
case "receiveBufferSize":
if (member.getValue() instanceof Number) {
obj.setReceiveBufferSize(((Number)member.getValue()).intValue());
@@ -245,6 +250,11 @@ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, EventBu
obj.setUseAlpn((Boolean)member.getValue());
}
break;
+ case "writeIdleTimeout":
+ if (member.getValue() instanceof Number) {
+ obj.setWriteIdleTimeout(((Number)member.getValue()).intValue());
+ }
+ break;
}
}
}
@@ -318,6 +328,7 @@ static void toJson(EventBusOptions obj, java.util.Map<String, Object> json) {
json.put("pfxTrustOptions", obj.getPfxTrustOptions().toJson());
}
json.put("port", obj.getPort());
+ json.put("readIdleTimeout", obj.getReadIdleTimeout());
json.put("receiveBufferSize", obj.getReceiveBufferSize());
json.put("reconnectAttempts", obj.getReconnectAttempts());
json.put("reconnectInterval", obj.getReconnectInterval());
@@ -341,5 +352,6 @@ static void toJson(EventBusOptions obj, java.util.Map<String, Object> json) {
json.put("trustStoreOptions", obj.getTrustStoreOptions().toJson());
}
json.put("useAlpn", obj.isUseAlpn());
+ json.put("writeIdleTimeout", obj.getWriteIdleTimeout());
}
}
diff --git a/src/main/generated/io/vertx/core/net/TCPSSLOptionsConverter.java b/src/main/generated/io/vertx/core/net/TCPSSLOptionsConverter.java
--- a/src/main/generated/io/vertx/core/net/TCPSSLOptionsConverter.java
+++ b/src/main/generated/io/vertx/core/net/TCPSSLOptionsConverter.java
@@ -95,6 +95,11 @@ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, TCPSSLO
obj.setPfxTrustOptions(new io.vertx.core.net.PfxOptions((io.vertx.core.json.JsonObject)member.getValue()));
}
break;
+ case "readIdleTimeout":
+ if (member.getValue() instanceof Number) {
+ obj.setReadIdleTimeout(((Number)member.getValue()).intValue());
+ }
+ break;
case "soLinger":
if (member.getValue() instanceof Number) {
obj.setSoLinger(((Number)member.getValue()).intValue());
@@ -150,6 +155,11 @@ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, TCPSSLO
obj.setUseAlpn((Boolean)member.getValue());
}
break;
+ case "writeIdleTimeout":
+ if (member.getValue() instanceof Number) {
+ obj.setWriteIdleTimeout(((Number)member.getValue()).intValue());
+ }
+ break;
}
}
}
@@ -204,6 +214,7 @@ static void toJson(TCPSSLOptions obj, java.util.Map<String, Object> json) {
if (obj.getPfxTrustOptions() != null) {
json.put("pfxTrustOptions", obj.getPfxTrustOptions().toJson());
}
+ json.put("readIdleTimeout", obj.getReadIdleTimeout());
json.put("soLinger", obj.getSoLinger());
json.put("ssl", obj.isSsl());
json.put("sslHandshakeTimeout", obj.getSslHandshakeTimeout());
@@ -219,5 +230,6 @@ static void toJson(TCPSSLOptions obj, java.util.Map<String, Object> json) {
json.put("trustStoreOptions", obj.getTrustStoreOptions().toJson());
}
json.put("useAlpn", obj.isUseAlpn());
+ json.put("writeIdleTimeout", obj.getWriteIdleTimeout());
}
}
diff --git a/src/main/java/io/vertx/core/eventbus/EventBusOptions.java b/src/main/java/io/vertx/core/eventbus/EventBusOptions.java
--- a/src/main/java/io/vertx/core/eventbus/EventBusOptions.java
+++ b/src/main/java/io/vertx/core/eventbus/EventBusOptions.java
@@ -331,6 +331,24 @@ public EventBusOptions setIdleTimeout(int idleTimeout) {
return this;
}
+ @Override
+ public EventBusOptions setReadIdleTimeout(int idleTimeout) {
+ super.setReadIdleTimeout(idleTimeout);
+ return this;
+ }
+
+ @Override
+ public EventBusOptions setWriteIdleTimeout(int idleTimeout) {
+ super.setWriteIdleTimeout(idleTimeout);
+ return this;
+ }
+
+ @Override
+ public EventBusOptions setIdleTimeoutUnit(TimeUnit idleTimeoutUnit) {
+ super.setIdleTimeoutUnit(idleTimeoutUnit);
+ return this;
+ }
+
@Override
@GenIgnore
public EventBusOptions setKeyCertOptions(KeyCertOptions options) {
diff --git a/src/main/java/io/vertx/core/http/HttpClientOptions.java b/src/main/java/io/vertx/core/http/HttpClientOptions.java
--- a/src/main/java/io/vertx/core/http/HttpClientOptions.java
+++ b/src/main/java/io/vertx/core/http/HttpClientOptions.java
@@ -416,6 +416,18 @@ public HttpClientOptions setIdleTimeout(int idleTimeout) {
return this;
}
+ @Override
+ public HttpClientOptions setReadIdleTimeout(int idleTimeout) {
+ super.setReadIdleTimeout(idleTimeout);
+ return this;
+ }
+
+ @Override
+ public HttpClientOptions setWriteIdleTimeout(int idleTimeout) {
+ super.setWriteIdleTimeout(idleTimeout);
+ return this;
+ }
+
@Override
public HttpClientOptions setIdleTimeoutUnit(TimeUnit idleTimeoutUnit) {
super.setIdleTimeoutUnit(idleTimeoutUnit);
diff --git a/src/main/java/io/vertx/core/http/HttpServerOptions.java b/src/main/java/io/vertx/core/http/HttpServerOptions.java
--- a/src/main/java/io/vertx/core/http/HttpServerOptions.java
+++ b/src/main/java/io/vertx/core/http/HttpServerOptions.java
@@ -313,6 +313,18 @@ public HttpServerOptions setIdleTimeout(int idleTimeout) {
return this;
}
+ @Override
+ public HttpServerOptions setReadIdleTimeout(int idleTimeout) {
+ super.setReadIdleTimeout(idleTimeout);
+ return this;
+ }
+
+ @Override
+ public HttpServerOptions setWriteIdleTimeout(int idleTimeout) {
+ super.setWriteIdleTimeout(idleTimeout);
+ return this;
+ }
+
@Override
public HttpServerOptions setIdleTimeoutUnit(TimeUnit idleTimeoutUnit) {
super.setIdleTimeoutUnit(idleTimeoutUnit);
diff --git a/src/main/java/io/vertx/core/http/impl/Http1xClientConnection.java b/src/main/java/io/vertx/core/http/impl/Http1xClientConnection.java
--- a/src/main/java/io/vertx/core/http/impl/Http1xClientConnection.java
+++ b/src/main/java/io/vertx/core/http/impl/Http1xClientConnection.java
@@ -25,6 +25,7 @@
import io.netty.handler.codec.http.websocketx.extensions.compression.DeflateFrameClientExtensionHandshaker;
import io.netty.handler.codec.http.websocketx.extensions.compression.PerMessageDeflateClientExtensionHandshaker;
import io.netty.handler.codec.http.websocketx.extensions.compression.PerMessageDeflateServerExtensionHandshaker;
+import io.netty.handler.timeout.IdleStateEvent;
import io.netty.util.ReferenceCountUtil;
import io.netty.util.concurrent.FutureListener;
import io.vertx.core.*;
@@ -1091,13 +1092,13 @@ protected void handleClosed() {
}
}
- protected void handleIdle() {
+ protected void handleIdle(IdleStateEvent event) {
synchronized (this) {
if (webSocket == null && responses.isEmpty() && requests.isEmpty()) {
return;
}
}
- super.handleIdle();
+ super.handleIdle(event);
}
@Override
diff --git a/src/main/java/io/vertx/core/http/impl/Http2ClientConnection.java b/src/main/java/io/vertx/core/http/impl/Http2ClientConnection.java
--- a/src/main/java/io/vertx/core/http/impl/Http2ClientConnection.java
+++ b/src/main/java/io/vertx/core/http/impl/Http2ClientConnection.java
@@ -20,6 +20,7 @@
import io.netty.handler.codec.http2.Http2Exception;
import io.netty.handler.codec.http2.Http2Headers;
import io.netty.handler.codec.http2.Http2Stream;
+import io.netty.handler.timeout.IdleStateEvent;
import io.vertx.core.*;
import io.vertx.core.buffer.Buffer;
import io.vertx.core.http.*;
@@ -584,9 +585,9 @@ public HttpClientConnection connection() {
}
@Override
- protected void handleIdle() {
+ protected void handleIdle(IdleStateEvent event) {
if (handler.connection().local().numActiveStreams() > 0) {
- super.handleIdle();
+ super.handleIdle(event);
}
}
diff --git a/src/main/java/io/vertx/core/http/impl/Http2ConnectionBase.java b/src/main/java/io/vertx/core/http/impl/Http2ConnectionBase.java
--- a/src/main/java/io/vertx/core/http/impl/Http2ConnectionBase.java
+++ b/src/main/java/io/vertx/core/http/impl/Http2ConnectionBase.java
@@ -23,6 +23,7 @@
import io.netty.handler.codec.http2.Http2Headers;
import io.netty.handler.codec.http2.Http2Settings;
import io.netty.handler.codec.http2.Http2Stream;
+import io.netty.handler.timeout.IdleStateEvent;
import io.vertx.codegen.annotations.Nullable;
import io.vertx.core.AsyncResult;
import io.vertx.core.Future;
@@ -33,7 +34,6 @@
import io.vertx.core.http.GoAway;
import io.vertx.core.http.HttpConnection;
import io.vertx.core.http.StreamPriority;
-import io.vertx.core.impl.ContextInternal;
import io.vertx.core.impl.EventLoopContext;
import io.vertx.core.impl.future.PromiseInternal;
import io.vertx.core.impl.VertxInternal;
@@ -106,8 +106,8 @@ protected void handleInterestedOpsChanged() {
}
@Override
- protected void handleIdle() {
- super.handleIdle();
+ protected void handleIdle(IdleStateEvent event) {
+ super.handleIdle(event);
}
synchronized void onConnectionError(Throwable cause) {
diff --git a/src/main/java/io/vertx/core/http/impl/HttpChannelConnector.java b/src/main/java/io/vertx/core/http/impl/HttpChannelConnector.java
--- a/src/main/java/io/vertx/core/http/impl/HttpChannelConnector.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpChannelConnector.java
@@ -144,14 +144,20 @@ public void httpConnect(EventLoopContext context, Handler<AsyncResult<HttpClient
}
private void applyHttp2ConnectionOptions(ChannelPipeline pipeline) {
- if (options.getIdleTimeout() > 0) {
- pipeline.addLast("idle", new IdleStateHandler(0, 0, options.getIdleTimeout(), options.getIdleTimeoutUnit()));
+ int idleTimeout = options.getIdleTimeout();
+ int readIdleTimeout = options.getReadIdleTimeout();
+ int writeIdleTimeout = options.getWriteIdleTimeout();
+ if (idleTimeout > 0 || readIdleTimeout > 0 || writeIdleTimeout > 0) {
+ pipeline.addLast("idle", new IdleStateHandler(readIdleTimeout, writeIdleTimeout, idleTimeout, options.getIdleTimeoutUnit()));
}
}
private void applyHttp1xConnectionOptions(ChannelPipeline pipeline) {
- if (options.getIdleTimeout() > 0) {
- pipeline.addLast("idle", new IdleStateHandler(0, 0, options.getIdleTimeout(), options.getIdleTimeoutUnit()));
+ int idleTimeout = options.getIdleTimeout();
+ int readIdleTimeout = options.getReadIdleTimeout();
+ int writeIdleTimeout = options.getWriteIdleTimeout();
+ if (idleTimeout > 0 || readIdleTimeout > 0 || writeIdleTimeout > 0) {
+ pipeline.addLast("idle", new IdleStateHandler(readIdleTimeout, writeIdleTimeout, idleTimeout, options.getIdleTimeoutUnit()));
}
if (options.getLogActivity()) {
pipeline.addLast("logging", new LoggingHandler());
diff --git a/src/main/java/io/vertx/core/http/impl/HttpServerWorker.java b/src/main/java/io/vertx/core/http/impl/HttpServerWorker.java
--- a/src/main/java/io/vertx/core/http/impl/HttpServerWorker.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpServerWorker.java
@@ -144,8 +144,11 @@ private void configurePipeline(Channel ch) {
handleHttp1(ch);
} else {
IdleStateHandler idle;
- if (options.getIdleTimeout() > 0) {
- pipeline.addLast("idle", idle = new IdleStateHandler(0, 0, options.getIdleTimeout(), options.getIdleTimeoutUnit()));
+ int idleTimeout = options.getIdleTimeout();
+ int readIdleTimeout = options.getReadIdleTimeout();
+ int writeIdleTimeout = options.getWriteIdleTimeout();
+ if (idleTimeout > 0 || readIdleTimeout > 0 || writeIdleTimeout > 0) {
+ pipeline.addLast("idle", idle = new IdleStateHandler(readIdleTimeout, writeIdleTimeout, idleTimeout, options.getIdleTimeoutUnit()));
} else {
idle = null;
}
@@ -211,8 +214,11 @@ void configureHttp2(ChannelPipeline pipeline) {
pipeline.channel().close();
return;
}
- if (options.getIdleTimeout() > 0) {
- pipeline.addBefore("handler", "idle", new IdleStateHandler(0, 0, options.getIdleTimeout(), options.getIdleTimeoutUnit()));
+ int idleTimeout = options.getIdleTimeout();
+ int readIdleTimeout = options.getReadIdleTimeout();
+ int writeIdleTimeout = options.getWriteIdleTimeout();
+ if (idleTimeout > 0 || readIdleTimeout > 0 || writeIdleTimeout > 0) {
+ pipeline.addBefore("handler", "idle", new IdleStateHandler(readIdleTimeout, writeIdleTimeout, idleTimeout, options.getIdleTimeoutUnit()));
}
}
@@ -261,8 +267,11 @@ private void configureHttp1OrH2C(ChannelPipeline pipeline) {
// only add ChunkedWriteHandler when SSL is enabled otherwise it is not needed as FileRegion is used.
pipeline.addLast("chunkedWriter", new ChunkedWriteHandler()); // For large file / sendfile support
}
- if (options.getIdleTimeout() > 0) {
- pipeline.addLast("idle", new IdleStateHandler(0, 0, options.getIdleTimeout(), options.getIdleTimeoutUnit()));
+ int idleTimeout = options.getIdleTimeout();
+ int readIdleTimeout = options.getReadIdleTimeout();
+ int writeIdleTimeout = options.getWriteIdleTimeout();
+ if (idleTimeout > 0 || readIdleTimeout > 0 || writeIdleTimeout > 0) {
+ pipeline.addLast("idle", new IdleStateHandler(readIdleTimeout, writeIdleTimeout, idleTimeout, options.getIdleTimeoutUnit()));
}
if (disableH2C) {
configureHttp1(pipeline);
diff --git a/src/main/java/io/vertx/core/http/impl/VertxHttp2ConnectionHandler.java b/src/main/java/io/vertx/core/http/impl/VertxHttp2ConnectionHandler.java
--- a/src/main/java/io/vertx/core/http/impl/VertxHttp2ConnectionHandler.java
+++ b/src/main/java/io/vertx/core/http/impl/VertxHttp2ConnectionHandler.java
@@ -150,8 +150,8 @@ public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exc
try {
super.userEventTriggered(ctx, evt);
} finally {
- if (evt instanceof IdleStateEvent && ((IdleStateEvent) evt).state() == IdleState.ALL_IDLE) {
- connection.handleIdle();
+ if (evt instanceof IdleStateEvent) {
+ connection.handleIdle((IdleStateEvent) evt);
}
}
}
diff --git a/src/main/java/io/vertx/core/net/ClientOptionsBase.java b/src/main/java/io/vertx/core/net/ClientOptionsBase.java
--- a/src/main/java/io/vertx/core/net/ClientOptionsBase.java
+++ b/src/main/java/io/vertx/core/net/ClientOptionsBase.java
@@ -261,6 +261,16 @@ public ClientOptionsBase setIdleTimeout(int idleTimeout) {
return (ClientOptionsBase) super.setIdleTimeout(idleTimeout);
}
+ @Override
+ public ClientOptionsBase setReadIdleTimeout(int idleTimeout) {
+ return (ClientOptionsBase) super.setReadIdleTimeout(idleTimeout);
+ }
+
+ @Override
+ public ClientOptionsBase setWriteIdleTimeout(int idleTimeout) {
+ return (ClientOptionsBase) super.setWriteIdleTimeout(idleTimeout);
+ }
+
@Override
public ClientOptionsBase setIdleTimeoutUnit(TimeUnit idleTimeoutUnit) {
return (ClientOptionsBase) super.setIdleTimeoutUnit(idleTimeoutUnit);
diff --git a/src/main/java/io/vertx/core/net/NetClientOptions.java b/src/main/java/io/vertx/core/net/NetClientOptions.java
--- a/src/main/java/io/vertx/core/net/NetClientOptions.java
+++ b/src/main/java/io/vertx/core/net/NetClientOptions.java
@@ -151,6 +151,18 @@ public NetClientOptions setIdleTimeout(int idleTimeout) {
return this;
}
+ @Override
+ public NetClientOptions setReadIdleTimeout(int idleTimeout) {
+ super.setReadIdleTimeout(idleTimeout);
+ return this;
+ }
+
+ @Override
+ public NetClientOptions setWriteIdleTimeout(int idleTimeout) {
+ super.setWriteIdleTimeout(idleTimeout);
+ return this;
+ }
+
@Override
public NetClientOptions setIdleTimeoutUnit(TimeUnit idleTimeoutUnit) {
super.setIdleTimeoutUnit(idleTimeoutUnit);
diff --git a/src/main/java/io/vertx/core/net/NetServerOptions.java b/src/main/java/io/vertx/core/net/NetServerOptions.java
--- a/src/main/java/io/vertx/core/net/NetServerOptions.java
+++ b/src/main/java/io/vertx/core/net/NetServerOptions.java
@@ -181,6 +181,18 @@ public NetServerOptions setIdleTimeout(int idleTimeout) {
return this;
}
+ @Override
+ public NetServerOptions setReadIdleTimeout(int idleTimeout) {
+ super.setReadIdleTimeout(idleTimeout);
+ return this;
+ }
+
+ @Override
+ public NetServerOptions setWriteIdleTimeout(int idleTimeout) {
+ super.setWriteIdleTimeout(idleTimeout);
+ return this;
+ }
+
@Override
public NetServerOptions setIdleTimeoutUnit(TimeUnit idleTimeoutUnit) {
super.setIdleTimeoutUnit(idleTimeoutUnit);
diff --git a/src/main/java/io/vertx/core/net/TCPSSLOptions.java b/src/main/java/io/vertx/core/net/TCPSSLOptions.java
--- a/src/main/java/io/vertx/core/net/TCPSSLOptions.java
+++ b/src/main/java/io/vertx/core/net/TCPSSLOptions.java
@@ -57,6 +57,16 @@ public abstract class TCPSSLOptions extends NetworkOptions {
*/
public static final TimeUnit DEFAULT_IDLE_TIMEOUT_TIME_UNIT = TimeUnit.SECONDS;
+ /**
+ * Default read idle timeout = 0
+ */
+ public static final int DEFAULT_READ_IDLE_TIMEOUT = 0;
+
+ /**
+ * Default write idle timeout = 0
+ */
+ public static final int DEFAULT_WRITE_IDLE_TIMEOUT = 0;
+
/**
* Default use alpn = false
*/
@@ -105,6 +115,8 @@ public abstract class TCPSSLOptions extends NetworkOptions {
private boolean tcpKeepAlive;
private int soLinger;
private int idleTimeout;
+ private int readIdleTimeout;
+ private int writeIdleTimeout;
private TimeUnit idleTimeoutUnit;
private boolean ssl;
private long sslHandshakeTimeout;
@@ -141,6 +153,8 @@ public TCPSSLOptions(TCPSSLOptions other) {
this.soLinger = other.getSoLinger();
this.idleTimeout = other.getIdleTimeout();
this.idleTimeoutUnit = other.getIdleTimeoutUnit() != null ? other.getIdleTimeoutUnit() : DEFAULT_IDLE_TIMEOUT_TIME_UNIT;
+ this.readIdleTimeout = other.getReadIdleTimeout();
+ this.writeIdleTimeout = other.getWriteIdleTimeout();
this.ssl = other.isSsl();
this.sslHandshakeTimeout = other.sslHandshakeTimeout;
this.sslHandshakeTimeoutUnit = other.getSslHandshakeTimeoutUnit() != null ? other.getSslHandshakeTimeoutUnit() : DEFAULT_SSL_HANDSHAKE_TIMEOUT_TIME_UNIT;
@@ -184,6 +198,8 @@ private void init() {
tcpKeepAlive = DEFAULT_TCP_KEEP_ALIVE;
soLinger = DEFAULT_SO_LINGER;
idleTimeout = DEFAULT_IDLE_TIMEOUT;
+ readIdleTimeout = DEFAULT_READ_IDLE_TIMEOUT;
+ writeIdleTimeout = DEFAULT_WRITE_IDLE_TIMEOUT;
idleTimeoutUnit = DEFAULT_IDLE_TIMEOUT_TIME_UNIT;
ssl = DEFAULT_SSL;
sslHandshakeTimeout = DEFAULT_SSL_HANDSHAKE_TIMEOUT;
@@ -259,11 +275,11 @@ public TCPSSLOptions setSoLinger(int soLinger) {
/**
* Set the idle timeout, default time unit is seconds. Zero means don't timeout.
- * This determines if a connection will timeout and be closed if no data is received within the timeout.
+ * This determines if a connection will timeout and be closed if no data is received nor sent within the timeout.
*
* If you want change default time unit, use {@link #setIdleTimeoutUnit(TimeUnit)}
*
- * @param idleTimeout the timeout, in seconds
+ * @param idleTimeout the timeout
* @return a reference to this, so the API can be used fluently
*/
public TCPSSLOptions setIdleTimeout(int idleTimeout) {
@@ -281,6 +297,54 @@ public int getIdleTimeout() {
return idleTimeout;
}
+ /**
+ * Set the read idle timeout, default time unit is seconds. Zero means don't timeout.
+ * This determines if a connection will timeout and be closed if no data is received within the timeout.
+ *
+ * If you want change default time unit, use {@link #setIdleTimeoutUnit(TimeUnit)}
+ *
+ * @param idleTimeout the read timeout
+ * @return a reference to this, so the API can be used fluently
+ */
+ public TCPSSLOptions setReadIdleTimeout(int idleTimeout) {
+ if (idleTimeout < 0) {
+ throw new IllegalArgumentException("readIdleTimeout must be >= 0");
+ }
+ this.readIdleTimeout = idleTimeout;
+ return this;
+ }
+
+ /**
+ * @return the read idle timeout, in time unit specified by {@link #getIdleTimeoutUnit()}.
+ */
+ public int getReadIdleTimeout() {
+ return readIdleTimeout;
+ }
+
+ /**
+ * Set the write idle timeout, default time unit is seconds. Zero means don't timeout.
+ * This determines if a connection will timeout and be closed if no data is sent within the timeout.
+ *
+ * If you want change default time unit, use {@link #setIdleTimeoutUnit(TimeUnit)}
+ *
+ * @param idleTimeout the write timeout
+ * @return a reference to this, so the API can be used fluently
+ */
+ public TCPSSLOptions setWriteIdleTimeout(int idleTimeout) {
+ if (idleTimeout < 0) {
+ throw new IllegalArgumentException("writeIdleTimeout must be >= 0");
+ }
+ this.writeIdleTimeout = idleTimeout;
+ return this;
+ }
+
+ /**
+ * @return the write idle timeout, in time unit specified by {@link #getIdleTimeoutUnit()}.
+ */
+ public int getWriteIdleTimeout() {
+ return writeIdleTimeout;
+ }
+
/**
* Set the idle timeout unit. If not specified, default is seconds.
*
diff --git a/src/main/java/io/vertx/core/net/impl/ConnectionBase.java b/src/main/java/io/vertx/core/net/impl/ConnectionBase.java
--- a/src/main/java/io/vertx/core/net/impl/ConnectionBase.java
+++ b/src/main/java/io/vertx/core/net/impl/ConnectionBase.java
@@ -15,6 +15,7 @@
import io.netty.channel.*;
import io.netty.handler.ssl.SslHandler;
import io.netty.handler.stream.ChunkedFile;
+import io.netty.handler.timeout.IdleStateEvent;
import io.netty.util.AttributeKey;
import io.netty.util.ReferenceCountUtil;
import io.netty.util.concurrent.EventExecutor;
@@ -396,8 +397,9 @@ private void checkCloseHandler(AsyncResult<Void> ar) {
* <p/>
* Subclasses can override it to prevent the idle event to happen (e.g when the connection is pooled) or
* perform extra work when the idle event happens.
+ * @param event
*/
- protected void handleIdle() {
+ protected void handleIdle(IdleStateEvent event) {
chctx.close();
}
diff --git a/src/main/java/io/vertx/core/net/impl/NetClientImpl.java b/src/main/java/io/vertx/core/net/impl/NetClientImpl.java
--- a/src/main/java/io/vertx/core/net/impl/NetClientImpl.java
+++ b/src/main/java/io/vertx/core/net/impl/NetClientImpl.java
@@ -58,6 +58,8 @@ public class NetClientImpl implements MetricsProvider, NetClient, Closeable {
private static final Logger log = LoggerFactory.getLogger(NetClientImpl.class);
protected final int idleTimeout;
+ protected final int readIdleTimeout;
+ protected final int writeIdleTimeout;
private final TimeUnit idleTimeoutUnit;
protected final boolean logEnabled;
@@ -77,6 +79,8 @@ public NetClientImpl(VertxInternal vertx, NetClientOptions options, CloseFuture
this.metrics = vertx.metricsSPI() != null ? vertx.metricsSPI().createNetClientMetrics(options) : null;
this.logEnabled = options.getLogActivity();
this.idleTimeout = options.getIdleTimeout();
+ this.readIdleTimeout = options.getReadIdleTimeout();
+ this.writeIdleTimeout = options.getWriteIdleTimeout();
this.idleTimeoutUnit = options.getIdleTimeoutUnit();
this.closeFuture = closeFuture;
this.proxyFilter = options.getNonProxyHosts() != null ? ProxyFilter.nonProxyHosts(options.getNonProxyHosts()) : ProxyFilter.DEFAULT_PROXY_FILTER;
@@ -92,8 +96,8 @@ protected void initChannel(ChannelPipeline pipeline) {
// only add ChunkedWriteHandler when SSL is enabled otherwise it is not needed as FileRegion is used.
pipeline.addLast("chunkedWriter", new ChunkedWriteHandler()); // For large file / sendfile support
}
- if (idleTimeout > 0) {
- pipeline.addLast("idle", new IdleStateHandler(0, 0, idleTimeout, idleTimeoutUnit));
+ if (idleTimeout > 0 || readIdleTimeout > 0 || writeIdleTimeout > 0) {
+ pipeline.addLast("idle", new IdleStateHandler(readIdleTimeout, writeIdleTimeout, idleTimeout, idleTimeoutUnit));
}
}
diff --git a/src/main/java/io/vertx/core/net/impl/NetServerImpl.java b/src/main/java/io/vertx/core/net/impl/NetServerImpl.java
--- a/src/main/java/io/vertx/core/net/impl/NetServerImpl.java
+++ b/src/main/java/io/vertx/core/net/impl/NetServerImpl.java
@@ -112,8 +112,11 @@ protected void initChannel(ChannelPipeline pipeline) {
// only add ChunkedWriteHandler when SSL is enabled otherwise it is not needed as FileRegion is used.
pipeline.addLast("chunkedWriter", new ChunkedWriteHandler()); // For large file / sendfile support
}
- if (options.getIdleTimeout() > 0) {
- pipeline.addLast("idle", new IdleStateHandler(0, 0, options.getIdleTimeout(), options.getIdleTimeoutUnit()));
+ int idleTimeout = options.getIdleTimeout();
+ int readIdleTimeout = options.getReadIdleTimeout();
+ int writeIdleTimeout = options.getWriteIdleTimeout();
+ if (idleTimeout > 0 || readIdleTimeout > 0 || writeIdleTimeout > 0) {
+ pipeline.addLast("idle", new IdleStateHandler(readIdleTimeout, writeIdleTimeout, idleTimeout, options.getIdleTimeoutUnit()));
}
}
diff --git a/src/main/java/io/vertx/core/net/impl/VertxHandler.java b/src/main/java/io/vertx/core/net/impl/VertxHandler.java
--- a/src/main/java/io/vertx/core/net/impl/VertxHandler.java
+++ b/src/main/java/io/vertx/core/net/impl/VertxHandler.java
@@ -161,8 +161,8 @@ public void close(ChannelHandlerContext ctx, ChannelPromise promise) throws Exce
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
- if (evt instanceof IdleStateEvent && ((IdleStateEvent) evt).state() == IdleState.ALL_IDLE) {
- conn.handleIdle();
+ if (evt instanceof IdleStateEvent) {
+ conn.handleIdle((IdleStateEvent) evt);
} else {
ctx.fireUserEventTriggered(evt);
}
| diff --git a/src/test/java/io/vertx/core/net/NetTest.java b/src/test/java/io/vertx/core/net/NetTest.java
--- a/src/test/java/io/vertx/core/net/NetTest.java
+++ b/src/test/java/io/vertx/core/net/NetTest.java
@@ -1169,40 +1169,96 @@ public void testReconnectAttemptsNotEnough() {
}
@Test
- public void testServerIdleTimeout() {
- server.close();
- NetServerOptions netServerOptions = new NetServerOptions();
- netServerOptions.setIdleTimeout(1000);
- netServerOptions.setIdleTimeoutUnit(TimeUnit.MILLISECONDS);
- server = vertx.createNetServer(netServerOptions);
- server.connectHandler(s -> {}).listen(testAddress, ar -> {
- assertTrue(ar.succeeded());
- client.connect(testAddress, res -> {
- assertTrue(res.succeeded());
- NetSocket socket = res.result();
- socket.closeHandler(v -> testComplete());
- });
- });
- await();
+ public void testServerIdleTimeout1() {
+ testTimeout(new NetClientOptions(), new NetServerOptions().setIdleTimeout(500).setIdleTimeoutUnit(TimeUnit.MILLISECONDS), received -> assertEquals("0123456789", received.toString()), true);
}
@Test
- public void testClientIdleTimeout() {
- client.close();
- NetClientOptions netClientOptions = new NetClientOptions();
- netClientOptions.setIdleTimeout(1000);
- netClientOptions.setIdleTimeoutUnit(TimeUnit.MILLISECONDS);
- client = vertx.createNetClient(netClientOptions);
+ public void testServerIdleTimeout2() {
+ testTimeout(new NetClientOptions(), new NetServerOptions().setReadIdleTimeout(500).setIdleTimeoutUnit(TimeUnit.MILLISECONDS), received -> assertEquals("0123456789", received.toString()), true);
+ }
- server.connectHandler(s -> {
- }).listen(testAddress, ar -> {
- assertTrue(ar.succeeded());
- client.connect(testAddress, res -> {
- assertTrue(res.succeeded());
- NetSocket socket = res.result();
- socket.closeHandler(v -> testComplete());
+ @Test
+ public void testServerIdleTimeout3() {
+ // Usually 012 but might be 01 or 0123
+ testTimeout(new NetClientOptions(), new NetServerOptions().setWriteIdleTimeout(500).setIdleTimeoutUnit(TimeUnit.MILLISECONDS), received -> assertFalse("0123456789".equals(received.toString())), true);
+ }
+
+ @Test
+ public void testServerIdleTimeout4() {
+ testTimeout(new NetClientOptions(), new NetServerOptions().setIdleTimeout(500).setIdleTimeoutUnit(TimeUnit.MILLISECONDS), received -> assertEquals("0123456789", received.toString()), false);
+ }
+
+ @Test
+ public void testServerIdleTimeout5() {
+ // Usually 012 but might be 01 or 0123
+ testTimeout(new NetClientOptions(), new NetServerOptions().setReadIdleTimeout(500).setIdleTimeoutUnit(TimeUnit.MILLISECONDS), received -> assertFalse("0123456789".equals(received.toString())), false);
+ }
+
+ @Test
+ public void testServerIdleTimeout6() {
+ testTimeout(new NetClientOptions(), new NetServerOptions().setWriteIdleTimeout(500).setIdleTimeoutUnit(TimeUnit.MILLISECONDS), received -> assertEquals("0123456789", received.toString()), false);
+ }
+
+ @Test
+ public void testClientIdleTimeout1() {
+ testTimeout(new NetClientOptions().setIdleTimeout(500).setIdleTimeoutUnit(TimeUnit.MILLISECONDS), new NetServerOptions(), received -> assertEquals("0123456789", received.toString()), true);
+ }
+
+ @Test
+ public void testClientIdleTimeout2() {
+ testTimeout(new NetClientOptions().setWriteIdleTimeout(500).setIdleTimeoutUnit(TimeUnit.MILLISECONDS), new NetServerOptions(), received -> assertEquals("0123456789", received.toString()), true);
+ }
+
+ @Test
+ public void testClientIdleTimeout3() {
+ // Usually 012 but might be 01 or 0123
+ testTimeout(new NetClientOptions().setReadIdleTimeout(500).setIdleTimeoutUnit(TimeUnit.MILLISECONDS), new NetServerOptions(), received -> assertFalse("0123456789".equals(received.toString())), true);
+ }
+
+ @Test
+ public void testClientIdleTimeout4() {
+ testTimeout(new NetClientOptions().setIdleTimeout(500).setIdleTimeoutUnit(TimeUnit.MILLISECONDS), new NetServerOptions(), received -> assertEquals("0123456789", received.toString()), false);
+ }
+
+ @Test
+ public void testClientIdleTimeout5() {
+ // Usually 012 but might be 01 or 0123
+ testTimeout(new NetClientOptions().setWriteIdleTimeout(500).setIdleTimeoutUnit(TimeUnit.MILLISECONDS), new NetServerOptions(), received -> assertFalse("0123456789".equals(received.toString())), false);
+ }
+
+ @Test
+ public void testClientIdleTimeout6() {
+ testTimeout(new NetClientOptions().setReadIdleTimeout(500).setIdleTimeoutUnit(TimeUnit.MILLISECONDS), new NetServerOptions(), received -> assertEquals("0123456789", received.toString()), false);
+ }
+
+ private void testTimeout(NetClientOptions clientOptions, NetServerOptions serverOptions, Consumer<Buffer> check, boolean clientSends) {
+ server.close();
+ server = vertx.createNetServer(serverOptions);
+ client.close();
+ client = vertx.createNetClient(clientOptions);
+ Buffer received = Buffer.buffer();
+ Handler<NetSocket> receiver = s -> s.handler(received::appendBuffer);
+ Handler<NetSocket> sender = socket -> {
+ AtomicInteger times = new AtomicInteger();
+ vertx.setPeriodic(100, id -> {
+ int val = times.getAndIncrement();
+ if (val < 10) {
+ socket.write("" + val);
+ } else {
+ vertx.cancelTimer(id);
+ }
});
- });
+ socket.closeHandler(v -> {
+ check.accept(received);
+ testComplete();
+ });
+ };
+ Handler<NetSocket> clientHandler = clientSends ? sender : receiver;
+ Handler<NetSocket> serverHandler = clientSends ? receiver : sender;
+ server.connectHandler(serverHandler).listen(testAddress, onSuccess(s -> {
+ client.connect(testAddress, onSuccess(clientHandler::handle));
+ }));
await();
}
@@ -4117,4 +4173,5 @@ public void testConnectTimeout() {
testComplete();
}));
await();
- }}
+ }
+}
| Provide read/write idle timeout granularity for TCP options
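A minimal usage sketch of the options introduced here, mirroring the new tests (timeout values and the port are illustrative):

```java
import io.vertx.core.Vertx;
import io.vertx.core.net.NetClient;
import io.vertx.core.net.NetClientOptions;

import java.util.concurrent.TimeUnit;

public class IdleTimeoutSketch {
  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();
    // Read and write idleness can now be timed out independently of the
    // overall idle timeout; all three share the configured time unit.
    NetClient client = vertx.createNetClient(new NetClientOptions()
      .setReadIdleTimeout(500)   // close when nothing is received for 500 ms
      .setWriteIdleTimeout(500)  // close when nothing is sent for 500 ms
      .setIdleTimeoutUnit(TimeUnit.MILLISECONDS));
    client.connect(1234, "localhost")
      .onSuccess(socket -> socket.closeHandler(v -> System.out.println("closed by idle timeout")));
  }
}
```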
| 2021-10-12T19:37:41Z | 4.2 |
|
eclipse-vertx/vert.x | 4,080 | eclipse-vertx__vert.x-4080 | [
"4069"
] | 3789ea19c7be717f7a744a6fedb27455cfe1cd8e | diff --git a/src/main/java/io/vertx/core/impl/VertxImpl.java b/src/main/java/io/vertx/core/impl/VertxImpl.java
--- a/src/main/java/io/vertx/core/impl/VertxImpl.java
+++ b/src/main/java/io/vertx/core/impl/VertxImpl.java
@@ -1079,11 +1079,12 @@ class SharedWorkerPool extends WorkerPool {
@Override
void close() {
synchronized (VertxImpl.this) {
- if (--refCount == 0) {
- namedWorkerPools.remove(name);
- super.close();
+ if (--refCount > 0) {
+ return;
}
+ namedWorkerPools.remove(name);
}
+ super.close();
}
}
diff --git a/src/main/java/io/vertx/core/impl/WorkerExecutorImpl.java b/src/main/java/io/vertx/core/impl/WorkerExecutorImpl.java
--- a/src/main/java/io/vertx/core/impl/WorkerExecutorImpl.java
+++ b/src/main/java/io/vertx/core/impl/WorkerExecutorImpl.java
@@ -86,12 +86,14 @@ public void close(Handler<AsyncResult<Void>> handler) {
@Override
public void close(Promise<Void> completion) {
+ boolean close;
synchronized (this) {
- if (!closed) {
- closed = true;
- closeFuture.remove(this);
- pool.close();
- }
+ close = !closed;
+ closed = true;
+ }
+ if (close) {
+ closeFuture.remove(this);
+ pool.close();
}
completion.complete();
}
| diff --git a/src/test/java/io/vertx/core/VertxTest.java b/src/test/java/io/vertx/core/VertxTest.java
--- a/src/test/java/io/vertx/core/VertxTest.java
+++ b/src/test/java/io/vertx/core/VertxTest.java
@@ -21,6 +21,9 @@
import io.vertx.core.net.NetClientOptions;
import io.vertx.core.net.NetSocket;
import io.vertx.test.core.AsyncTestBase;
+import io.vertx.test.core.Repeat;
+import io.vertx.test.core.RepeatRule;
+import org.junit.Rule;
import org.junit.Test;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.options.OptionsBuilder;
@@ -41,6 +44,9 @@ public class VertxTest extends AsyncTestBase {
private final org.openjdk.jmh.runner.Runner RUNNER = new Runner(new OptionsBuilder().shouldDoGC(true).build());
+ @Rule
+ public RepeatRule repeatRule = new RepeatRule();
+
@Test
public void testCloseHooksCalled() {
AtomicInteger closedCount = new AtomicInteger();
@@ -318,4 +324,21 @@ private void testTCCL(boolean disable) {
});
await();
}
+
+ @Repeat(times = 100)
+ @Test
+ public void testWorkerExecutorConcurrentCloseWithVertx() throws InterruptedException {
+ Vertx vertx = Vertx.vertx();
+ try {
+ CountDownLatch latch = new CountDownLatch(1);
+ WorkerExecutor workerExecutor = vertx.createSharedWorkerExecutor("test");
+ vertx.runOnContext(v -> {
+ latch.countDown();
+ workerExecutor.close();
+ });
+ latch.await();
+ } finally {
+ vertx.close();
+ }
+ }
}
| Deadlock when closing Vertx and WorkerExecutor concurrently
I recently hit a deadlock on Vert.x 4.1.1; after investigating VertxImpl, CloseFuture, and WorkerExecutorImpl, it looks like the deadlock happens inside Vert.x, please take a look:
In my case, the deadlock is hit when `vertx.close` and `workerExecutor.close` happen to be called at the same time, one from the main thread and the other from an event-loop thread (the `workerExecutor` is created by `vertx.createSharedWorkerExecutor`).
Backtrace printed by jstack:
```
Found one Java-level deadlock:
=============================
"main":
waiting to lock monitor 0x00007fa958116580 (object 0x00000000b3329eb8, a io.vertx.core.impl.WorkerExecutorImpl),
which is held by "vert.x-eventloop-thread-0"
"vert.x-eventloop-thread-0":
waiting to lock monitor 0x00007fa89c01ae80 (object 0x00000000b3329f58, a io.vertx.core.impl.VertxImpl),
which is held by "main"
Java stack information for the threads listed above:
===================================================
"main":
at io.vertx.core.impl.WorkerExecutorImpl.close(WorkerExecutorImpl.java:90)
- waiting to lock <0x00000000b3329eb8> (a io.vertx.core.impl.WorkerExecutorImpl)
at io.vertx.core.impl.CloseFuture.close(CloseFuture.java:117)
at io.vertx.core.impl.VertxImpl.close(VertxImpl.java:578)
- locked <0x00000000b3329f58> (a io.vertx.core.impl.VertxImpl)
at io.vertx.reactivex.core.Vertx.close(Vertx.java:433)
...
"vert.x-eventloop-thread-0":
at io.vertx.core.impl.VertxImpl$SharedWorkerPool.close(VertxImpl.java:1080)
- waiting to lock <0x00000000b3329f58> (a io.vertx.core.impl.VertxImpl)
at io.vertx.core.impl.WorkerExecutorImpl.close(WorkerExecutorImpl.java:93)
- locked <0x00000000b3329eb8> (a io.vertx.core.impl.WorkerExecutorImpl)
at io.vertx.core.impl.WorkerExecutorImpl.close(WorkerExecutorImpl.java:78)
at io.vertx.core.impl.WorkerExecutorImpl.close(WorkerExecutorImpl.java:84)
at io.vertx.reactivex.core.WorkerExecutor.close(WorkerExecutor.java:207)
at io.vertx.reactivex.core.WorkerExecutor.close(WorkerExecutor.java:214)
...
Found 1 deadlock.
```
Checked the Vert.x 4.1.1 code and found:
When closing the WorkerExecutor, it will:
1. Lock itself (WorkerExecutorImpl)
2. Remove itself from the CloseFuture
3. Close the SharedWorkerPool, which requires locking Vertx
When closing Vertx, it will:
1. Lock itself (VertxImpl)
2. Close the CloseFuture, which copies the list of callbacks inside a synchronized block and invokes them outside of it; one of those callbacks closes the same WorkerExecutorImpl instance as above
So, when the callback list in CloseFuture is copied before the WorkerExecutorImpl removes itself, its close callback is still invoked and causes the deadlock.
Time | Main thread | Eventloop thread
-- | -- | --
T1 | Start close Vertx, locked VertxImpl instance |
T2 | | Start close WorkerExecutor, locked WorkerExecutorImpl instance
T3 | In CloseFuture, the list of callbacks are copied |
T4 | | Remove WorkerExecutorImpl instance from CloseFuture. But since the callback is already copied in T3, this doesn't prevent the callback from being called
T5 | The callback of close WorkerExecutorImpl instance is called. This require lock on the WorkerExecutorImpl instance, which was acquired by eventloop thread at T2. So it is waiting for the lock to be released by eventloop thread |
T6 | | Close SharedWorkerPool, and this require lock on the VertxImpl instance, which was acquired by main thread at T1. So it is waiting for the lock to be released by main thread
| Deadlock | Deadlock
The issue still seems to exist in 4.1.2.
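For readers skimming the patch: the fix narrows the critical sections so that neither close path acquires a foreign monitor while holding its own. A minimal sketch of the pattern (class and method names are illustrative, not the actual Vert.x types):

```java
// Decide under the lock, act outside it: the state flip is guarded, but the
// cleanup, which may take other locks, runs after the monitor is released.
class SafeCloseable {
  private boolean closed;

  void close(Runnable cleanup) {
    boolean doClose;
    synchronized (this) {
      doClose = !closed;
      closed = true;
    }
    if (doClose) {
      cleanup.run(); // e.g. removing close hooks and shutting the pool down
    }
  }
}
```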
 | thanks, do you have a reproducer?
Hi @vietj,
I tried writing a test case to illustrate the issue; the code loops 100 times but usually hits the issue in fewer than 10 iterations on my environment:
```java
@Test
public void testVertx() throws InterruptedException {
Logger LOG = LogManager.getLogger();
for (int i = 0; i < 100; i++) {
LOG.debug("Cur iteration: {}", i);
Vertx vertx = Vertx.vertx();
CountDownLatch latch = new CountDownLatch(1);
WorkerExecutor workerExecutor = vertx.createSharedWorkerExecutor("test");
workerExecutor.rxExecuteBlocking(handler -> handler.complete("foo"))
.doFinally(workerExecutor::close)
.subscribe(s -> {
LOG.debug("Received {}", s);
latch.countDown();
});
LOG.debug("Before await");
latch.await();
LOG.debug("After await");
vertx.close();
}
}
```
thanks for the reproducer
NOTE: here is a simpler (without RxJava) reproducer
```java
@Repeat(times = 100)
@Test
public void testWorkerExecutorConcurrentCloseWithVertx() throws InterruptedException {
Vertx vertx = Vertx.vertx();
try {
CountDownLatch latch = new CountDownLatch(1);
WorkerExecutor workerExecutor = vertx.createSharedWorkerExecutor("test");
vertx.runOnContext(v -> {
latch.countDown();
workerExecutor.close();
});
latch.await();
} finally {
vertx.close();
}
}
``` | 2021-08-25T13:22:30Z | 4.1 |
eclipse-vertx/vert.x | 4,125 | eclipse-vertx__vert.x-4125 | [
"4124"
] | 8c7795a4d89b1381334e77136e95fc8263bcf9a0 | diff --git a/src/main/java/io/vertx/core/http/impl/HttpServerFileUploadImpl.java b/src/main/java/io/vertx/core/http/impl/HttpServerFileUploadImpl.java
--- a/src/main/java/io/vertx/core/http/impl/HttpServerFileUploadImpl.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpServerFileUploadImpl.java
@@ -76,7 +76,9 @@ private void handleData(Buffer data) {
Handler<Buffer> handler;
synchronized (HttpServerFileUploadImpl.this) {
handler = dataHandler;
- size += data.length();
+ if (lazyCalculateSize) {
+ size += data.length();
+ }
}
if (handler != null) {
context.dispatch(data, handler);
| diff --git a/src/test/java/io/vertx/core/http/HttpServerFileUploadTest.java b/src/test/java/io/vertx/core/http/HttpServerFileUploadTest.java
--- a/src/test/java/io/vertx/core/http/HttpServerFileUploadTest.java
+++ b/src/test/java/io/vertx/core/http/HttpServerFileUploadTest.java
@@ -10,7 +10,6 @@
*/
package io.vertx.core.http;
-import io.netty.channel.ChannelFuture;
import io.netty.handler.codec.DecoderException;
import io.vertx.core.Context;
import io.vertx.core.Future;
@@ -59,116 +58,137 @@ public void setUp() throws Exception {
@Test
public void testFormUploadEmptyFile() {
- testFormUploadFile("", false, false, false);
+ testFormUploadFile("", false, false, false, false);
+ }
+
+ @Test
+ public void testFormUploadEmptyFileWithContentLength() {
+ testFormUploadFile("", true, false, false, false);
}
@Test
public void testFormUploadSmallFile() {
- testFormUploadFile(TestUtils.randomAlphaString(100), false, false, false);
+ testFormUploadFile(TestUtils.randomAlphaString(100), false, false, false, false);
+ }
+
+ @Test
+ public void testFormUploadSmallFileWithContentLength() {
+ testFormUploadFile(TestUtils.randomAlphaString(100), true, false, false, false);
}
@Test
public void testFormUploadMediumFile() {
- testFormUploadFile(TestUtils.randomAlphaString(20000), false, false, false);
+ testFormUploadFile(TestUtils.randomAlphaString(20000), false, false, false, false);
+ }
+
+ @Test
+ public void testFormUploadMediumFileWithContentLength() {
+ testFormUploadFile(TestUtils.randomAlphaString(20000), true, false, false, false);
}
@Test
public void testFormUploadLargeFile() {
- testFormUploadFile(TestUtils.randomAlphaString(4 * 1024 * 1024), false, false, false);
+ testFormUploadFile(TestUtils.randomAlphaString(4 * 1024 * 1024), false, false, false, false);
+ }
+
+ @Test
+ public void testFormUploadLargeFileWithContentLength() {
+ testFormUploadFile(TestUtils.randomAlphaString(4 * 1024 * 1024), true, false, false, false);
}
@Test
public void testFormUploadEmptyFileStreamToDisk() {
- testFormUploadFile("", true, false, false);
+ testFormUploadFile("", false, true, false, false);
}
@Test
public void testFormUploadSmallFileStreamToDisk() {
- testFormUploadFile(TestUtils.randomAlphaString(100), true, false, false);
+ testFormUploadFile(TestUtils.randomAlphaString(100), false, true, false, false);
}
@Test
public void testFormUploadMediumFileStreamToDisk() {
- testFormUploadFile(TestUtils.randomAlphaString(20 * 1024), true, false, false);
+ testFormUploadFile(TestUtils.randomAlphaString(20 * 1024), false, true, false, false);
}
@Test
public void testFormUploadLargeFileStreamToDisk() {
- testFormUploadFile(TestUtils.randomAlphaString(4 * 1024 * 1024), true, false, false);
+ testFormUploadFile(TestUtils.randomAlphaString(4 * 1024 * 1024), false, true, false, false);
}
@Test
public void testFormUploadWithExtFilename() {
- testFormUploadFile(null, "%c2%a3%20and%20%e2%82%ac%20rates", "the-content", true, false, false);
+ testFormUploadFile(null, "%c2%a3%20and%20%e2%82%ac%20rates", "the-content", false, true, false, false);
}
@Test
public void testBrokenFormUploadEmptyFile() {
- testFormUploadFile("", true, true, false);
+ testFormUploadFile("", false, true, true, false);
}
@Test
public void testBrokenFormUploadSmallFile() {
- testFormUploadFile(TestUtils.randomAlphaString(100), true, true, false);
+ testFormUploadFile(TestUtils.randomAlphaString(100), false, true, true, false);
}
@Test
public void testBrokenFormUploadMediumFile() {
- testFormUploadFile(TestUtils.randomAlphaString(20 * 1024), true, true, false);
+ testFormUploadFile(TestUtils.randomAlphaString(20 * 1024), false, true, true, false);
}
@Test
public void testBrokenFormUploadLargeFile() {
- testFormUploadFile(TestUtils.randomAlphaString(4 * 1024 * 1024), true, true, false);
+ testFormUploadFile(TestUtils.randomAlphaString(4 * 1024 * 1024), false, true, true, false);
}
@Test
public void testBrokenFormUploadEmptyFileStreamToDisk() {
- testFormUploadFile("", true, true, false);
+ testFormUploadFile("", false, true, true, false);
}
@Test
public void testBrokenFormUploadSmallFileStreamToDisk() {
- testFormUploadFile(TestUtils.randomAlphaString(100), true, true, false);
+ testFormUploadFile(TestUtils.randomAlphaString(100), false, true, true, false);
}
@Test
public void testBrokenFormUploadMediumFileStreamToDisk() {
- testFormUploadFile(TestUtils.randomAlphaString(20 * 1024), true, true, false);
+ testFormUploadFile(TestUtils.randomAlphaString(20 * 1024), false, true, true, false);
}
@Test
public void testBrokenFormUploadLargeFileStreamToDisk() {
- testFormUploadFile(TestUtils.randomAlphaString(4 * 1024 * 1024), true, true, false);
+ testFormUploadFile(TestUtils.randomAlphaString(4 * 1024 * 1024), false, true, true, false);
}
@Test
public void testCancelFormUploadEmptyFileStreamToDisk() {
- testFormUploadFile("", true, false, true);
+ testFormUploadFile("", false, true, false, true);
}
@Test
public void testCancelFormUploadSmallFileStreamToDisk() {
- testFormUploadFile(TestUtils.randomAlphaString(100), true, false, true);
+ testFormUploadFile(TestUtils.randomAlphaString(100), false, true, false, true);
}
@Test
public void testCancelFormUploadMediumFileStreamToDisk() {
- testFormUploadFile(TestUtils.randomAlphaString(20 * 1024), true, false, true);
+ testFormUploadFile(TestUtils.randomAlphaString(20 * 1024), false, true, false, true);
}
@Test
public void testCancelFormUploadLargeFileStreamToDisk() {
- testFormUploadFile(TestUtils.randomAlphaString(4 * 1024 * 1024), true, false, true);
+ testFormUploadFile(TestUtils.randomAlphaString(4 * 1024 * 1024), false, true, false, true);
}
- private void testFormUploadFile(String contentStr, boolean streamToDisk, boolean abortClient, boolean cancelStream) {
- testFormUploadFile("tmp-0.txt", "tmp-0.txt", contentStr, streamToDisk, abortClient, cancelStream);
+ private void testFormUploadFile(String contentStr, boolean includeLength, boolean streamToDisk, boolean abortClient, boolean cancelStream) {
+ testFormUploadFile("tmp-0.txt", "tmp-0.txt", contentStr, includeLength, streamToDisk, abortClient, cancelStream);
}
private void testFormUploadFile(String filename,
String extFilename,
String contentStr,
+ boolean includeLength,
boolean streamToDisk,
boolean abortClient,
boolean cancelStream) {
@@ -307,6 +327,7 @@ private void testFormUploadFile(String filename,
String pro = "--" + boundary + "\r\n" +
"Content-Disposition: form-data; name=\"file\"" + (filename == null ? "" : "; filename=\"" + filename + "\"" ) + (extFilename == null ? "" : "; filename*=\"UTF-8''" + extFilename) + "\"\r\n" +
"Content-Type: image/gif\r\n" +
+ (includeLength ? "Content-Length: " + contentStr.length() + "\r\n" : "") +
"\r\n";
req.headers().set("content-length", "" + (pro + contentStr + epi).length());
req.headers().set("content-type", "multipart/form-data; boundary=" + boundary);
 | Server file upload counts the upload size twice if the client provides a Content-Length header
Source: https://github.com/vert-x3/vertx-web/issues/1928
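A sketch of the multipart body that triggers the double counting, modelled on the updated test (boundary and content are illustrative): when the part itself carries a `Content-Length` header, the decoder already knows the size up front, so the per-chunk accumulation must be skipped, which is what gating it on `lazyCalculateSize` achieves.

```java
public class MultipartBodySketch {
  public static void main(String[] args) {
    String boundary = "dLV9Wyq26L"; // illustrative boundary
    String content = "the-content";
    // The part declares its own Content-Length, so the upload size is known
    // before any data chunk arrives; accumulating chunk lengths on top of it
    // doubled the reported size prior to the fix.
    String body =
      "--" + boundary + "\r\n" +
      "Content-Disposition: form-data; name=\"file\"; filename=\"tmp-0.txt\"\r\n" +
      "Content-Type: image/gif\r\n" +
      "Content-Length: " + content.length() + "\r\n" +
      "\r\n" +
      content + "\r\n" +
      "--" + boundary + "--\r\n";
    System.out.println(body);
  }
}
```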
| 2021-10-08T09:36:37Z | 4.2 |
|
eclipse-vertx/vert.x | 4,053 | eclipse-vertx__vert.x-4053 | [
"3436"
] | 0eb1310277d09908d4ce4025db16f9d39c6431db | diff --git a/src/main/java/io/vertx/core/datagram/impl/DatagramSocketImpl.java b/src/main/java/io/vertx/core/datagram/impl/DatagramSocketImpl.java
--- a/src/main/java/io/vertx/core/datagram/impl/DatagramSocketImpl.java
+++ b/src/main/java/io/vertx/core/datagram/impl/DatagramSocketImpl.java
@@ -25,6 +25,7 @@
import io.vertx.core.AsyncResult;
import io.vertx.core.Future;
import io.vertx.core.Handler;
+import io.vertx.core.VertxException;
import io.vertx.core.buffer.Buffer;
import io.vertx.core.datagram.DatagramSocket;
import io.vertx.core.datagram.DatagramSocketOptions;
@@ -43,6 +44,7 @@
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.NetworkInterface;
+import java.net.SocketException;
import java.net.UnknownHostException;
import java.util.Objects;
@@ -98,11 +100,32 @@ public DatagramSocket listenMulticastGroup(String multicastAddress, Handler<Asyn
return this;
}
+ private NetworkInterface determineMulticastNetworkIface() throws Exception {
+ NetworkInterface iface = null;
+ InetSocketAddress localAddr = channel.localAddress();
+ if (localAddr != null) {
+ iface = NetworkInterface.getByInetAddress(localAddr.getAddress());
+ }
+ if (iface == null) {
+ iface = channel.config().getNetworkInterface();
+ }
+ return iface;
+ }
+
@Override
public Future<Void> listenMulticastGroup(String multicastAddress) {
+ NetworkInterface iface;
+ try {
+ iface = determineMulticastNetworkIface();
+ } catch (Exception e) {
+ return context.failedFuture(e);
+ }
+ if (iface == null) {
+ return context.failedFuture("A valid network interface could not be determined from the socket bind address or multicast interface");
+ }
ChannelFuture fut;
try {
- fut = channel.joinGroup(InetAddress.getByName(multicastAddress));
+ fut = channel.joinGroup(InetAddress.getByName(multicastAddress), iface, null);
} catch (UnknownHostException e) {
return context.failedFuture(e);
}
@@ -150,9 +173,18 @@ public DatagramSocket unlistenMulticastGroup(String multicastAddress, Handler<As
@Override
public Future<Void> unlistenMulticastGroup(String multicastAddress) {
+ NetworkInterface iface;
+ try {
+ iface = determineMulticastNetworkIface();
+ } catch (Exception e) {
+ return context.failedFuture(e);
+ }
+ if (iface == null) {
+ return context.failedFuture("A valid network interface could not be determined from the socket bind address or multicast interface");
+ }
ChannelFuture fut;
try {
- fut = channel.leaveGroup(InetAddress.getByName(multicastAddress));
+ fut = channel.leaveGroup(InetAddress.getByName(multicastAddress), iface, null);
} catch (Exception e) {
return context.failedFuture(e);
}
| diff --git a/src/test/java/io/vertx/core/datagram/DatagramTest.java b/src/test/java/io/vertx/core/datagram/DatagramTest.java
--- a/src/test/java/io/vertx/core/datagram/DatagramTest.java
+++ b/src/test/java/io/vertx/core/datagram/DatagramTest.java
@@ -13,19 +13,21 @@
import io.netty.buffer.ByteBuf;
import io.netty.buffer.UnpooledHeapByteBuf;
import io.vertx.core.AbstractVerticle;
+import io.vertx.core.AsyncResult;
import io.vertx.core.Context;
import io.vertx.core.DeploymentOptions;
+import io.vertx.core.Handler;
import io.vertx.core.Promise;
import io.vertx.core.Vertx;
import io.vertx.core.buffer.Buffer;
-import io.vertx.core.datagram.DatagramSocket;
-import io.vertx.core.datagram.DatagramSocketOptions;
+import io.vertx.core.impl.Utils;
import io.vertx.core.json.JsonObject;
import io.vertx.core.net.NetworkOptions;
import io.vertx.core.streams.WriteStream;
import io.vertx.test.core.TestUtils;
import io.vertx.test.core.VertxTestBase;
import io.vertx.test.netty.TestLoggerFactory;
+import org.junit.Assume;
import org.junit.Test;
import java.net.InetAddress;
@@ -35,6 +37,7 @@
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.BiConsumer;
import static io.vertx.test.core.TestUtils.*;
@@ -357,38 +360,68 @@ public void testPause() {
@Test
public void testMulticastJoinLeave() throws Exception {
+ String iface = NetworkInterface.getByInetAddress(InetAddress.getByName("127.0.0.1")).getName();
+ testMulticastJoinLeave("0.0.0.0", new DatagramSocketOptions(), new DatagramSocketOptions().setMulticastNetworkInterface(iface), (groupAddress, handler) -> {
+ peer1.listenMulticastGroup(groupAddress, iface, null, handler);
+ }, (groupAddress, handler) -> {
+ peer1.unlistenMulticastGroup(groupAddress, iface, null, handler);
+ });
+ }
+
+ @Test
+ public void testMulticastJoinLeaveReuseMulticastNetworkInterface() throws Exception {
+ String iface = NetworkInterface.getByInetAddress(InetAddress.getByName("127.0.0.1")).getName();
+ DatagramSocketOptions options = new DatagramSocketOptions().setMulticastNetworkInterface(iface);
+ testMulticastJoinLeave("0.0.0.0", options, options, (groupAddress, handler) -> {
+ peer1.listenMulticastGroup(groupAddress, handler);
+ }, (groupAddress, handler) -> {
+ peer1.unlistenMulticastGroup(groupAddress, handler);
+ });
+ }
+
+ @Test
+ public void testMulticastJoinLeaveBindOnMulticastGroup() throws Exception {
+ Assume.assumeFalse(Utils.isWindows());
+ String iface = NetworkInterface.getByInetAddress(InetAddress.getByName("127.0.0.1")).getName();
+ DatagramSocketOptions options = new DatagramSocketOptions().setMulticastNetworkInterface(iface);
+ testMulticastJoinLeave("230.0.0.1", options, options, (groupAddress, handler) -> {
+ peer1.listenMulticastGroup(groupAddress, handler);
+ }, (groupAddress, handler) -> {
+ peer1.unlistenMulticastGroup(groupAddress, handler);
+ });
+ }
+
+ private void testMulticastJoinLeave(String bindAddress,
+ DatagramSocketOptions options1,
+ DatagramSocketOptions options2,
+ BiConsumer<String, Handler<AsyncResult<Void>>> join,
+ BiConsumer<String, Handler<AsyncResult<Void>>> leave) {
if (USE_NATIVE_TRANSPORT) {
return;
}
- Buffer buffer = TestUtils.randomBuffer(128);
+ Buffer buffer = Buffer.buffer("HELLO");
String groupAddress = "230.0.0.1";
- String iface = NetworkInterface.getByInetAddress(InetAddress.getByName("127.0.0.1")).getName();
AtomicBoolean received = new AtomicBoolean();
- peer1 = vertx.createDatagramSocket(new DatagramSocketOptions().setMulticastNetworkInterface(iface));
- peer2 = vertx.createDatagramSocket(new DatagramSocketOptions().setMulticastNetworkInterface(iface));
+ peer1 = vertx.createDatagramSocket(options1);
+ peer2 = vertx.createDatagramSocket(options2);
peer1.handler(packet -> {
assertEquals(buffer, packet.data());
received.set(true);
});
- peer1.listen(1234, "0.0.0.0", ar1 -> {
- assertTrue(ar1.succeeded());
- peer1.listenMulticastGroup(groupAddress, iface, null, ar2 -> {
- assertTrue(ar2.succeeded());
- peer2.send(buffer, 1234, groupAddress, ar3 -> {
- assertTrue(ar3.succeeded());
+ peer1.listen(1234, bindAddress, onSuccess(v1 -> {
+ join.accept(groupAddress, onSuccess(v2 -> {
+ peer2.send(buffer, 1234, groupAddress, onSuccess(ar3 -> {
// leave group in 1 second so give it enough time to really receive the packet first
vertx.setTimer(1000, id -> {
- peer1.unlistenMulticastGroup(groupAddress, iface, null, ar4 -> {
- assertTrue(ar4.succeeded());
+ leave.accept(groupAddress, onSuccess(ar4 -> {
AtomicBoolean receivedAfter = new AtomicBoolean();
peer1.handler(packet -> {
// Should not receive any more event as it left the group
receivedAfter.set(true);
});
- peer2.send(buffer, 1234, groupAddress, ar5 -> {
- assertTrue(ar5.succeeded());
+ peer2.send(buffer, 1234, groupAddress, onSuccess(v5 -> {
// schedule a timer which will check in 1 second if we received a message after the group
// was left before
vertx.setTimer(1000, id2 -> {
@@ -396,12 +429,30 @@ public void testMulticastJoinLeave() throws Exception {
assertTrue(received.get());
testComplete();
});
- });
- });
+ }));
+ }));
});
- });
- });
- });
+ }));
+ }));
+ }));
+ await();
+ }
+
+ @Test
+ public void testMulticastJoinWithoutNetworkInterface() {
+ peer1 = vertx.createDatagramSocket(new DatagramSocketOptions());
+ peer1.listenMulticastGroup("230.0.0.1", onFailure(err -> {
+ testComplete();
+ }));
+ await();
+ }
+
+ @Test
+ public void testMulticastLeaveWithoutNetworkInterface() {
+ peer1 = vertx.createDatagramSocket(new DatagramSocketOptions());
+ peer1.unlistenMulticastGroup("230.0.0.1", onFailure(err -> {
+ testComplete();
+ }));
await();
}
@@ -601,4 +652,5 @@ public void start(Promise<Void> startPromise) {
}));
}));
await();
- }}
+ }
+}
| `listenMulticastGroup` doesn't work with a null NetworkInterface
### Version
At least vertx 3.8.3-3.9.1
### Context
`listenMulticastGroup` throws a NullPointerException when it is called with the `listenMulticastGroup(String multicastAddress, Handler<AsyncResult<DatagramSocket>> handler)` signature.
### Do you have a reproducer?
https://github.com/hiddenswitch/udp-broadcast-reproducer
You can use `./gradlew test --tests "udp.broadcast.reproducer.AppTest.testUdpListensNullInterface"`
### Steps to reproduce
1. Create a UDP socket (the "recipient" socket).
2. Listen to a multicast group with the specified overload.
3. Observe a null pointer exception (see below).
4. Observe that heuristically choosing an interface works (a minimal reproducer sketch follows the stack trace below).
```
SEVERE: Unhandled exception
java.lang.NullPointerException: networkInterface
at io.netty.util.internal.ObjectUtil.checkNotNull(ObjectUtil.java:33)
at io.netty.channel.socket.nio.NioDatagramChannel.joinGroup(NioDatagramChannel.java:409)
at io.netty.channel.socket.nio.NioDatagramChannel.joinGroup(NioDatagramChannel.java:371)
at io.netty.channel.socket.nio.NioDatagramChannel.joinGroup(NioDatagramChannel.java:365)
at io.vertx.core.datagram.impl.DatagramSocketImpl.listenMulticastGroup(DatagramSocketImpl.java:96)
at udp.broadcast.reproducer.AppTest.lambda$testUdpListensNullInterface$0(AppTest.java:31)
at io.vertx.junit5.VertxTestContext.lambda$succeeding$1(VertxTestContext.java:182)
at io.vertx.core.datagram.impl.DatagramSocketImpl.lambda$null$0(DatagramSocketImpl.java:206)
at io.vertx.core.impl.ContextImpl.executeTask(ContextImpl.java:366)
at io.vertx.core.impl.EventLoopContext.execute(EventLoopContext.java:43)
at io.vertx.core.impl.ContextImpl.executeFromIO(ContextImpl.java:229)
at io.vertx.core.net.impl.ChannelFutureListenerAdapter.operationComplete(ChannelFutureListenerAdapter.java:39)
at io.vertx.core.net.impl.ChannelFutureListenerAdapter.operationComplete(ChannelFutureListenerAdapter.java:24)
at io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:577)
at io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:551)
at io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:490)
at io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:615)
at io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:604)
at io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:104)
at io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84)
at io.netty.channel.AbstractChannel$AbstractUnsafe.safeSetSuccess(AbstractChannel.java:984)
at io.netty.channel.AbstractChannel$AbstractUnsafe.bind(AbstractChannel.java:566)
at io.netty.channel.DefaultChannelPipeline$HeadContext.bind(DefaultChannelPipeline.java:1334)
at io.netty.channel.AbstractChannelHandlerContext.invokeBind(AbstractChannelHandlerContext.java:506)
at io.netty.channel.AbstractChannelHandlerContext.access$900(AbstractChannelHandlerContext.java:61)
at io.netty.channel.AbstractChannelHandlerContext$8.run(AbstractChannelHandlerContext.java:496)
at io.netty.util.concurrent.AbstractEventExecutor.safeExecute(AbstractEventExecutor.java:164)
at io.netty.util.concurrent.SingleThreadEventExecutor.runAllTasks(SingleThreadEventExecutor.java:472)
at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:500)
at io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:989)
at io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
at io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
at java.base/java.lang.Thread.run(Thread.java:830)
```
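For reference, a minimal reproducer sketch of steps 1-3 above (Vert.x 3.x callback API; the port and group address are illustrative):

```java
import io.vertx.core.Vertx;
import io.vertx.core.datagram.DatagramSocket;
import io.vertx.core.datagram.DatagramSocketOptions;

public class MulticastNpeSketch {
  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();
    // Note: no multicast network interface is configured on the options.
    DatagramSocket recipient = vertx.createDatagramSocket(new DatagramSocketOptions());
    recipient.listen(1234, "0.0.0.0", listen -> {
      // This overload passes a null NetworkInterface down to Netty,
      // which raises the NullPointerException shown above.
      recipient.listenMulticastGroup("230.0.0.1", join ->
        System.out.println("joined: " + join.succeeded()));
    });
  }
}
```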
### Extra
**Platform:** macOS 10.15.5 (`Darwin MacBook-Pro.local 19.5.0 Darwin Kernel Version 19.5.0: Thu Apr 30 18:25:59 PDT 2020; root:xnu-6153.121.1~7/RELEASE_X86_64 x86_64`)
**Java:**
```
openjdk version "13.0.2" 2020-01-14
OpenJDK Runtime Environment (build 13.0.2+8)
OpenJDK 64-Bit Server VM (build 13.0.2+8, mixed mode, sharing)
```
| see also https://github.com/eclipse-vertx/vert.x/issues/3540
Yes, that one fails; the socket is not able to get a network interface from the local address | 2021-08-05T08:53:37Z | 4.1 |
eclipse-vertx/vert.x | 4,037 | eclipse-vertx__vert.x-4037 | [
"4036"
] | d07329c92ce9b981a9425d2d0a3187554414a9bf | diff --git a/src/main/generated/io/vertx/core/VertxOptionsConverter.java b/src/main/generated/io/vertx/core/VertxOptionsConverter.java
--- a/src/main/generated/io/vertx/core/VertxOptionsConverter.java
+++ b/src/main/generated/io/vertx/core/VertxOptionsConverter.java
@@ -31,6 +31,11 @@ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, VertxOp
obj.setBlockedThreadCheckIntervalUnit(java.util.concurrent.TimeUnit.valueOf((String)member.getValue()));
}
break;
+ case "disableTCCL":
+ if (member.getValue() instanceof Boolean) {
+ obj.setDisableTCCL((Boolean)member.getValue());
+ }
+ break;
case "eventBusOptions":
if (member.getValue() instanceof JsonObject) {
obj.setEventBusOptions(new io.vertx.core.eventbus.EventBusOptions((io.vertx.core.json.JsonObject)member.getValue()));
@@ -132,6 +137,7 @@ static void toJson(VertxOptions obj, java.util.Map<String, Object> json) {
if (obj.getBlockedThreadCheckIntervalUnit() != null) {
json.put("blockedThreadCheckIntervalUnit", obj.getBlockedThreadCheckIntervalUnit().name());
}
+ json.put("disableTCCL", obj.getDisableTCCL());
if (obj.getEventBusOptions() != null) {
json.put("eventBusOptions", obj.getEventBusOptions().toJson());
}
diff --git a/src/main/java/io/vertx/core/VertxOptions.java b/src/main/java/io/vertx/core/VertxOptions.java
--- a/src/main/java/io/vertx/core/VertxOptions.java
+++ b/src/main/java/io/vertx/core/VertxOptions.java
@@ -32,6 +32,8 @@
@DataObject(generateConverter = true, publicConverter = false)
public class VertxOptions {
+ private static final String DISABLE_TCCL_PROP_NAME = "vertx.disableTCCL";
+
/**
* The default number of event loop threads to be used = 2 * number of cores on the machine
*/
@@ -109,6 +111,8 @@ public class VertxOptions {
*/
public static final TimeUnit DEFAULT_WARNING_EXCEPTION_TIME_UNIT = TimeUnit.NANOSECONDS;
+ public static final boolean DEFAULT_DISABLE_TCCL = Boolean.getBoolean(DISABLE_TCCL_PROP_NAME);
+
private int eventLoopPoolSize = DEFAULT_EVENT_LOOP_POOL_SIZE;
private int workerPoolSize = DEFAULT_WORKER_POOL_SIZE;
private int internalBlockingPoolSize = DEFAULT_INTERNAL_BLOCKING_POOL_SIZE;
@@ -130,6 +134,7 @@ public class VertxOptions {
private TimeUnit maxWorkerExecuteTimeUnit = DEFAULT_MAX_WORKER_EXECUTE_TIME_UNIT;
private TimeUnit warningExceptionTimeUnit = DEFAULT_WARNING_EXCEPTION_TIME_UNIT;
private TimeUnit blockedThreadCheckIntervalUnit = DEFAULT_BLOCKED_THREAD_CHECK_INTERVAL_UNIT;
+ private boolean disableTCCL = DEFAULT_DISABLE_TCCL;
/**
* Default constructor
@@ -163,6 +168,7 @@ public VertxOptions(VertxOptions other) {
this.warningExceptionTimeUnit = other.warningExceptionTimeUnit;
this.blockedThreadCheckIntervalUnit = other.blockedThreadCheckIntervalUnit;
this.tracingOptions = other.tracingOptions != null ? other.tracingOptions.copy() : null;
+ this.disableTCCL = other.disableTCCL;
}
/**
@@ -643,6 +649,32 @@ public VertxOptions setTracingOptions(TracingOptions tracingOptions) {
return this;
}
+ /**
+ * @return whether Vert.x sets the {@link Context} classloader as the thread context classloader on actions executed on that {@link Context}
+ */
+ public boolean getDisableTCCL() {
+ return disableTCCL;
+ }
+
+ /**
+ * Configures whether Vert.x sets the {@link Context} classloader as the thread context classloader on actions executed on that {@link Context}.
+ *
+ * When a {@link Context} is created the current thread classloader is captured and associated with this classloader.
+ *
+ * Likewise when a Verticle is created, the Verticle's {@link Context} classloader is set to the current thread classloader
+ * unless this classloader is overridden by {@link DeploymentOptions#getClassLoader()}.
+ *
+ * This setting overrides the (legacy) system property {@code vertx.disableTCCL} and provides control at the
+ * Vertx instance level.
+ *
+ * @param disableTCCL {@code true} to disable thread context classloader update by Vertx
+ * @return a reference to this, so the API can be used fluently
+ */
+ public VertxOptions setDisableTCCL(boolean disableTCCL) {
+ this.disableTCCL = disableTCCL;
+ return this;
+ }
+
public JsonObject toJson() {
JsonObject json = new JsonObject();
VertxOptionsConverter.toJson(this, json);
@@ -673,6 +705,7 @@ public String toString() {
", eventbus=" + eventBusOptions.toJson() +
", warningExceptionTimeUnit=" + warningExceptionTimeUnit +
", warningExceptionTime=" + warningExceptionTime +
+ ", disableTCCL=" + disableTCCL +
'}';
}
}
diff --git a/src/main/java/io/vertx/core/impl/AbstractContext.java b/src/main/java/io/vertx/core/impl/AbstractContext.java
--- a/src/main/java/io/vertx/core/impl/AbstractContext.java
+++ b/src/main/java/io/vertx/core/impl/AbstractContext.java
@@ -19,8 +19,6 @@
import java.util.List;
-import static io.vertx.core.impl.VertxThread.DISABLE_TCCL;
-
/**
* A context implementation that does not hold any specific state.
*
@@ -29,6 +27,12 @@
*/
abstract class AbstractContext implements ContextInternal {
+ final boolean disableTCCL;
+
+ public AbstractContext(boolean disableTCCL) {
+ this.disableTCCL = disableTCCL;
+ }
+
@Override
public abstract boolean isEventLoopContext();
@@ -63,7 +67,7 @@ public final ContextInternal beginDispatch() {
ContextInternal prev;
VertxThread th = (VertxThread) Thread.currentThread();
prev = th.beginEmission(this);
- if (!DISABLE_TCCL) {
+ if (!disableTCCL) {
th.setContextClassLoader(classLoader());
}
return prev;
@@ -71,7 +75,7 @@ public final ContextInternal beginDispatch() {
public final void endDispatch(ContextInternal previous) {
VertxThread th = (VertxThread) Thread.currentThread();
- if (!DISABLE_TCCL) {
+ if (!disableTCCL) {
th.setContextClassLoader(previous != null ? previous.classLoader() : null);
}
th.endEmission(previous);
diff --git a/src/main/java/io/vertx/core/impl/ContextImpl.java b/src/main/java/io/vertx/core/impl/ContextImpl.java
--- a/src/main/java/io/vertx/core/impl/ContextImpl.java
+++ b/src/main/java/io/vertx/core/impl/ContextImpl.java
@@ -75,10 +75,9 @@ static void executeIsolated(Handler<Void> task) {
WorkerPool workerPool,
Deployment deployment,
CloseFuture closeFuture,
- ClassLoader tccl) {
- if (VertxThread.DISABLE_TCCL && tccl != ClassLoader.getSystemClassLoader()) {
- log.warn("You have disabled TCCL checks but you have a custom TCCL to set.");
- }
+ ClassLoader tccl,
+ boolean disableTCCL) {
+ super(disableTCCL);
this.deployment = deployment;
this.config = deployment != null ? deployment.config() : new JsonObject();
this.eventLoop = eventLoop;
diff --git a/src/main/java/io/vertx/core/impl/DuplicatedContext.java b/src/main/java/io/vertx/core/impl/DuplicatedContext.java
--- a/src/main/java/io/vertx/core/impl/DuplicatedContext.java
+++ b/src/main/java/io/vertx/core/impl/DuplicatedContext.java
@@ -38,6 +38,7 @@ class DuplicatedContext extends AbstractContext {
private ConcurrentMap<Object, Object> localData;
DuplicatedContext(ContextImpl delegate) {
+ super(delegate.disableTCCL);
this.delegate = delegate;
}
diff --git a/src/main/java/io/vertx/core/impl/EventLoopContext.java b/src/main/java/io/vertx/core/impl/EventLoopContext.java
--- a/src/main/java/io/vertx/core/impl/EventLoopContext.java
+++ b/src/main/java/io/vertx/core/impl/EventLoopContext.java
@@ -27,8 +27,9 @@ public class EventLoopContext extends ContextImpl {
WorkerPool workerPool,
Deployment deployment,
CloseFuture closeFuture,
- ClassLoader tccl) {
- super(vertx, eventLoop, internalBlockingPool, workerPool, deployment, closeFuture, tccl);
+ ClassLoader tccl,
+ boolean disableTCCL) {
+ super(vertx, eventLoop, internalBlockingPool, workerPool, deployment, closeFuture, tccl, disableTCCL);
}
@Override
diff --git a/src/main/java/io/vertx/core/impl/VertxImpl.java b/src/main/java/io/vertx/core/impl/VertxImpl.java
--- a/src/main/java/io/vertx/core/impl/VertxImpl.java
+++ b/src/main/java/io/vertx/core/impl/VertxImpl.java
@@ -133,6 +133,7 @@ public class VertxImpl implements VertxInternal, MetricsProvider {
private final Transport transport;
private final VertxTracer tracer;
private final ThreadLocal<WeakReference<AbstractContext>> stickyContext = new ThreadLocal<>();
+ private final boolean disableTCCL;
VertxImpl(VertxOptions options, ClusterManager clusterManager, NodeSelector nodeSelector, VertxMetrics metrics,
VertxTracer<?, ?> tracer, Transport transport, FileResolver fileResolver, VertxThreadFactory threadFactory,
@@ -165,6 +166,7 @@ public class VertxImpl implements VertxInternal, MetricsProvider {
defaultWorkerPoolSize = options.getWorkerPoolSize();
maxWorkerExecTime = options.getMaxWorkerExecuteTime();
maxWorkerExecTimeUnit = options.getMaxWorkerExecuteTimeUnit();
+ disableTCCL = options.getDisableTCCL();
this.executorServiceFactory = executorServiceFactory;
this.threadFactory = threadFactory;
@@ -471,12 +473,12 @@ public boolean cancelTimer(long id) {
@Override
public EventLoopContext createEventLoopContext(Deployment deployment, CloseFuture closeFuture, WorkerPool workerPool, ClassLoader tccl) {
- return new EventLoopContext(this, eventLoopGroup.next(), internalWorkerPool, workerPool != null ? workerPool : this.workerPool, deployment, closeFuture, tccl);
+ return new EventLoopContext(this, eventLoopGroup.next(), internalWorkerPool, workerPool != null ? workerPool : this.workerPool, deployment, closeFuture, tccl, disableTCCL);
}
@Override
public EventLoopContext createEventLoopContext(EventLoop eventLoop, WorkerPool workerPool, ClassLoader tccl) {
- return new EventLoopContext(this, eventLoop, internalWorkerPool, workerPool != null ? workerPool : this.workerPool, null, closeFuture, tccl);
+ return new EventLoopContext(this, eventLoop, internalWorkerPool, workerPool != null ? workerPool : this.workerPool, null, closeFuture, tccl, disableTCCL);
}
@Override
@@ -486,7 +488,7 @@ public EventLoopContext createEventLoopContext() {
@Override
public WorkerContext createWorkerContext(Deployment deployment, CloseFuture closeFuture, WorkerPool workerPool, ClassLoader tccl) {
- return new WorkerContext(this, internalWorkerPool, workerPool != null ? workerPool : this.workerPool, deployment, closeFuture, tccl);
+ return new WorkerContext(this, internalWorkerPool, workerPool != null ? workerPool : this.workerPool, deployment, closeFuture, tccl, disableTCCL);
}
@Override
diff --git a/src/main/java/io/vertx/core/impl/VertxThread.java b/src/main/java/io/vertx/core/impl/VertxThread.java
--- a/src/main/java/io/vertx/core/impl/VertxThread.java
+++ b/src/main/java/io/vertx/core/impl/VertxThread.java
@@ -20,9 +20,6 @@
*/
public class VertxThread extends FastThreadLocalThread implements BlockedThreadChecker.Task {
- static final String DISABLE_TCCL_PROP_NAME = "vertx.disableTCCL";
- static final boolean DISABLE_TCCL = Boolean.getBoolean(DISABLE_TCCL_PROP_NAME);
-
private final boolean worker;
private final long maxExecTime;
private final TimeUnit maxExecTimeUnit;
diff --git a/src/main/java/io/vertx/core/impl/WorkerContext.java b/src/main/java/io/vertx/core/impl/WorkerContext.java
--- a/src/main/java/io/vertx/core/impl/WorkerContext.java
+++ b/src/main/java/io/vertx/core/impl/WorkerContext.java
@@ -28,8 +28,9 @@ public class WorkerContext extends ContextImpl {
WorkerPool workerPool,
Deployment deployment,
CloseFuture closeFuture,
- ClassLoader tccl) {
- super(vertx, vertx.getEventLoopGroup().next(), internalBlockingPool, workerPool, deployment, closeFuture, tccl);
+ ClassLoader tccl,
+ boolean disableTCCL) {
+ super(vertx, vertx.getEventLoopGroup().next(), internalBlockingPool, workerPool, deployment, closeFuture, tccl, disableTCCL);
}
@Override
| diff --git a/src/test/benchmarks/io/vertx/core/impl/BenchmarkContext.java b/src/test/benchmarks/io/vertx/core/impl/BenchmarkContext.java
--- a/src/test/benchmarks/io/vertx/core/impl/BenchmarkContext.java
+++ b/src/test/benchmarks/io/vertx/core/impl/BenchmarkContext.java
@@ -30,7 +30,7 @@ public static BenchmarkContext create(Vertx vertx) {
}
public BenchmarkContext(VertxInternal vertx, WorkerPool internalBlockingPool, WorkerPool workerPool, ClassLoader tccl) {
- super(vertx, vertx.getEventLoopGroup().next(), internalBlockingPool, workerPool, null, null, tccl);
+ super(vertx, vertx.getEventLoopGroup().next(), internalBlockingPool, workerPool, null, null, tccl, false);
}
@Override
diff --git a/src/test/java/io/vertx/core/VertxTest.java b/src/test/java/io/vertx/core/VertxTest.java
--- a/src/test/java/io/vertx/core/VertxTest.java
+++ b/src/test/java/io/vertx/core/VertxTest.java
@@ -26,6 +26,8 @@
import org.openjdk.jmh.runner.options.OptionsBuilder;
import java.lang.ref.WeakReference;
+import java.net.URL;
+import java.net.URLClassLoader;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -290,4 +292,30 @@ public void testCloseVertxShouldWaitConcurrentCloseHook() throws Exception {
ref.get().complete();
assertWaitUntil(closed::get);
}
+
+ @Test
+ public void testEnableTCCL() {
+ testTCCL(false);
+ }
+
+ @Test
+ public void testDisableTCCL() {
+ testTCCL(true);
+ }
+
+ private void testTCCL(boolean disable) {
+ VertxOptions options = new VertxOptions().setDisableTCCL(disable);
+ Vertx vertx = Vertx.vertx(options);
+ ClassLoader orig = Thread.currentThread().getContextClassLoader();
+ ClassLoader cl = new URLClassLoader(new URL[0], orig);
+ Thread.currentThread().setContextClassLoader(cl);
+ Context ctx = vertx.getOrCreateContext();
+ Thread.currentThread().setContextClassLoader(orig);
+ ctx.runOnContext(v -> {
+ ClassLoader expected = disable ? orig : cl;
+ assertSame(expected, Thread.currentThread().getContextClassLoader());
+ testComplete();
+ });
+ await();
+ }
}
| Remove legacy TCCL check log
This log is not really helpful; in addition, it is possible to disable TCCL and still have a TCCL set to a non-system classloader.
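For illustration, a minimal sketch of the per-instance switch introduced by the patch above (behavior as described in the new `setDisableTCCL` Javadoc):

```java
import io.vertx.core.Vertx;
import io.vertx.core.VertxOptions;

public class DisableTcclSketch {
  public static void main(String[] args) {
    // Per-instance alternative to the process-wide vertx.disableTCCL system
    // property: this instance will not touch the thread context classloader
    // when dispatching on its contexts.
    Vertx vertx = Vertx.vertx(new VertxOptions().setDisableTCCL(true));
    vertx.runOnContext(v ->
      System.out.println(Thread.currentThread().getContextClassLoader()));
  }
}
```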
| 2021-07-27T08:27:34Z | 4.1 |
|
eclipse-vertx/vert.x | 3,946 | eclipse-vertx__vert.x-3946 | [
"1946"
] | ac2028b85a6afe2ef5f7bdfacf1d3ec18979a615 | diff --git a/src/main/java/io/vertx/core/http/impl/Http1xConnectionBase.java b/src/main/java/io/vertx/core/http/impl/Http1xConnectionBase.java
--- a/src/main/java/io/vertx/core/http/impl/Http1xConnectionBase.java
+++ b/src/main/java/io/vertx/core/http/impl/Http1xConnectionBase.java
@@ -119,17 +119,17 @@ public HttpConnection goAwayHandler(@Nullable Handler<GoAway> handler) {
@Override
public HttpConnection shutdownHandler(@Nullable Handler<Void> handler) {
- throw new UnsupportedOperationException("HTTP/1.x connections don't support GOAWAY");
+ throw new UnsupportedOperationException("HTTP/1.x connections cannot be shutdown");
}
@Override
public void shutdown(long timeout, Handler<AsyncResult<Void>> handler) {
- throw new UnsupportedOperationException("HTTP/1.x connections don't support GOAWAY");
+ throw new UnsupportedOperationException("HTTP/1.x connections cannot be shutdown");
}
@Override
public Future<Void> shutdown(long timeoutMs) {
- throw new UnsupportedOperationException("HTTP/1.x connections don't support GOAWAY");
+ throw new UnsupportedOperationException("HTTP/1.x connections cannot be shutdown");
}
@Override
diff --git a/src/main/java/io/vertx/core/http/impl/Http2UpgradedClientConnection.java b/src/main/java/io/vertx/core/http/impl/Http2UpgradedClientConnection.java
--- a/src/main/java/io/vertx/core/http/impl/Http2UpgradedClientConnection.java
+++ b/src/main/java/io/vertx/core/http/impl/Http2UpgradedClientConnection.java
@@ -99,10 +99,12 @@ public long lastResponseReceivedTimestamp() {
* The first stream that will send the request using HTTP/1, upgrades the connection when the protocol
* switches and receives the response with HTTP/2 frames.
*/
- private class UpgradingStream implements HttpClientStream {
+ private static class UpgradingStream implements HttpClientStream {
- private final Http1xClientConnection conn;
- private final HttpClientStream stream;
+ private final Http1xClientConnection upgradingConnection;
+ private final HttpClientStream upgradingStream;
+ private final Http2UpgradedClientConnection upgradedConnection;
+ private HttpClientStream upgradedStream;
private Handler<HttpResponseHead> headHandler;
private Handler<Buffer> chunkHandler;
private Handler<MultiMap> endHandler;
@@ -113,16 +115,16 @@ private class UpgradingStream implements HttpClientStream {
private Handler<HttpClientPush> pushHandler;
private Handler<HttpFrame> unknownFrameHandler;
private Handler<Void> closeHandler;
- private HttpClientStream upgradedStream;
- UpgradingStream(HttpClientStream stream, Http1xClientConnection conn) {
- this.conn = conn;
- this.stream = stream;
+ UpgradingStream(HttpClientStream stream, Http2UpgradedClientConnection upgradedConnection, Http1xClientConnection upgradingConnection) {
+ this.upgradedConnection = upgradedConnection;
+ this.upgradingConnection = upgradingConnection;
+ this.upgradingStream = stream;
}
@Override
public HttpClientConnection connection() {
- return Http2UpgradedClientConnection.this;
+ return upgradedConnection;
}
/**
@@ -136,7 +138,7 @@ public void writeHead(HttpRequestHead request,
StreamPriority priority,
boolean connect,
Handler<AsyncResult<Void>> handler) {
- ChannelPipeline pipeline = conn.channel().pipeline();
+ ChannelPipeline pipeline = upgradingConnection.channel().pipeline();
HttpClientCodec httpCodec = pipeline.get(HttpClientCodec.class);
class UpgradeRequestHandler extends ChannelInboundHandlerAdapter {
@@ -146,7 +148,7 @@ public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exc
ChannelPipeline pipeline = ctx.pipeline();
if (evt == HttpClientUpgradeHandler.UpgradeEvent.UPGRADE_SUCCESSFUL) {
// Upgrade handler will remove itself and remove the HttpClientCodec
- pipeline.remove(conn.channelHandlerContext().handler());
+ pipeline.remove(upgradingConnection.channelHandlerContext().handler());
}
}
@@ -164,15 +166,17 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception
}
}
- VertxHttp2ClientUpgradeCodec upgradeCodec = new VertxHttp2ClientUpgradeCodec(client.getOptions().getInitialSettings()) {
+ VertxHttp2ClientUpgradeCodec upgradeCodec = new VertxHttp2ClientUpgradeCodec(upgradedConnection.client.getOptions().getInitialSettings()) {
@Override
public void upgradeTo(ChannelHandlerContext ctx, FullHttpResponse upgradeResponse) throws Exception {
// Now we need to upgrade this to an HTTP2
- VertxHttp2ConnectionHandler<Http2ClientConnection> handler = Http2ClientConnection.createHttp2ConnectionHandler(client, conn.metrics, (EventLoopContext) conn.getContext(), current.metric(), conn -> {
- conn.upgradeStream(stream.metric(), stream.getContext(), ar -> {
- UpgradingStream.this.conn.closeHandler(null);
- UpgradingStream.this.conn.exceptionHandler(null);
+ VertxHttp2ConnectionHandler<Http2ClientConnection> handler = Http2ClientConnection.createHttp2ConnectionHandler(upgradedConnection.client, upgradingConnection.metrics, (EventLoopContext) upgradingConnection.getContext(), upgradedConnection.current.metric(), conn -> {
+ conn.upgradeStream(upgradingStream.metric(), upgradingStream.getContext(), ar -> {
+ upgradingConnection.closeHandler(null);
+ upgradingConnection.exceptionHandler(null);
+ upgradingConnection.evictionHandler(null);
+ upgradingConnection.concurrencyChangeHandler(null);
if (ar.succeeded()) {
upgradedStream = ar.result();
upgradedStream.headHandler(headHandler);
@@ -185,16 +189,16 @@ public void upgradeTo(ChannelHandlerContext ctx, FullHttpResponse upgradeRespons
upgradedStream.pushHandler(pushHandler);
upgradedStream.unknownFrameHandler(unknownFrameHandler);
upgradedStream.closeHandler(closeHandler);
- stream.headHandler(null);
- stream.chunkHandler(null);
- stream.endHandler(null);
- stream.priorityHandler(null);
- stream.exceptionHandler(null);
- stream.drainHandler(null);
- stream.continueHandler(null);
- stream.pushHandler(null);
- stream.unknownFrameHandler(null);
- stream.closeHandler(null);
+ upgradingStream.headHandler(null);
+ upgradingStream.chunkHandler(null);
+ upgradingStream.endHandler(null);
+ upgradingStream.priorityHandler(null);
+ upgradingStream.exceptionHandler(null);
+ upgradingStream.drainHandler(null);
+ upgradingStream.continueHandler(null);
+ upgradingStream.pushHandler(null);
+ upgradingStream.unknownFrameHandler(null);
+ upgradingStream.closeHandler(null);
headHandler = null;
chunkHandler = null;
endHandler = null;
@@ -204,15 +208,24 @@ public void upgradeTo(ChannelHandlerContext ctx, FullHttpResponse upgradeRespons
continueHandler = null;
pushHandler = null;
closeHandler = null;
- current = conn;
- conn.closeHandler(closeHandler);
- conn.exceptionHandler(exceptionHandler);
- conn.pingHandler(pingHandler);
- conn.goAwayHandler(goAwayHandler);
- conn.shutdownHandler(shutdownHandler);
- conn.remoteSettingsHandler(remoteSettingsHandler);
- conn.evictionHandler(evictionHandler);
- conn.concurrencyChangeHandler(concurrencyChangeHandler);
+ upgradedConnection.current = conn;
+ conn.closeHandler(upgradedConnection.closeHandler);
+ conn.exceptionHandler(upgradedConnection.exceptionHandler);
+ conn.pingHandler(upgradedConnection.pingHandler);
+ conn.goAwayHandler(upgradedConnection.goAwayHandler);
+ conn.shutdownHandler(upgradedConnection.shutdownHandler);
+ conn.remoteSettingsHandler(upgradedConnection.remoteSettingsHandler);
+ conn.evictionHandler(upgradedConnection.evictionHandler);
+ conn.concurrencyChangeHandler(upgradedConnection.concurrencyChangeHandler);
+ Handler<Long> concurrencyChangeHandler = upgradedConnection.concurrencyChangeHandler;
+ upgradedConnection.closeHandler = null;
+ upgradedConnection.exceptionHandler = null;
+ upgradedConnection.pingHandler = null;
+ upgradedConnection.goAwayHandler = null;
+ upgradedConnection.shutdownHandler = null;
+ upgradedConnection.remoteSettingsHandler = null;
+ upgradedConnection.evictionHandler = null;
+ upgradedConnection.concurrencyChangeHandler = null;
concurrencyChangeHandler.handle(conn.concurrency());
} else {
// Handle me
@@ -220,7 +233,7 @@ public void upgradeTo(ChannelHandlerContext ctx, FullHttpResponse upgradeRespons
}
});
});
- conn.channel().pipeline().addLast(handler);
+ upgradingConnection.channel().pipeline().addLast(handler);
handler.clientUpgrade(ctx);
}
};
@@ -307,11 +320,11 @@ private void doWriteHead(HttpRequestHead head,
StreamPriority priority,
boolean connect,
Handler<AsyncResult<Void>> handler) {
- EventExecutor exec = conn.channelHandlerContext().executor();
+ EventExecutor exec = upgradingConnection.channelHandlerContext().executor();
if (exec.inEventLoop()) {
- stream.writeHead(head, chunked, buf, end, priority, connect, handler);
+ upgradingStream.writeHead(head, chunked, buf, end, priority, connect, handler);
if (end) {
- ChannelPipeline pipeline = conn.channelHandlerContext().pipeline();
+ ChannelPipeline pipeline = upgradingConnection.channelHandlerContext().pipeline();
pipeline.fireUserEventTriggered(SEND_BUFFERED_MESSAGES);
}
} else {
@@ -326,21 +339,21 @@ public int id() {
@Override
public Object metric() {
- return stream.metric();
+ return upgradingStream.metric();
}
@Override
public HttpVersion version() {
HttpClientStream s = upgradedStream;
if (s == null) {
- s = stream;
+ s = upgradingStream;
}
return s.version();
}
@Override
public ContextInternal getContext() {
- return stream.getContext();
+ return upgradingStream.getContext();
}
@Override
@@ -348,7 +361,7 @@ public void continueHandler(Handler<Void> handler) {
if (upgradedStream != null) {
upgradedStream.continueHandler(handler);
} else {
- stream.continueHandler(handler);
+ upgradingStream.continueHandler(handler);
continueHandler = handler;
}
}
@@ -358,7 +371,7 @@ public void pushHandler(Handler<HttpClientPush> handler) {
if (pushHandler != null) {
upgradedStream.pushHandler(handler);
} else {
- stream.pushHandler(handler);
+ upgradingStream.pushHandler(handler);
pushHandler = handler;
}
}
@@ -368,7 +381,7 @@ public void closeHandler(Handler<Void> handler) {
if (closeHandler != null) {
upgradedStream.closeHandler(handler);
} else {
- stream.closeHandler(handler);
+ upgradingStream.closeHandler(handler);
closeHandler = handler;
}
}
@@ -378,7 +391,7 @@ public void drainHandler(Handler<Void> handler) {
if (upgradedStream != null) {
upgradedStream.drainHandler(handler);
} else {
- stream.drainHandler(handler);
+ upgradingStream.drainHandler(handler);
drainHandler = handler;
}
}
@@ -388,7 +401,7 @@ public void exceptionHandler(Handler<Throwable> handler) {
if (upgradedStream != null) {
upgradedStream.exceptionHandler(handler);
} else {
- stream.exceptionHandler(handler);
+ upgradingStream.exceptionHandler(handler);
exceptionHandler = handler;
}
}
@@ -398,7 +411,7 @@ public void headHandler(Handler<HttpResponseHead> handler) {
if (upgradedStream != null) {
upgradedStream.headHandler(handler);
} else {
- stream.headHandler(handler);
+ upgradingStream.headHandler(handler);
headHandler = handler;
}
}
@@ -408,7 +421,7 @@ public void chunkHandler(Handler<Buffer> handler) {
if (upgradedStream != null) {
upgradedStream.chunkHandler(handler);
} else {
- stream.chunkHandler(handler);
+ upgradingStream.chunkHandler(handler);
chunkHandler = handler;
}
}
@@ -418,7 +431,7 @@ public void endHandler(Handler<MultiMap> handler) {
if (upgradedStream != null) {
upgradedStream.endHandler(handler);
} else {
- stream.endHandler(handler);
+ upgradingStream.endHandler(handler);
endHandler = handler;
}
}
@@ -428,7 +441,7 @@ public void unknownFrameHandler(Handler<HttpFrame> handler) {
if (upgradedStream != null) {
upgradedStream.unknownFrameHandler(handler);
} else {
- stream.unknownFrameHandler(handler);
+ upgradingStream.unknownFrameHandler(handler);
unknownFrameHandler = handler;
}
}
@@ -438,18 +451,18 @@ public void priorityHandler(Handler<StreamPriority> handler) {
if (upgradedStream != null) {
upgradedStream.priorityHandler(handler);
} else {
- stream.priorityHandler(handler);
+ upgradingStream.priorityHandler(handler);
priorityHandler = handler;
}
}
@Override
public void writeBuffer(ByteBuf buf, boolean end, Handler<AsyncResult<Void>> handler) {
- EventExecutor exec = conn.channelHandlerContext().executor();
+ EventExecutor exec = upgradingConnection.channelHandlerContext().executor();
if (exec.inEventLoop()) {
- stream.writeBuffer(buf, end, handler);
+ upgradingStream.writeBuffer(buf, end, handler);
if (end) {
- ChannelPipeline pipeline = conn.channelHandlerContext().pipeline();
+ ChannelPipeline pipeline = upgradingConnection.channelHandlerContext().pipeline();
pipeline.fireUserEventTriggered(SEND_BUFFERED_MESSAGES);
}
} else {
@@ -459,42 +472,42 @@ public void writeBuffer(ByteBuf buf, boolean end, Handler<AsyncResult<Void>> han
@Override
public void writeFrame(int type, int flags, ByteBuf payload) {
- stream.writeFrame(type, flags, payload);
+ upgradingStream.writeFrame(type, flags, payload);
}
@Override
public void doSetWriteQueueMaxSize(int size) {
- stream.doSetWriteQueueMaxSize(size);
+ upgradingStream.doSetWriteQueueMaxSize(size);
}
@Override
public boolean isNotWritable() {
- return stream.isNotWritable();
+ return upgradingStream.isNotWritable();
}
@Override
public void doPause() {
- stream.doPause();
+ upgradingStream.doPause();
}
@Override
public void doFetch(long amount) {
- stream.doFetch(amount);
+ upgradingStream.doFetch(amount);
}
@Override
public void reset(Throwable cause) {
- stream.reset(cause);
+ upgradingStream.reset(cause);
}
@Override
public StreamPriority priority() {
- return stream.priority();
+ return upgradingStream.priority();
}
@Override
public void updatePriority(StreamPriority streamPriority) {
- stream.updatePriority(streamPriority);
+ upgradingStream.updatePriority(streamPriority);
}
}
@@ -504,7 +517,7 @@ public void createStream(ContextInternal context, Handler<AsyncResult<HttpClient
current.createStream(context, ar -> {
if (ar.succeeded()) {
HttpClientStream stream = ar.result();
- UpgradingStream upgradingStream = new UpgradingStream(stream, (Http1xClientConnection) current);
+ UpgradingStream upgradingStream = new UpgradingStream(stream, this, (Http1xClientConnection) current);
handler.handle(Future.succeededFuture(upgradingStream));
} else {
handler.handle(ar);
@@ -520,20 +533,6 @@ public ContextInternal getContext() {
return current.getContext();
}
- @Override
- public HttpConnection closeHandler(Handler<Void> handler) {
- closeHandler = handler;
- current.closeHandler(handler);
- return this;
- }
-
- @Override
- public HttpConnection exceptionHandler(Handler<Throwable> handler) {
- exceptionHandler = handler;
- current.exceptionHandler(handler);
- return this;
- }
-
@Override
public HttpConnection remoteSettingsHandler(Handler<Http2Settings> handler) {
if (current instanceof Http1xClientConnection) {
@@ -574,13 +573,30 @@ public HttpConnection shutdownHandler(@Nullable Handler<Void> handler) {
return this;
}
+ @Override
+ public HttpConnection closeHandler(Handler<Void> handler) {
+ if (current instanceof Http1xClientConnection) {
+ closeHandler = handler;
+ }
+ current.closeHandler(handler);
+ return this;
+ }
+
+ @Override
+ public HttpConnection exceptionHandler(Handler<Throwable> handler) {
+ if (current instanceof Http1xClientConnection) {
+ exceptionHandler = handler;
+ }
+ current.exceptionHandler(handler);
+ return this;
+ }
+
@Override
public HttpClientConnection evictionHandler(Handler<Void> handler) {
if (current instanceof Http1xClientConnection) {
evictionHandler = handler;
- } else {
- current.evictionHandler(handler);
}
+ current.evictionHandler(handler);
return this;
}
@@ -588,9 +604,8 @@ public HttpClientConnection evictionHandler(Handler<Void> handler) {
public HttpClientConnection concurrencyChangeHandler(Handler<Long> handler) {
if (current instanceof Http1xClientConnection) {
concurrencyChangeHandler = handler;
- } else {
- current.concurrencyChangeHandler(handler);
}
+ current.concurrencyChangeHandler(handler);
return this;
}
| diff --git a/src/test/java/io/vertx/core/http/Http2Test.java b/src/test/java/io/vertx/core/http/Http2Test.java
--- a/src/test/java/io/vertx/core/http/Http2Test.java
+++ b/src/test/java/io/vertx/core/http/Http2Test.java
@@ -930,4 +930,36 @@ public void testAppendToHttpChunks() throws Exception {
}));
await();
}
+
+ @Test
+ public void testNonUpgradedH2CConnectionIsEvictedFromThePool() {
+ client.close();
+ client = vertx.createHttpClient(new HttpClientOptions().setProtocolVersion(HttpVersion.HTTP_2));
+ server.close();
+ System.setProperty("vertx.disableH2c", "true");
+ server = vertx.createHttpServer();
+ try {
+ server.requestHandler(req -> req.response().end());
+ server.listen(testAddress, onSuccess(s -> {
+ Promise<Void> promise = Promise.promise();
+ client.request(requestOptions).compose(req -> {
+ req.connection().closeHandler(v -> {
+ promise.complete();
+ });
+ return req.send().compose(HttpClientResponse::body);
+ }).onSuccess(b -> {
+ server.close()
+ .compose(r -> promise.future())
+ .compose(a -> server.listen(testAddress)).onComplete(onSuccess(v -> {
+ client.request(requestOptions).compose(req -> req.send().compose(HttpClientResponse::body)).onSuccess(b2 -> {
+ testComplete();
+ });
+ }));
+ });
+ }));
+ await();
+ } finally {
+ System.clearProperty("vertx.disableH2c");
+ }
+ }
}
| How to capture an HTTP request in Vert.x?
Code like this does not work:

But this works.

Can anyone help me?
Thanks.
I am using Fiddler to capture this traffic, and my Vert.x version is 3.2.1.
| The Vert.x HTTP client has proxy settings of its own - did you try using those instead of setting the system properties? Something like:
```java
vertx.createHttpClient(new HttpClientOptions()
.setProxyOptions(new ProxyOptions()
.setPort(9999).setHost("localhost")))
```
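For completeness, a fuller sketch of that suggestion (assuming Fiddler listens on its default port 8888; the host, port, and target URL are illustrative):

```java
import io.vertx.core.Vertx;
import io.vertx.core.http.HttpClient;
import io.vertx.core.http.HttpClientOptions;
import io.vertx.core.net.ProxyOptions;

public class ProxyCaptureSketch {
  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();
    // Route client requests through the local proxy so Fiddler can capture them.
    HttpClient client = vertx.createHttpClient(new HttpClientOptions()
      .setProxyOptions(new ProxyOptions().setHost("localhost").setPort(8888)));
    client.getNow(80, "example.com", "/", resp ->
      System.out.println("status: " + resp.statusCode()));
  }
}
```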
This works in 3.4.1 but not in 3.2.1. Thanks for your answer~ | 2021-05-25T21:08:58Z | 4.1 |
eclipse-vertx/vert.x | 3,913 | eclipse-vertx__vert.x-3913 | [
"1937"
] | 6e2a1dac65c280d071da9339e78687e09c7dd449 | diff --git a/src/main/java/io/vertx/core/http/impl/Http1xServerRequest.java b/src/main/java/io/vertx/core/http/impl/Http1xServerRequest.java
--- a/src/main/java/io/vertx/core/http/impl/Http1xServerRequest.java
+++ b/src/main/java/io/vertx/core/http/impl/Http1xServerRequest.java
@@ -14,20 +14,19 @@
import io.netty.handler.codec.http.*;
import io.netty.handler.codec.http.HttpHeaders;
import io.netty.handler.codec.http.multipart.Attribute;
-import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder;
import io.netty.handler.codec.http.multipart.InterfaceHttpData;
import io.vertx.codegen.annotations.Nullable;
import io.vertx.core.Context;
import io.vertx.core.Future;
import io.vertx.core.Handler;
import io.vertx.core.MultiMap;
-import io.vertx.core.Promise;
import io.vertx.core.buffer.Buffer;
import io.vertx.core.http.*;
import io.vertx.core.http.Cookie;
import io.vertx.core.http.HttpMethod;
import io.vertx.core.http.HttpVersion;
import io.vertx.core.http.impl.headers.HeadersAdaptor;
+import io.vertx.core.http.impl.netty.HttpPostRequestDecoder;
import io.vertx.core.impl.ContextInternal;
import io.vertx.core.impl.VertxInternal;
import io.vertx.core.impl.future.PromiseInternal;
diff --git a/src/main/java/io/vertx/core/http/impl/Http2ServerRequest.java b/src/main/java/io/vertx/core/http/impl/Http2ServerRequest.java
--- a/src/main/java/io/vertx/core/http/impl/Http2ServerRequest.java
+++ b/src/main/java/io/vertx/core/http/impl/Http2ServerRequest.java
@@ -17,7 +17,6 @@
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.LastHttpContent;
import io.netty.handler.codec.http.multipart.Attribute;
-import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder;
import io.netty.handler.codec.http.multipart.InterfaceHttpData;
import io.netty.handler.codec.http2.Http2Headers;
import io.vertx.codegen.annotations.Nullable;
@@ -37,6 +36,7 @@
import io.vertx.core.http.StreamResetException;
import io.vertx.core.http.HttpFrame;
import io.vertx.core.http.impl.headers.Http2HeadersAdaptor;
+import io.vertx.core.http.impl.netty.HttpPostRequestDecoder;
import io.vertx.core.impl.ContextInternal;
import io.vertx.core.impl.logging.Logger;
import io.vertx.core.impl.logging.LoggerFactory;
diff --git a/src/main/java/io/vertx/core/http/impl/netty/CaseIgnoringComparator.java b/src/main/java/io/vertx/core/http/impl/netty/CaseIgnoringComparator.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/io/vertx/core/http/impl/netty/CaseIgnoringComparator.java
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2012 The Netty Project
+ *
+ * The Netty Project licenses this file to you under the Apache License,
+ * version 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at:
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package io.vertx.core.http.impl.netty;
+
+import java.io.Serializable;
+import java.util.Comparator;
+
+final class CaseIgnoringComparator implements Comparator<CharSequence>, Serializable {
+
+ private static final long serialVersionUID = 4582133183775373862L;
+
+ static final CaseIgnoringComparator INSTANCE = new CaseIgnoringComparator();
+
+ private CaseIgnoringComparator() {
+ }
+
+ @Override
+ public int compare(CharSequence o1, CharSequence o2) {
+ int o1Length = o1.length();
+ int o2Length = o2.length();
+ int min = Math.min(o1Length, o2Length);
+ for (int i = 0; i < min; i++) {
+ char c1 = o1.charAt(i);
+ char c2 = o2.charAt(i);
+ if (c1 != c2) {
+ c1 = Character.toUpperCase(c1);
+ c2 = Character.toUpperCase(c2);
+ if (c1 != c2) {
+ c1 = Character.toLowerCase(c1);
+ c2 = Character.toLowerCase(c2);
+ if (c1 != c2) {
+ return c1 - c2;
+ }
+ }
+ }
+ }
+ return o1Length - o2Length;
+ }
+
+ private Object readResolve() {
+ return INSTANCE;
+ }
+}
diff --git a/src/main/java/io/vertx/core/http/impl/netty/HttpPostBodyUtil.java b/src/main/java/io/vertx/core/http/impl/netty/HttpPostBodyUtil.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/io/vertx/core/http/impl/netty/HttpPostBodyUtil.java
@@ -0,0 +1,267 @@
+/*
+ * Copyright 2012 The Netty Project
+ *
+ * The Netty Project licenses this file to you under the Apache License,
+ * version 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at:
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package io.vertx.core.http.impl.netty;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.handler.codec.http.HttpConstants;
+
+/**
+ * Shared Static object between HttpMessageDecoder, HttpPostRequestDecoder and HttpPostRequestEncoder
+ */
+final class HttpPostBodyUtil {
+
+ public static final int chunkSize = 8096;
+
+ /**
+ * Default Content-Type in binary form
+ */
+ public static final String DEFAULT_BINARY_CONTENT_TYPE = "application/octet-stream";
+
+ /**
+ * Default Content-Type in Text form
+ */
+ public static final String DEFAULT_TEXT_CONTENT_TYPE = "text/plain";
+
+ /**
+ * Allowed mechanism for multipart
+ * mechanism := "7bit"
+ / "8bit"
+ / "binary"
+ Not allowed: "quoted-printable"
+ / "base64"
+ */
+ public enum TransferEncodingMechanism {
+ /**
+ * Default encoding
+ */
+ BIT7("7bit"),
+ /**
+ * Short lines but not in ASCII - no encoding
+ */
+ BIT8("8bit"),
+ /**
+ * Could be long text not in ASCII - no encoding
+ */
+ BINARY("binary");
+
+ private final String value;
+
+ TransferEncodingMechanism(String value) {
+ this.value = value;
+ }
+
+ public String value() {
+ return value;
+ }
+
+ @Override
+ public String toString() {
+ return value;
+ }
+ }
+
+ private HttpPostBodyUtil() {
+ }
+
+ /**
+ * This class intends to decrease the CPU in seeking ahead some bytes in
+ * HttpPostRequestDecoder
+ */
+ static class SeekAheadOptimize {
+ byte[] bytes;
+ int readerIndex;
+ int pos;
+ int origPos;
+ int limit;
+ ByteBuf buffer;
+
+ /**
+ * @param buffer buffer with a backing byte array
+ */
+ SeekAheadOptimize(ByteBuf buffer) {
+ if (!buffer.hasArray()) {
+ throw new IllegalArgumentException("buffer hasn't backing byte array");
+ }
+ this.buffer = buffer;
+ bytes = buffer.array();
+ readerIndex = buffer.readerIndex();
+ origPos = pos = buffer.arrayOffset() + readerIndex;
+ limit = buffer.arrayOffset() + buffer.writerIndex();
+ }
+
+ /**
+ *
+ * @param minus this value will be used as (currentPos - minus) to set
+ * the current readerIndex in the buffer.
+ */
+ void setReadPosition(int minus) {
+ pos -= minus;
+ readerIndex = getReadPosition(pos);
+ buffer.readerIndex(readerIndex);
+ }
+
+ /**
+ *
+ * @param index raw index of the array (pos in general)
+ * @return the value equivalent of raw index to be used in readerIndex(value)
+ */
+ int getReadPosition(int index) {
+ return index - origPos + readerIndex;
+ }
+ }
+
+ /**
+ * Find the first non whitespace
+ * @return the rank of the first non whitespace
+ */
+ static int findNonWhitespace(String sb, int offset) {
+ int result;
+ for (result = offset; result < sb.length(); result ++) {
+ if (!Character.isWhitespace(sb.charAt(result))) {
+ break;
+ }
+ }
+ return result;
+ }
+
+ /**
+ * Find the end of String
+ * @return the rank of the end of string
+ */
+ static int findEndOfString(String sb) {
+ int result;
+ for (result = sb.length(); result > 0; result --) {
+ if (!Character.isWhitespace(sb.charAt(result - 1))) {
+ break;
+ }
+ }
+ return result;
+ }
+
+ /**
+ * Try to find first LF or CRLF as Line Breaking
+ *
+ * @param buffer the buffer to search in
+ * @param index the index to start from in the buffer
+ * @return a relative position from index > 0 if LF or CRLF is found
+ * or < 0 if not found
+ */
+ static int findLineBreak(ByteBuf buffer, int index) {
+ int toRead = buffer.readableBytes() - (index - buffer.readerIndex());
+ int posFirstChar = buffer.bytesBefore(index, toRead, HttpConstants.LF);
+ if (posFirstChar == -1) {
+ // No LF, so neither CRLF
+ return -1;
+ }
+ if (posFirstChar > 0 && buffer.getByte(index + posFirstChar - 1) == HttpConstants.CR) {
+ posFirstChar--;
+ }
+ return posFirstChar;
+ }
+
+ /**
+ * Try to find last LF or CRLF as Line Breaking
+ *
+ * @param buffer the buffer to search in
+ * @param index the index to start from in the buffer
+ * @return a relative position from index > 0 if LF or CRLF is found
+ * or < 0 if not found
+ */
+ static int findLastLineBreak(ByteBuf buffer, int index) {
+ int candidate = findLineBreak(buffer, index);
+ int findCRLF = 0;
+ if (candidate >= 0) {
+ if (buffer.getByte(index + candidate) == HttpConstants.CR) {
+ findCRLF = 2;
+ } else {
+ findCRLF = 1;
+ }
+ candidate += findCRLF;
+ }
+ int next;
+ while (candidate > 0 && (next = findLineBreak(buffer, index + candidate)) >= 0) {
+ candidate += next;
+ if (buffer.getByte(index + candidate) == HttpConstants.CR) {
+ findCRLF = 2;
+ } else {
+ findCRLF = 1;
+ }
+ candidate += findCRLF;
+ }
+ return candidate - findCRLF;
+ }
+
+ /**
+ * Try to find the delimiter, with LF or CRLF in front of it (added as delimiters) if needed
+ *
+ * @param buffer the buffer to search in
+ * @param index the index to start from in the buffer
+ * @param delimiter the delimiter as byte array
+ * @param precededByLineBreak true if it must be preceded by LF or CRLF, else false
+ * @return a relative position from index > 0 if delimiter found designing the start of it
+ * (including LF or CRLF is asked)
+ * or a number < 0 if delimiter is not found
+ * @throws IndexOutOfBoundsException
+ * if {@code offset + delimiter.length} is greater than {@code buffer.capacity}
+ */
+ static int findDelimiter(ByteBuf buffer, int index, byte[] delimiter, boolean precededByLineBreak) {
+ final int delimiterLength = delimiter.length;
+ final int readerIndex = buffer.readerIndex();
+ final int writerIndex = buffer.writerIndex();
+ int toRead = writerIndex - index;
+ int newOffset = index;
+ boolean delimiterNotFound = true;
+ while (delimiterNotFound && delimiterLength <= toRead) {
+ // Find first position: delimiter
+ int posDelimiter = buffer.bytesBefore(newOffset, toRead, delimiter[0]);
+ if (posDelimiter < 0) {
+ return -1;
+ }
+ newOffset += posDelimiter;
+ toRead -= posDelimiter;
+ // Now check for delimiter
+ delimiterNotFound = false;
+ for (int i = 0; i < delimiterLength; i++) {
+ if (buffer.getByte(newOffset + i) != delimiter[i]) {
+ newOffset++;
+ toRead--;
+ delimiterNotFound = true;
+ break;
+ }
+ }
+ if (!delimiterNotFound) {
+ // Delimiter found, find if necessary: LF or CRLF
+ if (precededByLineBreak && newOffset > readerIndex) {
+ if (buffer.getByte(newOffset - 1) == HttpConstants.LF) {
+ newOffset--;
+ // Check if CR before: not mandatory to be there
+ if (newOffset > readerIndex && buffer.getByte(newOffset - 1) == HttpConstants.CR) {
+ newOffset--;
+ }
+ } else {
+ // Delimiter with Line Break could be further: iterate after first char of delimiter
+ newOffset++;
+ toRead--;
+ delimiterNotFound = true;
+ continue;
+ }
+ }
+ return newOffset - readerIndex;
+ }
+ }
+ return -1;
+ }
+}
diff --git a/src/main/java/io/vertx/core/http/impl/netty/HttpPostMultipartRequestDecoder.java b/src/main/java/io/vertx/core/http/impl/netty/HttpPostMultipartRequestDecoder.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/io/vertx/core/http/impl/netty/HttpPostMultipartRequestDecoder.java
@@ -0,0 +1,1379 @@
+/*
+ * Copyright 2012 The Netty Project
+ *
+ * The Netty Project licenses this file to you under the Apache License,
+ * version 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at:
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package io.vertx.core.http.impl.netty;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.handler.codec.http.HttpConstants;
+import io.netty.handler.codec.http.HttpContent;
+import io.netty.handler.codec.http.HttpHeaderNames;
+import io.netty.handler.codec.http.HttpHeaderValues;
+import io.netty.handler.codec.http.HttpRequest;
+import io.netty.handler.codec.http.LastHttpContent;
+import io.netty.handler.codec.http.QueryStringDecoder;
+import io.netty.handler.codec.http.multipart.Attribute;
+import io.netty.handler.codec.http.multipart.DefaultHttpDataFactory;
+import io.netty.handler.codec.http.multipart.FileUpload;
+import io.netty.handler.codec.http.multipart.HttpData;
+import io.netty.handler.codec.http.multipart.HttpDataFactory;
+import io.vertx.core.http.impl.netty.HttpPostBodyUtil.SeekAheadOptimize;
+import io.vertx.core.http.impl.netty.HttpPostBodyUtil.TransferEncodingMechanism;
+import io.vertx.core.http.impl.netty.HttpPostRequestDecoder.EndOfDataDecoderException;
+import io.vertx.core.http.impl.netty.HttpPostRequestDecoder.ErrorDataDecoderException;
+import io.vertx.core.http.impl.netty.HttpPostRequestDecoder.MultiPartStatus;
+import io.vertx.core.http.impl.netty.HttpPostRequestDecoder.NotEnoughDataDecoderException;
+import io.netty.handler.codec.http.multipart.InterfaceHttpData;
+import io.netty.handler.codec.http.multipart.InterfaceHttpPostRequestDecoder;
+import io.netty.util.CharsetUtil;
+import io.netty.util.internal.InternalThreadLocalMap;
+import io.netty.util.internal.PlatformDependent;
+import io.netty.util.internal.StringUtil;
+
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.nio.charset.IllegalCharsetNameException;
+import java.nio.charset.UnsupportedCharsetException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+import static io.netty.util.internal.ObjectUtil.checkNotNull;
+import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero;
+
+/**
+ * This decoder decodes a multipart ({@code multipart/form-data}) HTTP POST body.
+ *
+ * You <strong>MUST</strong> call {@link #destroy()} after completion to release all resources.
+ *
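+ * A minimal usage sketch (illustrative only: the {@code request} and
+ * {@code chunk} variables and the surrounding wiring are assumptions, not
+ * part of this API):
+ * <pre>{@code
+ * HttpPostMultipartRequestDecoder decoder = new HttpPostMultipartRequestDecoder(request);
+ * decoder.offer(chunk); // repeat for each received HttpContent
+ * try {
+ *     while (decoder.hasNext()) {
+ *         InterfaceHttpData data = decoder.next();
+ *         // process data (Attribute or FileUpload), then release it
+ *         data.release();
+ *     }
+ * } catch (HttpPostRequestDecoder.EndOfDataDecoderException e) {
+ *     // no more data to decode
+ * }
+ * decoder.destroy(); // mandatory: releases all remaining resources
+ * }</pre>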
+ */
+public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequestDecoder {
+
+ /**
+ * Factory used to create InterfaceHttpData
+ */
+ private final HttpDataFactory factory;
+
+ /**
+ * Request to decode
+ */
+ private final HttpRequest request;
+
+ /**
+ * Default charset to use
+ */
+ private Charset charset;
+
+ /**
+     * Whether the last chunk has already been received
+ */
+ private boolean isLastChunk;
+
+ /**
+ * HttpDatas from Body
+ */
+ private final List<InterfaceHttpData> bodyListHttpData = new ArrayList<InterfaceHttpData>();
+
+ /**
+ * HttpDatas as Map from Body
+ */
+ private final Map<String, List<InterfaceHttpData>> bodyMapHttpData = new TreeMap<String, List<InterfaceHttpData>>(
+ CaseIgnoringComparator.INSTANCE);
+
+ /**
+ * The current channelBuffer
+ */
+ private ByteBuf undecodedChunk;
+
+ /**
+ * Body HttpDatas current position
+ */
+ private int bodyListHttpDataRank;
+
+ /**
+ * If multipart, this is the boundary for the global multipart
+ */
+ private String multipartDataBoundary;
+
+ /**
+     * If multipart, there could be internal (mixed) multiparts nested inside the
+     * global multipart. Only one level of nesting is allowed.
+ */
+ private String multipartMixedBoundary;
+
+ /**
+     * Current status
+ */
+ private MultiPartStatus currentStatus = MultiPartStatus.NOTSTARTED;
+
+ /**
+     * Header attributes of the part currently being decoded (used in multipart mode)
+ */
+ private Map<CharSequence, Attribute> currentFieldAttributes;
+
+ /**
+     * The FileUpload currently being decoded
+ */
+ private FileUpload currentFileUpload;
+
+ /**
+     * The Attribute currently being decoded
+ */
+ private Attribute currentAttribute;
+
+ private boolean destroyed;
+
+ private int discardThreshold = HttpPostRequestDecoder.DEFAULT_DISCARD_THRESHOLD;
+
+ /**
+ *
+ * @param request
+ * the request to decode
+ * @throws NullPointerException
+ * for request
+ * @throws ErrorDataDecoderException
+ * if the default charset was wrong when decoding or other
+ * errors
+ */
+ public HttpPostMultipartRequestDecoder(HttpRequest request) {
+ this(new DefaultHttpDataFactory(DefaultHttpDataFactory.MINSIZE), request, HttpConstants.DEFAULT_CHARSET);
+ }
+
+ /**
+ *
+ * @param factory
+ * the factory used to create InterfaceHttpData
+ * @param request
+ * the request to decode
+ * @throws NullPointerException
+ * for request or factory
+ * @throws ErrorDataDecoderException
+ * if the default charset was wrong when decoding or other
+ * errors
+ */
+ public HttpPostMultipartRequestDecoder(HttpDataFactory factory, HttpRequest request) {
+ this(factory, request, HttpConstants.DEFAULT_CHARSET);
+ }
+
+ /**
+ *
+ * @param factory
+ * the factory used to create InterfaceHttpData
+ * @param request
+ * the request to decode
+ * @param charset
+ * the charset to use as default
+ * @throws NullPointerException
+ * for request or charset or factory
+ * @throws ErrorDataDecoderException
+ * if the default charset was wrong when decoding or other
+ * errors
+ */
+ public HttpPostMultipartRequestDecoder(HttpDataFactory factory, HttpRequest request, Charset charset) {
+ this.request = checkNotNull(request, "request");
+ this.charset = checkNotNull(charset, "charset");
+ this.factory = checkNotNull(factory, "factory");
+ // Fill default values
+
+ String contentTypeValue = this.request.headers().get(HttpHeaderNames.CONTENT_TYPE);
+ if (contentTypeValue == null) {
+ throw new ErrorDataDecoderException("No '" + HttpHeaderNames.CONTENT_TYPE + "' header present.");
+ }
+
+ String[] dataBoundary = HttpPostRequestDecoder.getMultipartDataBoundary(contentTypeValue);
+ if (dataBoundary != null) {
+ multipartDataBoundary = dataBoundary[0];
+ if (dataBoundary.length > 1 && dataBoundary[1] != null) {
+ try {
+ this.charset = Charset.forName(dataBoundary[1]);
+ } catch (IllegalCharsetNameException e) {
+ throw new ErrorDataDecoderException(e);
+ }
+ }
+ } else {
+ multipartDataBoundary = null;
+ }
+ currentStatus = MultiPartStatus.HEADERDELIMITER;
+
+ try {
+ if (request instanceof HttpContent) {
+                // Offer automatically if the given request is also of type HttpContent
+ // See #1089
+ offer((HttpContent) request);
+ } else {
+ parseBody();
+ }
+ } catch (Throwable e) {
+ destroy();
+ PlatformDependent.throwException(e);
+ }
+ }
+
+ private void checkDestroyed() {
+ if (destroyed) {
+ throw new IllegalStateException(HttpPostMultipartRequestDecoder.class.getSimpleName()
+ + " was destroyed already");
+ }
+ }
+
+ /**
+ * True if this request is a Multipart request
+ *
+ * @return True if this request is a Multipart request
+ */
+ @Override
+ public boolean isMultipart() {
+ checkDestroyed();
+ return true;
+ }
+
+ /**
+     * Set the number of bytes after which read bytes in the buffer should be discarded.
+     * Setting this lower gives lower memory usage, at the cost of more memory copies.
+ * Use {@code 0} to disable it.
+ */
+ @Override
+ public void setDiscardThreshold(int discardThreshold) {
+ this.discardThreshold = checkPositiveOrZero(discardThreshold, "discardThreshold");
+ }
+
+ /**
+ * Return the threshold in bytes after which read data in the buffer should be discarded.
+ */
+ @Override
+ public int getDiscardThreshold() {
+ return discardThreshold;
+ }
+
+ /**
+     * This method returns a List of all HttpDatas from the body.<br>
+     *
+     * If chunked, all chunks must have been offered using the offer() method. If
+     * not, NotEnoughDataDecoderException will be raised.
+     *
+     * @return the list of HttpDatas from the body of the POST request
+ * @throws NotEnoughDataDecoderException
+ * Need more chunks
+ */
+ @Override
+ public List<InterfaceHttpData> getBodyHttpDatas() {
+ checkDestroyed();
+
+ if (!isLastChunk) {
+ throw new NotEnoughDataDecoderException();
+ }
+ return bodyListHttpData;
+ }
+
+ /**
+     * This method returns a List of all HttpDatas with the given name from the
+     * body.<br>
+     *
+     * If chunked, all chunks must have been offered using the offer() method. If
+ * not, NotEnoughDataDecoderException will be raised.
+ *
+ * @return All Body HttpDatas with the given name (ignore case)
+ * @throws NotEnoughDataDecoderException
+ * need more chunks
+ */
+ @Override
+ public List<InterfaceHttpData> getBodyHttpDatas(String name) {
+ checkDestroyed();
+
+ if (!isLastChunk) {
+ throw new NotEnoughDataDecoderException();
+ }
+ return bodyMapHttpData.get(name);
+ }
+
+ /**
+     * This method returns the first InterfaceHttpData with the given name from the
+     * body.<br>
+     *
+     * If chunked, all chunks must have been offered using the offer() method. If
+ * not, NotEnoughDataDecoderException will be raised.
+ *
+ * @return The first Body InterfaceHttpData with the given name (ignore
+ * case)
+ * @throws NotEnoughDataDecoderException
+ * need more chunks
+ */
+ @Override
+ public InterfaceHttpData getBodyHttpData(String name) {
+ checkDestroyed();
+
+ if (!isLastChunk) {
+ throw new NotEnoughDataDecoderException();
+ }
+ List<InterfaceHttpData> list = bodyMapHttpData.get(name);
+ if (list != null) {
+ return list.get(0);
+ }
+ return null;
+ }
+
+ /**
+     * Initializes the internals from a new chunk
+ *
+ * @param content
+ * the new received chunk
+ * @throws ErrorDataDecoderException
+ * if there is a problem with the charset decoding or other
+ * errors
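+     *
+     * For example (a sketch; {@code msg} and the enclosing handler are
+     * assumptions, not part of this class):
+     * <pre>{@code
+     * if (msg instanceof HttpContent) {
+     *     decoder.offer((HttpContent) msg); // LastHttpContent marks the end of the body
+     * }
+     * }</pre>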
+ */
+ @Override
+ public HttpPostMultipartRequestDecoder offer(HttpContent content) {
+ checkDestroyed();
+
+ if (content instanceof LastHttpContent) {
+ isLastChunk = true;
+ }
+
+ ByteBuf buf = content.content();
+ if (undecodedChunk == null) {
+ undecodedChunk =
+                    // Since the Handler will release the incoming buffer later on, we need to copy it.
+                    //
+                    // We explicitly allocate a buffer and do NOT call copy(), as copy() may set a maxCapacity
+                    // which is not usable for us, since we may exceed it once more bytes are added.
+ buf.alloc().buffer(buf.readableBytes()).writeBytes(buf);
+ } else {
+ undecodedChunk.writeBytes(buf);
+ }
+ parseBody();
+ if (undecodedChunk != null && undecodedChunk.writerIndex() > discardThreshold) {
+ if (undecodedChunk.refCnt() == 1) {
+ // It's safe to call discardBytes() as we are the only owner of the buffer.
+ undecodedChunk.discardReadBytes();
+ } else {
+                // There seem to be multiple references to the buffer. Let's copy the data and release the buffer to
+ // ensure we can give back memory to the system.
+ ByteBuf buffer = undecodedChunk.alloc().buffer(undecodedChunk.readableBytes());
+ buffer.writeBytes(undecodedChunk);
+ undecodedChunk.release();
+ undecodedChunk = buffer;
+ }
+ }
+ return this;
+ }
+
+ /**
+     * True if, at the current status, there is an available decoded
+     * InterfaceHttpData from the body.
+     *
+     * This method works for both chunked and non-chunked requests.
+     *
+     * @return True if, at the current status, there is a decoded InterfaceHttpData
+ * @throws EndOfDataDecoderException
+ * No more data will be available
+ */
+ @Override
+ public boolean hasNext() {
+ checkDestroyed();
+
+ if (currentStatus == MultiPartStatus.EPILOGUE) {
+ // OK except if end of list
+ if (bodyListHttpDataRank >= bodyListHttpData.size()) {
+ throw new EndOfDataDecoderException();
+ }
+ }
+ return !bodyListHttpData.isEmpty() && bodyListHttpDataRank < bodyListHttpData.size();
+ }
+
+ /**
+ * Returns the next available InterfaceHttpData or null if, at the time it
+ * is called, there is no more available InterfaceHttpData. A subsequent
+ * call to offer(httpChunk) could enable more data.
+ *
+ * Be sure to call {@link InterfaceHttpData#release()} after you are done
+     * with processing, to make sure not to leak any resources
+ *
+ * @return the next available InterfaceHttpData or null if none
+ * @throws EndOfDataDecoderException
+ * No more data will be available
+ */
+ @Override
+ public InterfaceHttpData next() {
+ checkDestroyed();
+
+ if (hasNext()) {
+ return bodyListHttpData.get(bodyListHttpDataRank++);
+ }
+ return null;
+ }
+
+ @Override
+ public InterfaceHttpData currentPartialHttpData() {
+ if (currentFileUpload != null) {
+ return currentFileUpload;
+ } else {
+ return currentAttribute;
+ }
+ }
+
+ /**
+     * This method will parse as much data as possible and fill the list and map
+ *
+ * @throws ErrorDataDecoderException
+ * if there is a problem with the charset decoding or other
+ * errors
+ */
+ private void parseBody() {
+ if (currentStatus == MultiPartStatus.PREEPILOGUE || currentStatus == MultiPartStatus.EPILOGUE) {
+ if (isLastChunk) {
+ currentStatus = MultiPartStatus.EPILOGUE;
+ }
+ return;
+ }
+ parseBodyMultipart();
+ }
+
+ /**
+     * Utility method to add newly decoded data
+ */
+ protected void addHttpData(InterfaceHttpData data) {
+ if (data == null) {
+ return;
+ }
+ List<InterfaceHttpData> datas = bodyMapHttpData.get(data.getName());
+ if (datas == null) {
+ datas = new ArrayList<InterfaceHttpData>(1);
+ bodyMapHttpData.put(data.getName(), datas);
+ }
+ datas.add(data);
+ bodyListHttpData.add(data);
+ }
+
+ /**
+ * Parse the Body for multipart
+ *
+ * @throws ErrorDataDecoderException
+ * if there is a problem with the charset decoding or other
+ * errors
+ */
+ private void parseBodyMultipart() {
+ if (undecodedChunk == null || undecodedChunk.readableBytes() == 0) {
+ // nothing to decode
+ return;
+ }
+ InterfaceHttpData data = decodeMultipart(currentStatus);
+ while (data != null) {
+ addHttpData(data);
+ if (currentStatus == MultiPartStatus.PREEPILOGUE || currentStatus == MultiPartStatus.EPILOGUE) {
+ break;
+ }
+ data = decodeMultipart(currentStatus);
+ }
+ }
+
+ /**
+ * Decode a multipart request by pieces<br>
+ * <br>
+ * NOTSTARTED PREAMBLE (<br>
+ * (HEADERDELIMITER DISPOSITION (FIELD | FILEUPLOAD))*<br>
+ * (HEADERDELIMITER DISPOSITION MIXEDPREAMBLE<br>
+ * (MIXEDDELIMITER MIXEDDISPOSITION MIXEDFILEUPLOAD)+<br>
+ * MIXEDCLOSEDELIMITER)*<br>
+ * CLOSEDELIMITER)+ EPILOGUE<br>
+ *
+     * Inspired by HttpMessageDecoder
+ *
+     * @return the next decoded InterfaceHttpData, or null if none so far.
+ * @throws ErrorDataDecoderException
+ * if an error occurs
+ */
+ private InterfaceHttpData decodeMultipart(MultiPartStatus state) {
+ switch (state) {
+ case NOTSTARTED:
+ throw new ErrorDataDecoderException("Should not be called with the current getStatus");
+ case PREAMBLE:
+ // Content-type: multipart/form-data, boundary=AaB03x
+ throw new ErrorDataDecoderException("Should not be called with the current getStatus");
+ case HEADERDELIMITER: {
+ // --AaB03x or --AaB03x--
+ return findMultipartDelimiter(multipartDataBoundary, MultiPartStatus.DISPOSITION,
+ MultiPartStatus.PREEPILOGUE);
+ }
+ case DISPOSITION: {
+ // content-disposition: form-data; name="field1"
+ // content-disposition: form-data; name="pics"; filename="file1.txt"
+ // and other immediate values like
+ // Content-type: image/gif
+ // Content-Type: text/plain
+ // Content-Type: text/plain; charset=ISO-8859-1
+ // Content-Transfer-Encoding: binary
+ // The following line implies a change of mode (mixed mode)
+ // Content-type: multipart/mixed, boundary=BbC04y
+ return findMultipartDisposition();
+ }
+ case FIELD: {
+ // Now get value according to Content-Type and Charset
+ Charset localCharset = null;
+ Attribute charsetAttribute = currentFieldAttributes.get(HttpHeaderValues.CHARSET);
+ if (charsetAttribute != null) {
+ try {
+ localCharset = Charset.forName(charsetAttribute.getValue());
+ } catch (IOException e) {
+ throw new ErrorDataDecoderException(e);
+ } catch (UnsupportedCharsetException e) {
+ throw new ErrorDataDecoderException(e);
+ }
+ }
+ Attribute nameAttribute = currentFieldAttributes.get(HttpHeaderValues.NAME);
+ if (currentAttribute == null) {
+ Attribute lengthAttribute = currentFieldAttributes
+ .get(HttpHeaderNames.CONTENT_LENGTH);
+ long size;
+ try {
+ size = lengthAttribute != null? Long.parseLong(lengthAttribute
+ .getValue()) : 0L;
+ } catch (IOException e) {
+ throw new ErrorDataDecoderException(e);
+ } catch (NumberFormatException ignored) {
+ size = 0;
+ }
+ try {
+ if (size > 0) {
+ currentAttribute = factory.createAttribute(request,
+ cleanString(nameAttribute.getValue()), size);
+ } else {
+ currentAttribute = factory.createAttribute(request,
+ cleanString(nameAttribute.getValue()));
+ }
+ } catch (NullPointerException e) {
+ throw new ErrorDataDecoderException(e);
+ } catch (IllegalArgumentException e) {
+ throw new ErrorDataDecoderException(e);
+ } catch (IOException e) {
+ throw new ErrorDataDecoderException(e);
+ }
+ if (localCharset != null) {
+ currentAttribute.setCharset(localCharset);
+ }
+ }
+ // load data
+ if (!loadDataMultipartOptimized(undecodedChunk, multipartDataBoundary, currentAttribute)) {
+ // Delimiter is not found. Need more chunks.
+ return null;
+ }
+ Attribute finalAttribute = currentAttribute;
+ currentAttribute = null;
+ currentFieldAttributes = null;
+ // ready to load the next one
+ currentStatus = MultiPartStatus.HEADERDELIMITER;
+ return finalAttribute;
+ }
+ case FILEUPLOAD: {
+            // possibly resume from an existing FileUpload
+ return getFileUpload(multipartDataBoundary);
+ }
+ case MIXEDDELIMITER: {
+ // --AaB03x or --AaB03x--
+ // Note that currentFieldAttributes exists
+ return findMultipartDelimiter(multipartMixedBoundary, MultiPartStatus.MIXEDDISPOSITION,
+ MultiPartStatus.HEADERDELIMITER);
+ }
+ case MIXEDDISPOSITION: {
+ return findMultipartDisposition();
+ }
+ case MIXEDFILEUPLOAD: {
+            // possibly resume from an existing FileUpload
+ return getFileUpload(multipartMixedBoundary);
+ }
+ case PREEPILOGUE:
+ return null;
+ case EPILOGUE:
+ return null;
+ default:
+ throw new ErrorDataDecoderException("Shouldn't reach here.");
+ }
+ }
+
+ /**
+     * Skip control characters and whitespace
+ *
+ * @throws NotEnoughDataDecoderException
+ */
+ private static void skipControlCharacters(ByteBuf undecodedChunk) {
+ if (!undecodedChunk.hasArray()) {
+ try {
+ skipControlCharactersStandard(undecodedChunk);
+ } catch (IndexOutOfBoundsException e1) {
+ throw new NotEnoughDataDecoderException(e1);
+ }
+ return;
+ }
+ SeekAheadOptimize sao = new SeekAheadOptimize(undecodedChunk);
+ while (sao.pos < sao.limit) {
+ char c = (char) (sao.bytes[sao.pos++] & 0xFF);
+ if (!Character.isISOControl(c) && !Character.isWhitespace(c)) {
+ sao.setReadPosition(1);
+ return;
+ }
+ }
+ throw new NotEnoughDataDecoderException("Access out of bounds");
+ }
+
+ private static void skipControlCharactersStandard(ByteBuf undecodedChunk) {
+ for (;;) {
+ char c = (char) undecodedChunk.readUnsignedByte();
+ if (!Character.isISOControl(c) && !Character.isWhitespace(c)) {
+ undecodedChunk.readerIndex(undecodedChunk.readerIndex() - 1);
+ break;
+ }
+ }
+ }
+
+ /**
+ * Find the next Multipart Delimiter
+ *
+ * @param delimiter
+ * delimiter to find
+ * @param dispositionStatus
+     *            the next status if the delimiter marks a start
+     * @param closeDelimiterStatus
+     *            the next status if the delimiter is a close delimiter
+ * @return the next InterfaceHttpData if any
+ * @throws ErrorDataDecoderException
+ */
+ private InterfaceHttpData findMultipartDelimiter(String delimiter, MultiPartStatus dispositionStatus,
+ MultiPartStatus closeDelimiterStatus) {
+ // --AaB03x or --AaB03x--
+ int readerIndex = undecodedChunk.readerIndex();
+ try {
+ skipControlCharacters(undecodedChunk);
+ } catch (NotEnoughDataDecoderException ignored) {
+ undecodedChunk.readerIndex(readerIndex);
+ return null;
+ }
+ skipOneLine();
+ String newline;
+ try {
+ newline = readDelimiterOptimized(undecodedChunk, delimiter, charset);
+ } catch (NotEnoughDataDecoderException ignored) {
+ undecodedChunk.readerIndex(readerIndex);
+ return null;
+ }
+ if (newline.equals(delimiter)) {
+ currentStatus = dispositionStatus;
+ return decodeMultipart(dispositionStatus);
+ }
+ if (newline.equals(delimiter + "--")) {
+ // CLOSEDELIMITER or MIXED CLOSEDELIMITER found
+ currentStatus = closeDelimiterStatus;
+ if (currentStatus == MultiPartStatus.HEADERDELIMITER) {
+ // MIXEDCLOSEDELIMITER
+ // end of the Mixed part
+ currentFieldAttributes = null;
+ return decodeMultipart(MultiPartStatus.HEADERDELIMITER);
+ }
+ return null;
+ }
+ undecodedChunk.readerIndex(readerIndex);
+ throw new ErrorDataDecoderException("No Multipart delimiter found");
+ }
+
+ /**
+ * Find the next Disposition
+ *
+ * @return the next InterfaceHttpData if any
+ * @throws ErrorDataDecoderException
+ */
+ private InterfaceHttpData findMultipartDisposition() {
+ int readerIndex = undecodedChunk.readerIndex();
+ if (currentStatus == MultiPartStatus.DISPOSITION) {
+ currentFieldAttributes = new TreeMap<CharSequence, Attribute>(CaseIgnoringComparator.INSTANCE);
+ }
+        // read lines until an empty line (bare newline) is found; store all data
+ while (!skipOneLine()) {
+ String newline;
+ try {
+ skipControlCharacters(undecodedChunk);
+ newline = readLineOptimized(undecodedChunk, charset);
+ } catch (NotEnoughDataDecoderException ignored) {
+ undecodedChunk.readerIndex(readerIndex);
+ return null;
+ }
+ String[] contents = splitMultipartHeader(newline);
+ if (HttpHeaderNames.CONTENT_DISPOSITION.contentEqualsIgnoreCase(contents[0])) {
+ boolean checkSecondArg;
+ if (currentStatus == MultiPartStatus.DISPOSITION) {
+ checkSecondArg = HttpHeaderValues.FORM_DATA.contentEqualsIgnoreCase(contents[1]);
+ } else {
+ checkSecondArg = HttpHeaderValues.ATTACHMENT.contentEqualsIgnoreCase(contents[1])
+ || HttpHeaderValues.FILE.contentEqualsIgnoreCase(contents[1]);
+ }
+ if (checkSecondArg) {
+ // read next values and store them in the map as Attribute
+ for (int i = 2; i < contents.length; i++) {
+ String[] values = contents[i].split("=", 2);
+ Attribute attribute;
+ try {
+ attribute = getContentDispositionAttribute(values);
+ } catch (NullPointerException e) {
+ throw new ErrorDataDecoderException(e);
+ } catch (IllegalArgumentException e) {
+ throw new ErrorDataDecoderException(e);
+ }
+ currentFieldAttributes.put(attribute.getName(), attribute);
+ }
+ }
+ } else if (HttpHeaderNames.CONTENT_TRANSFER_ENCODING.contentEqualsIgnoreCase(contents[0])) {
+ Attribute attribute;
+ try {
+ attribute = factory.createAttribute(request, HttpHeaderNames.CONTENT_TRANSFER_ENCODING.toString(),
+ cleanString(contents[1]));
+ } catch (NullPointerException e) {
+ throw new ErrorDataDecoderException(e);
+ } catch (IllegalArgumentException e) {
+ throw new ErrorDataDecoderException(e);
+ }
+
+ currentFieldAttributes.put(HttpHeaderNames.CONTENT_TRANSFER_ENCODING, attribute);
+ } else if (HttpHeaderNames.CONTENT_LENGTH.contentEqualsIgnoreCase(contents[0])) {
+ Attribute attribute;
+ try {
+ attribute = factory.createAttribute(request, HttpHeaderNames.CONTENT_LENGTH.toString(),
+ cleanString(contents[1]));
+ } catch (NullPointerException e) {
+ throw new ErrorDataDecoderException(e);
+ } catch (IllegalArgumentException e) {
+ throw new ErrorDataDecoderException(e);
+ }
+
+ currentFieldAttributes.put(HttpHeaderNames.CONTENT_LENGTH, attribute);
+ } else if (HttpHeaderNames.CONTENT_TYPE.contentEqualsIgnoreCase(contents[0])) {
+ // Take care of possible "multipart/mixed"
+ if (HttpHeaderValues.MULTIPART_MIXED.contentEqualsIgnoreCase(contents[1])) {
+ if (currentStatus == MultiPartStatus.DISPOSITION) {
+ String values = StringUtil.substringAfter(contents[2], '=');
+ multipartMixedBoundary = "--" + values;
+ currentStatus = MultiPartStatus.MIXEDDELIMITER;
+ return decodeMultipart(MultiPartStatus.MIXEDDELIMITER);
+ } else {
+ throw new ErrorDataDecoderException("Mixed Multipart found in a previous Mixed Multipart");
+ }
+ } else {
+ for (int i = 1; i < contents.length; i++) {
+ final String charsetHeader = HttpHeaderValues.CHARSET.toString();
+ if (contents[i].regionMatches(true, 0, charsetHeader, 0, charsetHeader.length())) {
+ String values = StringUtil.substringAfter(contents[i], '=');
+ Attribute attribute;
+ try {
+ attribute = factory.createAttribute(request, charsetHeader, cleanString(values));
+ } catch (NullPointerException e) {
+ throw new ErrorDataDecoderException(e);
+ } catch (IllegalArgumentException e) {
+ throw new ErrorDataDecoderException(e);
+ }
+ currentFieldAttributes.put(HttpHeaderValues.CHARSET, attribute);
+ } else {
+ Attribute attribute;
+ try {
+ attribute = factory.createAttribute(request,
+ cleanString(contents[0]), contents[i]);
+ } catch (NullPointerException e) {
+ throw new ErrorDataDecoderException(e);
+ } catch (IllegalArgumentException e) {
+ throw new ErrorDataDecoderException(e);
+ }
+ currentFieldAttributes.put(attribute.getName(), attribute);
+ }
+ }
+ }
+ }
+ }
+ // Is it a FileUpload
+ Attribute filenameAttribute = currentFieldAttributes.get(HttpHeaderValues.FILENAME);
+ if (currentStatus == MultiPartStatus.DISPOSITION) {
+ if (filenameAttribute != null) {
+ // FileUpload
+ currentStatus = MultiPartStatus.FILEUPLOAD;
+ // do not change the buffer position
+ return decodeMultipart(MultiPartStatus.FILEUPLOAD);
+ } else {
+ // Field
+ currentStatus = MultiPartStatus.FIELD;
+ // do not change the buffer position
+ return decodeMultipart(MultiPartStatus.FIELD);
+ }
+ } else {
+ if (filenameAttribute != null) {
+ // FileUpload
+ currentStatus = MultiPartStatus.MIXEDFILEUPLOAD;
+ // do not change the buffer position
+ return decodeMultipart(MultiPartStatus.MIXEDFILEUPLOAD);
+ } else {
+ // Field is not supported in MIXED mode
+ throw new ErrorDataDecoderException("Filename not found");
+ }
+ }
+ }
+
+ private static final String FILENAME_ENCODED = HttpHeaderValues.FILENAME.toString() + '*';
+
+ private Attribute getContentDispositionAttribute(String... values) {
+ String name = cleanString(values[0]);
+ String value = values[1];
+
+ // Filename can be token, quoted or encoded. See https://tools.ietf.org/html/rfc5987
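+        // For example (illustrative): filename*=UTF-8''na%C3%AFve.txt decodes to "naïve.txt"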
+ if (HttpHeaderValues.FILENAME.contentEquals(name)) {
+ // Value is quoted or token. Strip if quoted:
+ int last = value.length() - 1;
+ if (last > 0 &&
+ value.charAt(0) == HttpConstants.DOUBLE_QUOTE &&
+ value.charAt(last) == HttpConstants.DOUBLE_QUOTE) {
+ value = value.substring(1, last);
+ }
+ } else if (FILENAME_ENCODED.equals(name)) {
+ try {
+ name = HttpHeaderValues.FILENAME.toString();
+ String[] split = cleanString(value).split("'", 3);
+ value = QueryStringDecoder.decodeComponent(split[2], Charset.forName(split[0]));
+ } catch (ArrayIndexOutOfBoundsException e) {
+ throw new ErrorDataDecoderException(e);
+ } catch (UnsupportedCharsetException e) {
+ throw new ErrorDataDecoderException(e);
+ }
+ } else {
+ // otherwise we need to clean the value
+ value = cleanString(value);
+ }
+ return factory.createAttribute(request, name, value);
+ }
+
+ /**
+ * Get the FileUpload (new one or current one)
+ *
+ * @param delimiter
+ * the delimiter to use
+ * @return the InterfaceHttpData if any
+ * @throws ErrorDataDecoderException
+ */
+ protected InterfaceHttpData getFileUpload(String delimiter) {
+        // possibly resume from an existing FileUpload
+ // Now get value according to Content-Type and Charset
+ Attribute encoding = currentFieldAttributes.get(HttpHeaderNames.CONTENT_TRANSFER_ENCODING);
+ Charset localCharset = charset;
+ // Default
+ TransferEncodingMechanism mechanism = TransferEncodingMechanism.BIT7;
+ if (encoding != null) {
+ String code;
+ try {
+ code = encoding.getValue().toLowerCase();
+ } catch (IOException e) {
+ throw new ErrorDataDecoderException(e);
+ }
+ if (code.equals(TransferEncodingMechanism.BIT7.value())) {
+ localCharset = CharsetUtil.US_ASCII;
+ } else if (code.equals(TransferEncodingMechanism.BIT8.value())) {
+ localCharset = CharsetUtil.ISO_8859_1;
+ mechanism = TransferEncodingMechanism.BIT8;
+ } else if (code.equals(TransferEncodingMechanism.BINARY.value())) {
+                // no real charset, so keep the default
+ mechanism = TransferEncodingMechanism.BINARY;
+ } else {
+ throw new ErrorDataDecoderException("TransferEncoding Unknown: " + code);
+ }
+ }
+ Attribute charsetAttribute = currentFieldAttributes.get(HttpHeaderValues.CHARSET);
+ if (charsetAttribute != null) {
+ try {
+ localCharset = Charset.forName(charsetAttribute.getValue());
+ } catch (IOException e) {
+ throw new ErrorDataDecoderException(e);
+ } catch (UnsupportedCharsetException e) {
+ throw new ErrorDataDecoderException(e);
+ }
+ }
+ if (currentFileUpload == null) {
+ Attribute filenameAttribute = currentFieldAttributes.get(HttpHeaderValues.FILENAME);
+ Attribute nameAttribute = currentFieldAttributes.get(HttpHeaderValues.NAME);
+ Attribute contentTypeAttribute = currentFieldAttributes.get(HttpHeaderNames.CONTENT_TYPE);
+ Attribute lengthAttribute = currentFieldAttributes.get(HttpHeaderNames.CONTENT_LENGTH);
+ long size;
+ try {
+ size = lengthAttribute != null ? Long.parseLong(lengthAttribute.getValue()) : 0L;
+ } catch (IOException e) {
+ throw new ErrorDataDecoderException(e);
+ } catch (NumberFormatException ignored) {
+ size = 0;
+ }
+ try {
+ String contentType;
+ if (contentTypeAttribute != null) {
+ contentType = contentTypeAttribute.getValue();
+ } else {
+ contentType = HttpPostBodyUtil.DEFAULT_BINARY_CONTENT_TYPE;
+ }
+ currentFileUpload = factory.createFileUpload(request,
+ cleanString(nameAttribute.getValue()), cleanString(filenameAttribute.getValue()),
+ contentType, mechanism.value(), localCharset,
+ size);
+ } catch (NullPointerException e) {
+ throw new ErrorDataDecoderException(e);
+ } catch (IllegalArgumentException e) {
+ throw new ErrorDataDecoderException(e);
+ } catch (IOException e) {
+ throw new ErrorDataDecoderException(e);
+ }
+ }
+        // load as much data as possible
+ if (!loadDataMultipartOptimized(undecodedChunk, delimiter, currentFileUpload)) {
+ // Delimiter is not found. Need more chunks.
+ return null;
+ }
+ if (currentFileUpload.isCompleted()) {
+ // ready to load the next one
+ if (currentStatus == MultiPartStatus.FILEUPLOAD) {
+ currentStatus = MultiPartStatus.HEADERDELIMITER;
+ currentFieldAttributes = null;
+ } else {
+ currentStatus = MultiPartStatus.MIXEDDELIMITER;
+ cleanMixedAttributes();
+ }
+ FileUpload fileUpload = currentFileUpload;
+ currentFileUpload = null;
+ return fileUpload;
+ }
+        // do not change the buffer position,
+        // since some data may already be saved into the FileUpload;
+        // so do not change the currentStatus either
+ return null;
+ }
+
+ /**
+     * Destroy the {@link HttpPostMultipartRequestDecoder} and release all its resources. After this method
+     * has been called it is no longer possible to operate on it.
+ */
+ @Override
+ public void destroy() {
+        // Release all file-based data items, including those not yet pulled
+ cleanFiles();
+        // Clean memory-based data
+ for (InterfaceHttpData httpData : bodyListHttpData) {
+ // Might have been already released by the user
+ if (httpData.refCnt() > 0) {
+ httpData.release();
+ }
+ }
+
+ destroyed = true;
+
+ if (undecodedChunk != null && undecodedChunk.refCnt() > 0) {
+ undecodedChunk.release();
+ undecodedChunk = null;
+ }
+ }
+
+ /**
+     * Clean all HttpDatas (on disk) for the current request.
+ */
+ @Override
+ public void cleanFiles() {
+ checkDestroyed();
+
+ factory.cleanRequestHttpData(request);
+ }
+
+ /**
+ * Remove the given FileUpload from the list of FileUploads to clean
+ */
+ @Override
+ public void removeHttpDataFromClean(InterfaceHttpData data) {
+ checkDestroyed();
+
+ factory.removeHttpDataFromClean(request, data);
+ }
+
+ /**
+     * Remove all Attributes that should be cleaned between two FileUploads in
+     * mixed mode
+ */
+ private void cleanMixedAttributes() {
+ currentFieldAttributes.remove(HttpHeaderValues.CHARSET);
+ currentFieldAttributes.remove(HttpHeaderNames.CONTENT_LENGTH);
+ currentFieldAttributes.remove(HttpHeaderNames.CONTENT_TRANSFER_ENCODING);
+ currentFieldAttributes.remove(HttpHeaderNames.CONTENT_TYPE);
+ currentFieldAttributes.remove(HttpHeaderValues.FILENAME);
+ }
+
+ /**
+ * Read one line up to the CRLF or LF
+ *
+ * @return the String from one line
+ * @throws NotEnoughDataDecoderException
+     *           Need more chunks; the {@code readerIndex} is reset to its previous
+     *           value
+ */
+ private static String readLineOptimized(ByteBuf undecodedChunk, Charset charset) {
+ int readerIndex = undecodedChunk.readerIndex();
+ ByteBuf line = null;
+ try {
+ if (undecodedChunk.isReadable()) {
+ int posLfOrCrLf = HttpPostBodyUtil.findLineBreak(undecodedChunk, undecodedChunk.readerIndex());
+ if (posLfOrCrLf <= 0) {
+ throw new NotEnoughDataDecoderException();
+ }
+ try {
+ line = undecodedChunk.alloc().heapBuffer(posLfOrCrLf);
+ line.writeBytes(undecodedChunk, posLfOrCrLf);
+
+ byte nextByte = undecodedChunk.readByte();
+ if (nextByte == HttpConstants.CR) {
+ // force read next byte since LF is the following one
+ undecodedChunk.readByte();
+ }
+ return line.toString(charset);
+ } finally {
+ line.release();
+ }
+ }
+ } catch (IndexOutOfBoundsException e) {
+ undecodedChunk.readerIndex(readerIndex);
+ throw new NotEnoughDataDecoderException(e);
+ }
+ undecodedChunk.readerIndex(readerIndex);
+ throw new NotEnoughDataDecoderException();
+ }
+
+ /**
+     * Read one line up to --delimiter or --delimiter--, including the trailing
+     * CRLF or LF if present. Note that CRLF or LF is mandatory after an opening
+     * delimiter (--delimiter) but not after a closing delimiter (--delimiter--),
+     * since some clients do not include CRLF in that case.
+ *
+ * @param delimiter
+ * of the form --string, such that '--' is already included
+ * @return the String from one line as the delimiter searched (opening or
+ * closing)
+ * @throws NotEnoughDataDecoderException
+     *           Need more chunks; the {@code readerIndex} is reset to its previous
+     *           value
+ */
+ private static String readDelimiterOptimized(ByteBuf undecodedChunk, String delimiter, Charset charset) {
+ final int readerIndex = undecodedChunk.readerIndex();
+ final byte[] bdelimiter = delimiter.getBytes(charset);
+ final int delimiterLength = bdelimiter.length;
+ try {
+ int delimiterPos = HttpPostBodyUtil.findDelimiter(undecodedChunk, readerIndex, bdelimiter, false);
+ if (delimiterPos < 0) {
+                // delimiter not found, so break here
+ undecodedChunk.readerIndex(readerIndex);
+ throw new NotEnoughDataDecoderException();
+ }
+ StringBuilder sb = new StringBuilder(delimiter);
+ undecodedChunk.readerIndex(readerIndex + delimiterPos + delimiterLength);
+            // Now check whether it is an opening or a closing delimiter
+ if (undecodedChunk.isReadable()) {
+ byte nextByte = undecodedChunk.readByte();
+ // first check for opening delimiter
+ if (nextByte == HttpConstants.CR) {
+ nextByte = undecodedChunk.readByte();
+ if (nextByte == HttpConstants.LF) {
+ return sb.toString();
+ } else {
+ // error since CR must be followed by LF
+                        // delimiter not found, so break here
+ undecodedChunk.readerIndex(readerIndex);
+ throw new NotEnoughDataDecoderException();
+ }
+ } else if (nextByte == HttpConstants.LF) {
+ return sb.toString();
+ } else if (nextByte == '-') {
+ sb.append('-');
+ // second check for closing delimiter
+ nextByte = undecodedChunk.readByte();
+ if (nextByte == '-') {
+ sb.append('-');
+                    // now check whether a CRLF or LF follows
+ if (undecodedChunk.isReadable()) {
+ nextByte = undecodedChunk.readByte();
+ if (nextByte == HttpConstants.CR) {
+ nextByte = undecodedChunk.readByte();
+ if (nextByte == HttpConstants.LF) {
+ return sb.toString();
+ } else {
+ // error CR without LF
+                                // delimiter not found, so break here
+ undecodedChunk.readerIndex(readerIndex);
+ throw new NotEnoughDataDecoderException();
+ }
+ } else if (nextByte == HttpConstants.LF) {
+ return sb.toString();
+ } else {
+                            // No CRLF, but that is acceptable (e.g. Adobe Flash uploader);
+                            // minus 1 since we read one char ahead that we
+                            // should not have
+ undecodedChunk.readerIndex(undecodedChunk.readerIndex() - 1);
+ return sb.toString();
+ }
+ }
+ // FIXME what do we do here?
+                    // either consider it is fine, or wait for
+                    // more data to come?
+                    // let's consider it is fine for now...
+ return sb.toString();
+ }
+ // only one '-' => not enough
+                // anything else now => error since incomplete
+ }
+ }
+ } catch (IndexOutOfBoundsException e) {
+ undecodedChunk.readerIndex(readerIndex);
+ throw new NotEnoughDataDecoderException(e);
+ }
+ undecodedChunk.readerIndex(readerIndex);
+ throw new NotEnoughDataDecoderException();
+ }
+
+ /**
+ * Rewrite buffer in order to skip lengthToSkip bytes from current readerIndex,
+ * such that any readable bytes available after readerIndex + lengthToSkip (so before writerIndex)
+     * are moved to the readerIndex position,
+     * thereby decreasing writerIndex by lengthToSkip at the end of the process.
+ *
+ * @param buffer the buffer to rewrite from current readerIndex
+ * @param lengthToSkip the size to skip from readerIndex
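+     *
+     * For example (illustrative values): with readerIndex=0, writerIndex=10 and
+     * lengthToSkip=4, bytes 4..9 are copied to positions 0..5 and writerIndex
+     * becomes 6.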
+ */
+ private static void rewriteCurrentBuffer(ByteBuf buffer, int lengthToSkip) {
+ if (lengthToSkip == 0) {
+ return;
+ }
+ final int readerIndex = buffer.readerIndex();
+ final int readableBytes = buffer.readableBytes();
+ if (readableBytes == lengthToSkip) {
+ buffer.readerIndex(readerIndex);
+ buffer.writerIndex(readerIndex);
+ return;
+ }
+ buffer.setBytes(readerIndex, buffer, readerIndex + lengthToSkip, readableBytes - lengthToSkip);
+ buffer.readerIndex(readerIndex);
+ buffer.writerIndex(readerIndex + readableBytes - lengthToSkip);
+ }
+
+ /**
+ * Load the field value or file data from a Multipart request
+ *
+ * @return {@code true} if the last chunk is loaded (boundary delimiter found), {@code false} if need more chunks
+ * @throws ErrorDataDecoderException
+ */
+ private static boolean loadDataMultipartOptimized(ByteBuf undecodedChunk, String delimiter, HttpData httpData) {
+ if (!undecodedChunk.isReadable()) {
+ return false;
+ }
+ final int startReaderIndex = undecodedChunk.readerIndex();
+ final byte[] bdelimiter = delimiter.getBytes(httpData.getCharset());
+ int posDelimiter = HttpPostBodyUtil.findDelimiter(undecodedChunk, startReaderIndex, bdelimiter, true);
+ if (posDelimiter < 0) {
+            // Not found, but perhaps only because the delimiter is incomplete, so search for LF or CRLF from the end.
+            // The last bytes may contain a partial delimiter
+            // (the delimiter is possibly partially there, with at least 1 byte missing),
+            // therefore search the last delimiter.length + 1 bytes (+1 for CRLF instead of LF)
+ int lastPosition = undecodedChunk.readableBytes() - bdelimiter.length - 1;
+ if (lastPosition < 0) {
+                // Not enough bytes, but at most delimiter.length bytes are available, so we can still try to find a CRLF there
+ lastPosition = 0;
+ }
+ posDelimiter = HttpPostBodyUtil.findLastLineBreak(undecodedChunk, startReaderIndex + lastPosition);
+ if (posDelimiter < 0) {
+ // not found so this chunk can be fully added
+ ByteBuf content = undecodedChunk.copy();
+ try {
+ httpData.addContent(content, false);
+ } catch (IOException e) {
+ throw new ErrorDataDecoderException(e);
+ }
+ undecodedChunk.readerIndex(startReaderIndex);
+ undecodedChunk.writerIndex(startReaderIndex);
+ return false;
+ }
+ // posDelimiter is not from startReaderIndex but from startReaderIndex + lastPosition
+ posDelimiter += lastPosition;
+ if (posDelimiter == 0) {
+ // Nothing to add
+ return false;
+ }
+            // Not the full content, but still some bytes to provide: httpData is not finished yet since the delimiter was not found
+ ByteBuf content = undecodedChunk.copy(startReaderIndex, posDelimiter);
+ try {
+ httpData.addContent(content, false);
+ } catch (IOException e) {
+ throw new ErrorDataDecoderException(e);
+ }
+ rewriteCurrentBuffer(undecodedChunk, posDelimiter);
+ return false;
+ }
+ // Delimiter found at posDelimiter, including LF or CRLF, so httpData has its last chunk
+ ByteBuf content = undecodedChunk.copy(startReaderIndex, posDelimiter);
+ try {
+ httpData.addContent(content, true);
+ } catch (IOException e) {
+ throw new ErrorDataDecoderException(e);
+ }
+ rewriteCurrentBuffer(undecodedChunk, posDelimiter);
+ return true;
+ }
+
+ /**
+     * Clean the String of any disallowed characters
+ *
+ * @return the cleaned String
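+     *
+     * For example (illustrative): {@code cleanString("\"file1.txt\";")} returns
+     * {@code file1.txt} (double quotes removed, ';' replaced by a space, trimmed).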
+ */
+ private static String cleanString(String field) {
+ int size = field.length();
+ StringBuilder sb = new StringBuilder(size);
+ for (int i = 0; i < size; i++) {
+ char nextChar = field.charAt(i);
+ switch (nextChar) {
+ case HttpConstants.COLON:
+ case HttpConstants.COMMA:
+ case HttpConstants.EQUALS:
+ case HttpConstants.SEMICOLON:
+ case HttpConstants.HT:
+ sb.append(HttpConstants.SP_CHAR);
+ break;
+ case HttpConstants.DOUBLE_QUOTE:
+                // nothing appended: the double quote is simply removed
+ break;
+ default:
+ sb.append(nextChar);
+ break;
+ }
+ }
+ return sb.toString().trim();
+ }
+
+ /**
+ * Skip one empty line
+ *
+ * @return True if one empty line was skipped
+ */
+ private boolean skipOneLine() {
+ if (!undecodedChunk.isReadable()) {
+ return false;
+ }
+ byte nextByte = undecodedChunk.readByte();
+ if (nextByte == HttpConstants.CR) {
+ if (!undecodedChunk.isReadable()) {
+ undecodedChunk.readerIndex(undecodedChunk.readerIndex() - 1);
+ return false;
+ }
+ nextByte = undecodedChunk.readByte();
+ if (nextByte == HttpConstants.LF) {
+ return true;
+ }
+ undecodedChunk.readerIndex(undecodedChunk.readerIndex() - 2);
+ return false;
+ }
+ if (nextByte == HttpConstants.LF) {
+ return true;
+ }
+ undecodedChunk.readerIndex(undecodedChunk.readerIndex() - 1);
+ return false;
+ }
+
+ /**
+ * Split one header in Multipart
+ *
+     * @return an array of Strings where rank 0 is the name of the header,
+     *         followed by the values that were separated by ';' or ','
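+     *
+     * For example (illustrative):
+     * <pre>{@code
+     * splitMultipartHeader("Content-Disposition: form-data; name=\"field1\"")
+     * // -> ["Content-Disposition", "form-data", "name=\"field1\""]
+     * }</pre>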
+ */
+ private static String[] splitMultipartHeader(String sb) {
+ ArrayList<String> headers = new ArrayList<String>(1);
+ int nameStart;
+ int nameEnd;
+ int colonEnd;
+ int valueStart;
+ int valueEnd;
+ nameStart = HttpPostBodyUtil.findNonWhitespace(sb, 0);
+ for (nameEnd = nameStart; nameEnd < sb.length(); nameEnd++) {
+ char ch = sb.charAt(nameEnd);
+ if (ch == ':' || Character.isWhitespace(ch)) {
+ break;
+ }
+ }
+ for (colonEnd = nameEnd; colonEnd < sb.length(); colonEnd++) {
+ if (sb.charAt(colonEnd) == ':') {
+ colonEnd++;
+ break;
+ }
+ }
+ valueStart = HttpPostBodyUtil.findNonWhitespace(sb, colonEnd);
+ valueEnd = HttpPostBodyUtil.findEndOfString(sb);
+ headers.add(sb.substring(nameStart, nameEnd));
+ String svalue = (valueStart >= valueEnd) ? StringUtil.EMPTY_STRING : sb.substring(valueStart, valueEnd);
+ String[] values;
+ if (svalue.indexOf(';') >= 0) {
+ values = splitMultipartHeaderValues(svalue);
+ } else {
+ values = svalue.split(",");
+ }
+ for (String value : values) {
+ headers.add(value.trim());
+ }
+ String[] array = new String[headers.size()];
+ for (int i = 0; i < headers.size(); i++) {
+ array[i] = headers.get(i);
+ }
+ return array;
+ }
+
+ /**
+     * Split one multipart header value
+     * @return an array of the values that were separated by ';' (quotes respected)
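+     *
+     * Quoted sections are respected: for example (illustrative),
+     * {@code name="a;b"; filename="f.txt"} splits into two values, not three.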
+ */
+ private static String[] splitMultipartHeaderValues(String svalue) {
+ List<String> values = InternalThreadLocalMap.get().arrayList(1);
+ boolean inQuote = false;
+ boolean escapeNext = false;
+ int start = 0;
+ for (int i = 0; i < svalue.length(); i++) {
+ char c = svalue.charAt(i);
+ if (inQuote) {
+ if (escapeNext) {
+ escapeNext = false;
+ } else {
+ if (c == '\\') {
+ escapeNext = true;
+ } else if (c == '"') {
+ inQuote = false;
+ }
+ }
+ } else {
+ if (c == '"') {
+ inQuote = true;
+ } else if (c == ';') {
+ values.add(svalue.substring(start, i));
+ start = i + 1;
+ }
+ }
+ }
+ values.add(svalue.substring(start));
+ return values.toArray(new String[0]);
+ }
+
+ /**
+     * This method is intentionally package-private in order to allow tests
+     * to access the amount of memory allocated (capacity) within the private
+     * ByteBuf undecodedChunk
+ *
+ * @return the number of bytes the internal buffer can contain
+ */
+ int getCurrentAllocatedCapacity() {
+ return undecodedChunk.capacity();
+ }
+}
diff --git a/src/main/java/io/vertx/core/http/impl/netty/HttpPostRequestDecoder.java b/src/main/java/io/vertx/core/http/impl/netty/HttpPostRequestDecoder.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/io/vertx/core/http/impl/netty/HttpPostRequestDecoder.java
@@ -0,0 +1,330 @@
+package io.vertx.core.http.impl.netty;
+
+import io.netty.handler.codec.DecoderException;
+import io.netty.handler.codec.http.HttpConstants;
+import io.netty.handler.codec.http.HttpContent;
+import io.netty.handler.codec.http.HttpHeaderNames;
+import io.netty.handler.codec.http.HttpHeaderValues;
+import io.netty.handler.codec.http.HttpRequest;
+import io.netty.handler.codec.http.multipart.DefaultHttpDataFactory;
+import io.netty.handler.codec.http.multipart.HttpDataFactory;
+import io.netty.handler.codec.http.multipart.InterfaceHttpData;
+import io.netty.handler.codec.http.multipart.InterfaceHttpPostRequestDecoder;
+import io.netty.util.internal.ObjectUtil;
+import io.netty.util.internal.StringUtil;
+
+import java.nio.charset.Charset;
+import java.util.List;
+
+/**
+ * This decoder decodes an HTTP POST body, delegating to a multipart or URL-encoded decoder as appropriate.
+ *
+ * You <strong>MUST</strong> call {@link #destroy()} after completion to release all resources.
+ *
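+ * A rough sketch of typical chunked usage (how {@code chunks} arrive is up to
+ * the caller; the names are illustrative):
+ * <pre>{@code
+ * HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(request);
+ * for (HttpContent content : chunks) {
+ *     decoder.offer(content); // a LastHttpContent chunk marks the end of the body
+ *     InterfaceHttpData partial = decoder.currentPartialHttpData();
+ *     // partial, if non-null, exposes the data decoded so far
+ * }
+ * decoder.destroy();
+ * }</pre>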
+ */
+public class HttpPostRequestDecoder implements InterfaceHttpPostRequestDecoder {
+
+ static final int DEFAULT_DISCARD_THRESHOLD = 10 * 1024 * 1024;
+
+ private final InterfaceHttpPostRequestDecoder decoder;
+
+ /**
+ *
+ * @param request
+ * the request to decode
+ * @throws NullPointerException
+ * for request
+ * @throws ErrorDataDecoderException
+ * if the default charset was wrong when decoding or other
+ * errors
+ */
+ public HttpPostRequestDecoder(HttpRequest request) {
+ this(new DefaultHttpDataFactory(DefaultHttpDataFactory.MINSIZE), request, HttpConstants.DEFAULT_CHARSET);
+ }
+
+ /**
+ *
+ * @param factory
+ * the factory used to create InterfaceHttpData
+ * @param request
+ * the request to decode
+ * @throws NullPointerException
+ * for request or factory
+ * @throws ErrorDataDecoderException
+ * if the default charset was wrong when decoding or other
+ * errors
+ */
+ public HttpPostRequestDecoder(HttpDataFactory factory, HttpRequest request) {
+ this(factory, request, HttpConstants.DEFAULT_CHARSET);
+ }
+
+ /**
+ *
+ * @param factory
+ * the factory used to create InterfaceHttpData
+ * @param request
+ * the request to decode
+ * @param charset
+ * the charset to use as default
+ * @throws NullPointerException
+ * for request or charset or factory
+ * @throws ErrorDataDecoderException
+ * if the default charset was wrong when decoding or other
+ * errors
+ */
+ public HttpPostRequestDecoder(HttpDataFactory factory, HttpRequest request, Charset charset) {
+ ObjectUtil.checkNotNull(factory, "factory");
+ ObjectUtil.checkNotNull(request, "request");
+ ObjectUtil.checkNotNull(charset, "charset");
+
+ // Fill default values
+ if (isMultipart(request)) {
+ decoder = new HttpPostMultipartRequestDecoder(factory, request, charset);
+ } else {
+ decoder = new HttpPostStandardRequestDecoder(factory, request, charset);
+ }
+ }
+
+ /**
+ * states follow NOTSTARTED PREAMBLE ( (HEADERDELIMITER DISPOSITION (FIELD |
+ * FILEUPLOAD))* (HEADERDELIMITER DISPOSITION MIXEDPREAMBLE (MIXEDDELIMITER
+ * MIXEDDISPOSITION MIXEDFILEUPLOAD)+ MIXEDCLOSEDELIMITER)* CLOSEDELIMITER)+
+ * EPILOGUE
+ *
+ * The first status is NOTSTARTED
+ *
+ * Content-type: multipart/form-data, boundary=AaB03x => PREAMBLE in Header
+ *
+ * --AaB03x => HEADERDELIMITER content-disposition: form-data; name="field1"
+ * => DISPOSITION
+ *
+ * Joe Blow => FIELD --AaB03x => HEADERDELIMITER content-disposition:
+ * form-data; name="pics" => DISPOSITION Content-type: multipart/mixed,
+ * boundary=BbC04y
+ *
+ * --BbC04y => MIXEDDELIMITER Content-disposition: attachment;
+ * filename="file1.txt" => MIXEDDISPOSITION Content-Type: text/plain
+ *
+ * ... contents of file1.txt ... => MIXEDFILEUPLOAD --BbC04y =>
+ * MIXEDDELIMITER Content-disposition: file; filename="file2.gif" =>
+ * MIXEDDISPOSITION Content-type: image/gif Content-Transfer-Encoding:
+ * binary
+ *
+ * ...contents of file2.gif... => MIXEDFILEUPLOAD --BbC04y-- =>
+ * MIXEDCLOSEDELIMITER --AaB03x-- => CLOSEDELIMITER
+ *
+ * Once CLOSEDELIMITER is found, the final status is EPILOGUE
+ */
+ protected enum MultiPartStatus {
+ NOTSTARTED, PREAMBLE, HEADERDELIMITER, DISPOSITION, FIELD, FILEUPLOAD, MIXEDPREAMBLE, MIXEDDELIMITER,
+ MIXEDDISPOSITION, MIXEDFILEUPLOAD, MIXEDCLOSEDELIMITER, CLOSEDELIMITER, PREEPILOGUE, EPILOGUE
+ }
+
+ /**
+ * Check if the given request is a multipart request
+ * @return True if the request is a Multipart request
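+     *
+     * For example (illustrative), a request whose Content-Type header is
+     * {@code multipart/form-data; boundary=AaB03x} yields {@code true}.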
+ */
+ public static boolean isMultipart(HttpRequest request) {
+ String mimeType = request.headers().get(HttpHeaderNames.CONTENT_TYPE);
+ if (mimeType != null && mimeType.startsWith(HttpHeaderValues.MULTIPART_FORM_DATA.toString())) {
+ return getMultipartDataBoundary(mimeType) != null;
+ }
+ return false;
+ }
+
+ /**
+     * Check from the request Content-Type whether this request is a multipart request.
+     * @return an array of Strings if the multipartDataBoundary exists, with the boundary
+     * as the first element and the charset, if any, as the second (absent if not set); otherwise null
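+     *
+     * For example (illustrative):
+     * <pre>{@code
+     * getMultipartDataBoundary("multipart/form-data; boundary=89421926422648")
+     * // -> ["--89421926422648"]
+     * getMultipartDataBoundary("multipart/form-data; boundary=b; charset=UTF-8")
+     * // -> ["--b", "UTF-8"]
+     * }</pre>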
+ */
+ protected static String[] getMultipartDataBoundary(String contentType) {
+ // Check if Post using "multipart/form-data; boundary=--89421926422648 [; charset=xxx]"
+ String[] headerContentType = splitHeaderContentType(contentType);
+ final String multiPartHeader = HttpHeaderValues.MULTIPART_FORM_DATA.toString();
+ if (headerContentType[0].regionMatches(true, 0, multiPartHeader, 0 , multiPartHeader.length())) {
+ int mrank;
+ int crank;
+ final String boundaryHeader = HttpHeaderValues.BOUNDARY.toString();
+ if (headerContentType[1].regionMatches(true, 0, boundaryHeader, 0, boundaryHeader.length())) {
+ mrank = 1;
+ crank = 2;
+ } else if (headerContentType[2].regionMatches(true, 0, boundaryHeader, 0, boundaryHeader.length())) {
+ mrank = 2;
+ crank = 1;
+ } else {
+ return null;
+ }
+ String boundary = StringUtil.substringAfter(headerContentType[mrank], '=');
+ if (boundary == null) {
+ throw new ErrorDataDecoderException("Needs a boundary value");
+ }
+ if (boundary.charAt(0) == '"') {
+ String bound = boundary.trim();
+ int index = bound.length() - 1;
+ if (bound.charAt(index) == '"') {
+ boundary = bound.substring(1, index);
+ }
+ }
+ final String charsetHeader = HttpHeaderValues.CHARSET.toString();
+ if (headerContentType[crank].regionMatches(true, 0, charsetHeader, 0, charsetHeader.length())) {
+ String charset = StringUtil.substringAfter(headerContentType[crank], '=');
+ if (charset != null) {
+ return new String[] {"--" + boundary, charset};
+ }
+ }
+ return new String[] {"--" + boundary};
+ }
+ return null;
+ }
+
+ @Override
+ public boolean isMultipart() {
+ return decoder.isMultipart();
+ }
+
+ @Override
+ public void setDiscardThreshold(int discardThreshold) {
+ decoder.setDiscardThreshold(discardThreshold);
+ }
+
+ @Override
+ public int getDiscardThreshold() {
+ return decoder.getDiscardThreshold();
+ }
+
+ @Override
+ public List<InterfaceHttpData> getBodyHttpDatas() {
+ return decoder.getBodyHttpDatas();
+ }
+
+ @Override
+ public List<InterfaceHttpData> getBodyHttpDatas(String name) {
+ return decoder.getBodyHttpDatas(name);
+ }
+
+ @Override
+ public InterfaceHttpData getBodyHttpData(String name) {
+ return decoder.getBodyHttpData(name);
+ }
+
+ @Override
+ public InterfaceHttpPostRequestDecoder offer(HttpContent content) {
+ return decoder.offer(content);
+ }
+
+ @Override
+ public boolean hasNext() {
+ return decoder.hasNext();
+ }
+
+ @Override
+ public InterfaceHttpData next() {
+ return decoder.next();
+ }
+
+ @Override
+ public InterfaceHttpData currentPartialHttpData() {
+ return decoder.currentPartialHttpData();
+ }
+
+ @Override
+ public void destroy() {
+ decoder.destroy();
+ }
+
+ @Override
+ public void cleanFiles() {
+ decoder.cleanFiles();
+ }
+
+ @Override
+ public void removeHttpDataFromClean(InterfaceHttpData data) {
+ decoder.removeHttpDataFromClean(data);
+ }
+
+ /**
+     * Split the very first line (Content-Type value) into 3 Strings
+ *
+ * @return the array of 3 Strings
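+     *
+     * For example (illustrative):
+     * <pre>{@code
+     * splitHeaderContentType("multipart/form-data; boundary=AaB03x; charset=UTF-8")
+     * // -> ["multipart/form-data", "boundary=AaB03x", "charset=UTF-8"]
+     * }</pre>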
+ */
+ private static String[] splitHeaderContentType(String sb) {
+ int aStart;
+ int aEnd;
+ int bStart;
+ int bEnd;
+ int cStart;
+ int cEnd;
+ aStart = HttpPostBodyUtil.findNonWhitespace(sb, 0);
+ aEnd = sb.indexOf(';');
+ if (aEnd == -1) {
+ return new String[] { sb, "", "" };
+ }
+ bStart = HttpPostBodyUtil.findNonWhitespace(sb, aEnd + 1);
+ if (sb.charAt(aEnd - 1) == ' ') {
+ aEnd--;
+ }
+ bEnd = sb.indexOf(';', bStart);
+ if (bEnd == -1) {
+ bEnd = HttpPostBodyUtil.findEndOfString(sb);
+ return new String[] { sb.substring(aStart, aEnd), sb.substring(bStart, bEnd), "" };
+ }
+ cStart = HttpPostBodyUtil.findNonWhitespace(sb, bEnd + 1);
+ if (sb.charAt(bEnd - 1) == ' ') {
+ bEnd--;
+ }
+ cEnd = HttpPostBodyUtil.findEndOfString(sb);
+ return new String[] { sb.substring(aStart, aEnd), sb.substring(bStart, bEnd), sb.substring(cStart, cEnd) };
+ }
+
+ /**
+     * Exception raised when trying to read data from a chunked request and not
+     * enough data is available (more chunks are needed)
+ */
+ public static class NotEnoughDataDecoderException extends DecoderException {
+ private static final long serialVersionUID = -7846841864603865638L;
+
+ public NotEnoughDataDecoderException() {
+ }
+
+ public NotEnoughDataDecoderException(String msg) {
+ super(msg);
+ }
+
+ public NotEnoughDataDecoderException(Throwable cause) {
+ super(cause);
+ }
+
+ public NotEnoughDataDecoderException(String msg, Throwable cause) {
+ super(msg, cause);
+ }
+ }
+
+ /**
+ * Exception when the body is fully decoded, even if there is still data
+ */
+ public static class EndOfDataDecoderException extends DecoderException {
+ private static final long serialVersionUID = 1336267941020800769L;
+ }
+
+ /**
+ * Exception when an error occurs while decoding
+ */
+ public static class ErrorDataDecoderException extends DecoderException {
+ private static final long serialVersionUID = 5020247425493164465L;
+
+ public ErrorDataDecoderException() {
+ }
+
+ public ErrorDataDecoderException(String msg) {
+ super(msg);
+ }
+
+ public ErrorDataDecoderException(Throwable cause) {
+ super(cause);
+ }
+
+ public ErrorDataDecoderException(String msg, Throwable cause) {
+ super(msg, cause);
+ }
+ }
+}
diff --git a/src/main/java/io/vertx/core/http/impl/netty/HttpPostStandardRequestDecoder.java b/src/main/java/io/vertx/core/http/impl/netty/HttpPostStandardRequestDecoder.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/io/vertx/core/http/impl/netty/HttpPostStandardRequestDecoder.java
@@ -0,0 +1,779 @@
+/*
+ * Copyright 2012 The Netty Project
+ *
+ * The Netty Project licenses this file to you under the Apache License,
+ * version 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at:
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package io.vertx.core.http.impl.netty;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
+import io.netty.handler.codec.http.HttpConstants;
+import io.netty.handler.codec.http.HttpContent;
+import io.netty.handler.codec.http.HttpRequest;
+import io.netty.handler.codec.http.LastHttpContent;
+import io.netty.handler.codec.http.QueryStringDecoder;
+import io.netty.handler.codec.http.multipart.Attribute;
+import io.netty.handler.codec.http.multipart.DefaultHttpDataFactory;
+import io.netty.handler.codec.http.multipart.HttpData;
+import io.netty.handler.codec.http.multipart.HttpDataFactory;
+import io.vertx.core.http.impl.netty.HttpPostBodyUtil.SeekAheadOptimize;
+import io.vertx.core.http.impl.netty.HttpPostRequestDecoder.EndOfDataDecoderException;
+import io.vertx.core.http.impl.netty.HttpPostRequestDecoder.ErrorDataDecoderException;
+import io.vertx.core.http.impl.netty.HttpPostRequestDecoder.MultiPartStatus;
+import io.vertx.core.http.impl.netty.HttpPostRequestDecoder.NotEnoughDataDecoderException;
+import io.netty.handler.codec.http.multipart.InterfaceHttpData;
+import io.netty.handler.codec.http.multipart.InterfaceHttpPostRequestDecoder;
+import io.netty.util.ByteProcessor;
+import io.netty.util.internal.PlatformDependent;
+import io.netty.util.internal.StringUtil;
+
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+import static io.netty.util.internal.ObjectUtil.checkNotNull;
+import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero;
+
+/**
+ * This decoder decodes a non-multipart (URL-encoded) HTTP POST body.
+ *
+ * You <strong>MUST</strong> call {@link #destroy()} after completion to release all resources.
+ *
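+ * For example (illustrative), a URL-encoded body of {@code a=1&b=2} decodes
+ * into two Attributes named {@code a} and {@code b}.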
+ */
+public class HttpPostStandardRequestDecoder implements InterfaceHttpPostRequestDecoder {
+
+ /**
+ * Factory used to create InterfaceHttpData
+ */
+ private final HttpDataFactory factory;
+
+ /**
+ * Request to decode
+ */
+ private final HttpRequest request;
+
+ /**
+ * Default charset to use
+ */
+ private final Charset charset;
+
+ /**
+     * Whether the last chunk has already been received
+ */
+ private boolean isLastChunk;
+
+ /**
+ * HttpDatas from Body
+ */
+ private final List<InterfaceHttpData> bodyListHttpData = new ArrayList<InterfaceHttpData>();
+
+ /**
+ * HttpDatas as Map from Body
+ */
+ private final Map<String, List<InterfaceHttpData>> bodyMapHttpData = new TreeMap<String, List<InterfaceHttpData>>(
+ CaseIgnoringComparator.INSTANCE);
+
+ /**
+ * The current channelBuffer
+ */
+ private ByteBuf undecodedChunk;
+
+ /**
+ * Body HttpDatas current position
+ */
+ private int bodyListHttpDataRank;
+
+ /**
+     * Current status
+ */
+ private MultiPartStatus currentStatus = MultiPartStatus.NOTSTARTED;
+
+ /**
+     * The Attribute currently being decoded
+ */
+ private Attribute currentAttribute;
+
+ private boolean destroyed;
+
+ private int discardThreshold = HttpPostRequestDecoder.DEFAULT_DISCARD_THRESHOLD;
+
+ /**
+ *
+ * @param request
+ * the request to decode
+ * @throws NullPointerException
+ * for request
+ * @throws ErrorDataDecoderException
+ * if the default charset was wrong when decoding or other
+ * errors
+ */
+ public HttpPostStandardRequestDecoder(HttpRequest request) {
+ this(new DefaultHttpDataFactory(DefaultHttpDataFactory.MINSIZE), request, HttpConstants.DEFAULT_CHARSET);
+ }
+
+ /**
+ *
+ * @param factory
+ * the factory used to create InterfaceHttpData
+ * @param request
+ * the request to decode
+ * @throws NullPointerException
+ * for request or factory
+ * @throws ErrorDataDecoderException
+ * if the default charset was wrong when decoding or other
+ * errors
+ */
+ public HttpPostStandardRequestDecoder(HttpDataFactory factory, HttpRequest request) {
+ this(factory, request, HttpConstants.DEFAULT_CHARSET);
+ }
+
+ /**
+ *
+ * @param factory
+ * the factory used to create InterfaceHttpData
+ * @param request
+ * the request to decode
+ * @param charset
+ * the charset to use as default
+ * @throws NullPointerException
+ * for request or charset or factory
+ * @throws ErrorDataDecoderException
+ * if the default charset was wrong when decoding or other
+ * errors
+ */
+ public HttpPostStandardRequestDecoder(HttpDataFactory factory, HttpRequest request, Charset charset) {
+ this.request = checkNotNull(request, "request");
+ this.charset = checkNotNull(charset, "charset");
+ this.factory = checkNotNull(factory, "factory");
+ try {
+ if (request instanceof HttpContent) {
+        // Offer automatically if the given request is of type HttpContent
+ // See #1089
+ offer((HttpContent) request);
+ } else {
+ parseBody();
+ }
+ } catch (Throwable e) {
+ destroy();
+ PlatformDependent.throwException(e);
+ }
+ }
+
+ private void checkDestroyed() {
+ if (destroyed) {
+ throw new IllegalStateException(HttpPostStandardRequestDecoder.class.getSimpleName()
+ + " was destroyed already");
+ }
+ }
+
+ /**
+ * True if this request is a Multipart request
+ *
+ * @return True if this request is a Multipart request
+ */
+ @Override
+ public boolean isMultipart() {
+ checkDestroyed();
+ return false;
+ }
+
+ /**
+   * Set the number of bytes after which read bytes in the buffer should be discarded.
+ * Setting this lower gives lower memory usage but with the overhead of more memory copies.
+ * Use {@code 0} to disable it.
+ */
+ @Override
+ public void setDiscardThreshold(int discardThreshold) {
+ this.discardThreshold = checkPositiveOrZero(discardThreshold, "discardThreshold");
+ }
+
+ /**
+ * Return the threshold in bytes after which read data in the buffer should be discarded.
+ */
+ @Override
+ public int getDiscardThreshold() {
+ return discardThreshold;
+ }
+
+ /**
+   * This method returns a List of all HttpDatas from the body.<br>
+   *
+   * If chunked, all chunks must have been offered using the offer() method. If
+   * not, a NotEnoughDataDecoderException will be raised.
+   *
+   * @return the list of HttpDatas from the body part of the POST request
+ * @throws NotEnoughDataDecoderException
+ * Need more chunks
+ */
+ @Override
+ public List<InterfaceHttpData> getBodyHttpDatas() {
+ checkDestroyed();
+
+ if (!isLastChunk) {
+ throw new NotEnoughDataDecoderException();
+ }
+ return bodyListHttpData;
+ }
+
+ /**
+   * This method returns a List of all HttpDatas with the given name from
+   * the body.<br>
+   *
+   * If chunked, all chunks must have been offered using the offer() method. If
+   * not, a NotEnoughDataDecoderException will be raised.
+   *
+   * @return All body HttpDatas with the given name (ignoring case)
+ * @throws NotEnoughDataDecoderException
+ * need more chunks
+ */
+ @Override
+ public List<InterfaceHttpData> getBodyHttpDatas(String name) {
+ checkDestroyed();
+
+ if (!isLastChunk) {
+ throw new NotEnoughDataDecoderException();
+ }
+ return bodyMapHttpData.get(name);
+ }
+
+ /**
+   * This method returns the first InterfaceHttpData with the given name from
+   * the body.<br>
+   *
+   * If chunked, all chunks must have been offered using the offer() method. If
+   * not, a NotEnoughDataDecoderException will be raised.
+   *
+   * @return The first body InterfaceHttpData with the given name (ignoring
+   *         case)
+ * @throws NotEnoughDataDecoderException
+ * need more chunks
+ */
+ @Override
+ public InterfaceHttpData getBodyHttpData(String name) {
+ checkDestroyed();
+
+ if (!isLastChunk) {
+ throw new NotEnoughDataDecoderException();
+ }
+ List<InterfaceHttpData> list = bodyMapHttpData.get(name);
+ if (list != null) {
+ return list.get(0);
+ }
+ return null;
+ }
+
+ /**
+   * Initializes the internals from a new chunk
+   *
+   * @param content
+   *          the newly received chunk
+ * @throws ErrorDataDecoderException
+ * if there is a problem with the charset decoding or other
+ * errors
+ */
+ @Override
+ public HttpPostStandardRequestDecoder offer(HttpContent content) {
+ checkDestroyed();
+
+ if (content instanceof LastHttpContent) {
+ isLastChunk = true;
+ }
+
+ ByteBuf buf = content.content();
+ if (undecodedChunk == null) {
+ undecodedChunk =
+        // Since the Handler will release the incoming data later on, we need to copy it.
+        //
+        // We explicitly allocate a buffer and do NOT call copy(), as copy() may set a maxCapacity
+        // which is not really usable for us, since we may exceed it once we add more bytes.
+ buf.alloc().buffer(buf.readableBytes()).writeBytes(buf);
+ } else {
+ undecodedChunk.writeBytes(buf);
+ }
+ parseBody();
+ if (undecodedChunk != null && undecodedChunk.writerIndex() > discardThreshold) {
+ if (undecodedChunk.refCnt() == 1) {
+ // It's safe to call discardBytes() as we are the only owner of the buffer.
+ undecodedChunk.discardReadBytes();
+ } else {
+        // There seem to be multiple references to the buffer. Let's copy the data and release the buffer to
+ // ensure we can give back memory to the system.
+ ByteBuf buffer = undecodedChunk.alloc().buffer(undecodedChunk.readableBytes());
+ buffer.writeBytes(undecodedChunk);
+ undecodedChunk.release();
+ undecodedChunk = buffer;
+ }
+ }
+ return this;
+ }
+
+ /**
+   * True if, at the current status, there is an available decoded
+   * InterfaceHttpData from the body.
+   *
+   * This method works for both chunked and non-chunked requests.
+   *
+   * @return True if, at the current status, there is a decoded InterfaceHttpData
+ * @throws EndOfDataDecoderException
+ * No more data will be available
+ */
+ @Override
+ public boolean hasNext() {
+ checkDestroyed();
+
+ if (currentStatus == MultiPartStatus.EPILOGUE) {
+ // OK except if end of list
+ if (bodyListHttpDataRank >= bodyListHttpData.size()) {
+ throw new EndOfDataDecoderException();
+ }
+ }
+ return !bodyListHttpData.isEmpty() && bodyListHttpDataRank < bodyListHttpData.size();
+ }
+
+ /**
+ * Returns the next available InterfaceHttpData or null if, at the time it
+ * is called, there is no more available InterfaceHttpData. A subsequent
+ * call to offer(httpChunk) could enable more data.
+ *
+ * Be sure to call {@link InterfaceHttpData#release()} after you are done
+   * with processing, to make sure you do not leak any resources.
+ *
+ * @return the next available InterfaceHttpData or null if none
+ * @throws EndOfDataDecoderException
+ * No more data will be available
+ */
+ @Override
+ public InterfaceHttpData next() {
+ checkDestroyed();
+
+ if (hasNext()) {
+ return bodyListHttpData.get(bodyListHttpDataRank++);
+ }
+ return null;
+ }
+
+ @Override
+ public InterfaceHttpData currentPartialHttpData() {
+ return currentAttribute;
+ }
+
+ /**
+   * This method parses as much data as possible and fills the list and map.
+ *
+ * @throws ErrorDataDecoderException
+ * if there is a problem with the charset decoding or other
+ * errors
+ */
+ private void parseBody() {
+ if (currentStatus == MultiPartStatus.PREEPILOGUE || currentStatus == MultiPartStatus.EPILOGUE) {
+ if (isLastChunk) {
+ currentStatus = MultiPartStatus.EPILOGUE;
+ }
+ return;
+ }
+ parseBodyAttributes();
+ }
+
+ /**
+   * Utility method to add newly decoded data
+ */
+ protected void addHttpData(InterfaceHttpData data) {
+ if (data == null) {
+ return;
+ }
+ List<InterfaceHttpData> datas = bodyMapHttpData.get(data.getName());
+ if (datas == null) {
+ datas = new ArrayList<InterfaceHttpData>(1);
+ bodyMapHttpData.put(data.getName(), datas);
+ }
+ datas.add(data);
+ bodyListHttpData.add(data);
+ }
+
+ /**
+   * This method fills the map and list with as many Attributes as possible from
+   * the body, in non-multipart mode.
+ *
+ * @throws ErrorDataDecoderException
+ * if there is a problem with the charset decoding or other
+ * errors
+ */
+ private void parseBodyAttributesStandard() {
+ int firstpos = undecodedChunk.readerIndex();
+ int currentpos = firstpos;
+ int equalpos;
+ int ampersandpos;
+ if (currentStatus == MultiPartStatus.NOTSTARTED) {
+ currentStatus = MultiPartStatus.DISPOSITION;
+ }
+ boolean contRead = true;
+ try {
+ while (undecodedChunk.isReadable() && contRead) {
+ char read = (char) undecodedChunk.readUnsignedByte();
+ currentpos++;
+ switch (currentStatus) {
+ case DISPOSITION:// search '='
+ if (read == '=') {
+ currentStatus = MultiPartStatus.FIELD;
+ equalpos = currentpos - 1;
+ String key = decodeAttribute(undecodedChunk.toString(firstpos, equalpos - firstpos, charset),
+ charset);
+ currentAttribute = factory.createAttribute(request, key);
+ firstpos = currentpos;
+ } else if (read == '&') { // special empty FIELD
+ currentStatus = MultiPartStatus.DISPOSITION;
+ ampersandpos = currentpos - 1;
+ String key = decodeAttribute(
+ undecodedChunk.toString(firstpos, ampersandpos - firstpos, charset), charset);
+ currentAttribute = factory.createAttribute(request, key);
+ currentAttribute.setValue(""); // empty
+ addHttpData(currentAttribute);
+ currentAttribute = null;
+ firstpos = currentpos;
+ contRead = true;
+ }
+ break;
+ case FIELD:// search '&' or end of line
+ if (read == '&') {
+ currentStatus = MultiPartStatus.DISPOSITION;
+ ampersandpos = currentpos - 1;
+ setFinalBuffer(undecodedChunk.retainedSlice(firstpos, ampersandpos - firstpos));
+ firstpos = currentpos;
+ contRead = true;
+ } else if (read == HttpConstants.CR) {
+ if (undecodedChunk.isReadable()) {
+ read = (char) undecodedChunk.readUnsignedByte();
+ currentpos++;
+ if (read == HttpConstants.LF) {
+ currentStatus = MultiPartStatus.PREEPILOGUE;
+ ampersandpos = currentpos - 2;
+ setFinalBuffer(undecodedChunk.retainedSlice(firstpos, ampersandpos - firstpos));
+ firstpos = currentpos;
+ contRead = false;
+ } else {
+ // Error
+ throw new ErrorDataDecoderException("Bad end of line");
+ }
+ } else {
+ currentpos--;
+ }
+ } else if (read == HttpConstants.LF) {
+ currentStatus = MultiPartStatus.PREEPILOGUE;
+ ampersandpos = currentpos - 1;
+ setFinalBuffer(undecodedChunk.retainedSlice(firstpos, ampersandpos - firstpos));
+ firstpos = currentpos;
+ contRead = false;
+ }
+ break;
+ default:
+ // just stop
+ contRead = false;
+ }
+ }
+ if (isLastChunk && currentAttribute != null) {
+ // special case
+ ampersandpos = currentpos;
+ if (ampersandpos > firstpos) {
+ setFinalBuffer(undecodedChunk.retainedSlice(firstpos, ampersandpos - firstpos));
+ } else if (!currentAttribute.isCompleted()) {
+ setFinalBuffer(Unpooled.EMPTY_BUFFER);
+ }
+ firstpos = currentpos;
+ currentStatus = MultiPartStatus.EPILOGUE;
+ } else if (contRead && currentAttribute != null && currentStatus == MultiPartStatus.FIELD) {
+        // reset the index, except when continuing in the FIELD status
+ currentAttribute.addContent(undecodedChunk.retainedSlice(firstpos, currentpos - firstpos),
+ false);
+ firstpos = currentpos;
+ }
+ undecodedChunk.readerIndex(firstpos);
+ } catch (ErrorDataDecoderException e) {
+ // error while decoding
+ undecodedChunk.readerIndex(firstpos);
+ throw e;
+ } catch (IOException e) {
+ // error while decoding
+ undecodedChunk.readerIndex(firstpos);
+ throw new ErrorDataDecoderException(e);
+ } catch (IllegalArgumentException e) {
+ // error while decoding
+ undecodedChunk.readerIndex(firstpos);
+ throw new ErrorDataDecoderException(e);
+ }
+ }
+
+ /**
+   * This method fills the map and list with as many Attributes as possible from
+   * the body, in non-multipart mode.
+ *
+ * @throws ErrorDataDecoderException
+ * if there is a problem with the charset decoding or other
+ * errors
+ */
+ private void parseBodyAttributes() {
+ if (undecodedChunk == null) {
+ return;
+ }
+ if (!undecodedChunk.hasArray()) {
+ parseBodyAttributesStandard();
+ return;
+ }
+ SeekAheadOptimize sao = new SeekAheadOptimize(undecodedChunk);
+ int firstpos = undecodedChunk.readerIndex();
+ int currentpos = firstpos;
+ int equalpos;
+ int ampersandpos;
+ if (currentStatus == MultiPartStatus.NOTSTARTED) {
+ currentStatus = MultiPartStatus.DISPOSITION;
+ }
+ boolean contRead = true;
+ try {
+ loop: while (sao.pos < sao.limit) {
+ char read = (char) (sao.bytes[sao.pos++] & 0xFF);
+ currentpos++;
+ switch (currentStatus) {
+ case DISPOSITION:// search '='
+ if (read == '=') {
+ currentStatus = MultiPartStatus.FIELD;
+ equalpos = currentpos - 1;
+ String key = decodeAttribute(undecodedChunk.toString(firstpos, equalpos - firstpos, charset),
+ charset);
+ currentAttribute = factory.createAttribute(request, key);
+ firstpos = currentpos;
+ } else if (read == '&') { // special empty FIELD
+ currentStatus = MultiPartStatus.DISPOSITION;
+ ampersandpos = currentpos - 1;
+ String key = decodeAttribute(
+ undecodedChunk.toString(firstpos, ampersandpos - firstpos, charset), charset);
+ currentAttribute = factory.createAttribute(request, key);
+ currentAttribute.setValue(""); // empty
+ addHttpData(currentAttribute);
+ currentAttribute = null;
+ firstpos = currentpos;
+ contRead = true;
+ }
+ break;
+ case FIELD:// search '&' or end of line
+ if (read == '&') {
+ currentStatus = MultiPartStatus.DISPOSITION;
+ ampersandpos = currentpos - 1;
+ setFinalBuffer(undecodedChunk.retainedSlice(firstpos, ampersandpos - firstpos));
+ firstpos = currentpos;
+ contRead = true;
+ } else if (read == HttpConstants.CR) {
+ if (sao.pos < sao.limit) {
+ read = (char) (sao.bytes[sao.pos++] & 0xFF);
+ currentpos++;
+ if (read == HttpConstants.LF) {
+ currentStatus = MultiPartStatus.PREEPILOGUE;
+ ampersandpos = currentpos - 2;
+ sao.setReadPosition(0);
+ setFinalBuffer(undecodedChunk.retainedSlice(firstpos, ampersandpos - firstpos));
+ firstpos = currentpos;
+ contRead = false;
+ break loop;
+ } else {
+ // Error
+ sao.setReadPosition(0);
+ throw new ErrorDataDecoderException("Bad end of line");
+ }
+ } else {
+ if (sao.limit > 0) {
+ currentpos--;
+ }
+ }
+ } else if (read == HttpConstants.LF) {
+ currentStatus = MultiPartStatus.PREEPILOGUE;
+ ampersandpos = currentpos - 1;
+ sao.setReadPosition(0);
+ setFinalBuffer(undecodedChunk.retainedSlice(firstpos, ampersandpos - firstpos));
+ firstpos = currentpos;
+ contRead = false;
+ break loop;
+ }
+ break;
+ default:
+ // just stop
+ sao.setReadPosition(0);
+ contRead = false;
+ break loop;
+ }
+ }
+ if (isLastChunk && currentAttribute != null) {
+ // special case
+ ampersandpos = currentpos;
+ if (ampersandpos > firstpos) {
+ setFinalBuffer(undecodedChunk.retainedSlice(firstpos, ampersandpos - firstpos));
+ } else if (!currentAttribute.isCompleted()) {
+ setFinalBuffer(Unpooled.EMPTY_BUFFER);
+ }
+ firstpos = currentpos;
+ currentStatus = MultiPartStatus.EPILOGUE;
+ } else if (contRead && currentAttribute != null && currentStatus == MultiPartStatus.FIELD) {
+        // reset the index, except when continuing in the FIELD status
+ currentAttribute.addContent(undecodedChunk.retainedSlice(firstpos, currentpos - firstpos),
+ false);
+ firstpos = currentpos;
+ }
+ undecodedChunk.readerIndex(firstpos);
+ } catch (ErrorDataDecoderException e) {
+ // error while decoding
+ undecodedChunk.readerIndex(firstpos);
+ throw e;
+ } catch (IOException e) {
+ // error while decoding
+ undecodedChunk.readerIndex(firstpos);
+ throw new ErrorDataDecoderException(e);
+ } catch (IllegalArgumentException e) {
+ // error while decoding
+ undecodedChunk.readerIndex(firstpos);
+ throw new ErrorDataDecoderException(e);
+ }
+ }
+
+ private void setFinalBuffer(ByteBuf buffer) throws IOException {
+ currentAttribute.addContent(buffer, true);
+ ByteBuf decodedBuf = decodeAttribute(currentAttribute.getByteBuf(), charset);
+ if (decodedBuf != null) { // override content only when ByteBuf needed decoding
+ currentAttribute.setContent(decodedBuf);
+ }
+ addHttpData(currentAttribute);
+ currentAttribute = null;
+ }
+
+ /**
+ * Decode component
+ *
+ * @return the decoded component
+ */
+ private static String decodeAttribute(String s, Charset charset) {
+ try {
+ return QueryStringDecoder.decodeComponent(s, charset);
+ } catch (IllegalArgumentException e) {
+ throw new ErrorDataDecoderException("Bad string: '" + s + '\'', e);
+ }
+ }
+
+ private static ByteBuf decodeAttribute(ByteBuf b, Charset charset) {
+ int firstEscaped = b.forEachByte(new UrlEncodedDetector());
+ if (firstEscaped == -1) {
+ return null; // nothing to decode
+ }
+
+ ByteBuf buf = b.alloc().buffer(b.readableBytes());
+ UrlDecoder urlDecode = new UrlDecoder(buf);
+ int idx = b.forEachByte(urlDecode);
+ if (urlDecode.nextEscapedIdx != 0) { // incomplete hex byte
+ if (idx == -1) {
+ idx = b.readableBytes() - 1;
+ }
+ idx -= urlDecode.nextEscapedIdx - 1;
+ buf.release();
+ throw new ErrorDataDecoderException(
+ String.format("Invalid hex byte at index '%d' in string: '%s'", idx, b.toString(charset)));
+ }
+
+ return buf;
+ }
+
+ /**
+   * Destroy the {@link HttpPostStandardRequestDecoder} and release all its resources. After this method
+   * has been called, it is no longer possible to operate on it.
+ */
+ @Override
+ public void destroy() {
+    // Release all file-based data items, including those not yet pulled
+ cleanFiles();
+ // Clean Memory based data
+ for (InterfaceHttpData httpData : bodyListHttpData) {
+ // Might have been already released by the user
+ if (httpData.refCnt() > 0) {
+ httpData.release();
+ }
+ }
+
+ destroyed = true;
+
+ if (undecodedChunk != null && undecodedChunk.refCnt() > 0) {
+ undecodedChunk.release();
+ undecodedChunk = null;
+ }
+ }
+
+ /**
+ * Clean all {@link HttpData}s for the current request.
+ */
+ @Override
+ public void cleanFiles() {
+ checkDestroyed();
+
+ factory.cleanRequestHttpData(request);
+ }
+
+ /**
+ * Remove the given FileUpload from the list of FileUploads to clean
+ */
+ @Override
+ public void removeHttpDataFromClean(InterfaceHttpData data) {
+ checkDestroyed();
+
+ factory.removeHttpDataFromClean(request, data);
+ }
+
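+  /**
+   * A {@link ByteProcessor} that stops at the first byte that requires URL decoding
+   * ({@code '%'} or {@code '+'}), so that {@code forEachByte} returns its index.
+   */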
+ private static final class UrlEncodedDetector implements ByteProcessor {
+ @Override
+ public boolean process(byte value) throws Exception {
+ return value != '%' && value != '+';
+ }
+ }
+
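+  /**
+   * A {@link ByteProcessor} that URL-decodes the processed bytes into {@code output}.
+   * {@code nextEscapedIdx} tracks progress through a {@code %xx} escape sequence:
+   * 0 means not inside an escape, 1 means '%' was just seen, 2 means the high nibble was read.
+   * Processing stops (returns false) when an invalid hex nibble is encountered.
+   */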
+ private static final class UrlDecoder implements ByteProcessor {
+
+ private final ByteBuf output;
+ private int nextEscapedIdx;
+ private byte hiByte;
+
+ UrlDecoder(ByteBuf output) {
+ this.output = output;
+ }
+
+ @Override
+ public boolean process(byte value) {
+ if (nextEscapedIdx != 0) {
+ if (nextEscapedIdx == 1) {
+ hiByte = value;
+ ++nextEscapedIdx;
+ } else {
+ int hi = StringUtil.decodeHexNibble((char) hiByte);
+ int lo = StringUtil.decodeHexNibble((char) value);
+ if (hi == -1 || lo == -1) {
+ ++nextEscapedIdx;
+ return false;
+ }
+ output.writeByte((hi << 4) + lo);
+ nextEscapedIdx = 0;
+ }
+ } else if (value == '%') {
+ nextEscapedIdx = 1;
+ } else if (value == '+') {
+ output.writeByte(' ');
+ } else {
+ output.writeByte(value);
+ }
+ return true;
+ }
+ }
+}
| diff --git a/src/test/java/io/vertx/core/http/Http1xServerFileUploadTest.java b/src/test/java/io/vertx/core/http/Http1xServerFileUploadTest.java
--- a/src/test/java/io/vertx/core/http/Http1xServerFileUploadTest.java
+++ b/src/test/java/io/vertx/core/http/Http1xServerFileUploadTest.java
@@ -17,7 +17,6 @@
*/
public class Http1xServerFileUploadTest extends HttpServerFileUploadTest {
-
@Ignore
@Test
@Override
| Request for a new ContextImpl that mimics a Worker Context but allows multiple queues
I ran into the following issue before using Vert.x, and now I run into it with Vert.x as well. I really like the functionality that a Worker Context provides: a separate thread that runs tasks in the order they were submitted to the context. However, this functionality could be even more helpful if the Worker Context allowed tasks to be submitted for in-order execution in different queues, based on some (defined? arbitrary?) identifier/priority. For example, I need to "serialize" the execution of tasks, but tasks with different priorities could be executed in parallel, while I still need the "serialization" feature for tasks of the same priority.
Thanks - Juan
| hi, do you have an actual use case for this?
Yes. I need to process several messages on a worker context, but those messages can be categorized: there is no need for messages of category A to be processed sequentially with messages of category B, yet all messages within category A (or B) need to be processed in order. Think about messages for order processing (not my actual use case, but close): I have a Verticle which processes messages (creation, activation, provision, etc.), and all messages for an order need to be processed sequentially, while messages for different orders can be processed in parallel. I think this new ContextImpl would be some sort of hybrid of the Worker and Multi-threaded contexts.
why would you need a specific context implementation for this?
there is this existing issue https://github.com/eclipse/vert.x/issues/1539 that could somehow help with your use case
@vietj I agree that requesting the creation of a new Context implementation might not be the right approach. My main complaint with a possible implementation of #1539 (unless I am wrong, the issue has no solution yet, right?) is that there is no way to indicate how to place the Handler in a specific TaskQueue for execution, unless the signature of Context.executeBlocking can change to accept a new structure instead of a Handler (so you could determine which TaskQueue to use). I was hoping there could be something like an `executeBlocking(blockingCodeHandler, "Category/Queue", resultCodeHandler)` that would allow the blockingCodeHandler to be executed on a particular TaskQueue. Anyway, it seems you have not warmed up to the idea, so please feel free to close the issue. IMHO I still think there is value in supporting something like this.
After some thought I was able to come up with a simple(r?) solution to what I think the improvement should be:
https://gist.github.com/juanavelez/bf47ae84f842896350ca046a1d88ac5f.
Let me know what you think.
Thanks - Juan
it seems you can do it with a helper, without requiring a modification in Vert.x itself
I am sorry, I don't understand what you meant, since my modifications were to the Vert.x core code itself: src/main/java/io/vertx/core/Context.java, src/main/java/io/vertx/core/Vertx.java, src/main/java/io/vertx/core/impl/ContextImpl.java, src/main/java/io/vertx/core/impl/VertxImpl.java, src/test/java/io/vertx/test/core/ContextTest.java
I mean that we cannot have this in Vert.x core; you should make your own helper based on Vert.x core to implement this particular use case.
May I ask why you cannot have this in Vert.x core?
why should we have it?
No offense to your proposal, but this code does not seem general-purpose enough to be in Vert.x Core; we prefer to keep the Vert.x Core API minimal (less is more) and not overload it with different meanings.
In your case you seem to need to add the notion of priority to task execution, and it seems better to handle this with your own concurrency util built on top of Vert.x Context concurrency, as in the sketch below. This will also allow you to improve it and tune it to your needs.
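A minimal sketch of such a helper on top of the public Vert.x 4 API (illustrative only; the `KeyedTaskQueue` name and all identifiers are made up for this example and are not code from this issue or the gist above):
```
import io.vertx.core.Future;
import io.vertx.core.Handler;
import io.vertx.core.Promise;
import io.vertx.core.Vertx;

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Runs tasks that share a key in submission order, while tasks with
// different keys may run in parallel on the worker pool.
public class KeyedTaskQueue {

  private final Vertx vertx;
  // Tail of the task chain per key; entries are kept forever for simplicity,
  // a production version would prune completed tails.
  private final ConcurrentMap<String, Future<?>> tails = new ConcurrentHashMap<>();

  public KeyedTaskQueue(Vertx vertx) {
    this.vertx = vertx;
  }

  public <T> Future<T> submit(String key, Handler<Promise<T>> blockingCode) {
    Promise<T> result = Promise.promise();
    tails.compute(key, (k, tail) -> {
      Future<?> previous = tail == null ? Future.succeededFuture() : tail;
      Promise<Void> nextTail = Promise.promise();
      // Start the new task only once the previous task for this key is done;
      // ordered=false because ordering is already enforced per key here.
      // nextTail completes even if the task fails, so a failure never stalls the queue.
      previous.onComplete(ignored ->
        vertx.<T>executeBlocking(blockingCode, false).onComplete(ar -> {
          result.handle(ar);
          nextTail.complete();
        }));
      return nextTail.future();
    });
    return result.future();
  }
}
```
Usage would then look like `queue.submit(orderId, promise -> { /* blocking work */ promise.complete(); })`, which serializes work per order while unrelated orders proceed in parallel.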
If you think this could interest the community, I encourage you to create a library for it and then publish it on the vertx-awesome page https://github.com/vert-x3/vertx-awesome . That's what many people in the community do. | 2021-05-06T22:07:58Z | 4 |
eclipse-vertx/vert.x | 3,853 | eclipse-vertx__vert.x-3853 | [
"3823"
] | 5a120ee42bc278044b290e0411764050f80f9a6c | diff --git a/src/main/java/io/vertx/core/json/JsonArray.java b/src/main/java/io/vertx/core/json/JsonArray.java
--- a/src/main/java/io/vertx/core/json/JsonArray.java
+++ b/src/main/java/io/vertx/core/json/JsonArray.java
@@ -12,11 +12,13 @@
package io.vertx.core.json;
import io.vertx.core.buffer.Buffer;
+import io.vertx.core.json.impl.JsonUtil;
import io.vertx.core.shareddata.Shareable;
import io.vertx.core.shareddata.impl.ClusterSerializable;
import java.time.Instant;
import java.util.*;
+import java.util.function.Function;
import java.util.stream.Stream;
import static io.vertx.core.json.impl.JsonUtil.*;
@@ -557,12 +559,26 @@ public String encodePrettily() {
* Deep copy of the JSON array.
*
* @return a copy where all elements have been copied recursively
+ * @throws IllegalStateException when a nested element cannot be copied
*/
@Override
public JsonArray copy() {
+ return copy(DEFAULT_CLONER);
+ }
+
+ /**
+ * Deep copy of the JSON array.
+ *
+   * <p> Unlike {@link #copy()}, which can fail when an unknown element cannot be copied, this method
+   * delegates the copy of such an element to the {@code cloner} function and will not fail.
+ *
+ * @param cloner a function that copies custom values not supported by the JSON implementation
+ * @return a copy where all elements have been copied recursively
+ */
+ public JsonArray copy(Function<Object, ?> cloner) {
List<Object> copiedList = new ArrayList<>(list.size());
for (Object val : list) {
- copiedList.add(checkAndCopy(val));
+ copiedList.add(deepCopy(val, cloner));
}
return new JsonArray(copiedList);
}
diff --git a/src/main/java/io/vertx/core/json/JsonObject.java b/src/main/java/io/vertx/core/json/JsonObject.java
--- a/src/main/java/io/vertx/core/json/JsonObject.java
+++ b/src/main/java/io/vertx/core/json/JsonObject.java
@@ -11,11 +11,13 @@
package io.vertx.core.json;
import io.vertx.core.buffer.Buffer;
+import io.vertx.core.json.impl.JsonUtil;
import io.vertx.core.shareddata.Shareable;
import io.vertx.core.shareddata.impl.ClusterSerializable;
import java.time.Instant;
import java.util.*;
+import java.util.function.Function;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
@@ -751,9 +753,23 @@ public Buffer toBuffer() {
* Deep copy of this JSON object.
*
* @return a copy where all elements have been copied recursively
+ * @throws IllegalStateException when a nested element cannot be copied
*/
@Override
public JsonObject copy() {
+ return copy(DEFAULT_CLONER);
+ }
+
+ /**
+ * Deep copy of this JSON object.
+ *
+   * <p> Unlike {@link #copy()}, which can fail when an unknown element cannot be copied, this method
+   * delegates the copy of such an element to the {@code cloner} function and will not fail.
+ *
+ * @param cloner a function that copies custom values not supported by the JSON implementation
+ * @return a copy where all elements have been copied recursively
+ */
+ public JsonObject copy(Function<Object, ?> cloner) {
Map<String, Object> copiedMap;
if (map instanceof LinkedHashMap) {
copiedMap = new LinkedHashMap<>(map.size());
@@ -761,7 +777,7 @@ public JsonObject copy() {
copiedMap = new HashMap<>(map.size());
}
for (Map.Entry<String, Object> entry : map.entrySet()) {
- Object val = checkAndCopy(entry.getValue());
+ Object val = deepCopy(entry.getValue(), cloner);
copiedMap.put(entry.getKey(), val);
}
return new JsonObject(copiedMap);
diff --git a/src/main/java/io/vertx/core/json/impl/JsonUtil.java b/src/main/java/io/vertx/core/json/impl/JsonUtil.java
--- a/src/main/java/io/vertx/core/json/impl/JsonUtil.java
+++ b/src/main/java/io/vertx/core/json/impl/JsonUtil.java
@@ -19,6 +19,7 @@
import java.util.Base64;
import java.util.List;
import java.util.Map;
+import java.util.function.Function;
import static java.time.format.DateTimeFormatter.ISO_INSTANT;
@@ -81,8 +82,12 @@ public static Object wrapJsonValue(Object val) {
return val;
}
+ public static final Function<Object, ?> DEFAULT_CLONER = o -> {
+ throw new IllegalStateException("Illegal type in Json: " + o.getClass());
+ };
+
@SuppressWarnings("unchecked")
- public static Object checkAndCopy(Object val) {
+ public static Object deepCopy(Object val, Function<Object, ?> copier) {
if (val == null) {
// OK
} else if (val instanceof Number) {
@@ -101,9 +106,9 @@ public static Object checkAndCopy(Object val) {
// JsonObject, JsonArray or any user defined type that can shared across the cluster
val = ((Shareable) val).copy();
} else if (val instanceof Map) {
- val = (new JsonObject((Map) val)).copy();
+ val = (new JsonObject((Map) val)).copy(copier);
} else if (val instanceof List) {
- val = (new JsonArray((List) val)).copy();
+ val = (new JsonArray((List) val)).copy(copier);
} else if (val instanceof Buffer) {
val = ((Buffer) val).copy();
} else if (val instanceof byte[]) {
@@ -113,7 +118,7 @@ public static Object checkAndCopy(Object val) {
} else if (val instanceof Enum) {
// OK
} else {
- throw new IllegalStateException("Illegal type in Json: " + val.getClass());
+ val = copier.apply(val);
}
return val;
}
| diff --git a/src/test/java/io/vertx/core/json/JsonArrayTest.java b/src/test/java/io/vertx/core/json/JsonArrayTest.java
--- a/src/test/java/io/vertx/core/json/JsonArrayTest.java
+++ b/src/test/java/io/vertx/core/json/JsonArrayTest.java
@@ -21,6 +21,7 @@
import java.math.BigInteger;
import java.time.Instant;
import java.util.*;
+import java.util.function.Function;
import java.util.stream.Collectors;
import static io.vertx.core.json.impl.JsonUtil.BASE64_DECODER;
@@ -797,49 +798,55 @@ public void testCopy() {
}
@Test
- public void testInvalidValsOnCopy() {
- List<Object> invalid = new ArrayList<>();
- invalid.add(new SomeClass());
- JsonArray arr = new JsonArray(invalid);
+ public void testInvalidValsOnCopy1() {
+ SomeClass invalid = new SomeClass();
+ JsonArray array = new JsonArray(Collections.singletonList(invalid));
try {
- arr.copy();
+ array.copy();
fail();
} catch (IllegalStateException e) {
// OK
}
+ array = array.copy(SomeClass.CLONER);
+ assertTrue(array.getValue(0) instanceof SomeClass);
+ assertNotSame(array.getValue(0), invalid);
}
@Test
public void testInvalidValsOnCopy2() {
- List<Object> invalid = new ArrayList<>();
- List<Object> invalid2 = new ArrayList<>();
- invalid2.add(new SomeClass());
- invalid.add(invalid2);
- JsonArray arr = new JsonArray(invalid);
+ SomeClass invalid = new SomeClass();
+ JsonArray array = new JsonArray(Collections.singletonList(Collections.singletonMap("foo", invalid)));
try {
- arr.copy();
+ array.copy();
fail();
} catch (IllegalStateException e) {
// OK
}
+ array = array.copy(SomeClass.CLONER);
+ assertTrue(array.getJsonObject(0).getValue("foo") instanceof SomeClass);
+ assertNotSame(array.getJsonObject(0).getValue("foo"), invalid);
}
@Test
public void testInvalidValsOnCopy3() {
- List<Object> invalid = new ArrayList<>();
- Map<String, Object> invalid2 = new HashMap<>();
- invalid2.put("foo", new SomeClass());
- invalid.add(invalid2);
- JsonArray arr = new JsonArray(invalid);
+ SomeClass invalid = new SomeClass();
+ JsonArray array = new JsonArray(Collections.singletonList(Collections.singletonList(invalid)));
try {
- arr.copy();
+ array.copy();
fail();
} catch (IllegalStateException e) {
// OK
}
+ array = array.copy(SomeClass.CLONER);
+ assertTrue(array.getJsonArray(0).getValue(0) instanceof SomeClass);
+ assertNotSame(array.getJsonArray(0).getValue(0), invalid);
}
- class SomeClass {
+ static class SomeClass {
+ static final Function<Object, ?> CLONER = o -> {
+ assertTrue(o instanceof SomeClass);
+ return new SomeClass();
+ };
}
@Test
diff --git a/src/test/java/io/vertx/core/json/JsonObjectTest.java b/src/test/java/io/vertx/core/json/JsonObjectTest.java
--- a/src/test/java/io/vertx/core/json/JsonObjectTest.java
+++ b/src/test/java/io/vertx/core/json/JsonObjectTest.java
@@ -23,6 +23,7 @@
import java.time.temporal.ChronoUnit;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Function;
import java.util.stream.Collectors;
import static io.vertx.core.json.impl.JsonUtil.BASE64_DECODER;
@@ -1392,48 +1393,54 @@ public void testCopy() {
@Test
public void testInvalidValsOnCopy1() {
- Map<String, Object> invalid = new HashMap<>();
- invalid.put("foo", new SomeClass());
- JsonObject object = new JsonObject(invalid);
+ SomeClass invalid = new SomeClass();
+ JsonObject object = new JsonObject(Collections.singletonMap("foo", invalid));
try {
object.copy();
fail();
} catch (IllegalStateException e) {
// OK
}
+ object = object.copy(SomeClass.CLONER);
+ assertTrue(object.getValue("foo") instanceof SomeClass);
+ assertNotSame(object.getValue("foo"), invalid);
}
@Test
public void testInvalidValsOnCopy2() {
- Map<String, Object> invalid = new HashMap<>();
- Map<String, Object> invalid2 = new HashMap<>();
- invalid2.put("foo", new SomeClass());
- invalid.put("bar",invalid2);
- JsonObject object = new JsonObject(invalid);
+ SomeClass invalid = new SomeClass();
+ JsonObject object = new JsonObject(Collections.singletonMap("bar", Collections.singletonMap("foo", invalid)));
try {
object.copy();
fail();
} catch (IllegalStateException e) {
// OK
}
+ object = object.copy(SomeClass.CLONER);
+ assertTrue(object.getJsonObject("bar").getValue("foo") instanceof SomeClass);
+ assertNotSame(object.getJsonObject("bar").getValue("foo"), invalid);
}
@Test
public void testInvalidValsOnCopy3() {
- Map<String, Object> invalid = new HashMap<>();
- List<Object> invalid2 = new ArrayList<>();
- invalid2.add(new SomeClass());
- invalid.put("bar",invalid2);
- JsonObject object = new JsonObject(invalid);
+ SomeClass invalid = new SomeClass();
+ JsonObject object = new JsonObject(Collections.singletonMap("bar", Collections.singletonList(invalid)));
try {
object.copy();
fail();
} catch (IllegalStateException e) {
// OK
}
+ object = object.copy(SomeClass.CLONER);
+ assertTrue(object.getJsonArray("bar").getValue(0) instanceof SomeClass);
+ assertNotSame(object.getJsonArray("bar").getValue(0), invalid);
}
- class SomeClass {
+ static class SomeClass {
+ static final Function<Object, ?> CLONER = o -> {
+ assertTrue(o instanceof SomeClass);
+ return new SomeClass();
+ };
}
@Test
| Implementation of JSON RFCs introduced inconsistency to Json{Object|Array} API
### Version
Vert.x 4.0.0
### Context
The implementation of the JSON RFCs (#3197) removed the type checking from the methods JsonObject.put(String, Object) and JsonArray.add(int, Object) in order to allow future POJO mappings. Nevertheless, the methods Json{Object|Array}.copy() still check types. This leads to a situation where developers can successfully create Json{Object|Array}s that cannot be copied afterwards.
When using the following piece of code for creating valid JsonObjects
```
try {
jsonObject.put(key, value);
}
catch (IllegalStateException e) {
jsonObject.put(key, value.toString());
}
```
which are copied afterwards, #3197 breaks that piece of code and is not backward compatible.
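For reference, the `copy(Function<Object, ?> cloner)` overload introduced by this PR lets the caller decide how such values are copied instead of failing; a minimal sketch (`SomeClass` and its `duplicate()` method are hypothetical stand-ins for any type the JSON implementation cannot copy itself):
```
class SomeClass {
  SomeClass duplicate() { return new SomeClass(); } // hypothetical copy method
}

JsonObject object = new JsonObject().put("custom", new SomeClass());
// object.copy() would throw IllegalStateException("Illegal type in Json: ..."),
// but the cloner overload delegates copying of the unknown value to the caller:
JsonObject copied = object.copy(value -> ((SomeClass) value).duplicate());
```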
### Do you have a reproducer?
A small reproducer can be found at https://github.com/riemenschneider/JDBCPreparedQueryTest. The test fails with Vert.x 4.0.0 and runs successfully using Vert.x 3.9.5.
### Steps to reproduce
1. Clone the reproducer mentioned above.
2. Run ``gradlew test``.
3. Change Vert.x dependency in ``build.gradle`` to ``io.vertx:vertx-core:3.9.5`` and run again ``gradlew test``.
| do you mean https://github.com/riemenschneider/JSONObjectTest as the reproducer? | 2021-03-12T08:28:23Z | 4 |
eclipse-vertx/vert.x | 3,800 | eclipse-vertx__vert.x-3800 | [
"3799"
] | f05a5e29be3c10f413d08970c45d3044423e47f0 | diff --git a/src/main/java/io/vertx/core/impl/VertxBuilder.java b/src/main/java/io/vertx/core/impl/VertxBuilder.java
--- a/src/main/java/io/vertx/core/impl/VertxBuilder.java
+++ b/src/main/java/io/vertx/core/impl/VertxBuilder.java
@@ -18,6 +18,7 @@
import io.vertx.core.net.impl.transport.Transport;
import io.vertx.core.spi.VertxMetricsFactory;
import io.vertx.core.spi.VertxServiceProvider;
+import io.vertx.core.spi.VertxThreadFactory;
import io.vertx.core.spi.VertxTracerFactory;
import io.vertx.core.spi.cluster.ClusterManager;
import io.vertx.core.spi.cluster.NodeSelector;
@@ -28,7 +29,6 @@
import java.util.ArrayList;
import java.util.Collection;
-import java.util.Objects;
/**
* Vertx builder for creating vertx instances with SPI overrides.
@@ -43,6 +43,7 @@ public class VertxBuilder {
private ClusterManager clusterManager;
private NodeSelector clusterNodeSelector;
private VertxTracer tracer;
+ private VertxThreadFactory threadFactory;
private VertxMetrics metrics;
private FileResolver fileResolver;
@@ -158,6 +159,23 @@ public VertxBuilder metrics(VertxMetrics metrics) {
return this;
}
+ /**
+ * @return the {@code VertxThreadFactory} to use
+ */
+ public VertxThreadFactory threadFactory() {
+ return threadFactory;
+ }
+
+ /**
+ * Set the {@code VertxThreadFactory} instance to use.
+   * @param factory the thread factory
+ * @return this builder instance
+ */
+ public VertxBuilder threadFactory(VertxThreadFactory factory) {
+ this.threadFactory = factory;
+ return this;
+ }
+
/**
* Build and return the vertx instance
*/
@@ -169,7 +187,8 @@ public Vertx vertx() {
metrics,
tracer,
transport,
- fileResolver);
+ fileResolver,
+ threadFactory);
vertx.init();
return vertx;
}
@@ -188,7 +207,8 @@ public void clusteredVertx(Handler<AsyncResult<Vertx>> handler) {
metrics,
tracer,
transport,
- fileResolver);
+ fileResolver,
+ threadFactory);
vertx.initClustered(options, handler);
}
@@ -205,6 +225,7 @@ public VertxBuilder init() {
initClusterManager(options, providers);
providers.addAll(ServiceHelper.loadFactories(VertxServiceProvider.class));
initProviders(providers);
+ initThreadFactory();
return this;
}
@@ -266,4 +287,11 @@ private void initFileResolver() {
}
fileResolver = new FileResolver(options.getFileSystemOptions());
}
+
+ private void initThreadFactory() {
+ if (threadFactory != null) {
+ return;
+ }
+ threadFactory = VertxThreadFactory.INSTANCE;
+ }
}
diff --git a/src/main/java/io/vertx/core/impl/VertxImpl.java b/src/main/java/io/vertx/core/impl/VertxImpl.java
--- a/src/main/java/io/vertx/core/impl/VertxImpl.java
+++ b/src/main/java/io/vertx/core/impl/VertxImpl.java
@@ -56,6 +56,7 @@
import io.vertx.core.shareddata.SharedData;
import io.vertx.core.shareddata.impl.SharedDataImpl;
import io.vertx.core.spi.VerticleFactory;
+import io.vertx.core.spi.VertxThreadFactory;
import io.vertx.core.spi.cluster.ClusterManager;
import io.vertx.core.spi.cluster.NodeSelector;
import io.vertx.core.spi.metrics.Metrics;
@@ -71,6 +72,7 @@
import java.net.InetSocketAddress;
import java.util.*;
import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;
@@ -111,6 +113,7 @@ public class VertxImpl implements VertxInternal, MetricsProvider {
private final Map<ServerID, NetServerImpl> sharedNetServers = new HashMap<>();
final WorkerPool workerPool;
final WorkerPool internalBlockingPool;
+ private final VertxThreadFactory threadFactory;
private final ThreadFactory eventLoopThreadFactory;
private final EventLoopGroup eventLoopGroup;
private final EventLoopGroup acceptorEventLoopGroup;
@@ -132,7 +135,7 @@ public class VertxImpl implements VertxInternal, MetricsProvider {
private final VertxTracer tracer;
private final ThreadLocal<WeakReference<AbstractContext>> stickyContext = new ThreadLocal<>();
- VertxImpl(VertxOptions options, ClusterManager clusterManager, NodeSelector nodeSelector, VertxMetrics metrics, VertxTracer<?, ?> tracer, Transport transport, FileResolver fileResolver) {
+ VertxImpl(VertxOptions options, ClusterManager clusterManager, NodeSelector nodeSelector, VertxMetrics metrics, VertxTracer<?, ?> tracer, Transport transport, FileResolver fileResolver, VertxThreadFactory threadFactory) {
// Sanity check
if (Vertx.currentContext() != null) {
log.warn("You're already on a Vert.x context, are you sure you want to create a new Vertx instance?");
@@ -141,9 +144,9 @@ public class VertxImpl implements VertxInternal, MetricsProvider {
maxEventLoopExecTime = options.getMaxEventLoopExecuteTime();
maxEventLoopExecTimeUnit = options.getMaxEventLoopExecuteTimeUnit();
checker = new BlockedThreadChecker(options.getBlockedThreadCheckInterval(), options.getBlockedThreadCheckIntervalUnit(), options.getWarningExceptionTime(), options.getWarningExceptionTimeUnit());
- eventLoopThreadFactory = new VertxThreadFactory("vert.x-eventloop-thread-", checker, false, maxEventLoopExecTime, maxEventLoopExecTimeUnit);
+ eventLoopThreadFactory = createThreadFactory(maxEventLoopExecTime, maxEventLoopExecTimeUnit, "vert.x-eventloop-thread-", false);
eventLoopGroup = transport.eventLoopGroup(Transport.IO_EVENT_LOOP_GROUP, options.getEventLoopPoolSize(), eventLoopThreadFactory, NETTY_IO_RATIO);
- ThreadFactory acceptorEventLoopThreadFactory = new VertxThreadFactory("vert.x-acceptor-thread-", checker, false, options.getMaxEventLoopExecuteTime(), options.getMaxEventLoopExecuteTimeUnit());
+ ThreadFactory acceptorEventLoopThreadFactory = createThreadFactory(options.getMaxEventLoopExecuteTime(), options.getMaxEventLoopExecuteTimeUnit(), "vert.x-acceptor-thread-", false);
// The acceptor event loop thread needs to be from a different pool otherwise can get lags in accepted connections
// under a lot of load
acceptorEventLoopGroup = transport.eventLoopGroup(Transport.ACCEPTOR_EVENT_LOOP_GROUP, 1, acceptorEventLoopThreadFactory, 100);
@@ -151,10 +154,10 @@ public class VertxImpl implements VertxInternal, MetricsProvider {
int workerPoolSize = options.getWorkerPoolSize();
ExecutorService workerExec = new ThreadPoolExecutor(workerPoolSize, workerPoolSize,
0L, TimeUnit.MILLISECONDS, new LinkedTransferQueue<>(),
- new VertxThreadFactory("vert.x-worker-thread-", checker, true, options.getMaxWorkerExecuteTime(), options.getMaxWorkerExecuteTimeUnit()));
+ createThreadFactory(options.getMaxWorkerExecuteTime(), options.getMaxWorkerExecuteTimeUnit(), "vert.x-worker-thread-", true));
PoolMetrics workerPoolMetrics = metrics != null ? metrics.createPoolMetrics("worker", "vert.x-worker-thread", options.getWorkerPoolSize()) : null;
ExecutorService internalBlockingExec = Executors.newFixedThreadPool(options.getInternalBlockingPoolSize(),
- new VertxThreadFactory("vert.x-internal-blocking-", checker, true, options.getMaxWorkerExecuteTime(), options.getMaxWorkerExecuteTimeUnit()));
+ createThreadFactory(options.getMaxWorkerExecuteTime(), options.getMaxWorkerExecuteTimeUnit(), "vert.x-internal-blocking-", true));
PoolMetrics internalBlockingPoolMetrics = metrics != null ? metrics.createPoolMetrics("worker", "vert.x-internal-blocking", options.getInternalBlockingPoolSize()) : null;
internalBlockingPool = new WorkerPool(internalBlockingExec, internalBlockingPoolMetrics);
namedWorkerPools = new HashMap<>();
@@ -163,6 +166,7 @@ public class VertxImpl implements VertxInternal, MetricsProvider {
maxWorkerExecTime = options.getMaxWorkerExecuteTime();
maxWorkerExecTimeUnit = options.getMaxWorkerExecuteTimeUnit();
+ this.threadFactory = threadFactory;
this.metrics = metrics;
this.transport = transport;
this.fileResolver = fileResolver;
@@ -1115,7 +1119,7 @@ public synchronized SharedWorkerPool createSharedWorkerPool(String name, int poo
}
SharedWorkerPool sharedWorkerPool = namedWorkerPools.get(name);
if (sharedWorkerPool == null) {
- ExecutorService workerExec = Executors.newFixedThreadPool(poolSize, new VertxThreadFactory(name + "-", checker, true, maxExecuteTime, maxExecuteTimeUnit));
+ ExecutorService workerExec = Executors.newFixedThreadPool(poolSize, createThreadFactory(maxExecuteTime, maxExecuteTimeUnit, name + "-", true));
PoolMetrics workerMetrics = metrics != null ? metrics.createPoolMetrics("worker", name, poolSize) : null;
namedWorkerPools.put(name, sharedWorkerPool = new SharedWorkerPool(name, workerExec, workerMetrics));
} else {
@@ -1124,6 +1128,18 @@ public synchronized SharedWorkerPool createSharedWorkerPool(String name, int poo
return sharedWorkerPool;
}
+ private ThreadFactory createThreadFactory(long maxExecuteTime, TimeUnit maxExecuteTimeUnit, String prefix, boolean worker) {
+ AtomicInteger threadCount = new AtomicInteger(0);
+ return runnable -> {
+ VertxThread thread = threadFactory.newVertxThread(runnable, prefix + threadCount.getAndIncrement(), worker, maxExecuteTime, maxExecuteTimeUnit);
+ checker.registerThread(thread, thread);
+      // Vert.x threads are NOT daemons - we want them to prevent JVM exit so an embedding user doesn't
+      // have to explicitly prevent the JVM from exiting.
+ thread.setDaemon(false);
+ return thread;
+ };
+ }
+
@Override
public Vertx exceptionHandler(Handler<Throwable> handler) {
exceptionHandler = handler;
diff --git a/src/main/java/io/vertx/core/impl/VertxThread.java b/src/main/java/io/vertx/core/impl/VertxThread.java
--- a/src/main/java/io/vertx/core/impl/VertxThread.java
+++ b/src/main/java/io/vertx/core/impl/VertxThread.java
@@ -18,7 +18,7 @@
/**
* @author <a href="mailto:nmaurer@redhat.com">Norman Maurer</a>
*/
-public final class VertxThread extends FastThreadLocalThread implements BlockedThreadChecker.Task {
+public class VertxThread extends FastThreadLocalThread implements BlockedThreadChecker.Task {
static final String DISABLE_TCCL_PROP_NAME = "vertx.disableTCCL";
static final boolean DISABLE_TCCL = Boolean.getBoolean(DISABLE_TCCL_PROP_NAME);
diff --git a/src/main/java/io/vertx/core/impl/VertxThreadFactory.java b/src/main/java/io/vertx/core/impl/VertxThreadFactory.java
deleted file mode 100644
--- a/src/main/java/io/vertx/core/impl/VertxThreadFactory.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2011-2019 Contributors to the Eclipse Foundation
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
- * which is available at https://www.apache.org/licenses/LICENSE-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
- */
-
-package io.vertx.core.impl;
-
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-
-/**
- * @author <a href="http://tfox.org">Tim Fox</a>
- */
-public class VertxThreadFactory implements ThreadFactory {
-
- private final String prefix;
- private final AtomicInteger threadCount = new AtomicInteger(0);
- private final BlockedThreadChecker checker;
- private final boolean worker;
- private final long maxExecTime;
- private final TimeUnit maxExecTimeUnit;
-
- VertxThreadFactory(String prefix, BlockedThreadChecker checker, boolean worker, long maxExecTime, TimeUnit maxExecTimeUnit) {
- this.prefix = prefix;
- this.checker = checker;
- this.worker = worker;
- this.maxExecTime = maxExecTime;
- this.maxExecTimeUnit = maxExecTimeUnit;
- }
-
- public Thread newThread(Runnable runnable) {
- VertxThread t = new VertxThread(runnable, prefix + threadCount.getAndIncrement(), worker, maxExecTime, maxExecTimeUnit);
- // Vert.x threads are NOT daemons - we want them to prevent JVM exit so embededd user doesn't
- // have to explicitly prevent JVM from exiting.
- if (checker != null) {
- checker.registerThread(t, t);
- }
- // I know the default is false anyway, but just to be explicit- Vert.x threads are NOT daemons
- // we want to prevent the JVM from exiting until Vert.x instances are closed
- t.setDaemon(false);
- return t;
- }
-}
diff --git a/src/main/java/io/vertx/core/spi/VertxThreadFactory.java b/src/main/java/io/vertx/core/spi/VertxThreadFactory.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/io/vertx/core/spi/VertxThreadFactory.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2011-2019 Contributors to the Eclipse Foundation
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+ * which is available at https://www.apache.org/licenses/LICENSE-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+ */
+
+package io.vertx.core.spi;
+
+import io.vertx.core.impl.VertxBuilder;
+import io.vertx.core.impl.VertxThread;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * @author <a href="http://tfox.org">Tim Fox</a>
+ */
+public interface VertxThreadFactory extends VertxServiceProvider {
+
+ VertxThreadFactory INSTANCE = new VertxThreadFactory() {
+ };
+
+ @Override
+ default void init(VertxBuilder builder) {
+ if (builder.threadFactory() == null) {
+ builder.threadFactory(this);
+ }
+ }
+
+ default VertxThread newVertxThread(Runnable target, String name, boolean worker, long maxExecTime, TimeUnit maxExecTimeUnit) {
+ return new VertxThread(target, name, worker, maxExecTime, maxExecTimeUnit);
+ }
+}
| diff --git a/src/test/benchmarks/io/vertx/core/impl/VertxExecutorService.java b/src/test/benchmarks/io/vertx/core/impl/VertxExecutorService.java
--- a/src/test/benchmarks/io/vertx/core/impl/VertxExecutorService.java
+++ b/src/test/benchmarks/io/vertx/core/impl/VertxExecutorService.java
@@ -11,6 +11,8 @@
package io.vertx.core.impl;
+import io.vertx.core.spi.VertxThreadFactory;
+
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
@@ -22,8 +24,8 @@ public class VertxExecutorService extends ThreadPoolExecutor {
public VertxExecutorService(int maxThreads, String prefix) {
super(maxThreads, maxThreads,
- 0L, TimeUnit.MILLISECONDS,
- new LinkedBlockingQueue<>(),
- new VertxThreadFactory(prefix, new BlockedThreadChecker(10000, TimeUnit.MILLISECONDS, 10000, TimeUnit.MILLISECONDS), false, 10000, TimeUnit.NANOSECONDS));
+ 0L, TimeUnit.MILLISECONDS,
+ new LinkedBlockingQueue<>(),
+ r -> VertxThreadFactory.INSTANCE.newVertxThread(r, prefix, false, 10000, TimeUnit.NANOSECONDS));
}
}
diff --git a/src/test/classpath/threadfactory/META-INF/services/io.vertx.core.spi.VertxServiceProvider b/src/test/classpath/threadfactory/META-INF/services/io.vertx.core.spi.VertxServiceProvider
new file mode 100644
--- /dev/null
+++ b/src/test/classpath/threadfactory/META-INF/services/io.vertx.core.spi.VertxServiceProvider
@@ -0,0 +1 @@
+io.vertx.it.CustomVertxThreadFactory
diff --git a/src/test/java/io/vertx/core/impl/GlobalEventExecutorNotificationTest.java b/src/test/java/io/vertx/core/impl/GlobalEventExecutorNotificationTest.java
--- a/src/test/java/io/vertx/core/impl/GlobalEventExecutorNotificationTest.java
+++ b/src/test/java/io/vertx/core/impl/GlobalEventExecutorNotificationTest.java
@@ -60,7 +60,7 @@ public ChannelFactory<? extends Channel> channelFactory(boolean domainSocket) {
throw cause;
};
}
- }).vertx();
+ }).init().vertx();
vertx.createNetServer().connectHandler(so -> {
fail();
@@ -83,7 +83,7 @@ public ChannelFactory<? extends ServerChannel> serverChannelFactory(boolean doma
throw cause;
};
}
- }).vertx();
+ }).init().vertx();
vertx.createNetServer()
.connectHandler(so -> fail())
@@ -103,7 +103,7 @@ public ChannelFactory<? extends ServerChannel> serverChannelFactory(boolean doma
throw cause;
};
}
- }).vertx();
+ }).init().vertx();
vertx.createHttpServer()
.requestHandler(req -> fail())
diff --git a/src/test/java/io/vertx/it/CustomVertxThread.java b/src/test/java/io/vertx/it/CustomVertxThread.java
new file mode 100644
--- /dev/null
+++ b/src/test/java/io/vertx/it/CustomVertxThread.java
@@ -0,0 +1,11 @@
+package io.vertx.it;
+
+import io.vertx.core.impl.VertxThread;
+
+import java.util.concurrent.TimeUnit;
+
+public class CustomVertxThread extends VertxThread {
+ public CustomVertxThread(Runnable target, String name, boolean worker, long maxExecTime, TimeUnit maxExecTimeUnit) {
+ super(target, name, worker, maxExecTime, maxExecTimeUnit);
+ }
+}
diff --git a/src/test/java/io/vertx/it/CustomVertxThreadFactory.java b/src/test/java/io/vertx/it/CustomVertxThreadFactory.java
new file mode 100644
--- /dev/null
+++ b/src/test/java/io/vertx/it/CustomVertxThreadFactory.java
@@ -0,0 +1,14 @@
+package io.vertx.it;
+
+import io.vertx.core.impl.VertxThread;
+import io.vertx.core.spi.VertxThreadFactory;
+
+import java.util.concurrent.TimeUnit;
+
+public class CustomVertxThreadFactory implements VertxThreadFactory {
+
+ @Override
+ public VertxThread newVertxThread(Runnable target, String name, boolean worker, long maxExecTime, TimeUnit maxExecTimeUnit) {
+ return new CustomVertxThread(target, name, worker, maxExecTime, maxExecTimeUnit);
+ }
+}
diff --git a/src/test/java/io/vertx/it/VertxThreadFactoryTest.java b/src/test/java/io/vertx/it/VertxThreadFactoryTest.java
new file mode 100644
--- /dev/null
+++ b/src/test/java/io/vertx/it/VertxThreadFactoryTest.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2011-2019 Contributors to the Eclipse Foundation
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+ * which is available at https://www.apache.org/licenses/LICENSE-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+ */
+
+package io.vertx.it;
+
+import io.vertx.test.core.VertxTestBase;
+import org.junit.Test;
+
+/**
+ * @author <a href="mailto:julien@julienviet.com">Julien Viet</a>
+ */
+public class VertxThreadFactoryTest extends VertxTestBase {
+
+ @Test
+ public void testJsonObject() {
+ vertx.runOnContext(v -> {
+ Thread current = Thread.currentThread();
+ assertEquals(CustomVertxThread.class, current.getClass());
+ testComplete();
+ });
+ await();
+ }
+}
| VertxThread factory
A runtime embedding might want to have Vert.x use a specific subclass of `VertxThread`. This is a port of https://github.com/eclipse-vertx/vert.x/pull/3749 using the Vert.x 4 service provider mechanism, with a simplification of the factory responsibility.
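A minimal embedding sketch based on the SPI this PR adds, mirroring the `CustomVertxThreadFactory` used in the tests above (the `com.example` names are illustrative):
```
import io.vertx.core.impl.VertxThread;
import io.vertx.core.spi.VertxThreadFactory;

import java.util.concurrent.TimeUnit;

// Discovered through the service file
// META-INF/services/io.vertx.core.spi.VertxServiceProvider,
// which contains the line: com.example.MyVertxThreadFactory
public class MyVertxThreadFactory implements VertxThreadFactory {

  static class MyVertxThread extends VertxThread {
    MyVertxThread(Runnable target, String name, boolean worker, long maxExecTime, TimeUnit unit) {
      super(target, name, worker, maxExecTime, unit);
      // runtime-specific thread setup could go here
    }
  }

  @Override
  public VertxThread newVertxThread(Runnable target, String name, boolean worker,
                                    long maxExecTime, TimeUnit maxExecTimeUnit) {
    return new MyVertxThread(target, name, worker, maxExecTime, maxExecTimeUnit);
  }
}
```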
| 2021-02-10T12:53:46Z | 4 |
|
eclipse-vertx/vert.x | 3,764 | eclipse-vertx__vert.x-3764 | [
"3760"
] | 8b23acadbe45b5057997ca3f64989a37d0752e29 | diff --git a/src/main/java/io/vertx/core/http/impl/Http1xServerRequest.java b/src/main/java/io/vertx/core/http/impl/Http1xServerRequest.java
--- a/src/main/java/io/vertx/core/http/impl/Http1xServerRequest.java
+++ b/src/main/java/io/vertx/core/http/impl/Http1xServerRequest.java
@@ -13,12 +13,14 @@
import io.netty.handler.codec.http.*;
import io.netty.handler.codec.http.HttpHeaders;
+import io.netty.handler.codec.http.multipart.Attribute;
import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder;
import io.netty.handler.codec.http.multipart.InterfaceHttpData;
import io.vertx.codegen.annotations.Nullable;
import io.vertx.core.Future;
import io.vertx.core.Handler;
import io.vertx.core.MultiMap;
+import io.vertx.core.Promise;
import io.vertx.core.buffer.Buffer;
import io.vertx.core.http.*;
import io.vertx.core.http.Cookie;
@@ -551,6 +553,18 @@ private void onEnd() {
private void endDecode() {
try {
decoder.offer(LastHttpContent.EMPTY_LAST_CONTENT);
+ while (decoder.hasNext()) {
+ InterfaceHttpData data = decoder.next();
+ if (data instanceof Attribute) {
+ Attribute attr = (Attribute) data;
+ try {
+ attributes().add(attr.getName(), attr.getValue());
+ } catch (Exception e) {
+            // Should never happen; handle it anyway just in case
+ handleException(e);
+ }
+ }
+ }
} catch (HttpPostRequestDecoder.ErrorDataDecoderException e) {
handleException(e);
} catch (HttpPostRequestDecoder.EndOfDataDecoderException e) {
diff --git a/src/main/java/io/vertx/core/http/impl/Http2ServerRequestImpl.java b/src/main/java/io/vertx/core/http/impl/Http2ServerRequestImpl.java
--- a/src/main/java/io/vertx/core/http/impl/Http2ServerRequestImpl.java
+++ b/src/main/java/io/vertx/core/http/impl/Http2ServerRequestImpl.java
@@ -16,6 +16,7 @@
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.LastHttpContent;
+import io.netty.handler.codec.http.multipart.Attribute;
import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder;
import io.netty.handler.codec.http.multipart.InterfaceHttpData;
import io.netty.handler.codec.http2.Http2Headers;
@@ -215,6 +216,18 @@ void handleEnd(MultiMap trailers) {
if (postRequestDecoder != null) {
try {
postRequestDecoder.offer(LastHttpContent.EMPTY_LAST_CONTENT);
+ while (postRequestDecoder.hasNext()) {
+ InterfaceHttpData data = postRequestDecoder.next();
+ if (data instanceof Attribute) {
+ Attribute attr = (Attribute) data;
+ try {
+ formAttributes().add(attr.getName(), attr.getValue());
+ } catch (Exception e) {
+              // Should never happen; handle it anyway just in case
+ handleException(e);
+ }
+ }
+ }
} catch (HttpPostRequestDecoder.EndOfDataDecoderException e) {
// ignore this as it is expected
} catch (Exception e) {
diff --git a/src/main/java/io/vertx/core/http/impl/NettyFileUploadDataFactory.java b/src/main/java/io/vertx/core/http/impl/NettyFileUploadDataFactory.java
--- a/src/main/java/io/vertx/core/http/impl/NettyFileUploadDataFactory.java
+++ b/src/main/java/io/vertx/core/http/impl/NettyFileUploadDataFactory.java
@@ -11,13 +11,11 @@
package io.vertx.core.http.impl;
-import io.netty.handler.codec.http.HttpConstants;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.multipart.Attribute;
import io.netty.handler.codec.http.multipart.DefaultHttpDataFactory;
import io.netty.handler.codec.http.multipart.FileUpload;
import io.netty.handler.codec.http.multipart.MemoryAttribute;
-import io.vertx.core.Context;
import io.vertx.core.Handler;
import io.vertx.core.http.HttpServerFileUpload;
import io.vertx.core.http.HttpServerRequest;
@@ -78,18 +76,22 @@ public FileUpload createFileUpload(HttpRequest httpRequest, String name, String
return nettyUpload;
}
- /**
- * Vert.x attribute that will add itself to the HTTP request form attributes when completed.
- */
- private class VertxAttribute extends MemoryAttribute {
+ private static class VertxAttribute extends MemoryAttribute {
public VertxAttribute(String name, long definedSize) {
super(name, definedSize);
setMaxSize(1024);
}
+ String value;
@Override
protected void setCompleted() {
super.setCompleted();
- request.formAttributes().set(getName(), getValue());
+      // Capture the value before the underlying buffer gets recycled and corrupted.
+      // This can be called multiple times (e.g. "vert+x" then "vert x").
+ value = super.getValue();
+ }
+ @Override
+ public String getValue() {
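+    // return the value captured in setCompleted rather than re-reading the (possibly recycled) buffer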
+ return value;
}
}
}
| diff --git a/src/test/java/io/vertx/core/http/Http2ServerTest.java b/src/test/java/io/vertx/core/http/Http2ServerTest.java
--- a/src/test/java/io/vertx/core/http/Http2ServerTest.java
+++ b/src/test/java/io/vertx/core/http/Http2ServerTest.java
@@ -649,7 +649,7 @@ public void testPostFileUpload() throws Exception {
});
});
req.endHandler(v -> {
- assertEquals(5, req.formAttributes().size());
+ assertEquals(0, req.formAttributes().size());
req.response().putHeader("content-type", "text/plain").end("done");
});
});
diff --git a/src/test/java/io/vertx/core/http/HttpServerFileUploadTest.java b/src/test/java/io/vertx/core/http/HttpServerFileUploadTest.java
--- a/src/test/java/io/vertx/core/http/HttpServerFileUploadTest.java
+++ b/src/test/java/io/vertx/core/http/HttpServerFileUploadTest.java
@@ -26,6 +26,8 @@
import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.net.URLEncoder;
+import java.util.Arrays;
+import java.util.Collections;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
@@ -325,11 +327,7 @@ private void testFormUploadFile(String filename,
resp.bodyHandler(body -> {
assertEquals(0, body.length());
});
- if (cancelStream) {
- assertEquals(0, attributeCount.get());
- } else {
- assertEquals(3, attributeCount.get());
- }
+ assertEquals(0, attributeCount.get());
complete();
}));
}
@@ -354,8 +352,16 @@ public void testFormUploadAttributes() throws Exception {
attributeCount.set(attrs.size());
assertEquals("vert x", attrs.get("framework"));
assertEquals("vert x", req.getFormAttribute("framework"));
+ assertEquals("vert x", req.formAttributes().get("framework"));
+ assertEquals(Collections.singletonList("vert x"), req.formAttributes().getAll("framework"));
assertEquals("jvm", attrs.get("runson"));
assertEquals("jvm", req.getFormAttribute("runson"));
+ assertEquals("jvm", req.formAttributes().get("runson"));
+ assertEquals(Collections.singletonList("jvm"), req.formAttributes().getAll("runson"));
+ assertEquals("0", attrs.get("list"));
+ assertEquals("0", req.getFormAttribute("list"));
+ assertEquals("0", req.formAttributes().get("list"));
+ assertEquals(Arrays.asList("0", "1"), req.formAttributes().getAll("list"));
req.response().end();
});
}
@@ -363,7 +369,12 @@ public void testFormUploadAttributes() throws Exception {
Buffer buffer = Buffer.buffer();
// Make sure we have one param that needs url encoding
- buffer.appendString("framework=" + URLEncoder.encode("vert x", "UTF-8") + "&runson=jvm", "UTF-8");
+ buffer.appendString(
+ "framework=" + URLEncoder.encode("vert x", "UTF-8") +
+ "&runson=jvm" +
+ "&list=0" +
+ "&list=1"
+ , "UTF-8");
server.listen(testAddress, onSuccess(s -> {
client.request(new RequestOptions(requestOptions)
.setMethod(HttpMethod.POST)
@@ -377,7 +388,7 @@ public void testFormUploadAttributes() throws Exception {
resp.bodyHandler(body -> {
assertEquals(0, body.length());
});
- assertEquals(2, attributeCount.get());
+ assertEquals(3, attributeCount.get());
testComplete();
}));
}));
| Form upload attribute might get corrupted
The current server form upload processing creates the form multi-map only when the HTTP server request ends. Some attributes are backed by a `ByteBuf` that the request decoder can recycle in the meantime, overwriting the attribute value. We should capture each form attribute value as a string as soon as the attribute completes to avoid this.
This issue was found when upgrading to Netty 4.1.52.Final: that version changes the implementation of the post request decoder, and the corruption shows up with very large file uploads (32M); it was found in the vertx-web-client tests.
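For illustration, here is a minimal sketch of the capture-on-complete idea applied in the `NettyFileUploadDataFactory` change above; it assumes Netty's `MemoryAttribute` API, and the class name is hypothetical:
```
import io.netty.handler.codec.http.multipart.MemoryAttribute;

// Hypothetical sketch: copy the attribute value out as a String the moment
// the attribute completes, before the decoder can recycle its ByteBuf.
class EagerAttribute extends MemoryAttribute {

  private String captured;

  EagerAttribute(String name, long definedSize) {
    super(name, definedSize);
  }

  @Override
  protected void setCompleted() {
    super.setCompleted();
    // read the value now, while the backing buffer is still intact
    captured = super.getValue();
  }

  @Override
  public String getValue() {
    return captured;
  }
}
```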
| 2021-01-25T12:46:10Z | 4 |
|
eclipse-vertx/vert.x | 3,754 | eclipse-vertx__vert.x-3754 | [
"3753"
] | a7b7214c58b85703613cd658c7f32d339a842566 | diff --git a/src/main/java/io/vertx/core/impl/future/CompositeFutureImpl.java b/src/main/java/io/vertx/core/impl/future/CompositeFutureImpl.java
--- a/src/main/java/io/vertx/core/impl/future/CompositeFutureImpl.java
+++ b/src/main/java/io/vertx/core/impl/future/CompositeFutureImpl.java
@@ -17,6 +17,7 @@
import io.vertx.core.Handler;
import java.util.function.Function;
+import java.util.stream.Collectors;
/**
* @author <a href="mailto:julien@julienviet.com">Julien Viet</a>
@@ -188,4 +189,16 @@ public CompositeFuture onSuccess(Handler<CompositeFuture> handler) {
public CompositeFuture onFailure(Handler<Throwable> handler) {
return (CompositeFuture)super.onFailure(handler);
}
+
+ @Override
+ protected void formatValue(Object value, StringBuilder sb) {
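+    // 'value' here is this composite itself; format the underlying results to avoid infinite recursion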
+ sb.append('(');
+ for (int i = 0;i < results.length;i++) {
+ if (i > 0) {
+ sb.append(',');
+ }
+ sb.append(results[i]);
+ }
+ sb.append(')');
+ }
}
diff --git a/src/main/java/io/vertx/core/impl/future/FutureImpl.java b/src/main/java/io/vertx/core/impl/future/FutureImpl.java
--- a/src/main/java/io/vertx/core/impl/future/FutureImpl.java
+++ b/src/main/java/io/vertx/core/impl/future/FutureImpl.java
@@ -19,7 +19,6 @@
import java.util.ArrayList;
import java.util.Objects;
-import java.util.function.Function;
/**
* Future implementation.
@@ -208,12 +207,22 @@ public String toString() {
return "Future{cause=" + ((Throwable)value).getMessage() + "}";
}
if (value != null) {
- return "Future{result=" + (value == NULL_VALUE ? "null" : value) + "}";
+ if (value == NULL_VALUE) {
+ return "Future{result=null}";
+ }
+ StringBuilder sb = new StringBuilder("Future{result=");
+ formatValue(value, sb);
+ sb.append("}");
+ return sb.toString();
}
return "Future{unresolved}";
}
}
+ protected void formatValue(Object value, StringBuilder sb) {
+ sb.append(value);
+ }
+
private static class ListenerArray<T> extends ArrayList<Listener<T>> implements Listener<T> {
@Override
public void onSuccess(T value) {
| diff --git a/src/test/java/io/vertx/core/CompositeFutureTest.java b/src/test/java/io/vertx/core/CompositeFutureTest.java
--- a/src/test/java/io/vertx/core/CompositeFutureTest.java
+++ b/src/test/java/io/vertx/core/CompositeFutureTest.java
@@ -509,4 +509,10 @@ public void testCompositeFutureMulti() {
p2.complete(4);
assertEquals(2, count.get());
}
+
+ @Test
+ public void testToString() {
+ assertEquals("Future{result=(Future{result=null},Future{result=null})}", CompositeFuture.all(Future.succeededFuture(), Future.succeededFuture()).toString());
+ assertEquals("Future{result=(Future{result=true},Future{result=false})}", CompositeFuture.all(Future.succeededFuture(true), Future.succeededFuture(false)).toString());
+ }
}
| CompositeFuture toString gives stack overflow for completed composite future
### Version
4.0.0
### Context
I get a stack overflow exception while logging a completed CompositeFuture
### Do you have a reproducer?
Yes:
```
import io.vertx.core.CompositeFuture;
import static io.vertx.core.Future.succeededFuture;
import static java.util.Collections.singletonList;
public class Simple {
public static void main(String[] args) {
CompositeFuture compositeFuture = CompositeFuture.all(singletonList(succeededFuture()));
System.out.println(compositeFuture.toString());
}
}
```
The code above throws a `StackOverflowError`
### Steps to reproduce
Run program above
### Extra
Java 11
Does it also happen with Vert.x 3?
No, it works with 3.9.5.
The issue seems to be that the `value` of `CompositeFutureImpl` is a reference to the composite future itself, so the default `toString` recurses.
I would suggest overriding `toString` to show the completed count and size instead, e.g. `CompositeFutureImpl(2/5 completed)`? But that might be more of a feature request.
I think we can kill 2 birds with one stone here | 2021-01-21T16:27:25Z | 4 |
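A small usage sketch of the fixed behavior, based on the output asserted in the test above (the demo class is hypothetical):
```
import io.vertx.core.CompositeFuture;
import io.vertx.core.Future;

public class ToStringDemo {
  public static void main(String[] args) {
    CompositeFuture cf = CompositeFuture.all(Future.succeededFuture(true), Future.succeededFuture(false));
    // the CompositeFutureImpl.formatValue override renders the underlying
    // results instead of recursing into the composite's own value, printing:
    // Future{result=(Future{result=true},Future{result=false})}
    System.out.println(cf);
  }
}
```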
eclipse-vertx/vert.x | 3,718 | eclipse-vertx__vert.x-3718 | [
"3714"
] | 6e8a672768eb5f148def804233c7015c1bdb1765 | diff --git a/src/main/generated/io/vertx/core/DeploymentOptionsConverter.java b/src/main/generated/io/vertx/core/DeploymentOptionsConverter.java
new file mode 100644
--- /dev/null
+++ b/src/main/generated/io/vertx/core/DeploymentOptionsConverter.java
@@ -0,0 +1,125 @@
+package io.vertx.core;
+
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.JsonArray;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+
+/**
+ * Converter for {@link io.vertx.core.DeploymentOptions}.
+ * NOTE: This class has been automatically generated from the {@link io.vertx.core.DeploymentOptions} original class using Vert.x codegen.
+ */
+ class DeploymentOptionsConverter {
+
+ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, DeploymentOptions obj) {
+ for (java.util.Map.Entry<String, Object> member : json) {
+ switch (member.getKey()) {
+ case "config":
+ if (member.getValue() instanceof JsonObject) {
+ obj.setConfig(((JsonObject)member.getValue()).copy());
+ }
+ break;
+ case "extraClasspath":
+ if (member.getValue() instanceof JsonArray) {
+ java.util.ArrayList<java.lang.String> list = new java.util.ArrayList<>();
+ ((Iterable<Object>)member.getValue()).forEach( item -> {
+ if (item instanceof String)
+ list.add((String)item);
+ });
+ obj.setExtraClasspath(list);
+ }
+ break;
+ case "ha":
+ if (member.getValue() instanceof Boolean) {
+ obj.setHa((Boolean)member.getValue());
+ }
+ break;
+ case "instances":
+ if (member.getValue() instanceof Number) {
+ obj.setInstances(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "isolatedClasses":
+ if (member.getValue() instanceof JsonArray) {
+ java.util.ArrayList<java.lang.String> list = new java.util.ArrayList<>();
+ ((Iterable<Object>)member.getValue()).forEach( item -> {
+ if (item instanceof String)
+ list.add((String)item);
+ });
+ obj.setIsolatedClasses(list);
+ }
+ break;
+ case "isolationGroup":
+ if (member.getValue() instanceof String) {
+ obj.setIsolationGroup((String)member.getValue());
+ }
+ break;
+ case "maxWorkerExecuteTime":
+ if (member.getValue() instanceof Number) {
+ obj.setMaxWorkerExecuteTime(((Number)member.getValue()).longValue());
+ }
+ break;
+ case "maxWorkerExecuteTimeUnit":
+ if (member.getValue() instanceof String) {
+ obj.setMaxWorkerExecuteTimeUnit(java.util.concurrent.TimeUnit.valueOf((String)member.getValue()));
+ }
+ break;
+ case "multiThreaded":
+ if (member.getValue() instanceof Boolean) {
+ obj.setMultiThreaded((Boolean)member.getValue());
+ }
+ break;
+ case "worker":
+ if (member.getValue() instanceof Boolean) {
+ obj.setWorker((Boolean)member.getValue());
+ }
+ break;
+ case "workerPoolName":
+ if (member.getValue() instanceof String) {
+ obj.setWorkerPoolName((String)member.getValue());
+ }
+ break;
+ case "workerPoolSize":
+ if (member.getValue() instanceof Number) {
+ obj.setWorkerPoolSize(((Number)member.getValue()).intValue());
+ }
+ break;
+ }
+ }
+ }
+
+ static void toJson(DeploymentOptions obj, JsonObject json) {
+ toJson(obj, json.getMap());
+ }
+
+ static void toJson(DeploymentOptions obj, java.util.Map<String, Object> json) {
+ if (obj.getConfig() != null) {
+ json.put("config", obj.getConfig());
+ }
+ if (obj.getExtraClasspath() != null) {
+ JsonArray array = new JsonArray();
+ obj.getExtraClasspath().forEach(item -> array.add(item));
+ json.put("extraClasspath", array);
+ }
+ json.put("ha", obj.isHa());
+ json.put("instances", obj.getInstances());
+ if (obj.getIsolatedClasses() != null) {
+ JsonArray array = new JsonArray();
+ obj.getIsolatedClasses().forEach(item -> array.add(item));
+ json.put("isolatedClasses", array);
+ }
+ if (obj.getIsolationGroup() != null) {
+ json.put("isolationGroup", obj.getIsolationGroup());
+ }
+ json.put("maxWorkerExecuteTime", obj.getMaxWorkerExecuteTime());
+ if (obj.getMaxWorkerExecuteTimeUnit() != null) {
+ json.put("maxWorkerExecuteTimeUnit", obj.getMaxWorkerExecuteTimeUnit().name());
+ }
+ json.put("multiThreaded", obj.isMultiThreaded());
+ json.put("worker", obj.isWorker());
+ if (obj.getWorkerPoolName() != null) {
+ json.put("workerPoolName", obj.getWorkerPoolName());
+ }
+ json.put("workerPoolSize", obj.getWorkerPoolSize());
+ }
+}
diff --git a/src/main/generated/io/vertx/core/VertxOptionsConverter.java b/src/main/generated/io/vertx/core/VertxOptionsConverter.java
new file mode 100644
--- /dev/null
+++ b/src/main/generated/io/vertx/core/VertxOptionsConverter.java
@@ -0,0 +1,211 @@
+package io.vertx.core;
+
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.JsonArray;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+
+/**
+ * Converter for {@link io.vertx.core.VertxOptions}.
+ * NOTE: This class has been automatically generated from the {@link io.vertx.core.VertxOptions} original class using Vert.x codegen.
+ */
+ class VertxOptionsConverter {
+
+ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, VertxOptions obj) {
+ for (java.util.Map.Entry<String, Object> member : json) {
+ switch (member.getKey()) {
+ case "addressResolverOptions":
+ if (member.getValue() instanceof JsonObject) {
+ obj.setAddressResolverOptions(new io.vertx.core.dns.AddressResolverOptions((JsonObject)member.getValue()));
+ }
+ break;
+ case "blockedThreadCheckInterval":
+ if (member.getValue() instanceof Number) {
+ obj.setBlockedThreadCheckInterval(((Number)member.getValue()).longValue());
+ }
+ break;
+ case "blockedThreadCheckIntervalUnit":
+ if (member.getValue() instanceof String) {
+ obj.setBlockedThreadCheckIntervalUnit(java.util.concurrent.TimeUnit.valueOf((String)member.getValue()));
+ }
+ break;
+ case "clusterHost":
+ if (member.getValue() instanceof String) {
+ obj.setClusterHost((String)member.getValue());
+ }
+ break;
+ case "clusterPingInterval":
+ if (member.getValue() instanceof Number) {
+ obj.setClusterPingInterval(((Number)member.getValue()).longValue());
+ }
+ break;
+ case "clusterPingReplyInterval":
+ if (member.getValue() instanceof Number) {
+ obj.setClusterPingReplyInterval(((Number)member.getValue()).longValue());
+ }
+ break;
+ case "clusterPort":
+ if (member.getValue() instanceof Number) {
+ obj.setClusterPort(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "clusterPublicHost":
+ if (member.getValue() instanceof String) {
+ obj.setClusterPublicHost((String)member.getValue());
+ }
+ break;
+ case "clusterPublicPort":
+ if (member.getValue() instanceof Number) {
+ obj.setClusterPublicPort(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "clustered":
+ if (member.getValue() instanceof Boolean) {
+ obj.setClustered((Boolean)member.getValue());
+ }
+ break;
+ case "eventBusOptions":
+ if (member.getValue() instanceof JsonObject) {
+ obj.setEventBusOptions(new io.vertx.core.eventbus.EventBusOptions((JsonObject)member.getValue()));
+ }
+ break;
+ case "eventLoopPoolSize":
+ if (member.getValue() instanceof Number) {
+ obj.setEventLoopPoolSize(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "fileResolverCachingEnabled":
+ if (member.getValue() instanceof Boolean) {
+ obj.setFileResolverCachingEnabled((Boolean)member.getValue());
+ }
+ break;
+ case "fileSystemOptions":
+ if (member.getValue() instanceof JsonObject) {
+ obj.setFileSystemOptions(new io.vertx.core.file.FileSystemOptions((JsonObject)member.getValue()));
+ }
+ break;
+ case "haEnabled":
+ if (member.getValue() instanceof Boolean) {
+ obj.setHAEnabled((Boolean)member.getValue());
+ }
+ break;
+ case "haGroup":
+ if (member.getValue() instanceof String) {
+ obj.setHAGroup((String)member.getValue());
+ }
+ break;
+ case "internalBlockingPoolSize":
+ if (member.getValue() instanceof Number) {
+ obj.setInternalBlockingPoolSize(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "maxEventLoopExecuteTime":
+ if (member.getValue() instanceof Number) {
+ obj.setMaxEventLoopExecuteTime(((Number)member.getValue()).longValue());
+ }
+ break;
+ case "maxEventLoopExecuteTimeUnit":
+ if (member.getValue() instanceof String) {
+ obj.setMaxEventLoopExecuteTimeUnit(java.util.concurrent.TimeUnit.valueOf((String)member.getValue()));
+ }
+ break;
+ case "maxWorkerExecuteTime":
+ if (member.getValue() instanceof Number) {
+ obj.setMaxWorkerExecuteTime(((Number)member.getValue()).longValue());
+ }
+ break;
+ case "maxWorkerExecuteTimeUnit":
+ if (member.getValue() instanceof String) {
+ obj.setMaxWorkerExecuteTimeUnit(java.util.concurrent.TimeUnit.valueOf((String)member.getValue()));
+ }
+ break;
+ case "metricsOptions":
+ if (member.getValue() instanceof JsonObject) {
+ obj.setMetricsOptions(new io.vertx.core.metrics.MetricsOptions((JsonObject)member.getValue()));
+ }
+ break;
+ case "preferNativeTransport":
+ if (member.getValue() instanceof Boolean) {
+ obj.setPreferNativeTransport((Boolean)member.getValue());
+ }
+ break;
+ case "quorumSize":
+ if (member.getValue() instanceof Number) {
+ obj.setQuorumSize(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "warningExceptionTime":
+ if (member.getValue() instanceof Number) {
+ obj.setWarningExceptionTime(((Number)member.getValue()).longValue());
+ }
+ break;
+ case "warningExceptionTimeUnit":
+ if (member.getValue() instanceof String) {
+ obj.setWarningExceptionTimeUnit(java.util.concurrent.TimeUnit.valueOf((String)member.getValue()));
+ }
+ break;
+ case "workerPoolSize":
+ if (member.getValue() instanceof Number) {
+ obj.setWorkerPoolSize(((Number)member.getValue()).intValue());
+ }
+ break;
+ }
+ }
+ }
+
+ static void toJson(VertxOptions obj, JsonObject json) {
+ toJson(obj, json.getMap());
+ }
+
+ static void toJson(VertxOptions obj, java.util.Map<String, Object> json) {
+ if (obj.getAddressResolverOptions() != null) {
+ json.put("addressResolverOptions", obj.getAddressResolverOptions().toJson());
+ }
+ json.put("blockedThreadCheckInterval", obj.getBlockedThreadCheckInterval());
+ if (obj.getBlockedThreadCheckIntervalUnit() != null) {
+ json.put("blockedThreadCheckIntervalUnit", obj.getBlockedThreadCheckIntervalUnit().name());
+ }
+ if (obj.getClusterHost() != null) {
+ json.put("clusterHost", obj.getClusterHost());
+ }
+ json.put("clusterPingInterval", obj.getClusterPingInterval());
+ json.put("clusterPingReplyInterval", obj.getClusterPingReplyInterval());
+ json.put("clusterPort", obj.getClusterPort());
+ if (obj.getClusterPublicHost() != null) {
+ json.put("clusterPublicHost", obj.getClusterPublicHost());
+ }
+ json.put("clusterPublicPort", obj.getClusterPublicPort());
+ json.put("clustered", obj.isClustered());
+ if (obj.getEventBusOptions() != null) {
+ json.put("eventBusOptions", obj.getEventBusOptions().toJson());
+ }
+ json.put("eventLoopPoolSize", obj.getEventLoopPoolSize());
+ json.put("fileResolverCachingEnabled", obj.isFileResolverCachingEnabled());
+ if (obj.getFileSystemOptions() != null) {
+ json.put("fileSystemOptions", obj.getFileSystemOptions().toJson());
+ }
+ json.put("haEnabled", obj.isHAEnabled());
+ if (obj.getHAGroup() != null) {
+ json.put("haGroup", obj.getHAGroup());
+ }
+ json.put("internalBlockingPoolSize", obj.getInternalBlockingPoolSize());
+ json.put("maxEventLoopExecuteTime", obj.getMaxEventLoopExecuteTime());
+ if (obj.getMaxEventLoopExecuteTimeUnit() != null) {
+ json.put("maxEventLoopExecuteTimeUnit", obj.getMaxEventLoopExecuteTimeUnit().name());
+ }
+ json.put("maxWorkerExecuteTime", obj.getMaxWorkerExecuteTime());
+ if (obj.getMaxWorkerExecuteTimeUnit() != null) {
+ json.put("maxWorkerExecuteTimeUnit", obj.getMaxWorkerExecuteTimeUnit().name());
+ }
+ if (obj.getMetricsOptions() != null) {
+ json.put("metricsOptions", obj.getMetricsOptions().toJson());
+ }
+ json.put("preferNativeTransport", obj.getPreferNativeTransport());
+ json.put("quorumSize", obj.getQuorumSize());
+ json.put("warningExceptionTime", obj.getWarningExceptionTime());
+ if (obj.getWarningExceptionTimeUnit() != null) {
+ json.put("warningExceptionTimeUnit", obj.getWarningExceptionTimeUnit().name());
+ }
+ json.put("workerPoolSize", obj.getWorkerPoolSize());
+ }
+}
diff --git a/src/main/generated/io/vertx/core/cli/ArgumentConverter.java b/src/main/generated/io/vertx/core/cli/ArgumentConverter.java
new file mode 100644
--- /dev/null
+++ b/src/main/generated/io/vertx/core/cli/ArgumentConverter.java
@@ -0,0 +1,75 @@
+package io.vertx.core.cli;
+
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.JsonArray;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+
+/**
+ * Converter for {@link io.vertx.core.cli.Argument}.
+ * NOTE: This class has been automatically generated from the {@link io.vertx.core.cli.Argument} original class using Vert.x codegen.
+ */
+ class ArgumentConverter {
+
+ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, Argument obj) {
+ for (java.util.Map.Entry<String, Object> member : json) {
+ switch (member.getKey()) {
+ case "argName":
+ if (member.getValue() instanceof String) {
+ obj.setArgName((String)member.getValue());
+ }
+ break;
+ case "defaultValue":
+ if (member.getValue() instanceof String) {
+ obj.setDefaultValue((String)member.getValue());
+ }
+ break;
+ case "description":
+ if (member.getValue() instanceof String) {
+ obj.setDescription((String)member.getValue());
+ }
+ break;
+ case "hidden":
+ if (member.getValue() instanceof Boolean) {
+ obj.setHidden((Boolean)member.getValue());
+ }
+ break;
+ case "index":
+ if (member.getValue() instanceof Number) {
+ obj.setIndex(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "multiValued":
+ if (member.getValue() instanceof Boolean) {
+ obj.setMultiValued((Boolean)member.getValue());
+ }
+ break;
+ case "required":
+ if (member.getValue() instanceof Boolean) {
+ obj.setRequired((Boolean)member.getValue());
+ }
+ break;
+ }
+ }
+ }
+
+ static void toJson(Argument obj, JsonObject json) {
+ toJson(obj, json.getMap());
+ }
+
+ static void toJson(Argument obj, java.util.Map<String, Object> json) {
+ if (obj.getArgName() != null) {
+ json.put("argName", obj.getArgName());
+ }
+ if (obj.getDefaultValue() != null) {
+ json.put("defaultValue", obj.getDefaultValue());
+ }
+ if (obj.getDescription() != null) {
+ json.put("description", obj.getDescription());
+ }
+ json.put("hidden", obj.isHidden());
+ json.put("index", obj.getIndex());
+ json.put("multiValued", obj.isMultiValued());
+ json.put("required", obj.isRequired());
+ }
+}
diff --git a/src/main/generated/io/vertx/core/cli/OptionConverter.java b/src/main/generated/io/vertx/core/cli/OptionConverter.java
new file mode 100644
--- /dev/null
+++ b/src/main/generated/io/vertx/core/cli/OptionConverter.java
@@ -0,0 +1,123 @@
+package io.vertx.core.cli;
+
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.JsonArray;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+
+/**
+ * Converter for {@link io.vertx.core.cli.Option}.
+ * NOTE: This class has been automatically generated from the {@link io.vertx.core.cli.Option} original class using Vert.x codegen.
+ */
+ class OptionConverter {
+
+ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, Option obj) {
+ for (java.util.Map.Entry<String, Object> member : json) {
+ switch (member.getKey()) {
+ case "argName":
+ if (member.getValue() instanceof String) {
+ obj.setArgName((String)member.getValue());
+ }
+ break;
+ case "choices":
+ if (member.getValue() instanceof JsonArray) {
+ java.util.LinkedHashSet<java.lang.String> list = new java.util.LinkedHashSet<>();
+ ((Iterable<Object>)member.getValue()).forEach( item -> {
+ if (item instanceof String)
+ list.add((String)item);
+ });
+ obj.setChoices(list);
+ }
+ break;
+ case "defaultValue":
+ if (member.getValue() instanceof String) {
+ obj.setDefaultValue((String)member.getValue());
+ }
+ break;
+ case "description":
+ if (member.getValue() instanceof String) {
+ obj.setDescription((String)member.getValue());
+ }
+ break;
+ case "flag":
+ if (member.getValue() instanceof Boolean) {
+ obj.setFlag((Boolean)member.getValue());
+ }
+ break;
+ case "help":
+ if (member.getValue() instanceof Boolean) {
+ obj.setHelp((Boolean)member.getValue());
+ }
+ break;
+ case "hidden":
+ if (member.getValue() instanceof Boolean) {
+ obj.setHidden((Boolean)member.getValue());
+ }
+ break;
+ case "longName":
+ if (member.getValue() instanceof String) {
+ obj.setLongName((String)member.getValue());
+ }
+ break;
+ case "multiValued":
+ if (member.getValue() instanceof Boolean) {
+ obj.setMultiValued((Boolean)member.getValue());
+ }
+ break;
+ case "name":
+ break;
+ case "required":
+ if (member.getValue() instanceof Boolean) {
+ obj.setRequired((Boolean)member.getValue());
+ }
+ break;
+ case "shortName":
+ if (member.getValue() instanceof String) {
+ obj.setShortName((String)member.getValue());
+ }
+ break;
+ case "singleValued":
+ if (member.getValue() instanceof Boolean) {
+ obj.setSingleValued((Boolean)member.getValue());
+ }
+ break;
+ }
+ }
+ }
+
+ static void toJson(Option obj, JsonObject json) {
+ toJson(obj, json.getMap());
+ }
+
+ static void toJson(Option obj, java.util.Map<String, Object> json) {
+ if (obj.getArgName() != null) {
+ json.put("argName", obj.getArgName());
+ }
+ if (obj.getChoices() != null) {
+ JsonArray array = new JsonArray();
+ obj.getChoices().forEach(item -> array.add(item));
+ json.put("choices", array);
+ }
+ if (obj.getDefaultValue() != null) {
+ json.put("defaultValue", obj.getDefaultValue());
+ }
+ if (obj.getDescription() != null) {
+ json.put("description", obj.getDescription());
+ }
+ json.put("flag", obj.isFlag());
+ json.put("help", obj.isHelp());
+ json.put("hidden", obj.isHidden());
+ if (obj.getLongName() != null) {
+ json.put("longName", obj.getLongName());
+ }
+ json.put("multiValued", obj.isMultiValued());
+ if (obj.getName() != null) {
+ json.put("name", obj.getName());
+ }
+ json.put("required", obj.isRequired());
+ if (obj.getShortName() != null) {
+ json.put("shortName", obj.getShortName());
+ }
+ json.put("singleValued", obj.isSingleValued());
+ }
+}
diff --git a/src/main/generated/io/vertx/core/datagram/DatagramSocketOptionsConverter.java b/src/main/generated/io/vertx/core/datagram/DatagramSocketOptionsConverter.java
new file mode 100644
--- /dev/null
+++ b/src/main/generated/io/vertx/core/datagram/DatagramSocketOptionsConverter.java
@@ -0,0 +1,59 @@
+package io.vertx.core.datagram;
+
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.JsonArray;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+
+/**
+ * Converter for {@link io.vertx.core.datagram.DatagramSocketOptions}.
+ * NOTE: This class has been automatically generated from the {@link io.vertx.core.datagram.DatagramSocketOptions} original class using Vert.x codegen.
+ */
+ class DatagramSocketOptionsConverter {
+
+ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, DatagramSocketOptions obj) {
+ for (java.util.Map.Entry<String, Object> member : json) {
+ switch (member.getKey()) {
+ case "broadcast":
+ if (member.getValue() instanceof Boolean) {
+ obj.setBroadcast((Boolean)member.getValue());
+ }
+ break;
+ case "ipV6":
+ if (member.getValue() instanceof Boolean) {
+ obj.setIpV6((Boolean)member.getValue());
+ }
+ break;
+ case "loopbackModeDisabled":
+ if (member.getValue() instanceof Boolean) {
+ obj.setLoopbackModeDisabled((Boolean)member.getValue());
+ }
+ break;
+ case "multicastNetworkInterface":
+ if (member.getValue() instanceof String) {
+ obj.setMulticastNetworkInterface((String)member.getValue());
+ }
+ break;
+ case "multicastTimeToLive":
+ if (member.getValue() instanceof Number) {
+ obj.setMulticastTimeToLive(((Number)member.getValue()).intValue());
+ }
+ break;
+ }
+ }
+ }
+
+ static void toJson(DatagramSocketOptions obj, JsonObject json) {
+ toJson(obj, json.getMap());
+ }
+
+ static void toJson(DatagramSocketOptions obj, java.util.Map<String, Object> json) {
+ json.put("broadcast", obj.isBroadcast());
+ json.put("ipV6", obj.isIpV6());
+ json.put("loopbackModeDisabled", obj.isLoopbackModeDisabled());
+ if (obj.getMulticastNetworkInterface() != null) {
+ json.put("multicastNetworkInterface", obj.getMulticastNetworkInterface());
+ }
+ json.put("multicastTimeToLive", obj.getMulticastTimeToLive());
+ }
+}
diff --git a/src/main/generated/io/vertx/core/dns/AddressResolverOptionsConverter.java b/src/main/generated/io/vertx/core/dns/AddressResolverOptionsConverter.java
new file mode 100644
--- /dev/null
+++ b/src/main/generated/io/vertx/core/dns/AddressResolverOptionsConverter.java
@@ -0,0 +1,127 @@
+package io.vertx.core.dns;
+
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.JsonArray;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+
+/**
+ * Converter for {@link io.vertx.core.dns.AddressResolverOptions}.
+ * NOTE: This class has been automatically generated from the {@link io.vertx.core.dns.AddressResolverOptions} original class using Vert.x codegen.
+ */
+ class AddressResolverOptionsConverter {
+
+ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, AddressResolverOptions obj) {
+ for (java.util.Map.Entry<String, Object> member : json) {
+ switch (member.getKey()) {
+ case "cacheMaxTimeToLive":
+ if (member.getValue() instanceof Number) {
+ obj.setCacheMaxTimeToLive(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "cacheMinTimeToLive":
+ if (member.getValue() instanceof Number) {
+ obj.setCacheMinTimeToLive(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "cacheNegativeTimeToLive":
+ if (member.getValue() instanceof Number) {
+ obj.setCacheNegativeTimeToLive(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "hostsPath":
+ if (member.getValue() instanceof String) {
+ obj.setHostsPath((String)member.getValue());
+ }
+ break;
+ case "hostsValue":
+ if (member.getValue() instanceof String) {
+ obj.setHostsValue(io.vertx.core.buffer.Buffer.buffer(java.util.Base64.getDecoder().decode((String)member.getValue())));
+ }
+ break;
+ case "maxQueries":
+ if (member.getValue() instanceof Number) {
+ obj.setMaxQueries(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "ndots":
+ if (member.getValue() instanceof Number) {
+ obj.setNdots(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "optResourceEnabled":
+ if (member.getValue() instanceof Boolean) {
+ obj.setOptResourceEnabled((Boolean)member.getValue());
+ }
+ break;
+ case "queryTimeout":
+ if (member.getValue() instanceof Number) {
+ obj.setQueryTimeout(((Number)member.getValue()).longValue());
+ }
+ break;
+ case "rdFlag":
+ if (member.getValue() instanceof Boolean) {
+ obj.setRdFlag((Boolean)member.getValue());
+ }
+ break;
+ case "rotateServers":
+ if (member.getValue() instanceof Boolean) {
+ obj.setRotateServers((Boolean)member.getValue());
+ }
+ break;
+ case "searchDomains":
+ if (member.getValue() instanceof JsonArray) {
+ java.util.ArrayList<java.lang.String> list = new java.util.ArrayList<>();
+ ((Iterable<Object>)member.getValue()).forEach( item -> {
+ if (item instanceof String)
+ list.add((String)item);
+ });
+ obj.setSearchDomains(list);
+ }
+ break;
+ case "servers":
+ if (member.getValue() instanceof JsonArray) {
+ java.util.ArrayList<java.lang.String> list = new java.util.ArrayList<>();
+ ((Iterable<Object>)member.getValue()).forEach( item -> {
+ if (item instanceof String)
+ list.add((String)item);
+ });
+ obj.setServers(list);
+ }
+ break;
+ }
+ }
+ }
+
+ static void toJson(AddressResolverOptions obj, JsonObject json) {
+ toJson(obj, json.getMap());
+ }
+
+ static void toJson(AddressResolverOptions obj, java.util.Map<String, Object> json) {
+ json.put("cacheMaxTimeToLive", obj.getCacheMaxTimeToLive());
+ json.put("cacheMinTimeToLive", obj.getCacheMinTimeToLive());
+ json.put("cacheNegativeTimeToLive", obj.getCacheNegativeTimeToLive());
+ if (obj.getHostsPath() != null) {
+ json.put("hostsPath", obj.getHostsPath());
+ }
+ if (obj.getHostsValue() != null) {
+ json.put("hostsValue", java.util.Base64.getEncoder().encodeToString(obj.getHostsValue().getBytes()));
+ }
+ json.put("maxQueries", obj.getMaxQueries());
+ json.put("ndots", obj.getNdots());
+ json.put("optResourceEnabled", obj.isOptResourceEnabled());
+ json.put("queryTimeout", obj.getQueryTimeout());
+ json.put("rdFlag", obj.getRdFlag());
+ json.put("rotateServers", obj.isRotateServers());
+ if (obj.getSearchDomains() != null) {
+ JsonArray array = new JsonArray();
+ obj.getSearchDomains().forEach(item -> array.add(item));
+ json.put("searchDomains", array);
+ }
+ if (obj.getServers() != null) {
+ JsonArray array = new JsonArray();
+ obj.getServers().forEach(item -> array.add(item));
+ json.put("servers", array);
+ }
+ }
+}
diff --git a/src/main/generated/io/vertx/core/dns/DnsClientOptionsConverter.java b/src/main/generated/io/vertx/core/dns/DnsClientOptionsConverter.java
new file mode 100644
--- /dev/null
+++ b/src/main/generated/io/vertx/core/dns/DnsClientOptionsConverter.java
@@ -0,0 +1,59 @@
+package io.vertx.core.dns;
+
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.JsonArray;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+
+/**
+ * Converter for {@link io.vertx.core.dns.DnsClientOptions}.
+ * NOTE: This class has been automatically generated from the {@link io.vertx.core.dns.DnsClientOptions} original class using Vert.x codegen.
+ */
+public class DnsClientOptionsConverter {
+
+ public static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, DnsClientOptions obj) {
+ for (java.util.Map.Entry<String, Object> member : json) {
+ switch (member.getKey()) {
+ case "host":
+ if (member.getValue() instanceof String) {
+ obj.setHost((String)member.getValue());
+ }
+ break;
+ case "logActivity":
+ if (member.getValue() instanceof Boolean) {
+ obj.setLogActivity((Boolean)member.getValue());
+ }
+ break;
+ case "port":
+ if (member.getValue() instanceof Number) {
+ obj.setPort(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "queryTimeout":
+ if (member.getValue() instanceof Number) {
+ obj.setQueryTimeout(((Number)member.getValue()).longValue());
+ }
+ break;
+ case "recursionDesired":
+ if (member.getValue() instanceof Boolean) {
+ obj.setRecursionDesired((Boolean)member.getValue());
+ }
+ break;
+ }
+ }
+ }
+
+ public static void toJson(DnsClientOptions obj, JsonObject json) {
+ toJson(obj, json.getMap());
+ }
+
+ public static void toJson(DnsClientOptions obj, java.util.Map<String, Object> json) {
+ if (obj.getHost() != null) {
+ json.put("host", obj.getHost());
+ }
+ json.put("logActivity", obj.getLogActivity());
+ json.put("port", obj.getPort());
+ json.put("queryTimeout", obj.getQueryTimeout());
+ json.put("recursionDesired", obj.isRecursionDesired());
+ }
+}
diff --git a/src/main/generated/io/vertx/core/eventbus/EventBusOptionsConverter.java b/src/main/generated/io/vertx/core/eventbus/EventBusOptionsConverter.java
new file mode 100644
--- /dev/null
+++ b/src/main/generated/io/vertx/core/eventbus/EventBusOptionsConverter.java
@@ -0,0 +1,347 @@
+package io.vertx.core.eventbus;
+
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.JsonArray;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+
+/**
+ * Converter for {@link io.vertx.core.eventbus.EventBusOptions}.
+ * NOTE: This class has been automatically generated from the {@link io.vertx.core.eventbus.EventBusOptions} original class using Vert.x codegen.
+ */
+ class EventBusOptionsConverter {
+
+ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, EventBusOptions obj) {
+ for (java.util.Map.Entry<String, Object> member : json) {
+ switch (member.getKey()) {
+ case "acceptBacklog":
+ if (member.getValue() instanceof Number) {
+ obj.setAcceptBacklog(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "clientAuth":
+ if (member.getValue() instanceof String) {
+ obj.setClientAuth(io.vertx.core.http.ClientAuth.valueOf((String)member.getValue()));
+ }
+ break;
+ case "clusterPingInterval":
+ if (member.getValue() instanceof Number) {
+ obj.setClusterPingInterval(((Number)member.getValue()).longValue());
+ }
+ break;
+ case "clusterPingReplyInterval":
+ if (member.getValue() instanceof Number) {
+ obj.setClusterPingReplyInterval(((Number)member.getValue()).longValue());
+ }
+ break;
+ case "clusterPublicHost":
+ if (member.getValue() instanceof String) {
+ obj.setClusterPublicHost((String)member.getValue());
+ }
+ break;
+ case "clusterPublicPort":
+ if (member.getValue() instanceof Number) {
+ obj.setClusterPublicPort(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "clustered":
+ if (member.getValue() instanceof Boolean) {
+ obj.setClustered((Boolean)member.getValue());
+ }
+ break;
+ case "connectTimeout":
+ if (member.getValue() instanceof Number) {
+ obj.setConnectTimeout(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "crlPaths":
+ if (member.getValue() instanceof JsonArray) {
+ ((Iterable<Object>)member.getValue()).forEach( item -> {
+ if (item instanceof String)
+ obj.addCrlPath((String)item);
+ });
+ }
+ break;
+ case "crlValues":
+ if (member.getValue() instanceof JsonArray) {
+ ((Iterable<Object>)member.getValue()).forEach( item -> {
+ if (item instanceof String)
+ obj.addCrlValue(io.vertx.core.buffer.Buffer.buffer(java.util.Base64.getDecoder().decode((String)item)));
+ });
+ }
+ break;
+ case "enabledCipherSuites":
+ if (member.getValue() instanceof JsonArray) {
+ ((Iterable<Object>)member.getValue()).forEach( item -> {
+ if (item instanceof String)
+ obj.addEnabledCipherSuite((String)item);
+ });
+ }
+ break;
+ case "enabledSecureTransportProtocols":
+ if (member.getValue() instanceof JsonArray) {
+ java.util.LinkedHashSet<java.lang.String> list = new java.util.LinkedHashSet<>();
+ ((Iterable<Object>)member.getValue()).forEach( item -> {
+ if (item instanceof String)
+ list.add((String)item);
+ });
+ obj.setEnabledSecureTransportProtocols(list);
+ }
+ break;
+ case "host":
+ if (member.getValue() instanceof String) {
+ obj.setHost((String)member.getValue());
+ }
+ break;
+ case "idleTimeout":
+ if (member.getValue() instanceof Number) {
+ obj.setIdleTimeout(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "idleTimeoutUnit":
+ if (member.getValue() instanceof String) {
+ obj.setIdleTimeoutUnit(java.util.concurrent.TimeUnit.valueOf((String)member.getValue()));
+ }
+ break;
+ case "jdkSslEngineOptions":
+ if (member.getValue() instanceof JsonObject) {
+ obj.setJdkSslEngineOptions(new io.vertx.core.net.JdkSSLEngineOptions((JsonObject)member.getValue()));
+ }
+ break;
+ case "keyStoreOptions":
+ if (member.getValue() instanceof JsonObject) {
+ obj.setKeyStoreOptions(new io.vertx.core.net.JksOptions((JsonObject)member.getValue()));
+ }
+ break;
+ case "logActivity":
+ if (member.getValue() instanceof Boolean) {
+ obj.setLogActivity((Boolean)member.getValue());
+ }
+ break;
+ case "openSslEngineOptions":
+ if (member.getValue() instanceof JsonObject) {
+ obj.setOpenSslEngineOptions(new io.vertx.core.net.OpenSSLEngineOptions((JsonObject)member.getValue()));
+ }
+ break;
+ case "pemKeyCertOptions":
+ if (member.getValue() instanceof JsonObject) {
+ obj.setPemKeyCertOptions(new io.vertx.core.net.PemKeyCertOptions((JsonObject)member.getValue()));
+ }
+ break;
+ case "pemTrustOptions":
+ if (member.getValue() instanceof JsonObject) {
+ obj.setPemTrustOptions(new io.vertx.core.net.PemTrustOptions((JsonObject)member.getValue()));
+ }
+ break;
+ case "pfxKeyCertOptions":
+ if (member.getValue() instanceof JsonObject) {
+ obj.setPfxKeyCertOptions(new io.vertx.core.net.PfxOptions((JsonObject)member.getValue()));
+ }
+ break;
+ case "pfxTrustOptions":
+ if (member.getValue() instanceof JsonObject) {
+ obj.setPfxTrustOptions(new io.vertx.core.net.PfxOptions((JsonObject)member.getValue()));
+ }
+ break;
+ case "port":
+ if (member.getValue() instanceof Number) {
+ obj.setPort(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "receiveBufferSize":
+ if (member.getValue() instanceof Number) {
+ obj.setReceiveBufferSize(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "reconnectAttempts":
+ if (member.getValue() instanceof Number) {
+ obj.setReconnectAttempts(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "reconnectInterval":
+ if (member.getValue() instanceof Number) {
+ obj.setReconnectInterval(((Number)member.getValue()).longValue());
+ }
+ break;
+ case "reuseAddress":
+ if (member.getValue() instanceof Boolean) {
+ obj.setReuseAddress((Boolean)member.getValue());
+ }
+ break;
+ case "reusePort":
+ if (member.getValue() instanceof Boolean) {
+ obj.setReusePort((Boolean)member.getValue());
+ }
+ break;
+ case "sendBufferSize":
+ if (member.getValue() instanceof Number) {
+ obj.setSendBufferSize(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "soLinger":
+ if (member.getValue() instanceof Number) {
+ obj.setSoLinger(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "ssl":
+ if (member.getValue() instanceof Boolean) {
+ obj.setSsl((Boolean)member.getValue());
+ }
+ break;
+ case "sslHandshakeTimeout":
+ if (member.getValue() instanceof Number) {
+ obj.setSslHandshakeTimeout(((Number)member.getValue()).longValue());
+ }
+ break;
+ case "sslHandshakeTimeoutUnit":
+ if (member.getValue() instanceof String) {
+ obj.setSslHandshakeTimeoutUnit(java.util.concurrent.TimeUnit.valueOf((String)member.getValue()));
+ }
+ break;
+ case "tcpCork":
+ if (member.getValue() instanceof Boolean) {
+ obj.setTcpCork((Boolean)member.getValue());
+ }
+ break;
+ case "tcpFastOpen":
+ if (member.getValue() instanceof Boolean) {
+ obj.setTcpFastOpen((Boolean)member.getValue());
+ }
+ break;
+ case "tcpKeepAlive":
+ if (member.getValue() instanceof Boolean) {
+ obj.setTcpKeepAlive((Boolean)member.getValue());
+ }
+ break;
+ case "tcpNoDelay":
+ if (member.getValue() instanceof Boolean) {
+ obj.setTcpNoDelay((Boolean)member.getValue());
+ }
+ break;
+ case "tcpQuickAck":
+ if (member.getValue() instanceof Boolean) {
+ obj.setTcpQuickAck((Boolean)member.getValue());
+ }
+ break;
+ case "trafficClass":
+ if (member.getValue() instanceof Number) {
+ obj.setTrafficClass(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "trustAll":
+ if (member.getValue() instanceof Boolean) {
+ obj.setTrustAll((Boolean)member.getValue());
+ }
+ break;
+ case "trustStoreOptions":
+ if (member.getValue() instanceof JsonObject) {
+ obj.setTrustStoreOptions(new io.vertx.core.net.JksOptions((JsonObject)member.getValue()));
+ }
+ break;
+ case "useAlpn":
+ if (member.getValue() instanceof Boolean) {
+ obj.setUseAlpn((Boolean)member.getValue());
+ }
+ break;
+ case "usePooledBuffers":
+ if (member.getValue() instanceof Boolean) {
+ obj.setUsePooledBuffers((Boolean)member.getValue());
+ }
+ break;
+ }
+ }
+ }
+
+ static void toJson(EventBusOptions obj, JsonObject json) {
+ toJson(obj, json.getMap());
+ }
+
+ static void toJson(EventBusOptions obj, java.util.Map<String, Object> json) {
+ json.put("acceptBacklog", obj.getAcceptBacklog());
+ if (obj.getClientAuth() != null) {
+ json.put("clientAuth", obj.getClientAuth().name());
+ }
+ json.put("clusterPingInterval", obj.getClusterPingInterval());
+ json.put("clusterPingReplyInterval", obj.getClusterPingReplyInterval());
+ if (obj.getClusterPublicHost() != null) {
+ json.put("clusterPublicHost", obj.getClusterPublicHost());
+ }
+ json.put("clusterPublicPort", obj.getClusterPublicPort());
+ json.put("clustered", obj.isClustered());
+ json.put("connectTimeout", obj.getConnectTimeout());
+ if (obj.getCrlPaths() != null) {
+ JsonArray array = new JsonArray();
+ obj.getCrlPaths().forEach(item -> array.add(item));
+ json.put("crlPaths", array);
+ }
+ if (obj.getCrlValues() != null) {
+ JsonArray array = new JsonArray();
+ obj.getCrlValues().forEach(item -> array.add(java.util.Base64.getEncoder().encodeToString(item.getBytes())));
+ json.put("crlValues", array);
+ }
+ if (obj.getEnabledCipherSuites() != null) {
+ JsonArray array = new JsonArray();
+ obj.getEnabledCipherSuites().forEach(item -> array.add(item));
+ json.put("enabledCipherSuites", array);
+ }
+ if (obj.getEnabledSecureTransportProtocols() != null) {
+ JsonArray array = new JsonArray();
+ obj.getEnabledSecureTransportProtocols().forEach(item -> array.add(item));
+ json.put("enabledSecureTransportProtocols", array);
+ }
+ if (obj.getHost() != null) {
+ json.put("host", obj.getHost());
+ }
+ json.put("idleTimeout", obj.getIdleTimeout());
+ if (obj.getIdleTimeoutUnit() != null) {
+ json.put("idleTimeoutUnit", obj.getIdleTimeoutUnit().name());
+ }
+ if (obj.getJdkSslEngineOptions() != null) {
+ json.put("jdkSslEngineOptions", obj.getJdkSslEngineOptions().toJson());
+ }
+ if (obj.getKeyStoreOptions() != null) {
+ json.put("keyStoreOptions", obj.getKeyStoreOptions().toJson());
+ }
+ json.put("logActivity", obj.getLogActivity());
+ if (obj.getOpenSslEngineOptions() != null) {
+ json.put("openSslEngineOptions", obj.getOpenSslEngineOptions().toJson());
+ }
+ if (obj.getPemKeyCertOptions() != null) {
+ json.put("pemKeyCertOptions", obj.getPemKeyCertOptions().toJson());
+ }
+ if (obj.getPemTrustOptions() != null) {
+ json.put("pemTrustOptions", obj.getPemTrustOptions().toJson());
+ }
+ if (obj.getPfxKeyCertOptions() != null) {
+ json.put("pfxKeyCertOptions", obj.getPfxKeyCertOptions().toJson());
+ }
+ if (obj.getPfxTrustOptions() != null) {
+ json.put("pfxTrustOptions", obj.getPfxTrustOptions().toJson());
+ }
+ json.put("port", obj.getPort());
+ json.put("receiveBufferSize", obj.getReceiveBufferSize());
+ json.put("reconnectAttempts", obj.getReconnectAttempts());
+ json.put("reconnectInterval", obj.getReconnectInterval());
+ json.put("reuseAddress", obj.isReuseAddress());
+ json.put("reusePort", obj.isReusePort());
+ json.put("sendBufferSize", obj.getSendBufferSize());
+ json.put("soLinger", obj.getSoLinger());
+ json.put("ssl", obj.isSsl());
+ json.put("sslHandshakeTimeout", obj.getSslHandshakeTimeout());
+ if (obj.getSslHandshakeTimeoutUnit() != null) {
+ json.put("sslHandshakeTimeoutUnit", obj.getSslHandshakeTimeoutUnit().name());
+ }
+ json.put("tcpCork", obj.isTcpCork());
+ json.put("tcpFastOpen", obj.isTcpFastOpen());
+ json.put("tcpKeepAlive", obj.isTcpKeepAlive());
+ json.put("tcpNoDelay", obj.isTcpNoDelay());
+ json.put("tcpQuickAck", obj.isTcpQuickAck());
+ json.put("trafficClass", obj.getTrafficClass());
+ json.put("trustAll", obj.isTrustAll());
+ if (obj.getTrustStoreOptions() != null) {
+ json.put("trustStoreOptions", obj.getTrustStoreOptions().toJson());
+ }
+ json.put("useAlpn", obj.isUseAlpn());
+ json.put("usePooledBuffers", obj.isUsePooledBuffers());
+ }
+}
diff --git a/src/main/generated/io/vertx/core/file/CopyOptionsConverter.java b/src/main/generated/io/vertx/core/file/CopyOptionsConverter.java
new file mode 100644
--- /dev/null
+++ b/src/main/generated/io/vertx/core/file/CopyOptionsConverter.java
@@ -0,0 +1,51 @@
+package io.vertx.core.file;
+
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.JsonArray;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+
+/**
+ * Converter for {@link io.vertx.core.file.CopyOptions}.
+ * NOTE: This class has been automatically generated from the {@link io.vertx.core.file.CopyOptions} original class using Vert.x codegen.
+ */
+ class CopyOptionsConverter {
+
+ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, CopyOptions obj) {
+ for (java.util.Map.Entry<String, Object> member : json) {
+ switch (member.getKey()) {
+ case "atomicMove":
+ if (member.getValue() instanceof Boolean) {
+ obj.setAtomicMove((Boolean)member.getValue());
+ }
+ break;
+ case "copyAttributes":
+ if (member.getValue() instanceof Boolean) {
+ obj.setCopyAttributes((Boolean)member.getValue());
+ }
+ break;
+ case "nofollowLinks":
+ if (member.getValue() instanceof Boolean) {
+ obj.setNofollowLinks((Boolean)member.getValue());
+ }
+ break;
+ case "replaceExisting":
+ if (member.getValue() instanceof Boolean) {
+ obj.setReplaceExisting((Boolean)member.getValue());
+ }
+ break;
+ }
+ }
+ }
+
+ static void toJson(CopyOptions obj, JsonObject json) {
+ toJson(obj, json.getMap());
+ }
+
+ static void toJson(CopyOptions obj, java.util.Map<String, Object> json) {
+ json.put("atomicMove", obj.isAtomicMove());
+ json.put("copyAttributes", obj.isCopyAttributes());
+ json.put("nofollowLinks", obj.isNofollowLinks());
+ json.put("replaceExisting", obj.isReplaceExisting());
+ }
+}
diff --git a/src/main/generated/io/vertx/core/file/FileSystemOptionsConverter.java b/src/main/generated/io/vertx/core/file/FileSystemOptionsConverter.java
new file mode 100644
--- /dev/null
+++ b/src/main/generated/io/vertx/core/file/FileSystemOptionsConverter.java
@@ -0,0 +1,47 @@
+package io.vertx.core.file;
+
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.JsonArray;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+
+/**
+ * Converter for {@link io.vertx.core.file.FileSystemOptions}.
+ * NOTE: This class has been automatically generated from the {@link io.vertx.core.file.FileSystemOptions} original class using Vert.x codegen.
+ */
+ class FileSystemOptionsConverter {
+
+ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, FileSystemOptions obj) {
+ for (java.util.Map.Entry<String, Object> member : json) {
+ switch (member.getKey()) {
+ case "classPathResolvingEnabled":
+ if (member.getValue() instanceof Boolean) {
+ obj.setClassPathResolvingEnabled((Boolean)member.getValue());
+ }
+ break;
+ case "fileCacheDir":
+ if (member.getValue() instanceof String) {
+ obj.setFileCacheDir((String)member.getValue());
+ }
+ break;
+ case "fileCachingEnabled":
+ if (member.getValue() instanceof Boolean) {
+ obj.setFileCachingEnabled((Boolean)member.getValue());
+ }
+ break;
+ }
+ }
+ }
+
+ static void toJson(FileSystemOptions obj, JsonObject json) {
+ toJson(obj, json.getMap());
+ }
+
+ static void toJson(FileSystemOptions obj, java.util.Map<String, Object> json) {
+ json.put("classPathResolvingEnabled", obj.isClassPathResolvingEnabled());
+ if (obj.getFileCacheDir() != null) {
+ json.put("fileCacheDir", obj.getFileCacheDir());
+ }
+ json.put("fileCachingEnabled", obj.isFileCachingEnabled());
+ }
+}
diff --git a/src/main/generated/io/vertx/core/file/OpenOptionsConverter.java b/src/main/generated/io/vertx/core/file/OpenOptionsConverter.java
new file mode 100644
--- /dev/null
+++ b/src/main/generated/io/vertx/core/file/OpenOptionsConverter.java
@@ -0,0 +1,95 @@
+package io.vertx.core.file;
+
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.JsonArray;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+
+/**
+ * Converter for {@link io.vertx.core.file.OpenOptions}.
+ * NOTE: This class has been automatically generated from the {@link io.vertx.core.file.OpenOptions} original class using Vert.x codegen.
+ */
+ class OpenOptionsConverter {
+
+ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, OpenOptions obj) {
+ for (java.util.Map.Entry<String, Object> member : json) {
+ switch (member.getKey()) {
+ case "append":
+ if (member.getValue() instanceof Boolean) {
+ obj.setAppend((Boolean)member.getValue());
+ }
+ break;
+ case "create":
+ if (member.getValue() instanceof Boolean) {
+ obj.setCreate((Boolean)member.getValue());
+ }
+ break;
+ case "createNew":
+ if (member.getValue() instanceof Boolean) {
+ obj.setCreateNew((Boolean)member.getValue());
+ }
+ break;
+ case "deleteOnClose":
+ if (member.getValue() instanceof Boolean) {
+ obj.setDeleteOnClose((Boolean)member.getValue());
+ }
+ break;
+ case "dsync":
+ if (member.getValue() instanceof Boolean) {
+ obj.setDsync((Boolean)member.getValue());
+ }
+ break;
+ case "perms":
+ if (member.getValue() instanceof String) {
+ obj.setPerms((String)member.getValue());
+ }
+ break;
+ case "read":
+ if (member.getValue() instanceof Boolean) {
+ obj.setRead((Boolean)member.getValue());
+ }
+ break;
+ case "sparse":
+ if (member.getValue() instanceof Boolean) {
+ obj.setSparse((Boolean)member.getValue());
+ }
+ break;
+ case "sync":
+ if (member.getValue() instanceof Boolean) {
+ obj.setSync((Boolean)member.getValue());
+ }
+ break;
+ case "truncateExisting":
+ if (member.getValue() instanceof Boolean) {
+ obj.setTruncateExisting((Boolean)member.getValue());
+ }
+ break;
+ case "write":
+ if (member.getValue() instanceof Boolean) {
+ obj.setWrite((Boolean)member.getValue());
+ }
+ break;
+ }
+ }
+ }
+
+ static void toJson(OpenOptions obj, JsonObject json) {
+ toJson(obj, json.getMap());
+ }
+
+ static void toJson(OpenOptions obj, java.util.Map<String, Object> json) {
+ json.put("append", obj.isAppend());
+ json.put("create", obj.isCreate());
+ json.put("createNew", obj.isCreateNew());
+ json.put("deleteOnClose", obj.isDeleteOnClose());
+ json.put("dsync", obj.isDsync());
+ if (obj.getPerms() != null) {
+ json.put("perms", obj.getPerms());
+ }
+ json.put("read", obj.isRead());
+ json.put("sparse", obj.isSparse());
+ json.put("sync", obj.isSync());
+ json.put("truncateExisting", obj.isTruncateExisting());
+ json.put("write", obj.isWrite());
+ }
+}
diff --git a/src/main/generated/io/vertx/core/http/GoAwayConverter.java b/src/main/generated/io/vertx/core/http/GoAwayConverter.java
new file mode 100644
--- /dev/null
+++ b/src/main/generated/io/vertx/core/http/GoAwayConverter.java
@@ -0,0 +1,47 @@
+package io.vertx.core.http;
+
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.JsonArray;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+
+/**
+ * Converter for {@link io.vertx.core.http.GoAway}.
+ * NOTE: This class has been automatically generated from the {@link io.vertx.core.http.GoAway} original class using Vert.x codegen.
+ */
+ class GoAwayConverter {
+
+ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, GoAway obj) {
+ for (java.util.Map.Entry<String, Object> member : json) {
+ switch (member.getKey()) {
+ case "debugData":
+ if (member.getValue() instanceof String) {
+ obj.setDebugData(io.vertx.core.buffer.Buffer.buffer(java.util.Base64.getDecoder().decode((String)member.getValue())));
+ }
+ break;
+ case "errorCode":
+ if (member.getValue() instanceof Number) {
+ obj.setErrorCode(((Number)member.getValue()).longValue());
+ }
+ break;
+ case "lastStreamId":
+ if (member.getValue() instanceof Number) {
+ obj.setLastStreamId(((Number)member.getValue()).intValue());
+ }
+ break;
+ }
+ }
+ }
+
+ static void toJson(GoAway obj, JsonObject json) {
+ toJson(obj, json.getMap());
+ }
+
+ static void toJson(GoAway obj, java.util.Map<String, Object> json) {
+ if (obj.getDebugData() != null) {
+ json.put("debugData", java.util.Base64.getEncoder().encodeToString(obj.getDebugData().getBytes()));
+ }
+ json.put("errorCode", obj.getErrorCode());
+ json.put("lastStreamId", obj.getLastStreamId());
+ }
+}
diff --git a/src/main/generated/io/vertx/core/http/Http2SettingsConverter.java b/src/main/generated/io/vertx/core/http/Http2SettingsConverter.java
new file mode 100644
--- /dev/null
+++ b/src/main/generated/io/vertx/core/http/Http2SettingsConverter.java
@@ -0,0 +1,63 @@
+package io.vertx.core.http;
+
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.JsonArray;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+
+/**
+ * Converter for {@link io.vertx.core.http.Http2Settings}.
+ * NOTE: This class has been automatically generated from the {@link io.vertx.core.http.Http2Settings} original class using Vert.x codegen.
+ */
+ class Http2SettingsConverter {
+
+ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, Http2Settings obj) {
+ for (java.util.Map.Entry<String, Object> member : json) {
+ switch (member.getKey()) {
+ case "headerTableSize":
+ if (member.getValue() instanceof Number) {
+ obj.setHeaderTableSize(((Number)member.getValue()).longValue());
+ }
+ break;
+ case "initialWindowSize":
+ if (member.getValue() instanceof Number) {
+ obj.setInitialWindowSize(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "maxConcurrentStreams":
+ if (member.getValue() instanceof Number) {
+ obj.setMaxConcurrentStreams(((Number)member.getValue()).longValue());
+ }
+ break;
+ case "maxFrameSize":
+ if (member.getValue() instanceof Number) {
+ obj.setMaxFrameSize(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "maxHeaderListSize":
+ if (member.getValue() instanceof Number) {
+ obj.setMaxHeaderListSize(((Number)member.getValue()).longValue());
+ }
+ break;
+ case "pushEnabled":
+ if (member.getValue() instanceof Boolean) {
+ obj.setPushEnabled((Boolean)member.getValue());
+ }
+ break;
+ }
+ }
+ }
+
+ static void toJson(Http2Settings obj, JsonObject json) {
+ toJson(obj, json.getMap());
+ }
+
+ static void toJson(Http2Settings obj, java.util.Map<String, Object> json) {
+ json.put("headerTableSize", obj.getHeaderTableSize());
+ json.put("initialWindowSize", obj.getInitialWindowSize());
+ json.put("maxConcurrentStreams", obj.getMaxConcurrentStreams());
+ json.put("maxFrameSize", obj.getMaxFrameSize());
+ json.put("maxHeaderListSize", obj.getMaxHeaderListSize());
+ json.put("pushEnabled", obj.isPushEnabled());
+ }
+}
diff --git a/src/main/generated/io/vertx/core/http/HttpClientOptionsConverter.java b/src/main/generated/io/vertx/core/http/HttpClientOptionsConverter.java
new file mode 100644
--- /dev/null
+++ b/src/main/generated/io/vertx/core/http/HttpClientOptionsConverter.java
@@ -0,0 +1,292 @@
+package io.vertx.core.http;
+
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.JsonArray;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+
+/**
+ * Converter for {@link io.vertx.core.http.HttpClientOptions}.
+ * NOTE: This class has been automatically generated from the {@link io.vertx.core.http.HttpClientOptions} original class using Vert.x codegen.
+ */
+ class HttpClientOptionsConverter {
+
+ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, HttpClientOptions obj) {
+ for (java.util.Map.Entry<String, Object> member : json) {
+ switch (member.getKey()) {
+ case "alpnVersions":
+ if (member.getValue() instanceof JsonArray) {
+ java.util.ArrayList<io.vertx.core.http.HttpVersion> list = new java.util.ArrayList<>();
+ ((Iterable<Object>)member.getValue()).forEach( item -> {
+ if (item instanceof String)
+ list.add(io.vertx.core.http.HttpVersion.valueOf((String)item));
+ });
+ obj.setAlpnVersions(list);
+ }
+ break;
+ case "decoderInitialBufferSize":
+ if (member.getValue() instanceof Number) {
+ obj.setDecoderInitialBufferSize(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "defaultHost":
+ if (member.getValue() instanceof String) {
+ obj.setDefaultHost((String)member.getValue());
+ }
+ break;
+ case "defaultPort":
+ if (member.getValue() instanceof Number) {
+ obj.setDefaultPort(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "forceSni":
+ if (member.getValue() instanceof Boolean) {
+ obj.setForceSni((Boolean)member.getValue());
+ }
+ break;
+ case "http2ClearTextUpgrade":
+ if (member.getValue() instanceof Boolean) {
+ obj.setHttp2ClearTextUpgrade((Boolean)member.getValue());
+ }
+ break;
+ case "http2ConnectionWindowSize":
+ if (member.getValue() instanceof Number) {
+ obj.setHttp2ConnectionWindowSize(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "http2KeepAliveTimeout":
+ if (member.getValue() instanceof Number) {
+ obj.setHttp2KeepAliveTimeout(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "http2MaxPoolSize":
+ if (member.getValue() instanceof Number) {
+ obj.setHttp2MaxPoolSize(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "http2MultiplexingLimit":
+ if (member.getValue() instanceof Number) {
+ obj.setHttp2MultiplexingLimit(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "initialSettings":
+ if (member.getValue() instanceof JsonObject) {
+ obj.setInitialSettings(new io.vertx.core.http.Http2Settings((JsonObject)member.getValue()));
+ }
+ break;
+ case "keepAlive":
+ if (member.getValue() instanceof Boolean) {
+ obj.setKeepAlive((Boolean)member.getValue());
+ }
+ break;
+ case "keepAliveTimeout":
+ if (member.getValue() instanceof Number) {
+ obj.setKeepAliveTimeout(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "maxChunkSize":
+ if (member.getValue() instanceof Number) {
+ obj.setMaxChunkSize(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "maxHeaderSize":
+ if (member.getValue() instanceof Number) {
+ obj.setMaxHeaderSize(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "maxInitialLineLength":
+ if (member.getValue() instanceof Number) {
+ obj.setMaxInitialLineLength(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "maxPoolSize":
+ if (member.getValue() instanceof Number) {
+ obj.setMaxPoolSize(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "maxRedirects":
+ if (member.getValue() instanceof Number) {
+ obj.setMaxRedirects(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "maxWaitQueueSize":
+ if (member.getValue() instanceof Number) {
+ obj.setMaxWaitQueueSize(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "maxWebSocketFrameSize":
+ if (member.getValue() instanceof Number) {
+ obj.setMaxWebSocketFrameSize(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "maxWebSocketMessageSize":
+ if (member.getValue() instanceof Number) {
+ obj.setMaxWebSocketMessageSize(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "maxWebsocketFrameSize":
+ if (member.getValue() instanceof Number) {
+ obj.setMaxWebsocketFrameSize(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "maxWebsocketMessageSize":
+ if (member.getValue() instanceof Number) {
+ obj.setMaxWebsocketMessageSize(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "pipelining":
+ if (member.getValue() instanceof Boolean) {
+ obj.setPipelining((Boolean)member.getValue());
+ }
+ break;
+ case "pipeliningLimit":
+ if (member.getValue() instanceof Number) {
+ obj.setPipeliningLimit(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "poolCleanerPeriod":
+ if (member.getValue() instanceof Number) {
+ obj.setPoolCleanerPeriod(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "protocolVersion":
+ if (member.getValue() instanceof String) {
+ obj.setProtocolVersion(io.vertx.core.http.HttpVersion.valueOf((String)member.getValue()));
+ }
+ break;
+ case "sendUnmaskedFrames":
+ if (member.getValue() instanceof Boolean) {
+ obj.setSendUnmaskedFrames((Boolean)member.getValue());
+ }
+ break;
+ case "tryUseCompression":
+ if (member.getValue() instanceof Boolean) {
+ obj.setTryUseCompression((Boolean)member.getValue());
+ }
+ break;
+ case "tryUsePerFrameWebSocketCompression":
+ if (member.getValue() instanceof Boolean) {
+ obj.setTryUsePerFrameWebSocketCompression((Boolean)member.getValue());
+ }
+ break;
+ case "tryUsePerFrameWebsocketCompression":
+ if (member.getValue() instanceof Boolean) {
+ obj.setTryUsePerFrameWebsocketCompression((Boolean)member.getValue());
+ }
+ break;
+ case "tryUsePerMessageWebSocketCompression":
+ if (member.getValue() instanceof Boolean) {
+ obj.setTryUsePerMessageWebSocketCompression((Boolean)member.getValue());
+ }
+ break;
+ case "tryUsePerMessageWebsocketCompression":
+ if (member.getValue() instanceof Boolean) {
+ obj.setTryUsePerMessageWebsocketCompression((Boolean)member.getValue());
+ }
+ break;
+ case "tryWebSocketDeflateFrameCompression":
+ break;
+ case "tryWebsocketDeflateFrameCompression":
+ break;
+ case "verifyHost":
+ if (member.getValue() instanceof Boolean) {
+ obj.setVerifyHost((Boolean)member.getValue());
+ }
+ break;
+ case "webSocketClosingTimeout":
+ if (member.getValue() instanceof Number) {
+ obj.setWebSocketClosingTimeout(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "webSocketCompressionAllowClientNoContext":
+ if (member.getValue() instanceof Boolean) {
+ obj.setWebSocketCompressionAllowClientNoContext((Boolean)member.getValue());
+ }
+ break;
+ case "webSocketCompressionLevel":
+ if (member.getValue() instanceof Number) {
+ obj.setWebSocketCompressionLevel(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "webSocketCompressionRequestServerNoContext":
+ if (member.getValue() instanceof Boolean) {
+ obj.setWebSocketCompressionRequestServerNoContext((Boolean)member.getValue());
+ }
+ break;
+ case "websocketCompressionAllowClientNoContext":
+ if (member.getValue() instanceof Boolean) {
+ obj.setWebsocketCompressionAllowClientNoContext((Boolean)member.getValue());
+ }
+ break;
+ case "websocketCompressionLevel":
+ if (member.getValue() instanceof Number) {
+ obj.setWebsocketCompressionLevel(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "websocketCompressionRequestServerNoContext":
+ if (member.getValue() instanceof Boolean) {
+ obj.setWebsocketCompressionRequestServerNoContext((Boolean)member.getValue());
+ }
+ break;
+ }
+ }
+ }
+
+ static void toJson(HttpClientOptions obj, JsonObject json) {
+ toJson(obj, json.getMap());
+ }
+
+ static void toJson(HttpClientOptions obj, java.util.Map<String, Object> json) {
+ if (obj.getAlpnVersions() != null) {
+ JsonArray array = new JsonArray();
+ obj.getAlpnVersions().forEach(item -> array.add(item.name()));
+ json.put("alpnVersions", array);
+ }
+ json.put("decoderInitialBufferSize", obj.getDecoderInitialBufferSize());
+ if (obj.getDefaultHost() != null) {
+ json.put("defaultHost", obj.getDefaultHost());
+ }
+ json.put("defaultPort", obj.getDefaultPort());
+ json.put("forceSni", obj.isForceSni());
+ json.put("http2ClearTextUpgrade", obj.isHttp2ClearTextUpgrade());
+ json.put("http2ConnectionWindowSize", obj.getHttp2ConnectionWindowSize());
+ json.put("http2KeepAliveTimeout", obj.getHttp2KeepAliveTimeout());
+ json.put("http2MaxPoolSize", obj.getHttp2MaxPoolSize());
+ json.put("http2MultiplexingLimit", obj.getHttp2MultiplexingLimit());
+ if (obj.getInitialSettings() != null) {
+ json.put("initialSettings", obj.getInitialSettings().toJson());
+ }
+ json.put("keepAlive", obj.isKeepAlive());
+ json.put("keepAliveTimeout", obj.getKeepAliveTimeout());
+ json.put("maxChunkSize", obj.getMaxChunkSize());
+ json.put("maxHeaderSize", obj.getMaxHeaderSize());
+ json.put("maxInitialLineLength", obj.getMaxInitialLineLength());
+ json.put("maxPoolSize", obj.getMaxPoolSize());
+ json.put("maxRedirects", obj.getMaxRedirects());
+ json.put("maxWaitQueueSize", obj.getMaxWaitQueueSize());
+ json.put("maxWebSocketFrameSize", obj.getMaxWebSocketFrameSize());
+ json.put("maxWebSocketMessageSize", obj.getMaxWebSocketMessageSize());
+ json.put("maxWebsocketFrameSize", obj.getMaxWebsocketFrameSize());
+ json.put("maxWebsocketMessageSize", obj.getMaxWebsocketMessageSize());
+ json.put("pipelining", obj.isPipelining());
+ json.put("pipeliningLimit", obj.getPipeliningLimit());
+ json.put("poolCleanerPeriod", obj.getPoolCleanerPeriod());
+ if (obj.getProtocolVersion() != null) {
+ json.put("protocolVersion", obj.getProtocolVersion().name());
+ }
+ json.put("sendUnmaskedFrames", obj.isSendUnmaskedFrames());
+ json.put("tryUseCompression", obj.isTryUseCompression());
+ json.put("tryUsePerMessageWebSocketCompression", obj.getTryUsePerMessageWebSocketCompression());
+ json.put("tryUsePerMessageWebsocketCompression", obj.getTryUsePerMessageWebsocketCompression());
+ json.put("tryWebSocketDeflateFrameCompression", obj.getTryWebSocketDeflateFrameCompression());
+ json.put("tryWebsocketDeflateFrameCompression", obj.getTryWebsocketDeflateFrameCompression());
+ json.put("verifyHost", obj.isVerifyHost());
+ json.put("webSocketClosingTimeout", obj.getWebSocketClosingTimeout());
+ json.put("webSocketCompressionAllowClientNoContext", obj.getWebSocketCompressionAllowClientNoContext());
+ json.put("webSocketCompressionLevel", obj.getWebSocketCompressionLevel());
+ json.put("webSocketCompressionRequestServerNoContext", obj.getWebSocketCompressionRequestServerNoContext());
+ json.put("websocketCompressionAllowClientNoContext", obj.getWebsocketCompressionAllowClientNoContext());
+ json.put("websocketCompressionLevel", obj.getWebsocketCompressionLevel());
+ json.put("websocketCompressionRequestServerNoContext", obj.getWebsocketCompressionRequestServerNoContext());
+ }
+}
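Note the key pairs that differ only in capitalization (for example maxWebSocketFrameSize vs. maxWebsocketFrameSize): fromJson accepts both, with the lower-case "Websocket" spellings kept for backwards compatibility with the deprecated setters. A small sketch, assuming the standard HttpClientOptions(JsonObject) constructor:

    // Either spelling reaches a setter; new code should prefer the "WebSocket" form.
    HttpClientOptions legacy  = new HttpClientOptions(new JsonObject().put("maxWebsocketFrameSize", 65536));
    HttpClientOptions current = new HttpClientOptions(new JsonObject().put("maxWebSocketFrameSize", 65536));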
diff --git a/src/main/generated/io/vertx/core/http/HttpServerOptionsConverter.java b/src/main/generated/io/vertx/core/http/HttpServerOptionsConverter.java
new file mode 100644
--- /dev/null
+++ b/src/main/generated/io/vertx/core/http/HttpServerOptionsConverter.java
@@ -0,0 +1,223 @@
+package io.vertx.core.http;
+
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.JsonArray;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+
+/**
+ * Converter for {@link io.vertx.core.http.HttpServerOptions}.
+ * NOTE: This class has been automatically generated from the {@link io.vertx.core.http.HttpServerOptions} original class using Vert.x codegen.
+ */
+ class HttpServerOptionsConverter {
+
+ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, HttpServerOptions obj) {
+ for (java.util.Map.Entry<String, Object> member : json) {
+ switch (member.getKey()) {
+ case "acceptUnmaskedFrames":
+ if (member.getValue() instanceof Boolean) {
+ obj.setAcceptUnmaskedFrames((Boolean)member.getValue());
+ }
+ break;
+ case "alpnVersions":
+ if (member.getValue() instanceof JsonArray) {
+ java.util.ArrayList<io.vertx.core.http.HttpVersion> list = new java.util.ArrayList<>();
+ ((Iterable<Object>)member.getValue()).forEach( item -> {
+ if (item instanceof String)
+ list.add(io.vertx.core.http.HttpVersion.valueOf((String)item));
+ });
+ obj.setAlpnVersions(list);
+ }
+ break;
+ case "compressionLevel":
+ if (member.getValue() instanceof Number) {
+ obj.setCompressionLevel(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "compressionSupported":
+ if (member.getValue() instanceof Boolean) {
+ obj.setCompressionSupported((Boolean)member.getValue());
+ }
+ break;
+ case "decoderInitialBufferSize":
+ if (member.getValue() instanceof Number) {
+ obj.setDecoderInitialBufferSize(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "decompressionSupported":
+ if (member.getValue() instanceof Boolean) {
+ obj.setDecompressionSupported((Boolean)member.getValue());
+ }
+ break;
+ case "handle100ContinueAutomatically":
+ if (member.getValue() instanceof Boolean) {
+ obj.setHandle100ContinueAutomatically((Boolean)member.getValue());
+ }
+ break;
+ case "http2ConnectionWindowSize":
+ if (member.getValue() instanceof Number) {
+ obj.setHttp2ConnectionWindowSize(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "initialSettings":
+ if (member.getValue() instanceof JsonObject) {
+ obj.setInitialSettings(new io.vertx.core.http.Http2Settings((JsonObject)member.getValue()));
+ }
+ break;
+ case "maxChunkSize":
+ if (member.getValue() instanceof Number) {
+ obj.setMaxChunkSize(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "maxHeaderSize":
+ if (member.getValue() instanceof Number) {
+ obj.setMaxHeaderSize(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "maxInitialLineLength":
+ if (member.getValue() instanceof Number) {
+ obj.setMaxInitialLineLength(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "maxWebSocketFrameSize":
+ if (member.getValue() instanceof Number) {
+ obj.setMaxWebSocketFrameSize(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "maxWebSocketMessageSize":
+ if (member.getValue() instanceof Number) {
+ obj.setMaxWebSocketMessageSize(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "maxWebsocketFrameSize":
+ if (member.getValue() instanceof Number) {
+ obj.setMaxWebsocketFrameSize(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "maxWebsocketMessageSize":
+ if (member.getValue() instanceof Number) {
+ obj.setMaxWebsocketMessageSize(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "perFrameWebSocketCompressionSupported":
+ if (member.getValue() instanceof Boolean) {
+ obj.setPerFrameWebSocketCompressionSupported((Boolean)member.getValue());
+ }
+ break;
+ case "perFrameWebsocketCompressionSupported":
+ if (member.getValue() instanceof Boolean) {
+ obj.setPerFrameWebsocketCompressionSupported((Boolean)member.getValue());
+ }
+ break;
+ case "perMessageWebSocketCompressionSupported":
+ if (member.getValue() instanceof Boolean) {
+ obj.setPerMessageWebSocketCompressionSupported((Boolean)member.getValue());
+ }
+ break;
+ case "perMessageWebsocketCompressionSupported":
+ if (member.getValue() instanceof Boolean) {
+ obj.setPerMessageWebsocketCompressionSupported((Boolean)member.getValue());
+ }
+ break;
+ case "webSocketAllowServerNoContext":
+ if (member.getValue() instanceof Boolean) {
+ obj.setWebSocketAllowServerNoContext((Boolean)member.getValue());
+ }
+ break;
+ case "webSocketClosingTimeout":
+ if (member.getValue() instanceof Number) {
+ obj.setWebSocketClosingTimeout(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "webSocketCompressionLevel":
+ if (member.getValue() instanceof Number) {
+ obj.setWebSocketCompressionLevel(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "webSocketPreferredClientNoContext":
+ if (member.getValue() instanceof Boolean) {
+ obj.setWebSocketPreferredClientNoContext((Boolean)member.getValue());
+ }
+ break;
+ case "webSocketSubProtocols":
+ if (member.getValue() instanceof JsonArray) {
+ java.util.ArrayList<java.lang.String> list = new java.util.ArrayList<>();
+ ((Iterable<Object>)member.getValue()).forEach( item -> {
+ if (item instanceof String)
+ list.add((String)item);
+ });
+ obj.setWebSocketSubProtocols(list);
+ }
+ break;
+ case "websocketAllowServerNoContext":
+ if (member.getValue() instanceof Boolean) {
+ obj.setWebsocketAllowServerNoContext((Boolean)member.getValue());
+ }
+ break;
+ case "websocketCompressionLevel":
+ if (member.getValue() instanceof Number) {
+ obj.setWebsocketCompressionLevel(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "websocketPreferredClientNoContext":
+ if (member.getValue() instanceof Boolean) {
+ obj.setWebsocketPreferredClientNoContext((Boolean)member.getValue());
+ }
+ break;
+ case "websocketSubProtocols":
+ if (member.getValue() instanceof String) {
+ obj.setWebsocketSubProtocols((String)member.getValue());
+ }
+ break;
+ }
+ }
+ }
+
+ static void toJson(HttpServerOptions obj, JsonObject json) {
+ toJson(obj, json.getMap());
+ }
+
+ static void toJson(HttpServerOptions obj, java.util.Map<String, Object> json) {
+ json.put("acceptUnmaskedFrames", obj.isAcceptUnmaskedFrames());
+ if (obj.getAlpnVersions() != null) {
+ JsonArray array = new JsonArray();
+ obj.getAlpnVersions().forEach(item -> array.add(item.name()));
+ json.put("alpnVersions", array);
+ }
+ json.put("compressionLevel", obj.getCompressionLevel());
+ json.put("compressionSupported", obj.isCompressionSupported());
+ json.put("decoderInitialBufferSize", obj.getDecoderInitialBufferSize());
+ json.put("decompressionSupported", obj.isDecompressionSupported());
+ json.put("handle100ContinueAutomatically", obj.isHandle100ContinueAutomatically());
+ json.put("http2ConnectionWindowSize", obj.getHttp2ConnectionWindowSize());
+ if (obj.getInitialSettings() != null) {
+ json.put("initialSettings", obj.getInitialSettings().toJson());
+ }
+ json.put("maxChunkSize", obj.getMaxChunkSize());
+ json.put("maxHeaderSize", obj.getMaxHeaderSize());
+ json.put("maxInitialLineLength", obj.getMaxInitialLineLength());
+ json.put("maxWebSocketFrameSize", obj.getMaxWebSocketFrameSize());
+ json.put("maxWebSocketMessageSize", obj.getMaxWebSocketMessageSize());
+ json.put("maxWebsocketFrameSize", obj.getMaxWebsocketFrameSize());
+ json.put("maxWebsocketMessageSize", obj.getMaxWebsocketMessageSize());
+ json.put("perFrameWebSocketCompressionSupported", obj.getPerFrameWebSocketCompressionSupported());
+ json.put("perFrameWebsocketCompressionSupported", obj.getPerFrameWebsocketCompressionSupported());
+ json.put("perMessageWebSocketCompressionSupported", obj.getPerMessageWebSocketCompressionSupported());
+ json.put("perMessageWebsocketCompressionSupported", obj.getPerMessageWebsocketCompressionSupported());
+ json.put("webSocketAllowServerNoContext", obj.getWebSocketAllowServerNoContext());
+ json.put("webSocketClosingTimeout", obj.getWebSocketClosingTimeout());
+ json.put("webSocketCompressionLevel", obj.getWebSocketCompressionLevel());
+ json.put("webSocketPreferredClientNoContext", obj.getWebSocketPreferredClientNoContext());
+ if (obj.getWebSocketSubProtocols() != null) {
+ JsonArray array = new JsonArray();
+ obj.getWebSocketSubProtocols().forEach(item -> array.add(item));
+ json.put("webSocketSubProtocols", array);
+ }
+ json.put("websocketAllowServerNoContext", obj.getWebsocketAllowServerNoContext());
+ json.put("websocketCompressionLevel", obj.getWebsocketCompressionLevel());
+ json.put("websocketPreferredClientNoContext", obj.getWebsocketPreferredClientNoContext());
+ if (obj.getWebsocketSubProtocols() != null) {
+ json.put("websocketSubProtocols", obj.getWebsocketSubProtocols());
+ }
+ }
+}
diff --git a/src/main/generated/io/vertx/core/http/RequestOptionsConverter.java b/src/main/generated/io/vertx/core/http/RequestOptionsConverter.java
new file mode 100644
--- /dev/null
+++ b/src/main/generated/io/vertx/core/http/RequestOptionsConverter.java
@@ -0,0 +1,65 @@
+package io.vertx.core.http;
+
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.JsonArray;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+
+/**
+ * Converter for {@link io.vertx.core.http.RequestOptions}.
+ * NOTE: This class has been automatically generated from the {@link io.vertx.core.http.RequestOptions} original class using Vert.x codegen.
+ */
+public class RequestOptionsConverter {
+
+ public static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, RequestOptions obj) {
+ for (java.util.Map.Entry<String, Object> member : json) {
+ switch (member.getKey()) {
+ case "headers":
+ if (member.getValue() instanceof JsonObject) {
+ ((Iterable<java.util.Map.Entry<String, Object>>)member.getValue()).forEach(entry -> {
+ if (entry.getValue() instanceof String)
+ obj.addHeader(entry.getKey(), (String)entry.getValue());
+ });
+ }
+ break;
+ case "host":
+ if (member.getValue() instanceof String) {
+ obj.setHost((String)member.getValue());
+ }
+ break;
+ case "port":
+ if (member.getValue() instanceof Number) {
+ obj.setPort(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "ssl":
+ if (member.getValue() instanceof Boolean) {
+ obj.setSsl((Boolean)member.getValue());
+ }
+ break;
+ case "uri":
+ if (member.getValue() instanceof String) {
+ obj.setURI((String)member.getValue());
+ }
+ break;
+ }
+ }
+ }
+
+ public static void toJson(RequestOptions obj, JsonObject json) {
+ toJson(obj, json.getMap());
+ }
+
+ public static void toJson(RequestOptions obj, java.util.Map<String, Object> json) {
+ if (obj.getHost() != null) {
+ json.put("host", obj.getHost());
+ }
+ json.put("port", obj.getPort());
+ if (obj.isSsl() != null) {
+ json.put("ssl", obj.isSsl());
+ }
+ if (obj.getURI() != null) {
+ json.put("uri", obj.getURI());
+ }
+ }
+}
diff --git a/src/main/generated/io/vertx/core/http/WebSocketConnectOptionsConverter.java b/src/main/generated/io/vertx/core/http/WebSocketConnectOptionsConverter.java
new file mode 100644
--- /dev/null
+++ b/src/main/generated/io/vertx/core/http/WebSocketConnectOptionsConverter.java
@@ -0,0 +1,50 @@
+package io.vertx.core.http;
+
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.JsonArray;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+
+/**
+ * Converter for {@link io.vertx.core.http.WebSocketConnectOptions}.
+ * NOTE: This class has been automatically generated from the {@link io.vertx.core.http.WebSocketConnectOptions} original class using Vert.x codegen.
+ */
+public class WebSocketConnectOptionsConverter {
+
+ public static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, WebSocketConnectOptions obj) {
+ for (java.util.Map.Entry<String, Object> member : json) {
+ switch (member.getKey()) {
+ case "subProtocols":
+ if (member.getValue() instanceof JsonArray) {
+ java.util.ArrayList<java.lang.String> list = new java.util.ArrayList<>();
+ ((Iterable<Object>)member.getValue()).forEach( item -> {
+ if (item instanceof String)
+ list.add((String)item);
+ });
+ obj.setSubProtocols(list);
+ }
+ break;
+ case "version":
+ if (member.getValue() instanceof String) {
+ obj.setVersion(io.vertx.core.http.WebsocketVersion.valueOf((String)member.getValue()));
+ }
+ break;
+ }
+ }
+ }
+
+ public static void toJson(WebSocketConnectOptions obj, JsonObject json) {
+ toJson(obj, json.getMap());
+ }
+
+ public static void toJson(WebSocketConnectOptions obj, java.util.Map<String, Object> json) {
+ if (obj.getSubProtocols() != null) {
+ JsonArray array = new JsonArray();
+ obj.getSubProtocols().forEach(item -> array.add(item));
+ json.put("subProtocols", array);
+ }
+ if (obj.getVersion() != null) {
+ json.put("version", obj.getVersion().name());
+ }
+ }
+}
diff --git a/src/main/generated/io/vertx/core/metrics/MetricsOptionsConverter.java b/src/main/generated/io/vertx/core/metrics/MetricsOptionsConverter.java
new file mode 100644
--- /dev/null
+++ b/src/main/generated/io/vertx/core/metrics/MetricsOptionsConverter.java
@@ -0,0 +1,33 @@
+package io.vertx.core.metrics;
+
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.JsonArray;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+
+/**
+ * Converter for {@link io.vertx.core.metrics.MetricsOptions}.
+ * NOTE: This class has been automatically generated from the {@link io.vertx.core.metrics.MetricsOptions} original class using Vert.x codegen.
+ */
+ class MetricsOptionsConverter {
+
+ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, MetricsOptions obj) {
+ for (java.util.Map.Entry<String, Object> member : json) {
+ switch (member.getKey()) {
+ case "enabled":
+ if (member.getValue() instanceof Boolean) {
+ obj.setEnabled((Boolean)member.getValue());
+ }
+ break;
+ }
+ }
+ }
+
+ static void toJson(MetricsOptions obj, JsonObject json) {
+ toJson(obj, json.getMap());
+ }
+
+ static void toJson(MetricsOptions obj, java.util.Map<String, Object> json) {
+ json.put("enabled", obj.isEnabled());
+ }
+}
diff --git a/src/main/generated/io/vertx/core/net/ClientOptionsBaseConverter.java b/src/main/generated/io/vertx/core/net/ClientOptionsBaseConverter.java
new file mode 100644
--- /dev/null
+++ b/src/main/generated/io/vertx/core/net/ClientOptionsBaseConverter.java
@@ -0,0 +1,63 @@
+package io.vertx.core.net;
+
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.JsonArray;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+
+/**
+ * Converter for {@link io.vertx.core.net.ClientOptionsBase}.
+ * NOTE: This class has been automatically generated from the {@link io.vertx.core.net.ClientOptionsBase} original class using Vert.x codegen.
+ */
+ class ClientOptionsBaseConverter {
+
+ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, ClientOptionsBase obj) {
+ for (java.util.Map.Entry<String, Object> member : json) {
+ switch (member.getKey()) {
+ case "connectTimeout":
+ if (member.getValue() instanceof Number) {
+ obj.setConnectTimeout(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "localAddress":
+ if (member.getValue() instanceof String) {
+ obj.setLocalAddress((String)member.getValue());
+ }
+ break;
+ case "metricsName":
+ if (member.getValue() instanceof String) {
+ obj.setMetricsName((String)member.getValue());
+ }
+ break;
+ case "proxyOptions":
+ if (member.getValue() instanceof JsonObject) {
+ obj.setProxyOptions(new io.vertx.core.net.ProxyOptions((JsonObject)member.getValue()));
+ }
+ break;
+ case "trustAll":
+ if (member.getValue() instanceof Boolean) {
+ obj.setTrustAll((Boolean)member.getValue());
+ }
+ break;
+ }
+ }
+ }
+
+ static void toJson(ClientOptionsBase obj, JsonObject json) {
+ toJson(obj, json.getMap());
+ }
+
+ static void toJson(ClientOptionsBase obj, java.util.Map<String, Object> json) {
+ json.put("connectTimeout", obj.getConnectTimeout());
+ if (obj.getLocalAddress() != null) {
+ json.put("localAddress", obj.getLocalAddress());
+ }
+ if (obj.getMetricsName() != null) {
+ json.put("metricsName", obj.getMetricsName());
+ }
+ if (obj.getProxyOptions() != null) {
+ json.put("proxyOptions", obj.getProxyOptions().toJson());
+ }
+ json.put("trustAll", obj.isTrustAll());
+ }
+}
diff --git a/src/main/generated/io/vertx/core/net/JksOptionsConverter.java b/src/main/generated/io/vertx/core/net/JksOptionsConverter.java
new file mode 100644
--- /dev/null
+++ b/src/main/generated/io/vertx/core/net/JksOptionsConverter.java
@@ -0,0 +1,51 @@
+package io.vertx.core.net;
+
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.JsonArray;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+
+/**
+ * Converter for {@link io.vertx.core.net.JksOptions}.
+ * NOTE: This class has been automatically generated from the {@link io.vertx.core.net.JksOptions} original class using Vert.x codegen.
+ */
+public class JksOptionsConverter {
+
+ public static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, JksOptions obj) {
+ for (java.util.Map.Entry<String, Object> member : json) {
+ switch (member.getKey()) {
+ case "password":
+ if (member.getValue() instanceof String) {
+ obj.setPassword((String)member.getValue());
+ }
+ break;
+ case "path":
+ if (member.getValue() instanceof String) {
+ obj.setPath((String)member.getValue());
+ }
+ break;
+ case "value":
+ if (member.getValue() instanceof String) {
+ obj.setValue(io.vertx.core.buffer.Buffer.buffer(java.util.Base64.getDecoder().decode((String)member.getValue())));
+ }
+ break;
+ }
+ }
+ }
+
+ public static void toJson(JksOptions obj, JsonObject json) {
+ toJson(obj, json.getMap());
+ }
+
+ public static void toJson(JksOptions obj, java.util.Map<String, Object> json) {
+ if (obj.getPassword() != null) {
+ json.put("password", obj.getPassword());
+ }
+ if (obj.getPath() != null) {
+ json.put("path", obj.getPath());
+ }
+ if (obj.getValue() != null) {
+ json.put("value", java.util.Base64.getEncoder().encodeToString(obj.getValue().getBytes()));
+ }
+ }
+}
diff --git a/src/main/generated/io/vertx/core/net/KeyStoreOptionsConverter.java b/src/main/generated/io/vertx/core/net/KeyStoreOptionsConverter.java
new file mode 100644
--- /dev/null
+++ b/src/main/generated/io/vertx/core/net/KeyStoreOptionsConverter.java
@@ -0,0 +1,67 @@
+package io.vertx.core.net;
+
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.JsonArray;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+
+/**
+ * Converter for {@link io.vertx.core.net.KeyStoreOptions}.
+ * NOTE: This class has been automatically generated from the {@link io.vertx.core.net.KeyStoreOptions} original class using Vert.x codegen.
+ */
+public class KeyStoreOptionsConverter {
+
+ public static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, KeyStoreOptions obj) {
+ for (java.util.Map.Entry<String, Object> member : json) {
+ switch (member.getKey()) {
+ case "password":
+ if (member.getValue() instanceof String) {
+ obj.setPassword((String)member.getValue());
+ }
+ break;
+ case "path":
+ if (member.getValue() instanceof String) {
+ obj.setPath((String)member.getValue());
+ }
+ break;
+ case "provider":
+ if (member.getValue() instanceof String) {
+ obj.setProvider((String)member.getValue());
+ }
+ break;
+ case "type":
+ if (member.getValue() instanceof String) {
+ obj.setType((String)member.getValue());
+ }
+ break;
+ case "value":
+ if (member.getValue() instanceof String) {
+ obj.setValue(io.vertx.core.buffer.Buffer.buffer(java.util.Base64.getDecoder().decode((String)member.getValue())));
+ }
+ break;
+ }
+ }
+ }
+
+ public static void toJson(KeyStoreOptions obj, JsonObject json) {
+ toJson(obj, json.getMap());
+ }
+
+ public static void toJson(KeyStoreOptions obj, java.util.Map<String, Object> json) {
+ if (obj.getPassword() != null) {
+ json.put("password", obj.getPassword());
+ }
+ if (obj.getPath() != null) {
+ json.put("path", obj.getPath());
+ }
+ if (obj.getProvider() != null) {
+ json.put("provider", obj.getProvider());
+ }
+ if (obj.getType() != null) {
+ json.put("type", obj.getType());
+ }
+ if (obj.getValue() != null) {
+ json.put("value", java.util.Base64.getEncoder().encodeToString(obj.getValue().getBytes()));
+ }
+ }
+}
diff --git a/src/main/generated/io/vertx/core/net/NetClientOptionsConverter.java b/src/main/generated/io/vertx/core/net/NetClientOptionsConverter.java
new file mode 100644
--- /dev/null
+++ b/src/main/generated/io/vertx/core/net/NetClientOptionsConverter.java
@@ -0,0 +1,47 @@
+package io.vertx.core.net;
+
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.JsonArray;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+
+/**
+ * Converter for {@link io.vertx.core.net.NetClientOptions}.
+ * NOTE: This class has been automatically generated from the {@link io.vertx.core.net.NetClientOptions} original class using Vert.x codegen.
+ */
+ class NetClientOptionsConverter {
+
+ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, NetClientOptions obj) {
+ for (java.util.Map.Entry<String, Object> member : json) {
+ switch (member.getKey()) {
+ case "hostnameVerificationAlgorithm":
+ if (member.getValue() instanceof String) {
+ obj.setHostnameVerificationAlgorithm((String)member.getValue());
+ }
+ break;
+ case "reconnectAttempts":
+ if (member.getValue() instanceof Number) {
+ obj.setReconnectAttempts(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "reconnectInterval":
+ if (member.getValue() instanceof Number) {
+ obj.setReconnectInterval(((Number)member.getValue()).longValue());
+ }
+ break;
+ }
+ }
+ }
+
+ static void toJson(NetClientOptions obj, JsonObject json) {
+ toJson(obj, json.getMap());
+ }
+
+ static void toJson(NetClientOptions obj, java.util.Map<String, Object> json) {
+ if (obj.getHostnameVerificationAlgorithm() != null) {
+ json.put("hostnameVerificationAlgorithm", obj.getHostnameVerificationAlgorithm());
+ }
+ json.put("reconnectAttempts", obj.getReconnectAttempts());
+ json.put("reconnectInterval", obj.getReconnectInterval());
+ }
+}
diff --git a/src/main/generated/io/vertx/core/net/NetServerOptionsConverter.java b/src/main/generated/io/vertx/core/net/NetServerOptionsConverter.java
new file mode 100644
--- /dev/null
+++ b/src/main/generated/io/vertx/core/net/NetServerOptionsConverter.java
@@ -0,0 +1,67 @@
+package io.vertx.core.net;
+
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.JsonArray;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+
+/**
+ * Converter for {@link io.vertx.core.net.NetServerOptions}.
+ * NOTE: This class has been automatically generated from the {@link io.vertx.core.net.NetServerOptions} original class using Vert.x codegen.
+ */
+ class NetServerOptionsConverter {
+
+ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, NetServerOptions obj) {
+ for (java.util.Map.Entry<String, Object> member : json) {
+ switch (member.getKey()) {
+ case "acceptBacklog":
+ if (member.getValue() instanceof Number) {
+ obj.setAcceptBacklog(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "clientAuth":
+ if (member.getValue() instanceof String) {
+ obj.setClientAuth(io.vertx.core.http.ClientAuth.valueOf((String)member.getValue()));
+ }
+ break;
+ case "clientAuthRequired":
+ if (member.getValue() instanceof Boolean) {
+ obj.setClientAuthRequired((Boolean)member.getValue());
+ }
+ break;
+ case "host":
+ if (member.getValue() instanceof String) {
+ obj.setHost((String)member.getValue());
+ }
+ break;
+ case "port":
+ if (member.getValue() instanceof Number) {
+ obj.setPort(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "sni":
+ if (member.getValue() instanceof Boolean) {
+ obj.setSni((Boolean)member.getValue());
+ }
+ break;
+ }
+ }
+ }
+
+ static void toJson(NetServerOptions obj, JsonObject json) {
+ toJson(obj, json.getMap());
+ }
+
+ static void toJson(NetServerOptions obj, java.util.Map<String, Object> json) {
+ json.put("acceptBacklog", obj.getAcceptBacklog());
+ if (obj.getClientAuth() != null) {
+ json.put("clientAuth", obj.getClientAuth().name());
+ }
+ json.put("clientAuthRequired", obj.isClientAuthRequired());
+ if (obj.getHost() != null) {
+ json.put("host", obj.getHost());
+ }
+ json.put("port", obj.getPort());
+ json.put("sni", obj.isSni());
+ }
+}
diff --git a/src/main/generated/io/vertx/core/net/NetworkOptionsConverter.java b/src/main/generated/io/vertx/core/net/NetworkOptionsConverter.java
new file mode 100644
--- /dev/null
+++ b/src/main/generated/io/vertx/core/net/NetworkOptionsConverter.java
@@ -0,0 +1,63 @@
+package io.vertx.core.net;
+
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.JsonArray;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+
+/**
+ * Converter for {@link io.vertx.core.net.NetworkOptions}.
+ * NOTE: This class has been automatically generated from the {@link io.vertx.core.net.NetworkOptions} original class using Vert.x codegen.
+ */
+ class NetworkOptionsConverter {
+
+ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, NetworkOptions obj) {
+ for (java.util.Map.Entry<String, Object> member : json) {
+ switch (member.getKey()) {
+ case "logActivity":
+ if (member.getValue() instanceof Boolean) {
+ obj.setLogActivity((Boolean)member.getValue());
+ }
+ break;
+ case "receiveBufferSize":
+ if (member.getValue() instanceof Number) {
+ obj.setReceiveBufferSize(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "reuseAddress":
+ if (member.getValue() instanceof Boolean) {
+ obj.setReuseAddress((Boolean)member.getValue());
+ }
+ break;
+ case "reusePort":
+ if (member.getValue() instanceof Boolean) {
+ obj.setReusePort((Boolean)member.getValue());
+ }
+ break;
+ case "sendBufferSize":
+ if (member.getValue() instanceof Number) {
+ obj.setSendBufferSize(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "trafficClass":
+ if (member.getValue() instanceof Number) {
+ obj.setTrafficClass(((Number)member.getValue()).intValue());
+ }
+ break;
+ }
+ }
+ }
+
+ static void toJson(NetworkOptions obj, JsonObject json) {
+ toJson(obj, json.getMap());
+ }
+
+ static void toJson(NetworkOptions obj, java.util.Map<String, Object> json) {
+ json.put("logActivity", obj.getLogActivity());
+ json.put("receiveBufferSize", obj.getReceiveBufferSize());
+ json.put("reuseAddress", obj.isReuseAddress());
+ json.put("reusePort", obj.isReusePort());
+ json.put("sendBufferSize", obj.getSendBufferSize());
+ json.put("trafficClass", obj.getTrafficClass());
+ }
+}
diff --git a/src/main/generated/io/vertx/core/net/OpenSSLEngineOptionsConverter.java b/src/main/generated/io/vertx/core/net/OpenSSLEngineOptionsConverter.java
new file mode 100644
--- /dev/null
+++ b/src/main/generated/io/vertx/core/net/OpenSSLEngineOptionsConverter.java
@@ -0,0 +1,33 @@
+package io.vertx.core.net;
+
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.JsonArray;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+
+/**
+ * Converter for {@link io.vertx.core.net.OpenSSLEngineOptions}.
+ * NOTE: This class has been automatically generated from the {@link io.vertx.core.net.OpenSSLEngineOptions} original class using Vert.x codegen.
+ */
+ class OpenSSLEngineOptionsConverter {
+
+ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, OpenSSLEngineOptions obj) {
+ for (java.util.Map.Entry<String, Object> member : json) {
+ switch (member.getKey()) {
+ case "sessionCacheEnabled":
+ if (member.getValue() instanceof Boolean) {
+ obj.setSessionCacheEnabled((Boolean)member.getValue());
+ }
+ break;
+ }
+ }
+ }
+
+ static void toJson(OpenSSLEngineOptions obj, JsonObject json) {
+ toJson(obj, json.getMap());
+ }
+
+ static void toJson(OpenSSLEngineOptions obj, java.util.Map<String, Object> json) {
+ json.put("sessionCacheEnabled", obj.isSessionCacheEnabled());
+ }
+}
diff --git a/src/main/generated/io/vertx/core/net/PemKeyCertOptionsConverter.java b/src/main/generated/io/vertx/core/net/PemKeyCertOptionsConverter.java
new file mode 100644
--- /dev/null
+++ b/src/main/generated/io/vertx/core/net/PemKeyCertOptionsConverter.java
@@ -0,0 +1,107 @@
+package io.vertx.core.net;
+
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.JsonArray;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+
+/**
+ * Converter for {@link io.vertx.core.net.PemKeyCertOptions}.
+ * NOTE: This class has been automatically generated from the {@link io.vertx.core.net.PemKeyCertOptions} original class using Vert.x codegen.
+ */
+ class PemKeyCertOptionsConverter {
+
+ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, PemKeyCertOptions obj) {
+ for (java.util.Map.Entry<String, Object> member : json) {
+ switch (member.getKey()) {
+ case "certPath":
+ if (member.getValue() instanceof String) {
+ obj.setCertPath((String)member.getValue());
+ }
+ break;
+ case "certPaths":
+ if (member.getValue() instanceof JsonArray) {
+ java.util.ArrayList<java.lang.String> list = new java.util.ArrayList<>();
+ ((Iterable<Object>)member.getValue()).forEach( item -> {
+ if (item instanceof String)
+ list.add((String)item);
+ });
+ obj.setCertPaths(list);
+ }
+ break;
+ case "certValue":
+ if (member.getValue() instanceof String) {
+ obj.setCertValue(io.vertx.core.buffer.Buffer.buffer(java.util.Base64.getDecoder().decode((String)member.getValue())));
+ }
+ break;
+ case "certValues":
+ if (member.getValue() instanceof JsonArray) {
+ java.util.ArrayList<io.vertx.core.buffer.Buffer> list = new java.util.ArrayList<>();
+ ((Iterable<Object>)member.getValue()).forEach( item -> {
+ if (item instanceof String)
+ list.add(io.vertx.core.buffer.Buffer.buffer(java.util.Base64.getDecoder().decode((String)item)));
+ });
+ obj.setCertValues(list);
+ }
+ break;
+ case "keyPath":
+ if (member.getValue() instanceof String) {
+ obj.setKeyPath((String)member.getValue());
+ }
+ break;
+ case "keyPaths":
+ if (member.getValue() instanceof JsonArray) {
+ java.util.ArrayList<java.lang.String> list = new java.util.ArrayList<>();
+ ((Iterable<Object>)member.getValue()).forEach( item -> {
+ if (item instanceof String)
+ list.add((String)item);
+ });
+ obj.setKeyPaths(list);
+ }
+ break;
+ case "keyValue":
+ if (member.getValue() instanceof String) {
+ obj.setKeyValue(io.vertx.core.buffer.Buffer.buffer(java.util.Base64.getDecoder().decode((String)member.getValue())));
+ }
+ break;
+ case "keyValues":
+ if (member.getValue() instanceof JsonArray) {
+ java.util.ArrayList<io.vertx.core.buffer.Buffer> list = new java.util.ArrayList<>();
+ ((Iterable<Object>)member.getValue()).forEach( item -> {
+ if (item instanceof String)
+ list.add(io.vertx.core.buffer.Buffer.buffer(java.util.Base64.getDecoder().decode((String)item)));
+ });
+ obj.setKeyValues(list);
+ }
+ break;
+ }
+ }
+ }
+
+ static void toJson(PemKeyCertOptions obj, JsonObject json) {
+ toJson(obj, json.getMap());
+ }
+
+ static void toJson(PemKeyCertOptions obj, java.util.Map<String, Object> json) {
+ if (obj.getCertPaths() != null) {
+ JsonArray array = new JsonArray();
+ obj.getCertPaths().forEach(item -> array.add(item));
+ json.put("certPaths", array);
+ }
+ if (obj.getCertValues() != null) {
+ JsonArray array = new JsonArray();
+ obj.getCertValues().forEach(item -> array.add(java.util.Base64.getEncoder().encodeToString(item.getBytes())));
+ json.put("certValues", array);
+ }
+ if (obj.getKeyPaths() != null) {
+ JsonArray array = new JsonArray();
+ obj.getKeyPaths().forEach(item -> array.add(item));
+ json.put("keyPaths", array);
+ }
+ if (obj.getKeyValues() != null) {
+ JsonArray array = new JsonArray();
+ obj.getKeyValues().forEach(item -> array.add(java.util.Base64.getEncoder().encodeToString(item.getBytes())));
+ json.put("keyValues", array);
+ }
+ }
+}
diff --git a/src/main/generated/io/vertx/core/net/PemTrustOptionsConverter.java b/src/main/generated/io/vertx/core/net/PemTrustOptionsConverter.java
new file mode 100644
--- /dev/null
+++ b/src/main/generated/io/vertx/core/net/PemTrustOptionsConverter.java
@@ -0,0 +1,53 @@
+package io.vertx.core.net;
+
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.JsonArray;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+
+/**
+ * Converter for {@link io.vertx.core.net.PemTrustOptions}.
+ * NOTE: This class has been automatically generated from the {@link io.vertx.core.net.PemTrustOptions} original class using Vert.x codegen.
+ */
+ class PemTrustOptionsConverter {
+
+ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, PemTrustOptions obj) {
+ for (java.util.Map.Entry<String, Object> member : json) {
+ switch (member.getKey()) {
+ case "certPaths":
+ if (member.getValue() instanceof JsonArray) {
+ ((Iterable<Object>)member.getValue()).forEach( item -> {
+ if (item instanceof String)
+ obj.addCertPath((String)item);
+ });
+ }
+ break;
+ case "certValues":
+ if (member.getValue() instanceof JsonArray) {
+ ((Iterable<Object>)member.getValue()).forEach( item -> {
+ if (item instanceof String)
+ obj.addCertValue(io.vertx.core.buffer.Buffer.buffer(java.util.Base64.getDecoder().decode((String)item)));
+ });
+ }
+ break;
+ }
+ }
+ }
+
+ static void toJson(PemTrustOptions obj, JsonObject json) {
+ toJson(obj, json.getMap());
+ }
+
+ static void toJson(PemTrustOptions obj, java.util.Map<String, Object> json) {
+ if (obj.getCertPaths() != null) {
+ JsonArray array = new JsonArray();
+ obj.getCertPaths().forEach(item -> array.add(item));
+ json.put("certPaths", array);
+ }
+ if (obj.getCertValues() != null) {
+ JsonArray array = new JsonArray();
+ obj.getCertValues().forEach(item -> array.add(java.util.Base64.getEncoder().encodeToString(item.getBytes())));
+ json.put("certValues", array);
+ }
+ }
+}
diff --git a/src/main/generated/io/vertx/core/net/PfxOptionsConverter.java b/src/main/generated/io/vertx/core/net/PfxOptionsConverter.java
new file mode 100644
--- /dev/null
+++ b/src/main/generated/io/vertx/core/net/PfxOptionsConverter.java
@@ -0,0 +1,51 @@
+package io.vertx.core.net;
+
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.JsonArray;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+
+/**
+ * Converter for {@link io.vertx.core.net.PfxOptions}.
+ * NOTE: This class has been automatically generated from the {@link io.vertx.core.net.PfxOptions} original class using Vert.x codegen.
+ */
+ class PfxOptionsConverter {
+
+ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, PfxOptions obj) {
+ for (java.util.Map.Entry<String, Object> member : json) {
+ switch (member.getKey()) {
+ case "password":
+ if (member.getValue() instanceof String) {
+ obj.setPassword((String)member.getValue());
+ }
+ break;
+ case "path":
+ if (member.getValue() instanceof String) {
+ obj.setPath((String)member.getValue());
+ }
+ break;
+ case "value":
+ if (member.getValue() instanceof String) {
+ obj.setValue(io.vertx.core.buffer.Buffer.buffer(java.util.Base64.getDecoder().decode((String)member.getValue())));
+ }
+ break;
+ }
+ }
+ }
+
+ static void toJson(PfxOptions obj, JsonObject json) {
+ toJson(obj, json.getMap());
+ }
+
+ static void toJson(PfxOptions obj, java.util.Map<String, Object> json) {
+ if (obj.getPassword() != null) {
+ json.put("password", obj.getPassword());
+ }
+ if (obj.getPath() != null) {
+ json.put("path", obj.getPath());
+ }
+ if (obj.getValue() != null) {
+ json.put("value", java.util.Base64.getEncoder().encodeToString(obj.getValue().getBytes()));
+ }
+ }
+}
diff --git a/src/main/generated/io/vertx/core/net/ProxyOptionsConverter.java b/src/main/generated/io/vertx/core/net/ProxyOptionsConverter.java
new file mode 100644
--- /dev/null
+++ b/src/main/generated/io/vertx/core/net/ProxyOptionsConverter.java
@@ -0,0 +1,65 @@
+package io.vertx.core.net;
+
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.JsonArray;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+
+/**
+ * Converter for {@link io.vertx.core.net.ProxyOptions}.
+ * NOTE: This class has been automatically generated from the {@link io.vertx.core.net.ProxyOptions} original class using Vert.x codegen.
+ */
+ class ProxyOptionsConverter {
+
+ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, ProxyOptions obj) {
+ for (java.util.Map.Entry<String, Object> member : json) {
+ switch (member.getKey()) {
+ case "host":
+ if (member.getValue() instanceof String) {
+ obj.setHost((String)member.getValue());
+ }
+ break;
+ case "password":
+ if (member.getValue() instanceof String) {
+ obj.setPassword((String)member.getValue());
+ }
+ break;
+ case "port":
+ if (member.getValue() instanceof Number) {
+ obj.setPort(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "type":
+ if (member.getValue() instanceof String) {
+ obj.setType(io.vertx.core.net.ProxyType.valueOf((String)member.getValue()));
+ }
+ break;
+ case "username":
+ if (member.getValue() instanceof String) {
+ obj.setUsername((String)member.getValue());
+ }
+ break;
+ }
+ }
+ }
+
+ static void toJson(ProxyOptions obj, JsonObject json) {
+ toJson(obj, json.getMap());
+ }
+
+ static void toJson(ProxyOptions obj, java.util.Map<String, Object> json) {
+ if (obj.getHost() != null) {
+ json.put("host", obj.getHost());
+ }
+ if (obj.getPassword() != null) {
+ json.put("password", obj.getPassword());
+ }
+ json.put("port", obj.getPort());
+ if (obj.getType() != null) {
+ json.put("type", obj.getType().name());
+ }
+ if (obj.getUsername() != null) {
+ json.put("username", obj.getUsername());
+ }
+ }
+}
diff --git a/src/main/generated/io/vertx/core/net/TCPSSLOptionsConverter.java b/src/main/generated/io/vertx/core/net/TCPSSLOptionsConverter.java
new file mode 100644
--- /dev/null
+++ b/src/main/generated/io/vertx/core/net/TCPSSLOptionsConverter.java
@@ -0,0 +1,227 @@
+package io.vertx.core.net;
+
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.JsonArray;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+
+/**
+ * Converter for {@link io.vertx.core.net.TCPSSLOptions}.
+ * NOTE: This class has been automatically generated from the {@link io.vertx.core.net.TCPSSLOptions} original class using Vert.x codegen.
+ */
+ class TCPSSLOptionsConverter {
+
+ static void fromJson(Iterable<java.util.Map.Entry<String, Object>> json, TCPSSLOptions obj) {
+ for (java.util.Map.Entry<String, Object> member : json) {
+ switch (member.getKey()) {
+ case "crlPaths":
+ if (member.getValue() instanceof JsonArray) {
+ ((Iterable<Object>)member.getValue()).forEach( item -> {
+ if (item instanceof String)
+ obj.addCrlPath((String)item);
+ });
+ }
+ break;
+ case "crlValues":
+ if (member.getValue() instanceof JsonArray) {
+ ((Iterable<Object>)member.getValue()).forEach( item -> {
+ if (item instanceof String)
+ obj.addCrlValue(io.vertx.core.buffer.Buffer.buffer(java.util.Base64.getDecoder().decode((String)item)));
+ });
+ }
+ break;
+ case "enabledCipherSuites":
+ if (member.getValue() instanceof JsonArray) {
+ ((Iterable<Object>)member.getValue()).forEach( item -> {
+ if (item instanceof String)
+ obj.addEnabledCipherSuite((String)item);
+ });
+ }
+ break;
+ case "enabledSecureTransportProtocols":
+ if (member.getValue() instanceof JsonArray) {
+ java.util.LinkedHashSet<java.lang.String> list = new java.util.LinkedHashSet<>();
+ ((Iterable<Object>)member.getValue()).forEach( item -> {
+ if (item instanceof String)
+ list.add((String)item);
+ });
+ obj.setEnabledSecureTransportProtocols(list);
+ }
+ break;
+ case "idleTimeout":
+ if (member.getValue() instanceof Number) {
+ obj.setIdleTimeout(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "idleTimeoutUnit":
+ if (member.getValue() instanceof String) {
+ obj.setIdleTimeoutUnit(java.util.concurrent.TimeUnit.valueOf((String)member.getValue()));
+ }
+ break;
+ case "jdkSslEngineOptions":
+ if (member.getValue() instanceof JsonObject) {
+ obj.setJdkSslEngineOptions(new io.vertx.core.net.JdkSSLEngineOptions((JsonObject)member.getValue()));
+ }
+ break;
+ case "keyStoreOptions":
+ if (member.getValue() instanceof JsonObject) {
+ obj.setKeyStoreOptions(new io.vertx.core.net.JksOptions((JsonObject)member.getValue()));
+ }
+ break;
+ case "openSslEngineOptions":
+ if (member.getValue() instanceof JsonObject) {
+ obj.setOpenSslEngineOptions(new io.vertx.core.net.OpenSSLEngineOptions((JsonObject)member.getValue()));
+ }
+ break;
+ case "pemKeyCertOptions":
+ if (member.getValue() instanceof JsonObject) {
+ obj.setPemKeyCertOptions(new io.vertx.core.net.PemKeyCertOptions((JsonObject)member.getValue()));
+ }
+ break;
+ case "pemTrustOptions":
+ if (member.getValue() instanceof JsonObject) {
+ obj.setPemTrustOptions(new io.vertx.core.net.PemTrustOptions((JsonObject)member.getValue()));
+ }
+ break;
+ case "pfxKeyCertOptions":
+ if (member.getValue() instanceof JsonObject) {
+ obj.setPfxKeyCertOptions(new io.vertx.core.net.PfxOptions((JsonObject)member.getValue()));
+ }
+ break;
+ case "pfxTrustOptions":
+ if (member.getValue() instanceof JsonObject) {
+ obj.setPfxTrustOptions(new io.vertx.core.net.PfxOptions((JsonObject)member.getValue()));
+ }
+ break;
+ case "soLinger":
+ if (member.getValue() instanceof Number) {
+ obj.setSoLinger(((Number)member.getValue()).intValue());
+ }
+ break;
+ case "ssl":
+ if (member.getValue() instanceof Boolean) {
+ obj.setSsl((Boolean)member.getValue());
+ }
+ break;
+ case "sslHandshakeTimeout":
+ if (member.getValue() instanceof Number) {
+ obj.setSslHandshakeTimeout(((Number)member.getValue()).longValue());
+ }
+ break;
+ case "sslHandshakeTimeoutUnit":
+ if (member.getValue() instanceof String) {
+ obj.setSslHandshakeTimeoutUnit(java.util.concurrent.TimeUnit.valueOf((String)member.getValue()));
+ }
+ break;
+ case "tcpCork":
+ if (member.getValue() instanceof Boolean) {
+ obj.setTcpCork((Boolean)member.getValue());
+ }
+ break;
+ case "tcpFastOpen":
+ if (member.getValue() instanceof Boolean) {
+ obj.setTcpFastOpen((Boolean)member.getValue());
+ }
+ break;
+ case "tcpKeepAlive":
+ if (member.getValue() instanceof Boolean) {
+ obj.setTcpKeepAlive((Boolean)member.getValue());
+ }
+ break;
+ case "tcpNoDelay":
+ if (member.getValue() instanceof Boolean) {
+ obj.setTcpNoDelay((Boolean)member.getValue());
+ }
+ break;
+ case "tcpQuickAck":
+ if (member.getValue() instanceof Boolean) {
+ obj.setTcpQuickAck((Boolean)member.getValue());
+ }
+ break;
+ case "trustStoreOptions":
+ if (member.getValue() instanceof JsonObject) {
+ obj.setTrustStoreOptions(new io.vertx.core.net.JksOptions((JsonObject)member.getValue()));
+ }
+ break;
+ case "useAlpn":
+ if (member.getValue() instanceof Boolean) {
+ obj.setUseAlpn((Boolean)member.getValue());
+ }
+ break;
+ case "usePooledBuffers":
+ if (member.getValue() instanceof Boolean) {
+ obj.setUsePooledBuffers((Boolean)member.getValue());
+ }
+ break;
+ }
+ }
+ }
+
+ static void toJson(TCPSSLOptions obj, JsonObject json) {
+ toJson(obj, json.getMap());
+ }
+
+ static void toJson(TCPSSLOptions obj, java.util.Map<String, Object> json) {
+ if (obj.getCrlPaths() != null) {
+ JsonArray array = new JsonArray();
+ obj.getCrlPaths().forEach(item -> array.add(item));
+ json.put("crlPaths", array);
+ }
+ if (obj.getCrlValues() != null) {
+ JsonArray array = new JsonArray();
+ obj.getCrlValues().forEach(item -> array.add(java.util.Base64.getEncoder().encodeToString(item.getBytes())));
+ json.put("crlValues", array);
+ }
+ if (obj.getEnabledCipherSuites() != null) {
+ JsonArray array = new JsonArray();
+ obj.getEnabledCipherSuites().forEach(item -> array.add(item));
+ json.put("enabledCipherSuites", array);
+ }
+ if (obj.getEnabledSecureTransportProtocols() != null) {
+ JsonArray array = new JsonArray();
+ obj.getEnabledSecureTransportProtocols().forEach(item -> array.add(item));
+ json.put("enabledSecureTransportProtocols", array);
+ }
+ json.put("idleTimeout", obj.getIdleTimeout());
+ if (obj.getIdleTimeoutUnit() != null) {
+ json.put("idleTimeoutUnit", obj.getIdleTimeoutUnit().name());
+ }
+ if (obj.getJdkSslEngineOptions() != null) {
+ json.put("jdkSslEngineOptions", obj.getJdkSslEngineOptions().toJson());
+ }
+ if (obj.getKeyStoreOptions() != null) {
+ json.put("keyStoreOptions", obj.getKeyStoreOptions().toJson());
+ }
+ if (obj.getOpenSslEngineOptions() != null) {
+ json.put("openSslEngineOptions", obj.getOpenSslEngineOptions().toJson());
+ }
+ if (obj.getPemKeyCertOptions() != null) {
+ json.put("pemKeyCertOptions", obj.getPemKeyCertOptions().toJson());
+ }
+ if (obj.getPemTrustOptions() != null) {
+ json.put("pemTrustOptions", obj.getPemTrustOptions().toJson());
+ }
+ if (obj.getPfxKeyCertOptions() != null) {
+ json.put("pfxKeyCertOptions", obj.getPfxKeyCertOptions().toJson());
+ }
+ if (obj.getPfxTrustOptions() != null) {
+ json.put("pfxTrustOptions", obj.getPfxTrustOptions().toJson());
+ }
+ json.put("soLinger", obj.getSoLinger());
+ json.put("ssl", obj.isSsl());
+ json.put("sslHandshakeTimeout", obj.getSslHandshakeTimeout());
+ if (obj.getSslHandshakeTimeoutUnit() != null) {
+ json.put("sslHandshakeTimeoutUnit", obj.getSslHandshakeTimeoutUnit().name());
+ }
+ json.put("tcpCork", obj.isTcpCork());
+ json.put("tcpFastOpen", obj.isTcpFastOpen());
+ json.put("tcpKeepAlive", obj.isTcpKeepAlive());
+ json.put("tcpNoDelay", obj.isTcpNoDelay());
+ json.put("tcpQuickAck", obj.isTcpQuickAck());
+ if (obj.getTrustStoreOptions() != null) {
+ json.put("trustStoreOptions", obj.getTrustStoreOptions().toJson());
+ }
+ json.put("useAlpn", obj.isUseAlpn());
+ json.put("usePooledBuffers", obj.isUsePooledBuffers());
+ }
+}
diff --git a/src/main/java/io/vertx/core/http/HttpClientOptions.java b/src/main/java/io/vertx/core/http/HttpClientOptions.java
--- a/src/main/java/io/vertx/core/http/HttpClientOptions.java
+++ b/src/main/java/io/vertx/core/http/HttpClientOptions.java
@@ -193,9 +193,9 @@ public class HttpClientOptions extends ClientOptionsBase {
public static final int DEFAULT_POOL_CLEANER_PERIOD = 1000;
/**
- * Default WebSocket closing timeout = 10000 ms (10 second)
+   * Default WebSocket closing timeout = 10 seconds
*/
- public static final int DEFAULT_WEBSOCKET_CLOSING_TIMEOUT = 10_000;
+ public static final int DEFAULT_WEBSOCKET_CLOSING_TIMEOUT = 10;
private boolean verifyHost = true;
private int maxPoolSize;
@@ -1383,7 +1383,9 @@ public int getWebSocketClosingTimeout() {
* <p> When a WebSocket is closed, the server should close the TCP connection. This timeout will close
* the TCP connection on the client when it expires.
*
- * <p> Set {@code 0L} or a negative value to disable.
+   * <p> Setting it to {@code 0L} closes the TCP connection immediately after receiving the close frame.
+ *
+ * <p> Set to a negative value to disable it.
*
   * @param webSocketClosingTimeout the timeout, in seconds
* @return a reference to this, so the API can be used fluently
diff --git a/src/main/java/io/vertx/core/http/HttpServerOptions.java b/src/main/java/io/vertx/core/http/HttpServerOptions.java
--- a/src/main/java/io/vertx/core/http/HttpServerOptions.java
+++ b/src/main/java/io/vertx/core/http/HttpServerOptions.java
@@ -104,17 +104,17 @@ public class HttpServerOptions extends NetServerOptions {
* Default initial buffer size for HttpObjectDecoder = 128 bytes
*/
public static final int DEFAULT_DECODER_INITIAL_BUFFER_SIZE = 128;
-
+
/**
* Default support for WebSockets per-frame deflate compression extension = {@code true}
*/
public static final boolean DEFAULT_PER_FRAME_WEBSOCKET_COMPRESSION_SUPPORTED = true;
-
+
/**
* Default support for WebSockets per-message deflate compression extension = {@code true}
*/
public static final boolean DEFAULT_PER_MESSAGE_WEBSOCKET_COMPRESSION_SUPPORTED = true;
-
+
/**
* Default WebSocket deflate compression level = 6
*/
@@ -130,6 +130,11 @@ public class HttpServerOptions extends NetServerOptions {
*/
public static final boolean DEFAULT_WEBSOCKET_PREFERRED_CLIENT_NO_CONTEXT = false;
+ /**
+   * Default WebSocket closing timeout = 10 seconds
+ */
+ public static final int DEFAULT_WEBSOCKET_CLOSING_TIMEOUT = 10;
+
private boolean compressionSupported;
private int compressionLevel;
private int maxWebSocketFrameSize;
@@ -150,6 +155,7 @@ public class HttpServerOptions extends NetServerOptions {
private int webSocketCompressionLevel;
private boolean webSocketAllowServerNoContext;
private boolean webSocketPreferredClientNoContext;
+ private int webSocketClosingTimeout;
/**
* Default constructor
@@ -187,6 +193,7 @@ public HttpServerOptions(HttpServerOptions other) {
this.webSocketCompressionLevel = other.webSocketCompressionLevel;
this.webSocketPreferredClientNoContext = other.webSocketPreferredClientNoContext;
this.webSocketAllowServerNoContext = other.webSocketAllowServerNoContext;
+ this.webSocketClosingTimeout = other.webSocketClosingTimeout;
}
/**
@@ -232,6 +239,7 @@ private void init() {
webSocketCompressionLevel = DEFAULT_WEBSOCKET_COMPRESSION_LEVEL;
webSocketPreferredClientNoContext = DEFAULT_WEBSOCKET_PREFERRED_CLIENT_NO_CONTEXT;
webSocketAllowServerNoContext = DEFAULT_WEBSOCKET_ALLOW_SERVER_NO_CONTEXT;
+ webSocketClosingTimeout = DEFAULT_WEBSOCKET_CLOSING_TIMEOUT;
}
@Override
@@ -1056,7 +1064,35 @@ public boolean getWebsocketPreferredClientNoContext() {
public boolean getWebSocketPreferredClientNoContext() {
return this.webSocketPreferredClientNoContext;
}
-
+
+ /**
+   * @return the amount of time (in seconds) a server WebSocket will wait until it closes the TCP connection after sending a close frame
+ */
+ public int getWebSocketClosingTimeout() {
+ return webSocketClosingTimeout;
+ }
+
+ /**
+ * Set the amount of time a server WebSocket will wait until it closes the TCP connection
+ * after sending a close frame.
+ *
+   * <p> When a server closes a WebSocket, it should wait for the client close frame before closing the TCP connection.
+   * This timeout will close the TCP connection on the server when it expires. When the TCP
+   * connection is closed before receiving the close frame, the {@link WebSocket#exceptionHandler} instead
+   * of the {@link WebSocket#endHandler} will be called.
+ *
+   * <p> Setting it to {@code 0L} closes the TCP connection immediately after sending the close frame.
+ *
+ * <p> Set to a negative value to disable it.
+ *
+   * @param webSocketClosingTimeout the timeout, in seconds
+ * @return a reference to this, so the API can be used fluently
+ */
+ public HttpServerOptions setWebSocketClosingTimeout(int webSocketClosingTimeout) {
+ this.webSocketClosingTimeout = webSocketClosingTimeout;
+ return this;
+ }
+
@Override
public boolean equals(Object o) {
if (this == o) return true;
diff --git a/src/main/java/io/vertx/core/http/impl/Http1xServerConnection.java b/src/main/java/io/vertx/core/http/impl/Http1xServerConnection.java
--- a/src/main/java/io/vertx/core/http/impl/Http1xServerConnection.java
+++ b/src/main/java/io/vertx/core/http/impl/Http1xServerConnection.java
@@ -278,8 +278,14 @@ ServerWebSocketImpl createWebSocket(HttpServerRequestImpl request) {
if (handshaker == null) {
return null;
}
- ws = new ServerWebSocketImpl(this, handshaker.version() != WebSocketVersion.V00,
- request, handshaker, options.getMaxWebsocketFrameSize(), options.getMaxWebsocketMessageSize());
+ ws = new ServerWebSocketImpl(
+ this,
+ handshaker.version() != WebSocketVersion.V00,
+ options.getWebSocketClosingTimeout(),
+ request,
+ handshaker,
+ options.getMaxWebsocketFrameSize(),
+ options.getMaxWebsocketMessageSize());
if (METRICS_ENABLED && metrics != null) {
ws.setMetric(metrics.connected(metric(), request.metric(), ws));
}
diff --git a/src/main/java/io/vertx/core/http/impl/ServerWebSocketImpl.java b/src/main/java/io/vertx/core/http/impl/ServerWebSocketImpl.java
--- a/src/main/java/io/vertx/core/http/impl/ServerWebSocketImpl.java
+++ b/src/main/java/io/vertx/core/http/impl/ServerWebSocketImpl.java
@@ -16,7 +16,6 @@
import io.netty.channel.ChannelPipeline;
import io.netty.channel.ChannelPromise;
import io.netty.handler.codec.http.HttpResponseStatus;
-import io.netty.handler.codec.http.websocketx.CloseWebSocketFrame;
import io.netty.handler.codec.http.websocketx.WebSocketServerHandshaker;
import io.vertx.codegen.annotations.Nullable;
import io.vertx.core.AsyncResult;
@@ -48,6 +47,7 @@
public class ServerWebSocketImpl extends WebSocketImplBase<ServerWebSocketImpl> implements ServerWebSocket {
private final Http1xServerConnection conn;
+ private final long closingTimeoutMS;
private final String uri;
private final String path;
private final String query;
@@ -58,12 +58,14 @@ public class ServerWebSocketImpl extends WebSocketImplBase<ServerWebSocketImpl>
ServerWebSocketImpl(Http1xServerConnection conn,
boolean supportsContinuation,
+ long closingTimeout,
HttpServerRequestImpl request,
WebSocketServerHandshaker handshaker,
int maxWebSocketFrameSize,
int maxWebSocketMessageSize) {
super(conn, supportsContinuation, maxWebSocketFrameSize, maxWebSocketMessageSize);
this.conn = conn;
+ this.closingTimeoutMS = closingTimeout >= 0 ? closingTimeout * 1000L : -1L;
this.uri = request.uri();
this.path = request.path();
this.query = request.query();
@@ -121,10 +123,10 @@ public X509Certificate[] peerCertificateChain() throws SSLPeerUnverifiedExceptio
}
@Override
- public void close(short statusCode, @Nullable String reason, Handler<AsyncResult<Void>> handler) {
+ ChannelPromise doClose(short statusCode, String reason, Handler<AsyncResult<Void>> handler) {
synchronized (conn) {
if (closed) {
- return;
+ return null;
}
if (status == null) {
if (handshakePromise == null) {
@@ -134,7 +136,15 @@ public void close(short statusCode, @Nullable String reason, Handler<AsyncResult
}
}
}
- super.close(statusCode, reason, handler);
+ ChannelPromise fut = super.doClose(statusCode, reason, handler);
+ fut.addListener(f -> {
+ if (closingTimeoutMS == 0L) {
+ closeConnection();
+ } else if (closingTimeoutMS > 0L) {
+ initiateConnectionCloseTimeout(closingTimeoutMS);
+ }
+ });
+ return fut;
}
@Override
@@ -231,6 +241,6 @@ public void setHandshake(Future<Integer> future, Handler<AsyncResult<Integer>> h
@Override
protected void doClose() {
- conn.channelHandlerContext().close();
+ closeConnection();
}
}
diff --git a/src/main/java/io/vertx/core/http/impl/WebSocketImpl.java b/src/main/java/io/vertx/core/http/impl/WebSocketImpl.java
--- a/src/main/java/io/vertx/core/http/impl/WebSocketImpl.java
+++ b/src/main/java/io/vertx/core/http/impl/WebSocketImpl.java
@@ -25,7 +25,6 @@
*/
public class WebSocketImpl extends WebSocketImplBase<WebSocketImpl> implements WebSocket {
- private long timerID = -1L;
private final long closingTimeoutMS;
public WebSocketImpl(Http1xClientConnection conn,
@@ -40,9 +39,6 @@ public WebSocketImpl(Http1xClientConnection conn,
@Override
void handleClosed() {
synchronized (conn) {
- if (timerID != -1L) {
- conn.getContext().owner().cancelTimer(timerID);
- }
HttpClientMetrics metrics = ((Http1xClientConnection) conn).metrics();
if (metrics != null) {
metrics.disconnected(getMetric());
@@ -54,13 +50,10 @@ void handleClosed() {
@Override
protected void doClose() {
synchronized (conn) {
- if (closingTimeoutMS > 0L) {
- timerID = conn.getContext().owner().setTimer(closingTimeoutMS, id -> {
- synchronized (conn) {
- timerID = -1L;
- }
- conn.channelHandlerContext().close();
- });
+ if (closingTimeoutMS == 0L) {
+ closeConnection();
+ } else if (closingTimeoutMS > 0L) {
+ initiateConnectionCloseTimeout(closingTimeoutMS);
}
}
}
diff --git a/src/main/java/io/vertx/core/http/impl/WebSocketImplBase.java b/src/main/java/io/vertx/core/http/impl/WebSocketImplBase.java
--- a/src/main/java/io/vertx/core/http/impl/WebSocketImplBase.java
+++ b/src/main/java/io/vertx/core/http/impl/WebSocketImplBase.java
@@ -69,6 +69,7 @@ public abstract class WebSocketImplBase<S extends WebSocketBase> implements WebS
protected boolean closed;
private Short closeStatusCode;
private String closeReason;
+ private long closeTimeoutID = -1L;
private MultiMap headers;
private boolean closeFrameSent;
private Handler<AsyncResult<Void>> closeOpHandler;
@@ -144,10 +145,14 @@ public void close(short statusCode, String reason) {
}
@Override
- public void close(short statusCode, @Nullable String reason, Handler<AsyncResult<Void>> handler) {
+ public void close(short statusCode, String reason, Handler<AsyncResult<Void>> handler) {
+ doClose(statusCode, reason, handler);
+ }
+
+ ChannelPromise doClose(short statusCode, String reason, Handler<AsyncResult<Void>> handler) {
synchronized (conn) {
if (closed) {
- return;
+ return null;
}
closed = true;
closeFrameSent = true;
@@ -157,7 +162,9 @@ public void close(short statusCode, @Nullable String reason, Handler<AsyncResult
// close the WebSocket by sending a close frame with specified payload
ByteBuf byteBuf = HttpUtils.generateWSCloseFrameByteBuf(statusCode, reason);
CloseWebSocketFrame frame = new CloseWebSocketFrame(true, 0, byteBuf);
- conn.writeToChannel(frame);
+ ChannelPromise fut = conn.channelFuture();
+ conn.writeToChannel(frame, fut);
+ return fut;
}
@Override
@@ -432,6 +439,22 @@ protected void handleCloseFrame(boolean echo, short statusCode, String reason) {
protected abstract void doClose();
+ /**
+ * Close the connection.
+ */
+ void closeConnection() {
+ conn.channelHandlerContext().close();
+ }
+
+ void initiateConnectionCloseTimeout(long timeoutMillis) {
+ closeTimeoutID = conn.getContext().owner().setTimer(timeoutMillis, id -> {
+ synchronized (conn) {
+ closeTimeoutID = -1L;
+ }
+ closeConnection();
+ });
+ }
+
private class FrameAggregator implements Handler<WebSocketFrameInternal> {
private Handler<String> textMessageHandler;
private Handler<Buffer> binaryMessageHandler;
@@ -577,6 +600,9 @@ void handleClosed() {
Handler<AsyncResult<Void>> closeOpHandler;
Handler<Throwable> exceptionHandler;
synchronized (conn) {
+ if (closeTimeoutID != -1L) {
+ conn.getContext().owner().cancelTimer(closeTimeoutID);
+ }
closeHandler = this.closeHandler;
exceptionHandler = closeStatusCode == null ? this.exceptionHandler : null;
closeOpHandler = this.closeOpHandler;
| diff --git a/src/test/java/io/vertx/core/http/WebSocketTest.java b/src/test/java/io/vertx/core/http/WebSocketTest.java
--- a/src/test/java/io/vertx/core/http/WebSocketTest.java
+++ b/src/test/java/io/vertx/core/http/WebSocketTest.java
@@ -2846,8 +2846,17 @@ public void testClientCloseHandshake() {
}
@Test
- public void testClientCloseTimeout() {
- waitFor(2);
+ public void testClientConnectionCloseTimeout() {
+ testClientConnectionCloseTimeout(1);
+ }
+
+ @Test
+ public void testClientConnectionCloseImmediately() {
+ testClientConnectionCloseTimeout(0);
+ }
+
+ public void testClientConnectionCloseTimeout(int timeout) {
+ waitFor(3);
List<Object> received = Collections.synchronizedList(new ArrayList<>());
server = vertx.createHttpServer();
server.requestHandler(req -> {
@@ -2868,8 +2877,12 @@ public void testClientCloseTimeout() {
}
});
server.listen(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, onSuccess(v1 -> {
- client = vertx.createHttpClient(new HttpClientOptions().setWebSocketClosingTimeout(1));
+ client = vertx.createHttpClient(new HttpClientOptions().setWebSocketClosingTimeout(timeout));
client.webSocket(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/chat", onSuccess(ws -> {
+ ws.endHandler(v -> {
+ complete();
+ });
+ ws.exceptionHandler(err -> fail());
ws.closeHandler(v -> {
assertEquals(1, received.size());
assertEquals(received.get(0).getClass(), CloseWebSocketFrame.class);
@@ -2882,6 +2895,44 @@ public void testClientCloseTimeout() {
await();
}
+ @Test
+ public void testServerCloseTimeout() {
+ testServerConnectionClose(1);
+ }
+
+ @Test
+ public void testServerImmediateClose() {
+ testServerConnectionClose(0);
+ }
+
+ public void testServerConnectionClose(int timeout) {
+ waitFor(3);
+ server = vertx.createHttpServer(new HttpServerOptions().setWebSocketClosingTimeout(timeout))
+ .webSocketHandler(ws -> {
+ long now = System.currentTimeMillis();
+ ws.endHandler(v -> fail());
+ ws.exceptionHandler(ignore -> complete());
+ ws.closeHandler(v -> {
+ long elapsed = System.currentTimeMillis() - now;
+ assertTrue(timeout <= elapsed && elapsed < 5000);
+ complete();
+ });
+ ws.close();
+ }).listen(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, onSuccess(s -> {
+ client = vertx.createHttpClient();
+ HttpClientRequest request = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/");
+ handshake(request, sock -> {
+ NetSocketInternal soi = (NetSocketInternal) sock;
+ soi.channelHandlerContext().pipeline().addBefore("handler", "encoder", new WebSocket13FrameEncoder(true));
+ soi.channelHandlerContext().pipeline().addBefore("handler", "decoder", new WebSocket13FrameDecoder(false, false, 1000));
+ soi.closeHandler(v -> {
+ complete();
+ });
+ });
+ }));
+ await();
+ }
+
@Test
public void testCloseServer() {
client = vertx.createHttpClient();
| WebSocket server connection close timeout
When a WebSocket is closed by the server, it sends a close frame and waits until it receives the echoed close frame before closing the TCP connection. We should have a timeout that closes the TCP connection, to prevent a server from keeping a WebSocket open indefinitely when it never receives the close frame.
NOTE: this will not happen when an idle timeout is configured on the server, which is a recommended practice.
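
As an illustration, here is a minimal sketch of how the server-side option introduced here could be used (it assumes the `setWebSocketClosingTimeout` setter added by this change; the port and handler body are illustrative only):

```java
import io.vertx.core.Vertx;
import io.vertx.core.http.HttpServerOptions;

public class ServerClosingTimeoutSketch {
  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();
    // Wait at most 5 seconds for the client to echo the close frame.
    HttpServerOptions options = new HttpServerOptions().setWebSocketClosingTimeout(5);
    vertx.createHttpServer(options)
      .webSocketHandler(ws -> {
        // The server initiates the close handshake; if the client never echoes
        // the close frame, the TCP connection is force-closed when the timeout fires.
        ws.close();
      })
      .listen(8080);
  }
}
```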
| 2020-12-18T10:22:17Z | 3.9 |
|
eclipse-vertx/vert.x | 3,663 | eclipse-vertx__vert.x-3663 | [
"3662"
] | 048d63e2539deb18b21c667e3c2b1e2375ca0506 | diff --git a/src/main/java/io/vertx/core/eventbus/impl/EventBusImpl.java b/src/main/java/io/vertx/core/eventbus/impl/EventBusImpl.java
--- a/src/main/java/io/vertx/core/eventbus/impl/EventBusImpl.java
+++ b/src/main/java/io/vertx/core/eventbus/impl/EventBusImpl.java
@@ -236,7 +236,6 @@ public MessageImpl createMessage(boolean send, String address, MultiMap headers,
}
protected <T> HandlerHolder<T> addRegistration(String address, HandlerRegistration<T> registration, boolean replyHandler, boolean localOnly, Promise<Void> promise) {
-// Objects.requireNonNull(registration.getHandler(), "handler");
HandlerHolder<T> holder = addLocalRegistration(address, registration, replyHandler, localOnly);
onLocalRegistration(holder, promise);
return holder;
diff --git a/src/main/java/io/vertx/core/eventbus/impl/clustered/ClusteredEventBus.java b/src/main/java/io/vertx/core/eventbus/impl/clustered/ClusteredEventBus.java
--- a/src/main/java/io/vertx/core/eventbus/impl/clustered/ClusteredEventBus.java
+++ b/src/main/java/io/vertx/core/eventbus/impl/clustered/ClusteredEventBus.java
@@ -11,7 +11,6 @@
package io.vertx.core.eventbus.impl.clustered;
-import io.vertx.core.Future;
import io.vertx.core.Handler;
import io.vertx.core.MultiMap;
import io.vertx.core.Promise;
@@ -145,7 +144,7 @@ protected <T> HandlerHolder<T> createHandlerHolder(HandlerRegistration<T> regist
}
@Override
- protected <T> void removeRegistration(HandlerHolder<T> handlerHolder, Promise<Void> completionHandler) {
+ protected <T> void onLocalUnregistration(HandlerHolder<T> handlerHolder, Promise<Void> completionHandler) {
if (!handlerHolder.isReplyHandler()) {
RegistrationInfo registrationInfo = new RegistrationInfo(
nodeId,
@@ -154,11 +153,7 @@ protected <T> void removeRegistration(HandlerHolder<T> handlerHolder, Promise<Vo
);
Promise<Void> promise = Promise.promise();
clusterManager.removeRegistration(handlerHolder.getHandler().address, registrationInfo, promise);
- if (completionHandler != null) {
- promise.future().onComplete(completionHandler);
- } else {
- promise.future().onFailure(t -> log.error("Failed to remove sub", t));
- }
+ promise.future().onComplete(completionHandler);
} else {
completionHandler.complete();
}
| diff --git a/src/test/java/io/vertx/core/eventbus/ClusteredEventBusTestBase.java b/src/test/java/io/vertx/core/eventbus/ClusteredEventBusTestBase.java
--- a/src/test/java/io/vertx/core/eventbus/ClusteredEventBusTestBase.java
+++ b/src/test/java/io/vertx/core/eventbus/ClusteredEventBusTestBase.java
@@ -23,6 +23,9 @@
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;
+import static org.hamcrest.CoreMatchers.instanceOf;
+import static org.hamcrest.CoreMatchers.is;
+
/**
* @author <a href="http://tfox.org">Tim Fox</a>
@@ -204,4 +207,30 @@ public void testMessageBodyInterceptor() throws Exception {
}).send("whatever", content, new DeliveryOptions().setCodecName(codec.name()));
await();
}
+
+ @Test
+ public void testClusteredUnregistration() {
+ startNodes(2);
+ MessageConsumer<Object> consumer = vertices[0].eventBus().consumer("foo", msg -> msg.reply(msg.body()));
+ consumer.completionHandler(onSuccess(reg -> {
+ vertices[0].eventBus().request("foo", "echo", onSuccess(reply1 -> {
+ assertEquals("echo", reply1.body());
+ vertices[1].eventBus().request("foo", "echo", onSuccess(reply2 -> {
+        assertEquals("echo", reply2.body());
+ consumer.unregister(onSuccess(unreg -> {
+ vertices[1].eventBus().request("foo", "echo", onFailure(fail1 -> {
+ assertThat(fail1, is(instanceOf(ReplyException.class)));
+ assertEquals(ReplyFailure.NO_HANDLERS, ((ReplyException) fail1).failureType());
+ vertices[0].eventBus().request("foo", "echo", onFailure(fail2 -> {
+ assertThat(fail2, is(instanceOf(ReplyException.class)));
+ assertEquals(ReplyFailure.NO_HANDLERS, ((ReplyException) fail2).failureType());
+ testComplete();
+ }));
+ }));
+ }));
+ }));
+ }));
+ }));
+ await();
+ }
}
| Clustered consumer keeps local registration when unregistered
A method name was changed in `EventBusImpl` but not updated in `ClusteredEventBus` while reworking the clustering SPI.
This might have happened while rebasing #3132 on `master`.
The bug was not caught because we didn't have a test for unregistration.
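
A minimal sketch of the unregistration flow that was left untested (API names as in Vert.x 4; the address and payload are illustrative):

```java
import io.vertx.core.Vertx;
import io.vertx.core.VertxOptions;
import io.vertx.core.eventbus.MessageConsumer;

public class UnregistrationSketch {
  public static void main(String[] args) {
    Vertx.clusteredVertx(new VertxOptions(), res -> {
      if (res.failed()) {
        res.cause().printStackTrace();
        return;
      }
      Vertx vertx = res.result();
      MessageConsumer<Object> consumer =
        vertx.eventBus().consumer("foo", msg -> msg.reply(msg.body()));
      consumer.completionHandler(reg ->
        // Once unregister() completes, requests to "foo" should fail with
        // NO_HANDLERS on every node of the cluster.
        consumer.unregister(unreg ->
          vertx.eventBus().request("foo", "echo",
            ar -> System.out.println("failed as expected: " + ar.failed()))));
    });
  }
}
```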
| 2020-11-23T10:58:58Z | 4 |
|
eclipse-vertx/vert.x | 3,657 | eclipse-vertx__vert.x-3657 | [
"3656"
] | c6ff7eeab6764b517bf0b1faab180261c836ce28 | diff --git a/src/main/java/io/vertx/core/file/impl/FileResolver.java b/src/main/java/io/vertx/core/file/impl/FileResolver.java
--- a/src/main/java/io/vertx/core/file/impl/FileResolver.java
+++ b/src/main/java/io/vertx/core/file/impl/FileResolver.java
@@ -22,7 +22,6 @@
import java.net.URL;
import java.util.Enumeration;
import java.util.function.IntPredicate;
-import java.util.regex.Pattern;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;
@@ -80,8 +79,6 @@ public class FileResolver {
private static final String FILE_SEP = System.getProperty("file.separator");
private static final boolean NON_UNIX_FILE_SEP = !FILE_SEP.equals("/");
- private static final String JAR_URL_SEP = "!/";
- private static final Pattern JAR_URL_SEP_PATTERN = Pattern.compile(JAR_URL_SEP);
private final File cwd;
private final boolean enableCaching;
@@ -141,9 +138,6 @@ public File resolveFile(String fileName) {
}
// Look for file on classpath
ClassLoader cl = getClassLoader();
- if (NON_UNIX_FILE_SEP) {
- fileName = fileName.replace(FILE_SEP, "/");
- }
//https://github.com/eclipse/vert.x/issues/2126
//Cache all elements in the parent directory if it exists
@@ -151,12 +145,18 @@ public File resolveFile(String fileName) {
//been read works.
String parentFileName = file.getParent();
if (parentFileName != null) {
+ if (NON_UNIX_FILE_SEP) {
+ parentFileName = parentFileName.replace(FILE_SEP, "/");
+ }
URL directoryContents = getValidClassLoaderResource(cl, parentFileName);
if (directoryContents != null) {
unpackUrlResource(directoryContents, parentFileName, cl, true);
}
}
+ if (NON_UNIX_FILE_SEP) {
+ fileName = fileName.replace(FILE_SEP, "/");
+ }
URL url = getValidClassLoaderResource(cl, fileName);
if (url != null) {
return unpackUrlResource(url, fileName, cl, false);
@@ -254,26 +254,17 @@ private File unpackFromJarURL(URL url, String fileName, ClassLoader cl) {
zip = new ZipFile(file);
}
- String inJarPath = path.substring(idx1 + 6);
- String[] parts = JAR_URL_SEP_PATTERN.split(inJarPath);
- StringBuilder prefixBuilder = new StringBuilder();
- for (int i = 0; i < parts.length - 1; i++) {
- prefixBuilder.append(parts[i]).append("/");
- }
- String prefix = prefixBuilder.toString();
-
Enumeration<? extends ZipEntry> entries = zip.entries();
while (entries.hasMoreElements()) {
ZipEntry entry = entries.nextElement();
String name = entry.getName();
- if (name.startsWith(prefix.isEmpty() ? fileName : prefix + fileName)) {
- String p = prefix.isEmpty() ? name : name.substring(prefix.length());
+ if (name.startsWith(fileName)) {
if (name.endsWith("/")) {
// Directory
- cache.cacheDir(p);
+ cache.cacheDir(name);
} else {
try (InputStream is = zip.getInputStream(entry)) {
- cache.cacheFile(p, is, !enableCaching);
+ cache.cacheFile(name, is, !enableCaching);
}
}
| diff --git a/src/test/fileresolver/dir with spaces/files.jar b/src/test/fileresolver/dir with spaces/files.jar
new file mode 100644
Binary files /dev/null and b/src/test/fileresolver/dir with spaces/files.jar differ
diff --git a/src/test/fileresolver/files.jar b/src/test/fileresolver/files.jar
new file mode 100644
Binary files /dev/null and b/src/test/fileresolver/files.jar differ
diff --git a/src/test/fileresolver/files.zip b/src/test/fileresolver/files.zip
new file mode 100644
Binary files /dev/null and b/src/test/fileresolver/files.zip differ
diff --git a/src/test/resources/afile with spaces.html b/src/test/fileresolver/files/afile with spaces.html
similarity index 100%
rename from src/test/resources/afile with spaces.html
rename to src/test/fileresolver/files/afile with spaces.html
diff --git a/src/test/resources/afile.html b/src/test/fileresolver/files/afile.html
similarity index 100%
rename from src/test/resources/afile.html
rename to src/test/fileresolver/files/afile.html
diff --git a/src/test/resources/webroot/somefile.html b/src/test/fileresolver/files/webroot/somefile.html
similarity index 100%
rename from src/test/resources/webroot/somefile.html
rename to src/test/fileresolver/files/webroot/somefile.html
diff --git a/src/test/resources/webroot/someotherfile.html b/src/test/fileresolver/files/webroot/someotherfile.html
similarity index 100%
rename from src/test/resources/webroot/someotherfile.html
rename to src/test/fileresolver/files/webroot/someotherfile.html
diff --git a/src/test/resources/webroot/subdir/subdir2/subfile2.html b/src/test/fileresolver/files/webroot/subdir/subdir2/subfile2.html
similarity index 100%
rename from src/test/resources/webroot/subdir/subdir2/subfile2.html
rename to src/test/fileresolver/files/webroot/subdir/subdir2/subfile2.html
diff --git a/src/test/resources/webroot/subdir/subfile.html b/src/test/fileresolver/files/webroot/subdir/subfile.html
similarity index 100%
rename from src/test/resources/webroot/subdir/subfile.html
rename to src/test/fileresolver/files/webroot/subdir/subfile.html
diff --git a/src/test/resources/webroot/this+that b/src/test/fileresolver/files/webroot/this+that
similarity index 100%
rename from src/test/resources/webroot/this+that
rename to src/test/fileresolver/files/webroot/this+that
diff --git a/src/test/fileresolver/nested-files.jar b/src/test/fileresolver/nested-files.jar
new file mode 100644
Binary files /dev/null and b/src/test/fileresolver/nested-files.jar differ
diff --git a/src/test/fileresolver/nested-files.zip b/src/test/fileresolver/nested-files.zip
new file mode 100644
Binary files /dev/null and b/src/test/fileresolver/nested-files.zip differ
diff --git a/src/test/java/io/vertx/core/file/FileResolverTestBase.java b/src/test/java/io/vertx/core/file/FileResolverTestBase.java
--- a/src/test/java/io/vertx/core/file/FileResolverTestBase.java
+++ b/src/test/java/io/vertx/core/file/FileResolverTestBase.java
@@ -28,6 +28,7 @@
import java.io.File;
import java.net.URL;
+import java.net.URLClassLoader;
import java.nio.file.Files;
import java.nio.file.StandardOpenOption;
import java.nio.file.attribute.PosixFilePermissions;
@@ -49,16 +50,26 @@ public abstract class FileResolverTestBase extends VertxTestBase {
protected FileResolver resolver;
- protected String webRoot;
+ private ClassLoader testCL;
@Override
public void setUp() throws Exception {
super.setUp();
+ testCL = Thread.currentThread().getContextClassLoader();
+ File baseDir = new File(new File(new File("src"), "test"), "fileresolver");
+ assertTrue(baseDir.exists() && baseDir.isDirectory());
+ ClassLoader resourcesLoader = resourcesLoader(baseDir);
+ Thread.currentThread().setContextClassLoader(resourcesLoader);
resolver = new FileResolver();
}
+ protected ClassLoader resourcesLoader(File baseDir) throws Exception {
+ return Thread.currentThread().getContextClassLoader();
+ }
+
@Override
protected void tearDown() throws Exception {
+ Thread.currentThread().setContextClassLoader(testCL);
resolver.close();
super.tearDown();
}
@@ -124,6 +135,8 @@ public void testResolveFileFromClasspathDisableCaching() throws Exception {
@Test
public void testResolveFileWithSpacesFromClasspath() throws Exception {
+ Assume.assumeFalse(Utils.isWindows());
+
for (int i = 0; i < 2; i++) {
File file = resolver.resolveFile("afile with spaces.html");
assertTrue(file.exists());
@@ -149,7 +162,7 @@ public void testCacheDirIsPosix0700() throws Exception {
@Test
public void testResolveDirectoryFromClasspath() throws Exception {
for (int i = 0; i < 2; i++) {
- File file = resolver.resolveFile(webRoot);
+ File file = resolver.resolveFile("webroot");
assertTrue(file.exists());
assertTrue(file.getPath().startsWith(cacheBaseDir + "-"));
assertTrue(file.isDirectory());
@@ -159,7 +172,7 @@ public void testResolveDirectoryFromClasspath() throws Exception {
@Test
public void testResolveFileInDirectoryFromClasspath() throws Exception {
for (int i = 0; i < 2; i++) {
- File file = resolver.resolveFile(webRoot + "/somefile.html");
+ File file = resolver.resolveFile("webroot/somefile.html");
assertTrue(file.exists());
assertTrue(file.getPath().startsWith(cacheBaseDir + "-"));
assertFalse(file.isDirectory());
@@ -170,7 +183,7 @@ public void testResolveFileInDirectoryFromClasspath() throws Exception {
@Test
public void testResolveSubDirectoryFromClasspath() throws Exception {
for (int i = 0; i < 2; i++) {
- File file = resolver.resolveFile(webRoot + "/subdir");
+ File file = resolver.resolveFile("webroot/subdir");
assertTrue(file.exists());
assertTrue(file.getPath().startsWith(cacheBaseDir + "-"));
assertTrue(file.isDirectory());
@@ -180,7 +193,7 @@ public void testResolveSubDirectoryFromClasspath() throws Exception {
@Test
public void testResolveFileInSubDirectoryFromClasspath() throws Exception {
for (int i = 0; i < 2; i++) {
- File file = resolver.resolveFile(webRoot + "/subdir/subfile.html");
+ File file = resolver.resolveFile("webroot/subdir/subfile.html");
assertTrue(file.exists());
assertTrue(file.getPath().startsWith(cacheBaseDir + "-"));
assertFalse(file.isDirectory());
@@ -190,7 +203,7 @@ public void testResolveFileInSubDirectoryFromClasspath() throws Exception {
@Test
public void testRecursivelyUnpack() throws Exception {
- File file = resolver.resolveFile(webRoot + "/subdir");
+ File file = resolver.resolveFile("webroot/subdir");
assertTrue(file.exists());
File sub = new File(file, "subfile.html");
assertTrue(sub.exists());
@@ -199,7 +212,7 @@ public void testRecursivelyUnpack() throws Exception {
@Test
public void testRecursivelyUnpack2() throws Exception {
- File file = resolver.resolveFile(webRoot + "/subdir");
+ File file = resolver.resolveFile("webroot/subdir");
assertTrue(file.exists());
File sub = new File(new File(file, "subdir2"), "subfile2.html");
assertTrue(sub.exists());
@@ -209,7 +222,7 @@ public void testRecursivelyUnpack2() throws Exception {
@Test
public void testDeleteCacheDir() throws Exception {
FileResolver resolver2 = new FileResolver();
- File file = resolver2.resolveFile(webRoot + "/somefile.html");
+ File file = resolver2.resolveFile("webroot/somefile.html");
assertTrue(file.exists());
File cacheDir = file.getParentFile().getParentFile();
assertTrue(cacheDir.exists());
@@ -220,7 +233,7 @@ public void testDeleteCacheDir() throws Exception {
@Test
public void testCacheDirDeletedOnVertxClose() {
VertxInternal vertx2 = (VertxInternal)vertx();
- File file = vertx2.resolveFile(webRoot + "/somefile.html");
+ File file = vertx2.resolveFile("webroot/somefile.html");
assertTrue(file.exists());
File cacheDir = file.getParentFile().getParentFile();
assertTrue(cacheDir.exists());
@@ -249,7 +262,7 @@ public void testFileSystemReadDirectory() {
@Test
public void testSendFileFromClasspath() {
vertx.createHttpServer(new HttpServerOptions().setPort(8080)).requestHandler(res -> {
- res.response().sendFile(webRoot + "/somefile.html");
+ res.response().sendFile("webroot/somefile.html");
}).listen(onSuccess(res -> {
vertx.createHttpClient(new HttpClientOptions())
.request(HttpMethod.GET, 8080, "localhost", "/")
@@ -351,14 +364,14 @@ private String readFile(File file) {
@Test
public void testResolveAfterCloseThrowsISE() throws Exception {
FileResolver resolver2 = new FileResolver();
- File file = resolver2.resolveFile(webRoot + "/somefile.html");
+ File file = resolver2.resolveFile("webroot/somefile.html");
assertTrue(file.exists());
File cacheDir = file.getParentFile().getParentFile();
assertTrue(cacheDir.exists());
resolver2.close();
assertFalse(cacheDir.exists());
try {
- resolver2.resolveFile(webRoot + "/somefile.html");
+ resolver2.resolveFile("webroot/somefile.html");
fail("Should fail");
} catch (IllegalStateException e) {
// OK
diff --git a/src/test/java/io/vertx/core/file/FileSystemFileResolverTest.java b/src/test/java/io/vertx/core/file/FileSystemFileResolverTest.java
--- a/src/test/java/io/vertx/core/file/FileSystemFileResolverTest.java
+++ b/src/test/java/io/vertx/core/file/FileSystemFileResolverTest.java
@@ -26,9 +26,8 @@
public class FileSystemFileResolverTest extends FileResolverTestBase {
@Override
- public void setUp() throws Exception {
- super.setUp();
- webRoot = "webroot";
+ protected ClassLoader resourcesLoader(File baseDir) throws Exception {
+ return new URLClassLoader(new URL[]{new File(baseDir, "files").toURI().toURL()}, Thread.currentThread().getContextClassLoader());
}
@Test
diff --git a/src/test/java/io/vertx/core/file/JarFileResolverTest.java b/src/test/java/io/vertx/core/file/JarFileResolverTest.java
--- a/src/test/java/io/vertx/core/file/JarFileResolverTest.java
+++ b/src/test/java/io/vertx/core/file/JarFileResolverTest.java
@@ -11,7 +11,9 @@
package io.vertx.core.file;
-import io.vertx.core.file.FileResolverTestBase;
+import java.io.File;
+import java.net.URL;
+import java.net.URLClassLoader;
/**
* @author <a href="http://tfox.org">Tim Fox</a>
@@ -19,10 +21,7 @@
public class JarFileResolverTest extends FileResolverTestBase {
@Override
- public void setUp() throws Exception {
- super.setUp();
- // This is inside the jar webroot2.jar
- webRoot = "webroot2";
+ protected ClassLoader resourcesLoader(File baseDir) throws Exception {
+ return new URLClassLoader(new URL[]{new File(baseDir, "files.jar").toURI().toURL()}, Thread.currentThread().getContextClassLoader());
}
-
}
diff --git a/src/test/java/io/vertx/core/file/JarFileResolverWithSpacesTest.java b/src/test/java/io/vertx/core/file/JarFileResolverWithSpacesTest.java
--- a/src/test/java/io/vertx/core/file/JarFileResolverWithSpacesTest.java
+++ b/src/test/java/io/vertx/core/file/JarFileResolverWithSpacesTest.java
@@ -20,23 +20,8 @@
*/
public class JarFileResolverWithSpacesTest extends FileResolverTestBase {
- private ClassLoader original;
-
@Override
- public void setUp() throws Exception {
- original = Thread.currentThread().getContextClassLoader();
- URLClassLoader someClassloader = new URLClassLoader(new URL[] { new File("src/test/resources/dir with " +
- "spaces/webroot3.jar").toURI().toURL()}, JarFileResolverWithSpacesTest.class.getClassLoader());
- Thread.currentThread().setContextClassLoader(someClassloader);
- super.setUp();
- // This is inside the jar webroot2.jar
- webRoot = "webroot3";
+ protected ClassLoader resourcesLoader(File baseDir) throws Exception {
+ return new URLClassLoader(new URL[]{new File(baseDir, "dir with spaces/files.jar").toURI().toURL()}, Thread.currentThread().getContextClassLoader());
}
-
- @Override
- public void tearDown() throws Exception {
- super.tearDown();
- Thread.currentThread().setContextClassLoader(original);
- }
-
}
diff --git a/src/test/java/io/vertx/core/file/NestedJarFileResolverTest.java b/src/test/java/io/vertx/core/file/NestedJarFileResolverTest.java
--- a/src/test/java/io/vertx/core/file/NestedJarFileResolverTest.java
+++ b/src/test/java/io/vertx/core/file/NestedJarFileResolverTest.java
@@ -11,8 +11,7 @@
package io.vertx.core.file;
-import io.vertx.core.file.FileResolverTestBase;
-
+import java.io.File;
import java.net.MalformedURLException;
import java.net.URL;
@@ -21,24 +20,21 @@
*/
public class NestedJarFileResolverTest extends FileResolverTestBase {
- private ClassLoader prevCL;
-
@Override
- public void setUp() throws Exception {
- super.setUp();
- // This folder is inside the embedded jar file called nested.jar, inside webroot4.jar
- webRoot = "webroot4";
-
- prevCL = Thread.currentThread().getContextClassLoader();
- URL webroot4URL = prevCL.getResource("webroot4.jar");
- ClassLoader loader = new ClassLoader(prevCL = Thread.currentThread().getContextClassLoader()) {
+ protected ClassLoader resourcesLoader(File baseDir) throws Exception {
+ URL webroot4URL = new File(baseDir, "nested-files.jar").toURI().toURL();
+ return new ClassLoader(Thread.currentThread().getContextClassLoader()) {
@Override
public URL getResource(String name) {
try {
if (name.startsWith("lib/")) {
return new URL("jar:" + webroot4URL + "!/" + name);
- } else if (name.startsWith("webroot4")) {
+ } else if (name.startsWith("webroot")) {
return new URL("jar:" + webroot4URL + "!/lib/nested.jar!/" + name.substring(7));
+ } else if (name.equals("afile.html")) {
+ return new URL("jar:" + webroot4URL + "!/lib/nested.jar!afile.html/");
+ } else if (name.equals("afile with spaces.html")) {
+ return new URL("jar:" + webroot4URL + "!/lib/nested.jar!afile with spaces.html/");
}
} catch (MalformedURLException e) {
throw new AssertionError(e);
@@ -46,14 +42,5 @@ public URL getResource(String name) {
return super.getResource(name);
}
};
- Thread.currentThread().setContextClassLoader(loader);
- }
-
- @Override
- public void after() throws Exception {
- if (prevCL != null) {
- Thread.currentThread().setContextClassLoader(prevCL);
- }
- super.after();
}
}
diff --git a/src/test/java/io/vertx/core/file/NestedRootJarResolverTest.java b/src/test/java/io/vertx/core/file/NestedRootJarResolverTest.java
deleted file mode 100644
--- a/src/test/java/io/vertx/core/file/NestedRootJarResolverTest.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2011-2019 Contributors to the Eclipse Foundation
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
- * which is available at https://www.apache.org/licenses/LICENSE-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
- */
-
-package io.vertx.core.file;
-
-import io.vertx.core.file.FileResolverTestBase;
-
-import java.net.URL;
-import java.net.URLClassLoader;
-
-/**
- * @author Thomas Segismont
- */
-public class NestedRootJarResolverTest extends FileResolverTestBase {
-
- private ClassLoader prevCL;
-
- @Override
- public void setUp() throws Exception {
- super.setUp();
- // This folder is inside the nested-inf/classes directory, inside nestedroot.jar
- webRoot = "webroot2";
-
- prevCL = Thread.currentThread().getContextClassLoader();
- URL jarUrl = prevCL.getResource("nestedroot.jar");
- URL rootUrl = new URL("jar:" + jarUrl + "!/nested-inf/classes!/");
- URLClassLoader urlClassLoader = new URLClassLoader(new URL[]{rootUrl}, prevCL);
- Thread.currentThread().setContextClassLoader(urlClassLoader);
- }
-
- @Override
- public void after() throws Exception {
- if (prevCL != null) {
- Thread.currentThread().setContextClassLoader(prevCL);
- }
- super.after();
- }
-}
diff --git a/src/test/java/io/vertx/core/file/NestedZipFileResolverTest.java b/src/test/java/io/vertx/core/file/NestedZipFileResolverTest.java
--- a/src/test/java/io/vertx/core/file/NestedZipFileResolverTest.java
+++ b/src/test/java/io/vertx/core/file/NestedZipFileResolverTest.java
@@ -11,8 +11,7 @@
package io.vertx.core.file;
-import io.vertx.core.file.FileResolverTestBase;
-
+import java.io.File;
import java.net.MalformedURLException;
import java.net.URL;
@@ -21,24 +20,21 @@
*/
public class NestedZipFileResolverTest extends FileResolverTestBase {
- private ClassLoader prevCL;
-
@Override
- public void setUp() throws Exception {
- super.setUp();
- // This folder is inside the embedded zip file called nested.zip, inside webroot6.zip
- webRoot = "webroot6";
- // We will store the current classloader
- prevCL = Thread.currentThread().getContextClassLoader();
- URL webroot6URL = prevCL.getResource("webroot6.zip");
- ClassLoader loader = new ClassLoader(prevCL = Thread.currentThread().getContextClassLoader()) {
+ protected ClassLoader resourcesLoader(File baseDir) throws Exception {
+ URL webroot6URL = new File(baseDir, "nested-files.zip").toURI().toURL();
+ return new ClassLoader(Thread.currentThread().getContextClassLoader()) {
@Override
public URL getResource(String name) {
try {
if (name.startsWith("lib/")) {
return new URL("jar:" + webroot6URL + "!/" + name);
- } else if (name.startsWith("webroot6")) {
+ } else if (name.startsWith("webroot")) {
return new URL("jar:" + webroot6URL + "!/lib/nested.zip!/" + name.substring(7));
+ } else if (name.equals("afile.html")) {
+ return new URL("jar:" + webroot6URL + "!/lib/nested.zip!afile.html/");
+ } else if (name.equals("afile with spaces.html")) {
+ return new URL("jar:" + webroot6URL + "!/lib/nested.zip!afile with spaces.html/");
}
} catch (MalformedURLException e) {
throw new AssertionError(e);
@@ -46,14 +42,5 @@ public URL getResource(String name) {
return super.getResource(name);
}
};
- Thread.currentThread().setContextClassLoader(loader);
- }
-
- @Override
- public void after() throws Exception {
- if (prevCL != null) {
- Thread.currentThread().setContextClassLoader(prevCL);
- }
- super.after();
}
}
diff --git a/src/test/java/io/vertx/core/file/ZipFileResolverTest.java b/src/test/java/io/vertx/core/file/ZipFileResolverTest.java
--- a/src/test/java/io/vertx/core/file/ZipFileResolverTest.java
+++ b/src/test/java/io/vertx/core/file/ZipFileResolverTest.java
@@ -11,7 +11,9 @@
package io.vertx.core.file;
-import io.vertx.core.file.FileResolverTestBase;
+import java.io.File;
+import java.net.URL;
+import java.net.URLClassLoader;
/**
* @author <a href="http://www.ernestojpg.com">Ernesto J. Perez</a>
@@ -19,10 +21,7 @@
public class ZipFileResolverTest extends FileResolverTestBase {
@Override
- public void setUp() throws Exception {
- super.setUp();
- // This is inside the jar webroot5.zip
- webRoot = "webroot5";
+ protected ClassLoader resourcesLoader(File baseDir) throws Exception {
+ return new URLClassLoader(new URL[]{new File(baseDir, "files.zip").toURI().toURL()}, Thread.currentThread().getContextClassLoader());
}
-
}
diff --git a/src/test/java/io/vertx/core/http/Http1xTest.java b/src/test/java/io/vertx/core/http/Http1xTest.java
--- a/src/test/java/io/vertx/core/http/Http1xTest.java
+++ b/src/test/java/io/vertx/core/http/Http1xTest.java
@@ -3585,15 +3585,16 @@ private void testHttpServerRequestDecodeError(Handler<NetSocket> bodySender, Han
public void testInvalidChunkInHttpClientResponse() throws Exception {
NetServer server = vertx.createNetServer();
CountDownLatch listenLatch = new CountDownLatch(1);
+ CompletableFuture<Void> cont = new CompletableFuture<>();
server.connectHandler(so -> {
- so.write("HTTP/1.1 200 OK\r\n");
- so.write("Transfer-Encoding: chunked\r\n");
- so.write("\r\n");
- so.write("invalid\r\n"); // Empty chunk
+ so.write("HTTP/1.1 200 OK\r\n" + "Transfer-Encoding: chunked\r\n" + "\r\n");
+ cont.whenComplete((v,e) -> {
+ so.write("invalid\r\n"); // Invalid chunk
+ });
}).listen(testAddress, onSuccess(v -> listenLatch.countDown()));
awaitLatch(listenLatch);
AtomicInteger status = new AtomicInteger();
- testHttpClientResponseDecodeError(err -> {
+ testHttpClientResponseDecodeError(cont::complete, err -> {
switch (status.incrementAndGet()) {
case 1:
assertTrue(err instanceof NumberFormatException);
@@ -3611,19 +3612,22 @@ public void testInvalidChunkInHttpClientResponse() throws Exception {
public void testInvalidTrailersInHttpClientResponse() throws Exception {
NetServer server = vertx.createNetServer();
CountDownLatch listenLatch = new CountDownLatch(1);
+ CompletableFuture<Void> cont = new CompletableFuture<>();
server.connectHandler(so -> {
- so.write("HTTP/1.1 200 OK\r\n");
- so.write("Transfer-Encoding: chunked\r\n");
- so.write("\r\n");
- so.write("0\r\n"); // Empty chunk
+ so.write("HTTP/1.1 200 OK\r\n" +
+ "Transfer-Encoding: chunked\r\n" +
+ "\r\n" +
+ "0\r\n"); // Empty chunk
// Send large trailer
- for (int i = 0;i < 2000;i++) {
- so.write("01234567");
- }
+ cont.whenComplete((v, e) -> {
+ for (int i = 0;i < 2000;i++) {
+ so.write("01234567");
+ }
+ });
}).listen(testAddress, onSuccess(v -> listenLatch.countDown()));
awaitLatch(listenLatch);
AtomicInteger status = new AtomicInteger();
- testHttpClientResponseDecodeError(err -> {
+ testHttpClientResponseDecodeError(cont::complete, err -> {
switch (status.incrementAndGet()) {
case 1:
assertTrue(err instanceof TooLongFrameException);
@@ -3637,11 +3641,12 @@ public void testInvalidTrailersInHttpClientResponse() throws Exception {
});
}
- private void testHttpClientResponseDecodeError(Handler<Throwable> errorHandler) throws Exception {
+ private void testHttpClientResponseDecodeError(Handler<Void> continuation, Handler<Throwable> errorHandler) throws Exception {
client.request(requestOptions)
.onComplete(onSuccess(req -> {
req.send(onSuccess(resp -> {
resp.exceptionHandler(errorHandler);
+ continuation.handle(null);
}));
}));
await();
@@ -4749,11 +4754,11 @@ public void start(Promise<Void> startFuture) {
public void testHttpServerWithIdleTimeoutSendChunkedFile() throws Exception {
// Does not pass reliably in CI (timeout)
Assume.assumeFalse(vertx.isNativeTransportEnabled());
- int expected = 16 * 1024 * 1024; // We estimate this will take more than 200ms to transfer with a 1ms pause in chunks
+ int expected = 64 * 1024 * 1024; // We estimate this will take more than 200ms to transfer with a 1ms pause in chunks
File sent = TestUtils.tmpFile(".dat", expected);
server.close();
server = vertx
- .createHttpServer(createBaseServerOptions().setIdleTimeout(400).setIdleTimeoutUnit(TimeUnit.MILLISECONDS))
+ .createHttpServer(createBaseServerOptions().setIdleTimeout(1000).setIdleTimeoutUnit(TimeUnit.MILLISECONDS))
.requestHandler(
req -> {
req.response().sendFile(sent.getAbsolutePath());
diff --git a/src/test/java/io/vertx/core/http/Http2ServerTest.java b/src/test/java/io/vertx/core/http/Http2ServerTest.java
--- a/src/test/java/io/vertx/core/http/Http2ServerTest.java
+++ b/src/test/java/io/vertx/core/http/Http2ServerTest.java
@@ -54,6 +54,7 @@
import io.vertx.core.buffer.Buffer;
import io.vertx.core.http.impl.Http1xOrH2CHandler;
import io.vertx.core.http.impl.HttpUtils;
+import io.vertx.core.impl.Utils;
import io.vertx.core.impl.VertxInternal;
import io.vertx.core.net.impl.SSLHelper;
import io.vertx.core.streams.ReadStream;
@@ -61,6 +62,7 @@
import io.vertx.test.core.DetectFileDescriptorLeaks;
import io.vertx.test.core.TestUtils;
import io.vertx.test.tls.Trust;
+import org.junit.Assume;
import org.junit.Test;
import java.io.ByteArrayInputStream;
@@ -1903,6 +1905,9 @@ public void testClientSendGoAwayNoError() throws Exception {
@Test
public void testClientSendGoAwayInternalError() throws Exception {
+ // On windows the client will close the channel immediately (since it's an error)
+ // and the server might see the channel inactive without receiving the close frame before
+ Assume.assumeFalse(Utils.isWindows());
Promise<Void> abc = Promise.promise();
Context ctx = vertx.getOrCreateContext();
Handler<HttpServerRequest> requestHandler = req -> {
diff --git a/src/test/java/io/vertx/core/http/HttpTLSTest.java b/src/test/java/io/vertx/core/http/HttpTLSTest.java
--- a/src/test/java/io/vertx/core/http/HttpTLSTest.java
+++ b/src/test/java/io/vertx/core/http/HttpTLSTest.java
@@ -1435,7 +1435,7 @@ private void testStore(HttpServerOptions serverOptions, List<String> expectedPos
ok |= cause.getMessage().startsWith(expectedPossiblePrefix);
}
if (!ok) {
- fail("Was expecting e.getCause().getMessage() to be prefixed by one of " + expectedPossiblePrefixes);
+ fail("Was expecting <" + cause.getMessage() + "> e.getCause().getMessage() to be prefixed by one of " + expectedPossiblePrefixes);
}
assertTrue(cause.getMessage().endsWith(expectedSuffix));
}
diff --git a/src/test/java/io/vertx/core/http/HttpTest.java b/src/test/java/io/vertx/core/http/HttpTest.java
--- a/src/test/java/io/vertx/core/http/HttpTest.java
+++ b/src/test/java/io/vertx/core/http/HttpTest.java
@@ -20,6 +20,7 @@
import io.vertx.core.buffer.Buffer;
import io.vertx.core.dns.AddressResolverOptions;
import io.vertx.core.file.AsyncFile;
+import io.vertx.core.impl.Utils;
import io.vertx.core.net.*;
import io.vertx.core.net.impl.HAProxyMessageCompletionHandler;
import io.vertx.core.streams.Pump;
@@ -2075,13 +2076,13 @@ public void testSendFileDirectoryWithHandler() throws Exception {
@Test
public void testSendOpenRangeFileFromClasspath() {
server.requestHandler(res -> {
- res.response().sendFile("webroot/somefile.html", 6);
+ res.response().sendFile("hosts_config.txt", 13);
}).listen(testAddress, onSuccess(res -> {
client.request(requestOptions).onComplete(onSuccess(req -> {
client.request(requestOptions)
.compose(HttpClientRequest::send)
.compose(HttpClientResponse::body).onComplete(onSuccess(body -> {
- assertTrue(body.toString().startsWith("<body>blah</body></html>"));
+ assertTrue(body.toString().startsWith("server.net"));
testComplete();
}));
}));
@@ -2092,12 +2093,12 @@ public void testSendOpenRangeFileFromClasspath() {
@Test
public void testSendRangeFileFromClasspath() {
server.requestHandler(res -> {
- res.response().sendFile("webroot/somefile.html", 6, 6);
+ res.response().sendFile("hosts_config.txt", 13, 10);
}).listen(testAddress, onSuccess(res -> {
client.request(requestOptions)
.compose(HttpClientRequest::send)
.compose(HttpClientResponse::body).onComplete(onSuccess(body -> {
- assertEquals("<body>", body.toString());
+ assertEquals("server.net", body.toString());
testComplete();
}));
}));
@@ -2330,9 +2331,10 @@ public void testRequestTimesOutWhenIndicatedPeriodExpiresWithoutAResponseFromRem
@Test
public void testRequestTimeoutCanceledWhenRequestHasAnOtherError() {
+ Assume.assumeFalse(Utils.isWindows());
AtomicReference<Throwable> exception = new AtomicReference<>();
// There is no server running, should fail to connect
- client.request(new RequestOptions().setTimeout(800))
+ client.request(new RequestOptions().setPort(5000).setTimeout(800))
.onComplete(onFailure(exception::set));
vertx.setTimer(1500, id -> {
@@ -4821,9 +4823,8 @@ public void testDisableIdleTimeoutInPool() throws Exception {
.setMaxPoolSize(1)
.setKeepAliveTimeout(10)
);
- client.request(requestOptions)
- .compose(HttpClientRequest::send)
- .onComplete(onSuccess(resp -> {
+ client.request(requestOptions, onSuccess(req -> {
+ req.send(onSuccess(resp -> {
resp.endHandler(v1 -> {
AtomicBoolean closed = new AtomicBoolean();
resp.request().connection().closeHandler(v2 -> {
@@ -4835,6 +4836,7 @@ public void testDisableIdleTimeoutInPool() throws Exception {
});
});
}));
+ }));
await();
}
diff --git a/src/test/java/io/vertx/core/impl/logging/LoggingBackendSelectionTest.java b/src/test/java/io/vertx/core/impl/logging/LoggingBackendSelectionTest.java
--- a/src/test/java/io/vertx/core/impl/logging/LoggingBackendSelectionTest.java
+++ b/src/test/java/io/vertx/core/impl/logging/LoggingBackendSelectionTest.java
@@ -15,12 +15,12 @@
import org.junit.Before;
import org.junit.Test;
+import java.io.ByteArrayOutputStream;
import java.io.IOException;
+import java.io.InputStream;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.net.URL;
-import java.nio.file.FileSystems;
-import java.nio.file.Files;
import java.util.HashSet;
import java.util.Set;
@@ -111,8 +111,14 @@ protected Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundE
if (url == null) {
throw new ClassNotFoundException(name);
}
- try {
- byte[] bytes = Files.readAllBytes(FileSystems.getDefault().getPath(url.getPath()));
+ try (InputStream in = url.openStream()) {
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ byte[] buff = new byte[256];
+ int l;
+ while ((l = in.read(buff)) != -1) {
+        baos.write(buff, 0, l);
+ }
+ byte[] bytes = baos.toByteArray();
Class<?> clazz = defineClass(name, bytes, 0, bytes.length);
if (resolve) {
resolveClass(clazz);
diff --git a/src/test/java/io/vertx/core/spi/metrics/MetricsTest.java b/src/test/java/io/vertx/core/spi/metrics/MetricsTest.java
--- a/src/test/java/io/vertx/core/spi/metrics/MetricsTest.java
+++ b/src/test/java/io/vertx/core/spi/metrics/MetricsTest.java
@@ -399,6 +399,7 @@ private void testHandlerProcessMessage(Vertx from, Vertx to, int expectedLocalCo
@Test
public void testHandlerMetricReply() throws Exception {
+ AtomicReference<HandlerMetric> replyRegistration = new AtomicReference<>();
CountDownLatch latch = new CountDownLatch(1);
FakeEventBusMetrics metrics = FakeMetricsBase.getMetrics(vertx.eventBus());
vertx.eventBus().consumer(ADDRESS1, msg -> {
@@ -409,6 +410,7 @@ public void testHandlerMetricReply() throws Exception {
assertEquals(0, registration.scheduleCount.get());
assertEquals(0, registration.deliveredCount.get());
assertEquals(0, registration.localDeliveredCount.get());
+ replyRegistration.set(registration);
msg.reply("pong");
}).completionHandler(ar -> {
assertTrue(ar.succeeded());
@@ -417,7 +419,7 @@ public void testHandlerMetricReply() throws Exception {
awaitLatch(latch);
vertx.eventBus().request(ADDRESS1, "ping", reply -> {
assertEquals(ADDRESS1, metrics.getRegistrations().get(0).address);
- HandlerMetric registration = metrics.getRegistrations().get(1);
+ HandlerMetric registration = replyRegistration.get();
assertEquals(ADDRESS1, registration.repliedAddress);
assertEquals(1, registration.scheduleCount.get());
assertEquals(1, registration.deliveredCount.get());
@@ -900,7 +902,7 @@ private void testDatagram(String host, Consumer<PacketMetric> checker) throws Ex
}
@Test
- public void testThreadPoolMetricsWithExecuteBlocking() {
+ public void testThreadPoolMetricsWithExecuteBlocking() throws Exception {
Map<String, PoolMetrics> all = FakePoolMetrics.getPoolMetrics();
FakePoolMetrics metrics = (FakePoolMetrics) all.get("vert.x-worker-thread");
@@ -910,7 +912,7 @@ public void testThreadPoolMetricsWithExecuteBlocking() {
Handler<Promise<Void>> job = getSomeDumbTask();
- AtomicInteger counter = new AtomicInteger();
+ CountDownLatch counter = new CountDownLatch(100);
AtomicBoolean hadWaitingQueue = new AtomicBoolean();
AtomicBoolean hadIdle = new AtomicBoolean();
AtomicBoolean hadRunning = new AtomicBoolean();
@@ -927,17 +929,15 @@ public void testThreadPoolMetricsWithExecuteBlocking() {
if (metrics.numberOfRunningTasks() > 0) {
hadRunning.set(true);
}
- if (counter.incrementAndGet() == 100) {
- testComplete();
- }
+ counter.countDown();
}
);
}
- await();
+ awaitLatch(counter);
- assertEquals(metrics.numberOfSubmittedTask(), 100);
- assertEquals(metrics.numberOfCompletedTasks(), 100);
+ assertWaitUntil(() -> metrics.numberOfSubmittedTask() == 100);
+ assertWaitUntil(() -> metrics.numberOfCompletedTasks() == 100);
assertTrue(hadIdle.get());
assertTrue(hadWaitingQueue.get());
assertTrue(hadRunning.get());
@@ -948,22 +948,23 @@ public void testThreadPoolMetricsWithExecuteBlocking() {
}
@Test
- public void testThreadPoolMetricsWithInternalExecuteBlocking() {
+ public void testThreadPoolMetricsWithInternalExecuteBlocking() throws InterruptedException {
Map<String, PoolMetrics> all = FakePoolMetrics.getPoolMetrics();
FakePoolMetrics metrics = (FakePoolMetrics) all.get("vert.x-internal-blocking");
assertThat(metrics.getPoolSize(), is(getOptions().getInternalBlockingPoolSize()));
assertThat(metrics.numberOfIdleThreads(), is(getOptions().getInternalBlockingPoolSize()));
- AtomicInteger counter = new AtomicInteger();
+ int num = VertxOptions.DEFAULT_INTERNAL_BLOCKING_POOL_SIZE;
+ int count = num * 5;
+
+ CountDownLatch counter = new CountDownLatch(count);
AtomicBoolean hadWaitingQueue = new AtomicBoolean();
AtomicBoolean hadIdle = new AtomicBoolean();
AtomicBoolean hadRunning = new AtomicBoolean();
VertxInternal v = (VertxInternal) vertx;
Map<Integer, CountDownLatch> latches = new HashMap<>();
- int num = VertxOptions.DEFAULT_INTERNAL_BLOCKING_POOL_SIZE;
- int count = num * 5;
for (int i = 0; i < count; i++) {
CountDownLatch latch = latches.computeIfAbsent(i / num, k -> new CountDownLatch(num));
v.executeBlockingInternal(fut -> {
@@ -986,13 +987,11 @@ public void testThreadPoolMetricsWithInternalExecuteBlocking() {
if (metrics.numberOfIdleThreads() > 0) {
hadIdle.set(true);
}
- if (counter.incrementAndGet() == count) {
- testComplete();
- }
+ counter.countDown();
});
}
- await();
+ awaitLatch(counter);
assertEquals(metrics.numberOfSubmittedTask(), 100);
assertEquals(metrics.numberOfCompletedTasks(), 100);
@@ -1075,7 +1074,7 @@ public void testThreadPoolMetricsWithWorkerVerticle() throws Exception {
}
@Test
- public void testThreadPoolMetricsWithNamedExecuteBlocking() {
+ public void testThreadPoolMetricsWithNamedExecuteBlocking() throws InterruptedException {
vertx.close(); // Close the instance automatically created
vertx = Vertx.vertx(new VertxOptions().setMetricsOptions(new MetricsOptions().setEnabled(true).setFactory(new FakeMetricsFactory())));
@@ -1090,7 +1089,7 @@ public void testThreadPoolMetricsWithNamedExecuteBlocking() {
Handler<Promise<Void>> job = getSomeDumbTask();
- AtomicInteger counter = new AtomicInteger();
+ CountDownLatch counter = new CountDownLatch(100);
AtomicBoolean hadWaitingQueue = new AtomicBoolean();
AtomicBoolean hadIdle = new AtomicBoolean();
AtomicBoolean hadRunning = new AtomicBoolean();
@@ -1108,13 +1107,11 @@ public void testThreadPoolMetricsWithNamedExecuteBlocking() {
if (metrics.numberOfRunningTasks() > 0) {
hadRunning.set(true);
}
- if (counter.incrementAndGet() == 100) {
- testComplete();
- }
+ counter.countDown();
});
}
- await();
+ awaitLatch(counter);
assertEquals(metrics.numberOfSubmittedTask(), 100);
assertEquals(metrics.numberOfCompletedTasks(), 100);
diff --git a/src/test/resources/dir with spaces/webroot3.jar b/src/test/resources/dir with spaces/webroot3.jar
deleted file mode 100644
Binary files a/src/test/resources/dir with spaces/webroot3.jar and /dev/null differ
diff --git a/src/test/resources/nestedroot.jar b/src/test/resources/nestedroot.jar
deleted file mode 100644
Binary files a/src/test/resources/nestedroot.jar and /dev/null differ
diff --git a/src/test/resources/webroot2.jar b/src/test/resources/webroot2.jar
deleted file mode 100644
Binary files a/src/test/resources/webroot2.jar and /dev/null differ
diff --git a/src/test/resources/webroot4.jar b/src/test/resources/webroot4.jar
deleted file mode 100644
Binary files a/src/test/resources/webroot4.jar and /dev/null differ
diff --git a/src/test/resources/webroot5.zip b/src/test/resources/webroot5.zip
deleted file mode 100644
Binary files a/src/test/resources/webroot5.zip and /dev/null differ
diff --git a/src/test/resources/webroot6.zip b/src/test/resources/webroot6.zip
deleted file mode 100644
Binary files a/src/test/resources/webroot6.zip and /dev/null differ
| File resolver should replace the file separator before parent file class loader lookup
The `FileResolver` replaces the file separator for a `ClassLoader` lookup; however, it does not do so for the parent file `ClassLoader`, leading to issues on Windows.
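A minimal sketch of the described normalization (an illustration only — the actual patch is not shown here): the platform file separator is replaced with `/` before delegating the lookup to the parent `ClassLoader`, since class-loader resource names always use `/`:

```java
import java.io.File;
import java.net.URL;

// Hypothetical helper illustrating the fix: a Windows file name like
// "webroot\\index.html" must be normalized before the parent ClassLoader
// lookup as well, not only for the primary ClassLoader lookup.
public class ParentClassLoaderLookup {
  static URL getResourceFromParent(ClassLoader parent, String fileName) {
    String resourceName = fileName.replace(File.separatorChar, '/');
    return parent.getResource(resourceName);
  }
}
```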
| 2020-11-19T07:26:45Z | 4 |
|
eclipse-vertx/vert.x | 3,604 | eclipse-vertx__vert.x-3604 | [
"3600"
] | 4dd98060cf9a6e6fac9411e029f6625a14894b3f | diff --git a/src/main/java/io/vertx/core/impl/DeploymentManager.java b/src/main/java/io/vertx/core/impl/DeploymentManager.java
--- a/src/main/java/io/vertx/core/impl/DeploymentManager.java
+++ b/src/main/java/io/vertx/core/impl/DeploymentManager.java
@@ -561,7 +561,7 @@ private void doDeploy(String identifier,
deployment.child = true;
} else {
// Orphan
- deployment.undeploy(event -> reportFailure(new NoStackTraceThrowable("Verticle deployment failed.Could not be added as child of parent verticle"), context, completionHandler));
+ deployment.undeploy(event -> reportFailure(new NoStackTraceThrowable("Verticle deployment failed.Could not be added as child of parent verticle"), callingContext, completionHandler));
return;
}
}
| diff --git a/src/test/java/io/vertx/core/DeploymentTest.java b/src/test/java/io/vertx/core/DeploymentTest.java
--- a/src/test/java/io/vertx/core/DeploymentTest.java
+++ b/src/test/java/io/vertx/core/DeploymentTest.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2017 Contributors to the Eclipse Foundation
+ * Copyright (c) 2011-2020 Contributors to the Eclipse Foundation
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
@@ -12,13 +12,16 @@
package io.vertx.core;
import io.vertx.core.eventbus.Message;
-import io.vertx.core.impl.*;
+import io.vertx.core.impl.ContextInternal;
+import io.vertx.core.impl.Deployment;
+import io.vertx.core.impl.VertxInternal;
import io.vertx.core.impl.verticle.CompilingClassLoader;
import io.vertx.core.json.JsonArray;
import io.vertx.core.json.JsonObject;
-import io.vertx.test.core.*;
-import io.vertx.test.verticles.sourceverticle.SourceVerticle;
+import io.vertx.test.core.TestUtils;
+import io.vertx.test.core.VertxTestBase;
import io.vertx.test.verticles.*;
+import io.vertx.test.verticles.sourceverticle.SourceVerticle;
import org.junit.Test;
import java.io.File;
@@ -1598,6 +1601,44 @@ public void start(final Promise<Void> startFuture) throws Exception {
await();
}
+ @Test
+ public void testUndeployParentDuringChildDeployment() throws Exception {
+ CountDownLatch deployLatch = new CountDownLatch(2);
+ CountDownLatch undeployLatch = new CountDownLatch(1);
+
+ MyAsyncVerticle childVerticle = new MyAsyncVerticle(startPromise -> {
+ deployLatch.countDown();
+ Vertx.currentContext().<Void>executeBlocking(prom -> {
+ try {
+ undeployLatch.await();
+ prom.complete();
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ prom.fail(e.getMessage());
+ }
+ }, startPromise);
+ }, Promise::complete);
+
+ MyAsyncVerticle verticle = new MyAsyncVerticle(startPromise -> {
+ Context parentVerticleContext = Vertx.currentContext();
+ parentVerticleContext.owner().deployVerticle(childVerticle, onFailure(t -> {
+ assertSame(parentVerticleContext, Vertx.currentContext());
+ testComplete();
+ }));
+ startPromise.complete();
+ }, stopPromise -> {
+ undeployLatch.countDown();
+ });
+ AtomicReference<String> deploymentID = new AtomicReference<>();
+ vertx.deployVerticle(verticle, onSuccess(id -> {
+ deploymentID.set(id);
+ deployLatch.countDown();
+ }));
+ awaitLatch(deployLatch);
+ vertx.undeploy(deploymentID.get());
+ await();
+ }
+
// TODO
// Multi-threaded workers
Vertx.deployVerticle(…) calls completion handler on different context
I'm using `io.vertx:vertx-core:3.9.3` and I have some code in a *Verticle* that looks similar to this:
```java
assert Vertx.currentContext() == context;
vertx.deployVerticle(someVerticle, new DeploymentOptions().setWorker(true), result -> {
assert Vertx.currentContext() == context; // <-- AssertionError is thrown here
...
});
```
The second assertion sometimes fails. I expect `deployVerticle(…)` to always call the completion handler in the context from where it was called. Was this assumption wrong or is this a bug in *Vert.x*? The *Javadoc* does not say anything about it but here is a quote from [the manual](https://vertx.io/docs/vertx-core/groovy/#_standard_verticles):
> When you call any other methods that takes a handler on a core API from an event loop then Vert.x will guarantee that those handlers, when called, will be executed on the same event loop.
I also noticed that the assertion only seems to fail when the deployment fails with the following reason.
> Verticle deployment failed.Could not be added as child of parent verticle
In my experience, this means that the Verticle where I call the method was stopped. When the assertion fails, the contexts returned by `Vertx.currentContext()` outside and inside the completion handler are named like `EventLoopContext@35d0ced2` and `io.vertx.core.impl.WorkerContext@1bf4c13e`, respectively. (Maybe Vert.x calls the handler from the context of the worker which was about to be started?)
| Thanks for the report, this looks like a bug.
When invoked from a verticle, the context of the deploy callback should be the same as that of the verticle, regardless of the result.
I was able to reproduce the issue outside of the project where I found it. At least on my machine. The reproduction depends on a race condition. Not sure if it works for you.
https://github.com/JojOatXGME/vertx-issue-3600/blob/main/src/main/java/Issue3600.java | 2020-10-14T15:19:20Z | 3.9 |
eclipse-vertx/vert.x | 3,607 | eclipse-vertx__vert.x-3607 | [
"3606"
] | 2ca9508da3df7b7bc0581c354e3ddceb562a438a | diff --git a/src/main/java/io/vertx/core/DeploymentOptions.java b/src/main/java/io/vertx/core/DeploymentOptions.java
--- a/src/main/java/io/vertx/core/DeploymentOptions.java
+++ b/src/main/java/io/vertx/core/DeploymentOptions.java
@@ -45,6 +45,7 @@ public class DeploymentOptions {
private int instances;
private List<String> isolatedClasses;
private TimeUnit maxWorkerExecuteTimeUnit;
+ private ClassLoader classLoader;
/**
* Default constructor
@@ -360,6 +361,31 @@ public DeploymentOptions setMaxWorkerExecuteTimeUnit(TimeUnit maxWorkerExecuteTi
return this;
}
+ /**
+ * @return the classloader used for deploying the Verticle
+ */
+ public ClassLoader getClassLoader() {
+ return classLoader;
+ }
+
+ /**
+ * Set the classloader to use for deploying the Verticle.
+ *
+ * <p> The {@code VerticleFactory} will use this classloader for creating the Verticle
+ * and the Verticle {@link io.vertx.core.Context} will set this classloader as context
+ * classloader for the tasks execution on context.
+ *
+ * <p> By default no classloader is required and the deployment will use the current thread context
+ * classloader.
+ *
+ * @param classLoader the loader to use
+ * @return a reference to this, so the API can be used fluently
+ */
+ public DeploymentOptions setClassLoader(ClassLoader classLoader) {
+ this.classLoader = classLoader;
+ return this;
+ }
+
/**
* Throw {@code IllegalArgumentException} when loader isolation configuration has been defined.
*/
diff --git a/src/main/java/io/vertx/core/impl/DeploymentManager.java b/src/main/java/io/vertx/core/impl/DeploymentManager.java
--- a/src/main/java/io/vertx/core/impl/DeploymentManager.java
+++ b/src/main/java/io/vertx/core/impl/DeploymentManager.java
@@ -62,7 +62,13 @@ public Future<String> deployVerticle(Callable<Verticle> verticleSupplier, Deploy
}
options.checkIsolationNotDefined();
ContextInternal currentContext = vertx.getOrCreateContext();
- ClassLoader cl = getCurrentClassLoader();
+ ClassLoader cl = options.getClassLoader();
+ if (cl == null) {
+ cl = Thread.currentThread().getContextClassLoader();
+ if (cl == null) {
+ cl = getClass().getClassLoader();
+ }
+ }
return doDeploy(options, v -> "java:" + v.getClass().getName(), currentContext, currentContext, cl, verticleSupplier)
.map(Deployment::deploymentID);
}
@@ -116,14 +122,6 @@ public Future<Void> undeployAll() {
}
}
- private ClassLoader getCurrentClassLoader() {
- ClassLoader cl = Thread.currentThread().getContextClassLoader();
- if (cl == null) {
- cl = getClass().getClassLoader();
- }
- return cl;
- }
-
private <T> void reportFailure(Throwable t, Context context, Handler<AsyncResult<T>> completionHandler) {
if (completionHandler != null) {
reportResult(context, completionHandler, Future.failedFuture(t));
diff --git a/src/main/java/io/vertx/core/impl/VerticleManager.java b/src/main/java/io/vertx/core/impl/VerticleManager.java
--- a/src/main/java/io/vertx/core/impl/VerticleManager.java
+++ b/src/main/java/io/vertx/core/impl/VerticleManager.java
@@ -31,7 +31,7 @@
* @author <a href="http://tfox.org">Tim Fox</a>
*/
public class VerticleManager {
-
+
private final VertxInternal vertx;
private final DeploymentManager deploymentManager;
private final LoaderManager loaderManager = new LoaderManager();
@@ -145,8 +145,14 @@ private static String getSuffix(int pos, String str) {
public Future<Deployment> deployVerticle(String identifier,
DeploymentOptions options) {
ContextInternal callingContext = vertx.getOrCreateContext();
- ClassLoaderHolder holder = loaderManager.getClassLoader(options);
- ClassLoader loader = holder != null ? holder.loader : getCurrentClassLoader();
+ ClassLoaderHolder holder;
+ ClassLoader loader = options.getClassLoader();
+ if (loader == null) {
+ holder = loaderManager.getClassLoader(options);
+ loader = holder != null ? holder.loader : getCurrentClassLoader();
+ } else {
+ holder = null;
+ }
Future<Deployment> deployment = doDeployVerticle(identifier, options, callingContext, callingContext, loader);
if (holder != null) {
deployment.onComplete(ar -> {
diff --git a/src/main/java11/io/vertx/core/DeploymentOptions.java b/src/main/java11/io/vertx/core/DeploymentOptions.java
--- a/src/main/java11/io/vertx/core/DeploymentOptions.java
+++ b/src/main/java11/io/vertx/core/DeploymentOptions.java
@@ -41,6 +41,7 @@ public class DeploymentOptions {
private boolean ha;
private int instances;
private TimeUnit maxWorkerExecuteTimeUnit;
+ private ClassLoader classLoader;
/**
* Default constructor
@@ -267,6 +268,30 @@ public DeploymentOptions setMaxWorkerExecuteTimeUnit(TimeUnit maxWorkerExecuteTi
return this;
}
+ /**
+ * @return the classloader used for deploying the Verticle
+ */
+ public ClassLoader getClassLoader() {
+ return classLoader;
+ }
+
+ /**
+ * Set the classloader to use for deploying the Verticle.
+ *
+ * <p> The {@code VerticleFactory} will use this classloader for creating the Verticle
+ * and the Verticle {@link io.vertx.core.Context} will set this classloader as context
+ * classloader for the tasks execution on context.
+ *
+ * <p> By default no classloader is required and the deployment will use the current thread context
+ * classloader.
+ *
+ * @param classLoader the loader to use
+ * @return a reference to this, so the API can be used fluently
+ */
+ public DeploymentOptions setClassLoader(ClassLoader classLoader) {
+ this.classLoader = classLoader;
+ return this;
+ }
/**
* Does nothing.
*/
| diff --git a/src/test/java/io/vertx/core/DeploymentTest.java b/src/test/java/io/vertx/core/DeploymentTest.java
--- a/src/test/java/io/vertx/core/DeploymentTest.java
+++ b/src/test/java/io/vertx/core/DeploymentTest.java
@@ -1590,6 +1590,41 @@ private void testIsolationGroup(String group1, String group2, int count1, int co
}
}
+ @Test
+ public void testContextClassLoader() throws Exception {
+ File tmp = File.createTempFile("vertx-", ".txt");
+ tmp.deleteOnExit();
+ Files.write(tmp.toPath(), "hello".getBytes());
+ URL url = tmp.toURI().toURL();
+ AtomicBoolean used = new AtomicBoolean();
+ ClassLoader cl = new ClassLoader(Thread.currentThread().getContextClassLoader()) {
+ @Override
+ public URL getResource(String name) {
+ if (name.equals("/foo.txt")) {
+ used.set(true);
+ return url;
+ }
+ return super.getResource(name);
+ }
+ };
+ vertx.deployVerticle(new AbstractVerticle() {
+ @Override
+ public void start() {
+ assertSame(cl, Thread.currentThread().getContextClassLoader());
+ assertSame(cl, ((ContextInternal)context).classLoader());
+ vertx.fileSystem().props("/foo.txt", onSuccess(props -> {
+ assertEquals(5, props.size());
+ assertTrue(used.get());
+ vertx.undeploy(context.deploymentID(), onSuccess(v -> {
+ testComplete();
+ }));
+ }));
+ }
+ }, new DeploymentOptions().setClassLoader(cl), onSuccess(id -> {
+ }));
+ await();
+ }
+
private void assertDeployment(int instances, MyVerticle verticle, JsonObject config, AsyncResult<String> ar) {
assertTrue(ar.succeeded());
assertEquals(vertx, verticle.getVertx());
diff --git a/src/test/java/io/vertx/core/VerticleFactoryTest.java b/src/test/java/io/vertx/core/VerticleFactoryTest.java
--- a/src/test/java/io/vertx/core/VerticleFactoryTest.java
+++ b/src/test/java/io/vertx/core/VerticleFactoryTest.java
@@ -16,6 +16,7 @@
import org.junit.Test;
import java.util.concurrent.Callable;
+import java.util.concurrent.atomic.AtomicReference;
/**
* @author <a href="http://tfox.org">Tim Fox</a>
@@ -385,4 +386,28 @@ public void stop() throws Exception {
}
}
-}
+
+ @Test
+ public void testClassLoader() {
+ ClassLoader loader = new ClassLoader(Thread.currentThread().getContextClassLoader()) {
+ };
+ AtomicReference<ClassLoader> createClassLoader = new AtomicReference<>();
+ VerticleFactory factory = new VerticleFactory() {
+ @Override
+ public String prefix() {
+ return "test";
+ }
+ @Override
+ public void createVerticle(String verticleName, ClassLoader classLoader, Promise<Callable<Verticle>> promise) {
+ createClassLoader.set(classLoader);
+ promise.complete(() -> new AbstractVerticle() {
+ });
+ }
+ };
+ vertx.registerVerticleFactory(factory);
+ vertx.deployVerticle("test:foo", new DeploymentOptions().setClassLoader(loader), onSuccess(id -> {
+ assertSame(loader, createClassLoader.get());
+ testComplete();
+ }));
+ await();
+ }}
| Verticle deployment class loader
Verticle deployment has been using the implicit thread context classloader for the `VerticleFactory` and as the `io.vertx.core.Context` class loader. This classloader is used indirectly by code that needs to resolve files in some places (e.g. `HttpServerResponse#sendFile(String)`). Until now, the only way to use a specific classloader was to set a temporary thread context classloader in some places (such as when deploying a verticle or sending a file).
The `DeploymentOptions` will now hold an optional classloader that will be passed to the `VerticleFactory` and associated with the `Context`. When no classloader is provided, the current thread context classloader is used.
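As a usage sketch (the verticle class name `com.example.MyVerticle` is hypothetical), a deployment can now carry its own classloader, which the `VerticleFactory` receives and which becomes the context classloader for the verticle's tasks:

```java
import io.vertx.core.DeploymentOptions;
import io.vertx.core.Vertx;

public class ClassLoaderDeployExample {
  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();
    // Any classloader will do for the sketch; here we just parent it to the current TCCL.
    ClassLoader cl = new ClassLoader(Thread.currentThread().getContextClassLoader()) {};
    // "com.example.MyVerticle" is a hypothetical verticle class name.
    vertx.deployVerticle("com.example.MyVerticle",
        new DeploymentOptions().setClassLoader(cl),
        ar -> {
          // On success the VerticleFactory created the verticle with `cl`,
          // and the verticle Context uses `cl` as thread context classloader.
        });
  }
}
```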
| 2020-10-19T14:25:30Z | 4 |
|
eclipse-vertx/vert.x | 3,559 | eclipse-vertx__vert.x-3559 | [
"3560"
] | 26651deb950b67d68e417032460bead1e694233a | diff --git a/src/main/java/io/vertx/core/buffer/impl/BufferImpl.java b/src/main/java/io/vertx/core/buffer/impl/BufferImpl.java
--- a/src/main/java/io/vertx/core/buffer/impl/BufferImpl.java
+++ b/src/main/java/io/vertx/core/buffer/impl/BufferImpl.java
@@ -72,11 +72,11 @@ public BufferImpl() {
}
BufferImpl(int initialSizeHint) {
- buffer = Unpooled.buffer(initialSizeHint, Integer.MAX_VALUE);
+ buffer = VertxByteBufAllocator.DEFAULT.heapBuffer(initialSizeHint, Integer.MAX_VALUE);
}
BufferImpl(byte[] bytes) {
- buffer = Unpooled.buffer(bytes.length, Integer.MAX_VALUE).writeBytes(bytes);
+ buffer = VertxByteBufAllocator.DEFAULT.heapBuffer(bytes.length, Integer.MAX_VALUE).writeBytes(bytes);
}
BufferImpl(String str, String enc) {
@@ -507,8 +507,19 @@ public Buffer slice(int start, int end) {
return new BufferImpl(buffer.slice(start, end - start));
}
+ /**
+ * @return the buffer as is
+ */
+ public ByteBuf byteBuf() {
+ return buffer;
+ }
+
public ByteBuf getByteBuf() {
- return Unpooled.unreleasableBuffer(buffer.duplicate());
+ ByteBuf duplicate = buffer.duplicate();
+ if (buffer.getClass() != VertxHeapByteBuf.class && buffer.getClass() != VertxUnsafeHeapByteBuf.class) {
+ duplicate = Unpooled.unreleasableBuffer(duplicate);
+ }
+ return duplicate;
}
private Buffer append(String str, Charset charset) {
diff --git a/src/main/java/io/vertx/core/buffer/impl/VertxByteBufAllocator.java b/src/main/java/io/vertx/core/buffer/impl/VertxByteBufAllocator.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/io/vertx/core/buffer/impl/VertxByteBufAllocator.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2011-2020 Contributors to the Eclipse Foundation
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+ * which is available at https://www.apache.org/licenses/LICENSE-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+ */
+package io.vertx.core.buffer.impl;
+
+import io.netty.buffer.AbstractByteBufAllocator;
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.UnpooledByteBufAllocator;
+import io.netty.util.internal.PlatformDependent;
+
+abstract class VertxByteBufAllocator extends AbstractByteBufAllocator {
+
+ private static VertxByteBufAllocator UNSAFE_IMPL = new VertxByteBufAllocator() {
+ @Override
+ protected ByteBuf newHeapBuffer(int initialCapacity, int maxCapacity) {
+ return new VertxUnsafeHeapByteBuf(this, initialCapacity, maxCapacity);
+ }
+ };
+
+ private static VertxByteBufAllocator IMPL = new VertxByteBufAllocator() {
+ @Override
+ protected ByteBuf newHeapBuffer(int initialCapacity, int maxCapacity) {
+ return new VertxHeapByteBuf(this, initialCapacity, maxCapacity);
+ }
+ };
+
+ static final VertxByteBufAllocator DEFAULT = PlatformDependent.hasUnsafe() ? UNSAFE_IMPL : IMPL;
+
+ @Override
+ protected ByteBuf newDirectBuffer(int initialCapacity, int maxCapacity) {
+ return UnpooledByteBufAllocator.DEFAULT.directBuffer(initialCapacity, maxCapacity);
+ }
+
+ @Override
+ public boolean isDirectBufferPooled() {
+ return false;
+ }
+}
diff --git a/src/main/java/io/vertx/core/buffer/impl/VertxHeapByteBuf.java b/src/main/java/io/vertx/core/buffer/impl/VertxHeapByteBuf.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/io/vertx/core/buffer/impl/VertxHeapByteBuf.java
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2011-2020 Contributors to the Eclipse Foundation
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+ * which is available at https://www.apache.org/licenses/LICENSE-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+ */
+package io.vertx.core.buffer.impl;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.ByteBufAllocator;
+import io.netty.buffer.UnpooledHeapByteBuf;
+import io.netty.buffer.UnpooledUnsafeHeapByteBuf;
+
+/**
+ * An un-releasable, un-pooled, un-instrumented heap {@code ByteBuf}.
+ */
+final class VertxHeapByteBuf extends UnpooledHeapByteBuf {
+
+ public VertxHeapByteBuf(ByteBufAllocator alloc, int initialCapacity, int maxCapacity) {
+ super(alloc, initialCapacity, maxCapacity);
+ }
+
+ @Override
+ public ByteBuf retain(int increment) {
+ return this;
+ }
+
+ @Override
+ public ByteBuf retain() {
+ return this;
+ }
+
+ @Override
+ public ByteBuf touch() {
+ return this;
+ }
+
+ @Override
+ public ByteBuf touch(Object hint) {
+ return this;
+ }
+
+ @Override
+ public boolean release() {
+ return false;
+ }
+
+ @Override
+ public boolean release(int decrement) {
+ return false;
+ }
+}
diff --git a/src/main/java/io/vertx/core/buffer/impl/VertxUnsafeHeapByteBuf.java b/src/main/java/io/vertx/core/buffer/impl/VertxUnsafeHeapByteBuf.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/io/vertx/core/buffer/impl/VertxUnsafeHeapByteBuf.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2011-2020 Contributors to the Eclipse Foundation
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+ * which is available at https://www.apache.org/licenses/LICENSE-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+ */
+package io.vertx.core.buffer.impl;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.ByteBufAllocator;
+import io.netty.buffer.UnpooledUnsafeHeapByteBuf;
+
+/**
+ * An un-releasable, un-pooled, un-instrumented, un-safe heap {@code ByteBuf}.
+ */
+final class VertxUnsafeHeapByteBuf extends UnpooledUnsafeHeapByteBuf {
+
+ public VertxUnsafeHeapByteBuf(ByteBufAllocator alloc, int initialCapacity, int maxCapacity) {
+ super(alloc, initialCapacity, maxCapacity);
+ }
+
+ @Override
+ public ByteBuf retain(int increment) {
+ return this;
+ }
+
+ @Override
+ public ByteBuf retain() {
+ return this;
+ }
+
+ @Override
+ public ByteBuf touch() {
+ return this;
+ }
+
+ @Override
+ public ByteBuf touch(Object hint) {
+ return this;
+ }
+
+ @Override
+ public boolean release() {
+ return false;
+ }
+
+ @Override
+ public boolean release(int decrement) {
+ return false;
+ }
+}
| diff --git a/src/test/java/io/vertx/core/buffer/impl/VertxBufferTest.java b/src/test/java/io/vertx/core/buffer/impl/VertxBufferTest.java
new file mode 100644
--- /dev/null
+++ b/src/test/java/io/vertx/core/buffer/impl/VertxBufferTest.java
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2011-2020 Contributors to the Eclipse Foundation
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+ * which is available at https://www.apache.org/licenses/LICENSE-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+ */
+
+package io.vertx.core.buffer.impl;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
+import io.netty.util.IllegalReferenceCountException;
+import io.vertx.core.buffer.Buffer;
+import io.vertx.core.json.DecodeException;
+import io.vertx.core.json.JsonArray;
+import io.vertx.core.json.JsonObject;
+import io.vertx.test.core.TestUtils;
+import org.junit.Test;
+
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.util.function.Function;
+
+import static io.vertx.test.core.TestUtils.assertIllegalArgumentException;
+import static io.vertx.test.core.TestUtils.assertIndexOutOfBoundsException;
+import static io.vertx.test.core.TestUtils.assertNullPointerException;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+public class VertxBufferTest {
+
+ @Test
+ public void testAllocateVertxBuffer() {
+ BufferImpl buffer = new BufferImpl();
+ ByteBuf byteBuf = buffer.byteBuf();
+ assertTrue(byteBuf instanceof VertxHeapByteBuf || byteBuf instanceof VertxUnsafeHeapByteBuf);
+ }
+
+ @Test
+ public void testUnreleasable() {
+ BufferImpl buffer = new BufferImpl();
+ ByteBuf byteBuf = buffer.byteBuf();
+ assertEquals(1, byteBuf.refCnt());
+ byteBuf.release();
+ assertEquals(1, byteBuf.refCnt());
+ }
+
+ @Test
+ public void testDuplicate() {
+ BufferImpl buffer = new BufferImpl();
+ buffer.appendString("Hello World");
+ ByteBuf byteBuf = buffer.byteBuf();
+ ByteBuf duplicate = buffer.getByteBuf();
+ assertEquals(1, byteBuf.refCnt());
+ duplicate.release();
+ assertEquals(1, duplicate.refCnt());
+ assertEquals(1, byteBuf.refCnt());
+ duplicate.readerIndex(3);
+ assertEquals(3, duplicate.readerIndex());
+ assertEquals(0, byteBuf.readerIndex());
+ ByteBuf duplicateSlice = duplicate.slice(0, 5);
+ duplicateSlice.release();
+ assertEquals(1, duplicateSlice.refCnt());
+ assertEquals(1, duplicate.refCnt());
+ assertEquals(1, byteBuf.refCnt());
+ duplicateSlice.readerIndex(1);
+ assertEquals(1, duplicateSlice.readerIndex());
+ assertEquals(3, duplicate.readerIndex());
+ assertEquals(0, byteBuf.readerIndex());
+ }
+}
| Optimize heap buffers
Heap `Buffer`s created with `Unpooled.buffer(...)` are instrumented and maintain a reference count. This reference counting requires wrapping the `ByteBuf` duplicate with an unreleasable wrapper so that releasing the returned duplicate will not affect the actual `ByteBuf` held by `BufferImpl`.
We can optimize these buffers by using custom subclasses of `UnpooledHeapByteBuf` and `UnpooledUnsafeHeapByteBuf` instead, to avoid unnecessary operations. Such classes are un-instrumented, and the reference counting methods can be overridden to disable reference counting, which avoids wrapping the `ByteBuf` duplicate with an unreleasable wrapper.
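A small sketch of the observable effect, mirroring the accompanying `testDuplicate`/`testUnreleasable` tests: with reference counting disabled on the Vert.x heap `ByteBuf`, releasing the duplicate returned by `getByteBuf()` is a no-op, so no unreleasable wrapper is needed:

```java
import io.netty.buffer.ByteBuf;
import io.vertx.core.buffer.Buffer;

public class UnreleasableBufferExample {
  public static void main(String[] args) {
    Buffer buffer = Buffer.buffer("Hello World");
    ByteBuf duplicate = buffer.getByteBuf();
    // release() is overridden to do nothing on the Vert.x heap ByteBuf,
    // so the underlying buffer stays usable and refCnt() remains 1.
    duplicate.release();
    System.out.println(duplicate.refCnt()); // prints 1
  }
}
```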
| 2020-09-06T20:40:25Z | 4 |
|
eclipse-vertx/vert.x | 3,428 | eclipse-vertx__vert.x-3428 | [
"3427"
] | 6a92a49b07ee95cf7dfc2cc7a3e0ab9c51eb385f | diff --git a/src/main/java/io/vertx/core/eventbus/impl/EventBusImpl.java b/src/main/java/io/vertx/core/eventbus/impl/EventBusImpl.java
--- a/src/main/java/io/vertx/core/eventbus/impl/EventBusImpl.java
+++ b/src/main/java/io/vertx/core/eventbus/impl/EventBusImpl.java
@@ -523,13 +523,14 @@ private <T> void deliverToHandler(MessageImpl msg, HandlerHolder<T> holder) {
// Each handler gets a fresh copy
MessageImpl copied = msg.copyBeforeReceive();
DeliveryContext<T> receiveContext = new InboundDeliveryContext<>(copied, holder);
+
+ if (metrics != null) {
+ metrics.scheduleMessage(holder.getHandler().getMetric(), msg.isLocal());
+ }
+
holder.getContext().runOnContext((v) -> {
- // Need to check handler is still there - the handler might have been removed after the message were sent but
- // before it was received
try {
- if (!holder.isRemoved()) {
- receiveContext.next();
- }
+ receiveContext.next();
} finally {
if (holder.isReplyHandler()) {
holder.getHandler().unregister();
diff --git a/src/main/java/io/vertx/core/eventbus/impl/HandlerRegistration.java b/src/main/java/io/vertx/core/eventbus/impl/HandlerRegistration.java
--- a/src/main/java/io/vertx/core/eventbus/impl/HandlerRegistration.java
+++ b/src/main/java/io/vertx/core/eventbus/impl/HandlerRegistration.java
@@ -16,7 +16,6 @@
import io.vertx.core.eventbus.MessageConsumer;
import io.vertx.core.eventbus.ReplyException;
import io.vertx.core.eventbus.ReplyFailure;
-import io.vertx.core.eventbus.impl.clustered.ClusteredMessage;
import io.vertx.core.impl.Arguments;
import io.vertx.core.impl.ContextInternal;
import io.vertx.core.logging.Logger;
@@ -98,7 +97,7 @@ public MessageConsumer<T> setMaxBufferedMessages(int maxBufferedMessages) {
}
for (Message<T> msg : discarded) {
if (metrics != null) {
- metrics.discardMessage(metric, isLocal(msg), msg);
+ metrics.discardMessage(metric, ((MessageImpl)msg).isLocal(), msg);
}
if (discardHandler != null) {
discardHandler.handle(msg);
@@ -176,7 +175,7 @@ private void doUnregister(Handler<AsyncResult<Void>> doneHandler) {
if (metrics != null) {
Message<T> msg;
while ((msg = pending.poll()) != null) {
- metrics.discardMessage(metric, isLocal(msg), msg);
+ metrics.discardMessage(metric, ((MessageImpl)msg).isLocal(), msg);
}
} else {
pending.clear();
@@ -224,21 +223,24 @@ public synchronized void setResult(AsyncResult<Void> result) {
@Override
public void handle(Message<T> message) {
+ boolean local = ((MessageImpl) message).isLocal();
Handler<Message<T>> theHandler;
ContextInternal ctx;
synchronized (this) {
+ // Need to check handler is still there - the handler might have been removed after the message were sent but
+ // before it was received
if (registered == null) {
+ if (metrics != null) {
+ metrics.discardMessage(metric, local, message);
+ }
return;
}
- if (metrics != null) {
- metrics.scheduleMessage(metric, isLocal(message));
- }
if (demand == 0L) {
if (pending.size() < maxBufferedMessages) {
pending.add(message);
} else {
if (metrics != null) {
- metrics.discardMessage(metric, isLocal(message), message);
+ metrics.discardMessage(metric, local, message);
}
if (discardHandler != null) {
discardHandler.handle(message);
@@ -271,7 +273,7 @@ private void deliver(Handler<Message<T>> theHandler, Message<T> message, Context
}
try {
if (metrics != null) {
- metrics.beginHandleMessage(metric, isLocal(message));
+ metrics.beginHandleMessage(metric, ((MessageImpl)message).isLocal());
}
theHandler.handle(message);
if (metrics != null) {
@@ -391,16 +393,4 @@ public Handler<Message<T>> getHandler() {
public Object getMetric() {
return metric;
}
-
- private boolean isLocal(Message<?> message) {
- boolean local = true;
- if (message instanceof ClusteredMessage) {
- // A bit hacky
- ClusteredMessage cmsg = (ClusteredMessage)message;
- if (cmsg.isFromWire()) {
- return false;
- }
- }
- return true;
- }
}
| diff --git a/src/test/java/io/vertx/core/eventbus/EventBusRegistrationRaceTest.java b/src/test/java/io/vertx/core/eventbus/EventBusRegistrationRaceTest.java
--- a/src/test/java/io/vertx/core/eventbus/EventBusRegistrationRaceTest.java
+++ b/src/test/java/io/vertx/core/eventbus/EventBusRegistrationRaceTest.java
@@ -10,8 +10,11 @@
*/
package io.vertx.core.eventbus;
-import io.vertx.core.eventbus.EventBus;
-import io.vertx.core.eventbus.MessageConsumer;
+import io.vertx.core.VertxOptions;
+import io.vertx.core.metrics.MetricsOptions;
+import io.vertx.core.spi.VertxMetricsFactory;
+import io.vertx.core.spi.metrics.EventBusMetrics;
+import io.vertx.core.spi.metrics.VertxMetrics;
import io.vertx.test.core.VertxTestBase;
import org.junit.Test;
@@ -32,6 +35,39 @@ public class EventBusRegistrationRaceTest extends VertxTestBase {
private static final int NUM_MSG = 300_000;
private static String TEST_ADDR = "the-addr";
+ private final AtomicInteger count = new AtomicInteger();
+
+ @Override
+ protected VertxOptions getOptions() {
+ VertxOptions options = super.getOptions();
+ options.setMetricsOptions(new MetricsOptions().setEnabled(true).setFactory(new VertxMetricsFactory() {
+ @Override
+ public VertxMetrics metrics(VertxOptions options) {
+ return new VertxMetrics() {
+ @Override
+ public EventBusMetrics<Void> createEventBusMetrics() {
+ return new EventBusMetrics<Void>() {
+ @Override
+ public void scheduleMessage(Void handler, boolean local) {
+ count.incrementAndGet();
+ }
+
+ @Override
+ public void beginHandleMessage(Void handler, boolean local) {
+ count.decrementAndGet();
+ }
+ @Override
+ public void discardMessage(Void handler, boolean local, Message<?> msg) {
+ count.decrementAndGet();
+ }
+ };
+ }
+ };
+ }
+ }));
+ return options;
+ }
+
@Test
public void theTest() throws Exception {
AtomicInteger seq = new AtomicInteger();
@@ -43,6 +79,7 @@ public void theTest() throws Exception {
threadB.start();
threadA.join(20 * 1000);
threadB.join(20 * 1000);
+ assertEquals(0, count.get());
}
private void threadA(AtomicInteger seq) {
diff --git a/src/test/java/io/vertx/core/spi/metrics/MetricsContextTest.java b/src/test/java/io/vertx/core/spi/metrics/MetricsContextTest.java
--- a/src/test/java/io/vertx/core/spi/metrics/MetricsContextTest.java
+++ b/src/test/java/io/vertx/core/spi/metrics/MetricsContextTest.java
@@ -795,6 +795,7 @@ public void testMessageHandlerEventLoop() {
}
private void testMessageHandler(BiConsumer<Vertx, Handler<Void>> runOnContext, BiConsumer<Thread, Context> checker) {
+ AtomicReference<Thread> scheduleThread = new AtomicReference<>();
AtomicReference<Thread> consumerThread = new AtomicReference<>();
AtomicReference<Context> consumerContext = new AtomicReference<>();
AtomicBoolean registeredCalled = new AtomicBoolean();
@@ -819,6 +820,10 @@ public void handlerUnregistered(Void handler) {
unregisteredCalled.set(true);
}
@Override
+ public void scheduleMessage(Void handler, boolean local) {
+ scheduleThread.set(Thread.currentThread());
+ }
+ @Override
public void beginHandleMessage(Void handler, boolean local) {
consumerThread.set(Thread.currentThread());
consumerContext.set(Vertx.currentContext());
@@ -834,6 +839,9 @@ public void endHandleMessage(Void handler, Throwable failure) {
};
Vertx vertx = vertx(new VertxOptions().setMetricsOptions(new MetricsOptions().setEnabled(true).setFactory(factory)));
EventBus eb = vertx.eventBus();
+ Thread t = new Thread(() -> {
+ eb.send("the_address", "the_msg");
+ });
runOnContext.accept(vertx, v -> {
MessageConsumer<Object> consumer = eb.consumer("the_address");
consumer.handler(msg -> {
@@ -841,6 +849,7 @@ public void endHandleMessage(Void handler, Throwable failure) {
executeInVanillaThread(() -> {
vertx.getOrCreateContext().runOnContext(v2 -> {
consumer.unregister(onSuccess(v3 -> {
+ assertSame(t, scheduleThread.get());
assertTrue(registeredCalled.get());
assertTrue(beginHandleCalled.get());
assertTrue(endHandleCalled.get());
@@ -850,7 +859,7 @@ public void endHandleMessage(Void handler, Throwable failure) {
});
});
}).completionHandler(onSuccess(v2 -> {
- eb.send("the_address", "the_msg");
+ t.start();
}));
});
await();
| Ensure EventBusMetrics message schedule event is called before the event-loop action
The `EventBusMetrics#scheduleMessage` method was changed to be called on the event loop in order to avoid incorrect counters in metrics implementations (which needed to compensate and correct the counter value) when a message consumer is unregistered.
It is more interesting to have the event delivered on the thread that schedules the delivery action on the event loop, as it provides a better observation of the latency of a message dispatch after it has been received.
We should compensate for a consumer unregistration with a discard message event, so that the metrics implementation is aware of it and can correct the actual value.
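A minimal sketch of a metrics implementation relying on this contract (the class name `PendingMessagesMetrics` is illustrative; the callback signatures mirror the accompanying test): every `scheduleMessage` is eventually balanced by either `beginHandleMessage` or `discardMessage`, so a pending-message counter returns to zero even when consumers are unregistered:

```java
import io.vertx.core.eventbus.Message;
import io.vertx.core.spi.metrics.EventBusMetrics;

import java.util.concurrent.atomic.AtomicInteger;

// Counts messages that were scheduled on a consumer context but are not
// yet handled or discarded. With the described contract the counter never
// drifts when consumers are unregistered.
public class PendingMessagesMetrics implements EventBusMetrics<Void> {

  private final AtomicInteger pending = new AtomicInteger();

  @Override
  public void scheduleMessage(Void handler, boolean local) {
    pending.incrementAndGet(); // called on the sending thread
  }

  @Override
  public void beginHandleMessage(Void handler, boolean local) {
    pending.decrementAndGet(); // delivery started on the consumer context
  }

  @Override
  public void discardMessage(Void handler, boolean local, Message<?> msg) {
    pending.decrementAndGet(); // consumer unregistered or buffer overflow
  }

  public int pending() {
    return pending.get();
  }
}
```

Such an implementation would be registered via a `VertxMetricsFactory`, as the test above does with its anonymous `EventBusMetrics` instance.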
| 2020-05-26T07:35:41Z | 3.9 |
|
eclipse-vertx/vert.x | 3,418 | eclipse-vertx__vert.x-3418 | [
"3417"
] | 2bdf94b30016036cb5833b53ffb9f834f62ad5a6 | diff --git a/src/main/java/io/vertx/core/Context.java b/src/main/java/io/vertx/core/Context.java
--- a/src/main/java/io/vertx/core/Context.java
+++ b/src/main/java/io/vertx/core/Context.java
@@ -276,6 +276,6 @@ static boolean isOnVertxThread() {
void addCloseHook(Closeable hook);
@GenIgnore(GenIgnore.PERMITTED_TYPE)
- boolean removeCloseHook(Closeable hook);
+ void removeCloseHook(Closeable hook);
}
diff --git a/src/main/java/io/vertx/core/eventbus/impl/EventBusImpl.java b/src/main/java/io/vertx/core/eventbus/impl/EventBusImpl.java
--- a/src/main/java/io/vertx/core/eventbus/impl/EventBusImpl.java
+++ b/src/main/java/io/vertx/core/eventbus/impl/EventBusImpl.java
@@ -477,15 +477,5 @@ public void close(Promise<Void> completion) {
}
}
-
- @Override
- protected void finalize() throws Throwable {
- // Make sure this gets cleaned up if there are no more references to it
- // so as not to leave connections and resources dangling until the system is shutdown
- // which could make the JVM run out of file handles.
- close(Promise.promise());
- super.finalize();
- }
-
}
diff --git a/src/main/java/io/vertx/core/eventbus/impl/MessageProducerImpl.java b/src/main/java/io/vertx/core/eventbus/impl/MessageProducerImpl.java
--- a/src/main/java/io/vertx/core/eventbus/impl/MessageProducerImpl.java
+++ b/src/main/java/io/vertx/core/eventbus/impl/MessageProducerImpl.java
@@ -83,11 +83,4 @@ public void close(Handler<AsyncResult<Void>> handler) {
fut.onComplete(handler);
}
}
-
- // Just in case user forget to call close()
- @Override
- protected void finalize() throws Throwable {
- close();
- super.finalize();
- }
}
diff --git a/src/main/java/io/vertx/core/eventbus/impl/clustered/ConnectionHolder.java b/src/main/java/io/vertx/core/eventbus/impl/clustered/ConnectionHolder.java
--- a/src/main/java/io/vertx/core/eventbus/impl/clustered/ConnectionHolder.java
+++ b/src/main/java/io/vertx/core/eventbus/impl/clustered/ConnectionHolder.java
@@ -59,7 +59,7 @@ class ConnectionHolder {
NetClientOptions clientOptions = new NetClientOptions(options.toJson());
ClusteredEventBus.setCertOptions(clientOptions, options.getKeyCertOptions());
ClusteredEventBus.setTrustOptions(clientOptions, options.getTrustOptions());
- client = new NetClientImpl(eventBus.vertx(), clientOptions, false);
+ client = new NetClientImpl(eventBus.vertx(), clientOptions, null);
}
void connect() {
diff --git a/src/main/java/io/vertx/core/http/HttpClient.java b/src/main/java/io/vertx/core/http/HttpClient.java
--- a/src/main/java/io/vertx/core/http/HttpClient.java
+++ b/src/main/java/io/vertx/core/http/HttpClient.java
@@ -1387,4 +1387,9 @@ public interface HttpClient extends Measured {
*/
Future<Void> close();
+ /**
+ * @return a future closed when the client is closed
+ */
+ Future<Void> closeFuture();
+
}
diff --git a/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java b/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java
--- a/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java
@@ -14,7 +14,6 @@
import io.netty.channel.group.ChannelGroup;
import io.netty.channel.group.ChannelGroupFuture;
import io.netty.channel.group.DefaultChannelGroup;
-import io.netty.util.concurrent.GlobalEventExecutor;
import io.vertx.core.Closeable;
import io.vertx.core.Context;
import io.vertx.core.Future;
@@ -23,6 +22,7 @@
import io.vertx.core.*;
import io.vertx.core.buffer.Buffer;
import io.vertx.core.http.*;
+import io.vertx.core.impl.CloseHooks;
import io.vertx.core.net.impl.clientconnection.ConnectionManager;
import io.vertx.core.impl.ContextInternal;
import io.vertx.core.impl.PromiseInternal;
@@ -40,6 +40,7 @@
import io.vertx.core.spi.metrics.MetricsProvider;
import io.vertx.core.streams.ReadStream;
+import java.lang.ref.WeakReference;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Arrays;
@@ -57,7 +58,7 @@
*
* @author <a href="http://tfox.org">Tim Fox</a>
*/
-public class HttpClientImpl implements HttpClient, MetricsProvider {
+public class HttpClientImpl implements HttpClient, MetricsProvider, Closeable {
// Pattern to check we are not dealing with an absoluate URI
private static final Pattern ABS_URI_START_PATTERN = Pattern.compile("^\\p{Alpha}[\\p{Alpha}\\p{Digit}+.\\-]*:");
@@ -111,25 +112,26 @@ public class HttpClientImpl implements HttpClient, MetricsProvider {
private final VertxInternal vertx;
private final ChannelGroup channelGroup;
private final HttpClientOptions options;
- private final ContextInternal context;
private final ConnectionManager<EndpointKey, HttpClientConnection> webSocketCM;
private final ConnectionManager<EndpointKey, HttpClientConnection> httpCM;
- private final Closeable closeHook;
+ private final CloseHooks closeHooks;
private final ProxyType proxyType;
private final SSLHelper sslHelper;
private final HttpClientMetrics metrics;
private final boolean keepAlive;
private final boolean pipelining;
+ private final PromiseInternal<Void> closePromise;
+ private final Future<Void> closeFuture;
private long timerID;
private volatile boolean closed;
private volatile Handler<HttpConnection> connectionHandler;
private volatile Function<HttpClientResponse, Future<HttpClientRequest>> redirectHandler = DEFAULT_HANDLER;
- public HttpClientImpl(VertxInternal vertx, HttpClientOptions options) {
+ public HttpClientImpl(VertxInternal vertx, CloseHooks hooks, HttpClientOptions options) {
this.vertx = vertx;
this.metrics = vertx.metricsSPI() != null ? vertx.metricsSPI().createHttpClientMetrics(options) : null;
this.options = new HttpClientOptions(options);
- this.channelGroup = new DefaultChannelGroup(GlobalEventExecutor.INSTANCE);
+ this.channelGroup = new DefaultChannelGroup(vertx.getAcceptorEventLoopGroup().next());
List<HttpVersion> alpnVersions = options.getAlpnVersions();
if (alpnVersions == null || alpnVersions.isEmpty()) {
switch (options.getProtocolVersion()) {
@@ -146,33 +148,56 @@ public HttpClientImpl(VertxInternal vertx, HttpClientOptions options) {
this.sslHelper = new SSLHelper(options, options.getKeyCertOptions(), options.getTrustOptions()).
setApplicationProtocols(alpnVersions);
sslHelper.validate(vertx);
- context = vertx.getOrCreateContext();
- closeHook = completionHandler -> {
- HttpClientImpl.this.close();
- completionHandler.handle(Future.succeededFuture());
- };
if(options.getProtocolVersion() == HttpVersion.HTTP_2 && Context.isOnWorkerThread()) {
throw new IllegalStateException("Cannot use HttpClient with HTTP_2 in a worker");
}
- if (context.deploymentID() != null) {
- context.addCloseHook(closeHook);
- }
if (!keepAlive && pipelining) {
throw new IllegalStateException("Cannot have pipelining with no keep alive");
}
+ closePromise = (PromiseInternal) Promise.promise();
+ if (metrics != null) {
+ closeFuture = closePromise.future().compose(v -> {
+ metrics.close();
+ return Future.succeededFuture();
+ });
+ } else {
+ closeFuture = closePromise.future();
+ }
+ closeHooks = hooks;
webSocketCM = webSocketConnectionManager();
httpCM = httpConnectionManager();
proxyType = options.getProxyOptions() != null ? options.getProxyOptions().getType() : null;
if (options.getPoolCleanerPeriod() > 0 && (options.getKeepAliveTimeout() > 0L || options.getHttp2KeepAliveTimeout() > 0L)) {
- timerID = vertx.setTimer(options.getPoolCleanerPeriod(), id -> checkExpired());
+ PoolChecker checker = new PoolChecker(this);
+ timerID = vertx.setTimer(options.getPoolCleanerPeriod(), checker);
}
}
- private void checkExpired() {
+ /**
+ * A weak ref to the client so it can be finalized.
+ */
+ private static class PoolChecker implements Handler<Long> {
+
+ final WeakReference<HttpClientImpl> ref;
+
+ private PoolChecker(HttpClientImpl client) {
+ ref = new WeakReference<>(client);
+ }
+
+ @Override
+ public void handle(Long event) {
+ HttpClientImpl client = ref.get();
+ if (client != null) {
+ client.checkExpired(this);
+ }
+ }
+ }
+
+ private void checkExpired(Handler<Long> checker) {
httpCM.forEach(EXPIRED_CHECKER);
synchronized (this) {
if (!closed) {
- timerID = vertx.setTimer(options.getPoolCleanerPeriod(), id -> checkExpired());
+ timerID = vertx.setTimer(options.getPoolCleanerPeriod(), checker);
}
}
}
@@ -1189,43 +1214,50 @@ public Future<HttpClientResponse> delete(String requestURI) {
return send(HttpMethod.DELETE, requestURI);
}
+ @Override
+ public void close(Promise<Void> completion) {
+ boolean close;
+ synchronized (this) {
+ close = !closed;
+ if (close) {
+ closed = true;
+ if (timerID >= 0) {
+ vertx.cancelTimer(timerID);
+ timerID = -1;
+ }
+ }
+ }
+ if (close) {
+ webSocketCM.close();
+ httpCM.close();
+ ChannelGroupFuture fut = channelGroup.close();
+ fut.addListener(closePromise);
+ }
+ if (completion != null) {
+ closePromise.future().onComplete(completion);
+ }
+ }
+
@Override
public void close(Handler<AsyncResult<Void>> handler) {
+ if (closeHooks != null) {
+ closeHooks.remove(this);
+ }
ContextInternal closingCtx = vertx.getOrCreateContext();
- close(closingCtx.promise(handler));
+ close(handler != null ? closingCtx.promise(handler) : null);
}
@Override
public Future<Void> close() {
+ if (closeHooks != null) {
+ closeHooks.remove(this);
+ }
ContextInternal closingCtx = vertx.getOrCreateContext();
Promise<Void> promise = closingCtx.promise();
close(promise);
return promise.future();
}
- private void close(PromiseInternal<Void> promise) {
- synchronized (this) {
- checkClosed();
- closed = true;
- if (timerID >= 0) {
- vertx.cancelTimer(timerID);
- timerID = -1;
- }
- }
- if (context.deploymentID() != null) {
- context.removeCloseHook(closeHook);
- }
- webSocketCM.close();
- httpCM.close();
- ChannelGroupFuture fut = channelGroup.close();
- fut.addListener(promise);
- promise.future().onComplete(ar -> {
- if (metrics != null) {
- metrics.close();
- }
- });
- }
-
@Override
public boolean isMetricsEnabled() {
return getMetrics() != null;
@@ -1344,12 +1376,17 @@ private synchronized void checkClosed() {
}
}
+ @Override
+ public Future<Void> closeFuture() {
+ return closePromise.future();
+ }
+
@Override
protected void finalize() throws Throwable {
// Make sure this gets cleaned up if there are no more references to it
// so as not to leave connections and resources dangling until the system is shutdown
// which could make the JVM run out of file handles.
- close((PromiseInternal<Void>) Promise.<Void>promise());
+ close((Handler<AsyncResult<Void>>) null);
super.finalize();
}
}
diff --git a/src/main/java/io/vertx/core/http/impl/HttpServerImpl.java b/src/main/java/io/vertx/core/http/impl/HttpServerImpl.java
--- a/src/main/java/io/vertx/core/http/impl/HttpServerImpl.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpServerImpl.java
@@ -265,15 +265,6 @@ public SSLHelper getSslHelper() {
return sslHelper;
}
- @Override
- protected void finalize() throws Throwable {
- // Make sure this gets cleaned up if there are no more references to it
- // so as not to leave connections and resources dangling until the system is shutdown
- // which could make the JVM run out of file handles.
- close();
- super.finalize();
- }
-
boolean requestAccept() {
return requestStream.accept();
}
diff --git a/src/main/java/io/vertx/core/impl/CloseHooks.java b/src/main/java/io/vertx/core/impl/CloseHooks.java
--- a/src/main/java/io/vertx/core/impl/CloseHooks.java
+++ b/src/main/java/io/vertx/core/impl/CloseHooks.java
@@ -18,22 +18,24 @@
import io.vertx.core.Promise;
import io.vertx.core.impl.logging.Logger;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.WeakHashMap;
import java.util.concurrent.atomic.AtomicInteger;
/**
* @author <a href="mailto:julien@julienviet.com">Julien Viet</a>
*/
-class CloseHooks {
+public class CloseHooks {
private final Logger log;
private boolean closeHooksRun;
- private Set<Closeable> closeHooks;
+ private Map<Closeable, CloseHooks> closeHooks;
CloseHooks(Logger log) {
this.log = log;
+ this.closeHooks = new WeakHashMap<>();
}
/**
@@ -41,25 +43,21 @@ class CloseHooks {
*
* @param hook the hook to add
*/
- synchronized void add(Closeable hook) {
+ public synchronized void add(Closeable hook) {
if (closeHooks == null) {
- // Has to be concurrent as can be removed from non context thread
- closeHooks = new HashSet<>();
+ throw new IllegalStateException();
}
- closeHooks.add(hook);
+ closeHooks.put(hook, this);
}
/**
* Remove an existing hook.
*
* @param hook the hook to remove
- * @return {@code} true if the hook was removed
*/
- synchronized boolean remove(Closeable hook) {
+ public synchronized void remove(Closeable hook) {
if (closeHooks != null) {
- return closeHooks.remove(hook);
- } else {
- return false;
+ closeHooks.remove(hook);
}
}
@@ -69,51 +67,38 @@ synchronized boolean remove(Closeable hook) {
* @param completionHandler called when all hooks have beene executed
*/
void run(Handler<AsyncResult<Void>> completionHandler) {
- Set<Closeable> copy = null;
+ Map<Closeable, CloseHooks> copy;
synchronized (this) {
if (closeHooksRun) {
// Sanity check
throw new IllegalStateException("Close hooks already run");
}
closeHooksRun = true;
- if (closeHooks != null && !closeHooks.isEmpty()) {
- // Must copy before looping as can be removed during loop otherwise
- copy = new HashSet<>(closeHooks);
- }
+ copy = closeHooks;
+ closeHooks = null;
}
- if (copy != null && !copy.isEmpty()) {
- int num = copy.size();
- if (num != 0) {
- AtomicInteger count = new AtomicInteger();
- AtomicBoolean failed = new AtomicBoolean();
- for (Closeable hook : copy) {
- Promise<Void> promise = Promise.promise();
- promise.future().onComplete(ar -> {
- if (ar.failed()) {
- if (failed.compareAndSet(false, true)) {
- // Only report one failure
- completionHandler.handle(Future.failedFuture(ar.cause()));
- }
- } else {
- if (count.incrementAndGet() == num) {
- // closeHooksRun = true;
- completionHandler.handle(Future.succeededFuture());
- }
- }
- });
- try {
- hook.close(promise);
- } catch (Throwable t) {
- log.warn("Failed to run close hooks", t);
- promise.tryFail(t);
+ // We want an immutable version of the list holding strong references to avoid racing against finalization
+ List<Closeable> list = new ArrayList<>(copy.size());
+ copy.keySet().forEach(list::add);
+ int num = list.size();
+ if (num > 0) {
+ AtomicInteger count = new AtomicInteger();
+ for (Closeable hook : list) {
+ Promise<Void> promise = Promise.promise();
+ promise.future().onComplete(ar -> {
+ if (count.incrementAndGet() == num) {
+ completionHandler.handle(Future.succeededFuture());
}
+ });
+ try {
+ hook.close(promise);
+ } catch (Throwable t) {
+ log.warn("Failed to run close hook", t);
+ promise.tryFail(t);
}
- } else {
- completionHandler.handle(Future.succeededFuture());
}
} else {
completionHandler.handle(Future.succeededFuture());
}
}
-
}
diff --git a/src/main/java/io/vertx/core/impl/ContextImpl.java b/src/main/java/io/vertx/core/impl/ContextImpl.java
--- a/src/main/java/io/vertx/core/impl/ContextImpl.java
+++ b/src/main/java/io/vertx/core/impl/ContextImpl.java
@@ -12,7 +12,6 @@
package io.vertx.core.impl;
import io.netty.channel.EventLoop;
-import io.netty.channel.EventLoopGroup;
import io.vertx.core.*;
import io.vertx.core.impl.logging.Logger;
import io.vertx.core.impl.logging.LoggerFactory;
@@ -51,15 +50,6 @@ static void executeIsolated(Handler<Void> task) {
}
}
- private static EventLoop getEventLoop(VertxInternal vertx) {
- EventLoopGroup group = vertx.getEventLoopGroup();
- if (group != null) {
- return group.next();
- } else {
- return null;
- }
- }
-
private static final Logger log = LoggerFactory.getLogger(ContextImpl.class);
private static final String DISABLE_TIMINGS_PROP_NAME = "vertx.disableContextTimings";
@@ -80,13 +70,14 @@ private static EventLoop getEventLoop(VertxInternal vertx) {
final WorkerPool workerPool;
final TaskQueue orderedTasks;
- ContextImpl(VertxInternal vertx, VertxTracer<?, ?> tracer, WorkerPool internalBlockingPool, WorkerPool workerPool, Deployment deployment,
- ClassLoader tccl) {
- this(vertx, tracer, getEventLoop(vertx), internalBlockingPool, workerPool, deployment, tccl);
- }
-
- ContextImpl(VertxInternal vertx, VertxTracer<?, ?> tracer, EventLoop eventLoop, WorkerPool internalBlockingPool, WorkerPool workerPool, Deployment deployment,
- ClassLoader tccl) {
+ ContextImpl(VertxInternal vertx,
+ VertxTracer<?, ?> tracer,
+ EventLoop eventLoop,
+ WorkerPool internalBlockingPool,
+ WorkerPool workerPool,
+ Deployment deployment,
+ CloseHooks closeHooks,
+ ClassLoader tccl) {
if (VertxThread.DISABLE_TCCL && tccl != ClassLoader.getSystemClassLoader()) {
log.warn("You have disabled TCCL checks but you have a custom TCCL to set.");
}
@@ -97,26 +88,31 @@ private static EventLoop getEventLoop(VertxInternal vertx) {
this.tccl = tccl;
this.owner = vertx;
this.workerPool = workerPool;
+ this.closeHooks = closeHooks;
this.internalBlockingPool = internalBlockingPool;
this.orderedTasks = new TaskQueue();
this.internalOrderedTasks = new TaskQueue();
- this.closeHooks = new CloseHooks(log);
}
public Deployment getDeployment() {
return deployment;
}
- public void addCloseHook(Closeable hook) {
- closeHooks.add(hook);
+ @Override
+ public CloseHooks closeHooks() {
+ return closeHooks;
}
- public boolean removeCloseHook(Closeable hook) {
- return closeHooks.remove(hook);
+ public void addCloseHook(Closeable hook) {
+ if (closeHooks != null) {
+ closeHooks.add(hook);
+ }
}
- public void runCloseHooks(Handler<AsyncResult<Void>> completionHandler) {
- closeHooks.run(completionHandler);
+ public void removeCloseHook(Closeable hook) {
+ if (deployment != null) {
+ closeHooks.remove(hook);
+ }
}
@Override
@@ -299,8 +295,8 @@ public final void addCloseHook(Closeable hook) {
}
@Override
- public final boolean removeCloseHook(Closeable hook) {
- return delegate.removeCloseHook(hook);
+ public final void removeCloseHook(Closeable hook) {
+ delegate.removeCloseHook(hook);
}
@Override
diff --git a/src/main/java/io/vertx/core/impl/ContextInternal.java b/src/main/java/io/vertx/core/impl/ContextInternal.java
--- a/src/main/java/io/vertx/core/impl/ContextInternal.java
+++ b/src/main/java/io/vertx/core/impl/ContextInternal.java
@@ -261,4 +261,6 @@ static ContextInternal current() {
*/
long setTimer(long delay, Handler<Long> handler);
+ CloseHooks closeHooks();
+
}
diff --git a/src/main/java/io/vertx/core/impl/Deployment.java b/src/main/java/io/vertx/core/impl/Deployment.java
--- a/src/main/java/io/vertx/core/impl/Deployment.java
+++ b/src/main/java/io/vertx/core/impl/Deployment.java
@@ -57,4 +57,5 @@ default void doUndeploy(ContextInternal undeployingContext, Handler<AsyncResult<
void undeployHandler(Handler<Void> handler);
boolean isChild();
+
}
diff --git a/src/main/java/io/vertx/core/impl/DeploymentManager.java b/src/main/java/io/vertx/core/impl/DeploymentManager.java
--- a/src/main/java/io/vertx/core/impl/DeploymentManager.java
+++ b/src/main/java/io/vertx/core/impl/DeploymentManager.java
@@ -185,14 +185,16 @@ private Future<Deployment> doDeploy(String identifier,
AtomicInteger deployCount = new AtomicInteger();
AtomicBoolean failureReported = new AtomicBoolean();
for (Verticle verticle: verticles) {
+ CloseHooks closeHooks = new CloseHooks(log);
WorkerExecutorInternal workerExec = poolName != null ? vertx.createSharedWorkerExecutor(poolName, options.getWorkerPoolSize(), options.getMaxWorkerExecuteTime(), options.getMaxWorkerExecuteTimeUnit()) : null;
WorkerPool pool = workerExec != null ? workerExec.getPool() : null;
- ContextImpl context = (ContextImpl) (options.isWorker() ? vertx.createWorkerContext(deployment, pool, tccl) :
- vertx.createEventLoopContext(deployment, pool, tccl));
+ ContextImpl context = (ContextImpl) (options.isWorker() ? vertx.createWorkerContext(deployment, closeHooks, pool, tccl) :
+ vertx.createEventLoopContext(deployment, closeHooks, pool, tccl));
if (workerExec != null) {
context.addCloseHook(workerExec);
}
- deployment.addVerticle(new VerticleHolder(verticle, context));
+ VerticleHolder holder = new VerticleHolder(verticle, context, closeHooks);
+ deployment.addVerticle(holder);
context.runOnContext(v -> {
try {
verticle.init(vertx, context);
@@ -219,12 +221,12 @@ private Future<Deployment> doDeploy(String identifier,
promise.complete(deployment);
}
} else if (failureReported.compareAndSet(false, true)) {
- deployment.rollback(callingContext, promise, context, ar.cause());
+ deployment.rollback(callingContext, promise, context, holder.closeHooks, ar.cause());
}
});
} catch (Throwable t) {
if (failureReported.compareAndSet(false, true))
- deployment.rollback(callingContext, promise, context, t);
+ deployment.rollback(callingContext, promise, context, holder.closeHooks, t);
}
});
}
@@ -233,12 +235,15 @@ private Future<Deployment> doDeploy(String identifier,
}
static class VerticleHolder {
+
final Verticle verticle;
final ContextImpl context;
+ final CloseHooks closeHooks;
- VerticleHolder(Verticle verticle, ContextImpl context) {
+ VerticleHolder(Verticle verticle, ContextImpl context, CloseHooks closeHooks) {
this.verticle = verticle;
this.context = context;
+ this.closeHooks = closeHooks;
}
}
@@ -269,7 +274,7 @@ public void addVerticle(VerticleHolder holder) {
verticles.add(holder);
}
- private synchronized void rollback(ContextInternal callingContext, Handler<AsyncResult<Deployment>> completionHandler, ContextImpl context, Throwable cause) {
+ private synchronized void rollback(ContextInternal callingContext, Handler<AsyncResult<Deployment>> completionHandler, ContextImpl context, CloseHooks closeHooks, Throwable cause) {
if (status == ST_DEPLOYED) {
status = ST_UNDEPLOYING;
doUndeployChildren(callingContext).onComplete(childrenResult -> {
@@ -289,7 +294,7 @@ private synchronized void rollback(ContextInternal callingContext, Handler<Async
if (childrenResult.failed()) {
reportFailure(cause, callingContext, completionHandler);
} else {
- context.runCloseHooks(closeHookAsyncResult -> reportFailure(cause, callingContext, completionHandler));
+ closeHooks.run(closeHookAsyncResult -> reportFailure(cause, callingContext, completionHandler));
}
});
}
@@ -344,7 +349,7 @@ public synchronized Future<Void> doUndeploy(ContextInternal undeployingContext)
if (metrics != null) {
metrics.verticleUndeployed(verticleHolder.verticle);
}
- context.runCloseHooks(ar2 -> {
+ verticleHolder.closeHooks.run(ar2 -> {
if (ar2.failed()) {
// Log error but we report success anyway
log.error("Failed to run close hook", ar2.cause());
@@ -447,7 +452,6 @@ public boolean isChild() {
public String deploymentID() {
return deploymentID;
}
-
}
}
diff --git a/src/main/java/io/vertx/core/impl/EventLoopContext.java b/src/main/java/io/vertx/core/impl/EventLoopContext.java
--- a/src/main/java/io/vertx/core/impl/EventLoopContext.java
+++ b/src/main/java/io/vertx/core/impl/EventLoopContext.java
@@ -23,14 +23,15 @@
*/
public class EventLoopContext extends ContextImpl {
- EventLoopContext(VertxInternal vertx, VertxTracer<?, ?> tracer, WorkerPool internalBlockingPool, WorkerPool workerPool, Deployment deployment,
+ EventLoopContext(VertxInternal vertx,
+ VertxTracer<?, ?> tracer,
+ EventLoop eventLoop,
+ WorkerPool internalBlockingPool,
+ WorkerPool workerPool,
+ Deployment deployment,
+ CloseHooks closeHooks,
ClassLoader tccl) {
- super(vertx, tracer, internalBlockingPool, workerPool, deployment, tccl);
- }
-
- EventLoopContext(VertxInternal vertx, VertxTracer<?, ?> tracer, EventLoop eventLoop, WorkerPool internalBlockingPool, WorkerPool workerPool, Deployment deployment,
- ClassLoader tccl) {
- super(vertx, tracer, eventLoop, internalBlockingPool, workerPool, deployment, tccl);
+ super(vertx, tracer, eventLoop, internalBlockingPool, workerPool, deployment, closeHooks, tccl);
}
@Override
@@ -79,6 +80,11 @@ static class Duplicated extends ContextImpl.Duplicated<EventLoopContext> {
super(delegate);
}
+ @Override
+ public CloseHooks closeHooks() {
+ return delegate.closeHooks();
+ }
+
@Override
<T> void execute(T argument, Handler<T> task) {
nettyEventLoop().execute(() -> emit(argument, task));
diff --git a/src/main/java/io/vertx/core/impl/VertxImpl.java b/src/main/java/io/vertx/core/impl/VertxImpl.java
--- a/src/main/java/io/vertx/core/impl/VertxImpl.java
+++ b/src/main/java/io/vertx/core/impl/VertxImpl.java
@@ -277,7 +277,10 @@ public NetServer createNetServer() {
}
public NetClient createNetClient(NetClientOptions options) {
- return new NetClientImpl(this, options);
+ CloseHooks hooks = resolveHooks();
+ NetClientImpl client = new NetClientImpl(this, options, hooks);
+ hooks.add(client);
+ return client;
}
@Override
@@ -313,7 +316,10 @@ public HttpServer createHttpServer() {
}
public HttpClient createHttpClient(HttpClientOptions options) {
- return new HttpClientImpl(this, options);
+ CloseHooks hooks = resolveHooks();
+ HttpClientImpl client = new HttpClientImpl(this, hooks, options);
+ hooks.add(client);
+ return client;
}
@Override
@@ -382,7 +388,7 @@ public ContextInternal getOrCreateContext() {
ContextInternal ctx = getContext();
if (ctx == null) {
// We are running embedded - Create a context
- ctx = createEventLoopContext((Deployment) null, null, Thread.currentThread().getContextClassLoader());
+ ctx = createEventLoopContext(null, null, null, Thread.currentThread().getContextClassLoader());
}
return ctx;
}
@@ -427,26 +433,26 @@ public boolean cancelTimer(long id) {
}
@Override
- public EventLoopContext createEventLoopContext(Deployment deployment, WorkerPool workerPool, ClassLoader tccl) {
- return new EventLoopContext(this, tracer, internalBlockingPool, workerPool != null ? workerPool : this.workerPool, deployment, tccl);
+ public EventLoopContext createEventLoopContext(Deployment deployment, CloseHooks closeHooks, WorkerPool workerPool, ClassLoader tccl) {
+ return new EventLoopContext(this, tracer, eventLoopGroup.next(), internalBlockingPool, workerPool != null ? workerPool : this.workerPool, deployment, closeHooks, tccl);
}
@Override
public EventLoopContext createEventLoopContext(EventLoop eventLoop, WorkerPool workerPool, ClassLoader tccl) {
- return new EventLoopContext(this, tracer, eventLoop, internalBlockingPool, workerPool != null ? workerPool : this.workerPool, null, tccl);
+ return new EventLoopContext(this, tracer, eventLoop, internalBlockingPool, workerPool != null ? workerPool : this.workerPool, null, null, tccl);
}
@Override
- public ContextInternal createWorkerContext(Deployment deployment, WorkerPool workerPool, ClassLoader tccl) {
+ public ContextInternal createWorkerContext(Deployment deployment, CloseHooks closeHooks, WorkerPool workerPool, ClassLoader tccl) {
if (workerPool == null) {
workerPool = this.workerPool;
}
- return new WorkerContext(this, tracer, internalBlockingPool, workerPool, deployment, tccl);
+ return new WorkerContext(this, tracer, internalBlockingPool, workerPool, deployment, closeHooks, tccl);
}
@Override
public ContextInternal createWorkerContext() {
- return createWorkerContext(null, null, null);
+ return createWorkerContext(null, null, null, null);
}
@Override
@@ -532,7 +538,6 @@ public synchronized void close(Handler<AsyncResult<Void>> completionHandler) {
return;
}
closed = true;
-
closeHooks.run(ar -> {
deploymentManager.undeployAll().onComplete(ar1 -> {
HAManager haManager = haManager();
@@ -552,33 +557,7 @@ public synchronized void close(Handler<AsyncResult<Void>> completionHandler) {
ebClose.future().onComplete(ar4 -> {
closeClusterManager(ar5 -> {
// Copy set to prevent ConcurrentModificationException
- Set<HttpServerImpl> httpServers = new HashSet<>(sharedHttpServers.values());
- Set<NetServerImpl> netServers = new HashSet<>(sharedNetServers.values());
- sharedHttpServers.clear();
- sharedNetServers.clear();
-
- int serverCount = httpServers.size() + netServers.size();
-
- AtomicInteger serverCloseCount = new AtomicInteger();
-
- Handler<AsyncResult<Void>> serverCloseHandler = res -> {
- if (res.failed()) {
- log.error("Failure in shutting down server", res.cause());
- }
- if (serverCloseCount.incrementAndGet() == serverCount) {
- deleteCacheDirAndShutdown(completionHandler);
- }
- };
-
- for (HttpServerImpl server : httpServers) {
- server.closeAll(serverCloseHandler);
- }
- for (NetServerImpl server : netServers) {
- server.closeAll(serverCloseHandler);
- }
- if (serverCount == 0) {
- deleteCacheDirAndShutdown(completionHandler);
- }
+ deleteCacheDirAndShutdown(completionHandler);
});
});
});
@@ -1099,9 +1078,13 @@ public synchronized WorkerExecutorImpl createSharedWorkerExecutor(String name, i
} else {
sharedWorkerPool.refCount++;
}
- ContextInternal context = getOrCreateContext();
- WorkerExecutorImpl namedExec = new WorkerExecutorImpl(context, sharedWorkerPool);
- context.addCloseHook(namedExec);
+ ContextInternal ctx = getContext();
+ CloseHooks hooks = ctx != null ? ctx.closeHooks() : null;
+ if (hooks == null) {
+ hooks = closeHooks;
+ }
+ WorkerExecutorImpl namedExec = new WorkerExecutorImpl(this, closeHooks, sharedWorkerPool);
+ hooks.add(namedExec);
return namedExec;
}
@@ -1125,4 +1108,18 @@ public void addCloseHook(Closeable hook) {
public void removeCloseHook(Closeable hook) {
closeHooks.remove(hook);
}
+
+ @Override
+ public CloseHooks closeHooks() {
+ return closeHooks;
+ }
+
+ private CloseHooks resolveHooks() {
+ ContextInternal context = getContext();
+ CloseHooks hooks = context != null ? context.closeHooks() : null;
+ if (hooks == null) {
+ hooks = closeHooks();
+ }
+ return hooks;
+ }
}
diff --git a/src/main/java/io/vertx/core/impl/VertxInternal.java b/src/main/java/io/vertx/core/impl/VertxInternal.java
--- a/src/main/java/io/vertx/core/impl/VertxInternal.java
+++ b/src/main/java/io/vertx/core/impl/VertxInternal.java
@@ -83,14 +83,14 @@ public interface VertxInternal extends Vertx {
/**
* @return event loop context
*/
- ContextInternal createEventLoopContext(Deployment deployment, WorkerPool workerPool, ClassLoader tccl);
+ ContextInternal createEventLoopContext(Deployment deployment, CloseHooks closeHooks, WorkerPool workerPool, ClassLoader tccl);
ContextInternal createEventLoopContext(EventLoop eventLoop, WorkerPool workerPool, ClassLoader tccl);
/**
* @return worker loop context
*/
- ContextInternal createWorkerContext(Deployment deployment, WorkerPool pool, ClassLoader tccl);
+ ContextInternal createWorkerContext(Deployment deployment, CloseHooks closeHooks, WorkerPool pool, ClassLoader tccl);
ContextInternal createWorkerContext();
@@ -151,4 +151,6 @@ public interface VertxInternal extends Vertx {
void removeCloseHook(Closeable hook);
+ CloseHooks closeHooks();
+
}
diff --git a/src/main/java/io/vertx/core/impl/WorkerContext.java b/src/main/java/io/vertx/core/impl/WorkerContext.java
--- a/src/main/java/io/vertx/core/impl/WorkerContext.java
+++ b/src/main/java/io/vertx/core/impl/WorkerContext.java
@@ -26,9 +26,14 @@
*/
public class WorkerContext extends ContextImpl {
- WorkerContext(VertxInternal vertx, VertxTracer<?, ?> tracer, WorkerPool internalBlockingPool, WorkerPool workerPool, Deployment deployment,
+ WorkerContext(VertxInternal vertx,
+ VertxTracer<?, ?> tracer,
+ WorkerPool internalBlockingPool,
+ WorkerPool workerPool,
+ Deployment deployment,
+ CloseHooks closeHooks,
ClassLoader tccl) {
- super(vertx, tracer, internalBlockingPool, workerPool, deployment, tccl);
+ super(vertx, tracer, vertx.getEventLoopGroup().next(), internalBlockingPool, workerPool, deployment, closeHooks, tccl);
}
@Override
@@ -131,6 +136,11 @@ static class Duplicated extends ContextImpl.Duplicated<WorkerContext> {
super(delegate);
}
+ @Override
+ public CloseHooks closeHooks() {
+ return delegate.closeHooks();
+ }
+
@Override
<T> void execute(T argument, Handler<T> task) {
delegate.execute(this, orderedTasks, argument, task);
diff --git a/src/main/java/io/vertx/core/impl/WorkerExecutorImpl.java b/src/main/java/io/vertx/core/impl/WorkerExecutorImpl.java
--- a/src/main/java/io/vertx/core/impl/WorkerExecutorImpl.java
+++ b/src/main/java/io/vertx/core/impl/WorkerExecutorImpl.java
@@ -22,12 +22,14 @@
*/
class WorkerExecutorImpl implements MetricsProvider, WorkerExecutorInternal {
- private final ContextInternal ctx;
+ private final VertxInternal vertx;
+ private final CloseHooks closeHooks;
private final VertxImpl.SharedWorkerPool pool;
private boolean closed;
- public WorkerExecutorImpl(ContextInternal ctx, VertxImpl.SharedWorkerPool pool) {
- this.ctx = ctx;
+ public WorkerExecutorImpl(VertxInternal vertx, CloseHooks closeHooks, VertxImpl.SharedWorkerPool pool) {
+ this.vertx = vertx;
+ this.closeHooks = closeHooks;
this.pool = pool;
}
@@ -44,7 +46,7 @@ public boolean isMetricsEnabled() {
@Override
public Vertx vertx() {
- return ctx.owner();
+ return vertx;
}
@Override
@@ -57,7 +59,7 @@ public WorkerPool getPool() {
if (closed) {
throw new IllegalStateException("Worker executor closed");
}
- ContextInternal context = (ContextInternal) ctx.owner().getOrCreateContext();
+ ContextInternal context = (ContextInternal) vertx.getOrCreateContext();
ContextImpl impl = context instanceof ContextImpl.Duplicated ? ((ContextImpl.Duplicated)context).delegate : (ContextImpl) context;
return ContextImpl.executeBlocking(context, blockingCodeHandler, pool, ordered ? impl.orderedTasks : null);
}
@@ -71,7 +73,7 @@ public synchronized <T> void executeBlocking(Handler<Promise<T>> blockingCodeHan
@Override
public Future<Void> close() {
- PromiseInternal<Void> promise = ctx.promise();
+ PromiseInternal<Void> promise = vertx.promise();
close(promise);
return promise.future();
}
@@ -81,7 +83,7 @@ public void close(Promise<Void> completion) {
synchronized (this) {
if (!closed) {
closed = true;
- ctx.removeCloseHook(this);
+ closeHooks.remove(this);
pool.release();
}
}
diff --git a/src/main/java/io/vertx/core/net/NetClient.java b/src/main/java/io/vertx/core/net/NetClient.java
--- a/src/main/java/io/vertx/core/net/NetClient.java
+++ b/src/main/java/io/vertx/core/net/NetClient.java
@@ -114,4 +114,9 @@ public interface NetClient extends Measured {
*/
Future<Void> close();
+ /**
+ * @return a future closed when the client is closed
+ */
+ Future<Void> closeFuture();
+
}
diff --git a/src/main/java/io/vertx/core/net/impl/NetClientImpl.java b/src/main/java/io/vertx/core/net/impl/NetClientImpl.java
--- a/src/main/java/io/vertx/core/net/impl/NetClientImpl.java
+++ b/src/main/java/io/vertx/core/net/impl/NetClientImpl.java
@@ -21,12 +21,12 @@
import io.netty.handler.stream.ChunkedWriteHandler;
import io.netty.handler.timeout.IdleStateHandler;
import io.netty.util.concurrent.GenericFutureListener;
-import io.netty.util.concurrent.GlobalEventExecutor;
import io.vertx.core.AsyncResult;
import io.vertx.core.Closeable;
import io.vertx.core.Future;
import io.vertx.core.Handler;
import io.vertx.core.Promise;
+import io.vertx.core.impl.CloseHooks;
import io.vertx.core.impl.ContextInternal;
import io.vertx.core.impl.PromiseInternal;
import io.vertx.core.impl.VertxInternal;
@@ -52,7 +52,7 @@
*
* @author <a href="http://tfox.org">Tim Fox</a>
*/
-public class NetClientImpl implements MetricsProvider, NetClient {
+public class NetClientImpl implements MetricsProvider, NetClient, Closeable {
private static final Logger log = LoggerFactory.getLogger(NetClientImpl.class);
protected final int idleTimeout;
@@ -61,39 +61,33 @@ public class NetClientImpl implements MetricsProvider, NetClient {
private final VertxInternal vertx;
private final NetClientOptions options;
- protected final SSLHelper sslHelper;
+ private final SSLHelper sslHelper;
private final ChannelGroup channelGroup;
- private final Closeable closeHook;
- private final ContextInternal creatingContext;
+ private final CloseHooks closeHooks;
private final TCPMetrics metrics;
+ private final PromiseInternal<Void> closePromise;
+ private final Future<Void> closeFuture;
private volatile boolean closed;
- public NetClientImpl(VertxInternal vertx, NetClientOptions options) {
- this(vertx, options, true);
- }
-
- public NetClientImpl(VertxInternal vertx, NetClientOptions options, boolean useCreatingContext) {
+ public NetClientImpl(VertxInternal vertx, NetClientOptions options, CloseHooks closeHooks) {
this.vertx = vertx;
- this.channelGroup = new DefaultChannelGroup(GlobalEventExecutor.INSTANCE);
+ this.channelGroup = new DefaultChannelGroup(vertx.getAcceptorEventLoopGroup().next());
this.options = new NetClientOptions(options);
this.sslHelper = new SSLHelper(options, options.getKeyCertOptions(), options.getTrustOptions());
- this.closeHook = completionHandler -> {
- NetClientImpl.this.close();
- completionHandler.handle(Future.succeededFuture());
- };
- if (useCreatingContext) {
- creatingContext = vertx.getContext();
- if (creatingContext != null) {
- creatingContext.addCloseHook(closeHook);
- }
- } else {
- creatingContext = null;
- }
- VertxMetrics metrics = vertx.metricsSPI();
- this.metrics = metrics != null ? metrics.createNetClientMetrics(options) : null;
+ this.closeHooks = closeHooks;
+ this.metrics = vertx.metricsSPI() != null ? vertx.metricsSPI().createNetClientMetrics(options) : null;
logEnabled = options.getLogActivity();
idleTimeout = options.getIdleTimeout();
idleTimeoutUnit = options.getIdleTimeoutUnit();
+ closePromise = (PromiseInternal) Promise.promise();
+ if (metrics != null) {
+ closeFuture = closePromise.future().compose(v -> {
+ metrics.close();
+ return Future.succeededFuture();
+ });
+ } else {
+ closeFuture = closePromise.future();
+ }
}
protected void initChannel(ChannelPipeline pipeline) {
@@ -141,35 +135,43 @@ public NetClient connect(int port, String host, String serverName, Handler<Async
return connect(SocketAddress.inetSocketAddress(port, host), serverName, connectHandler);
}
+ @Override
+ public Future<Void> closeFuture() {
+ return closePromise.future();
+ }
+
@Override
public void close(Handler<AsyncResult<Void>> handler) {
- close(vertx.getOrCreateContext().promise(handler));
+ if (closeHooks != null) {
+ closeHooks.remove(this);
+ }
+ ContextInternal closingCtx = vertx.getOrCreateContext();
+ close(handler != null ? closingCtx.promise(handler) : null);
}
@Override
public Future<Void> close() {
- PromiseInternal<Void> promise = vertx.getOrCreateContext().promise();
+ if (closeHooks != null) {
+ closeHooks.remove(this);
+ }
+ ContextInternal closingCtx = vertx.getOrCreateContext();
+ PromiseInternal<Void> promise = closingCtx.promise();
close(promise);
return promise.future();
}
- private void close(PromiseInternal<Void> promise) {
- boolean closed;
+ @Override
+ public void close(Promise<Void> completion) {
+ boolean close;
synchronized (this) {
- closed = this.closed;
- this.closed = true;
+ close = !closed;
+ closed = true;
}
- if (closed) {
- promise.complete();
- return;
+ if (close) {
+ ChannelGroupFuture fut = channelGroup.close();
+ fut.addListener(closePromise);
}
- ChannelGroupFuture fut = channelGroup.close();
- fut.addListener(promise);
- promise.future().onComplete(ar -> {
- if (metrics != null) {
- metrics.close();
- }
- });
+ closeFuture.onComplete(completion);
}
@Override
@@ -277,7 +279,7 @@ protected void finalize() throws Throwable {
// Make sure this gets cleaned up if there are no more references to it
// so as not to leave connections and resources dangling until the system is shutdown
// which could make the JVM run out of file handles.
- close((PromiseInternal<Void>) Promise.<Void>promise());
+ close((Handler<AsyncResult<Void>>) null);
super.finalize();
}
}
diff --git a/src/main/java/io/vertx/core/net/impl/NetServerImpl.java b/src/main/java/io/vertx/core/net/impl/NetServerImpl.java
--- a/src/main/java/io/vertx/core/net/impl/NetServerImpl.java
+++ b/src/main/java/io/vertx/core/net/impl/NetServerImpl.java
@@ -228,15 +228,6 @@ public boolean isClosed() {
return !isListening();
}
- @Override
- protected void finalize() throws Throwable {
- // Make sure this gets cleaned up if there are no more references to it
- // so as not to leave connections and resources dangling until the system is shutdown
- // which could make the JVM run out of file handles.
- close();
- super.finalize();
- }
-
private class NetServerWorker implements Handler<Channel> {
private final ContextInternal context;
diff --git a/src/main/java/io/vertx/core/net/impl/TCPServerBase.java b/src/main/java/io/vertx/core/net/impl/TCPServerBase.java
--- a/src/main/java/io/vertx/core/net/impl/TCPServerBase.java
+++ b/src/main/java/io/vertx/core/net/impl/TCPServerBase.java
@@ -21,6 +21,7 @@
import io.vertx.core.Future;
import io.vertx.core.Handler;
import io.vertx.core.Promise;
+import io.vertx.core.impl.CloseHooks;
import io.vertx.core.impl.ContextInternal;
import io.vertx.core.impl.PromiseInternal;
import io.vertx.core.impl.VertxInternal;
@@ -53,6 +54,7 @@ public abstract class TCPServerBase implements Closeable, MetricsProvider {
protected final NetServerOptions options;
protected final ContextInternal creatingContext;
protected final SSLHelper sslHelper;
+ protected final CloseHooks closeHooks;
// Per server
private EventLoop eventLoop;
@@ -71,14 +73,20 @@ public abstract class TCPServerBase implements Closeable, MetricsProvider {
private TCPMetrics<?> metrics;
public TCPServerBase(VertxInternal vertx, NetServerOptions options) {
+
+ ContextInternal context = vertx.getContext();
+ CloseHooks hooks = context != null ? context.closeHooks() : null;
+ if (hooks == null) {
+ hooks = vertx.closeHooks();
+ }
+
this.vertx = vertx;
this.options = new NetServerOptions(options);
this.sslHelper = new SSLHelper(options, options.getKeyCertOptions(), options.getTrustOptions());
- this.creatingContext = vertx.getContext();
+ this.creatingContext = context;
+ this.closeHooks = hooks;
- if (creatingContext != null) {
- creatingContext.addCloseHook(this);
- }
+ closeHooks.add(this);
}
public int actualPort() {
@@ -188,9 +196,7 @@ public synchronized TCPMetrics<?> getMetrics() {
@Override
public synchronized void close(Promise<Void> completion) {
- if (creatingContext != null) {
- creatingContext.removeCloseHook(this);
- }
+ closeHooks.remove(this);
if (!listening) {
completion.complete();
return;
@@ -240,4 +246,15 @@ public void closeAll(Handler<AsyncResult<Void>> handler) {
CompositeFuture fut = CompositeFuture.all(futures);
fut.onComplete(ar -> handler.handle(ar.mapEmpty()));
}
+
+ public abstract Future<Void> close();
+
+ @Override
+ protected void finalize() throws Throwable {
+ // Make sure this gets cleaned up if there are no more references to it
+ // so as not to leave connections and resources dangling until the system is shutdown
+ // which could make the JVM run out of file handles.
+ close();
+ super.finalize();
+ }
}
| diff --git a/src/test/benchmarks/io/vertx/core/impl/BenchmarkContext.java b/src/test/benchmarks/io/vertx/core/impl/BenchmarkContext.java
--- a/src/test/benchmarks/io/vertx/core/impl/BenchmarkContext.java
+++ b/src/test/benchmarks/io/vertx/core/impl/BenchmarkContext.java
@@ -21,11 +21,16 @@ public class BenchmarkContext extends ContextImpl {
public static BenchmarkContext create(Vertx vertx) {
VertxImpl impl = (VertxImpl) vertx;
- return new BenchmarkContext(impl, impl.internalBlockingPool, impl.workerPool, null, Thread.currentThread().getContextClassLoader());
+ return new BenchmarkContext(
+ impl,
+ impl.internalBlockingPool,
+ impl.workerPool,
+ Thread.currentThread().getContextClassLoader()
+ );
}
- public BenchmarkContext(VertxInternal vertx, WorkerPool internalBlockingPool, WorkerPool workerPool, Deployment deployment, ClassLoader tccl) {
- super(vertx, null, internalBlockingPool, workerPool, deployment, tccl);
+ public BenchmarkContext(VertxInternal vertx, WorkerPool internalBlockingPool, WorkerPool workerPool, ClassLoader tccl) {
+ super(vertx, null, vertx.getEventLoopGroup().next(), internalBlockingPool, workerPool, null, null, tccl);
}
@Override
diff --git a/src/test/java/io/vertx/core/ContextTaskTest.java b/src/test/java/io/vertx/core/ContextTaskTest.java
--- a/src/test/java/io/vertx/core/ContextTaskTest.java
+++ b/src/test/java/io/vertx/core/ContextTaskTest.java
@@ -61,7 +61,7 @@ private ContextInternal createEventLoopContext() {
}
private ContextInternal createWorkerContext() {
- return ((VertxInternal) vertx).createWorkerContext(null, new WorkerPool(workerExecutor, null), Thread.currentThread().getContextClassLoader());
+ return ((VertxInternal) vertx).createWorkerContext(null, null, new WorkerPool(workerExecutor, null), Thread.currentThread().getContextClassLoader());
}
// SCHEDULE + DISPATCH
diff --git a/src/test/java/io/vertx/core/ContextTest.java b/src/test/java/io/vertx/core/ContextTest.java
--- a/src/test/java/io/vertx/core/ContextTest.java
+++ b/src/test/java/io/vertx/core/ContextTest.java
@@ -33,7 +33,7 @@ public class ContextTest extends VertxTestBase {
private ExecutorService workerExecutor;
private ContextInternal createWorkerContext() {
- return ((VertxInternal) vertx).createWorkerContext(null, new WorkerPool(workerExecutor, null), Thread.currentThread().getContextClassLoader());
+ return ((VertxInternal) vertx).createWorkerContext(null, null, new WorkerPool(workerExecutor, null), Thread.currentThread().getContextClassLoader());
}
@Override
diff --git a/src/test/java/io/vertx/core/NamedWorkerPoolTest.java b/src/test/java/io/vertx/core/NamedWorkerPoolTest.java
--- a/src/test/java/io/vertx/core/NamedWorkerPoolTest.java
+++ b/src/test/java/io/vertx/core/NamedWorkerPoolTest.java
@@ -328,7 +328,7 @@ public void testCloseWorkerPoolsWhenVertxCloses() {
try {
exec.executeBlocking(fut -> fail(), ar -> fail());
fail();
- } catch (RejectedExecutionException ignore) {
+ } catch (IllegalStateException ignore) {
}
// Check we can still close
exec.close();
diff --git a/src/test/java/io/vertx/core/VertxTest.java b/src/test/java/io/vertx/core/VertxTest.java
--- a/src/test/java/io/vertx/core/VertxTest.java
+++ b/src/test/java/io/vertx/core/VertxTest.java
@@ -11,22 +11,29 @@
package io.vertx.core;
-import io.vertx.core.AsyncResult;
-import io.vertx.core.Closeable;
-import io.vertx.core.Future;
-import io.vertx.core.Handler;
-import io.vertx.core.Vertx;
+import io.vertx.core.http.HttpClient;
+import io.vertx.core.http.HttpClientOptions;
import io.vertx.core.impl.VertxInternal;
+import io.vertx.core.net.NetClient;
+import io.vertx.core.net.NetSocket;
import io.vertx.test.core.AsyncTestBase;
import org.junit.Test;
+import org.openjdk.jmh.runner.Runner;
+import org.openjdk.jmh.runner.options.OptionsBuilder;
+import java.lang.ref.WeakReference;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
/**
* @author <a href="mailto:julien@julienviet.com">Julien Viet</a>
*/
public class VertxTest extends AsyncTestBase {
+ private final org.openjdk.jmh.runner.Runner RUNNER = new Runner(new OptionsBuilder().shouldDoGC(true).build());
+
@Test
public void testCloseHooksCalled() throws Exception {
AtomicInteger closedCount = new AtomicInteger();
@@ -102,7 +109,7 @@ public void close(Promise<Void> completion) {
}
@Test
- public void testCloseFuture() throws Exception {
+ public void testCloseFuture() {
Vertx vertx = Vertx.vertx();
Future<Void> fut = vertx.close();
// Check that we can get a callback on the future as thread pools are closed by the operation
@@ -111,4 +118,89 @@ public void testCloseFuture() throws Exception {
}));
await();
}
+
+ @Test
+ public void testFinalizeHttpClient() throws Exception {
+ Vertx vertx = Vertx.vertx();
+ try {
+ CountDownLatch latch = new CountDownLatch(1);
+ AtomicReference<NetSocket> socketRef = new AtomicReference<>();
+ vertx.createNetServer()
+ .connectHandler(socketRef::set)
+ .listen(8080, "localhost")
+ .onComplete(onSuccess(server -> latch.countDown()));
+ awaitLatch(latch);
+ AtomicBoolean closed = new AtomicBoolean();
+ // No keep alive so the connection is not held in the pool ????
+ HttpClient client = vertx.createHttpClient(new HttpClientOptions().setKeepAlive(false));
+ client.closeFuture().onComplete(ar -> closed.set(true));
+ client.get(8080, "localhost", "/", onFailure(err -> {}));
+ WeakReference<HttpClient> ref = new WeakReference<>(client);
+ client = null;
+ assertWaitUntil(() -> socketRef.get() != null);
+ for (int i = 0;i < 10;i++) {
+ Thread.sleep(10);
+ RUNNER.runSystemGC();
+ assertFalse(closed.get());
+ assertNotNull(ref.get());
+ }
+ socketRef.get().close();
+ long now = System.currentTimeMillis();
+ while (true) {
+ assertTrue(System.currentTimeMillis() - now < 20_000);
+ RUNNER.runSystemGC();
+ if (ref.get() == null) {
+ assertTrue(closed.get());
+ break;
+ }
+ }
+ } finally {
+ vertx.close(ar -> {
+ testComplete();
+ });
+ }
+ await();
+ }
+
+ @Test
+ public void testFinalizeNetClient() throws Exception {
+ Vertx vertx = Vertx.vertx();
+ try {
+ CountDownLatch latch = new CountDownLatch(1);
+ AtomicReference<NetSocket> socketRef = new AtomicReference<>();
+ vertx.createNetServer()
+ .connectHandler(socketRef::set)
+ .listen(1234, "localhost")
+ .onComplete(onSuccess(server -> latch.countDown()));
+ awaitLatch(latch);
+ AtomicBoolean closed = new AtomicBoolean();
+ NetClient client = vertx.createNetClient();
+ client.closeFuture().onComplete(ar -> closed.set(true));
+ client.connect(1234, "localhost", onSuccess(so -> {}));
+ WeakReference<NetClient> ref = new WeakReference<>(client);
+ client = null;
+ assertWaitUntil(() -> socketRef.get() != null);
+ for (int i = 0;i < 10;i++) {
+ Thread.sleep(10);
+ RUNNER.runSystemGC();
+ assertFalse(closed.get());
+ assertNotNull(ref.get());
+ }
+ socketRef.get().close();
+ long now = System.currentTimeMillis();
+ while (true) {
+ assertTrue(System.currentTimeMillis() - now < 20_000);
+ RUNNER.runSystemGC();
+ if (ref.get() == null) {
+ assertTrue(closed.get());
+ break;
+ }
+ }
+ } finally {
+ vertx.close(ar -> {
+ testComplete();
+ });
+ }
+ await();
+ }
}
diff --git a/src/test/java/io/vertx/core/http/Http1xTest.java b/src/test/java/io/vertx/core/http/Http1xTest.java
--- a/src/test/java/io/vertx/core/http/Http1xTest.java
+++ b/src/test/java/io/vertx/core/http/Http1xTest.java
@@ -30,6 +30,7 @@
import io.vertx.test.verticles.SimpleServer;
import io.vertx.test.core.TestUtils;
import org.junit.Assume;
+import org.junit.Ignore;
import org.junit.Test;
import java.io.File;
@@ -66,6 +67,12 @@ protected VertxOptions getOptions() {
return options;
}
+ @Ignore
+ @Override
+ public void testBrokenFormUploadLargeFile() {
+ super.testBrokenFormUploadLargeFile();
+ }
+
@Test
public void testClientOptions() {
HttpClientOptions options = new HttpClientOptions();
|
Resource lifecycle improvements
The core client lifecycle (`HttpClient` and `NetClient`) is currently not bound to the Vert.x instance, i.e. some clients might not be closed after a Vert.x instance is closed. This can raise an issue when a client is closed afterwards and the related resources (event loop) are not available anymore, resulting in task rejections (exceptions).
Such exceptions can also be triggered by client finalisation that attempts to close a client when it is not referenced anymore.
Changes:
- any client will be closed when the creating scope is closed (see the sketch after this list), which can be:
  - the verticle that created this client
  - the Vert.x instance
  - the event bus (which creates a net client)
- clients referenced by close hooks should not be prevented from being GC'd when they are not reachable anymore, which implies that the close hooks should use weak references
- a few useless finalisers are removed because there is nothing to clean up
- non-deployment contexts do not need to create close hooks because they will not be closed, so we avoid creating useless objects
- TCP servers don't need to be closed anymore in the Vert.x close process as they can be added as shutdown hooks of the Vert.x instance (that is a simplification)
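To make the scoping rules concrete, here is a minimal sketch using only the public API; the class names and the exact deploy/undeploy sequence are illustrative assumptions, not code from this change:

```java
import io.vertx.core.AbstractVerticle;
import io.vertx.core.Vertx;
import io.vertx.core.net.NetClient;

public class ClientScopeSketch {

  // A client created inside a verticle is scoped to that deployment: it is
  // registered with the deployment's close hooks and closed on undeploy.
  public static class MyVerticle extends AbstractVerticle {
    private NetClient client;

    @Override
    public void start() {
      client = vertx.createNetClient();
    }
  }

  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();

    // Created outside any verticle: scoped to the Vert.x instance itself.
    NetClient embedded = vertx.createNetClient();

    vertx.deployVerticle(new MyVerticle(), ar -> {
      // Undeploying runs the deployment's close hooks, closing MyVerticle's client.
      vertx.undeploy(ar.result());

      // Closing the Vert.x instance runs its own close hooks, closing `embedded` too.
      vertx.close();
    });
  }
}
```

Because the hooks hold weak references, a registration like this does not keep a client reachable on its own: once user code drops its last strong reference, the client can still be collected, which is what `testFinalizeNetClient` and `testFinalizeHttpClient` above verify.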
| 2020-05-19T10:24:02Z | 4 |
|
eclipse-vertx/vert.x | 3,384 | eclipse-vertx__vert.x-3384 | [
"3382"
] | 82231fdce30ed9bda18352e55588243e24614a8a | diff --git a/src/main/java/io/vertx/core/http/HttpConnection.java b/src/main/java/io/vertx/core/http/HttpConnection.java
--- a/src/main/java/io/vertx/core/http/HttpConnection.java
+++ b/src/main/java/io/vertx/core/http/HttpConnection.java
@@ -116,21 +116,25 @@ default HttpConnection goAway(long errorCode, int lastStreamId) {
HttpConnection shutdownHandler(@Nullable Handler<Void> handler);
/**
- * Initiate a connection shutdown, a go away frame is sent and the connection is closed when all current active streams
- * are closed or after a time out of 30 seconds.
- * <p/>
- * This is not implemented for HTTP/1.x.
+ * Initiate a graceful connection shutdown, the connection is taken out of service and closed when all current requests
+ * are processed, otherwise after 30 seconds the connection will be closed. Client connection are immediately removed
+ * from the pool.
+ *
+ * <ul>
+ * <li>HTTP/2 connections will send a go away frame immediately to signal the other side the connection will close</li>
+ * <li>HTTP/1.x client connection supports this feature</li>
+ * <li>HTTP/1.x server connections do not support this feature</li>
+ * </ul>
*
* @return a reference to this, so the API can be used fluently
*/
@Fluent
- HttpConnection shutdown();
+ default HttpConnection shutdown() {
+ return shutdown(30000L);
+ }
/**
- * Initiate a connection shutdown, a go away frame is sent and the connection is closed when all current streams
- * will be closed or the {@code timeout} is fired.
- * <p/>
- * This is not implemented for HTTP/1.x.
+ * Like {@link #shutdown()} but with a configurable timeout value.
*
* @param timeoutMs the timeout in milliseconds
* @return a reference to this, so the API can be used fluently
diff --git a/src/main/java/io/vertx/core/http/impl/Http1xClientConnection.java b/src/main/java/io/vertx/core/http/impl/Http1xClientConnection.java
--- a/src/main/java/io/vertx/core/http/impl/Http1xClientConnection.java
+++ b/src/main/java/io/vertx/core/http/impl/Http1xClientConnection.java
@@ -73,6 +73,8 @@ class Http1xClientConnection extends Http1xConnectionBase<WebSocketImpl> impleme
private StreamImpl responseInProgress; // The request waiting for a response
private boolean close;
+ private long timerID;
+ private boolean shutdown;
private boolean upgraded;
private int keepAliveTimeout;
private long expirationTimestamp;
@@ -95,6 +97,7 @@ class Http1xClientConnection extends Http1xConnectionBase<WebSocketImpl> impleme
this.server = server;
this.metrics = metrics;
this.version = version;
+ this.timerID = -1L;
this.endpointMetric = endpointMetric;
this.keepAliveTimeout = options.getKeepAliveTimeout();
}
@@ -374,7 +377,6 @@ public void reset(Throwable cause) {
synchronized (conn) {
if (conn.requestInProgress == this) {
if (request == null) {
- // Is that possible in practice ???
conn.handleRequestEnd(true);
} else {
conn.close();
@@ -533,9 +535,15 @@ void handleException(Throwable cause) {
}
private void checkLifecycle() {
- if (upgraded) {
- // Do nothing
- } else if (close) {
+ boolean close;
+ synchronized (this) {
+ if (upgraded) {
+ // Do nothing
+ return;
+ }
+ close = this.close;
+ }
+ if (close) {
close();
} else {
recycle();
@@ -781,6 +789,10 @@ protected void handleClosed() {
WebSocketImpl ws;
List<StreamImpl> list = Collections.emptyList();
synchronized (this) {
+ if (timerID > 0L) {
+ vertx.cancelTimer(timerID);
+ timerID = -1L;
+ }
ws = this.ws;
for (StreamImpl r = responseInProgress;r != null;r = r.next) {
if (metrics != null) {
@@ -841,7 +853,39 @@ public boolean isValid() {
}
private void recycle() {
- expirationTimestamp = keepAliveTimeout == 0 ? 0L : System.currentTimeMillis() + keepAliveTimeout * 1000;
- listener.onRecycle();
+ if (shutdown) {
+ if (requestInProgress == null && responseInProgress == null) {
+ close();
+ }
+ } else {
+ expirationTimestamp = keepAliveTimeout == 0 ? 0L : System.currentTimeMillis() + keepAliveTimeout * 1000;
+ listener.onRecycle();
+ }
+ }
+
+ @Override
+ public HttpConnection shutdown(long timeoutMs) {
+ synchronized (this) {
+ if (upgraded) {
+ throw new IllegalStateException();
+ }
+ if (shutdown) {
+ return this;
+ }
+ if (timeoutMs > 0) {
+ timerID = vertx.setTimer(timeoutMs, id -> {
+ synchronized (Http1xClientConnection.this) {
+ timerID = -1L;
+ }
+ close();
+ });
+ } else {
+ close = true;
+ }
+ shutdown = true;
+ }
+ listener.onEvict();
+ checkLifecycle();
+ return this;
}
}
diff --git a/src/main/java/io/vertx/core/http/impl/Http1xConnectionBase.java b/src/main/java/io/vertx/core/http/impl/Http1xConnectionBase.java
--- a/src/main/java/io/vertx/core/http/impl/Http1xConnectionBase.java
+++ b/src/main/java/io/vertx/core/http/impl/Http1xConnectionBase.java
@@ -189,11 +189,6 @@ public HttpConnection shutdownHandler(@Nullable Handler<Void> handler) {
throw new UnsupportedOperationException("HTTP/1.x connections don't support GOAWAY");
}
- @Override
- public HttpConnection shutdown() {
- throw new UnsupportedOperationException("HTTP/1.x connections don't support GOAWAY");
- }
-
@Override
public HttpConnection shutdown(long timeoutMs) {
throw new UnsupportedOperationException("HTTP/1.x connections don't support GOAWAY");
diff --git a/src/main/java/io/vertx/core/http/impl/Http2ConnectionBase.java b/src/main/java/io/vertx/core/http/impl/Http2ConnectionBase.java
--- a/src/main/java/io/vertx/core/http/impl/Http2ConnectionBase.java
+++ b/src/main/java/io/vertx/core/http/impl/Http2ConnectionBase.java
@@ -388,11 +388,6 @@ public HttpConnection shutdown(long timeout) {
return this;
}
- @Override
- public HttpConnection shutdown() {
- return shutdown(30000);
- }
-
@Override
public Http2ConnectionBase closeHandler(Handler<Void> handler) {
return (Http2ConnectionBase) super.closeHandler(handler);
diff --git a/src/main/java/io/vertx/core/http/impl/Http2UpgradedClientConnection.java b/src/main/java/io/vertx/core/http/impl/Http2UpgradedClientConnection.java
--- a/src/main/java/io/vertx/core/http/impl/Http2UpgradedClientConnection.java
+++ b/src/main/java/io/vertx/core/http/impl/Http2UpgradedClientConnection.java
@@ -423,11 +423,6 @@ public HttpConnection goAway(long errorCode, int lastStreamId, Buffer debugData)
return current.goAway(errorCode, lastStreamId, debugData);
}
- @Override
- public HttpConnection shutdown() {
- return current.shutdown();
- }
-
@Override
public HttpConnection shutdown(long timeoutMs) {
return current.shutdown(timeoutMs);
| diff --git a/src/test/java/io/vertx/core/http/Http1xTest.java b/src/test/java/io/vertx/core/http/Http1xTest.java
--- a/src/test/java/io/vertx/core/http/Http1xTest.java
+++ b/src/test/java/io/vertx/core/http/Http1xTest.java
@@ -4748,4 +4748,116 @@ public void testAsyncPipelinedRequestDequeuing() throws Exception {
}
await();
}
+
+ @Test
+ public void testClientConnectionGracefulShutdown() throws Exception {
+ int numReq = 3;
+ AtomicReference<HttpConnection> clientConnection = new AtomicReference<>();
+ AtomicInteger count = new AtomicInteger();
+ server.requestHandler(req -> {
+ if (count.getAndIncrement() == 0) {
+ clientConnection.get().shutdown();
+ }
+ req.response().end();
+ });
+ startServer(testAddress);
+ AtomicInteger responses = new AtomicInteger();
+ client = vertx.createHttpClient(createBaseClientOptions().setMaxPoolSize(1).setPipelining(true));
+ client.connectionHandler(conn -> {
+ conn.closeHandler(v -> {
+ assertEquals(3, responses.get());
+ testComplete();
+ });
+ clientConnection.set(conn);
+ });
+ for (int i = 0;i < numReq;i++) {
+ client.request(HttpMethod.PUT, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ responses.incrementAndGet();
+ }).end();
+ }
+ await();
+ }
+
+ @Test
+ public void testClientConnectionGracefulShutdownWhenRequestCompletedAfterResponse() throws Exception {
+ server.requestHandler(req -> {
+ req.response().end();
+ });
+ startServer(testAddress);
+ client.request(HttpMethod.PUT, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ AtomicBoolean requestEnded = new AtomicBoolean();
+ HttpClientRequest req = resp.request();
+ HttpConnection conn = req.connection();
+ conn.closeHandler(v -> {
+ assertTrue(requestEnded.get());
+ testComplete();
+ });
+ conn.shutdown();
+ resp.endHandler(v -> {
+ vertx.runOnContext(v2 -> {
+ requestEnded.set(true);
+ req.end();
+ });
+ });
+ }).setChunked(true).sendHead();
+ await();
+ }
+
+ @Test
+ public void testClientConnectionShutdownTimedOut() throws Exception {
+ AtomicReference<HttpConnection> clientConnectionRef = new AtomicReference<>();
+ int numReq = 3;
+ waitFor(numReq + 1);
+ server.requestHandler(req -> {
+ HttpConnection clientConnection = clientConnectionRef.getAndSet(null);
+ if (clientConnection != null) {
+ long now = System.currentTimeMillis();
+ clientConnection.closeHandler(v -> {
+ assertTrue(System.currentTimeMillis() - now >= 500L);
+ complete();
+ });
+ clientConnection.shutdown(500L);
+ }
+ });
+ startServer(testAddress);
+ client = vertx.createHttpClient(createBaseClientOptions().setMaxPoolSize(1).setPipelining(true));
+ client.connectionHandler(clientConnectionRef::set);
+ for (int i = 0;i < numReq;i++) {
+ AtomicBoolean failed = new AtomicBoolean();
+ client.request(HttpMethod.PUT, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ }).exceptionHandler(err -> {
+ if (failed.compareAndSet(false, true)) {
+ complete();
+ }
+ }).end();
+ }
+ await();
+ }
+
+ @Test
+ public void testClientConnectionShutdownNow() throws Exception {
+ AtomicReference<HttpConnection> clientConnectionRef = new AtomicReference<>();
+ waitFor(2);
+ server.requestHandler(req -> {
+ long now = System.currentTimeMillis();
+ HttpConnection clientConnection = clientConnectionRef.get();
+ clientConnection.closeHandler(v -> {
+ assertTrue(System.currentTimeMillis() - now <= 2000L);
+ complete();
+ });
+ clientConnection.shutdown(0L);
+ });
+ startServer(testAddress);
+ client = vertx.createHttpClient(createBaseClientOptions().setMaxPoolSize(1).setPipelining(true));
+ client.connectionHandler(clientConnectionRef::set);
+ AtomicBoolean failed = new AtomicBoolean();
+ client
+ .request(HttpMethod.PUT, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {})
+ .exceptionHandler(err -> {
+ if (failed.compareAndSet(false, true)) {
+ complete();
+ }
+ }).end();
+ await();
+ }
}
HTTP/1.x client connection shutdown
HTTP/2 supports a graceful connection shutdown that relies on go away frames. We can provide degraded support for HTTP/1.x client connections that has a similar effect, since the client initiates the requests.
An HTTP/1.x client connection shutdown evicts the connection from the pool and waits until all current requests have terminated before closing the connection (see the sketch below).
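A minimal sketch of the intended client-side usage, mirroring the API style of the tests above; the port, host and pool settings are illustrative assumptions:

```java
import io.vertx.core.Vertx;
import io.vertx.core.http.HttpClient;
import io.vertx.core.http.HttpClientOptions;
import io.vertx.core.http.HttpMethod;

public class GracefulShutdownSketch {

  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();
    // A single pipelined connection makes the eviction easy to observe.
    HttpClient client = vertx.createHttpClient(
      new HttpClientOptions().setMaxPoolSize(1).setPipelining(true));

    client.connectionHandler(conn -> {
      conn.closeHandler(v -> System.out.println("connection closed"));
      // Evicts the connection from the pool; requests already in flight still
      // complete, then the connection is closed (default timeout: 30 seconds).
      conn.shutdown();
      // conn.shutdown(500L); // same, but close after 500 ms at the latest
      // conn.shutdown(0L);   // close without waiting for in-flight requests
    });

    client.request(HttpMethod.GET, 8080, "localhost", "/", resp ->
      System.out.println("status " + resp.statusCode())
    ).end();
  }
}
```

The new `Http1xTest` cases above drive this same sequence, covering the default timeout, an explicit timeout, and the immediate close.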
| 2020-04-22T08:43:28Z | 3.9 |
|
eclipse-vertx/vert.x | 3,247 | eclipse-vertx__vert.x-3247 | [
"3246"
] | 5c1354f3bb7f2e64d89d56741995efd644cf02bf | diff --git a/src/main/java/io/vertx/core/http/HttpClientRequest.java b/src/main/java/io/vertx/core/http/HttpClientRequest.java
--- a/src/main/java/io/vertx/core/http/HttpClientRequest.java
+++ b/src/main/java/io/vertx/core/http/HttpClientRequest.java
@@ -99,20 +99,6 @@ public interface HttpClientRequest extends WriteStream<Buffer>, Future<HttpClien
*/
HttpMethod method();
- /**
- * @return the raw value of the method this request sends
- */
- String getRawMethod();
-
- /**
- * Set the value the method to send when the method {@link HttpMethod#OTHER} is used.
- *
- * @param method the raw method
- * @return a reference to this, so the API can be used fluently
- */
- @Fluent
- HttpClientRequest setRawMethod(String method);
-
/**
* @return the absolute URI corresponding to the the HTTP request
*/
diff --git a/src/main/java/io/vertx/core/http/HttpMethod.java b/src/main/java/io/vertx/core/http/HttpMethod.java
--- a/src/main/java/io/vertx/core/http/HttpMethod.java
+++ b/src/main/java/io/vertx/core/http/HttpMethod.java
@@ -12,13 +12,109 @@
package io.vertx.core.http;
import io.vertx.codegen.annotations.VertxGen;
+import io.vertx.core.http.impl.HttpMethodImpl;
+
+import java.util.Objects;
/**
- * Represents an HTTP method
+ * Represents an HTTP method.
*
* @author <a href="http://tfox.org">Tim Fox</a>
*/
@VertxGen
-public enum HttpMethod {
- OPTIONS, GET, HEAD, POST, PUT, DELETE, TRACE, CONNECT, PATCH, OTHER
+public interface HttpMethod {
+
+ /**
+ * The {@code OPTIONS} method, this instance is interned and uniquely used.
+ */
+ HttpMethod OPTIONS = new HttpMethodImpl(io.netty.handler.codec.http.HttpMethod.OPTIONS);
+
+ /**
+ * The {@code GET} method, this instance is interned and uniquely used.
+ */
+ HttpMethod GET = new HttpMethodImpl(io.netty.handler.codec.http.HttpMethod.GET);
+
+ /**
+ * The {@code HEAD} method, this instance is interned and uniquely used.
+ */
+ HttpMethod HEAD = new HttpMethodImpl(io.netty.handler.codec.http.HttpMethod.HEAD);
+
+ /**
+ * The {@code POST} method, this instance is interned and uniquely used.
+ */
+ HttpMethod POST = new HttpMethodImpl(io.netty.handler.codec.http.HttpMethod.POST);
+
+ /**
+ * The {@code PUT} method, this instance is interned and uniquely used.
+ */
+ HttpMethod PUT = new HttpMethodImpl(io.netty.handler.codec.http.HttpMethod.PUT);
+
+ /**
+ * The {@code DELETE} method, this instance is interned and uniquely used.
+ */
+ HttpMethod DELETE = new HttpMethodImpl(io.netty.handler.codec.http.HttpMethod.DELETE);
+
+ /**
+ * The {@code TRACE} method, this instance is interned and uniquely used.
+ */
+ HttpMethod TRACE = new HttpMethodImpl(io.netty.handler.codec.http.HttpMethod.TRACE);
+
+ /**
+ * The {@code CONNECT} method, this instance is interned and uniquely used.
+ */
+ HttpMethod CONNECT = new HttpMethodImpl(io.netty.handler.codec.http.HttpMethod.CONNECT);
+
+ /**
+ * The {@code PATCH} method, this instance is interned and uniquely used.
+ */
+ HttpMethod PATCH = new HttpMethodImpl(io.netty.handler.codec.http.HttpMethod.PATCH);
+
+ /**
+ * @return the method name
+ */
+ String name();
+
+ /**
+ * @return the same value than {@link #name()}
+ */
+ String toString();
+
+ /**
+ * Lookup the {@code HttpMethod} value for the specified {@code value}.
+ * <br/>
+ * The predefined method constants {@link #GET}, {@link #POST}, {@link #PUT}, {@link #HEAD}, {@link #OPTIONS},
+ * {@link #DELETE}, {@link #TRACE}, {@link #CONNECT} and {@link #PATCH} are interned and will be returned
+ * when case sensitively matching their string value (i.e {@code "GET"}, etc...)
+ * <br/>
+ * Otherwise a new instance is returned.
+ *
+ * @param value the value
+ * @return the {@code HttpMethod} instance for the specified string {@code value}
+ * @throws IllegalArgumentException when the value is incorrect, the value is empty or contains an invalid char
+ */
+ static HttpMethod valueOf(String value) {
+ Objects.requireNonNull(value, "value");
+ switch (value) {
+ case "OPTIONS":
+ return OPTIONS;
+ case "GET":
+ return GET;
+ case "HEAD":
+ return HEAD;
+ case "POST":
+ return POST;
+ case "PUT":
+ return PUT;
+ case "DELETE":
+ return DELETE;
+ case "TRACE":
+ return TRACE;
+ case "CONNECT":
+ return CONNECT;
+ case "PATCH":
+ return PATCH;
+ default:
+ return new HttpMethodImpl(io.netty.handler.codec.http.HttpMethod.valueOf(value));
+ }
+ }
}
diff --git a/src/main/java/io/vertx/core/http/HttpServerRequest.java b/src/main/java/io/vertx/core/http/HttpServerRequest.java
--- a/src/main/java/io/vertx/core/http/HttpServerRequest.java
+++ b/src/main/java/io/vertx/core/http/HttpServerRequest.java
@@ -72,11 +72,6 @@ public interface HttpServerRequest extends ReadStream<Buffer> {
*/
HttpMethod method();
- /**
- * @return the HTTP method as sent by the client
- */
- String rawMethod();
-
/**
* @return true if this {@link io.vertx.core.net.NetSocket} is encrypted via SSL/TLS
*/
diff --git a/src/main/java/io/vertx/core/http/impl/Http1xClientConnection.java b/src/main/java/io/vertx/core/http/impl/Http1xClientConnection.java
--- a/src/main/java/io/vertx/core/http/impl/Http1xClientConnection.java
+++ b/src/main/java/io/vertx/core/http/impl/Http1xClientConnection.java
@@ -117,12 +117,11 @@ ConnectionListener<HttpClientConnection> listener() {
private HttpRequest createRequest(
HttpMethod method,
- String rawMethod,
String uri,
MultiMap headerMap,
String hostHeader,
boolean chunked) {
- DefaultHttpRequest request = new DefaultHttpRequest(HttpUtils.toNettyHttpVersion(version), HttpUtils.toNettyHttpMethod(method, rawMethod), uri, false);
+ DefaultHttpRequest request = new DefaultHttpRequest(HttpUtils.toNettyHttpVersion(version), HttpMethodImpl.toNetty(method), uri, false);
HttpHeaders headers = request.headers();
if (headerMap != null) {
for (Map.Entry<String, String> header : headerMap) {
@@ -313,8 +312,8 @@ public ContextInternal getContext() {
}
@Override
- public void writeHead(HttpMethod method, String rawMethod, String uri, MultiMap headers, String hostHeader, boolean chunked, ByteBuf buf, boolean end, StreamPriority priority, Handler<AsyncResult<Void>> handler) {
- HttpRequest request = conn.createRequest(method, rawMethod, uri, headers, hostHeader, chunked);
+ public void writeHead(HttpMethod method, String uri, MultiMap headers, String hostHeader, boolean chunked, ByteBuf buf, boolean end, StreamPriority priority, Handler<AsyncResult<Void>> handler) {
+ HttpRequest request = conn.createRequest(method, uri, headers, hostHeader, chunked);
if (end) {
if (buf != null) {
request = new AssembledFullHttpRequest(request, buf);
diff --git a/src/main/java/io/vertx/core/http/impl/Http1xServerRequest.java b/src/main/java/io/vertx/core/http/impl/Http1xServerRequest.java
--- a/src/main/java/io/vertx/core/http/impl/Http1xServerRequest.java
+++ b/src/main/java/io/vertx/core/http/impl/Http1xServerRequest.java
@@ -65,7 +65,6 @@ public class Http1xServerRequest implements HttpServerRequest {
private HttpRequest request;
private io.vertx.core.http.HttpVersion version;
private io.vertx.core.http.HttpMethod method;
- private String rawMethod;
private String uri;
private String path;
private String query;
@@ -208,24 +207,11 @@ public io.vertx.core.http.HttpVersion version() {
@Override
public io.vertx.core.http.HttpMethod method() {
if (method == null) {
- String sMethod = request.method().toString();
- try {
- method = io.vertx.core.http.HttpMethod.valueOf(sMethod);
- } catch (IllegalArgumentException e) {
- method = io.vertx.core.http.HttpMethod.OTHER;
- }
+ method = io.vertx.core.http.impl.HttpMethodImpl.fromNetty(request.method());
}
return method;
}
- @Override
- public String rawMethod() {
- if (rawMethod == null) {
- rawMethod = request.method().toString();
- }
- return rawMethod;
- }
-
@Override
public String uri() {
if (uri == null) {
diff --git a/src/main/java/io/vertx/core/http/impl/Http2ClientConnection.java b/src/main/java/io/vertx/core/http/impl/Http2ClientConnection.java
--- a/src/main/java/io/vertx/core/http/impl/Http2ClientConnection.java
+++ b/src/main/java/io/vertx/core/http/impl/Http2ClientConnection.java
@@ -17,7 +17,6 @@
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http2.DefaultHttp2Headers;
-import io.netty.handler.codec.http2.Http2Connection;
import io.netty.handler.codec.http2.Http2Error;
import io.netty.handler.codec.http2.Http2Exception;
import io.netty.handler.codec.http2.Http2Headers;
@@ -166,7 +165,7 @@ public synchronized void onPushPromiseRead(ChannelHandlerContext ctx, int stream
Handler<HttpClientRequest> pushHandler = stream.pushHandler();
if (pushHandler != null) {
String rawMethod = headers.method().toString();
- HttpMethod method = HttpUtils.toVertxMethod(rawMethod);
+ HttpMethod method = HttpMethod.valueOf(rawMethod);
String uri = headers.path().toString();
String authority = headers.authority() != null ? headers.authority().toString() : null;
MultiMap headersMap = new Http2HeadersAdaptor(headers);
@@ -181,7 +180,7 @@ public synchronized void onPushPromiseRead(ChannelHandlerContext ctx, int stream
host = authority.substring(0, pos);
port = Integer.parseInt(authority.substring(pos + 1));
}
- HttpClientRequestPushPromise pushReq = new HttpClientRequestPushPromise(this, client, isSsl(), method, rawMethod, uri, host, port, headersMap);
+ HttpClientRequestPushPromise pushReq = new HttpClientRequestPushPromise(this, client, isSsl(), method, uri, host, port, headersMap);
pushReq.getStream().init(promisedStream);
if (metrics != null) {
((Stream)pushReq.getStream()).metric = metrics.responsePushed(queueMetric, metric(), localAddress(), remoteAddress(), pushReq);
@@ -455,9 +454,9 @@ Handler<HttpClientRequest> pushHandler() {
}
@Override
- public void writeHead(HttpMethod method, String rawMethod, String uri, MultiMap headers, String hostHeader, boolean chunked, ByteBuf content, boolean end, StreamPriority priority, Handler<AsyncResult<Void>> handler) {
+ public void writeHead(HttpMethod method, String uri, MultiMap headers, String hostHeader, boolean chunked, ByteBuf content, boolean end, StreamPriority priority, Handler<AsyncResult<Void>> handler) {
Http2Headers h = new DefaultHttp2Headers();
- h.method(method != HttpMethod.OTHER ? method.name() : rawMethod);
+ h.method(method.name());
boolean e;
if (method == HttpMethod.CONNECT) {
if (hostHeader == null) {
diff --git a/src/main/java/io/vertx/core/http/impl/Http2ServerConnection.java b/src/main/java/io/vertx/core/http/impl/Http2ServerConnection.java
--- a/src/main/java/io/vertx/core/http/impl/Http2ServerConnection.java
+++ b/src/main/java/io/vertx/core/http/impl/Http2ServerConnection.java
@@ -132,11 +132,7 @@ void sendPush(int streamId, String host, HttpMethod method, MultiMap headers, St
private synchronized void doSendPush(int streamId, String host, HttpMethod method, MultiMap headers, String path, StreamPriority streamPriority, Promise<HttpServerResponse> promise) {
Http2Headers headers_ = new DefaultHttp2Headers();
- if (method == HttpMethod.OTHER) {
- throw new IllegalArgumentException("Cannot push HttpMethod.OTHER");
- } else {
- headers_.method(method.name());
- }
+ headers_.method(method.name());
headers_.path(path);
headers_.scheme(isSsl() ? "https" : "http");
if (host != null) {
diff --git a/src/main/java/io/vertx/core/http/impl/Http2ServerRequestImpl.java b/src/main/java/io/vertx/core/http/impl/Http2ServerRequestImpl.java
--- a/src/main/java/io/vertx/core/http/impl/Http2ServerRequestImpl.java
+++ b/src/main/java/io/vertx/core/http/impl/Http2ServerRequestImpl.java
@@ -419,7 +419,7 @@ public HttpServerRequest setExpectMultipart(boolean expect) {
if (postRequestDecoder == null) {
String contentType = headersMap.get(HttpHeaderNames.CONTENT_TYPE);
if (contentType != null) {
- io.netty.handler.codec.http.HttpMethod method = io.netty.handler.codec.http.HttpMethod.valueOf(rawMethod);
+ io.netty.handler.codec.http.HttpMethod method = HttpMethodImpl.toNetty(this.method);
String lowerCaseContentType = contentType.toString().toLowerCase();
boolean isURLEncoded = lowerCaseContentType.startsWith(HttpHeaderValues.APPLICATION_X_WWW_FORM_URLENCODED.toString());
if ((lowerCaseContentType.startsWith(HttpHeaderValues.MULTIPART_FORM_DATA.toString()) || isURLEncoded) &&
diff --git a/src/main/java/io/vertx/core/http/impl/Http2ServerStream.java b/src/main/java/io/vertx/core/http/impl/Http2ServerStream.java
--- a/src/main/java/io/vertx/core/http/impl/Http2ServerStream.java
+++ b/src/main/java/io/vertx/core/http/impl/Http2ServerStream.java
@@ -27,7 +27,6 @@
abstract class Http2ServerStream extends VertxHttp2Stream<Http2ServerConnection> {
protected final Http2Headers headers;
- protected final String rawMethod;
protected final HttpMethod method;
protected final String uri;
protected final String host;
@@ -43,7 +42,6 @@ abstract class Http2ServerStream extends VertxHttp2Stream<Http2ServerConnection>
this.headers = null;
this.method = method;
- this.rawMethod = method.name();
this.uri = uri;
this.host = null;
this.response = new Http2ServerResponseImpl(conn, this, true, contentEncoding, null);
@@ -61,8 +59,7 @@ abstract class Http2ServerStream extends VertxHttp2Stream<Http2ServerConnection>
this.headers = headers;
this.host = host;
this.uri = headers.get(":path") != null ? headers.get(":path").toString() : null;
- this.rawMethod = headers.get(":method") != null ? headers.get(":method").toString() : null;
- this.method = HttpUtils.toVertxMethod(rawMethod);
+ this.method = headers.get(":method") != null ? HttpMethod.valueOf(headers.get(":method").toString()) : null;
this.response = new Http2ServerResponseImpl(conn, this, false, contentEncoding, host);
}
@@ -115,10 +112,6 @@ public HttpMethod method() {
return method;
}
- public String rawMethod() {
- return rawMethod;
- }
-
@Override
void handleClose() {
super.handleClose();
diff --git a/src/main/java/io/vertx/core/http/impl/Http2UpgradedClientConnection.java b/src/main/java/io/vertx/core/http/impl/Http2UpgradedClientConnection.java
--- a/src/main/java/io/vertx/core/http/impl/Http2UpgradedClientConnection.java
+++ b/src/main/java/io/vertx/core/http/impl/Http2UpgradedClientConnection.java
@@ -103,7 +103,6 @@ public HttpClientConnection connection() {
*/
@Override
public void writeHead(HttpMethod method,
- String rawMethod,
String uri,
MultiMap headers,
String hostHeader,
@@ -171,7 +170,7 @@ public void upgradeTo(ChannelHandlerContext ctx, FullHttpResponse upgradeRespons
HttpClientUpgradeHandler upgradeHandler = new HttpClientUpgradeHandler(httpCodec, upgradeCodec, 65536);
pipeline.addAfter("codec", null, new UpgradeRequestHandler());
pipeline.addAfter("codec", null, upgradeHandler);
- stream.writeHead(method, rawMethod, uri, headers, hostHeader, chunked, buf, end, priority, listener);
+ stream.writeHead(method, uri, headers, hostHeader, chunked, buf, end, priority, listener);
}
@Override
diff --git a/src/main/java/io/vertx/core/http/impl/HttpClientRequestImpl.java b/src/main/java/io/vertx/core/http/impl/HttpClientRequestImpl.java
--- a/src/main/java/io/vertx/core/http/impl/HttpClientRequestImpl.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpClientRequestImpl.java
@@ -14,7 +14,6 @@
import io.netty.buffer.ByteBuf;
import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.Unpooled;
-import io.vertx.codegen.annotations.Nullable;
import io.vertx.core.*;
import io.vertx.core.buffer.Buffer;
import io.vertx.core.http.HttpClientRequest;
@@ -57,7 +56,6 @@ public class HttpClientRequestImpl extends HttpClientRequestBase implements Http
private final Future<Void> endFuture;
private boolean chunked;
private String hostHeader;
- private String rawMethod;
private Handler<Void> continueHandler;
private Handler<Void> drainHandler;
private Handler<HttpClientRequest> pushHandler;
@@ -151,17 +149,6 @@ public synchronized boolean isChunked() {
return chunked;
}
- @Override
- public synchronized String getRawMethod() {
- return rawMethod;
- }
-
- @Override
- public synchronized HttpClientRequest setRawMethod(String method) {
- this.rawMethod = method;
- return this;
- }
-
@Override
public synchronized HttpClientRequest setHost(String host) {
this.hostHeader = host;
@@ -403,11 +390,6 @@ protected String hostHeader() {
private synchronized void connect(Handler<AsyncResult<HttpVersion>> headersHandler) {
if (!connecting) {
-
- if (method == HttpMethod.OTHER && rawMethod == null) {
- throw new IllegalStateException("You must provide a rawMethod when using an HttpMethod.OTHER method");
- }
-
SocketAddress peerAddress;
if (hostHeader != null) {
int idx = hostHeader.lastIndexOf(':');
@@ -478,7 +460,7 @@ private void connected(Handler<AsyncResult<HttpVersion>> headersHandler, HttpCli
}
};
}
- stream.writeHead(method, rawMethod, uri, headers, hostHeader(), chunked, pending, ended, priority, handler);
+ stream.writeHead(method, uri, headers, hostHeader(), chunked, pending, ended, priority, handler);
if (ended) {
tryComplete();
}
diff --git a/src/main/java/io/vertx/core/http/impl/HttpClientRequestPushPromise.java b/src/main/java/io/vertx/core/http/impl/HttpClientRequestPushPromise.java
--- a/src/main/java/io/vertx/core/http/impl/HttpClientRequestPushPromise.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpClientRequestPushPromise.java
@@ -27,7 +27,6 @@ class HttpClientRequestPushPromise extends HttpClientRequestBase {
private final Http2ClientConnection conn;
private final Http2ClientConnection.StreamImpl stream;
- private final String rawMethod;
private final MultiMap headers;
public HttpClientRequestPushPromise(
@@ -35,7 +34,6 @@ public HttpClientRequestPushPromise(
HttpClientImpl client,
boolean ssl,
HttpMethod method,
- String rawMethod,
String uri,
String host,
int port,
@@ -43,7 +41,6 @@ public HttpClientRequestPushPromise(
super(client, conn.getContext(), ssl, method, SocketAddress.inetSocketAddress(port, host), host, port, uri);
this.conn = conn;
this.stream = new Http2ClientConnection.StreamImpl(conn, conn.getContext(), this, null);
- this.rawMethod = rawMethod;
this.headers = headers;
}
@@ -82,16 +79,6 @@ public HttpMethod method() {
return method;
}
- @Override
- public String getRawMethod() {
- return rawMethod;
- }
-
- @Override
- public HttpClientRequest setRawMethod(String method) {
- throw new IllegalStateException();
- }
-
@Override
public String uri() {
return uri;
diff --git a/src/main/java/io/vertx/core/http/impl/HttpClientStream.java b/src/main/java/io/vertx/core/http/impl/HttpClientStream.java
--- a/src/main/java/io/vertx/core/http/impl/HttpClientStream.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpClientStream.java
@@ -12,17 +12,14 @@
package io.vertx.core.http.impl;
import io.netty.buffer.ByteBuf;
-import io.netty.util.concurrent.FutureListener;
import io.vertx.core.AsyncResult;
import io.vertx.core.Handler;
import io.vertx.core.MultiMap;
-import io.vertx.core.Promise;
import io.vertx.core.http.HttpConnection;
import io.vertx.core.http.HttpMethod;
import io.vertx.core.http.HttpVersion;
import io.vertx.core.http.StreamPriority;
import io.vertx.core.impl.ContextInternal;
-import io.vertx.core.net.NetSocket;
/**
* @author <a href="mailto:julien@julienviet.com">Julien Viet</a>
@@ -45,7 +42,7 @@ public interface HttpClientStream {
HttpConnection connection();
ContextInternal getContext();
- void writeHead(HttpMethod method, String rawMethod, String uri, MultiMap headers, String hostHeader, boolean chunked, ByteBuf buf, boolean end, StreamPriority priority, Handler<AsyncResult<Void>> listener);
+ void writeHead(HttpMethod method, String uri, MultiMap headers, String hostHeader, boolean chunked, ByteBuf buf, boolean end, StreamPriority priority, Handler<AsyncResult<Void>> listener);
void writeBuffer(ByteBuf buf, boolean end, Handler<AsyncResult<Void>> listener);
void writeFrame(int type, int flags, ByteBuf payload);
diff --git a/src/main/java/io/vertx/core/http/impl/HttpMethodImpl.java b/src/main/java/io/vertx/core/http/impl/HttpMethodImpl.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/io/vertx/core/http/impl/HttpMethodImpl.java
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2011-2019 Contributors to the Eclipse Foundation
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+ * which is available at https://www.apache.org/licenses/LICENSE-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+ */
+package io.vertx.core.http.impl;
+
+import io.vertx.core.http.HttpMethod;
+
+import java.util.Objects;
+
+public class HttpMethodImpl implements HttpMethod {
+
+ public static io.netty.handler.codec.http.HttpMethod toNetty(HttpMethod method) {
+ if (method instanceof HttpMethodImpl) {
+ return ((HttpMethodImpl) method).nettyMethod;
+ } else {
+ return io.netty.handler.codec.http.HttpMethod.valueOf(method.name());
+ }
+ }
+
+ /**
+ * Lookup the {@code HttpMethod} value for the specified {@code nettyMethod}.
+ * <br/>
+ * The predefined method constants {@link #GET}, {@link #POST}, {@link #PUT}, {@link #HEAD}, {@link #OPTIONS},
+ * {@link #DELETE}, {@link #TRACE}, {@link #CONNECT} and {@link #PATCH} are interned and will be returned
+ * when case-sensitively matching their string value (i.e. {@code "GET"}, etc.)
+ * <br/>
+ * Otherwise a new instance is returned.
+ *
+ * @param method the netty method
+ * @return the {@code HttpMethod} instance for the specified netty {@code method}
+ */
+ public static HttpMethod fromNetty(io.netty.handler.codec.http.HttpMethod method) {
+ // Fast path
+ if (method == io.netty.handler.codec.http.HttpMethod.GET) {
+ return GET;
+ } else if (method == io.netty.handler.codec.http.HttpMethod.POST) {
+ return POST;
+ } else {
+      // Keep this method small by delegating the uncommon cases
+ return _fromNetty(method);
+ }
+ }
+
+ private static HttpMethod _fromNetty(io.netty.handler.codec.http.HttpMethod sMethod) {
+ switch (sMethod.name()) {
+ case "OPTIONS":
+ return OPTIONS;
+ case "HEAD":
+ return HEAD;
+ case "PUT":
+ return PUT;
+ case "DELETE":
+ return DELETE;
+ case "TRACE":
+ return TRACE;
+ case "CONNECT":
+ return CONNECT;
+ case "PATCH":
+ return PATCH;
+ default:
+ return new HttpMethodImpl(sMethod);
+ }
+ }
+
+ private final io.netty.handler.codec.http.HttpMethod nettyMethod;
+
+ public HttpMethodImpl(io.netty.handler.codec.http.HttpMethod nettyMethod) {
+ this.nettyMethod = nettyMethod;
+ }
+
+ @Override
+ public String name() {
+ return nettyMethod.name();
+ }
+
+ @Override
+ public int hashCode() {
+ return nettyMethod.hashCode();
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (obj instanceof HttpMethod) {
+ HttpMethod that = (HttpMethod) obj;
+ return Objects.equals(name(), that.name());
+ }
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ return name();
+ }
+}
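For illustration, a minimal sketch of the interning semantics documented on `fromNetty` above, using only the API added in this file plus the `HttpMethod` constants and `valueOf` exercised by the tests below (the `main` wrapper and the `COPY` sample are illustrative):

```java
import io.vertx.core.http.HttpMethod;
import io.vertx.core.http.impl.HttpMethodImpl;

public class HttpMethodInteropSketch {
  public static void main(String[] args) {
    // Predefined methods convert to the interned Netty constants and back,
    // with GET/POST taking the fast path in fromNetty.
    io.netty.handler.codec.http.HttpMethod nettyGet = HttpMethodImpl.toNetty(HttpMethod.GET);
    System.out.println(nettyGet == io.netty.handler.codec.http.HttpMethod.GET); // true
    System.out.println(HttpMethodImpl.fromNetty(nettyGet) == HttpMethod.GET);   // true

    // Any other method name yields a new HttpMethodImpl whose equality is
    // based on the case-sensitive name() value.
    HttpMethod copy = HttpMethodImpl.fromNetty(io.netty.handler.codec.http.HttpMethod.valueOf("COPY"));
    System.out.println(copy.name());                             // COPY
    System.out.println(copy.equals(HttpMethod.valueOf("COPY"))); // true
  }
}
```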
diff --git a/src/main/java/io/vertx/core/http/impl/HttpUtils.java b/src/main/java/io/vertx/core/http/impl/HttpUtils.java
--- a/src/main/java/io/vertx/core/http/impl/HttpUtils.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpUtils.java
@@ -667,41 +667,6 @@ static String determineContentEncoding(Http2Headers headers) {
return null;
}
- static HttpMethod toNettyHttpMethod(io.vertx.core.http.HttpMethod method, String rawMethod) {
- switch (method) {
- case CONNECT: {
- return HttpMethod.CONNECT;
- }
- case GET: {
- return HttpMethod.GET;
- }
- case PUT: {
- return HttpMethod.PUT;
- }
- case POST: {
- return HttpMethod.POST;
- }
- case DELETE: {
- return HttpMethod.DELETE;
- }
- case HEAD: {
- return HttpMethod.HEAD;
- }
- case OPTIONS: {
- return HttpMethod.OPTIONS;
- }
- case TRACE: {
- return HttpMethod.TRACE;
- }
- case PATCH: {
- return HttpMethod.PATCH;
- }
- default: {
- return HttpMethod.valueOf(rawMethod);
- }
- }
- }
-
static HttpVersion toNettyHttpVersion(io.vertx.core.http.HttpVersion version) {
switch (version) {
case HTTP_1_0: {
@@ -716,11 +681,7 @@ static HttpVersion toNettyHttpVersion(io.vertx.core.http.HttpVersion version) {
}
static io.vertx.core.http.HttpMethod toVertxMethod(String method) {
- try {
- return io.vertx.core.http.HttpMethod.valueOf(method);
- } catch (IllegalArgumentException e) {
- return io.vertx.core.http.HttpMethod.OTHER;
- }
+ return io.vertx.core.http.HttpMethod.valueOf(method);
}
private static final AsciiString TIMEOUT_EQ = AsciiString.of("timeout=");
| diff --git a/src/test/java/io/vertx/core/http/Http1xTest.java b/src/test/java/io/vertx/core/http/Http1xTest.java
--- a/src/test/java/io/vertx/core/http/Http1xTest.java
+++ b/src/test/java/io/vertx/core/http/Http1xTest.java
@@ -3826,7 +3826,7 @@ public void testUnknownContentLengthIsSetToZeroWithHTTP_1_0() throws Exception {
@Test
public void testPartialH2CAmbiguousRequest() throws Exception {
server.requestHandler(req -> {
- assertEquals("POST", req.rawMethod());
+ assertEquals(HttpMethod.POST, req.method());
testComplete();
});
Buffer fullRequest = Buffer.buffer("POST /whatever HTTP/1.1\r\n\r\n");
diff --git a/src/test/java/io/vertx/core/http/HttpMethodTest.java b/src/test/java/io/vertx/core/http/HttpMethodTest.java
new file mode 100644
--- /dev/null
+++ b/src/test/java/io/vertx/core/http/HttpMethodTest.java
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2011-2019 Contributors to the Eclipse Foundation
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+ * which is available at https://www.apache.org/licenses/LICENSE-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+ */
+package io.vertx.core.http;
+
+import io.vertx.core.http.impl.HttpMethodImpl;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.fail;
+
+public class HttpMethodTest {
+
+ @Test
+ public void testConstantNames() {
+ assertEquals("GET", HttpMethod.GET.name());
+ assertEquals("POST", HttpMethod.POST.name());
+ assertEquals("PUT", HttpMethod.PUT.name());
+ assertEquals("HEAD", HttpMethod.HEAD.name());
+ assertEquals("CONNECT", HttpMethod.CONNECT.name());
+ assertEquals("DELETE", HttpMethod.DELETE.name());
+ assertEquals("OPTIONS", HttpMethod.OPTIONS.name());
+ assertEquals("PATCH", HttpMethod.PATCH.name());
+ assertEquals("TRACE", HttpMethod.TRACE.name());
+ }
+
+ @Test
+ public void testConstants() {
+ for (HttpMethod method : Arrays.asList(
+ HttpMethod.GET,
+ HttpMethod.POST,
+ HttpMethod.HEAD,
+ HttpMethod.PUT,
+ HttpMethod.CONNECT,
+ HttpMethod.DELETE,
+ HttpMethod.OPTIONS,
+ HttpMethod.PATCH,
+ HttpMethod.TRACE
+ )) {
+ assertSame(HttpMethod.valueOf(method.name()), method);
+ assertSame(method.name(), method.toString());
+ }
+ }
+
+ @Test
+ public void testInvalidValueOf() {
+ for (String method : Arrays.asList("", " ")) {
+ try {
+ HttpMethod.valueOf(method);
+ fail();
+ } catch (IllegalArgumentException ignore) {
+ }
+ }
+ try {
+ HttpMethod.valueOf(null);
+ fail();
+ } catch (NullPointerException ignore) {
+ }
+ }
+
+ @Test
+ public void testValueOf() {
+ HttpMethod m1 = HttpMethod.valueOf("foo");
+ HttpMethod m2 = HttpMethod.valueOf("foo");
+ assertEquals("foo", m1.name());
+ assertEquals("foo", m1.toString());
+ assertNotSame(m1, m2);
+ assertEquals(m1.hashCode(), m2.hashCode());
+ assertEquals(m1, m2);
+ }
+
+ @Test
+ public void testCaseSensitive() {
+ HttpMethod m1 = HttpMethod.valueOf("Foo");
+ HttpMethod m2 = HttpMethod.valueOf("foo");
+ assertEquals("Foo", m1.name());
+ assertEquals("Foo", m1.toString());
+ assertNotSame(m1, m2);
+ assertNotEquals(m1.hashCode(), m2.hashCode());
+ assertNotEquals(m1, m2);
+ }
+
+ @Test
+ public void testNettyInterop() {
+ assertSame(HttpMethodImpl.toNetty(HttpMethod.GET), io.netty.handler.codec.http.HttpMethod.GET);
+ assertSame(HttpMethodImpl.toNetty(HttpMethod.POST), io.netty.handler.codec.http.HttpMethod.POST);
+ assertSame(HttpMethodImpl.toNetty(HttpMethod.PUT), io.netty.handler.codec.http.HttpMethod.PUT);
+ assertSame(HttpMethodImpl.toNetty(HttpMethod.HEAD), io.netty.handler.codec.http.HttpMethod.HEAD);
+ assertSame(HttpMethodImpl.toNetty(HttpMethod.CONNECT), io.netty.handler.codec.http.HttpMethod.CONNECT);
+ assertSame(HttpMethodImpl.toNetty(HttpMethod.DELETE), io.netty.handler.codec.http.HttpMethod.DELETE);
+ assertSame(HttpMethodImpl.toNetty(HttpMethod.OPTIONS), io.netty.handler.codec.http.HttpMethod.OPTIONS);
+ assertSame(HttpMethodImpl.toNetty(HttpMethod.PATCH), io.netty.handler.codec.http.HttpMethod.PATCH);
+ assertSame(HttpMethodImpl.toNetty(HttpMethod.TRACE), io.netty.handler.codec.http.HttpMethod.TRACE);
+ assertSame(HttpMethodImpl.fromNetty(io.netty.handler.codec.http.HttpMethod.GET), HttpMethod.GET);
+ assertSame(HttpMethodImpl.fromNetty(io.netty.handler.codec.http.HttpMethod.valueOf("GET")), HttpMethod.GET);
+ assertSame(HttpMethodImpl.fromNetty(io.netty.handler.codec.http.HttpMethod.POST), HttpMethod.POST);
+ assertSame(HttpMethodImpl.fromNetty(io.netty.handler.codec.http.HttpMethod.valueOf("POST")), HttpMethod.POST);
+ assertSame(HttpMethodImpl.fromNetty(io.netty.handler.codec.http.HttpMethod.PUT), HttpMethod.PUT);
+ assertSame(HttpMethodImpl.fromNetty(io.netty.handler.codec.http.HttpMethod.valueOf("PUT")), HttpMethod.PUT);
+ assertSame(HttpMethodImpl.fromNetty(io.netty.handler.codec.http.HttpMethod.HEAD), HttpMethod.HEAD);
+ assertSame(HttpMethodImpl.fromNetty(io.netty.handler.codec.http.HttpMethod.CONNECT), HttpMethod.CONNECT);
+ assertSame(HttpMethodImpl.fromNetty(io.netty.handler.codec.http.HttpMethod.DELETE), HttpMethod.DELETE);
+ assertSame(HttpMethodImpl.fromNetty(io.netty.handler.codec.http.HttpMethod.OPTIONS), HttpMethod.OPTIONS);
+ assertSame(HttpMethodImpl.fromNetty(io.netty.handler.codec.http.HttpMethod.PATCH), HttpMethod.PATCH);
+ assertSame(HttpMethodImpl.fromNetty(io.netty.handler.codec.http.HttpMethod.TRACE), HttpMethod.TRACE);
+ assertEquals(HttpMethodImpl.toNetty(HttpMethod.valueOf("foo")).name(), "foo");
+ assertEquals(HttpMethodImpl.fromNetty(io.netty.handler.codec.http.HttpMethod.valueOf("foo")).name(), "foo");
+ }
+}
diff --git a/src/test/java/io/vertx/core/http/HttpTest.java b/src/test/java/io/vertx/core/http/HttpTest.java
--- a/src/test/java/io/vertx/core/http/HttpTest.java
+++ b/src/test/java/io/vertx/core/http/HttpTest.java
@@ -3494,26 +3494,15 @@ public void testDumpManyRequestsOnQueue() throws Exception {
await();
}
- @Test
- public void testOtherMethodWithRawMethod() {
- try {
- client.request(HttpMethod.OTHER, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
- }).end();
- fail();
- } catch (IllegalStateException expected) {
- }
- }
-
@Test
public void testOtherMethodRequest() {
server.requestHandler(r -> {
- assertEquals(HttpMethod.OTHER, r.method());
- assertEquals("COPY", r.rawMethod());
+ assertEquals("COPY", r.method().name());
r.response().end();
}).listen(testAddress, onSuccess(s -> {
- client.request(HttpMethod.OTHER, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", onSuccess(resp -> {
+ client.request(HttpMethod.valueOf("COPY"), testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", onSuccess(resp -> {
testComplete();
- })).setRawMethod("COPY").end();
+ })).end();
}));
await();
}
diff --git a/src/test/java/io/vertx/core/json/JsonCodecTest.java b/src/test/java/io/vertx/core/json/JsonCodecTest.java
--- a/src/test/java/io/vertx/core/json/JsonCodecTest.java
+++ b/src/test/java/io/vertx/core/json/JsonCodecTest.java
@@ -12,7 +12,7 @@
import com.fasterxml.jackson.core.type.TypeReference;
import io.vertx.core.buffer.Buffer;
-import io.vertx.core.http.HttpMethod;
+import io.vertx.core.http.WebsocketVersion;
import io.vertx.core.impl.Utils;
import io.vertx.core.json.jackson.DatabindCodec;
import io.vertx.core.json.jackson.JacksonCodec;
@@ -401,9 +401,9 @@ public void testDecodeValue() {
@Test
public void testEnumValue() {
// just a random enum
- Buffer json = mapper.toBuffer(HttpMethod.CONNECT);
+ Buffer json = mapper.toBuffer(WebsocketVersion.V13);
assertNotNull(json);
- assertEquals("\"CONNECT\"", json.toString());
+ assertEquals("\"V13\"", json.toString());
}
@Test
| HttpMethod is an interface
In Vert.x 2 and 3, `HttpMethod` is declared as an enum. This limits the extensibility of HTTP and prevents modelling other HTTP methods with this type, unless `HttpMethod#OTHER` is used together with the `rawMethod` attribute on `HttpServerRequest` and `HttpClientRequest`.
We can turn this enum into an interface in Vert.x 4 with a few benefits:
1. non-predefined HTTP methods are now handled like any other, without special handling
2. one can define one's own HTTP method constants for other protocols (like
And at the expense of two breakages:
1. the enum can no longer be used as-is to dispatch a call with a `switch` construct; instead the switch must be done on the `HttpMethod#name()` string value (see the sketch below).
2. the `rawMethod` property is removed because `HttpMethod` can now model the raw value.
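Below is a minimal sketch of what both breakages look like after the change, assuming the Vert.x 4 API introduced by this patch (the handler body and the `COPY` example are illustrative):

```java
import io.vertx.core.http.HttpMethod;
import io.vertx.core.http.HttpServerRequest;

public class MethodDispatch {

  // Breakage 1: switch on the name() string instead of on enum constants.
  static void handle(HttpServerRequest req) {
    switch (req.method().name()) {
      case "GET":
        req.response().end("read");
        break;
      case "COPY": // a non-predefined method, no OTHER/rawMethod special case needed
        req.response().end("copied");
        break;
      default:
        req.response().setStatusCode(405).end();
    }
  }

  // Breakage 2: rawMethod is gone, the HttpMethod value itself carries the
  // raw name, so a custom method is just a case-sensitive lookup.
  static HttpMethod copyMethod() {
    return HttpMethod.valueOf("COPY");
  }
}
```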
| 2020-01-07T13:40:44Z | 4 |
|
eclipse-vertx/vert.x | 3,197 | eclipse-vertx__vert.x-3197 | [
"3171"
] | ab1464c8dfb6a0f927e68a426bf22405323aabce | diff --git a/src/main/java/io/vertx/core/impl/ConversionHelper.java b/src/main/java/io/vertx/core/impl/ConversionHelper.java
--- a/src/main/java/io/vertx/core/impl/ConversionHelper.java
+++ b/src/main/java/io/vertx/core/impl/ConversionHelper.java
@@ -15,11 +15,11 @@
import io.vertx.core.json.JsonArray;
import io.vertx.core.json.JsonObject;
-import java.util.ArrayList;
-import java.util.Base64;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
+import java.time.Instant;
+import java.util.*;
+
+import static io.vertx.core.json.impl.JsonUtil.BASE64_ENCODER;
+import static java.time.format.DateTimeFormatter.ISO_INSTANT;
/**
* An internal conversion helper, later it could be merged with JsonObject mapFrom/mapTo and moved in Json class
@@ -47,7 +47,7 @@ private static Object toJsonElement(Object obj) {
} else if (obj instanceof CharSequence) {
return obj.toString();
} else if (obj instanceof Buffer) {
- return Base64.getEncoder().encodeToString(((Buffer)obj).getBytes());
+ return BASE64_ENCODER.encodeToString(((Buffer) obj).getBytes());
}
return obj;
}
@@ -66,7 +66,7 @@ public static JsonArray toJsonArray(List<Object> list) {
return null;
}
list = new ArrayList<>(list);
- for (int i = 0;i < list.size();i++) {
+ for (int i = 0; i < list.size(); i++) {
list.set(i, toJsonElement(list.get(i)));
}
return new JsonArray(list);
@@ -75,11 +75,18 @@ public static JsonArray toJsonArray(List<Object> list) {
@SuppressWarnings("unchecked")
public static <T> T fromObject(Object obj) {
if (obj instanceof JsonObject) {
- return (T)fromJsonObject((JsonObject)obj);
+ return (T) fromJsonObject((JsonObject) obj);
} else if (obj instanceof JsonArray) {
- return (T)fromJsonArray((JsonArray)obj);
+ return (T) fromJsonArray((JsonArray) obj);
+ } else if (obj instanceof Instant) {
+ return (T) ISO_INSTANT.format((Instant) obj);
+ } else if (obj instanceof byte[]) {
+ return (T) BASE64_ENCODER.encodeToString((byte[]) obj);
+ } else if (obj instanceof Enum) {
+ return (T) ((Enum) obj).name();
}
- return (T)obj;
+
+ return (T) obj;
}
public static Map<String, Object> fromJsonObject(JsonObject json) {
@@ -98,7 +105,7 @@ public static List<Object> fromJsonArray(JsonArray json) {
return null;
}
List<Object> list = new ArrayList<>(json.getList());
- for (int i = 0;i < list.size();i++) {
+ for (int i = 0; i < list.size(); i++) {
list.set(i, fromObject(list.get(i)));
}
return list;
diff --git a/src/main/java/io/vertx/core/json/JsonArray.java b/src/main/java/io/vertx/core/json/JsonArray.java
--- a/src/main/java/io/vertx/core/json/JsonArray.java
+++ b/src/main/java/io/vertx/core/json/JsonArray.java
@@ -19,17 +19,18 @@
import java.util.*;
import java.util.stream.Stream;
+import static io.vertx.core.json.impl.JsonUtil.*;
import static java.time.format.DateTimeFormatter.ISO_INSTANT;
/**
* A representation of a <a href="http://json.org/">JSON</a> array in Java.
- * <p>
+ *
* Unlike some other languages Java does not have a native understanding of JSON. To enable JSON to be used easily
* in Vert.x code we use this class to encapsulate the notion of a JSON array.
*
* The implementation adheres to the <a href="http://rfc-editor.org/rfc/rfc7493.txt">RFC-7493</a> to support Temporal
* data types as well as binary data.
- * <p>
+ *
* Please see the documentation for more information.
*
* @author <a href="http://tfox.org">Tim Fox</a>
@@ -78,7 +79,7 @@ public JsonArray(List list) {
/**
* Create an instance from a Buffer of JSON.
*
- * @param buf the buffer of JSON.
+ * @param buf the buffer of JSON.
*/
public JsonArray(Buffer buf) {
if (buf == null) {
@@ -93,28 +94,43 @@ public JsonArray(Buffer buf) {
/**
* Get the String at position {@code pos} in the array,
*
- * @param pos the position in the array
- * @return the String, or null if a null value present
+ * @param pos the position in the array
+ * @return the String, or null if a null value present
* @throws java.lang.ClassCastException if the value cannot be converted to String
*/
public String getString(int pos) {
- CharSequence cs = (CharSequence)list.get(pos);
- return cs == null ? null : cs.toString();
+ Object val = list.get(pos);
+
+ if (val == null) {
+ return null;
+ }
+
+ if (val instanceof CharSequence) {
+ return val.toString();
+ } else if (val instanceof Instant) {
+ return ISO_INSTANT.format((Instant) val);
+ } else if (val instanceof byte[]) {
+ return BASE64_ENCODER.encodeToString((byte[]) val);
+ } else if (val instanceof Enum) {
+ return ((Enum) val).name();
+ }
+
+ throw new ClassCastException("class " + val.getClass().getName() + " cannot be cast to class java.lang.String");
}
/**
* Get the Integer at position {@code pos} in the array,
*
- * @param pos the position in the array
- * @return the Integer, or null if a null value present
+ * @param pos the position in the array
+ * @return the Integer, or null if a null value present
* @throws java.lang.ClassCastException if the value cannot be converted to Integer
*/
public Integer getInteger(int pos) {
- Number number = (Number)list.get(pos);
+ Number number = (Number) list.get(pos);
if (number == null) {
return null;
} else if (number instanceof Integer) {
- return (Integer)number; // Avoids unnecessary unbox/box
+ return (Integer) number; // Avoids unnecessary unbox/box
} else {
return number.intValue();
}
@@ -123,16 +139,16 @@ public Integer getInteger(int pos) {
/**
* Get the Long at position {@code pos} in the array,
*
- * @param pos the position in the array
- * @return the Long, or null if a null value present
+ * @param pos the position in the array
+ * @return the Long, or null if a null value present
* @throws java.lang.ClassCastException if the value cannot be converted to Long
*/
public Long getLong(int pos) {
- Number number = (Number)list.get(pos);
+ Number number = (Number) list.get(pos);
if (number == null) {
return null;
} else if (number instanceof Long) {
- return (Long)number; // Avoids unnecessary unbox/box
+ return (Long) number; // Avoids unnecessary unbox/box
} else {
return number.longValue();
}
@@ -141,16 +157,16 @@ public Long getLong(int pos) {
/**
* Get the Double at position {@code pos} in the array,
*
- * @param pos the position in the array
- * @return the Double, or null if a null value present
+ * @param pos the position in the array
+ * @return the Double, or null if a null value present
* @throws java.lang.ClassCastException if the value cannot be converted to Double
*/
public Double getDouble(int pos) {
- Number number = (Number)list.get(pos);
+ Number number = (Number) list.get(pos);
if (number == null) {
return null;
} else if (number instanceof Double) {
- return (Double)number; // Avoids unnecessary unbox/box
+ return (Double) number; // Avoids unnecessary unbox/box
} else {
return number.doubleValue();
}
@@ -159,16 +175,16 @@ public Double getDouble(int pos) {
/**
* Get the Float at position {@code pos} in the array,
*
- * @param pos the position in the array
- * @return the Float, or null if a null value present
+ * @param pos the position in the array
+ * @return the Float, or null if a null value present
* @throws java.lang.ClassCastException if the value cannot be converted to Float
*/
public Float getFloat(int pos) {
- Number number = (Number)list.get(pos);
+ Number number = (Number) list.get(pos);
if (number == null) {
return null;
} else if (number instanceof Float) {
- return (Float)number; // Avoids unnecessary unbox/box
+ return (Float) number; // Avoids unnecessary unbox/box
} else {
return number.floatValue();
}
@@ -177,391 +193,167 @@ public Float getFloat(int pos) {
/**
* Get the Boolean at position {@code pos} in the array,
*
- * @param pos the position in the array
- * @return the Boolean, or null if a null value present
+ * @param pos the position in the array
+ * @return the Boolean, or null if a null value present
* @throws java.lang.ClassCastException if the value cannot be converted to Integer
*/
public Boolean getBoolean(int pos) {
- return (Boolean)list.get(pos);
+ return (Boolean) list.get(pos);
}
/**
* Get the JsonObject at position {@code pos} in the array.
*
- * @param pos the position in the array
- * @return the JsonObject, or null if a null value present
+ * @param pos the position in the array
+ * @return the JsonObject, or null if a null value present
* @throws java.lang.ClassCastException if the value cannot be converted to JsonObject
*/
public JsonObject getJsonObject(int pos) {
Object val = list.get(pos);
if (val instanceof Map) {
- val = new JsonObject((Map)val);
+ val = new JsonObject((Map) val);
}
- return (JsonObject)val;
+ return (JsonObject) val;
}
/**
* Get the JsonArray at position {@code pos} in the array.
*
- * @param pos the position in the array
- * @return the Integer, or null if a null value present
+ * @param pos the position in the array
+ * @return the Integer, or null if a null value present
* @throws java.lang.ClassCastException if the value cannot be converted to JsonArray
*/
public JsonArray getJsonArray(int pos) {
Object val = list.get(pos);
if (val instanceof List) {
- val = new JsonArray((List)val);
+ val = new JsonArray((List) val);
}
- return (JsonArray)val;
+ return (JsonArray) val;
}
/**
* Get the byte[] at position {@code pos} in the array.
- * <p>
+ *
* JSON itself has no notion of a binary, so this method assumes there is a String value and
* it contains a Base64 encoded binary, which it decodes if found and returns.
- * <p>
- * This method should be used in conjunction with {@link #add(byte[])}
*
- * @param pos the position in the array
- * @return the byte[], or null if a null value present
- * @throws java.lang.ClassCastException if the value cannot be converted to String
+ * @param pos the position in the array
+ * @return the byte[], or null if a null value present
+ * @throws java.lang.ClassCastException if the value cannot be converted to String
* @throws java.lang.IllegalArgumentException if the String value is not a legal Base64 encoded value
*/
public byte[] getBinary(int pos) {
- String val = (String)list.get(pos);
+ Object val = list.get(pos);
+ // no-op
if (val == null) {
return null;
- } else {
- return Base64.getDecoder().decode(val);
}
+    // no-op if value is already a byte[]
+ if (val instanceof byte[]) {
+ return (byte[]) val;
+ }
+ // assume that the value is in String format as per RFC
+ String encoded = (String) val;
+ // parse to proper type
+ return BASE64_DECODER.decode(encoded);
}
/**
* Get the Instant at position {@code pos} in the array.
- * <p>
+ *
* JSON itself has no notion of a temporal types, this extension complies to the RFC-7493, so this method assumes
* there is a String value and it contains an ISO 8601 encoded date and time format such as "2017-04-03T10:25:41Z",
* which it decodes if found and returns.
- * <p>
- * This method should be used in conjunction with {@link #add(Instant)}
*
- * @param pos the position in the array
- * @return the Instant, or null if a null value present
- * @throws java.lang.ClassCastException if the value cannot be converted to String
+ * @param pos the position in the array
+ * @return the Instant, or null if a null value present
+ * @throws java.lang.ClassCastException if the value cannot be converted to String
* @throws java.time.format.DateTimeParseException if the String value is not a legal ISO 8601 encoded value
*/
public Instant getInstant(int pos) {
- String val = (String)list.get(pos);
+ Object val = list.get(pos);
+ // no-op
if (val == null) {
return null;
- } else {
- return Instant.from(ISO_INSTANT.parse(val));
}
+ // no-op if value is already an Instant
+ if (val instanceof Instant) {
+ return (Instant) val;
+ }
+ // assume that the value is in String format as per RFC
+ String encoded = (String) val;
+ // parse to proper type
+ return Instant.from(ISO_INSTANT.parse(encoded));
}
/**
- * Get the Object value at position {@code pos} in the array.
+ * Get the value with the specified key, as an Object with types respecting the limitations of JSON.
+ * <ul>
+ * <li>{@code Map} will be wrapped to {@code JsonObject}</li>
+ * <li>{@code List} will be wrapped to {@code JsonArray}</li>
+ * <li>{@code Instant} will be converted to {@code String}</li>
+ * <li>{@code byte[]} will be converted to {@code String}</li>
+ * <li>{@code Enum} will be converted to {@code String}</li>
+ * </ul>
*
- * @param pos the position in the array
- * @return the Integer, or null if a null value present
+ * @param pos the position in the array
+ * @return the Integer, or null if a null value present
*/
public Object getValue(int pos) {
- Object val = list.get(pos);
- if (val instanceof Map) {
- val = new JsonObject((Map)val);
- } else if (val instanceof List) {
- val = new JsonArray((List)val);
- }
- return val;
+ return wrapJsonValue(list.get(pos));
}
/**
* Is there a null value at position pos?
*
- * @param pos the position in the array
+ * @param pos the position in the array
* @return true if null value present, false otherwise
*/
public boolean hasNull(int pos) {
return list.get(pos) == null;
}
- /**
- * Add an enum to the JSON array.
- * <p>
- * JSON has no concept of encoding Enums, so the Enum will be converted to a String using the {@link java.lang.Enum#name()}
- * method and the value added as a String.
- *
- * @param value the value
- * @return a reference to this, so the API can be used fluently
- */
- public JsonArray add(Enum value) {
- list.add(value != null ? value.name() : null);
- return this;
- }
-
- /**
- * Add a CharSequence to the JSON array.
- *
- * @param value the value
- * @return a reference to this, so the API can be used fluently
- */
- public JsonArray add(CharSequence value) {
- list.add(value != null ? value.toString() : null);
- return this;
- }
-
- /**
- * Add a String to the JSON array.
- *
- * @param value the value
- * @return a reference to this, so the API can be used fluently
- */
- public JsonArray add(String value) {
- list.add(value);
- return this;
- }
-
- /**
- * Add an Integer to the JSON array.
- *
- * @param value the value
- * @return a reference to this, so the API can be used fluently
- */
- public JsonArray add(Integer value) {
- list.add(value);
- return this;
- }
-
- /**
- * Add a Long to the JSON array.
- *
- * @param value the value
- * @return a reference to this, so the API can be used fluently
- */
- public JsonArray add(Long value) {
- list.add(value);
- return this;
- }
-
- /**
- * Add a Double to the JSON array.
- *
- * @param value the value
- * @return a reference to this, so the API can be used fluently
- */
- public JsonArray add(Double value) {
- list.add(value);
- return this;
- }
-
- /**
- * Add a Float to the JSON array.
- *
- * @param value the value
- * @return a reference to this, so the API can be used fluently
- */
- public JsonArray add(Float value) {
- list.add(value);
- return this;
- }
-
- /**
- * Add a Boolean to the JSON array.
- *
- * @param value the value
- * @return a reference to this, so the API can be used fluently
- */
- public JsonArray add(Boolean value) {
- list.add(value);
- return this;
- }
-
/**
* Add a null value to the JSON array.
*
- * @return a reference to this, so the API can be used fluently
+ * @return a reference to this, so the API can be used fluently
*/
public JsonArray addNull() {
list.add(null);
return this;
}
- /**
- * Add a JSON object to the JSON array.
- *
- * @param value the value
- * @return a reference to this, so the API can be used fluently
- */
- public JsonArray add(JsonObject value) {
- list.add(value);
- return this;
- }
-
- /**
- * Add another JSON array to the JSON array.
- *
- * @param value the value
- * @return a reference to this, so the API can be used fluently
- */
- public JsonArray add(JsonArray value) {
- list.add(value);
- return this;
- }
-
- /**
- * Add a binary value to the JSON array.
- * <p>
- * JSON has no notion of binary so the binary will be base64 encoded to a String, and the String added.
- *
- * @param value the value
- * @return a reference to this, so the API can be used fluently
- */
- public JsonArray add(byte[] value) {
- list.add(value != null ? Base64.getEncoder().encodeToString(value) : null);
- return this;
- }
-
- /**
- * Add a Instant value to the JSON array.
- * <p>
- * JSON has no notion of Temporal data so the Instant will be ISOString encoded, and the String added.
- *
- * @param value the value
- * @return a reference to this, so the API can be used fluently
- */
- public JsonArray add(Instant value) {
- list.add(value != null ? ISO_INSTANT.format(value) : null);
- return this;
- }
-
/**
* Add an Object to the JSON array.
*
- * @param value the value
- * @return a reference to this, so the API can be used fluently
- */
- public JsonArray add(Object value) {
- value = JsonObject.checkAndCopy(value, false);
- list.add(value);
- return this;
- }
-
- /**
- * Appends all of the elements in the specified array to the end of this JSON array.
- *
- * @param array the array
- * @return a reference to this, so the API can be used fluently
- */
- public JsonArray addAll(JsonArray array) {
- list.addAll(array.list);
- return this;
- }
-
- /**
- * Set an enum to the JSON array at position {@code pos}.
- * <p>
- * JSON has no concept of encoding Enums, so the Enum will be converted to a String using the {@link java.lang.Enum#name()}
- * method and the value added as a String.
- *
- * @param pos position in the array
- * @param value the value
- *
- * @return a reference to this, so the API can be used fluently
- */
- public JsonArray set(int pos, Enum value) {
- list.set(pos, value != null ? value.name() : null);
- return this;
- }
-
- /**
- * Set a CharSequence to the JSON array at position {@code pos}.
- *
- * @param pos position in the array
- * @param value the value
- *
- * @return a reference to this, so the API can be used fluently
- */
- public JsonArray set(int pos, CharSequence value) {
- list.set(pos, value != null ? value.toString() : null);
- return this;
- }
-
- /**
- * Set a String to the JSON array at position {@code pos}.
- *
- * @param pos position in the array
- * @param value the value
- *
- * @return a reference to this, so the API can be used fluently
- */
- public JsonArray set(int pos, String value) {
- list.set(pos, value);
- return this;
- }
-
- /**
- * Set an Integer to the JSON array at position {@code pos}.
- *
- * @param pos position in the array
* @param value the value
- *
- * @return a reference to this, so the API can be used fluently
- */
- public JsonArray set(int pos, Integer value) {
- list.set(pos, value);
- return this;
- }
-
- /**
- * Set a Long to the JSON array at position {@code pos}.
- *
- * @param pos position in the array
- * @param value the value
- *
- * @return a reference to this, so the API can be used fluently
- */
- public JsonArray set(int pos, Long value) {
- list.set(pos, value);
- return this;
- }
-
- /**
- * Set a Double to the JSON array at position {@code pos}.
- *
- * @param pos position in the array
- * @param value the value
- *
* @return a reference to this, so the API can be used fluently
*/
- public JsonArray set(int pos, Double value) {
- list.set(pos, value);
+ public JsonArray add(Object value) {
+ list.add(value);
return this;
}
/**
- * Set a Float to the JSON array at position {@code pos}.
+ * Add an Object to the JSON array at given position {@code pos}.
*
- * @param pos position in the array
+ * @param pos the position
* @param value the value
- *
* @return a reference to this, so the API can be used fluently
*/
- public JsonArray set(int pos, Float value) {
- list.set(pos, value);
+ public JsonArray add(int pos, Object value) {
+ list.add(pos, value);
return this;
}
/**
- * Set a Boolean to the JSON array at position {@code pos}.
- *
- * @param pos position in the array
- * @param value the value
+ * Appends all of the elements in the specified array to the end of this JSON array.
*
+ * @param array the array
* @return a reference to this, so the API can be used fluently
*/
- public JsonArray set(int pos, Boolean value) {
- list.set(pos, value);
+ public JsonArray addAll(JsonArray array) {
+ list.addAll(array.list);
return this;
}
@@ -575,72 +367,14 @@ public JsonArray setNull(int pos) {
return this;
}
- /**
- * Set a JSON object to the JSON array at position {@code pos}.
- *
- * @param pos position in the array
- * @param value the value
- *
- * @return a reference to this, so the API can be used fluently
- */
- public JsonArray set(int pos, JsonObject value) {
- list.set(pos, value);
- return this;
- }
-
- /**
- * Set another JSON array to the JSON array at position {@code pos}.
- *
- * @param pos position in the array
- * @param value the value
- *
- * @return a reference to this, so the API can be used fluently
- */
- public JsonArray set(int pos, JsonArray value) {
- list.set(pos, value);
- return this;
- }
-
- /**
- * Set a binary value to the JSON array at position {@code pos}.
- * <p>
- * JSON has no notion of binary so the binary will be base64 encoded to a String, and the String added.
- *
- * @param pos position in the array
- * @param value the value
- *
- * @return a reference to this, so the API can be used fluently
- */
- public JsonArray set(int pos, byte[] value) {
- list.set(pos, value != null ? Base64.getEncoder().encodeToString(value) : null);
- return this;
- }
-
- /**
- * Set a Instant value to the JSON array at position {@code pos}.
- * <p>
- * JSON has no notion of Temporal data so the Instant will be ISOString encoded, and the String added.
- *
- * @param pos position in the array
- * @param value the value
- *
- * @return a reference to this, so the API can be used fluently
- */
- public JsonArray set(int pos, Instant value) {
- list.set(pos, value != null ? ISO_INSTANT.format(value) : null);
- return this;
- }
-
/**
* Set an Object to the JSON array at position {@code pos}.
*
- * @param pos position in the array
+ * @param pos position in the array
* @param value the value
- *
* @return a reference to this, so the API can be used fluently
*/
public JsonArray set(int pos, Object value) {
- value = JsonObject.checkAndCopy(value, false);
list.set(pos, value);
return this;
}
@@ -649,8 +383,8 @@ public JsonArray set(int pos, Object value) {
* Does the JSON array contain the specified value? This method will scan the entire array until it finds a value
* or reaches the end.
*
- * @param value the value
- * @return true if it contains the value, false if not
+ * @param value the value
+ * @return true if it contains the value, false if not
*/
public boolean contains(Object value) {
return list.contains(value);
@@ -660,28 +394,38 @@ public boolean contains(Object value) {
* Remove the specified value from the JSON array. This method will scan the entire array until it finds a value
* or reaches the end.
*
- * @param value the value to remove
+ * @param value the value to remove
* @return true if it removed it, false if not found
*/
public boolean remove(Object value) {
- return list.remove(value);
+ final Object wrappedValue = wrapJsonValue(value);
+ for (int i = 0; i < list.size(); i++) {
+      // perform comparison on wrapped types
+ final Object otherWrapperValue = getValue(i);
+ if (wrappedValue == null) {
+ if (otherWrapperValue == null) {
+ list.remove(i);
+ return true;
+ }
+ } else {
+ if (wrappedValue.equals(otherWrapperValue)) {
+ list.remove(i);
+ return true;
+ }
+ }
+ }
+ return false;
}
/**
* Remove the value at the specified position in the JSON array.
*
- * @param pos the position to remove the value at
+ * @param pos the position to remove the value at
* @return the removed value if removed, null otherwise. If the value is a Map, a {@link JsonObject} is built from
   *         this Map and returned. If the value is a List, a {@link JsonArray} is built from this List and returned.
*/
public Object remove(int pos) {
- Object removed = list.remove(pos);
- if (removed instanceof Map) {
- return new JsonObject((Map) removed);
- } else if (removed instanceof ArrayList) {
- return new JsonArray((List) removed);
- }
- return removed;
+ return wrapJsonValue(list.remove(pos));
}
/**
@@ -705,7 +449,7 @@ public boolean isEmpty() {
/**
   * Get the underlying List
*
- * @return the underlying List
+ * @return the underlying List
*/
public List getList() {
return list;
@@ -714,7 +458,7 @@ public List getList() {
/**
* Remove all entries from the JSON array
*
- * @return a reference to this, so the API can be used fluently
+ * @return a reference to this, so the API can be used fluently
*/
public JsonArray clear() {
list.clear();
@@ -766,9 +510,8 @@ public String encodePrettily() {
@Override
public JsonArray copy() {
List<Object> copiedList = new ArrayList<>(list.size());
- for (Object val: list) {
- val = JsonObject.checkAndCopy(val, true);
- copiedList.add(val);
+ for (Object val : list) {
+ copiedList.add(checkAndCopy(val));
}
return new JsonArray(copiedList);
}
@@ -789,32 +532,64 @@ public String toString() {
@Override
public boolean equals(Object o) {
+ // null check
+ if (o == null)
+ return false;
+ // self check
if (this == o)
return true;
- if (o == null || getClass() != o.getClass())
+ // type check and cast
+ if (getClass() != o.getClass())
return false;
- return arrayEquals(list, o);
- }
- static boolean arrayEquals(List<?> l1, Object o2) {
- List<?> l2;
- if (o2 instanceof JsonArray) {
- l2 = ((JsonArray) o2).list;
- } else if (o2 instanceof List<?>) {
- l2 = (List<?>) o2;
- } else {
- return false;
- }
- if (l1.size() != l2.size()) {
+ JsonArray other = (JsonArray) o;
+ // size check
+ if (this.size() != other.size())
return false;
- }
- Iterator<?> iter = l2.iterator();
- for (Object entry : l1) {
- Object other = iter.next();
- if (entry == null ? other != null : !JsonObject.equals(entry, other)) {
+ // value comparison
+ for (int i = 0; i < this.size(); i++) {
+ Object thisValue = this.getValue(i);
+ Object otherValue = other.getValue(i);
+ // identity check
+ if (thisValue == otherValue) {
+ continue;
+ }
+ // special case for numbers
+ if (thisValue instanceof Number && otherValue instanceof Number && thisValue.getClass() != otherValue.getClass()) {
+ Number n1 = (Number) thisValue;
+ Number n2 = (Number) otherValue;
+ // floating point values
+ if (thisValue instanceof Float || thisValue instanceof Double || otherValue instanceof Float || otherValue instanceof Double) {
+ // compare as floating point double
+ if (n1.doubleValue() == n2.doubleValue()) {
+ // same value check the next entry
+ continue;
+ }
+ }
+ if (thisValue instanceof Integer || thisValue instanceof Long || otherValue instanceof Integer || otherValue instanceof Long) {
+ // compare as integer long
+ if (n1.longValue() == n2.longValue()) {
+ // same value check the next entry
+ continue;
+ }
+ }
+ }
+ // special case for char sequences
+ if (thisValue instanceof CharSequence && otherValue instanceof CharSequence && thisValue.getClass() != otherValue.getClass()) {
+ CharSequence s1 = (CharSequence) thisValue;
+ CharSequence s2 = (CharSequence) otherValue;
+
+ if (Objects.equals(s1.toString(), s2.toString())) {
+ // same value check the next entry
+ continue;
+ }
+ }
+ // fallback to standard object equals checks
+ if (!Objects.equals(thisValue, otherValue)) {
return false;
}
}
+ // all checks passed
return true;
}
@@ -848,7 +623,7 @@ private void fromBuffer(Buffer buf) {
list = Json.CODEC.fromBuffer(buf, List.class);
}
- private class Iter implements Iterator<Object> {
+ private static class Iter implements Iterator<Object> {
final Iterator<Object> listIter;
@@ -863,13 +638,7 @@ public boolean hasNext() {
@Override
public Object next() {
- Object val = listIter.next();
- if (val instanceof Map) {
- val = new JsonObject((Map)val);
- } else if (val instanceof List) {
- val = new JsonArray((List)val);
- }
- return val;
+ return wrapJsonValue(listIter.next());
}
@Override
@@ -877,6 +646,4 @@ public void remove() {
listIter.remove();
}
}
-
-
}
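To make the reworked `JsonArray` semantics concrete, a short sketch against the patched class (the sample values and printed results are illustrative, based only on the behavior shown in this diff):

```java
import io.vertx.core.json.JsonArray;

import java.time.Instant;

public class JsonArraySketch {
  public static void main(String[] args) {
    Instant now = Instant.now();
    byte[] bin = new byte[]{1, 2, 3};

    // add(Object) now accepts any value and stores extended types as-is...
    JsonArray arr = new JsonArray().add(now).add(bin).add(1);

    // ...while the getters convert lazily: getString() renders an Instant as
    // ISO-8601 and a byte[] as Base64, and getInstant()/getBinary() are
    // no-ops when the stored value already has the requested type.
    System.out.println(arr.getString(0));              // e.g. 2020-03-05T10:25:41Z
    System.out.println(arr.getInstant(0).equals(now)); // true
    System.out.println(arr.getBinary(1).length);       // 3

    // equals() now compares the wrapped JSON view of each element and
    // coerces mixed numeric types, so Integer 1 equals Long 1L here.
    System.out.println(arr.equals(new JsonArray().add(now).add(bin).add(1L))); // true
  }
}
```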
diff --git a/src/main/java/io/vertx/core/json/JsonObject.java b/src/main/java/io/vertx/core/json/JsonObject.java
--- a/src/main/java/io/vertx/core/json/JsonObject.java
+++ b/src/main/java/io/vertx/core/json/JsonObject.java
@@ -14,26 +14,25 @@
import io.vertx.core.buffer.Buffer;
import io.vertx.core.shareddata.Shareable;
import io.vertx.core.shareddata.impl.ClusterSerializable;
-import io.vertx.core.spi.json.JsonCodec;
-import java.math.BigDecimal;
import java.nio.charset.StandardCharsets;
import java.time.Instant;
import java.util.*;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
+import static io.vertx.core.json.impl.JsonUtil.*;
import static java.time.format.DateTimeFormatter.ISO_INSTANT;
/**
* A representation of a <a href="http://json.org/">JSON</a> object in Java.
- * <p>
+ *
* Unlike some other languages Java does not have a native understanding of JSON. To enable JSON to be used easily
* in Vert.x code we use this class to encapsulate the notion of a JSON object.
*
* The implementation adheres to the <a href="http://rfc-editor.org/rfc/rfc7493.txt">RFC-7493</a> to support Temporal
* data types as well as binary data.
- * <p>
+ *
* Please see the documentation for more information.
*
* @author <a href="http://tfox.org">Tim Fox</a>
@@ -45,7 +44,7 @@ public class JsonObject implements Iterable<Map.Entry<String, Object>>, ClusterS
/**
* Create an instance from a string of JSON
*
- * @param json the string of JSON
+ * @param json the string of JSON
*/
public JsonObject(String json) {
if (json == null) {
@@ -67,7 +66,7 @@ public JsonObject() {
/**
* Create an instance from a Map. The Map is not copied.
*
- * @param map the map to create the instance from.
+ * @param map the map to create the instance from.
*/
public JsonObject(Map<String, Object> map) {
if (map == null) {
@@ -79,7 +78,7 @@ public JsonObject(Map<String, Object> map) {
/**
* Create an instance from a buffer.
*
- * @param buf the buffer to create the instance from.
+ * @param buf the buffer to create the instance from.
*/
public JsonObject(Buffer buf) {
if (buf == null) {
@@ -95,12 +94,10 @@ public JsonObject(Buffer buf) {
* Create a JsonObject from the fields of a Java object.
* Faster than calling `new JsonObject(Json.encode(obj))`.
   * <p/>
- * Returns {@ode null} when {@code obj} is {@code null}.
+ * Returns {@code null} when {@code obj} is {@code null}.
*
- * @param obj
- * The object to convert to a JsonObject.
- * @throws IllegalArgumentException
- * if conversion fails due to an incompatible type.
+ * @param obj The object to convert to a JsonObject.
+ * @throws IllegalArgumentException if conversion fails due to an incompatible type.
*/
@SuppressWarnings("unchecked")
public static JsonObject mapFrom(Object obj) {
@@ -115,42 +112,55 @@ public static JsonObject mapFrom(Object obj) {
* Instantiate a Java object from a JsonObject.
* Faster than calling `Json.decodeValue(Json.encode(jsonObject), type)`.
*
- * @param type
- * The type to instantiate from the JsonObject.
- * @throws IllegalArgumentException
- * if the type cannot be instantiated.
+ * @param type The type to instantiate from the JsonObject.
+ * @throws IllegalArgumentException if the type cannot be instantiated.
*/
public <T> T mapTo(Class<T> type) {
return Json.CODEC.fromValue(map, type);
}
/**
- * Get the string value with the specified key
+ * Get the string value with the specified key, special cases are addressed for extended JSON types {@code Instant},
+ * {@code byte[]} and {@code Enum} which can be converted to String.
*
- * @param key the key to return the value for
+ * @param key the key to return the value for
* @return the value or null if no value for that key
* @throws java.lang.ClassCastException if the value is not a String
*/
public String getString(String key) {
Objects.requireNonNull(key);
- CharSequence cs = (CharSequence)map.get(key);
- return cs == null ? null : cs.toString();
+ Object val = map.get(key);
+ if (val == null) {
+ return null;
+ }
+
+ if (val instanceof CharSequence) {
+ return val.toString();
+ } else if (val instanceof Instant) {
+ return ISO_INSTANT.format((Instant) val);
+ } else if (val instanceof byte[]) {
+ return BASE64_ENCODER.encodeToString((byte[]) val);
+ } else if (val instanceof Enum) {
+ return ((Enum) val).name();
+ }
+
+ throw new ClassCastException("class " + val.getClass().getName() + " cannot be cast to class java.lang.String");
}
/**
* Get the Integer value with the specified key
*
- * @param key the key to return the value for
+ * @param key the key to return the value for
* @return the value or null if no value for that key
* @throws java.lang.ClassCastException if the value is not an Integer
*/
public Integer getInteger(String key) {
Objects.requireNonNull(key);
- Number number = (Number)map.get(key);
+ Number number = (Number) map.get(key);
if (number == null) {
return null;
} else if (number instanceof Integer) {
- return (Integer)number; // Avoids unnecessary unbox/box
+ return (Integer) number; // Avoids unnecessary unbox/box
} else {
return number.intValue();
}
@@ -159,17 +169,17 @@ public Integer getInteger(String key) {
/**
* Get the Long value with the specified key
*
- * @param key the key to return the value for
+ * @param key the key to return the value for
* @return the value or null if no value for that key
* @throws java.lang.ClassCastException if the value is not a Long
*/
public Long getLong(String key) {
Objects.requireNonNull(key);
- Number number = (Number)map.get(key);
+ Number number = (Number) map.get(key);
if (number == null) {
return null;
} else if (number instanceof Long) {
- return (Long)number; // Avoids unnecessary unbox/box
+ return (Long) number; // Avoids unnecessary unbox/box
} else {
return number.longValue();
}
@@ -178,17 +188,17 @@ public Long getLong(String key) {
/**
* Get the Double value with the specified key
*
- * @param key the key to return the value for
+ * @param key the key to return the value for
* @return the value or null if no value for that key
* @throws java.lang.ClassCastException if the value is not a Double
*/
public Double getDouble(String key) {
Objects.requireNonNull(key);
- Number number = (Number)map.get(key);
+ Number number = (Number) map.get(key);
if (number == null) {
return null;
} else if (number instanceof Double) {
- return (Double)number; // Avoids unnecessary unbox/box
+ return (Double) number; // Avoids unnecessary unbox/box
} else {
return number.doubleValue();
}
@@ -197,17 +207,17 @@ public Double getDouble(String key) {
/**
* Get the Float value with the specified key
*
- * @param key the key to return the value for
+ * @param key the key to return the value for
* @return the value or null if no value for that key
* @throws java.lang.ClassCastException if the value is not a Float
*/
public Float getFloat(String key) {
Objects.requireNonNull(key);
- Number number = (Number)map.get(key);
+ Number number = (Number) map.get(key);
if (number == null) {
return null;
} else if (number instanceof Float) {
- return (Float)number; // Avoids unnecessary unbox/box
+ return (Float) number; // Avoids unnecessary unbox/box
} else {
return number.floatValue();
}
@@ -216,19 +226,19 @@ public Float getFloat(String key) {
/**
* Get the Boolean value with the specified key
*
- * @param key the key to return the value for
+ * @param key the key to return the value for
* @return the value or null if no value for that key
* @throws java.lang.ClassCastException if the value is not a Boolean
*/
public Boolean getBoolean(String key) {
Objects.requireNonNull(key);
- return (Boolean)map.get(key);
+ return (Boolean) map.get(key);
}
/**
* Get the JsonObject value with the specified key
*
- * @param key the key to return the value for
+ * @param key the key to return the value for
* @return the value or null if no value for that key
* @throws java.lang.ClassCastException if the value is not a JsonObject
*/
@@ -236,15 +246,15 @@ public JsonObject getJsonObject(String key) {
Objects.requireNonNull(key);
Object val = map.get(key);
if (val instanceof Map) {
- val = new JsonObject((Map)val);
+ val = new JsonObject((Map) val);
}
- return (JsonObject)val;
+ return (JsonObject) val;
}
/**
* Get the JsonArray value with the specified key
*
- * @param key the key to return the value for
+ * @param key the key to return the value for
* @return the value or null if no value for that key
* @throws java.lang.ClassCastException if the value is not a JsonArray
*/
@@ -252,252 +262,266 @@ public JsonArray getJsonArray(String key) {
Objects.requireNonNull(key);
Object val = map.get(key);
if (val instanceof List) {
- val = new JsonArray((List)val);
+ val = new JsonArray((List) val);
}
- return (JsonArray)val;
+ return (JsonArray) val;
}
/**
* Get the binary value with the specified key.
- * <p>
+ *
* JSON itself has no notion of a binary, this extension complies to the RFC-7493, so this method assumes there is a
* String value with the key and it contains a Base64 encoded binary, which it decodes if found and returns.
- * <p>
- * This method should be used in conjunction with {@link #put(String, byte[])}
*
- * @param key the key to return the value for
+ * @param key the key to return the value for
* @return the value or null if no value for that key
- * @throws java.lang.ClassCastException if the value is not a String
+ * @throws java.lang.ClassCastException if the value is not a String
* @throws java.lang.IllegalArgumentException if the String value is not a legal Base64 encoded value
*/
public byte[] getBinary(String key) {
Objects.requireNonNull(key);
- String encoded = (String) map.get(key);
- return encoded == null ? null : Base64.getDecoder().decode(encoded);
+ Object val = map.get(key);
+ // no-op
+ if (val == null) {
+ return null;
+ }
+    // no-op if value is already a byte[]
+ if (val instanceof byte[]) {
+ return (byte[]) val;
+ }
+ // assume that the value is in String format as per RFC
+ String encoded = (String) val;
+ // parse to proper type
+ return BASE64_DECODER.decode(encoded);
}
/**
* Get the instant value with the specified key.
- * <p>
+ *
 * JSON itself has no notion of temporal types; this extension complies with RFC-7493, so this method assumes
* there is a String value with the key and it contains an ISO 8601 encoded date and time format
* such as "2017-04-03T10:25:41Z", which it decodes if found and returns.
- * <p>
- * This method should be used in conjunction with {@link #put(String, java.time.Instant)}
*
- * @param key the key to return the value for
+ * @param key the key to return the value for
* @return the value or null if no value for that key
- * @throws java.lang.ClassCastException if the value is not a String
+ * @throws java.lang.ClassCastException if the value is not a String
* @throws java.time.format.DateTimeParseException if the String value is not a legal ISO 8601 encoded value
*/
public Instant getInstant(String key) {
Objects.requireNonNull(key);
- String encoded = (String) map.get(key);
- return encoded == null ? null : Instant.from(ISO_INSTANT.parse(encoded));
+ Object val = map.get(key);
+ // no-op
+ if (val == null) {
+ return null;
+ }
+ // no-op if value is already an Instant
+ if (val instanceof Instant) {
+ return (Instant) val;
+ }
+ // assume that the value is in String format as per RFC
+ String encoded = (String) val;
+ // parse to proper type
+ return Instant.from(ISO_INSTANT.parse(encoded));
}
/**
- * Get the value with the specified key, as an Object
- * @param key the key to lookup
+ * Get the value with the specified key, as an Object with types respecting the limitations of JSON.
+ * <ul>
+ * <li>{@code Map} will be wrapped to {@code JsonObject}</li>
+ * <li>{@code List} will be wrapped to {@code JsonArray}</li>
+ * <li>{@code Instant} will be converted to {@code String}</li>
+ * <li>{@code byte[]} will be converted to {@code String}</li>
+ * <li>{@code Enum} will be converted to {@code String}</li>
+ * </ul>
+ *
+ * @param key the key to lookup
* @return the value
*/
public Object getValue(String key) {
Objects.requireNonNull(key);
- Object val = map.get(key);
- if (val instanceof Map) {
- val = new JsonObject((Map)val);
- } else if (val instanceof List) {
- val = new JsonArray((List)val);
- }
- return val;
+ return wrapJsonValue(map.get(key));
}
/**
* Like {@link #getString(String)} but specifying a default value to return if there is no entry.
*
- * @param key the key to lookup
- * @param def the default value to use if the entry is not present
+ * @param key the key to lookup
+ * @param def the default value to use if the entry is not present
* @return the value or {@code def} if no entry present
*/
public String getString(String key, String def) {
Objects.requireNonNull(key);
- CharSequence cs = (CharSequence)map.get(key);
- return cs != null || map.containsKey(key) ? cs == null ? null : cs.toString() : def;
+ if (map.containsKey(key)) {
+ return getString(key);
+ } else {
+ return def;
+ }
}
/**
* Like {@link #getInteger(String)} but specifying a default value to return if there is no entry.
*
- * @param key the key to lookup
- * @param def the default value to use if the entry is not present
+ * @param key the key to lookup
+ * @param def the default value to use if the entry is not present
* @return the value or {@code def} if no entry present
*/
public Integer getInteger(String key, Integer def) {
Objects.requireNonNull(key);
- Number val = (Number)map.get(key);
- if (val == null) {
- if (map.containsKey(key)) {
- return null;
- } else {
- return def;
- }
- } else if (val instanceof Integer) {
- return (Integer)val; // Avoids unnecessary unbox/box
+ if (map.containsKey(key)) {
+ return getInteger(key);
} else {
- return val.intValue();
+ return def;
}
}
/**
* Like {@link #getLong(String)} but specifying a default value to return if there is no entry.
*
- * @param key the key to lookup
- * @param def the default value to use if the entry is not present
+ * @param key the key to lookup
+ * @param def the default value to use if the entry is not present
* @return the value or {@code def} if no entry present
*/
public Long getLong(String key, Long def) {
Objects.requireNonNull(key);
- Number val = (Number)map.get(key);
- if (val == null) {
- if (map.containsKey(key)) {
- return null;
- } else {
- return def;
- }
- } else if (val instanceof Long) {
- return (Long)val; // Avoids unnecessary unbox/box
+ if (map.containsKey(key)) {
+ return getLong(key);
} else {
- return val.longValue();
+ return def;
}
}
/**
* Like {@link #getDouble(String)} but specifying a default value to return if there is no entry.
*
- * @param key the key to lookup
- * @param def the default value to use if the entry is not present
+ * @param key the key to lookup
+ * @param def the default value to use if the entry is not present
* @return the value or {@code def} if no entry present
*/
public Double getDouble(String key, Double def) {
Objects.requireNonNull(key);
- Number val = (Number)map.get(key);
- if (val == null) {
- if (map.containsKey(key)) {
- return null;
- } else {
- return def;
- }
- } else if (val instanceof Double) {
- return (Double)val; // Avoids unnecessary unbox/box
+ if (map.containsKey(key)) {
+ return getDouble(key);
} else {
- return val.doubleValue();
+ return def;
}
}
/**
* Like {@link #getFloat(String)} but specifying a default value to return if there is no entry.
*
- * @param key the key to lookup
- * @param def the default value to use if the entry is not present
+ * @param key the key to lookup
+ * @param def the default value to use if the entry is not present
* @return the value or {@code def} if no entry present
*/
public Float getFloat(String key, Float def) {
Objects.requireNonNull(key);
- Number val = (Number)map.get(key);
- if (val == null) {
- if (map.containsKey(key)) {
- return null;
- } else {
- return def;
- }
- } else if (val instanceof Float) {
- return (Float)val; // Avoids unnecessary unbox/box
+ if (map.containsKey(key)) {
+ return getFloat(key);
} else {
- return val.floatValue();
+ return def;
}
}
/**
* Like {@link #getBoolean(String)} but specifying a default value to return if there is no entry.
*
- * @param key the key to lookup
- * @param def the default value to use if the entry is not present
+ * @param key the key to lookup
+ * @param def the default value to use if the entry is not present
* @return the value or {@code def} if no entry present
*/
public Boolean getBoolean(String key, Boolean def) {
Objects.requireNonNull(key);
- Object val = map.get(key);
- return val != null || map.containsKey(key) ? (Boolean)val : def;
+ if (map.containsKey(key)) {
+ return getBoolean(key);
+ } else {
+ return def;
+ }
}
/**
* Like {@link #getJsonObject(String)} but specifying a default value to return if there is no entry.
*
- * @param key the key to lookup
- * @param def the default value to use if the entry is not present
+ * @param key the key to lookup
+ * @param def the default value to use if the entry is not present
* @return the value or {@code def} if no entry present
*/
public JsonObject getJsonObject(String key, JsonObject def) {
- JsonObject val = getJsonObject(key);
- return val != null || map.containsKey(key) ? val : def;
+ Objects.requireNonNull(key);
+ if (map.containsKey(key)) {
+ return getJsonObject(key);
+ } else {
+ return def;
+ }
}
/**
* Like {@link #getJsonArray(String)} but specifying a default value to return if there is no entry.
*
- * @param key the key to lookup
- * @param def the default value to use if the entry is not present
+ * @param key the key to lookup
+ * @param def the default value to use if the entry is not present
* @return the value or {@code def} if no entry present
*/
public JsonArray getJsonArray(String key, JsonArray def) {
- JsonArray val = getJsonArray(key);
- return val != null || map.containsKey(key) ? val : def;
+ Objects.requireNonNull(key);
+ if (map.containsKey(key)) {
+ return getJsonArray(key);
+ } else {
+ return def;
+ }
}
/**
* Like {@link #getBinary(String)} but specifying a default value to return if there is no entry.
*
- * @param key the key to lookup
- * @param def the default value to use if the entry is not present
+ * @param key the key to lookup
+ * @param def the default value to use if the entry is not present
* @return the value or {@code def} if no entry present
*/
public byte[] getBinary(String key, byte[] def) {
Objects.requireNonNull(key);
- Object val = map.get(key);
- return val != null || map.containsKey(key) ? (val == null ? null : Base64.getDecoder().decode((String)val)) : def;
+ if (map.containsKey(key)) {
+ return getBinary(key);
+ } else {
+ return def;
+ }
}
/**
* Like {@link #getInstant(String)} but specifying a default value to return if there is no entry.
*
- * @param key the key to lookup
- * @param def the default value to use if the entry is not present
+ * @param key the key to lookup
+ * @param def the default value to use if the entry is not present
* @return the value or {@code def} if no entry present
*/
public Instant getInstant(String key, Instant def) {
Objects.requireNonNull(key);
- Object val = map.get(key);
- return val != null || map.containsKey(key) ?
- (val == null ? null : Instant.from(ISO_INSTANT.parse((String) val))) : def;
+ if (map.containsKey(key)) {
+ return getInstant(key);
+ } else {
+ return def;
+ }
}
/**
* Like {@link #getValue(String)} but specifying a default value to return if there is no entry.
*
- * @param key the key to lookup
- * @param def the default value to use if the entry is not present
+ * @param key the key to lookup
+ * @param def the default value to use if the entry is not present
* @return the value or {@code def} if no entry present
*/
public Object getValue(String key, Object def) {
Objects.requireNonNull(key);
- Object val = getValue(key);
- return val != null || map.containsKey(key) ? val : def;
+ if (map.containsKey(key)) {
+ return getValue(key);
+ } else {
+ return def;
+ }
}
/**
* Does the JSON object contain the specified key?
*
- * @param key the key
+ * @param key the key
* @return true if it contains the key, false if not.
*/
public boolean containsKey(String key) {
@@ -514,113 +538,6 @@ public Set<String> fieldNames() {
return map.keySet();
}
- /**
- * Put an Enum into the JSON object with the specified key.
- * <p>
- * JSON has no concept of encoding Enums, so the Enum will be converted to a String using the {@link java.lang.Enum#name()}
- * method and the value put as a String.
- *
- * @param key the key
- * @param value the value
- * @return a reference to this, so the API can be used fluently
- */
- public JsonObject put(String key, Enum value) {
- Objects.requireNonNull(key);
- map.put(key, value == null ? null : value.name());
- return this;
- }
-
- /**
- * Put an CharSequence into the JSON object with the specified key.
- *
- * @param key the key
- * @param value the value
- * @return a reference to this, so the API can be used fluently
- */
- public JsonObject put(String key, CharSequence value) {
- Objects.requireNonNull(key);
- map.put(key, value == null ? null : value.toString());
- return this;
- }
-
- /**
- * Put a String into the JSON object with the specified key.
- *
- * @param key the key
- * @param value the value
- * @return a reference to this, so the API can be used fluently
- */
- public JsonObject put(String key, String value) {
- Objects.requireNonNull(key);
- map.put(key, value);
- return this;
- }
-
- /**
- * Put an Integer into the JSON object with the specified key.
- *
- * @param key the key
- * @param value the value
- * @return a reference to this, so the API can be used fluently
- */
- public JsonObject put(String key, Integer value) {
- Objects.requireNonNull(key);
- map.put(key, value);
- return this;
- }
-
- /**
- * Put a Long into the JSON object with the specified key.
- *
- * @param key the key
- * @param value the value
- * @return a reference to this, so the API can be used fluently
- */
- public JsonObject put(String key, Long value) {
- Objects.requireNonNull(key);
- map.put(key, value);
- return this;
- }
-
- /**
- * Put a Double into the JSON object with the specified key.
- *
- * @param key the key
- * @param value the value
- * @return a reference to this, so the API can be used fluently
- */
- public JsonObject put(String key, Double value) {
- Objects.requireNonNull(key);
- map.put(key, value);
- return this;
- }
-
- /**
- * Put a Float into the JSON object with the specified key.
- *
- * @param key the key
- * @param value the value
- * @return a reference to this, so the API can be used fluently
- */
- public JsonObject put(String key, Float value) {
- Objects.requireNonNull(key);
- map.put(key, value);
- return this;
- }
-
- /**
- * Put a Boolean into the JSON object with the specified key.
- *
- * @param key the key
- * @param value the value
- * @return a reference to this, so the API can be used fluently
- */
- public JsonObject put(String key, Boolean value) {
- Objects.requireNonNull(key);
- map.put(key, value);
- return this;
- }
-
/**
* Put a null value into the JSON object with the specified key.
*
@@ -633,73 +550,15 @@ public JsonObject putNull(String key) {
return this;
}
- /**
- * Put another JSON object into the JSON object with the specified key.
- *
- * @param key the key
- * @param value the value
- * @return a reference to this, so the API can be used fluently
- */
- public JsonObject put(String key, JsonObject value) {
- Objects.requireNonNull(key);
- map.put(key, value);
- return this;
- }
-
- /**
- * Put a JSON array into the JSON object with the specified key.
- *
- * @param key the key
- * @param value the value
- * @return a reference to this, so the API can be used fluently
- */
- public JsonObject put(String key, JsonArray value) {
- Objects.requireNonNull(key);
- map.put(key, value);
- return this;
- }
-
- /**
- * Put a byte[] into the JSON object with the specified key.
- * <p>
- * JSON extension RFC7493, binary will first be Base64 encoded before being put as a String.
- *
- * @param key the key
- * @param value the value
- * @return a reference to this, so the API can be used fluently
- */
- public JsonObject put(String key, byte[] value) {
- Objects.requireNonNull(key);
- map.put(key, value == null ? null : Base64.getEncoder().encodeToString(value));
- return this;
- }
-
- /**
- * Put a Instant into the JSON object with the specified key.
- * <p>
- * JSON extension RFC7493, instant will first be encoded to ISO 8601 date and time
- * String such as "2017-04-03T10:25:41Z".
- *
- * @param key the key
- * @param value the value
- * @return a reference to this, so the API can be used fluently
- */
- public JsonObject put(String key, Instant value) {
- Objects.requireNonNull(key);
- map.put(key, value == null ? null : ISO_INSTANT.format(value));
- return this;
- }
-
/**
* Put an Object into the JSON object with the specified key.
*
- * @param key the key
- * @param value the value
- * @return a reference to this, so the API can be used fluently
+ * @param key the key
+ * @param value the value
+ * @return a reference to this, so the API can be used fluently
*/
public JsonObject put(String key, Object value) {
Objects.requireNonNull(key);
- value = checkAndCopy(value, false);
map.put(key, value);
return this;
}
@@ -707,19 +566,21 @@ public JsonObject put(String key, Object value) {
/**
* Remove an entry from this object.
*
- * @param key the key
+ * @param key the key
* @return the value that was removed, or null if none
*/
public Object remove(String key) {
- return map.remove(key);
+ Objects.requireNonNull(key);
+ return wrapJsonValue(map.remove(key));
}
/**
* Merge in another JSON object.
- * <p>
+ *
* This is the equivalent of putting all the entries of the other JSON object into this object. This is not a deep
* merge, entries containing (sub) JSON objects will be replaced entirely.
- * @param other the other JSON object
+ *
+ * @param other the other JSON object
* @return a reference to this, so the API can be used fluently
*/
public JsonObject mergeIn(JsonObject other) {
@@ -730,8 +591,9 @@ public JsonObject mergeIn(JsonObject other) {
* Merge in another JSON object.
* A deep merge (recursive) matches (sub) JSON objects in the existing tree and replaces all
* matching entries. JsonArrays are treated like any other entry, i.e. replaced entirely.
+ *
* @param other the other JSON object
- * @param deep if true, a deep merge is performed
+ * @param deep if true, a deep merge is performed
* @return a reference to this, so the API can be used fluently
*/
public JsonObject mergeIn(JsonObject other, boolean deep) {
@@ -742,6 +604,7 @@ public JsonObject mergeIn(JsonObject other, boolean deep) {
* Merge in another JSON object.
* The merge is deep (recursive) to the specified level. If depth is 0, no merge is performed,
* if depth is greater than the depth of one of the objects, a full deep merge is performed.
+ *
* @param other the other JSON object
* @param depth depth of merge
* @return a reference to this, so the API can be used fluently
@@ -755,19 +618,19 @@ public JsonObject mergeIn(JsonObject other, int depth) {
map.putAll(other.map);
return this;
}
- for (Map.Entry<String, Object> e: other.map.entrySet()) {
- if (e.getValue() == null){
+ for (Map.Entry<String, Object> e : other.map.entrySet()) {
+ if (e.getValue() == null) {
map.put(e.getKey(), null);
} else {
map.merge(e.getKey(), e.getValue(), (oldVal, newVal) -> {
if (oldVal instanceof Map) {
- oldVal = new JsonObject((Map)oldVal);
+ oldVal = new JsonObject((Map) oldVal);
}
if (newVal instanceof Map) {
- newVal = new JsonObject((Map)newVal);
+ newVal = new JsonObject((Map) newVal);
}
if (oldVal instanceof JsonObject && newVal instanceof JsonObject) {
- return ((JsonObject) oldVal).mergeIn((JsonObject)newVal, depth - 1);
+ return ((JsonObject) oldVal).mergeIn((JsonObject) newVal, depth - 1);
}
return newVal;
});
@@ -817,9 +680,8 @@ public JsonObject copy() {
} else {
copiedMap = new HashMap<>(map.size());
}
- for (Map.Entry<String, Object> entry: map.entrySet()) {
- Object val = entry.getValue();
- val = checkAndCopy(val, true);
+ for (Map.Entry<String, Object> entry : map.entrySet()) {
+ Object val = checkAndCopy(entry.getValue());
copiedMap.put(entry.getKey(), val);
}
return new JsonObject(copiedMap);
@@ -857,6 +719,7 @@ public Iterator<Map.Entry<String, Object>> iterator() {
/**
* Get the number of entries in the JSON object
+ *
* @return the number of entries
*/
public int size() {
@@ -888,61 +751,69 @@ public String toString() {
@Override
public boolean equals(Object o) {
+ // null check
+ if (o == null)
+ return false;
+ // self check
if (this == o)
return true;
- if (o == null || getClass() != o.getClass())
+ // type check and cast
+ if (getClass() != o.getClass())
return false;
- return objectEquals(map, o);
- }
- private static boolean objectEquals(Map<?, ?> m1, Object o2) {
- Map<?, ?> m2;
- if (o2 instanceof JsonObject) {
- m2 = ((JsonObject) o2).map;
- } else if (o2 instanceof Map<?, ?>) {
- m2 = (Map<?, ?>) o2;
- } else {
- return false;
- }
- if (!m1.keySet().equals(m2.keySet())) {
+ JsonObject other = (JsonObject) o;
+ // size check
+ if (this.size() != other.size())
return false;
- }
- for (Map.Entry<?, ?> entry : m1.entrySet()) {
- Object val1 = entry.getValue();
- Object val2 = m2.get(entry.getKey());
- if (val1 == null ? val2 != null : !equals(val1, val2)) {
+ // value comparison
+ for (String key : map.keySet()) {
+ if (!other.containsKey(key)) {
return false;
}
- }
- return true;
- }
- static boolean equals(Object o1, Object o2) {
- if (o1 == o2) {
- return true;
- }
- if (o1 instanceof JsonObject) {
- return objectEquals(((JsonObject) o1).map, o2);
- }
- if (o1 instanceof Map<?, ?>) {
- return objectEquals((Map<?, ?>) o1, o2);
- }
- if (o1 instanceof JsonArray) {
- return JsonArray.arrayEquals(((JsonArray) o1).getList(), o2);
- }
- if (o1 instanceof List<?>) {
- return JsonArray.arrayEquals((List<?>) o1, o2);
- }
- if (o1 instanceof Number && o2 instanceof Number && o1.getClass() != o2.getClass()) {
- Number n1 = (Number) o1;
- Number n2 = (Number) o2;
- if (o1 instanceof Float || o1 instanceof Double || o2 instanceof Float || o2 instanceof Double) {
- return n1.doubleValue() == n2.doubleValue();
- } else {
- return n1.longValue() == n2.longValue();
+ Object thisValue = this.getValue(key);
+ Object otherValue = other.getValue(key);
+ // identity check
+ if (thisValue == otherValue) {
+ continue;
+ }
+ // special case for numbers
+ if (thisValue instanceof Number && otherValue instanceof Number && thisValue.getClass() != otherValue.getClass()) {
+ Number n1 = (Number) thisValue;
+ Number n2 = (Number) otherValue;
+ // floating point values
+ if (thisValue instanceof Float || thisValue instanceof Double || otherValue instanceof Float || otherValue instanceof Double) {
+ // compare as floating point double
+ if (n1.doubleValue() == n2.doubleValue()) {
+ // same value check the next entry
+ continue;
+ }
+ }
+ if (thisValue instanceof Integer || thisValue instanceof Long || otherValue instanceof Integer || otherValue instanceof Long) {
+ // compare as integer long
+ if (n1.longValue() == n2.longValue()) {
+ // same value check the next entry
+ continue;
+ }
+ }
+ }
+ // special case for char sequences
+ if (thisValue instanceof CharSequence && otherValue instanceof CharSequence && thisValue.getClass() != otherValue.getClass()) {
+ CharSequence s1 = (CharSequence) thisValue;
+ CharSequence s2 = (CharSequence) otherValue;
+
+ if (Objects.equals(s1.toString(), s2.toString())) {
+ // same value check the next entry
+ continue;
+ }
+ }
+ // fallback to standard object equals checks
+ if (!Objects.equals(thisValue, otherValue)) {
+ return false;
}
}
- return o1.equals(o2);
+ // all checks passed
+ return true;
}
@Override
@@ -975,7 +846,7 @@ private void fromBuffer(Buffer buf) {
map = Json.CODEC.fromBuffer(buf, Map.class);
}
- private class Iter implements Iterator<Map.Entry<String, Object>> {
+ private static class Iter implements Iterator<Map.Entry<String, Object>> {
final Iterator<Map.Entry<String, Object>> mapIter;
@@ -990,12 +861,15 @@ public boolean hasNext() {
@Override
public Map.Entry<String, Object> next() {
- Map.Entry<String, Object> entry = mapIter.next();
- if (entry.getValue() instanceof Map) {
- return new Entry(entry.getKey(), new JsonObject((Map)entry.getValue()));
- } else if (entry.getValue() instanceof List) {
- return new Entry(entry.getKey(), new JsonArray((List) entry.getValue()));
+ final Map.Entry<String, Object> entry = mapIter.next();
+ final Object val = entry.getValue();
+ // perform wrapping
+ final Object wrapped = wrapJsonValue(val);
+
+ if (val != wrapped) {
+ return new Entry(entry.getKey(), wrapped);
}
+
return entry;
}
@@ -1030,50 +904,6 @@ public Object setValue(Object value) {
}
}
- @SuppressWarnings("unchecked")
- static Object checkAndCopy(Object val, boolean copy) {
- if (val == null) {
- // OK
- } else if (val instanceof Number && !(val instanceof BigDecimal)) {
- // OK
- } else if (val instanceof Boolean) {
- // OK
- } else if (val instanceof String) {
- // OK
- } else if (val instanceof Character) {
- // OK
- } else if (val instanceof CharSequence) {
- val = val.toString();
- } else if (val instanceof JsonObject) {
- if (copy) {
- val = ((JsonObject) val).copy();
- }
- } else if (val instanceof JsonArray) {
- if (copy) {
- val = ((JsonArray) val).copy();
- }
- } else if (val instanceof Map) {
- if (copy) {
- val = (new JsonObject((Map)val)).copy();
- } else {
- val = new JsonObject((Map)val);
- }
- } else if (val instanceof List) {
- if (copy) {
- val = (new JsonArray((List)val)).copy();
- } else {
- val = new JsonArray((List)val);
- }
- } else if (val instanceof byte[]) {
- val = Base64.getEncoder().encodeToString((byte[])val);
- } else if (val instanceof Instant) {
- val = ISO_INSTANT.format((Instant) val);
- } else {
- throw new IllegalStateException("Illegal type in JsonObject: " + val.getClass());
- }
- return val;
- }
-
static <T> Stream<T> asStream(Iterator<T> sourceIterator) {
Iterable<T> iterable = () -> sourceIterator;
return StreamSupport.stream(iterable.spliterator(), false);
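One subtle behavioral change in the JsonObject hunks above: remove(String) now passes the removed value through wrapJsonValue, so removing an entry that holds a raw Map or List yields a JsonObject or JsonArray. A minimal sketch of the observable effect (illustrative only):

JsonObject obj = new JsonObject().put("nested", new JsonObject().put("k", 1));
Object removed = obj.remove("nested"); // a JsonObject; raw Map values are wrapped the same way
System.out.println(removed);           // {"k":1}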
diff --git a/src/main/java/io/vertx/core/json/impl/JsonUtil.java b/src/main/java/io/vertx/core/json/impl/JsonUtil.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/io/vertx/core/json/impl/JsonUtil.java
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2011-2019 Contributors to the Eclipse Foundation
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+ * which is available at https://www.apache.org/licenses/LICENSE-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+ */
+package io.vertx.core.json.impl;
+
+import io.vertx.core.json.JsonArray;
+import io.vertx.core.json.JsonObject;
+
+import java.math.BigDecimal;
+import java.time.Instant;
+import java.util.Base64;
+import java.util.List;
+import java.util.Map;
+
+import static java.time.format.DateTimeFormatter.ISO_INSTANT;
+
+/**
+ * Implementation utilities (details) affecting the way JSON objects are wrapped.
+ */
+public final class JsonUtil {
+
+ public static final Base64.Encoder BASE64_ENCODER;
+ public static final Base64.Decoder BASE64_DECODER;
+
+ static {
+ /*
+     * Vert.x 3.x Json supports RFC-7493; however, the JSON encoder/decoder format was incorrect.
+     * Users who might need to interoperate with Vert.x 3.x applications should set the system property
+ * {@code vertx.json.base64} to {@code legacy}.
+ */
+ if ("legacy".equalsIgnoreCase(System.getProperty("vertx.json.base64"))) {
+ BASE64_ENCODER = Base64.getEncoder();
+ BASE64_DECODER = Base64.getDecoder();
+ } else {
+ BASE64_ENCODER = Base64.getUrlEncoder().withoutPadding();
+ BASE64_DECODER = Base64.getUrlDecoder();
+ }
+ }
+
+ /**
+ * Wraps well known java types to adhere to the Json expected types.
+ * <ul>
+ * <li>{@code Map} will be wrapped to {@code JsonObject}</li>
+ * <li>{@code List} will be wrapped to {@code JsonArray}</li>
+ * <li>{@code Instant} will be converted to iso date {@code String}</li>
+ * <li>{@code byte[]} will be converted to base64 {@code String}</li>
+ * <li>{@code Enum} will be converted to enum name {@code String}</li>
+ * </ul>
+ *
+ * @param val java type
+ * @return wrapped type or {@code val} if not applicable.
+ */
+ public static Object wrapJsonValue(Object val) {
+ if (val == null) {
+ return null;
+ }
+
+ // perform wrapping
+ if (val instanceof Map) {
+ val = new JsonObject((Map) val);
+ } else if (val instanceof List) {
+ val = new JsonArray((List) val);
+ } else if (val instanceof Instant) {
+ val = ISO_INSTANT.format((Instant) val);
+ } else if (val instanceof byte[]) {
+ val = BASE64_ENCODER.encodeToString((byte[]) val);
+ } else if (val instanceof Enum) {
+ val = ((Enum) val).name();
+ }
+
+ return val;
+ }
+
+ @SuppressWarnings("unchecked")
+ public static Object checkAndCopy(Object val) {
+ if (val == null) {
+ // OK
+ } else if (val instanceof Number && !(val instanceof BigDecimal)) {
+ // OK
+ } else if (val instanceof Boolean) {
+ // OK
+ } else if (val instanceof String) {
+ // OK
+ } else if (val instanceof Character) {
+ // OK
+ } else if (val instanceof CharSequence) {
+ val = val.toString();
+ } else if (val instanceof JsonObject) {
+ val = ((JsonObject) val).copy();
+ } else if (val instanceof JsonArray) {
+ val = ((JsonArray) val).copy();
+ } else if (val instanceof Map) {
+ val = (new JsonObject((Map) val)).copy();
+ } else if (val instanceof List) {
+ val = (new JsonArray((List) val)).copy();
+ } else if (val instanceof byte[]) {
+ // OK
+ } else if (val instanceof Instant) {
+ // OK
+ } else if (val instanceof Enum) {
+ // OK
+ } else {
+ throw new IllegalStateException("Illegal type in Json: " + val.getClass());
+ }
+ return val;
+ }
+}
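The wrapJsonValue helper centralizes the coercions that the JsonObject and JsonArray getters rely on. A minimal usage sketch (illustrative demo class, not part of the patch; the Base64 output assumes the default, non-legacy mode):

import io.vertx.core.json.impl.JsonUtil;
import java.util.Arrays;
import java.util.Collections;

public class WrapDemo {
  public static void main(String[] args) {
    System.out.println(JsonUtil.wrapJsonValue(Collections.singletonMap("k", 1))); // {"k":1} as a JsonObject
    System.out.println(JsonUtil.wrapJsonValue(Arrays.asList(1, 2, 3)));           // [1,2,3] as a JsonArray
    System.out.println(JsonUtil.wrapJsonValue(java.time.DayOfWeek.MONDAY));       // MONDAY (enum name)
    System.out.println(JsonUtil.wrapJsonValue(new byte[]{104, 105}));             // aGk (URL-safe, unpadded)
  }
}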
diff --git a/src/main/java/io/vertx/core/json/jackson/ByteArrayDeserializer.java b/src/main/java/io/vertx/core/json/jackson/ByteArrayDeserializer.java
--- a/src/main/java/io/vertx/core/json/jackson/ByteArrayDeserializer.java
+++ b/src/main/java/io/vertx/core/json/jackson/ByteArrayDeserializer.java
@@ -18,7 +18,8 @@
import java.io.IOException;
import java.time.Instant;
-import java.util.Base64;
+
+import static io.vertx.core.json.impl.JsonUtil.BASE64_DECODER;
class ByteArrayDeserializer extends JsonDeserializer<byte[]> {
@@ -26,7 +27,7 @@ class ByteArrayDeserializer extends JsonDeserializer<byte[]> {
public byte[] deserialize(JsonParser p, DeserializationContext ctxt) throws IOException, JsonProcessingException {
String text = p.getText();
try {
- return Base64.getDecoder().decode(text);
+ return BASE64_DECODER.decode(text);
} catch (IllegalArgumentException e) {
      throw new InvalidFormatException(p, "Expected a base64 encoded byte array", text, byte[].class);
}
diff --git a/src/main/java/io/vertx/core/json/jackson/ByteArraySerializer.java b/src/main/java/io/vertx/core/json/jackson/ByteArraySerializer.java
--- a/src/main/java/io/vertx/core/json/jackson/ByteArraySerializer.java
+++ b/src/main/java/io/vertx/core/json/jackson/ByteArraySerializer.java
@@ -15,12 +15,13 @@
import com.fasterxml.jackson.databind.SerializerProvider;
import java.io.IOException;
-import java.util.Base64;
+
+import static io.vertx.core.json.impl.JsonUtil.BASE64_ENCODER;
class ByteArraySerializer extends JsonSerializer<byte[]> {
@Override
public void serialize(byte[] value, JsonGenerator jgen, SerializerProvider provider) throws IOException {
- jgen.writeString(Base64.getEncoder().encodeToString(value));
+ jgen.writeString(BASE64_ENCODER.encodeToString(value));
}
}
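Switching the serializer to the shared BASE64_ENCODER changes the default wire format for binary values to the URL-safe, unpadded alphabet; setting -Dvertx.json.base64=legacy restores the Vert.x 3.x format. A plain-JDK illustration of the difference:

import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class Base64FormatDemo {
  public static void main(String[] args) {
    byte[] hello = "hello".getBytes(StandardCharsets.UTF_8);
    // Vert.x 3.x / legacy format:
    System.out.println(Base64.getEncoder().encodeToString(hello));                     // aGVsbG8=
    // New default: URL-safe alphabet, no padding:
    System.out.println(Base64.getUrlEncoder().withoutPadding().encodeToString(hello)); // aGVsbG8
  }
}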
diff --git a/src/main/java/io/vertx/core/json/jackson/JacksonCodec.java b/src/main/java/io/vertx/core/json/jackson/JacksonCodec.java
--- a/src/main/java/io/vertx/core/json/jackson/JacksonCodec.java
+++ b/src/main/java/io/vertx/core/json/jackson/JacksonCodec.java
@@ -37,11 +37,11 @@
import java.lang.reflect.Type;
import java.time.Instant;
import java.util.ArrayList;
-import java.util.Base64;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
+import static io.vertx.core.json.impl.JsonUtil.BASE64_ENCODER;
import static java.time.format.DateTimeFormatter.ISO_INSTANT;
/**
@@ -274,8 +274,8 @@ private static void encodeJson(Object json, JsonGenerator generator) throws Enco
encodeJson(item, generator);
}
generator.writeEndArray();
- } else if (json instanceof String) {
- generator.writeString((String)json);
+ } else if (json instanceof CharSequence) {
+ generator.writeString(((CharSequence) json).toString());
} else if (json instanceof Number) {
if (json instanceof Short) {
generator.writeNumber((Short) json);
@@ -295,7 +295,7 @@ private static void encodeJson(Object json, JsonGenerator generator) throws Enco
} else if (json instanceof Instant) {
generator.writeString((ISO_INSTANT.format((Instant)json)));
} else if (json instanceof byte[]) {
- generator.writeString(Base64.getEncoder().encodeToString((byte[]) json));
+ generator.writeString(BASE64_ENCODER.encodeToString((byte[]) json));
} else if (json == null) {
generator.writeNull();
} else {
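Widening the String branch to CharSequence means a non-String character sequence stored in a JsonObject now encodes via toString() instead of failing. A short sketch, mirroring the mycharsequence case in the test patch below:

JsonObject obj = new JsonObject().put("mycharsequence", new StringBuilder("oob"));
System.out.println(obj.encode()); // {"mycharsequence":"oob"}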
diff --git a/src/main/java/io/vertx/core/parsetools/impl/JsonEventImpl.java b/src/main/java/io/vertx/core/parsetools/impl/JsonEventImpl.java
--- a/src/main/java/io/vertx/core/parsetools/impl/JsonEventImpl.java
+++ b/src/main/java/io/vertx/core/parsetools/impl/JsonEventImpl.java
@@ -21,8 +21,8 @@
import io.vertx.core.parsetools.JsonEventType;
import java.time.Instant;
-import java.util.Base64;
+import static io.vertx.core.json.impl.JsonUtil.BASE64_DECODER;
import static java.time.format.DateTimeFormatter.ISO_INSTANT;
/**
@@ -167,7 +167,7 @@ public String stringValue() {
@Override
public Buffer binaryValue() {
- return value != null ? Buffer.buffer(Base64.getDecoder().decode((String) value)) : null;
+ return value != null ? Buffer.buffer(BASE64_DECODER.decode((String) value)) : null;
}
@Override
| diff --git a/src/test/java/io/vertx/core/ConversionHelperTest.java b/src/test/java/io/vertx/core/ConversionHelperTest.java
--- a/src/test/java/io/vertx/core/ConversionHelperTest.java
+++ b/src/test/java/io/vertx/core/ConversionHelperTest.java
@@ -21,12 +21,12 @@
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Arrays;
-import java.util.Base64;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import static io.vertx.core.json.impl.JsonUtil.BASE64_DECODER;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@@ -106,7 +106,7 @@ public void testFromJsonObject() {
assertEquals("the_string", map.get("string"));
assertEquals(4, map.get("integer"));
assertEquals(true, map.get("boolean"));
- assertEquals("hello", new String(Base64.getDecoder().decode((String)map.get("binary"))));
+ assertEquals("hello", new String(BASE64_DECODER.decode((String)map.get("binary"))));
assertEquals(Collections.singletonMap("nested", 4), map.get("object"));
assertEquals(Arrays.asList(1, 2, 3), map.get("array"));
}
@@ -125,7 +125,7 @@ public void testFromJsonArray() {
assertEquals("the_string", map.get(0));
assertEquals(4, map.get(1));
assertEquals(true, map.get(2));
- assertEquals("hello", new String(Base64.getDecoder().decode((String)map.get(3))));
+ assertEquals("hello", new String(BASE64_DECODER.decode((String)map.get(3))));
assertEquals(Collections.singletonMap("nested", 4), map.get(4));
assertEquals(Arrays.asList(1, 2, 3), map.get(5));
}
diff --git a/src/test/java/io/vertx/core/json/JacksonDatabindTest.java b/src/test/java/io/vertx/core/json/JacksonDatabindTest.java
--- a/src/test/java/io/vertx/core/json/JacksonDatabindTest.java
+++ b/src/test/java/io/vertx/core/json/JacksonDatabindTest.java
@@ -24,6 +24,7 @@
import java.time.Instant;
import java.util.*;
+import static io.vertx.core.json.impl.JsonUtil.BASE64_ENCODER;
import static java.time.format.DateTimeFormatter.ISO_INSTANT;
/**
@@ -85,7 +86,7 @@ public void testNullInstantDecoding() {
public void testBytesDecoding() {
Pojo original = new Pojo();
original.bytes = TestUtils.randomByteArray(12);
- Pojo decoded = Json.decodeValue("{\"bytes\":\"" + Base64.getEncoder().encodeToString(original.bytes) + "\"}", Pojo.class);
+ Pojo decoded = Json.decodeValue("{\"bytes\":\"" + BASE64_ENCODER.encodeToString(original.bytes) + "\"}", Pojo.class);
assertArrayEquals(original.bytes, decoded.bytes);
}
diff --git a/src/test/java/io/vertx/core/json/JsonArrayTest.java b/src/test/java/io/vertx/core/json/JsonArrayTest.java
--- a/src/test/java/io/vertx/core/json/JsonArrayTest.java
+++ b/src/test/java/io/vertx/core/json/JsonArrayTest.java
@@ -21,6 +21,8 @@
import java.util.*;
import java.util.stream.Collectors;
+import static io.vertx.core.json.impl.JsonUtil.BASE64_DECODER;
+import static io.vertx.core.json.impl.JsonUtil.BASE64_ENCODER;
import static java.time.format.DateTimeFormatter.ISO_INSTANT;
import static org.junit.Assert.*;
@@ -233,8 +235,8 @@ public void testGetBinary() {
byte[] bytes = TestUtils.randomByteArray(10);
jsonArray.add(bytes);
assertArrayEquals(bytes, jsonArray.getBinary(0));
- assertEquals(Base64.getEncoder().encodeToString(bytes), jsonArray.getValue(0));
- assertArrayEquals(bytes, Base64.getDecoder().decode(jsonArray.getString(0)));
+ assertEquals(BASE64_ENCODER.encodeToString(bytes), jsonArray.getValue(0));
+ assertArrayEquals(bytes, BASE64_DECODER.decode(jsonArray.getString(0)));
try {
jsonArray.getBinary(-1);
fail();
@@ -381,7 +383,7 @@ public void testGetValue() {
assertEquals(arr, jsonArray.getValue(8));
byte[] bytes = TestUtils.randomByteArray(100);
jsonArray.add(bytes);
- assertEquals(Base64.getEncoder().encodeToString(bytes), jsonArray.getValue(9));
+ assertEquals(BASE64_ENCODER.encodeToString(bytes), jsonArray.getValue(9));
Instant now = Instant.now();
jsonArray.add(now);
assertEquals(now, jsonArray.getInstant(10));
@@ -519,7 +521,7 @@ public void testAddBinary() {
byte[] bytes = TestUtils.randomByteArray(10);
assertSame(jsonArray, jsonArray.add(bytes));
assertArrayEquals(bytes, jsonArray.getBinary(0));
- assertEquals(Base64.getEncoder().encodeToString(bytes), jsonArray.getValue(0));
+ assertEquals(BASE64_ENCODER.encodeToString(bytes), jsonArray.getValue(0));
jsonArray.add((byte[])null);
assertNull(jsonArray.getValue(1));
assertEquals(2, jsonArray.size());
@@ -559,28 +561,28 @@ public void testAddObject() {
assertEquals(Double.valueOf(1.23d), jsonArray.getDouble(4));
assertEquals(true, jsonArray.getBoolean(5));
assertArrayEquals(bytes, jsonArray.getBinary(6));
- assertEquals(Base64.getEncoder().encodeToString(bytes), jsonArray.getValue(6));
+ assertEquals(BASE64_ENCODER.encodeToString(bytes), jsonArray.getValue(6));
assertEquals(now, jsonArray.getInstant(7));
assertEquals(now.toString(), jsonArray.getValue(7));
assertEquals(obj, jsonArray.getJsonObject(8));
assertEquals(arr, jsonArray.getJsonArray(9));
try {
jsonArray.add(new SomeClass());
+ // OK (we can put anything, yet it should fail to encode if a codec is missing)
+ } catch (RuntimeException e) {
fail();
- } catch (IllegalStateException e) {
- // OK
}
try {
jsonArray.add(new BigDecimal(123));
+ // OK (we can put anything, yet it should fail to encode if a codec is missing)
+ } catch (RuntimeException e) {
fail();
- } catch (IllegalStateException e) {
- // OK
}
try {
jsonArray.add(new Date());
+ // OK (we can put anything, yet it should fail to encode if a codec is missing)
+ } catch (RuntimeException e) {
fail();
- } catch (IllegalStateException e) {
- // OK
}
}
@@ -648,6 +650,23 @@ public void testRemoveByObject() {
assertTrue(jsonArray.isEmpty());
}
+ @Test
+ public void testRemoveByWrappedObject() {
+ JsonArray arr = new JsonArray("[1, 2, 3]");
+ jsonArray.add(arr);
+ assertEquals(1, jsonArray.size());
+ assertTrue(jsonArray.remove(arr));
+ assertEquals(0, jsonArray.size());
+ assertTrue(jsonArray.isEmpty());
+ // this is OK,
+ // now test using unwrapped objects
+ jsonArray.add(arr.getList());
+ assertEquals(1, jsonArray.size());
+ assertTrue(jsonArray.remove(arr));
+ assertEquals(0, jsonArray.size());
+ assertTrue(jsonArray.isEmpty());
+ }
+
@Test
public void testRemoveByPos() {
jsonArray.add("wibble");
@@ -1137,7 +1156,7 @@ public void testSetBinary() {
}
jsonArray.add("bar");
assertSame(jsonArray, jsonArray.set(0, bytes));
- assertEquals(Base64.getEncoder().encodeToString(bytes), jsonArray.getValue(0));
+ assertEquals(BASE64_ENCODER.encodeToString(bytes), jsonArray.getValue(0));
assertEquals(1, jsonArray.size());
}
@@ -1161,21 +1180,21 @@ public void testSetObject() {
jsonArray.add("bar");
try {
jsonArray.set(0, new SomeClass());
+ // OK (we can put anything, yet it should fail to encode if a codec is missing)
+ } catch (RuntimeException e) {
fail();
- } catch (IllegalStateException e) {
- // OK
}
try {
jsonArray.set(0, new BigDecimal(123));
+ // OK (we can put anything, yet it should fail to encode if a codec is missing)
+ } catch (RuntimeException e) {
fail();
- } catch (IllegalStateException e) {
- // OK
}
try {
jsonArray.set(0, new Date());
+ // OK (we can put anything, yet it should fail to encode if a codec is missing)
+ } catch (RuntimeException e) {
fail();
- } catch (IllegalStateException e) {
- // OK
}
}
@@ -1192,4 +1211,43 @@ public void testSetNull() {
assertNull(jsonArray.getString(0));
assertEquals(1, jsonArray.size());
}
+
+ @Test
+ public void testAddWithPos() {
+ JsonArray arr = new JsonArray()
+ .add(1)
+ .add(2)
+ .add(3);
+
+ assertEquals(3, arr.size());
+
+ assertEquals(1, arr.getValue(0));
+ assertEquals(2, arr.getValue(1));
+ assertEquals(3, arr.getValue(2));
+
+ // add some values by index
+ arr.add(3, 4);
+
+ // assert that the new length changed
+ assertEquals(4, arr.size());
+ // assert the value got added
+ assertEquals(4, arr.getValue(3));
+ }
+
+ @Test
+ public void testNoEncode() {
+ Instant now = Instant.now();
+ JsonArray json = new JsonArray();
+ // bypass any custom validation
+ json.getList().add(now);
+ assertEquals(now, json.getInstant(0));
+ assertSame(now, json.getInstant(0));
+
+ // same for byte[]
+ byte[] bytes = "bytes".getBytes();
+ // bypass any custom validation
+ json.getList().add(bytes);
+ assertEquals(bytes, json.getBinary(1));
+ assertSame(bytes, json.getBinary(1));
+ }
}
diff --git a/src/test/java/io/vertx/core/json/JsonCodecTest.java b/src/test/java/io/vertx/core/json/JsonCodecTest.java
--- a/src/test/java/io/vertx/core/json/JsonCodecTest.java
+++ b/src/test/java/io/vertx/core/json/JsonCodecTest.java
@@ -25,12 +25,12 @@
import java.nio.charset.StandardCharsets;
import java.time.Instant;
import java.util.Arrays;
-import java.util.Base64;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Collection;
+import static io.vertx.core.json.impl.JsonUtil.BASE64_ENCODER;
import static java.time.format.DateTimeFormatter.ISO_INSTANT;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
@@ -81,7 +81,7 @@ public void testEncodeJsonObject() {
jsonObject.putNull("mynull");
jsonObject.put("myobj", new JsonObject().put("foo", "bar"));
jsonObject.put("myarr", new JsonArray().add("foo").add(123));
- String strBytes = Base64.getEncoder().encodeToString(bytes);
+ String strBytes = BASE64_ENCODER.encodeToString(bytes);
String expected = "{\"mystr\":\"foo\",\"mycharsequence\":\"oob\",\"myint\":123,\"mylong\":1234,\"myfloat\":1.23,\"mydouble\":2.34,\"" +
"myboolean\":true,\"mybinary\":\"" + strBytes + "\",\"myinstant\":\"" + ISO_INSTANT.format(now) + "\",\"mynull\":null,\"myobj\":{\"foo\":\"bar\"},\"myarr\":[\"foo\",123]}";
String json = mapper.toString(jsonObject);
@@ -102,7 +102,7 @@ public void testEncodeJsonArray() {
jsonArray.addNull();
jsonArray.add(new JsonObject().put("foo", "bar"));
jsonArray.add(new JsonArray().add("foo").add(123));
- String strBytes = Base64.getEncoder().encodeToString(bytes);
+ String strBytes = BASE64_ENCODER.encodeToString(bytes);
String expected = "[\"foo\",123,1234,1.23,2.34,true,\"" + strBytes + "\",null,{\"foo\":\"bar\"},[\"foo\",123]]";
String json = mapper.toString(jsonArray);
assertEquals(expected, json);
@@ -125,7 +125,7 @@ public void testEncodeJsonObjectToBuffer() {
jsonObject.putNull("mynull");
jsonObject.put("myobj", new JsonObject().put("foo", "bar"));
jsonObject.put("myarr", new JsonArray().add("foo").add(123));
- String strBytes = Base64.getEncoder().encodeToString(bytes);
+ String strBytes = BASE64_ENCODER.encodeToString(bytes);
Buffer expected = Buffer.buffer("{\"mystr\":\"foo\",\"mycharsequence\":\"oob\",\"myint\":123,\"mylong\":1234,\"myfloat\":1.23,\"mydouble\":2.34,\"" +
"myboolean\":true,\"mybinary\":\"" + strBytes + "\",\"myinstant\":\"" + ISO_INSTANT.format(now) + "\",\"mynull\":null,\"myobj\":{\"foo\":\"bar\"},\"myarr\":[\"foo\",123]}", "UTF-8");
@@ -148,7 +148,7 @@ public void testEncodeJsonArrayToBuffer() {
jsonArray.addNull();
jsonArray.add(new JsonObject().put("foo", "bar"));
jsonArray.add(new JsonArray().add("foo").add(123));
- String strBytes = Base64.getEncoder().encodeToString(bytes);
+ String strBytes = BASE64_ENCODER.encodeToString(bytes);
Buffer expected = Buffer.buffer("[\"foo\",123,1234,1.23,2.34,true,\"" + strBytes + "\",null,{\"foo\":\"bar\"},[\"foo\",123]]", "UTF-8");
Buffer json = mapper.toBuffer(jsonArray);
assertArrayEquals(expected.getBytes(), json.getBytes());
@@ -170,7 +170,7 @@ public void testEncodeJsonObjectPrettily() {
jsonObject.put("myinstant", now);
jsonObject.put("myobj", new JsonObject().put("foo", "bar"));
jsonObject.put("myarr", new JsonArray().add("foo").add(123));
- String strBytes = Base64.getEncoder().encodeToString(bytes);
+ String strBytes = BASE64_ENCODER.encodeToString(bytes);
String strInstant = ISO_INSTANT.format(now);
String expected = "{" + Utils.LINE_SEPARATOR +
" \"mystr\" : \"foo\"," + Utils.LINE_SEPARATOR +
@@ -204,7 +204,7 @@ public void testEncodeJsonArrayPrettily() {
jsonArray.addNull();
jsonArray.add(new JsonObject().put("foo", "bar"));
jsonArray.add(new JsonArray().add("foo").add(123));
- String strBytes = Base64.getEncoder().encodeToString(bytes);
+ String strBytes = BASE64_ENCODER.encodeToString(bytes);
String expected = "[ \"foo\", 123, 1234, 1.23, 2.34, true, \"" + strBytes + "\", null, {" + Utils.LINE_SEPARATOR +
" \"foo\" : \"bar\"" + Utils.LINE_SEPARATOR +
"}, [ \"foo\", 123 ] ]";
@@ -215,7 +215,7 @@ public void testEncodeJsonArrayPrettily() {
@Test
public void testDecodeJsonObject() {
byte[] bytes = TestUtils.randomByteArray(10);
- String strBytes = Base64.getEncoder().encodeToString(bytes);
+ String strBytes = BASE64_ENCODER.encodeToString(bytes);
Instant now = Instant.now();
String strInstant = ISO_INSTANT.format(now);
String json = "{\"mystr\":\"foo\",\"myint\":123,\"mylong\":1234,\"myfloat\":1.23,\"mydouble\":2.34,\"" +
@@ -229,7 +229,7 @@ public void testDecodeJsonObject() {
assertEquals(Double.valueOf(2.34d), obj.getDouble("mydouble"));
assertTrue(obj.getBoolean("myboolean"));
assertArrayEquals(bytes, obj.getBinary("mybinary"));
- assertEquals(Base64.getEncoder().encodeToString(bytes), obj.getValue("mybinary"));
+ assertEquals(BASE64_ENCODER.encodeToString(bytes), obj.getValue("mybinary"));
assertEquals(now, obj.getInstant("myinstant"));
assertEquals(now.toString(), obj.getValue("myinstant"));
assertTrue(obj.containsKey("mynull"));
@@ -243,7 +243,7 @@ public void testDecodeJsonObject() {
@Test
public void testDecodeJsonArray() {
byte[] bytes = TestUtils.randomByteArray(10);
- String strBytes = Base64.getEncoder().encodeToString(bytes);
+ String strBytes = BASE64_ENCODER.encodeToString(bytes);
Instant now = Instant.now();
String strInstant = ISO_INSTANT.format(now);
String json = "[\"foo\",123,1234,1.23,2.34,true,\"" + strBytes + "\",\"" + strInstant + "\",null,{\"foo\":\"bar\"},[\"foo\",123]]";
@@ -255,7 +255,7 @@ public void testDecodeJsonArray() {
assertEquals(Double.valueOf(2.34d), arr.getDouble(4));
assertEquals(true, arr.getBoolean(5));
assertArrayEquals(bytes, arr.getBinary(6));
- assertEquals(Base64.getEncoder().encodeToString(bytes), arr.getValue(6));
+ assertEquals(BASE64_ENCODER.encodeToString(bytes), arr.getValue(6));
assertEquals(now, arr.getInstant(7));
assertEquals(now.toString(), arr.getValue(7));
assertTrue(arr.hasNull(8));
@@ -357,7 +357,7 @@ public void encodeCustomTypeBinary() {
String json = mapper.toString(data);
assertNotNull(json);
// base64 encoded hello
- assertEquals("\"aGVsbG8=\"", json);
+ assertEquals("\"aGVsbG8\"", json);
}
@Test
diff --git a/src/test/java/io/vertx/core/json/JsonObjectTest.java b/src/test/java/io/vertx/core/json/JsonObjectTest.java
--- a/src/test/java/io/vertx/core/json/JsonObjectTest.java
+++ b/src/test/java/io/vertx/core/json/JsonObjectTest.java
@@ -12,6 +12,7 @@
package io.vertx.core.json;
import io.vertx.core.buffer.Buffer;
+import io.vertx.core.http.HttpMethod;
import io.vertx.test.core.TestUtils;
import org.junit.Before;
import org.junit.Test;
@@ -22,6 +23,8 @@
import java.util.*;
import java.util.stream.Collectors;
+import static io.vertx.core.json.impl.JsonUtil.BASE64_DECODER;
+import static io.vertx.core.json.impl.JsonUtil.BASE64_ENCODER;
import static java.time.format.DateTimeFormatter.ISO_INSTANT;
import static org.junit.Assert.*;
@@ -442,12 +445,12 @@ public void testGetBinary() {
byte[] bytes = TestUtils.randomByteArray(100);
jsonObject.put("foo", bytes);
assertArrayEquals(bytes, jsonObject.getBinary("foo"));
- assertEquals(Base64.getEncoder().encodeToString(bytes), jsonObject.getValue("foo"));
+ assertEquals(BASE64_ENCODER.encodeToString(bytes), jsonObject.getValue("foo"));
// Can also get as string:
String val = jsonObject.getString("foo");
assertNotNull(val);
- byte[] retrieved = Base64.getDecoder().decode(val);
+ byte[] retrieved = BASE64_DECODER.decode(val);
assertTrue(TestUtils.byteArraysEqual(bytes, retrieved));
jsonObject.put("foo", 123);
@@ -535,9 +538,9 @@ public void testGetBinaryDefault() {
byte[] defBytes = TestUtils.randomByteArray(100);
jsonObject.put("foo", bytes);
assertArrayEquals(bytes, jsonObject.getBinary("foo", defBytes));
- assertEquals(Base64.getEncoder().encodeToString(bytes), jsonObject.getValue("foo", Base64.getEncoder().encode(defBytes)));
+ assertEquals(BASE64_ENCODER.encodeToString(bytes), jsonObject.getValue("foo", BASE64_ENCODER.encode(defBytes)));
assertArrayEquals(bytes, jsonObject.getBinary("foo", null));
- assertEquals(Base64.getEncoder().encodeToString(bytes), jsonObject.getValue("foo", null));
+ assertEquals(BASE64_ENCODER.encodeToString(bytes), jsonObject.getValue("foo", null));
jsonObject.put("foo", 123);
try {
@@ -732,7 +735,7 @@ public void testGetValue() {
assertEquals(arr, jsonObject.getValue("foo"));
byte[] bytes = TestUtils.randomByteArray(100);
jsonObject.put("foo", bytes);
- assertTrue(TestUtils.byteArraysEqual(bytes, Base64.getDecoder().decode((String) jsonObject.getValue("foo"))));
+ assertTrue(TestUtils.byteArraysEqual(bytes, BASE64_DECODER.decode((String) jsonObject.getValue("foo"))));
jsonObject.putNull("foo");
assertNull(jsonObject.getValue("foo"));
assertNull(jsonObject.getValue("absent"));
@@ -787,8 +790,8 @@ public void testGetValueDefault() {
assertEquals(arr, jsonObject.getValue("foo", null));
byte[] bytes = TestUtils.randomByteArray(100);
jsonObject.put("foo", bytes);
- assertTrue(TestUtils.byteArraysEqual(bytes, Base64.getDecoder().decode((String) jsonObject.getValue("foo", "blah"))));
- assertTrue(TestUtils.byteArraysEqual(bytes, Base64.getDecoder().decode((String)jsonObject.getValue("foo", null))));
+ assertTrue(TestUtils.byteArraysEqual(bytes, BASE64_DECODER.decode((String) jsonObject.getValue("foo", "blah"))));
+ assertTrue(TestUtils.byteArraysEqual(bytes, BASE64_DECODER.decode((String)jsonObject.getValue("foo", null))));
jsonObject.putNull("foo");
assertNull(jsonObject.getValue("foo", "blah"));
assertNull(jsonObject.getValue("foo", null));
@@ -1044,15 +1047,15 @@ public void testPutBinary() {
assertSame(jsonObject, jsonObject.put("foo", bin1));
assertArrayEquals(bin1, jsonObject.getBinary("foo"));
- assertEquals(Base64.getEncoder().encodeToString(bin1), jsonObject.getValue("foo"));
+ assertEquals(BASE64_ENCODER.encodeToString(bin1), jsonObject.getValue("foo"));
jsonObject.put("quux", bin2);
assertArrayEquals(bin2, jsonObject.getBinary("quux"));
- assertEquals(Base64.getEncoder().encodeToString(bin2), jsonObject.getValue("quux"));
+ assertEquals(BASE64_ENCODER.encodeToString(bin2), jsonObject.getValue("quux"));
assertArrayEquals(bin1, jsonObject.getBinary("foo"));
- assertEquals(Base64.getEncoder().encodeToString(bin1), jsonObject.getValue("foo"));
+ assertEquals(BASE64_ENCODER.encodeToString(bin1), jsonObject.getValue("foo"));
jsonObject.put("foo", bin3);
assertArrayEquals(bin3, jsonObject.getBinary("foo"));
- assertEquals(Base64.getEncoder().encodeToString(bin3), jsonObject.getValue("foo"));
+ assertEquals(BASE64_ENCODER.encodeToString(bin3), jsonObject.getValue("foo"));
jsonObject.put("foo", (byte[]) null);
assertTrue(jsonObject.containsKey("foo"));
@@ -1130,28 +1133,28 @@ public void testPutValue() {
assertEquals(Float.valueOf(1.23f), jsonObject.getFloat("float"));
assertEquals(Double.valueOf(1.23d), jsonObject.getDouble("double"));
assertArrayEquals(bytes, jsonObject.getBinary("binary"));
- assertEquals(Base64.getEncoder().encodeToString(bytes), jsonObject.getValue("binary"));
+ assertEquals(BASE64_ENCODER.encodeToString(bytes), jsonObject.getValue("binary"));
assertEquals(now, jsonObject.getInstant("instant"));
assertEquals(now.toString(), jsonObject.getValue("instant"));
assertEquals(obj, jsonObject.getJsonObject("obj"));
assertEquals(arr, jsonObject.getJsonArray("arr"));
try {
jsonObject.put("inv", new SomeClass());
+ // OK (we can put anything, yet it should fail to encode if a codec is missing)
+ } catch (RuntimeException e) {
fail();
- } catch (IllegalStateException e) {
- // OK
}
try {
jsonObject.put("inv", new BigDecimal(123));
+ // OK (we can put anything, yet it should fail to encode if a codec is missing)
+ } catch (RuntimeException e) {
fail();
- } catch (IllegalStateException e) {
- // OK
}
try {
jsonObject.put("inv", new Date());
+ // OK (we can put anything, yet it should fail to encode if a codec is missing)
+ } catch (RuntimeException e) {
fail();
- } catch (IllegalStateException e) {
- // OK
}
}
@@ -1714,6 +1717,23 @@ public void testEquals() {
assertNotEquals(new JsonObject().putNull("a"), new JsonObject().put("a", 1));
assertEquals(new JsonObject().putNull("a"), new JsonObject().putNull("a"));
}
+
+ @Test
+ public void testNoEncode() {
+ Instant now = Instant.now();
+ JsonObject json = new JsonObject();
+ // bypass any custom validation
+ json.getMap().put("now", now);
+ assertEquals(now, json.getInstant("now"));
+ assertSame(now, json.getInstant("now"));
+
+ // same for byte[]
+ byte[] bytes = "bytes".getBytes();
+ // bypass any custom validation
+ json.getMap().put("bytes", bytes);
+ assertEquals(bytes, json.getBinary("bytes"));
+ assertSame(bytes, json.getBinary("bytes"));
+ }
}
diff --git a/src/test/java/io/vertx/core/parsetools/JsonParserTest.java b/src/test/java/io/vertx/core/parsetools/JsonParserTest.java
--- a/src/test/java/io/vertx/core/parsetools/JsonParserTest.java
+++ b/src/test/java/io/vertx/core/parsetools/JsonParserTest.java
@@ -24,7 +24,6 @@
import java.time.format.DateTimeParseException;
import java.util.ArrayList;
import java.util.Arrays;
-import java.util.Base64;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
@@ -35,6 +34,7 @@
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;
+import static io.vertx.core.json.impl.JsonUtil.BASE64_ENCODER;
import static java.time.format.DateTimeFormatter.*;
import static junit.framework.TestCase.assertFalse;
import static org.junit.Assert.assertEquals;
@@ -248,7 +248,7 @@ public void testInstantValue() {
@Test
public void testBinaryValue() {
byte[] value = TestUtils.randomByteArray(10);
- String encoded = Base64.getEncoder().encodeToString(value);
+ String encoded = BASE64_ENCODER.encodeToString(value);
testValue('"' + encoded + '"', event -> {
assertEquals(encoded, event.value());
assertFalse(event.isArray());
 | Feature Request: JsonArray.add(int index, Object value)
This is easy to add and is available in most JSON libraries.
PS: likewise for JsonArray.set(int index, Object value).
The add and set methods have the same semantics as in Java's List; a sketch follows below.
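A sketch of the requested semantics, mirroring java.util.List (the indexed add is the proposed addition; an indexed set already exists on JsonArray):

JsonArray arr = new JsonArray().add(1).add(3); // [1, 3]
arr.add(1, 2); // proposed: insert at index, like List.add(int, E) -> [1, 2, 3]
arr.set(0, 0); // replace at index, like List.set(int, E)          -> [0, 2, 3]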
 | we might have `JsonArray` extend `List` in v4
If JsonArray extends List, the add method is not going to be fluent (it's not going to return itself) because the signature will not be compatible with the List interface. I mean, `JsonArray add(Object value)` is not going to be possible anymore. Is this acceptable?
I can work on it :)
I think @pmlopes has a coming PR for it soon.
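For context, the clash described above is a plain Java constraint: java.util.List declares boolean add(E), and methods differing only in return type cannot coexist, so a List-implementing JsonArray would lose its fluent add. A JDK-only illustration (not Vert.x code):

import java.util.ArrayList;
import java.util.List;

public class FluentAddDemo {
  public static void main(String[] args) {
    List<Object> list = new ArrayList<>();
    boolean changed = list.add("foo"); // List.add returns boolean, so no fluent chaining
    System.out.println(changed);       // prints: true
    // A hypothetical "JsonArray add(Object)" with the same parameter list could not be
    // declared alongside the inherited boolean add(Object).
  }
}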
| 2019-11-15T10:42:55Z | 4 |
eclipse-vertx/vert.x | 3,101 | eclipse-vertx__vert.x-3101 | [
"3099"
] | 0af61a3ed8f60d666030e8873295cdb8a1cecebc | diff --git a/src/main/java/io/vertx/core/buffer/Buffer.java b/src/main/java/io/vertx/core/buffer/Buffer.java
--- a/src/main/java/io/vertx/core/buffer/Buffer.java
+++ b/src/main/java/io/vertx/core/buffer/Buffer.java
@@ -17,12 +17,12 @@
import io.vertx.codegen.annotations.GenIgnore;
import io.vertx.codegen.annotations.VertxGen;
import io.vertx.core.ServiceHelper;
-import io.vertx.core.json.Json;
import io.vertx.core.json.JsonArray;
import io.vertx.core.json.JsonObject;
import io.vertx.core.shareddata.Shareable;
import io.vertx.core.shareddata.impl.ClusterSerializable;
import io.vertx.core.spi.BufferFactory;
+import io.vertx.core.spi.json.JsonCodec;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
@@ -148,7 +148,7 @@ static Buffer buffer(ByteBuf byteBuf) {
* @return a JSON element which can be a {@link JsonArray}, {@link JsonObject}, {@link String}, ...etc if the buffer contains an array, object, string, ...etc
*/
default Object toJson() {
- return Json.decodeValue(this);
+ return JsonCodec.INSTANCE.fromBuffer(this, Object.class);
}
/**
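Per the javadoc above, toJson() can return any JSON element depending on the buffer contents. A minimal sketch (assumes the default Jackson-based codec is on the classpath):

Buffer objBuf = Buffer.buffer("{\"name\":\"vert.x\"}");
Object obj = objBuf.toJson();                  // a JsonObject here
Object arr = Buffer.buffer("[1,2]").toJson();  // a JsonArray here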
diff --git a/src/main/java/io/vertx/core/json/ByteArrayDeserializer.java b/src/main/java/io/vertx/core/json/ByteArrayDeserializer.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/io/vertx/core/json/ByteArrayDeserializer.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2011-2017 Contributors to the Eclipse Foundation
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+ * which is available at https://www.apache.org/licenses/LICENSE-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+ */
+package io.vertx.core.json;
+
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.DeserializationContext;
+import com.fasterxml.jackson.databind.JsonDeserializer;
+import com.fasterxml.jackson.databind.exc.InvalidFormatException;
+
+import java.io.IOException;
+import java.time.Instant;
+import java.util.Base64;
+
+class ByteArrayDeserializer extends JsonDeserializer<byte[]> {
+
+ @Override
+ public byte[] deserialize(JsonParser p, DeserializationContext ctxt) throws IOException, JsonProcessingException {
+ String text = p.getText();
+ try {
+ return Base64.getDecoder().decode(text);
+ } catch (IllegalArgumentException e) {
+ throw new InvalidFormatException(p, "Expected a base64 encoded byte array", text, Instant.class);
+ }
+ }
+}
diff --git a/src/main/java/io/vertx/core/json/ByteArraySerializer.java b/src/main/java/io/vertx/core/json/ByteArraySerializer.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/io/vertx/core/json/ByteArraySerializer.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2011-2017 Contributors to the Eclipse Foundation
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+ * which is available at https://www.apache.org/licenses/LICENSE-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+ */
+package io.vertx.core.json;
+
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.databind.JsonSerializer;
+import com.fasterxml.jackson.databind.SerializerProvider;
+
+import java.io.IOException;
+import java.util.Base64;
+
+class ByteArraySerializer extends JsonSerializer<byte[]> {
+
+ @Override
+ public void serialize(byte[] value, JsonGenerator jgen, SerializerProvider provider) throws IOException {
+ jgen.writeString(Base64.getEncoder().encodeToString(value));
+ }
+}
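Taken together with the deserializer above, this pair gives byte[] values a Base64 text representation in JSON. A hedged round-trip sketch, assuming both classes stay registered on the Vert.x mapper via the existing SimpleModule:

byte[] data = {1, 2, 3};
String json = Json.encode(data);                    // "\"AQID\"" - the Base64 form
byte[] back = Json.decodeValue(json, byte[].class); // {1, 2, 3} again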
diff --git a/src/main/java/io/vertx/core/json/InstantDeserializer.java b/src/main/java/io/vertx/core/json/InstantDeserializer.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/io/vertx/core/json/InstantDeserializer.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2011-2017 Contributors to the Eclipse Foundation
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+ * which is available at https://www.apache.org/licenses/LICENSE-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+ */
+package io.vertx.core.json;
+
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.DeserializationContext;
+import com.fasterxml.jackson.databind.JsonDeserializer;
+import com.fasterxml.jackson.databind.exc.InvalidFormatException;
+
+import java.io.IOException;
+import java.time.DateTimeException;
+import java.time.Instant;
+
+import static java.time.format.DateTimeFormatter.ISO_INSTANT;
+
+class InstantDeserializer extends JsonDeserializer<Instant> {
+ @Override
+ public Instant deserialize(JsonParser p, DeserializationContext ctxt) throws IOException, JsonProcessingException {
+ String text = p.getText();
+ try {
+ return Instant.from(ISO_INSTANT.parse(text));
+ } catch (DateTimeException e) {
+ throw new InvalidFormatException(p, "Expected an ISO 8601 formatted date time", text, Instant.class);
+ }
+ }
+}
diff --git a/src/main/java/io/vertx/core/json/InstantSerializer.java b/src/main/java/io/vertx/core/json/InstantSerializer.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/io/vertx/core/json/InstantSerializer.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2011-2017 Contributors to the Eclipse Foundation
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+ * which is available at https://www.apache.org/licenses/LICENSE-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+ */
+package io.vertx.core.json;
+
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.databind.JsonSerializer;
+import com.fasterxml.jackson.databind.SerializerProvider;
+
+import java.io.IOException;
+import java.time.Instant;
+
+import static java.time.format.DateTimeFormatter.ISO_INSTANT;
+
+class InstantSerializer extends JsonSerializer<Instant> {
+ @Override
+ public void serialize(Instant value, JsonGenerator jgen, SerializerProvider provider) throws IOException {
+ jgen.writeString(ISO_INSTANT.format(value));
+ }
+}
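Same pattern for Instant, a short sketch assuming the serializer/deserializer pair remains registered on Json.mapper:

Instant now = Instant.parse("2019-03-01T10:15:30Z");  // Instant.parse uses ISO_INSTANT
String json = Json.encode(now);                       // "\"2019-03-01T10:15:30Z\""
Instant back = Json.decodeValue(json, Instant.class); // equal to now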
diff --git a/src/main/java/io/vertx/core/json/Json.java b/src/main/java/io/vertx/core/json/Json.java
--- a/src/main/java/io/vertx/core/json/Json.java
+++ b/src/main/java/io/vertx/core/json/Json.java
@@ -11,29 +11,16 @@
package io.vertx.core.json;
-import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.core.JsonParser;
-import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
-import com.fasterxml.jackson.databind.*;
-import com.fasterxml.jackson.databind.exc.InvalidFormatException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.SerializationFeature;
import com.fasterxml.jackson.databind.module.SimpleModule;
-import io.netty.buffer.ByteBufInputStream;
import io.vertx.core.buffer.Buffer;
+import io.vertx.core.json.impl.JacksonCodec;
+import io.vertx.core.spi.json.JsonCodec;
-import java.io.IOException;
-import java.io.InputStream;
-import java.math.BigDecimal;
-import java.time.DateTimeException;
import java.time.Instant;
-import java.util.Base64;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Stream;
-import java.util.stream.StreamSupport;
-
-import static java.time.format.DateTimeFormatter.ISO_INSTANT;
/**
* @author <a href="http://tfox.org">Tim Fox</a>
@@ -44,6 +31,10 @@ public class Json {
public static ObjectMapper prettyMapper = new ObjectMapper();
static {
+ initialize();
+ }
+
+ private static void initialize() {
// Non-standard JSON but we allow C style comments in our JSON
mapper.configure(JsonParser.Feature.ALLOW_COMMENTS, true);
@@ -72,11 +63,7 @@ public class Json {
* @throws EncodeException if a property cannot be encoded.
*/
public static String encode(Object obj) throws EncodeException {
- try {
- return mapper.writeValueAsString(obj);
- } catch (Exception e) {
- throw new EncodeException("Failed to encode as JSON: " + e.getMessage());
- }
+ return JsonCodec.INSTANCE.toString(obj);
}
/**
@@ -87,11 +74,7 @@ public static String encode(Object obj) throws EncodeException {
* @throws EncodeException if a property cannot be encoded.
*/
public static Buffer encodeToBuffer(Object obj) throws EncodeException {
- try {
- return Buffer.buffer(mapper.writeValueAsBytes(obj));
- } catch (Exception e) {
- throw new EncodeException("Failed to encode as JSON: " + e.getMessage());
- }
+ return JsonCodec.INSTANCE.toBuffer(obj);
}
/**
@@ -102,11 +85,7 @@ public static Buffer encodeToBuffer(Object obj) throws EncodeException {
* @throws EncodeException if a property cannot be encoded.
*/
public static String encodePrettily(Object obj) throws EncodeException {
- try {
- return prettyMapper.writeValueAsString(obj);
- } catch (Exception e) {
- throw new EncodeException("Failed to encode as JSON: " + e.getMessage());
- }
+ return JsonCodec.INSTANCE.toString(obj, true);
}
/**
@@ -118,11 +97,7 @@ public static String encodePrettily(Object obj) throws EncodeException {
* @throws DecodeException when there is a parsing or invalid mapping.
*/
public static <T> T decodeValue(String str, Class<T> clazz) throws DecodeException {
- try {
- return mapper.readValue(str, clazz);
- } catch (Exception e) {
- throw new DecodeException("Failed to decode: " + e.getMessage());
- }
+ return JsonCodec.INSTANCE.fromString(str, clazz);
}
/**
@@ -134,20 +109,7 @@ public static <T> T decodeValue(String str, Class<T> clazz) throws DecodeExcepti
* @throws DecodeException when there is a parsing or invalid mapping.
*/
public static Object decodeValue(String str) throws DecodeException {
- try {
- Object value = mapper.readValue(str, Object.class);
- if (value instanceof List) {
- List list = (List) value;
- return new JsonArray(list);
- } else if (value instanceof Map) {
- @SuppressWarnings("unchecked")
- Map<String, Object> map = (Map<String, Object>) value;
- return new JsonObject(map);
- }
- return value;
- } catch (Exception e) {
- throw new DecodeException("Failed to decode: " + e.getMessage());
- }
+ return decodeValue(str, Object.class);
}
/**
@@ -159,11 +121,7 @@ public static Object decodeValue(String str) throws DecodeException {
* @throws DecodeException when there is a parsing or invalid mapping.
*/
public static <T> T decodeValue(String str, TypeReference<T> type) throws DecodeException {
- try {
- return mapper.readValue(str, type);
- } catch (Exception e) {
- throw new DecodeException("Failed to decode: " + e.getMessage(), e);
- }
+ return JacksonCodec.fromString(str, type);
}
/**
@@ -175,20 +133,7 @@ public static <T> T decodeValue(String str, TypeReference<T> type) throws Decode
* @throws DecodeException when there is a parsing or invalid mapping.
*/
public static Object decodeValue(Buffer buf) throws DecodeException {
- try {
- Object value = mapper.readValue((InputStream) new ByteBufInputStream(buf.getByteBuf()), Object.class);
- if (value instanceof List) {
- List list = (List) value;
- return new JsonArray(list);
- } else if (value instanceof Map) {
- @SuppressWarnings("unchecked")
- Map<String, Object> map = (Map<String, Object>) value;
- return new JsonObject(map);
- }
- return value;
- } catch (Exception e) {
- throw new DecodeException("Failed to decode: " + e.getMessage());
- }
+ return decodeValue(buf, Object.class);
}
/**
@@ -200,11 +145,7 @@ public static Object decodeValue(Buffer buf) throws DecodeException {
* @throws DecodeException when there is a parsing or invalid mapping.
*/
public static <T> T decodeValue(Buffer buf, TypeReference<T> type) throws DecodeException {
- try {
- return mapper.readValue(new ByteBufInputStream(buf.getByteBuf()), type);
- } catch (Exception e) {
- throw new DecodeException("Failed to decode:" + e.getMessage(), e);
- }
+ return JacksonCodec.fromBuffer(buf, type);
}
/**
@@ -216,113 +157,6 @@ public static <T> T decodeValue(Buffer buf, TypeReference<T> type) throws Decode
* @throws DecodeException when there is a parsing or invalid mapping.
*/
public static <T> T decodeValue(Buffer buf, Class<T> clazz) throws DecodeException {
- try {
- return mapper.readValue((InputStream) new ByteBufInputStream(buf.getByteBuf()), clazz);
- } catch (Exception e) {
- throw new DecodeException("Failed to decode:" + e.getMessage(), e);
- }
- }
-
- @SuppressWarnings("unchecked")
- static Object checkAndCopy(Object val, boolean copy) {
- if (val == null) {
- // OK
- } else if (val instanceof Number && !(val instanceof BigDecimal)) {
- // OK
- } else if (val instanceof Boolean) {
- // OK
- } else if (val instanceof String) {
- // OK
- } else if (val instanceof Character) {
- // OK
- } else if (val instanceof CharSequence) {
- val = val.toString();
- } else if (val instanceof JsonObject) {
- if (copy) {
- val = ((JsonObject) val).copy();
- }
- } else if (val instanceof JsonArray) {
- if (copy) {
- val = ((JsonArray) val).copy();
- }
- } else if (val instanceof Map) {
- if (copy) {
- val = (new JsonObject((Map)val)).copy();
- } else {
- val = new JsonObject((Map)val);
- }
- } else if (val instanceof List) {
- if (copy) {
- val = (new JsonArray((List)val)).copy();
- } else {
- val = new JsonArray((List)val);
- }
- } else if (val instanceof byte[]) {
- val = Base64.getEncoder().encodeToString((byte[])val);
- } else if (val instanceof Instant) {
- val = ISO_INSTANT.format((Instant) val);
- } else {
- throw new IllegalStateException("Illegal type in JsonObject: " + val.getClass());
- }
- return val;
- }
-
- static <T> Stream<T> asStream(Iterator<T> sourceIterator) {
- Iterable<T> iterable = () -> sourceIterator;
- return StreamSupport.stream(iterable.spliterator(), false);
- }
-
- private static class JsonObjectSerializer extends JsonSerializer<JsonObject> {
- @Override
- public void serialize(JsonObject value, JsonGenerator jgen, SerializerProvider provider) throws IOException {
- jgen.writeObject(value.getMap());
- }
- }
-
- private static class JsonArraySerializer extends JsonSerializer<JsonArray> {
- @Override
- public void serialize(JsonArray value, JsonGenerator jgen, SerializerProvider provider) throws IOException {
- jgen.writeObject(value.getList());
- }
- }
-
- private static class InstantSerializer extends JsonSerializer<Instant> {
- @Override
- public void serialize(Instant value, JsonGenerator jgen, SerializerProvider provider) throws IOException {
- jgen.writeString(ISO_INSTANT.format(value));
- }
- }
-
- private static class InstantDeserializer extends JsonDeserializer<Instant> {
- @Override
- public Instant deserialize(JsonParser p, DeserializationContext ctxt) throws IOException, JsonProcessingException {
- String text = p.getText();
- try {
- return Instant.from(ISO_INSTANT.parse(text));
- } catch (DateTimeException e) {
- throw new InvalidFormatException(p, "Expected an ISO 8601 formatted date time", text, Instant.class);
- }
- }
- }
-
- private static class ByteArraySerializer extends JsonSerializer<byte[]> {
-
- @Override
- public void serialize(byte[] value, JsonGenerator jgen, SerializerProvider provider) throws IOException {
- jgen.writeString(Base64.getEncoder().encodeToString(value));
- }
- }
-
- private static class ByteArrayDeserializer extends JsonDeserializer<byte[]> {
-
- @Override
- public byte[] deserialize(JsonParser p, DeserializationContext ctxt) throws IOException, JsonProcessingException {
- String text = p.getText();
- try {
- return Base64.getDecoder().decode(text);
- } catch (IllegalArgumentException e) {
- throw new InvalidFormatException(p, "Expected a base64 encoded byte array", text, Instant.class);
- }
- }
+ return JsonCodec.INSTANCE.fromBuffer(buf, clazz);
}
}
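The untyped decodeValue overloads keep their observable behaviour after the delegation: decoding to Object still adapts Maps and Lists to the Vert.x wrapper types. A quick sketch:

Object v1 = Json.decodeValue("{\"foo\":\"bar\"}"); // JsonObject (Map adapted)
Object v2 = Json.decodeValue("[1,2,3]");           // JsonArray (List adapted)
Object v3 = Json.decodeValue("42");                // Integer, passed through as-is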
diff --git a/src/main/java/io/vertx/core/json/JsonArray.java b/src/main/java/io/vertx/core/json/JsonArray.java
--- a/src/main/java/io/vertx/core/json/JsonArray.java
+++ b/src/main/java/io/vertx/core/json/JsonArray.java
@@ -14,6 +14,7 @@
import io.vertx.core.buffer.Buffer;
import io.vertx.core.shareddata.Shareable;
import io.vertx.core.shareddata.impl.ClusterSerializable;
+import io.vertx.core.spi.json.JsonCodec;
import java.time.Instant;
import java.util.*;
@@ -289,7 +290,7 @@ public boolean hasNull(int pos) {
/**
* Add an enum to the JSON array.
* <p>
- * JSON has no concept of encoding Enums, so the Enum will be converted to a String using the {@link java.lang.Enum#name}
+ * JSON has no concept of encoding Enums, so the Enum will be converted to a String using the {@link java.lang.Enum#name()}
* method and the value added as a String.
*
* @param value the value
@@ -442,7 +443,7 @@ public JsonArray add(Instant value) {
* @return a reference to this, so the API can be used fluently
*/
public JsonArray add(Object value) {
- value = Json.checkAndCopy(value, false);
+ value = JsonObject.checkAndCopy(value, false);
list.add(value);
return this;
}
@@ -550,7 +551,7 @@ public Iterator<Object> iterator() {
* @return the string encoding
*/
public String encode() {
- return Json.encode(list);
+ return JsonCodec.INSTANCE.toString(list, false);
}
/**
@@ -559,7 +560,7 @@ public String encode() {
* @return the buffer encoding.
*/
public Buffer toBuffer() {
- return Json.encodeToBuffer(list);
+ return JsonCodec.INSTANCE.toBuffer(list, false);
}
/**
@@ -568,7 +569,7 @@ public Buffer toBuffer() {
* @return the string encoding
*/
public String encodePrettily() {
- return Json.encodePrettily(list);
+ return JsonCodec.INSTANCE.toString(list, true);
}
/**
@@ -580,7 +581,7 @@ public String encodePrettily() {
public JsonArray copy() {
List<Object> copiedList = new ArrayList<>(list.size());
for (Object val: list) {
- val = Json.checkAndCopy(val, true);
+ val = JsonObject.checkAndCopy(val, true);
copiedList.add(val);
}
return new JsonArray(copiedList);
@@ -592,7 +593,7 @@ public JsonArray copy() {
* @return a Stream
*/
public Stream<Object> stream() {
- return Json.asStream(iterator());
+ return JsonObject.asStream(iterator());
}
@Override
@@ -657,11 +658,11 @@ public int readFromBuffer(int pos, Buffer buffer) {
}
private void fromJson(String json) {
- list = Json.decodeValue(json, List.class);
+ list = JsonCodec.INSTANCE.fromString(json, List.class);
}
private void fromBuffer(Buffer buf) {
- list = Json.decodeValue(buf, List.class);
+ list = JsonCodec.INSTANCE.fromBuffer(buf, List.class);
}
private class Iter implements Iterator<Object> {
diff --git a/src/main/java/io/vertx/core/json/JsonArraySerializer.java b/src/main/java/io/vertx/core/json/JsonArraySerializer.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/io/vertx/core/json/JsonArraySerializer.java
@@ -0,0 +1,14 @@
+package io.vertx.core.json;
+
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.databind.JsonSerializer;
+import com.fasterxml.jackson.databind.SerializerProvider;
+
+import java.io.IOException;
+
+class JsonArraySerializer extends JsonSerializer<JsonArray> {
+ @Override
+ public void serialize(JsonArray value, JsonGenerator jgen, SerializerProvider provider) throws IOException {
+ jgen.writeObject(value.getList());
+ }
+}
diff --git a/src/main/java/io/vertx/core/json/JsonObject.java b/src/main/java/io/vertx/core/json/JsonObject.java
--- a/src/main/java/io/vertx/core/json/JsonObject.java
+++ b/src/main/java/io/vertx/core/json/JsonObject.java
@@ -14,11 +14,14 @@
import io.vertx.core.buffer.Buffer;
import io.vertx.core.shareddata.Shareable;
import io.vertx.core.shareddata.impl.ClusterSerializable;
+import io.vertx.core.spi.json.JsonCodec;
+import java.math.BigDecimal;
import java.nio.charset.StandardCharsets;
import java.time.Instant;
import java.util.*;
import java.util.stream.Stream;
+import java.util.stream.StreamSupport;
import static java.time.format.DateTimeFormatter.ISO_INSTANT;
@@ -104,7 +107,7 @@ public static JsonObject mapFrom(Object obj) {
if (obj == null) {
return null;
} else {
- return new JsonObject((Map<String, Object>) Json.mapper.convertValue(obj, Map.class));
+ return new JsonObject((Map<String, Object>) JsonCodec.INSTANCE.fromValue(obj, Map.class));
}
}
@@ -118,7 +121,7 @@ public static JsonObject mapFrom(Object obj) {
* if the type cannot be instantiated.
*/
public <T> T mapTo(Class<T> type) {
- return Json.mapper.convertValue(map, type);
+ return JsonCodec.INSTANCE.fromValue(map, type);
}
/**
@@ -514,7 +517,7 @@ public Set<String> fieldNames() {
/**
* Put an Enum into the JSON object with the specified key.
* <p>
- * JSON has no concept of encoding Enums, so the Enum will be converted to a String using the {@link java.lang.Enum#name}
+ * JSON has no concept of encoding Enums, so the Enum will be converted to a String using the {@link java.lang.Enum#name()}
* method and the value put as a String.
*
* @param key the key
@@ -696,7 +699,7 @@ public JsonObject put(String key, Instant value) {
*/
public JsonObject put(String key, Object value) {
Objects.requireNonNull(key);
- value = Json.checkAndCopy(value, false);
+ value = checkAndCopy(value, false);
map.put(key, value);
return this;
}
@@ -779,7 +782,7 @@ public JsonObject mergeIn(JsonObject other, int depth) {
* @return the string encoding.
*/
public String encode() {
- return Json.encode(map);
+ return JsonCodec.INSTANCE.toString(map, false);
}
/**
@@ -789,7 +792,7 @@ public String encode() {
* @return the pretty string encoding.
*/
public String encodePrettily() {
- return Json.encodePrettily(map);
+ return JsonCodec.INSTANCE.toString(map, true);
}
/**
@@ -798,7 +801,7 @@ public String encodePrettily() {
* @return the buffer encoding.
*/
public Buffer toBuffer() {
- return Json.encodeToBuffer(map);
+ return JsonCodec.INSTANCE.toBuffer(map, false);
}
/**
@@ -816,7 +819,7 @@ public JsonObject copy() {
}
for (Map.Entry<String, Object> entry: map.entrySet()) {
Object val = entry.getValue();
- val = Json.checkAndCopy(val, true);
+ val = checkAndCopy(val, true);
copiedMap.put(entry.getKey(), val);
}
return new JsonObject(copiedMap);
@@ -839,7 +842,7 @@ public Map<String, Object> getMap() {
* @return a stream of the entries.
*/
public Stream<Map.Entry<String, Object>> stream() {
- return Json.asStream(iterator());
+ return asStream(iterator());
}
/**
@@ -968,11 +971,11 @@ public int readFromBuffer(int pos, Buffer buffer) {
}
private void fromJson(String json) {
- map = Json.decodeValue(json, Map.class);
+ map = JsonCodec.INSTANCE.fromString(json, Map.class);
}
private void fromBuffer(Buffer buf) {
- map = Json.decodeValue(buf, Map.class);
+ map = JsonCodec.INSTANCE.fromBuffer(buf, Map.class);
}
private class Iter implements Iterator<Map.Entry<String, Object>> {
@@ -1029,4 +1032,54 @@ public Object setValue(Object value) {
throw new UnsupportedOperationException();
}
}
+
+ @SuppressWarnings("unchecked")
+ static Object checkAndCopy(Object val, boolean copy) {
+ if (val == null) {
+ // OK
+ } else if (val instanceof Number && !(val instanceof BigDecimal)) {
+ // OK
+ } else if (val instanceof Boolean) {
+ // OK
+ } else if (val instanceof String) {
+ // OK
+ } else if (val instanceof Character) {
+ // OK
+ } else if (val instanceof CharSequence) {
+ val = val.toString();
+ } else if (val instanceof JsonObject) {
+ if (copy) {
+ val = ((JsonObject) val).copy();
+ }
+ } else if (val instanceof JsonArray) {
+ if (copy) {
+ val = ((JsonArray) val).copy();
+ }
+ } else if (val instanceof Map) {
+ if (copy) {
+ val = (new JsonObject((Map)val)).copy();
+ } else {
+ val = new JsonObject((Map)val);
+ }
+ } else if (val instanceof List) {
+ if (copy) {
+ val = (new JsonArray((List)val)).copy();
+ } else {
+ val = new JsonArray((List)val);
+ }
+ } else if (val instanceof byte[]) {
+ val = Base64.getEncoder().encodeToString((byte[])val);
+ } else if (val instanceof Instant) {
+ val = ISO_INSTANT.format((Instant) val);
+ } else {
+ throw new IllegalStateException("Illegal type in JsonObject: " + val.getClass());
+ }
+ return val;
+ }
+
+ static <T> Stream<T> asStream(Iterator<T> sourceIterator) {
+ Iterable<T> iterable = () -> sourceIterator;
+ return StreamSupport.stream(iterable.spliterator(), false);
+ }
+
}
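checkAndCopy and asStream move here verbatim; as a reminder of the wrapping they imply on put (behaviour read off the code above, nothing new):

JsonObject obj = new JsonObject()
  .put("child", Collections.singletonMap("k", "v")) // Map is wrapped in a JsonObject, not copied
  .put("bin", new byte[]{1, 2, 3});                 // byte[] is stored as its Base64 string
obj.getValue("child");  // a JsonObject
obj.getValue("bin");    // the String "AQID"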
diff --git a/src/main/java/io/vertx/core/json/JsonObjectSerializer.java b/src/main/java/io/vertx/core/json/JsonObjectSerializer.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/io/vertx/core/json/JsonObjectSerializer.java
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2011-2017 Contributors to the Eclipse Foundation
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+ * which is available at https://www.apache.org/licenses/LICENSE-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+ */
+package io.vertx.core.json;
+
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.databind.JsonSerializer;
+import com.fasterxml.jackson.databind.SerializerProvider;
+
+import java.io.IOException;
+
+class JsonObjectSerializer extends JsonSerializer<JsonObject> {
+ @Override
+ public void serialize(JsonObject value, JsonGenerator jgen, SerializerProvider provider) throws IOException {
+ jgen.writeObject(value.getMap());
+ }
+}
diff --git a/src/main/java/io/vertx/core/json/impl/JacksonCodec.java b/src/main/java/io/vertx/core/json/impl/JacksonCodec.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/io/vertx/core/json/impl/JacksonCodec.java
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2011-2017 Contributors to the Eclipse Foundation
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+ * which is available at https://www.apache.org/licenses/LICENSE-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+ */
+
+package io.vertx.core.json.impl;
+
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import io.netty.buffer.ByteBufInputStream;
+import io.vertx.core.buffer.Buffer;
+import io.vertx.core.json.DecodeException;
+import io.vertx.core.json.EncodeException;
+import io.vertx.core.json.Json;
+import io.vertx.core.json.JsonArray;
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.spi.json.JsonCodec;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * @author <a href="mailto:julien@julienviet.com">Julien Viet</a>
+ */
+public class JacksonCodec implements JsonCodec {
+
+ @Override
+ public <T> T fromValue(Object json, Class<T> clazz) {
+ T value = Json.mapper.convertValue(json, clazz);
+ if (clazz == Object.class) {
+ value = (T) adapt(value);
+ }
+ return value;
+ }
+
+ public static <T> T fromValue(Object json, TypeReference<T> type) {
+ T value = Json.mapper.convertValue(json, type);
+ if (type.getType() == Object.class) {
+ value = (T) adapt(value);
+ }
+ return value;
+ }
+
+ @Override
+ public <T> T fromString(String str, Class<T> clazz) throws DecodeException {
+ return fromParser(createParser(str), clazz);
+ }
+
+ public static <T> T fromString(String str, TypeReference<T> type) throws DecodeException {
+ return fromParser(createParser(str), type);
+ }
+
+ @Override
+ public <T> T fromBuffer(Buffer buf, Class<T> clazz) throws DecodeException {
+ return fromParser(createParser(buf), clazz);
+ }
+
+ public static <T> T fromBuffer(Buffer buf, TypeReference<T> type) throws DecodeException {
+ return fromParser(createParser(buf), type);
+ }
+
+ private static JsonParser createParser(Buffer buf) {
+ try {
+ return Json.mapper.getFactory().createParser((InputStream) new ByteBufInputStream(buf.getByteBuf()));
+ } catch (IOException e) {
+ throw new DecodeException("Failed to decode:" + e.getMessage(), e);
+ }
+ }
+
+ private static JsonParser createParser(String str) {
+ try {
+ return Json.mapper.getFactory().createParser(str);
+ } catch (IOException e) {
+ throw new DecodeException("Failed to decode:" + e.getMessage(), e);
+ }
+ }
+
+ private static <T> T fromParser(JsonParser parser, Class<T> type) throws DecodeException {
+ T value;
+ try {
+ value = Json.mapper.readValue(parser, type);
+ } catch (Exception e) {
+ throw new DecodeException("Failed to decode:" + e.getMessage(), e);
+ } finally {
+ close(parser);
+ }
+ if (type == Object.class) {
+ value = (T) adapt(value);
+ }
+ return value;
+ }
+
+ private static <T> T fromParser(JsonParser parser, TypeReference<T> type) throws DecodeException {
+ T value;
+ try {
+ value = Json.mapper.readValue(parser, type);
+ } catch (Exception e) {
+ throw new DecodeException("Failed to decode:" + e.getMessage(), e);
+ } finally {
+ close(parser);
+ }
+ if (type.getType() == Object.class) {
+ value = (T) adapt(value);
+ }
+ return value;
+ }
+
+ private static void close(JsonParser parser) {
+ try {
+ parser.close();
+ } catch (IOException ignore) {
+ }
+ }
+
+ private static Object adapt(Object o) {
+ try {
+ if (o instanceof List) {
+ List list = (List) o;
+ return new JsonArray(list);
+ } else if (o instanceof Map) {
+ @SuppressWarnings("unchecked")
+ Map<String, Object> map = (Map<String, Object>) o;
+ return new JsonObject(map);
+ }
+ return o;
+ } catch (Exception e) {
+ throw new DecodeException("Failed to decode: " + e.getMessage());
+ }
+ }
+
+
+ @Override
+ public String toString(Object object, boolean pretty) throws EncodeException {
+ try {
+ ObjectMapper mapper = pretty ? Json.prettyMapper : Json.mapper;
+ return mapper.writeValueAsString(object);
+ } catch (Exception e) {
+ throw new EncodeException("Failed to encode as JSON: " + e.getMessage());
+ }
+ }
+
+ @Override
+ public Buffer toBuffer(Object object, boolean pretty) throws EncodeException {
+ try {
+ ObjectMapper mapper = pretty ? Json.prettyMapper : Json.mapper;
+ return Buffer.buffer(mapper.writeValueAsBytes(object));
+ } catch (Exception e) {
+ throw new EncodeException("Failed to encode as JSON: " + e.getMessage());
+ }
+ }
+}
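The static TypeReference overloads exist because the JsonCodec SPI below is deliberately limited to Class<T> targets, so generic decoding stays Jackson-specific. A usage sketch (the payload is made up for illustration):

String json = "[{\"value\":\"test\"}]";
List<Map<String, Object>> decoded =
    JacksonCodec.fromString(json, new TypeReference<List<Map<String, Object>>>() {});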
diff --git a/src/main/java/io/vertx/core/parsetools/impl/JsonEventImpl.java b/src/main/java/io/vertx/core/parsetools/impl/JsonEventImpl.java
--- a/src/main/java/io/vertx/core/parsetools/impl/JsonEventImpl.java
+++ b/src/main/java/io/vertx/core/parsetools/impl/JsonEventImpl.java
@@ -18,8 +18,10 @@
import io.vertx.core.json.Json;
import io.vertx.core.json.JsonArray;
import io.vertx.core.json.JsonObject;
+import io.vertx.core.json.impl.JacksonCodec;
import io.vertx.core.parsetools.JsonEvent;
import io.vertx.core.parsetools.JsonEventType;
+import io.vertx.core.spi.json.JsonCodec;
import java.time.Instant;
import java.util.Base64;
@@ -101,7 +103,7 @@ public <T> T mapTo(Class<T> type) {
throw new DecodeException(e.getMessage());
}
} else {
- return Json.decodeValue(String.valueOf(value), type);
+ return JsonCodec.INSTANCE.fromValue(value, type);
}
}
@@ -114,7 +116,7 @@ public <T> T mapTo(TypeReference<T> type) {
throw new DecodeException(e.getMessage());
}
} else {
- return Json.decodeValue(String.valueOf(value), type);
+ return JacksonCodec.fromValue(value, type);
}
}
diff --git a/src/main/java/io/vertx/core/spi/json/JsonCodec.java b/src/main/java/io/vertx/core/spi/json/JsonCodec.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/io/vertx/core/spi/json/JsonCodec.java
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2011-2017 Contributors to the Eclipse Foundation
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+ * which is available at https://www.apache.org/licenses/LICENSE-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+ */
+
+package io.vertx.core.spi.json;
+
+import io.vertx.core.ServiceHelper;
+import io.vertx.core.buffer.Buffer;
+import io.vertx.core.json.DecodeException;
+import io.vertx.core.json.EncodeException;
+
+/**
+ * @author <a href="mailto:julien@julienviet.com">Julien Viet</a>
+ */
+public interface JsonCodec {
+
+ JsonCodec INSTANCE = ServiceHelper.loadFactory(JsonCodec.class);
+
+ /**
+   * Decode the provided {@code json} string to an instance of {@code clazz}.
+ *
+ * @param json the json string
+ * @param clazz the required object's class
+ * @return the instance
+ * @throws DecodeException anything preventing the decoding
+ */
+ <T> T fromString(String json, Class<T> clazz) throws DecodeException;
+
+ /**
+ * Like {@link #fromString(String, Class)} but with a json {@link Buffer}
+ */
+ <T> T fromBuffer(Buffer json, Class<T> clazz) throws DecodeException;
+
+ /**
+ * Like {@link #fromString(String, Class)} but with a json {@code Object}
+ */
+ <T> T fromValue(Object json, Class<T> toValueType);
+
+ /**
+ * Encode the specified {@code object} to a string.
+ */
+ default String toString(Object object) throws EncodeException {
+ return toString(object, false);
+ }
+
+ /**
+ * Encode the specified {@code object} to a string.
+ *
+ * @param object the object to encode
+ * @param pretty {@code true} to format the string prettily
+ * @return the json encoded string
+   * @throws EncodeException anything preventing the encoding
+ */
+ String toString(Object object, boolean pretty) throws EncodeException;
+
+ /**
+ * Like {@link #toString(Object, boolean)} but with a json {@link Buffer}
+ */
+ Buffer toBuffer(Object object, boolean pretty) throws EncodeException;
+
+ /**
+ * Like {@link #toString(Object)} but with a json {@link Buffer}
+ */
+ default Buffer toBuffer(Object object) throws EncodeException {
+ return toBuffer(object, false);
+ }
+}
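Resolution goes through plain java.util.ServiceLoader (via ServiceHelper), so implementations are expected to be declared under META-INF/services/io.vertx.core.spi.json.JsonCodec (the registration file itself is not shown in this diff). A consumption sketch:

JsonCodec codec = JsonCodec.INSTANCE;                            // resolved once at class load
Buffer buf = codec.toBuffer(new JsonObject().put("foo", "bar")); // uses JsonObjectSerializer
Map<?, ?> map = codec.fromBuffer(buf, Map.class);                // raw decode, no adapt
Object adapted = codec.fromBuffer(buf, Object.class);            // a JsonObject via adapt()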
| diff --git a/src/test/java/io/vertx/core/json/JacksonDatabindTest.java b/src/test/java/io/vertx/core/json/JacksonDatabindTest.java
new file mode 100644
--- /dev/null
+++ b/src/test/java/io/vertx/core/json/JacksonDatabindTest.java
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2014 Red Hat, Inc. and others
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+ * which is available at https://www.apache.org/licenses/LICENSE-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+ */
+
+package io.vertx.core.json;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import io.vertx.core.buffer.Buffer;
+import io.vertx.test.core.TestUtils;
+import io.vertx.test.core.VertxTestBase;
+import org.junit.Test;
+
+import java.time.Instant;
+import java.util.*;
+
+import static java.time.format.DateTimeFormatter.ISO_INSTANT;
+
+/**
+ * @author <a href="http://tfox.org">Tim Fox</a>
+ */
+public class JacksonDatabindTest extends VertxTestBase {
+
+ @Test
+ public void testGetSetMapper() {
+ ObjectMapper mapper = Json.mapper;
+ assertNotNull(mapper);
+ ObjectMapper newMapper = new ObjectMapper();
+ Json.mapper = newMapper;
+ assertSame(newMapper, Json.mapper);
+ Json.mapper = mapper;
+ }
+
+ @Test
+ public void testGetSetPrettyMapper() {
+ ObjectMapper mapper = Json.prettyMapper;
+ assertNotNull(mapper);
+ ObjectMapper newMapper = new ObjectMapper();
+ Json.prettyMapper = newMapper;
+ assertSame(newMapper, Json.prettyMapper);
+ Json.prettyMapper = mapper;
+ }
+
+ @Test
+ public void testGenericDecoding() {
+ Pojo original = new Pojo();
+ original.value = "test";
+
+ String json = Json.encode(Collections.singletonList(original));
+ List<Pojo> correct;
+
+ correct = Json.decodeValue(json, new TypeReference<List<Pojo>>() {});
+ assertTrue(((List)correct).get(0) instanceof Pojo);
+ assertEquals(original.value, correct.get(0).value);
+
+ // same must apply if instead of string we use a buffer
+ correct = Json.decodeValue(Buffer.buffer(json, "UTF8"), new TypeReference<List<Pojo>>() {});
+ assertTrue(((List)correct).get(0) instanceof Pojo);
+ assertEquals(original.value, correct.get(0).value);
+
+ List incorrect = Json.decodeValue(json, List.class);
+ assertFalse(incorrect.get(0) instanceof Pojo);
+ assertTrue(incorrect.get(0) instanceof Map);
+ assertEquals(original.value, ((Map)(incorrect.get(0))).get("value"));
+ }
+
+ @Test
+ public void testInstantDecoding() {
+ Pojo original = new Pojo();
+ original.instant = Instant.from(ISO_INSTANT.parse("2018-06-20T07:25:38.397Z"));
+ Pojo decoded = Json.decodeValue("{\"instant\":\"2018-06-20T07:25:38.397Z\"}", Pojo.class);
+ assertEquals(original.instant, decoded.instant);
+ }
+
+ @Test
+ public void testNullInstantDecoding() {
+ Pojo original = new Pojo();
+ Pojo decoded = Json.decodeValue("{\"instant\":null}", Pojo.class);
+ assertEquals(original.instant, decoded.instant);
+ }
+
+ @Test
+ public void testBytesDecoding() {
+ Pojo original = new Pojo();
+ original.bytes = TestUtils.randomByteArray(12);
+ Pojo decoded = Json.decodeValue("{\"bytes\":\"" + Base64.getEncoder().encodeToString(original.bytes) + "\"}", Pojo.class);
+ assertArrayEquals(original.bytes, decoded.bytes);
+ }
+
+ @Test
+ public void testNullBytesDecoding() {
+ Pojo original = new Pojo();
+ Pojo decoded = Json.decodeValue("{\"bytes\":null}", Pojo.class);
+ assertEquals(original.bytes, decoded.bytes);
+ }
+
+ private static class Pojo {
+ @JsonProperty
+ String value;
+ @JsonProperty
+ Instant instant;
+ @JsonProperty
+ byte[] bytes;
+ }
+}
diff --git a/src/test/java/io/vertx/core/json/JsonArrayTest.java b/src/test/java/io/vertx/core/json/JsonArrayTest.java
--- a/src/test/java/io/vertx/core/json/JsonArrayTest.java
+++ b/src/test/java/io/vertx/core/json/JsonArrayTest.java
@@ -12,7 +12,6 @@
package io.vertx.core.json;
import io.vertx.core.buffer.Buffer;
-import io.vertx.core.impl.Utils;
import io.vertx.test.core.TestUtils;
import org.junit.Before;
import org.junit.Test;
@@ -792,129 +791,12 @@ public void testInvalidValsOnCopy3() {
class SomeClass {
}
- @Test
- public void testEncode() throws Exception {
- jsonArray.add("foo");
- jsonArray.add(123);
- jsonArray.add(1234l);
- jsonArray.add(1.23f);
- jsonArray.add(2.34d);
- jsonArray.add(true);
- byte[] bytes = TestUtils.randomByteArray(10);
- jsonArray.add(bytes);
- jsonArray.addNull();
- jsonArray.add(new JsonObject().put("foo", "bar"));
- jsonArray.add(new JsonArray().add("foo").add(123));
- String strBytes = Base64.getEncoder().encodeToString(bytes);
- String expected = "[\"foo\",123,1234,1.23,2.34,true,\"" + strBytes + "\",null,{\"foo\":\"bar\"},[\"foo\",123]]";
- String json = jsonArray.encode();
- assertEquals(expected, json);
- }
-
- @Test
- public void testEncodeToBuffer() throws Exception {
- jsonArray.add("foo");
- jsonArray.add(123);
- jsonArray.add(1234l);
- jsonArray.add(1.23f);
- jsonArray.add(2.34d);
- jsonArray.add(true);
- byte[] bytes = TestUtils.randomByteArray(10);
- jsonArray.add(bytes);
- jsonArray.addNull();
- jsonArray.add(new JsonObject().put("foo", "bar"));
- jsonArray.add(new JsonArray().add("foo").add(123));
- String strBytes = Base64.getEncoder().encodeToString(bytes);
- Buffer expected = Buffer.buffer("[\"foo\",123,1234,1.23,2.34,true,\"" + strBytes + "\",null,{\"foo\":\"bar\"},[\"foo\",123]]", "UTF-8");
- Buffer json = jsonArray.toBuffer();
- assertArrayEquals(expected.getBytes(), json.getBytes());
- }
-
- @Test
- public void testDecode() {
- byte[] bytes = TestUtils.randomByteArray(10);
- String strBytes = Base64.getEncoder().encodeToString(bytes);
- Instant now = Instant.now();
- String strInstant = ISO_INSTANT.format(now);
- String json = "[\"foo\",123,1234,1.23,2.34,true,\"" + strBytes + "\",\"" + strInstant + "\",null,{\"foo\":\"bar\"},[\"foo\",123]]";
- JsonArray arr = new JsonArray(json);
- assertEquals("foo", arr.getString(0));
- assertEquals(Integer.valueOf(123), arr.getInteger(1));
- assertEquals(Long.valueOf(1234l), arr.getLong(2));
- assertEquals(Float.valueOf(1.23f), arr.getFloat(3));
- assertEquals(Double.valueOf(2.34d), arr.getDouble(4));
- assertEquals(true, arr.getBoolean(5));
- assertArrayEquals(bytes, arr.getBinary(6));
- assertEquals(Base64.getEncoder().encodeToString(bytes), arr.getValue(6));
- assertEquals(now, arr.getInstant(7));
- assertEquals(now.toString(), arr.getValue(7));
- assertTrue(arr.hasNull(8));
- JsonObject obj = arr.getJsonObject(9);
- assertEquals("bar", obj.getString("foo"));
- JsonArray arr2 = arr.getJsonArray(10);
- assertEquals("foo", arr2.getString(0));
- assertEquals(Integer.valueOf(123), arr2.getInteger(1));
- }
-
- @Test
- public void testEncodePrettily() throws Exception {
- jsonArray.add("foo");
- jsonArray.add(123);
- jsonArray.add(1234l);
- jsonArray.add(1.23f);
- jsonArray.add(2.34d);
- jsonArray.add(true);
- byte[] bytes = TestUtils.randomByteArray(10);
- jsonArray.add(bytes);
- jsonArray.addNull();
- jsonArray.add(new JsonObject().put("foo", "bar"));
- jsonArray.add(new JsonArray().add("foo").add(123));
- String strBytes = Base64.getEncoder().encodeToString(bytes);
- String expected = "[ \"foo\", 123, 1234, 1.23, 2.34, true, \"" + strBytes + "\", null, {" + Utils.LINE_SEPARATOR +
- " \"foo\" : \"bar\"" + Utils.LINE_SEPARATOR +
- "}, [ \"foo\", 123 ] ]";
- String json = jsonArray.encodePrettily();
- assertEquals(expected, json);
- }
-
@Test
public void testToString() {
jsonArray.add("foo").add(123);
assertEquals(jsonArray.encode(), jsonArray.toString());
}
- // Strict JSON doesn't allow comments but we do so users can add comments to config files etc
- @Test
- public void testCommentsInJson() {
- String jsonWithComments =
- "// single line comment\n" +
- "/*\n" +
- " This is a multi \n" +
- " line comment\n" +
- "*/\n" +
- "[\n" +
- "// another single line comment this time inside the JSON array itself\n" +
- " \"foo\", \"bar\" // and a single line comment at end of line \n" +
- "/*\n" +
- " This is a another multi \n" +
- " line comment this time inside the JSON array itself\n" +
- "*/\n" +
- "]";
- JsonArray json = new JsonArray(jsonWithComments);
- assertEquals("[\"foo\",\"bar\"]", json.encode());
- }
-
- @Test
- public void testInvalidJson() {
- String invalid = "qiwjdoiqwjdiqwjd";
- try {
- new JsonArray(invalid);
- fail();
- } catch (DecodeException e) {
- // OK
- }
- }
-
@Test
public void testGetList() {
JsonObject obj = new JsonObject().put("quux", "wibble");
@@ -1085,46 +967,16 @@ private void testStreamCorrectTypes(JsonObject object) {
@Test
public void testInvalidConstruction() {
- try {
- new JsonArray("null");
- fail();
- } catch (DecodeException ignore) {
- }
try {
new JsonArray((String) null);
fail();
} catch (NullPointerException ignore) {
}
- try {
- new JsonArray("3");
- fail();
- } catch (DecodeException ignore) {
- }
- try {
- new JsonArray("\"3");
- fail();
- } catch (DecodeException ignore) {
- }
- try {
- new JsonArray(Buffer.buffer("null"));
- fail();
- } catch (DecodeException ignore) {
- }
try {
new JsonArray((Buffer) null);
fail();
} catch (NullPointerException ignore) {
}
- try {
- new JsonArray(Buffer.buffer("3"));
- fail();
- } catch (DecodeException ignore) {
- }
- try {
- new JsonArray(Buffer.buffer("\"3"));
- fail();
- } catch (DecodeException ignore) {
- }
try {
new JsonArray((List) null);
fail();
diff --git a/src/test/java/io/vertx/core/json/JsonCodecTest.java b/src/test/java/io/vertx/core/json/JsonCodecTest.java
new file mode 100644
--- /dev/null
+++ b/src/test/java/io/vertx/core/json/JsonCodecTest.java
@@ -0,0 +1,433 @@
+/*
+ * Copyright (c) 2011-2019 Contributors to the Eclipse Foundation
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+ * which is available at https://www.apache.org/licenses/LICENSE-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+ */
+package io.vertx.core.json;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import io.vertx.core.buffer.Buffer;
+import io.vertx.core.impl.Utils;
+import io.vertx.test.core.TestUtils;
+import org.junit.Test;
+
+import java.lang.reflect.ParameterizedType;
+import java.lang.reflect.Type;
+import java.nio.charset.StandardCharsets;
+import java.time.Instant;
+import java.util.Arrays;
+import java.util.Base64;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import static java.time.format.DateTimeFormatter.ISO_INSTANT;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+public class JsonCodecTest {
+
+ private static final TypeReference<Integer> INTEGER_TYPE_REF = new TypeReference<Integer>() {};
+ private static final TypeReference<Long> LONG_TYPE_REF = new TypeReference<Long>() {};
+ private static final TypeReference<String> STRING_TYPE_REF = new TypeReference<String>() {};
+ private static final TypeReference<Float> FLOAT_TYPE_REF = new TypeReference<Float>() {};
+ private static final TypeReference<Double> DOUBLE_TYPE_REF = new TypeReference<Double>() {};
+ private static final TypeReference<Map<String, Object>> MAP_TYPE_REF = new TypeReference<Map<String, Object>>() {};
+ private static final TypeReference<List<Object>> LIST_TYPE_REF = new TypeReference<List<Object>>() {};
+ private static final TypeReference<Boolean> BOOLEAN_TYPE_REF = new TypeReference<Boolean>() {};
+
+ @Test
+ public void testEncodeJsonObject() {
+ JsonObject jsonObject = new JsonObject();
+ jsonObject.put("mystr", "foo");
+ jsonObject.put("mycharsequence", new StringBuilder("oob"));
+ jsonObject.put("myint", 123);
+ jsonObject.put("mylong", 1234l);
+ jsonObject.put("myfloat", 1.23f);
+ jsonObject.put("mydouble", 2.34d);
+ jsonObject.put("myboolean", true);
+ byte[] bytes = TestUtils.randomByteArray(10);
+ jsonObject.put("mybinary", bytes);
+ Instant now = Instant.now();
+ jsonObject.put("myinstant", now);
+ jsonObject.putNull("mynull");
+ jsonObject.put("myobj", new JsonObject().put("foo", "bar"));
+ jsonObject.put("myarr", new JsonArray().add("foo").add(123));
+ String strBytes = Base64.getEncoder().encodeToString(bytes);
+ String expected = "{\"mystr\":\"foo\",\"mycharsequence\":\"oob\",\"myint\":123,\"mylong\":1234,\"myfloat\":1.23,\"mydouble\":2.34,\"" +
+ "myboolean\":true,\"mybinary\":\"" + strBytes + "\",\"myinstant\":\"" + ISO_INSTANT.format(now) + "\",\"mynull\":null,\"myobj\":{\"foo\":\"bar\"},\"myarr\":[\"foo\",123]}";
+ String json = jsonObject.encode();
+ assertEquals(expected, json);
+ }
+
+ @Test
+ public void testEncodeJsonArray() {
+ JsonArray jsonArray = new JsonArray();
+ jsonArray.add("foo");
+ jsonArray.add(123);
+ jsonArray.add(1234L);
+ jsonArray.add(1.23f);
+ jsonArray.add(2.34d);
+ jsonArray.add(true);
+ byte[] bytes = TestUtils.randomByteArray(10);
+ jsonArray.add(bytes);
+ jsonArray.addNull();
+ jsonArray.add(new JsonObject().put("foo", "bar"));
+ jsonArray.add(new JsonArray().add("foo").add(123));
+ String strBytes = Base64.getEncoder().encodeToString(bytes);
+ String expected = "[\"foo\",123,1234,1.23,2.34,true,\"" + strBytes + "\",null,{\"foo\":\"bar\"},[\"foo\",123]]";
+ String json = jsonArray.encode();
+ assertEquals(expected, json);
+ }
+
+ @Test
+ public void testEncodeJsonObjectToBuffer() {
+ JsonObject jsonObject = new JsonObject();
+ jsonObject.put("mystr", "foo");
+ jsonObject.put("mycharsequence", new StringBuilder("oob"));
+ jsonObject.put("myint", 123);
+ jsonObject.put("mylong", 1234l);
+ jsonObject.put("myfloat", 1.23f);
+ jsonObject.put("mydouble", 2.34d);
+ jsonObject.put("myboolean", true);
+ byte[] bytes = TestUtils.randomByteArray(10);
+ jsonObject.put("mybinary", bytes);
+ Instant now = Instant.now();
+ jsonObject.put("myinstant", now);
+ jsonObject.putNull("mynull");
+ jsonObject.put("myobj", new JsonObject().put("foo", "bar"));
+ jsonObject.put("myarr", new JsonArray().add("foo").add(123));
+ String strBytes = Base64.getEncoder().encodeToString(bytes);
+
+ Buffer expected = Buffer.buffer("{\"mystr\":\"foo\",\"mycharsequence\":\"oob\",\"myint\":123,\"mylong\":1234,\"myfloat\":1.23,\"mydouble\":2.34,\"" +
+ "myboolean\":true,\"mybinary\":\"" + strBytes + "\",\"myinstant\":\"" + ISO_INSTANT.format(now) + "\",\"mynull\":null,\"myobj\":{\"foo\":\"bar\"},\"myarr\":[\"foo\",123]}", "UTF-8");
+
+ Buffer json = jsonObject.toBuffer();
+ assertArrayEquals(expected.getBytes(), json.getBytes());
+ }
+
+ @Test
+ public void testEncodeJsonArrayToBuffer() {
+ JsonArray jsonArray = new JsonArray();
+ jsonArray.add("foo");
+ jsonArray.add(123);
+ jsonArray.add(1234l);
+ jsonArray.add(1.23f);
+ jsonArray.add(2.34d);
+ jsonArray.add(true);
+ byte[] bytes = TestUtils.randomByteArray(10);
+ jsonArray.add(bytes);
+ jsonArray.addNull();
+ jsonArray.add(new JsonObject().put("foo", "bar"));
+ jsonArray.add(new JsonArray().add("foo").add(123));
+ String strBytes = Base64.getEncoder().encodeToString(bytes);
+ Buffer expected = Buffer.buffer("[\"foo\",123,1234,1.23,2.34,true,\"" + strBytes + "\",null,{\"foo\":\"bar\"},[\"foo\",123]]", "UTF-8");
+ Buffer json = jsonArray.toBuffer();
+ assertArrayEquals(expected.getBytes(), json.getBytes());
+ }
+
+
+ @Test
+ public void testEncodeJsonObjectPrettily() {
+ JsonObject jsonObject = new JsonObject();
+ jsonObject.put("mystr", "foo");
+ jsonObject.put("myint", 123);
+ jsonObject.put("mylong", 1234l);
+ jsonObject.put("myfloat", 1.23f);
+ jsonObject.put("mydouble", 2.34d);
+ jsonObject.put("myboolean", true);
+ byte[] bytes = TestUtils.randomByteArray(10);
+ jsonObject.put("mybinary", bytes);
+ Instant now = Instant.now();
+ jsonObject.put("myinstant", now);
+ jsonObject.put("myobj", new JsonObject().put("foo", "bar"));
+ jsonObject.put("myarr", new JsonArray().add("foo").add(123));
+ String strBytes = Base64.getEncoder().encodeToString(bytes);
+ String strInstant = ISO_INSTANT.format(now);
+ String expected = "{" + Utils.LINE_SEPARATOR +
+ " \"mystr\" : \"foo\"," + Utils.LINE_SEPARATOR +
+ " \"myint\" : 123," + Utils.LINE_SEPARATOR +
+ " \"mylong\" : 1234," + Utils.LINE_SEPARATOR +
+ " \"myfloat\" : 1.23," + Utils.LINE_SEPARATOR +
+ " \"mydouble\" : 2.34," + Utils.LINE_SEPARATOR +
+ " \"myboolean\" : true," + Utils.LINE_SEPARATOR +
+ " \"mybinary\" : \"" + strBytes + "\"," + Utils.LINE_SEPARATOR +
+ " \"myinstant\" : \"" + strInstant + "\"," + Utils.LINE_SEPARATOR +
+ " \"myobj\" : {" + Utils.LINE_SEPARATOR +
+ " \"foo\" : \"bar\"" + Utils.LINE_SEPARATOR +
+ " }," + Utils.LINE_SEPARATOR +
+ " \"myarr\" : [ \"foo\", 123 ]" + Utils.LINE_SEPARATOR +
+ "}";
+ String json = jsonObject.encodePrettily();
+ assertEquals(expected, json);
+ }
+
+ @Test
+ public void testEncodeJsonArrayPrettily() {
+ JsonArray jsonArray = new JsonArray();
+ jsonArray.add("foo");
+ jsonArray.add(123);
+ jsonArray.add(1234l);
+ jsonArray.add(1.23f);
+ jsonArray.add(2.34d);
+ jsonArray.add(true);
+ byte[] bytes = TestUtils.randomByteArray(10);
+ jsonArray.add(bytes);
+ jsonArray.addNull();
+ jsonArray.add(new JsonObject().put("foo", "bar"));
+ jsonArray.add(new JsonArray().add("foo").add(123));
+ String strBytes = Base64.getEncoder().encodeToString(bytes);
+ String expected = "[ \"foo\", 123, 1234, 1.23, 2.34, true, \"" + strBytes + "\", null, {" + Utils.LINE_SEPARATOR +
+ " \"foo\" : \"bar\"" + Utils.LINE_SEPARATOR +
+ "}, [ \"foo\", 123 ] ]";
+ String json = jsonArray.encodePrettily();
+ assertEquals(expected, json);
+ }
+
+ @Test
+ public void testDecodeJsonObject() {
+ byte[] bytes = TestUtils.randomByteArray(10);
+ String strBytes = Base64.getEncoder().encodeToString(bytes);
+ Instant now = Instant.now();
+ String strInstant = ISO_INSTANT.format(now);
+ String json = "{\"mystr\":\"foo\",\"myint\":123,\"mylong\":1234,\"myfloat\":1.23,\"mydouble\":2.34,\"" +
+ "myboolean\":true,\"mybinary\":\"" + strBytes + "\",\"myinstant\":\"" + strInstant + "\",\"mynull\":null,\"myobj\":{\"foo\":\"bar\"},\"myarr\":[\"foo\",123]}";
+ JsonObject obj = new JsonObject(json);
+ assertEquals(json, obj.encode());
+ assertEquals("foo", obj.getString("mystr"));
+ assertEquals(Integer.valueOf(123), obj.getInteger("myint"));
+ assertEquals(Long.valueOf(1234), obj.getLong("mylong"));
+ assertEquals(Float.valueOf(1.23f), obj.getFloat("myfloat"));
+ assertEquals(Double.valueOf(2.34d), obj.getDouble("mydouble"));
+ assertTrue(obj.getBoolean("myboolean"));
+ assertArrayEquals(bytes, obj.getBinary("mybinary"));
+ assertEquals(Base64.getEncoder().encodeToString(bytes), obj.getValue("mybinary"));
+ assertEquals(now, obj.getInstant("myinstant"));
+ assertEquals(now.toString(), obj.getValue("myinstant"));
+ assertTrue(obj.containsKey("mynull"));
+ JsonObject nestedObj = obj.getJsonObject("myobj");
+ assertEquals("bar", nestedObj.getString("foo"));
+ JsonArray nestedArr = obj.getJsonArray("myarr");
+ assertEquals("foo", nestedArr.getString(0));
+ assertEquals(Integer.valueOf(123), Integer.valueOf(nestedArr.getInteger(1)));
+ }
+
+ @Test
+ public void testDecodeJsonArray() {
+ byte[] bytes = TestUtils.randomByteArray(10);
+ String strBytes = Base64.getEncoder().encodeToString(bytes);
+ Instant now = Instant.now();
+ String strInstant = ISO_INSTANT.format(now);
+ String json = "[\"foo\",123,1234,1.23,2.34,true,\"" + strBytes + "\",\"" + strInstant + "\",null,{\"foo\":\"bar\"},[\"foo\",123]]";
+ JsonArray arr = new JsonArray(json);
+ assertEquals("foo", arr.getString(0));
+ assertEquals(Integer.valueOf(123), arr.getInteger(1));
+ assertEquals(Long.valueOf(1234l), arr.getLong(2));
+ assertEquals(Float.valueOf(1.23f), arr.getFloat(3));
+ assertEquals(Double.valueOf(2.34d), arr.getDouble(4));
+ assertEquals(true, arr.getBoolean(5));
+ assertArrayEquals(bytes, arr.getBinary(6));
+ assertEquals(Base64.getEncoder().encodeToString(bytes), arr.getValue(6));
+ assertEquals(now, arr.getInstant(7));
+ assertEquals(now.toString(), arr.getValue(7));
+ assertTrue(arr.hasNull(8));
+ JsonObject obj = arr.getJsonObject(9);
+ assertEquals("bar", obj.getString("foo"));
+ JsonArray arr2 = arr.getJsonArray(10);
+ assertEquals("foo", arr2.getString(0));
+ assertEquals(Integer.valueOf(123), arr2.getInteger(1));
+ }
+
+ // Strict JSON doesn't allow comments but we do so users can add comments to config files etc
+ @Test
+ public void testDecodeJsonObjectWithComments() {
+ String jsonWithComments =
+ "// single line comment\n" +
+ "/*\n" +
+ " This is a multi \n" +
+ " line comment\n" +
+ "*/\n" +
+ "{\n" +
+ "// another single line comment this time inside the JSON object itself\n" +
+ " \"foo\": \"bar\" // and a single line comment at end of line \n" +
+ "/*\n" +
+ " This is a another multi \n" +
+ " line comment this time inside the JSON object itself\n" +
+ "*/\n" +
+ "}";
+ JsonObject json = new JsonObject(jsonWithComments);
+ assertEquals("{\"foo\":\"bar\"}", json.encode());
+ }
+
+ // Strict JSON doesn't allow comments but we do so users can add comments to config files etc
+ @Test
+ public void testDecodeJsonArrayWithComments() {
+ String jsonWithComments =
+ "// single line comment\n" +
+ "/*\n" +
+ " This is a multi \n" +
+ " line comment\n" +
+ "*/\n" +
+ "[\n" +
+ "// another single line comment this time inside the JSON array itself\n" +
+ " \"foo\", \"bar\" // and a single line comment at end of line \n" +
+ "/*\n" +
+ " This is a another multi \n" +
+ " line comment this time inside the JSON array itself\n" +
+ "*/\n" +
+ "]";
+ JsonArray json = new JsonArray(jsonWithComments);
+ assertEquals("[\"foo\",\"bar\"]", json.encode());
+ }
+
+ @Test
+ public void testDecodeJsonObjectWithInvalidJson() {
+ for (String test : new String[] { "null", "3", "\"3", "qiwjdoiqwjdiqwjd" }) {
+ try {
+ new JsonObject(test);
+ fail();
+ } catch (DecodeException ignore) {
+ }
+ try {
+ new JsonObject(Buffer.buffer(test));
+ fail();
+ } catch (DecodeException ignore) {
+ }
+ }
+ }
+
+ @Test
+ public void testDecodeJsonArrayWithInvalidJson() {
+ for (String test : new String[] { "null", "3", "\"3", "qiwjdoiqwjdiqwjd" }) {
+ try {
+ new JsonArray(test);
+ fail();
+ } catch (DecodeException ignore) {
+ }
+ try {
+ new JsonArray(Buffer.buffer(test));
+ fail();
+ } catch (DecodeException ignore) {
+ }
+ }
+ }
+
+ @Test
+ public void encodeCustomTypeInstant() {
+ Instant now = Instant.now();
+ String json = Json.encode(now);
+ assertNotNull(json);
+ // the RFC is one way only
+ Instant decoded = Instant.from(ISO_INSTANT.parse(json.substring(1, json.length() - 1)));
+ assertEquals(now, decoded);
+
+ }
+
+ @Test
+ public void encodeCustomTypeBinary() {
+ byte[] data = new byte[] { 'h', 'e', 'l', 'l', 'o'};
+ String json = Json.encode(data);
+ assertNotNull(json);
+ // base64 encoded hello
+ assertEquals("\"aGVsbG8=\"", json);
+ }
+
+ @Test
+ public void encodeNull() {
+ String json = Json.encode(null);
+ assertNotNull(json);
+ assertEquals("null", json);
+ }
+
+ @Test
+ public void encodeToBuffer() {
+ Buffer json = Json.encodeToBuffer("Hello World!");
+ assertNotNull(json);
+ // json strings are always UTF8
+ assertEquals("\"Hello World!\"", json.toString());
+ }
+
+ @Test
+ public void encodeNullToBuffer() {
+ Buffer json = Json.encodeToBuffer(null);
+ assertNotNull(json);
+ assertEquals("null", json.toString());
+ }
+
+ @Test
+ public void testDecodeValue() {
+ assertDecodeValue(Buffer.buffer("42"), 42, INTEGER_TYPE_REF);
+ assertDecodeValue(Buffer.buffer("42"), 42L, LONG_TYPE_REF);
+ assertDecodeValue(Buffer.buffer("\"foobar\""), "foobar", STRING_TYPE_REF);
+ assertDecodeValue(Buffer.buffer("3.4"), 3.4f, FLOAT_TYPE_REF);
+ assertDecodeValue(Buffer.buffer("3.4"), 3.4d, DOUBLE_TYPE_REF);
+ assertDecodeValue(Buffer.buffer("{\"foo\":4}"), Collections.singletonMap("foo", 4), MAP_TYPE_REF);
+ assertDecodeValue(Buffer.buffer("[0,1,2]"), Arrays.asList(0, 1, 2), LIST_TYPE_REF);
+ assertDecodeValue(Buffer.buffer("true"), true, BOOLEAN_TYPE_REF);
+ assertDecodeValue(Buffer.buffer("false"), false, BOOLEAN_TYPE_REF);
+ }
+
+ private <T> void assertDecodeValue(Buffer buffer, T expected, TypeReference<T> ref) {
+ Type type = ref.getType();
+ Class<?> clazz = type instanceof Class ? (Class<?>) type : (Class<?>) ((ParameterizedType) type).getRawType();
+ assertEquals(expected, Json.decodeValue(buffer, clazz));
+ assertEquals(expected, Json.decodeValue(buffer, ref));
+ assertEquals(expected, Json.decodeValue(buffer.toString(StandardCharsets.UTF_8), clazz));
+ assertEquals(expected, Json.decodeValue(buffer.toString(StandardCharsets.UTF_8), ref));
+ Buffer nullValue = Buffer.buffer("null");
+ assertNull(Json.decodeValue(nullValue, clazz));
+ assertNull(Json.decodeValue(nullValue, ref));
+ assertNull(Json.decodeValue(nullValue.toString(StandardCharsets.UTF_8), clazz));
+ assertNull(Json.decodeValue(nullValue.toString(StandardCharsets.UTF_8), ref));
+ }
+
+ @Test
+  public void testDecodeBufferUnknownContent() {
+    testDecodeUnknownContent(true);
+ }
+
+ @Test
+  public void testDecodeStringUnknownContent() {
+    testDecodeUnknownContent(false);
+ }
+
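+  // Decodes without an explicit target type: decodeValue infers numbers, booleans,
+  // strings, null, JsonObject and JsonArray, and throws DecodeException on malformed input.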
+  private void testDecodeUnknownContent(boolean asBuffer) {
+ String number = String.valueOf(1);
+ assertEquals(1, asBuffer ? Json.decodeValue(Buffer.buffer(number)) : Json.decodeValue(number));
+
+ String bool = Boolean.TRUE.toString();
+ assertEquals(true, asBuffer ? Json.decodeValue(Buffer.buffer(bool)) : Json.decodeValue(bool));
+
+ String text = "\"whatever\"";
+ assertEquals("whatever", asBuffer ? Json.decodeValue(Buffer.buffer(text)) : Json.decodeValue(text));
+
+ String nullText = "null";
+ assertNull(asBuffer ? Json.decodeValue(Buffer.buffer(nullText)) : Json.decodeValue(nullText));
+
+ JsonObject obj = new JsonObject().put("foo", "bar");
+ assertEquals(obj, asBuffer ? Json.decodeValue(obj.toBuffer()) : Json.decodeValue(obj.toString()));
+
+ JsonArray arr = new JsonArray().add(1).add(false).add("whatever").add(obj);
+ assertEquals(arr, asBuffer ? Json.decodeValue(arr.toBuffer()) : Json.decodeValue(arr.toString()));
+
+ String invalidText = "\"invalid";
+ try {
+ if (asBuffer) {
+ Json.decodeValue(Buffer.buffer(invalidText));
+ } else {
+ Json.decodeValue(invalidText);
+ }
+ fail();
+ } catch (DecodeException ignore) {
+ }
+ }
+}
diff --git a/src/test/java/io/vertx/core/json/JsonMapperTest.java b/src/test/java/io/vertx/core/json/JsonMapperTest.java
deleted file mode 100644
--- a/src/test/java/io/vertx/core/json/JsonMapperTest.java
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- * Copyright (c) 2014 Red Hat, Inc. and others
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
- * which is available at https://www.apache.org/licenses/LICENSE-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
- */
-
-package io.vertx.core.json;
-
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.fasterxml.jackson.core.type.TypeReference;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import io.vertx.core.buffer.Buffer;
-import io.vertx.test.core.TestUtils;
-import io.vertx.test.core.VertxTestBase;
-import org.junit.Test;
-
-import java.lang.reflect.ParameterizedType;
-import java.lang.reflect.Type;
-import java.nio.charset.StandardCharsets;
-import java.time.Instant;
-import java.util.*;
-
-import static java.time.format.DateTimeFormatter.ISO_INSTANT;
-
-/**
- * @author <a href="http://tfox.org">Tim Fox</a>
- */
-public class JsonMapperTest extends VertxTestBase {
-
- @Test
- public void testGetSetMapper() {
- ObjectMapper mapper = Json.mapper;
- assertNotNull(mapper);
- ObjectMapper newMapper = new ObjectMapper();
- Json.mapper = newMapper;
- assertSame(newMapper, Json.mapper);
- Json.mapper = mapper;
- }
-
- @Test
- public void testGetSetPrettyMapper() {
- ObjectMapper mapper = Json.prettyMapper;
- assertNotNull(mapper);
- ObjectMapper newMapper = new ObjectMapper();
- Json.prettyMapper = newMapper;
- assertSame(newMapper, Json.prettyMapper);
- Json.prettyMapper = mapper;
- }
-
- @Test
- public void encodeCustomTypeInstant() {
- Instant now = Instant.now();
- String json = Json.encode(now);
- assertNotNull(json);
- // the RFC is one way only
- Instant decoded = Instant.from(ISO_INSTANT.parse(json.substring(1, json.length() - 1)));
- assertEquals(now, decoded);
-
- }
-
- @Test
- public void encodeCustomTypeInstantNull() {
- Instant now = null;
- String json = Json.encode(now);
- assertNotNull(json);
- assertEquals("null", json);
- }
-
- @Test
- public void encodeCustomTypeBinary() {
- byte[] data = new byte[] { 'h', 'e', 'l', 'l', 'o'};
- String json = Json.encode(data);
- assertNotNull(json);
- // base64 encoded hello
- assertEquals("\"aGVsbG8=\"", json);
- }
-
- @Test
- public void encodeCustomTypeBinaryNull() {
- byte[] data = null;
- String json = Json.encode(data);
- assertNotNull(json);
- assertEquals("null", json);
- }
-
- @Test
- public void encodeToBuffer() {
- Buffer json = Json.encodeToBuffer("Hello World!");
- assertNotNull(json);
- // json strings are always UTF8
- assertEquals("\"Hello World!\"", json.toString("UTF-8"));
- }
-
- @Test
- public void testGenericDecoding() {
- Pojo original = new Pojo();
- original.value = "test";
-
- String json = Json.encode(Collections.singletonList(original));
- List<Pojo> correct;
-
- correct = Json.decodeValue(json, new TypeReference<List<Pojo>>() {});
- assertTrue(((List)correct).get(0) instanceof Pojo);
- assertEquals(original.value, correct.get(0).value);
-
- // same must apply if instead of string we use a buffer
- correct = Json.decodeValue(Buffer.buffer(json, "UTF8"), new TypeReference<List<Pojo>>() {});
- assertTrue(((List)correct).get(0) instanceof Pojo);
- assertEquals(original.value, correct.get(0).value);
-
- List incorrect = Json.decodeValue(json, List.class);
- assertFalse(incorrect.get(0) instanceof Pojo);
- assertTrue(incorrect.get(0) instanceof Map);
- assertEquals(original.value, ((Map)(incorrect.get(0))).get("value"));
- }
-
- @Test
- public void testInstantDecoding() {
- Pojo original = new Pojo();
- original.instant = Instant.from(ISO_INSTANT.parse("2018-06-20T07:25:38.397Z"));
- Pojo decoded = Json.decodeValue("{\"instant\":\"2018-06-20T07:25:38.397Z\"}", Pojo.class);
- assertEquals(original.instant, decoded.instant);
- }
-
- @Test
- public void testNullInstantDecoding() {
- Pojo original = new Pojo();
- Pojo decoded = Json.decodeValue("{\"instant\":null}", Pojo.class);
- assertEquals(original.instant, decoded.instant);
- }
-
- @Test
- public void testBytesDecoding() {
- Pojo original = new Pojo();
- original.bytes = TestUtils.randomByteArray(12);
- Pojo decoded = Json.decodeValue("{\"bytes\":\"" + Base64.getEncoder().encodeToString(original.bytes) + "\"}", Pojo.class);
- assertArrayEquals(original.bytes, decoded.bytes);
- }
-
- @Test
- public void testNullBytesDecoding() {
- Pojo original = new Pojo();
- Pojo decoded = Json.decodeValue("{\"bytes\":null}", Pojo.class);
- assertEquals(original.bytes, decoded.bytes);
- }
-
- private static class Pojo {
- @JsonProperty
- String value;
- @JsonProperty
- Instant instant;
- @JsonProperty
- byte[] bytes;
- }
-
- private static final TypeReference<Integer> INTEGER_TYPE_REF = new TypeReference<Integer>() {};
- private static final TypeReference<Long> LONG_TYPE_REF = new TypeReference<Long>() {};
- private static final TypeReference<String> STRING_TYPE_REF = new TypeReference<String>() {};
- private static final TypeReference<Float> FLOAT_TYPE_REF = new TypeReference<Float>() {};
- private static final TypeReference<Double> DOUBLE_TYPE_REF = new TypeReference<Double>() {};
- private static final TypeReference<Map<String, Object>> MAP_TYPE_REF = new TypeReference<Map<String, Object>>() {};
- private static final TypeReference<List<Object>> LIST_TYPE_REF = new TypeReference<List<Object>>() {};
- private static final TypeReference<Boolean> BOOLEAN_TYPE_REF = new TypeReference<Boolean>() {};
-
- @Test
- public void testDecodeValue() {
- assertDecodeValue(Buffer.buffer("42"), 42, INTEGER_TYPE_REF);
- assertDecodeValue(Buffer.buffer("42"), 42L, LONG_TYPE_REF);
- assertDecodeValue(Buffer.buffer("\"foobar\""), "foobar", STRING_TYPE_REF);
- assertDecodeValue(Buffer.buffer("3.4"), 3.4f, FLOAT_TYPE_REF);
- assertDecodeValue(Buffer.buffer("3.4"), 3.4d, DOUBLE_TYPE_REF);
- assertDecodeValue(Buffer.buffer("{\"foo\":4}"), Collections.singletonMap("foo", 4), MAP_TYPE_REF);
- assertDecodeValue(Buffer.buffer("[0,1,2]"), Arrays.asList(0, 1, 2), LIST_TYPE_REF);
- assertDecodeValue(Buffer.buffer("true"), true, BOOLEAN_TYPE_REF);
- assertDecodeValue(Buffer.buffer("false"), false, BOOLEAN_TYPE_REF);
- }
-
- private <T> void assertDecodeValue(Buffer buffer, T expected, TypeReference<T> ref) {
- Type type = ref.getType();
- Class<?> clazz = type instanceof Class ? (Class<?>) type : (Class<?>) ((ParameterizedType) type).getRawType();
- assertEquals(expected, Json.decodeValue(buffer, clazz));
- assertEquals(expected, Json.decodeValue(buffer, ref));
- assertEquals(expected, Json.decodeValue(buffer.toString(StandardCharsets.UTF_8), clazz));
- assertEquals(expected, Json.decodeValue(buffer.toString(StandardCharsets.UTF_8), ref));
- Buffer nullValue = Buffer.buffer("null");
- assertNull(Json.decodeValue(nullValue, clazz));
- assertNull(Json.decodeValue(nullValue, ref));
- assertNull(Json.decodeValue(nullValue.toString(StandardCharsets.UTF_8), clazz));
- assertNull(Json.decodeValue(nullValue.toString(StandardCharsets.UTF_8), ref));
- }
-
- @Test
- public void testDecodeBufferUnknowContent() {
- testDecodeUnknowContent(true);
- }
-
- @Test
- public void testDecodeStringUnknowContent() {
- testDecodeUnknowContent(false);
- }
-
- private void testDecodeUnknowContent(boolean asBuffer) {
- String number = String.valueOf(1);
- assertEquals(1, asBuffer ? Json.decodeValue(Buffer.buffer(number)) : Json.decodeValue(number));
-
- String bool = Boolean.TRUE.toString();
- assertEquals(true, asBuffer ? Json.decodeValue(Buffer.buffer(bool)) : Json.decodeValue(bool));
-
- String text = "\"whatever\"";
- assertEquals("whatever", asBuffer ? Json.decodeValue(Buffer.buffer(text)) : Json.decodeValue(text));
-
- String nullText = "null";
- assertNull(asBuffer ? Json.decodeValue(Buffer.buffer(nullText)) : Json.decodeValue(nullText));
-
- JsonObject obj = new JsonObject().put("foo", "bar");
- assertEquals(obj, asBuffer ? Json.decodeValue(obj.toBuffer()) : Json.decodeValue(obj.toString()));
-
- JsonArray arr = new JsonArray().add(1).add(false).add("whatever").add(obj);
- assertEquals(arr, asBuffer ? Json.decodeValue(arr.toBuffer()) : Json.decodeValue(arr.toString()));
-
- String invalidText = "\"invalid";
- try {
- if (asBuffer) {
- Json.decodeValue(Buffer.buffer(invalidText));
- } else {
- Json.decodeValue(invalidText);
- }
- fail();
- } catch (DecodeException ignore) {
- }
- }
-}
diff --git a/src/test/java/io/vertx/core/json/JsonObjectTest.java b/src/test/java/io/vertx/core/json/JsonObjectTest.java
--- a/src/test/java/io/vertx/core/json/JsonObjectTest.java
+++ b/src/test/java/io/vertx/core/json/JsonObjectTest.java
@@ -12,7 +12,6 @@
package io.vertx.core.json;
import io.vertx.core.buffer.Buffer;
-import io.vertx.core.impl.Utils;
import io.vertx.test.core.TestUtils;
import org.junit.Before;
import org.junit.Test;
@@ -1182,8 +1181,8 @@ public void testMergeIn2() {
@Test
public void testMergeInDepth0() {
- JsonObject obj1 = new JsonObject("{ \"foo\": { \"bar\": \"flurb\" }}");
- JsonObject obj2 = new JsonObject("{ \"foo\": { \"bar\": \"eek\" }}");
+ JsonObject obj1 = new JsonObject().put("foo", new JsonObject().put("bar", "flurb"));
+ JsonObject obj2 = new JsonObject().put("foo", new JsonObject().put("bar", "eek"));
obj1.mergeIn(obj2, 0);
assertEquals(1, obj1.size());
assertEquals(1, obj1.getJsonObject("foo").size());
@@ -1192,8 +1191,8 @@ public void testMergeInDepth0() {
@Test
public void testMergeInFlat() {
- JsonObject obj1 = new JsonObject("{ \"foo\": { \"bar\": \"flurb\", \"eek\": 32 }}");
- JsonObject obj2 = new JsonObject("{ \"foo\": { \"bar\": \"eek\" }}");
+ JsonObject obj1 = new JsonObject().put("foo", new JsonObject().put("bar", "flurb").put("eek", 32));
+ JsonObject obj2 = new JsonObject().put("foo", new JsonObject().put("bar", "eek"));
obj1.mergeIn(obj2, false);
assertEquals(1, obj1.size());
assertEquals(1, obj1.getJsonObject("foo").size());
@@ -1202,8 +1201,8 @@ public void testMergeInFlat() {
@Test
public void testMergeInDepth1() {
- JsonObject obj1 = new JsonObject("{ \"foo\": \"bar\", \"flurb\": { \"eek\": \"foo\", \"bar\": \"flurb\"}}");
- JsonObject obj2 = new JsonObject("{ \"flurb\": { \"bar\": \"flurb1\" }}");
+ JsonObject obj1 = new JsonObject().put("foo", "bar").put("flurb", new JsonObject().put("eek", "foo").put("bar", "flurb"));
+ JsonObject obj2 = new JsonObject().put("flurb", new JsonObject().put("bar", "flurb1"));
obj1.mergeIn(obj2, 1);
assertEquals(2, obj1.size());
assertEquals(1, obj1.getJsonObject("flurb").size());
@@ -1212,8 +1211,8 @@ public void testMergeInDepth1() {
@Test
public void testMergeInDepth2() {
- JsonObject obj1 = new JsonObject("{ \"foo\": \"bar\", \"flurb\": { \"eek\": \"foo\", \"bar\": \"flurb\"}}");
- JsonObject obj2 = new JsonObject("{ \"flurb\": { \"bar\": \"flurb1\" }}");
+ JsonObject obj1 = new JsonObject().put("foo", "bar").put("flurb", new JsonObject().put("eek", "foo").put("bar", "flurb"));
+ JsonObject obj2 = new JsonObject().put("flurb", new JsonObject().put("bar", "flurb1"));
obj1.mergeIn(obj2, 2);
assertEquals(2, obj1.size());
assertEquals(2, obj1.getJsonObject("flurb").size());
@@ -1221,154 +1220,12 @@ public void testMergeInDepth2() {
assertEquals("flurb1", obj1.getJsonObject("flurb").getString("bar"));
}
- @Test
- public void testEncode() throws Exception {
- jsonObject.put("mystr", "foo");
- jsonObject.put("mycharsequence", new StringBuilder("oob"));
- jsonObject.put("myint", 123);
- jsonObject.put("mylong", 1234l);
- jsonObject.put("myfloat", 1.23f);
- jsonObject.put("mydouble", 2.34d);
- jsonObject.put("myboolean", true);
- byte[] bytes = TestUtils.randomByteArray(10);
- jsonObject.put("mybinary", bytes);
- Instant now = Instant.now();
- jsonObject.put("myinstant", now);
- jsonObject.putNull("mynull");
- jsonObject.put("myobj", new JsonObject().put("foo", "bar"));
- jsonObject.put("myarr", new JsonArray().add("foo").add(123));
- String strBytes = Base64.getEncoder().encodeToString(bytes);
- String expected = "{\"mystr\":\"foo\",\"mycharsequence\":\"oob\",\"myint\":123,\"mylong\":1234,\"myfloat\":1.23,\"mydouble\":2.34,\"" +
- "myboolean\":true,\"mybinary\":\"" + strBytes + "\",\"myinstant\":\"" + ISO_INSTANT.format(now) + "\",\"mynull\":null,\"myobj\":{\"foo\":\"bar\"},\"myarr\":[\"foo\",123]}";
- String json = jsonObject.encode();
- assertEquals(expected, json);
- }
-
- @Test
- public void testEncodeToBuffer() throws Exception {
- jsonObject.put("mystr", "foo");
- jsonObject.put("mycharsequence", new StringBuilder("oob"));
- jsonObject.put("myint", 123);
- jsonObject.put("mylong", 1234l);
- jsonObject.put("myfloat", 1.23f);
- jsonObject.put("mydouble", 2.34d);
- jsonObject.put("myboolean", true);
- byte[] bytes = TestUtils.randomByteArray(10);
- jsonObject.put("mybinary", bytes);
- Instant now = Instant.now();
- jsonObject.put("myinstant", now);
- jsonObject.putNull("mynull");
- jsonObject.put("myobj", new JsonObject().put("foo", "bar"));
- jsonObject.put("myarr", new JsonArray().add("foo").add(123));
- String strBytes = Base64.getEncoder().encodeToString(bytes);
-
- Buffer expected = Buffer.buffer("{\"mystr\":\"foo\",\"mycharsequence\":\"oob\",\"myint\":123,\"mylong\":1234,\"myfloat\":1.23,\"mydouble\":2.34,\"" +
- "myboolean\":true,\"mybinary\":\"" + strBytes + "\",\"myinstant\":\"" + ISO_INSTANT.format(now) + "\",\"mynull\":null,\"myobj\":{\"foo\":\"bar\"},\"myarr\":[\"foo\",123]}", "UTF-8");
-
- Buffer json = jsonObject.toBuffer();
- assertArrayEquals(expected.getBytes(), json.getBytes());
- }
-
- @Test
- public void testDecode() throws Exception {
- byte[] bytes = TestUtils.randomByteArray(10);
- String strBytes = Base64.getEncoder().encodeToString(bytes);
- Instant now = Instant.now();
- String strInstant = ISO_INSTANT.format(now);
- String json = "{\"mystr\":\"foo\",\"myint\":123,\"mylong\":1234,\"myfloat\":1.23,\"mydouble\":2.34,\"" +
- "myboolean\":true,\"mybinary\":\"" + strBytes + "\",\"myinstant\":\"" + strInstant + "\",\"mynull\":null,\"myobj\":{\"foo\":\"bar\"},\"myarr\":[\"foo\",123]}";
- JsonObject obj = new JsonObject(json);
- assertEquals(json, obj.encode());
- assertEquals("foo", obj.getString("mystr"));
- assertEquals(Integer.valueOf(123), obj.getInteger("myint"));
- assertEquals(Long.valueOf(1234), obj.getLong("mylong"));
- assertEquals(Float.valueOf(1.23f), obj.getFloat("myfloat"));
- assertEquals(Double.valueOf(2.34d), obj.getDouble("mydouble"));
- assertTrue(obj.getBoolean("myboolean"));
- assertArrayEquals(bytes, obj.getBinary("mybinary"));
- assertEquals(Base64.getEncoder().encodeToString(bytes), obj.getValue("mybinary"));
- assertEquals(now, obj.getInstant("myinstant"));
- assertEquals(now.toString(), obj.getValue("myinstant"));
- assertTrue(obj.containsKey("mynull"));
- JsonObject nestedObj = obj.getJsonObject("myobj");
- assertEquals("bar", nestedObj.getString("foo"));
- JsonArray nestedArr = obj.getJsonArray("myarr");
- assertEquals("foo", nestedArr.getString(0));
- assertEquals(Integer.valueOf(123), Integer.valueOf(nestedArr.getInteger(1)));
- }
-
@Test
public void testToString() {
jsonObject.put("foo", "bar");
assertEquals(jsonObject.encode(), jsonObject.toString());
}
- @Test
- public void testEncodePrettily() throws Exception {
- jsonObject.put("mystr", "foo");
- jsonObject.put("myint", 123);
- jsonObject.put("mylong", 1234l);
- jsonObject.put("myfloat", 1.23f);
- jsonObject.put("mydouble", 2.34d);
- jsonObject.put("myboolean", true);
- byte[] bytes = TestUtils.randomByteArray(10);
- jsonObject.put("mybinary", bytes);
- Instant now = Instant.now();
- jsonObject.put("myinstant", now);
- jsonObject.put("myobj", new JsonObject().put("foo", "bar"));
- jsonObject.put("myarr", new JsonArray().add("foo").add(123));
- String strBytes = Base64.getEncoder().encodeToString(bytes);
- String strInstant = ISO_INSTANT.format(now);
- String expected = "{" + Utils.LINE_SEPARATOR +
- " \"mystr\" : \"foo\"," + Utils.LINE_SEPARATOR +
- " \"myint\" : 123," + Utils.LINE_SEPARATOR +
- " \"mylong\" : 1234," + Utils.LINE_SEPARATOR +
- " \"myfloat\" : 1.23," + Utils.LINE_SEPARATOR +
- " \"mydouble\" : 2.34," + Utils.LINE_SEPARATOR +
- " \"myboolean\" : true," + Utils.LINE_SEPARATOR +
- " \"mybinary\" : \"" + strBytes + "\"," + Utils.LINE_SEPARATOR +
- " \"myinstant\" : \"" + strInstant + "\"," + Utils.LINE_SEPARATOR +
- " \"myobj\" : {" + Utils.LINE_SEPARATOR +
- " \"foo\" : \"bar\"" + Utils.LINE_SEPARATOR +
- " }," + Utils.LINE_SEPARATOR +
- " \"myarr\" : [ \"foo\", 123 ]" + Utils.LINE_SEPARATOR +
- "}";
- String json = jsonObject.encodePrettily();
- assertEquals(expected, json);
- }
-
- // Strict JSON doesn't allow comments but we do so users can add comments to config files etc
- @Test
- public void testCommentsInJson() {
- String jsonWithComments =
- "// single line comment\n" +
- "/*\n" +
- " This is a multi \n" +
- " line comment\n" +
- "*/\n" +
- "{\n" +
- "// another single line comment this time inside the JSON object itself\n" +
- " \"foo\": \"bar\" // and a single line comment at end of line \n" +
- "/*\n" +
- " This is a another multi \n" +
- " line comment this time inside the JSON object itself\n" +
- "*/\n" +
- "}";
- JsonObject json = new JsonObject(jsonWithComments);
- assertEquals("{\"foo\":\"bar\"}", json.encode());
- }
-
- @Test
- public void testInvalidJson() {
- String invalid = "qiwjdoiqwjdiqwjd";
- try {
- new JsonObject(invalid);
- fail();
- } catch (DecodeException e) {
- // OK
- }
- }
-
@Test
public void testClear() {
jsonObject.put("foo", "bar");
@@ -1830,46 +1687,16 @@ private JsonObject createJsonObject() {
@Test
public void testInvalidConstruction() {
- try {
- new JsonObject("null");
- fail();
- } catch (DecodeException ignore) {
- }
try {
new JsonObject((String) null);
fail();
} catch (NullPointerException ignore) {
}
- try {
- new JsonObject("3");
- fail();
- } catch (DecodeException ignore) {
- }
- try {
- new JsonObject("\"3");
- fail();
- } catch (DecodeException ignore) {
- }
- try {
- new JsonObject(Buffer.buffer("null"));
- fail();
- } catch (DecodeException ignore) {
- }
try {
new JsonObject((Buffer) null);
fail();
} catch (NullPointerException ignore) {
}
- try {
- new JsonObject(Buffer.buffer("3"));
- fail();
- } catch (DecodeException ignore) {
- }
- try {
- new JsonObject(Buffer.buffer("\"3"));
- fail();
- } catch (DecodeException ignore) {
- }
try {
new JsonObject((Map) null);
fail();
diff --git a/src/test/java/io/vertx/it/JsonTest.java b/src/test/java/io/vertx/it/JsonTest.java
new file mode 100644
--- /dev/null
+++ b/src/test/java/io/vertx/it/JsonTest.java
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2011-2017 Contributors to the Eclipse Foundation
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+ * which is available at https://www.apache.org/licenses/LICENSE-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+ */
+
+package io.vertx.it;
+
+import io.vertx.core.Vertx;
+import io.vertx.core.eventbus.EventBus;
+import io.vertx.core.http.HttpClient;
+import io.vertx.core.http.HttpClientOptions;
+import io.vertx.core.http.HttpServerOptions;
+import io.vertx.core.http.HttpTestBase;
+import io.vertx.core.impl.VertxInternal;
+import io.vertx.core.json.JsonArray;
+import io.vertx.core.json.JsonObject;
+import io.vertx.core.net.OpenSSLEngineOptions;
+import io.vertx.test.core.VertxTestBase;
+import io.vertx.test.tls.Cert;
+import io.vertx.test.tls.Trust;
+import org.junit.Test;
+
+import static io.vertx.core.http.HttpTestBase.DEFAULT_HTTP_HOST;
+import static io.vertx.core.http.HttpTestBase.DEFAULT_HTTP_PORT;
+
+/**
+ * @author <a href="mailto:julien@julienviet.com">Julien Viet</a>
+ */
+public class JsonTest extends VertxTestBase {
+
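+  // This test runs with Jackson excluded from the classpath: JSON encoding is
+  // expected to fail fast with NoClassDefFoundError, while non-JSON JsonObject/
+  // JsonArray operations and the core HTTP and event-bus APIs keep working.
+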
+ @Test
+ public void testJsonObject() {
+ JsonObject obj = new JsonObject();
+ obj.put("foo", "bar");
+ try {
+ obj.toString();
+ fail();
+ } catch (NoClassDefFoundError ignore) {
+ }
+ assertTrue(obj.containsKey("foo"));
+ assertEquals(obj, obj.copy());
+ }
+
+ @Test
+ public void testJsonArray() {
+ JsonArray array = new JsonArray();
+ array.add("foo");
+ try {
+ array.toString();
+ fail();
+ } catch (NoClassDefFoundError ignore) {
+ }
+ assertTrue(array.contains("foo"));
+ assertEquals(array, array.copy());
+ }
+
+ @Test
+ public void testHttp() {
+ Vertx vertx = Vertx.vertx();
+ try {
+ vertx.createHttpServer().requestHandler(req -> {
+ req.response().end("hello");
+ }).listen(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, onSuccess(s -> {
+ HttpClient client = vertx.createHttpClient();
+ client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
+ resp.exceptionHandler(this::fail);
+ resp.bodyHandler(body -> {
+ assertEquals("hello", body.toString());
+ testComplete();
+ });
+ }).exceptionHandler(this::fail)
+ .end();
+ }));
+ await();
+ } finally {
+ vertx.close();
+ }
+ }
+
+ @Test
+ public void testEventBus() {
+ Vertx vertx = Vertx.vertx();
+ try {
+ EventBus eb = vertx.eventBus();
+ eb.consumer("the-address", msg -> {
+ assertEquals("ping", msg.body());
+ msg.reply("pong");
+ });
+ eb.request("the-address", "ping", onSuccess(resp -> {
+ assertEquals("pong", resp.body());
+ testComplete();
+ }));
+ await();
+ } finally {
+ vertx.close();
+ }
+ }
+}
| Vert.x should run base operations without requiring Jackson
In some scenarios where Vert.x is embedded and used for base operations such as an HTTP server or HTTP client, Jackson should not be required as long as no actual JSON operation such as encoding or decoding is involved.
In this case, users can exclude Jackson from the classpath and still have these operations work fine (a minimal embedding sketch follows this record).
| 2019-09-09T07:35:37Z | 3.8 |
|
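
A minimal sketch (not part of this PR) of the embedding scenario described in the problem statement above, assuming vertx-core is on the classpath with its Jackson dependencies excluded; the class name and port are illustrative. Only the HTTP server API is used, so no JSON codec class is ever loaded:

import io.vertx.core.Vertx;

public class NoJsonHttpServer {
  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();
    // plain-text request/response cycle: JsonObject/JsonArray are never touched,
    // so nothing forces Jackson classes to load
    vertx.createHttpServer()
      .requestHandler(req -> req.response().end("hello"))
      .listen(8080, "localhost", ar -> {
        if (ar.succeeded()) {
          System.out.println("listening on " + ar.result().actualPort());
        } else {
          ar.cause().printStackTrace();
        }
      });
  }
}
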
eclipse-vertx/vert.x | 3,016 | eclipse-vertx__vert.x-3016 | [
"2982"
] | 9b85c1cc79d12b78587a41c9df8015ef32f2c014 | diff --git a/src/main/java/io/vertx/core/http/StreamResetException.java b/src/main/java/io/vertx/core/http/StreamResetException.java
--- a/src/main/java/io/vertx/core/http/StreamResetException.java
+++ b/src/main/java/io/vertx/core/http/StreamResetException.java
@@ -23,7 +23,7 @@ public class StreamResetException extends VertxException {
private final long code;
public StreamResetException(long code) {
- super("Stream reset: " + code);
+ super("Stream reset: " + code, true);
this.code = code;
}
diff --git a/src/main/java/io/vertx/core/http/impl/ConnectionManager.java b/src/main/java/io/vertx/core/http/impl/ConnectionManager.java
--- a/src/main/java/io/vertx/core/http/impl/ConnectionManager.java
+++ b/src/main/java/io/vertx/core/http/impl/ConnectionManager.java
@@ -148,21 +148,10 @@ void getConnection(ContextInternal ctx, SocketAddress peerAddress, boolean ssl,
}
if (endpoint.pool.getConnection(ar -> {
- if (ar.succeeded()) {
-
- HttpClientConnection conn = ar.result();
-
- if (metrics != null) {
- metrics.dequeueRequest(endpoint.metric, metric);
- }
-
- handler.handle(Future.succeededFuture(conn));
- } else {
- if (metrics != null) {
- metrics.dequeueRequest(endpoint.metric, metric);
- }
- handler.handle(Future.failedFuture(ar.cause()));
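+      // dequeue the request metric and pass the pool result through unchanged,
+      // for success and failure alike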
+ if (metrics != null) {
+ metrics.dequeueRequest(endpoint.metric, metric);
}
+ handler.handle(ar);
})) {
break;
}
diff --git a/src/main/java/io/vertx/core/http/impl/Http1xClientConnection.java b/src/main/java/io/vertx/core/http/impl/Http1xClientConnection.java
--- a/src/main/java/io/vertx/core/http/impl/Http1xClientConnection.java
+++ b/src/main/java/io/vertx/core/http/impl/Http1xClientConnection.java
@@ -191,11 +191,11 @@ private static class StreamImpl implements HttpClientStream {
private final Promise<HttpClientStream> fut;
private final InboundBuffer<Object> queue;
private HttpClientRequestImpl request;
+ private Handler<Void> continueHandler;
private HttpClientResponseImpl response;
private boolean requestEnded;
private boolean responseEnded;
private boolean reset;
- private MultiMap trailers;
private StreamImpl next;
private long bytesWritten;
private long bytesRead;
@@ -245,12 +245,13 @@ public Context getContext() {
}
@Override
- public void writeHead(HttpMethod method, String rawMethod, String uri, MultiMap headers, String hostHeader, boolean chunked, ByteBuf buf, boolean end, StreamPriority priority, Handler<AsyncResult<Void>> handler) {
+ public void writeHead(HttpMethod method, String rawMethod, String uri, MultiMap headers, String hostHeader, boolean chunked, ByteBuf buf, boolean end, StreamPriority priority, Handler<Void> contHandler, Handler<AsyncResult<Void>> handler) {
HttpRequest request = createRequest(method, rawMethod, uri, headers);
prepareRequestHeaders(request, hostHeader, chunked);
if (buf != null) {
bytesWritten += buf.readableBytes();
}
+ continueHandler = contHandler;
sendRequest(request, buf, end, handler);
if (conn.responseInProgress == null) {
conn.responseInProgress = this;
@@ -357,22 +358,26 @@ public void doFetch(long amount) {
}
@Override
- public void reset(long code) {
+ public void reset(Throwable cause) {
synchronized (conn) {
- if (!reset) {
- reset = true;
- if (conn.requestInProgress == this) {
- if (request == null) {
- conn.requestInProgress = null;
- conn.recycle();
- } else {
- conn.close();
- }
- } else if (!responseEnded) {
+ if (reset) {
+ return;
+ }
+ reset = true;
+ if (conn.requestInProgress == this) {
+ if (request == null) {
+          // Is that possible in practice?
+ conn.handleRequestEnd(true);
+ } else {
conn.close();
}
+ } else if (!responseEnded) {
+ conn.close();
+ } else {
+        // response already ended: nothing left to reset on the connection
}
}
+ handleException(cause);
}
@Override
@@ -463,12 +468,12 @@ private HttpClientResponseImpl beginResponse(HttpResponse resp) {
}
}
}
- queue.handler(buf -> {
- if (buf == InboundBuffer.END_SENTINEL) {
+ queue.handler(item -> {
+ if (item instanceof MultiMap) {
conn.reportBytesRead(bytesRead);
- response.handleEnd(trailers);
+ response.handleEnd((MultiMap) item);
} else {
- response.handleChunk((Buffer) buf);
+ response.handleChunk((Buffer) item);
}
});
queue.drainHandler(v -> {
@@ -482,16 +487,10 @@ private HttpClientResponseImpl beginResponse(HttpResponse resp) {
private boolean endResponse(LastHttpContent trailer) {
synchronized (conn) {
if (conn.metrics != null) {
- HttpClientRequestBase req = request;
- if (req.exceptionOccurred != null) {
- conn.metrics.requestReset(metric);
- } else {
- conn.metrics.responseEnd(metric, response);
- }
+ conn.metrics.responseEnd(metric, response);
}
- trailers = new HeadersAdaptor(trailer.trailingHeaders());
}
- queue.write(InboundBuffer.END_SENTINEL);
+ queue.write(new HeadersAdaptor(trailer.trailingHeaders()));
synchronized (conn) {
responseEnded = true;
conn.close |= !conn.options.isKeepAlive();
@@ -512,7 +511,7 @@ void handleException(Throwable cause) {
requestEnded = this.requestEnded;
}
if (request != null) {
- if (response == null || response.statusCode() == 100) {
+ if (response == null) {
request.handleException(cause);
} else {
if (!requestEnded) {
@@ -582,27 +581,25 @@ private void handleHttpMessage(HttpObject obj) {
}
private void handleResponseBegin(HttpResponse resp) {
- StreamImpl stream;
- HttpClientResponseImpl response;
- HttpClientRequestImpl request;
- Exception err;
- synchronized (this) {
- stream = responseInProgress;
- request = stream.request;
- HttpClientResponseImpl r = null;
- Exception t = null;
- try {
- r = stream.beginResponse(resp);
- } catch (Exception e) {
- t = e;
- }
- response = r;
- err = t;
- }
- if (response != null) {
- request.handleResponse(response);
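+    // an interim 100 Continue response has no body: just fire the continue
+    // handler and keep waiting for the real response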
+ if (resp.status().code() == 100) {
+ Handler<Void> handler;
+ synchronized (this) {
+ StreamImpl stream = responseInProgress;
+ handler = stream.continueHandler;
+ }
+ if (handler != null) {
+ handler.handle(null);
+ }
} else {
- request.handleException(err);
+ StreamImpl stream;
+ HttpClientResponseImpl response;
+ HttpClientRequestImpl request;
+ synchronized (this) {
+ stream = responseInProgress;
+ request = stream.request;
+ response = stream.beginResponse(resp);
+ }
+ request.handleResponse(response);
}
}
@@ -622,8 +619,8 @@ private void handleResponseEnd(LastHttpContent trailer) {
StreamImpl stream;
synchronized (this) {
stream = responseInProgress;
- // We don't signal response end for a 100-continue response as a real response will follow
- if (stream.response.statusCode() == 100) {
+ if (stream.response == null) {
+ // 100-continue
return;
}
responseInProgress = stream.next;
@@ -640,7 +637,7 @@ private void handleRequestEnd(boolean recycle) {
requestInProgress = next;
}
if (recycle) {
- checkLifecycle();
+ recycle();
}
if (next != null) {
next.fut.complete(next);
diff --git a/src/main/java/io/vertx/core/http/impl/Http2ClientConnection.java b/src/main/java/io/vertx/core/http/impl/Http2ClientConnection.java
--- a/src/main/java/io/vertx/core/http/impl/Http2ClientConnection.java
+++ b/src/main/java/io/vertx/core/http/impl/Http2ClientConnection.java
@@ -95,13 +95,12 @@ void onStreamClosed(Http2Stream nettyStream) {
super.onStreamClosed(nettyStream);
}
- void upgradeStream(HttpClientRequestImpl req, Object metric, Handler<AsyncResult<HttpClientStream>> completionHandler) {
+ void upgradeStream(Object metric, Handler<AsyncResult<HttpClientStream>> completionHandler) {
Future<HttpClientStream> fut;
synchronized (this) {
try {
Http2ClientStream stream = createStream(handler.connection().stream(1));
stream.metric = metric;
- stream.beginRequest(req);
fut = Future.succeededFuture(stream);
} catch (Exception e) {
fut = Future.failedFuture(e);
@@ -203,6 +202,7 @@ static class Http2ClientStream extends VertxHttp2Stream<Http2ClientConnection> i
private HttpClientRequestBase request;
private HttpClientResponseImpl response;
+ private Handler<Void> continueHandler;
private boolean requestEnded;
private boolean responseEnded;
private Object metric;
@@ -244,11 +244,7 @@ public Object metric() {
@Override
void handleEnd(MultiMap trailers) {
if (conn.metrics != null) {
- if (request.exceptionOccurred != null) {
- conn.metrics.requestReset(metric);
- } else {
- conn.metrics.responseEnd(metric, response);
- }
+ conn.metrics.responseEnd(metric, response);
}
responseEnded = true;
// Should use a shared immutable object for CaseInsensitiveHeaders ?
@@ -320,9 +316,10 @@ void handlePriorityChange(StreamPriority streamPriority) {
}
void handleHeaders(Http2Headers headers, StreamPriority streamPriority, boolean end) {
- if(streamPriority != null)
+ if(streamPriority != null) {
priority(streamPriority);
- if (response == null || response.statusCode() == 100) {
+ }
+ if (response == null) {
int status;
String statusMessage;
try {
@@ -333,9 +330,13 @@ void handleHeaders(Http2Headers headers, StreamPriority streamPriority, boolean
writeReset(0x01 /* PROTOCOL_ERROR */);
return;
}
-
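+      // HTTP/2 delivers 100 Continue as an interim HEADERS frame with :status 100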
+ if (status == 100) {
+ if (continueHandler != null) {
+ continueHandler.handle(null);
+ }
+ return;
+ }
headers.remove(":status");
-
response = new HttpClientResponseImpl(
request,
HttpVersion.HTTP_2,
@@ -376,7 +377,7 @@ Handler<HttpClientRequest> pushHandler() {
}
@Override
- public void writeHead(HttpMethod method, String rawMethod, String uri, MultiMap headers, String hostHeader, boolean chunked, ByteBuf content, boolean end, StreamPriority priority, Handler<AsyncResult<Void>> handler) {
+ public void writeHead(HttpMethod method, String rawMethod, String uri, MultiMap headers, String hostHeader, boolean chunked, ByteBuf content, boolean end, StreamPriority priority, Handler<Void> contHandler, Handler<AsyncResult<Void>> handler) {
Http2Headers h = new DefaultHttp2Headers();
h.method(method != HttpMethod.OTHER ? method.name() : rawMethod);
if (method == HttpMethod.CONNECT) {
@@ -399,6 +400,7 @@ public void writeHead(HttpMethod method, String rawMethod, String uri, MultiMap
if (conn.client.getOptions().isTryUseCompression() && h.get(HttpHeaderNames.ACCEPT_ENCODING) == null) {
h.set(HttpHeaderNames.ACCEPT_ENCODING, DEFLATE_GZIP);
}
+ continueHandler = contHandler;
if (conn.metrics != null) {
metric = conn.metrics.requestBegin(conn.queueMetric, conn.metric(), conn.localAddress(), conn.remoteAddress(), request);
}
@@ -458,11 +460,14 @@ public void endRequest() {
}
@Override
- public void reset(long code) {
+ public void reset(Throwable cause) {
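+      // map the failure back to an HTTP/2 reset code, defaulting to 0 (NO_ERROR)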
+ long code = cause instanceof StreamResetException ? ((StreamResetException)cause).getCode() : 0;
if (request == null) {
+ // Not sure this is possible in practice
writeReset(code);
} else {
if (!(requestEnded && responseEnded)) {
+ handleException(cause);
requestEnded = true;
responseEnded = true;
writeReset(code);
diff --git a/src/main/java/io/vertx/core/http/impl/Http2UpgradedClientConnection.java b/src/main/java/io/vertx/core/http/impl/Http2UpgradedClientConnection.java
--- a/src/main/java/io/vertx/core/http/impl/Http2UpgradedClientConnection.java
+++ b/src/main/java/io/vertx/core/http/impl/Http2UpgradedClientConnection.java
@@ -104,6 +104,7 @@ public void writeHead(HttpMethod method,
ByteBuf buf,
boolean end,
StreamPriority priority,
+ Handler<Void> continueHandler,
Handler<AsyncResult<Void>> handler) {
ChannelPipeline pipeline = conn.channel().pipeline();
HttpClientCodec httpCodec = pipeline.get(HttpClientCodec.class);
@@ -138,10 +139,12 @@ public void upgradeTo(ChannelHandlerContext ctx, FullHttpResponse upgradeRespons
// Now we need to upgrade this to an HTTP2
ConnectionListener<HttpClientConnection> listener = conn.listener();
VertxHttp2ConnectionHandler<Http2ClientConnection> handler = Http2ClientConnection.createHttp2ConnectionHandler(client, conn.endpointMetric(), listener, conn.getContext(), current.metric(), (conn, concurrency) -> {
- conn.upgradeStream(request, stream.metric(), ar -> {
+ conn.upgradeStream(stream.metric(), ar -> {
UpgradingStream.this.conn.closeHandler(null);
UpgradingStream.this.conn.exceptionHandler(null);
if (ar.succeeded()) {
+ HttpClientStream upgradedStream = ar.result();
+ upgradedStream.beginRequest(request);
current = conn;
conn.closeHandler(closeHandler);
conn.exceptionHandler(exceptionHandler);
@@ -163,7 +166,7 @@ public void upgradeTo(ChannelHandlerContext ctx, FullHttpResponse upgradeRespons
HttpClientUpgradeHandler upgradeHandler = new HttpClientUpgradeHandler(httpCodec, upgradeCodec, 65536);
pipeline.addAfter("codec", null, new UpgradeRequestHandler());
pipeline.addAfter("codec", null, upgradeHandler);
- stream.writeHead(method, rawMethod, uri, headers, hostHeader, chunked, buf, end, priority, handler);
+ stream.writeHead(method, rawMethod, uri, headers, hostHeader, chunked, buf, end, priority, continueHandler, handler);
}
@Override
@@ -217,8 +220,8 @@ public void doFetch(long amount) {
}
@Override
- public void reset(long code) {
- stream.reset(code);
+ public void reset(Throwable cause) {
+ stream.reset(cause);
}
@Override
diff --git a/src/main/java/io/vertx/core/http/impl/HttpClientRequestBase.java b/src/main/java/io/vertx/core/http/impl/HttpClientRequestBase.java
--- a/src/main/java/io/vertx/core/http/impl/HttpClientRequestBase.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpClientRequestBase.java
@@ -11,15 +11,16 @@
package io.vertx.core.http.impl;
-import io.netty.handler.codec.http2.Http2CodecUtil;
+import io.vertx.core.Promise;
+import io.vertx.core.Future;
import io.vertx.core.Handler;
import io.vertx.core.http.HttpClientRequest;
import io.vertx.core.http.HttpClientResponse;
import io.vertx.core.http.HttpMethod;
+import io.vertx.core.http.StreamResetException;
import io.vertx.core.net.SocketAddress;
import io.vertx.core.logging.Logger;
import io.vertx.core.logging.LoggerFactory;
-import io.vertx.core.streams.ReadStream;
import java.util.concurrent.TimeoutException;
@@ -45,7 +46,7 @@ public abstract class HttpClientRequestBase implements HttpClientRequest {
private long lastDataReceived;
protected Throwable exceptionOccurred;
private boolean paused;
- private HttpClientResponseImpl response;
+ private HttpClientResponse response;
HttpClientRequestBase(HttpClientImpl client, boolean ssl, HttpMethod method, SocketAddress server, String host, int port, String uri) {
this.client = client;
@@ -59,8 +60,8 @@ public abstract class HttpClientRequestBase implements HttpClientRequest {
this.ssl = ssl;
}
- protected abstract void doHandleResponse(HttpClientResponseImpl resp, long timeoutMs);
- protected abstract void checkComplete();
+ protected void checkEnded() {
+ }
protected String hostHeader() {
if ((port == 80 && !ssl) || (port == 443 && ssl)) {
@@ -99,7 +100,7 @@ public HttpMethod method() {
@Override
public synchronized HttpClientRequest exceptionHandler(Handler<Throwable> handler) {
if (handler != null) {
- checkComplete();
+ checkEnded();
this.exceptionHandler = handler;
} else {
this.exceptionHandler = null;
@@ -113,7 +114,7 @@ synchronized Handler<Throwable> exceptionHandler() {
@Override
public synchronized HttpClientRequest setTimeout(long timeoutMs) {
- cancelOutstandingTimeoutTimer();
+ cancelTimeout();
currentTimeoutMs = timeoutMs;
currentTimeoutTimerId = client.getVertx().setTimer(timeoutMs, id -> handleTimeout(timeoutMs));
return this;
@@ -122,7 +123,7 @@ public synchronized HttpClientRequest setTimeout(long timeoutMs) {
public void handleException(Throwable t) {
Handler<Throwable> handler;
synchronized (this) {
- cancelOutstandingTimeoutTimer();
+ cancelTimeout();
exceptionOccurred = t;
if (exceptionHandler != null) {
handler = exceptionHandler;
@@ -133,7 +134,7 @@ public void handleException(Throwable t) {
handler.handle(t);
}
- void handleResponse(HttpClientResponseImpl resp) {
+ void handleResponse(HttpClientResponse resp) {
synchronized (this) {
response = resp;
}
@@ -141,38 +142,30 @@ void handleResponse(HttpClientResponseImpl resp) {
}
private void checkHandleResponse() {
- HttpClientResponseImpl resp;
+ long timeoutMS;
+ HttpClientResponse resp;
synchronized (this) {
if (response != null) {
if (paused) {
return;
}
+ timeoutMS = cancelTimeout();
resp = response;
response = null;
} else {
return;
}
}
- doHandleResponse(resp);
- }
-
- private synchronized void doHandleResponse(HttpClientResponseImpl resp) {
- long timeoutMS;
- synchronized (this) {
- // If an exception occurred (e.g. a timeout fired) we won't receive the response.
- if (exceptionOccurred != null) {
- return;
- }
- timeoutMS = cancelOutstandingTimeoutTimer();
- }
try {
- doHandleResponse(resp, timeoutMS);
+ handleResponse(resp, timeoutMS);
} catch (Throwable t) {
handleException(t);
}
}
- private long cancelOutstandingTimeoutTimer() {
+ abstract void handleResponse(HttpClientResponse resp, long timeoutMs);
+
+ private synchronized long cancelTimeout() {
long ret;
if ((ret = currentTimeoutTimerId) != -1) {
client.getVertx().cancelTimer(currentTimeoutTimerId);
@@ -184,31 +177,25 @@ private long cancelOutstandingTimeoutTimer() {
}
private void handleTimeout(long timeoutMs) {
- if (lastDataReceived == 0) {
- timeout(timeoutMs);
- } else {
- long now = System.currentTimeMillis();
- long timeSinceLastData = now - lastDataReceived;
- if (timeSinceLastData >= timeoutMs) {
- timeout(timeoutMs);
- } else {
- // reschedule
- lastDataReceived = 0;
- setTimeout(timeoutMs - timeSinceLastData);
+ synchronized (this) {
+ if (lastDataReceived > 0) {
+ long now = System.currentTimeMillis();
+ long timeSinceLastData = now - lastDataReceived;
+ if (timeSinceLastData < timeoutMs) {
+ // reschedule
+ lastDataReceived = 0;
+ setTimeout(timeoutMs - timeSinceLastData);
+ return;
+ }
}
}
- }
-
- private void timeout(long timeoutMs) {
String msg = "The timeout period of " + timeoutMs + "ms has been exceeded while executing " + method + " " + uri + " for server " + server;
- // Use a stack-less exception
- handleException(new TimeoutException(msg) {
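+    // still a stack-less exception: fillInStackTrace below is overridden to skip stack capture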
+ reset(new TimeoutException(msg) {
@Override
public synchronized Throwable fillInStackTrace() {
return this;
}
});
- reset(0);
}
synchronized void dataReceived() {
@@ -217,6 +204,13 @@ synchronized void dataReceived() {
}
}
+ @Override
+ public boolean reset(long code) {
+ return reset(new StreamResetException(code));
+ }
+
+ abstract boolean reset(Throwable cause);
+
@Override
public HttpClientRequest pause() {
paused = true;
diff --git a/src/main/java/io/vertx/core/http/impl/HttpClientRequestImpl.java b/src/main/java/io/vertx/core/http/impl/HttpClientRequestImpl.java
--- a/src/main/java/io/vertx/core/http/impl/HttpClientRequestImpl.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpClientRequestImpl.java
@@ -53,8 +53,6 @@ public class HttpClientRequestImpl extends HttpClientRequestBase implements Http
static final Logger log = LoggerFactory.getLogger(HttpClientRequestImpl.class);
private final VertxInternal vertx;
- private Handler<HttpClientResponse> respHandler;
- private Handler<Void> endHandler;
private boolean chunked;
private String hostHeader;
private String rawMethod;
@@ -62,9 +60,11 @@ public class HttpClientRequestImpl extends HttpClientRequestBase implements Http
private Handler<Void> drainHandler;
private Handler<HttpClientRequest> pushHandler;
private Handler<HttpConnection> connectionHandler;
- private boolean completed;
- private Handler<Void> completionHandler;
- private Long reset;
+ private Handler<Throwable> exceptionHandler;
+ private Promise<Void> endPromise = Promise.promise();
+ private Future<Void> endFuture = endPromise.future();
+ private boolean ended;
+ private Throwable reset;
private ByteBuf pendingChunks;
private List<Handler<AsyncResult<Void>>> pendingHandlers;
private int pendingMaxSize = -1;
@@ -73,8 +73,8 @@ public class HttpClientRequestImpl extends HttpClientRequestBase implements Http
private StreamPriority priority;
private HttpClientStream stream;
private boolean connecting;
-
- // completed => drainHandler = null
+ private Handler<HttpClientResponse> respHandler;
+ private Handler<Void> endHandler;
HttpClientRequestImpl(HttpClientImpl client, boolean ssl, HttpMethod method, SocketAddress server,
String host, int port,
@@ -86,20 +86,14 @@ public class HttpClientRequestImpl extends HttpClientRequestBase implements Http
}
@Override
- public int streamId() {
- HttpClientStream s;
- synchronized (this) {
- if ((s = stream) == null) {
- return -1;
- }
- }
- return s.id();
+ public synchronized int streamId() {
+ return stream == null ? -1 : stream.id();
}
@Override
public synchronized HttpClientRequest handler(Handler<HttpClientResponse> handler) {
if (handler != null) {
- checkComplete();
+ checkEnded();
}
respHandler = handler;
return this;
@@ -108,7 +102,7 @@ public synchronized HttpClientRequest handler(Handler<HttpClientResponse> handle
@Override
public HttpClientRequest setFollowRedirects(boolean followRedirects) {
synchronized (this) {
- checkComplete();
+ checkEnded();
if (followRedirects) {
this.followRedirects = client.getOptions().getMaxRedirects() - 1;
} else {
@@ -122,7 +116,7 @@ public HttpClientRequest setFollowRedirects(boolean followRedirects) {
public HttpClientRequest endHandler(Handler<Void> handler) {
synchronized (this) {
if (handler != null) {
- checkComplete();
+ checkEnded();
}
endHandler = handler;
return this;
@@ -132,7 +126,7 @@ public HttpClientRequest endHandler(Handler<Void> handler) {
@Override
public HttpClientRequestImpl setChunked(boolean chunked) {
synchronized (this) {
- checkComplete();
+ checkEnded();
if (stream != null) {
throw new IllegalStateException("Cannot set chunked after data has been written on request");
}
@@ -181,14 +175,14 @@ public synchronized MultiMap headers() {
@Override
public synchronized HttpClientRequest putHeader(String name, String value) {
- checkComplete();
+ checkEnded();
headers().set(name, value);
return this;
}
@Override
public synchronized HttpClientRequest putHeader(String name, Iterable<String> values) {
- checkComplete();
+ checkEnded();
headers().set(name, values);
return this;
}
@@ -197,7 +191,7 @@ public synchronized HttpClientRequest putHeader(String name, Iterable<String> va
public HttpClientRequest setWriteQueueMaxSize(int maxSize) {
HttpClientStream s;
synchronized (this) {
- checkComplete();
+ checkEnded();
if ((s = stream) == null) {
pendingMaxSize = maxSize;
return this;
@@ -211,7 +205,7 @@ public HttpClientRequest setWriteQueueMaxSize(int maxSize) {
public boolean writeQueueFull() {
HttpClientStream s;
synchronized (this) {
- checkComplete();
+ checkEnded();
if ((s = stream) == null) {
// Should actually check with max queue size and not always blindly return false
return false;
@@ -224,7 +218,7 @@ public boolean writeQueueFull() {
public HttpClientRequest drainHandler(Handler<Void> handler) {
synchronized (this) {
if (handler != null) {
- checkComplete();
+ checkEnded();
drainHandler = handler;
HttpClientStream s;
if ((s = stream) == null) {
@@ -247,7 +241,7 @@ public HttpClientRequest drainHandler(Handler<Void> handler) {
@Override
public synchronized HttpClientRequest continueHandler(Handler<Void> handler) {
if (handler != null) {
- checkComplete();
+ checkEnded();
}
this.continueHandler = handler;
return this;
@@ -260,7 +254,7 @@ public HttpClientRequest sendHead() {
@Override
public synchronized HttpClientRequest sendHead(Handler<HttpVersion> headersHandler) {
- checkComplete();
+ checkEnded();
checkResponseHandler();
if (stream != null) {
throw new IllegalStateException("Head already written");
@@ -272,14 +266,14 @@ public synchronized HttpClientRequest sendHead(Handler<HttpVersion> headersHandl
@Override
public synchronized HttpClientRequest putHeader(CharSequence name, CharSequence value) {
- checkComplete();
+ checkEnded();
headers().set(name, value);
return this;
}
@Override
public synchronized HttpClientRequest putHeader(CharSequence name, Iterable<CharSequence> values) {
- checkComplete();
+ checkEnded();
headers().set(name, values);
return this;
}
@@ -291,34 +285,25 @@ public synchronized HttpClientRequest pushHandler(Handler<HttpClientRequest> han
}
@Override
- public boolean reset(long code) {
+ boolean reset(Throwable cause) {
HttpClientStream s;
synchronized (this) {
if (reset != null) {
return false;
}
- reset = code;
- if (tryComplete()) {
- if (completionHandler != null) {
- completionHandler.handle(null);
- }
- }
+ reset = cause;
s = stream;
}
if (s != null) {
- s.reset(code);
+ s.reset(cause);
+ } else {
+ handleException(cause);
}
return true;
}
- private boolean tryComplete() {
- if (!completed) {
- completed = true;
- drainHandler = null;
- return true;
- } else {
- return false;
- }
+ private void tryComplete() {
+ endPromise.tryComplete();
}
@Override
@@ -342,7 +327,7 @@ public synchronized HttpClientRequest connectionHandler(@Nullable Handler<HttpCo
public synchronized HttpClientRequest writeCustomFrame(int type, int flags, Buffer payload) {
HttpClientStream s;
synchronized (this) {
- checkComplete();
+ checkEnded();
if ((s = stream) == null) {
throw new IllegalStateException("Not yet connected");
}
@@ -354,7 +339,7 @@ public synchronized HttpClientRequest writeCustomFrame(int type, int flags, Buff
void handleDrained() {
Handler<Void> handler;
synchronized (this) {
- if ((handler = drainHandler) == null) {
+ if ((handler = drainHandler) == null || endFuture.isComplete()) {
return;
}
}
@@ -378,9 +363,7 @@ private void handleNextRequest(HttpClientRequestImpl next, long timeoutMs) {
if (headers != null && next.headers == null) {
next.headers().addAll(headers);
}
- Promise<Void> promise = Promise.promise();
- Future<Void> future = promise.future();
- future.setHandler(ar -> {
+ endFuture.setHandler(ar -> {
if (ar.succeeded()) {
if (timeoutMs > 0) {
next.setTimeout(timeoutMs);
@@ -390,26 +373,15 @@ private void handleNextRequest(HttpClientRequestImpl next, long timeoutMs) {
next.handleException(ar.cause());
}
});
- if (exceptionOccurred != null) {
- promise.fail(exceptionOccurred);
- }
- else if (completed) {
- promise.complete();
- } else {
- exceptionHandler(err -> {
- if (!future.isComplete()) {
- promise.fail(err);
- }
- });
- completionHandler = v -> {
- if (!future.isComplete()) {
- promise.complete();
- }
- };
- }
}
- protected void doHandleResponse(HttpClientResponseImpl resp, long timeoutMs) {
+ @Override
+ public void handleException(Throwable t) {
+ super.handleException(t);
+ endPromise.tryFail(t);
+ }
+
+ void handleResponse(HttpClientResponse resp, long timeoutMs) {
if (reset == null) {
int statusCode = resp.statusCode();
if (followRedirects > 0 && statusCode >= 300 && statusCode < 400) {
@@ -425,17 +397,11 @@ protected void doHandleResponse(HttpClientResponseImpl resp, long timeoutMs) {
return;
}
}
- if (statusCode == 100) {
- if (continueHandler != null) {
- continueHandler.handle(null);
- }
- } else {
- if (respHandler != null) {
- respHandler.handle(resp);
- }
- if (endHandler != null) {
- endHandler.handle(null);
- }
+ if (respHandler != null) {
+ respHandler.handle(resp);
+ }
+ if (endHandler != null) {
+ endHandler.handle(null);
}
}
}
@@ -499,8 +465,8 @@ private synchronized void connect(Handler<HttpVersion> headersHandler) {
}
// No need to synchronize as the thread is the same that set exceptionOccurred to true
// exceptionOccurred=true getting the connection => it's a TimeoutException
- if (exceptionOccurred != null || reset != null) {
- stream.reset(0);
+ if (reset != null) {
+ stream.reset(reset);
} else {
ctx.executeFromIO(v -> {
connected(headersHandler, stream);
@@ -527,35 +493,25 @@ private void connected(Handler<HttpVersion> headersHandler, HttpClientStream str
stream.doSetWriteQueueMaxSize(pendingMaxSize);
}
+ ByteBuf pending = null;
+ Handler<AsyncResult<Void>> handler = null;
if (pendingChunks != null) {
List<Handler<AsyncResult<Void>>> handlers = pendingHandlers;
- ByteBuf pending = pendingChunks;
- pendingChunks = null;
pendingHandlers = null;
- Handler<AsyncResult<Void>> handler;
+ pending = pendingChunks;
+ pendingChunks = null;
if (handlers != null) {
handler = ar -> {
handlers.forEach(h -> h.handle(ar));
};
- } else {
- handler = null;
- }
- if (completed) {
- // we also need to write the head so optimize this and write all out in once
- stream.writeHead(method, rawMethod, uri, headers, hostHeader(), chunked, pending, true, priority, handler);
- stream.endRequest();
- } else {
- stream.writeHead(method, rawMethod, uri, headers, hostHeader(), chunked, pending, false, priority, handler);
- }
- } else {
- if (completed) {
- // we also need to write the head so optimize this and write all out in once
- stream.writeHead(method, rawMethod, uri, headers, hostHeader(), chunked, null, true, priority, null);
- stream.endRequest();
- } else {
- stream.writeHead(method, rawMethod, uri, headers, hostHeader(), chunked, null, false, priority, null);
}
}
+ stream.writeHead(method, rawMethod, uri, headers, hostHeader(), chunked, pending, ended, priority, continueHandler, handler);
+ if (ended) {
+ // we also need to write the head so optimize this and write all out in once
+ stream.endRequest();
+ tryComplete();
+ }
this.connecting = false;
this.stream = stream;
}
@@ -564,10 +520,6 @@ private void connected(Handler<HttpVersion> headersHandler, HttpClientStream str
}
}
- private boolean contentLengthSet() {
- return headers != null && headers().contains(CONTENT_LENGTH);
- }
-
@Override
public void end(String chunk) {
end(chunk, (Handler<AsyncResult<Void>>) null);
@@ -644,26 +596,28 @@ public HttpClientRequest write(String chunk, String enc, Handler<AsyncResult<Voi
return this;
}
- private void write(ByteBuf buff, boolean end, Handler<AsyncResult<Void>> h) {
+ private boolean requiresContentLength() {
+ return !chunked && (headers == null || !headers.contains(CONTENT_LENGTH));
+ }
+
+ private void write(ByteBuf buff, boolean end, Handler<AsyncResult<Void>> completionHandler) {
+ if (buff == null && !end) {
+ return;
+ }
HttpClientStream s;
synchronized (this) {
- checkComplete();
+ checkEnded();
checkResponseHandler();
if (end) {
- if (buff != null && !chunked && !contentLengthSet()) {
+ if (buff != null && requiresContentLength()) {
headers().set(CONTENT_LENGTH, String.valueOf(buff.readableBytes()));
}
- } else {
- if (!chunked && !contentLengthSet()) {
- throw new IllegalStateException("You must set the Content-Length header to be the total size of the message "
- + "body BEFORE sending any data if you are not using HTTP chunked encoding.");
- }
- }
- if (buff == null && !end) {
- // nothing to write to the connection just return
- return;
+ } else if (requiresContentLength()) {
+ throw new IllegalStateException("You must set the Content-Length header to be the total size of the message "
+ + "body BEFORE sending any data if you are not using HTTP chunked encoding.");
}
- if ((s = stream) == null) {
+ ended |= end;
+ if (stream == null) {
if (buff != null) {
if (pendingChunks == null) {
pendingChunks = buff;
@@ -678,39 +632,27 @@ private void write(ByteBuf buff, boolean end, Handler<AsyncResult<Void>> h) {
}
pending.addComponent(true, buff);
}
- if (h != null) {
+ if (completionHandler != null) {
if (pendingHandlers == null) {
pendingHandlers = new ArrayList<>();
}
- pendingHandlers.add(h);
- }
- }
- if (end) {
- tryComplete();
- if (completionHandler != null) {
- completionHandler.handle(null);
+ pendingHandlers.add(completionHandler);
}
}
connect(null);
return;
}
+ s = stream;
}
- s.writeBuffer(buff, end, h);
+ s.writeBuffer(buff, end, completionHandler);
if (end) {
- Handler<Void> handler;
- synchronized (this) {
- tryComplete();
- s.endRequest();
- if ((handler = completionHandler) == null) {
- return;
- }
- }
- handler.handle(null);
+ s.endRequest();
+ tryComplete();
}
}
- protected void checkComplete() {
- if (completed) {
+ protected void checkEnded() {
+ if (ended) {
throw new IllegalStateException("Request already complete");
}
}
diff --git a/src/main/java/io/vertx/core/http/impl/HttpClientRequestPushPromise.java b/src/main/java/io/vertx/core/http/impl/HttpClientRequestPushPromise.java
--- a/src/main/java/io/vertx/core/http/impl/HttpClientRequestPushPromise.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpClientRequestPushPromise.java
@@ -54,7 +54,7 @@ Http2ClientConnection.Http2ClientStream getStream() {
}
@Override
- protected void doHandleResponse(HttpClientResponseImpl resp, long timeoutMs) {
+ void handleResponse(HttpClientResponse resp, long timeoutMs) {
Handler<HttpClientResponse> handler;
synchronized (this) {
if ((handler = respHandler) == null) {
@@ -64,10 +64,6 @@ protected void doHandleResponse(HttpClientResponseImpl resp, long timeoutMs) {
handler.handle(resp);
}
- @Override
- protected void checkComplete() {
- }
-
@Override
public synchronized HttpClientRequest handler(Handler<HttpClientResponse> handler) {
respHandler = handler;
@@ -85,11 +81,9 @@ public HttpClientRequest connectionHandler(@Nullable Handler<HttpConnection> han
}
@Override
- public boolean reset(long code) {
- synchronized (conn) {
- stream.reset(code);
- return true;
- }
+ boolean reset(Throwable cause) {
+ stream.reset(cause);
+ return true;
}
@Override
diff --git a/src/main/java/io/vertx/core/http/impl/HttpClientResponseImpl.java b/src/main/java/io/vertx/core/http/impl/HttpClientResponseImpl.java
--- a/src/main/java/io/vertx/core/http/impl/HttpClientResponseImpl.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpClientResponseImpl.java
@@ -92,12 +92,12 @@ public MultiMap headers() {
@Override
public String getHeader(String headerName) {
- return headers().get(headerName);
+ return headers.get(headerName);
}
@Override
public String getHeader(CharSequence headerName) {
- return headers().get(headerName);
+ return headers.get(headerName);
}
@Override
@@ -136,12 +136,12 @@ private void checkEnded() {
}
@Override
- public HttpClientResponse handler(Handler<Buffer> handle) {
+ public HttpClientResponse handler(Handler<Buffer> handler) {
synchronized (conn) {
- if (handle != null) {
+ if (handler != null) {
checkEnded();
}
- dataHandler = handle;
+ dataHandler = handler;
return this;
}
}
@@ -222,9 +222,9 @@ void handleUnknownFrame(HttpFrame frame) {
}
void handleChunk(Buffer data) {
+ request.dataReceived();
Handler<Buffer> handler;
synchronized (conn) {
- request.dataReceived();
handler = dataHandler;
}
if (handler != null) {
diff --git a/src/main/java/io/vertx/core/http/impl/HttpClientStream.java b/src/main/java/io/vertx/core/http/impl/HttpClientStream.java
--- a/src/main/java/io/vertx/core/http/impl/HttpClientStream.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpClientStream.java
@@ -43,7 +43,7 @@ interface HttpClientStream {
HttpConnection connection();
Context getContext();
- void writeHead(HttpMethod method, String rawMethod, String uri, MultiMap headers, String hostHeader, boolean chunked, ByteBuf buf, boolean end, StreamPriority priority, Handler<AsyncResult<Void>> handler);
+ void writeHead(HttpMethod method, String rawMethod, String uri, MultiMap headers, String hostHeader, boolean chunked, ByteBuf buf, boolean end, StreamPriority priority, Handler<Void> contHandler, Handler<AsyncResult<Void>> handler);
void writeBuffer(ByteBuf buf, boolean end, Handler<AsyncResult<Void>> handler);
void writeFrame(int type, int flags, ByteBuf payload);
@@ -52,7 +52,7 @@ interface HttpClientStream {
void doPause();
void doFetch(long amount);
- void reset(long code);
+ void reset(Throwable cause);
void beginRequest(HttpClientRequestImpl req);
void endRequest();
| diff --git a/src/test/java/io/vertx/core/http/Http1xTest.java b/src/test/java/io/vertx/core/http/Http1xTest.java
--- a/src/test/java/io/vertx/core/http/Http1xTest.java
+++ b/src/test/java/io/vertx/core/http/Http1xTest.java
@@ -2883,12 +2883,8 @@ public void testRecyclePipelinedConnection() throws Exception {
client = vertx.createHttpClient(createBaseClientOptions().setMaxPoolSize(1).setPipelining(true).setKeepAlive(true));
AtomicInteger connCount = new AtomicInteger();
client.connectionHandler(conn -> connCount.incrementAndGet());
- HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/first", resp -> {
- fail();
- });
- // Force connect
- req.sendHead(v -> {});
- req.reset();
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/first", resp -> fail());
+ req.reset(0);
CountDownLatch respLatch = new CountDownLatch(2);
client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/second", resp -> {
assertEquals(200, resp.statusCode());
@@ -3058,9 +3054,7 @@ private void testResetClientRequestNotYetSent(boolean keepAlive, boolean pipelin
// There might be a race between the request write and the request reset
// so we do it on the context thread to avoid it
vertx.runOnContext(v -> {
- HttpClientRequest post = client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
- fail();
- });
+ HttpClientRequest post = client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> fail());
post.setChunked(true).write(TestUtils.randomBuffer(1024));
assertTrue(post.reset());
client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
@@ -4475,12 +4469,13 @@ public void testHttpClientRequestShouldCallExceptionHandlerWhenTheClosedHandlerI
}).setChunked(true);
CheckingSender sender = new CheckingSender(vertx.getOrCreateContext(), req);
AtomicBoolean connected = new AtomicBoolean();
+ AtomicBoolean done = new AtomicBoolean();
req.exceptionHandler(err -> {
assertTrue(connected.get());
Throwable failure = sender.close();
if (failure != null) {
fail(failure);
- } else if (err == ConnectionBase.CLOSED_EXCEPTION) {
+ } else if (done.compareAndSet(false, true)) {
testComplete();
}
});
diff --git a/src/test/java/io/vertx/core/http/Http2ClientTest.java b/src/test/java/io/vertx/core/http/Http2ClientTest.java
--- a/src/test/java/io/vertx/core/http/Http2ClientTest.java
+++ b/src/test/java/io/vertx/core/http/Http2ClientTest.java
@@ -700,6 +700,7 @@ public void testServerResetClientStreamDuringResponse() throws Exception {
@Test
public void testClientResetServerStreamDuringRequest() throws Exception {
+ waitFor(2);
Promise<Void> bufReceived = Promise.promise();
server.requestHandler(req -> {
req.handler(buf -> {
@@ -719,13 +720,14 @@ public void testClientResetServerStreamDuringRequest() throws Exception {
});
req.response().closeHandler(v -> {
assertEquals(10L, reset.get());
- testComplete();
+ complete();
});
});
startServer();
- HttpClientRequest req = client.get(DEFAULT_HTTPS_PORT, DEFAULT_HTTPS_HOST, "/somepath", resp -> {
- fail();
- }).setChunked(true).write(Buffer.buffer("hello"));
+ HttpClientRequest req = client.get(DEFAULT_HTTPS_PORT, DEFAULT_HTTPS_HOST, "/somepath", resp -> fail())
+ .exceptionHandler(err -> complete())
+ .setChunked(true);
+ req.write(Buffer.buffer("hello"));
bufReceived.future().setHandler(ar -> {
req.reset(10);
});
@@ -751,13 +753,12 @@ public void testClientResetServerStreamDuringResponse() throws Exception {
req.response().setChunked(true).write(Buffer.buffer("some-data"));
});
startServer();
- HttpClientRequest req = client.get(DEFAULT_HTTPS_PORT, DEFAULT_HTTPS_HOST, "/somepath");
- req.handler(resp -> {
- resp.exceptionHandler(this::fail);
- req.reset(10);
- assertIllegalStateException(() -> req.write(Buffer.buffer()));
- assertIllegalStateException(req::end);
- }).end(Buffer.buffer("hello"));
+ client.get(DEFAULT_HTTPS_PORT, DEFAULT_HTTPS_HOST, "/somepath",
+ resp -> {
+ resp.request().reset(10);
+ assertIllegalStateException(() -> resp.request().write(Buffer.buffer()));
+ assertIllegalStateException(resp.request()::end);
+ }).end(Buffer.buffer("hello"));
await();
}
diff --git a/src/test/java/io/vertx/core/http/Http2Test.java b/src/test/java/io/vertx/core/http/Http2Test.java
--- a/src/test/java/io/vertx/core/http/Http2Test.java
+++ b/src/test/java/io/vertx/core/http/Http2Test.java
@@ -247,28 +247,24 @@ public void testClientStreamPausedWhenConnectionIsPaused() throws Exception {
@Test
public void testResetClientRequestNotYetSent() throws Exception {
- waitFor(2);
server.close();
server = vertx.createHttpServer(createBaseServerOptions().setInitialSettings(new Http2Settings().setMaxConcurrentStreams(1)));
AtomicInteger numReq = new AtomicInteger();
server.requestHandler(req -> {
- assertEquals(0, numReq.getAndIncrement());
- req.response().end();
- complete();
+ fail();
});
startServer(testAddress);
// There might be a race between the request write and the request reset
// so we do it on the context thread to avoid it
vertx.runOnContext(v -> {
- HttpClientRequest post = client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
- fail();
+ HttpClientRequest post = client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> fail());
+ post.exceptionHandler(err -> {
+ if (err instanceof StreamResetException) {
+ complete();
+ }
});
post.setChunked(true).write(TestUtils.randomBuffer(1024));
assertTrue(post.reset());
- client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
- assertEquals(1, numReq.get());
- complete();
- }).end();
});
await();
}
diff --git a/src/test/java/io/vertx/core/http/HttpTest.java b/src/test/java/io/vertx/core/http/HttpTest.java
--- a/src/test/java/io/vertx/core/http/HttpTest.java
+++ b/src/test/java/io/vertx/core/http/HttpTest.java
@@ -1255,11 +1255,14 @@ public void testConnectWithoutResponseHandler() throws Exception {
}
@Test
- public void testClientExceptionHandlerCalledWhenFailingToConnect() throws Exception {
- client.request(HttpMethod.GET, testAddress, 9998, "255.255.255.255", DEFAULT_TEST_URI, resp -> fail("Connect should not be called")).
- exceptionHandler(error -> testComplete()).
- endHandler(done -> fail()).
- end();
+ public void testClientExceptionHandlerCalledWhenFailingToConnect() {
+ waitFor(1);
+ client.request(HttpMethod.GET, testAddress, 9998, "255.255.255.255", DEFAULT_TEST_URI, resp -> {
+ fail();
+ }).exceptionHandler(error -> {
+ complete();
+ })
+ .end();
await();
}
@@ -1446,6 +1449,27 @@ public void testClientResponseExceptionHandlerCalledWhenConnectionClosed() throw
await();
}
+ @Test
+ public void testClientRequestExceptionHandlerCalledWhenRequestEnded() throws Exception {
+ waitFor(2);
+ server.requestHandler(req -> {
+ req.connection().close();
+ });
+ startServer(testAddress);
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp ->
+ fail()
+ );
+ req.exceptionHandler(err -> complete());
+ req.end();
+ try {
+ req.exceptionHandler(err -> fail());
+ fail();
+ } catch (Exception e) {
+ complete();
+ }
+ await();
+ }
+
@Test
public void testDefaultStatus() {
testStatusCode(-1, null);
@@ -2326,19 +2350,17 @@ public void testHttpClientRequestTimeoutResetsTheConnection() throws Exception {
@Test
public void testConnectInvalidPort() {
- client.request(HttpMethod.GET, 9998, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> fail("Connect should not be called")).
- exceptionHandler(t -> testComplete()).
- end();
-
+ client.request(HttpMethod.GET, 9998, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> fail())
+ .exceptionHandler(t -> complete())
+ .end();
await();
}
@Test
public void testConnectInvalidHost() {
- client.request(HttpMethod.GET, 9998, "255.255.255.255", DEFAULT_TEST_URI, resp -> fail("Connect should not be called")).
- exceptionHandler(t -> testComplete()).
- end();
-
+ client.request(HttpMethod.GET, 9998, "255.255.255.255", DEFAULT_TEST_URI, resp -> fail())
+ .exceptionHandler(t -> complete())
+ .end();
await();
}
@@ -3217,6 +3239,7 @@ public void testResponseBodyWriteFixedString() {
@Test
public void testResponseDataTimeout() {
+ waitFor(2);
Buffer expected = TestUtils.randomBuffer(1000);
server.requestHandler(req -> {
req.response().setChunked(true).write(expected);
@@ -3224,6 +3247,14 @@ public void testResponseDataTimeout() {
server.listen(testAddress, onSuccess(s -> {
Buffer received = Buffer.buffer();
HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ AtomicInteger count = new AtomicInteger();
+ resp.exceptionHandler(t -> {
+ if (count.getAndIncrement() == 0) {
+ assertTrue(t instanceof TimeoutException);
+ assertEquals(expected, received);
+ complete();
+ }
+ });
resp.request().setTimeout(500);
resp.handler(received::appendBuffer);
});
@@ -3232,7 +3263,7 @@ public void testResponseDataTimeout() {
if (count.getAndIncrement() == 0) {
assertTrue(t instanceof TimeoutException);
assertEquals(expected, received);
- testComplete();
+ complete();
}
});
req.sendHead();
@@ -5261,26 +5292,90 @@ public void testClientResponseWriteFailure() throws Exception {
await();
}
- /*
@Test
- public void testReset() throws Exception {
- CountDownLatch latch = new CountDownLatch(1);
+ public void testResetClientRequestBeforeActualSend() throws Exception {
server.requestHandler(req -> {
+ });
+ startServer(testAddress);
+ Context ctx = vertx.getOrCreateContext();
+ ctx.runOnContext(v -> {
+ HttpClientRequest req = client.request(
+ HttpMethod.GET,
+ testAddress,
+ new RequestOptions()
+ .setPort(DEFAULT_HTTP_PORT)
+ .setHost(DEFAULT_HTTP_HOST)
+ .setURI(DEFAULT_TEST_URI), resp -> {
+ fail();
+ });
req.exceptionHandler(err -> {
- System.out.println("GOT ERR");
+ if (err instanceof StreamResetException) {
+ assertTrue(err instanceof StreamResetException);
+ testComplete();
+ }
});
- req.endHandler(v -> {
- System.out.println("GOT END");
- latch.countDown();
+ req.sendHead(version -> fail());
+ req.reset();
+ });
+ await();
+ }
+
+ @Test
+ public void testResetClientRequestInProgress() throws Exception {
+ waitFor(1);
+ server.requestHandler(req -> {
+ });
+ startServer(testAddress);
+ Context ctx = vertx.getOrCreateContext();
+ ctx.runOnContext(v -> {
+ HttpClientRequest req = client.request(
+ HttpMethod.GET,
+ testAddress,
+ new RequestOptions()
+ .setPort(DEFAULT_HTTP_PORT)
+ .setHost(DEFAULT_HTTP_HOST)
+ .setURI(DEFAULT_TEST_URI), resp -> fail());
+ req.exceptionHandler(err -> {
+ if (err instanceof StreamResetException) {
+ assertTrue(err instanceof StreamResetException);
+ complete();
+ }
+ });
+ req.sendHead(version -> {
+ req.reset(0);
});
});
- startServer();
- HttpClientRequest req = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {});
- req.end();
- awaitLatch(latch);
- req.reset();
+ await();
+ }
+ @Test
+ public void testResetClientRequestAwaitingResponse() throws Exception {
+ CompletableFuture<Void> fut = new CompletableFuture<>();
+ server.requestHandler(req -> {
+ fut.complete(null);
+ });
+ startServer(testAddress);
+ Context ctx = vertx.getOrCreateContext();
+ ctx.runOnContext(v -> {
+ HttpClientRequest req = client.request(
+ HttpMethod.GET,
+ testAddress,
+ new RequestOptions()
+ .setPort(DEFAULT_HTTP_PORT)
+ .setHost(DEFAULT_HTTP_HOST)
+ .setURI(DEFAULT_TEST_URI), resp -> fail());
+ req.exceptionHandler(err -> {
+ if (err instanceof StreamResetException) {
+ testComplete();
+ }
+ });
+ req.end();
+ fut.thenAccept(v2 -> {
+ ctx.runOnContext(v3 -> {
+ req.reset(0);
+ });
+ });
+ });
await();
}
-*/
}
diff --git a/src/test/java/io/vertx/test/core/TestUtils.java b/src/test/java/io/vertx/test/core/TestUtils.java
--- a/src/test/java/io/vertx/test/core/TestUtils.java
+++ b/src/test/java/io/vertx/test/core/TestUtils.java
@@ -15,6 +15,7 @@
import io.netty.handler.codec.http2.Http2CodecUtil;
import io.netty.util.NetUtil;
import io.netty.util.internal.logging.InternalLoggerFactory;
+import io.vertx.core.Future;
import io.vertx.core.buffer.Buffer;
import io.vertx.core.http.Http2Settings;
import io.vertx.core.net.*;
@@ -30,6 +31,7 @@
import java.util.List;
import java.util.Random;
import java.util.Set;
+import java.util.function.Supplier;
import java.util.zip.GZIPOutputStream;
import static org.junit.Assert.assertTrue;
| HTTP Client - Deadlock
Sometimes it appears that some event loops are blocked while doing HTTP client-related work (Vert.x 3.7.0).
Here is a thread dump:
```
"vert.x-eventloop-thread-0" #10 prio=5 os_prio=0 tid=0x00007f55410b0000 nid=0x855f waiting for monitor entry [0x00007f54fb771000]
java.lang.Thread.State: BLOCKED (on object monitor)
at io.vertx.core.http.impl.HttpClientRequestImpl.handleDrained(HttpClientRequestImpl.java:352)
- waiting to lock <0x00000000c5b865e0> (a io.vertx.core.http.impl.HttpClientRequestImpl)
at io.vertx.core.http.impl.Http1xClientConnection.handleInterestedOpsChanged(Http1xClientConnection.java:847)
- locked <0x00000000c541b4b8> (a io.vertx.core.http.impl.Http1xClientConnection)
at io.vertx.core.net.impl.VertxHandler.lambda$channelWritabilityChanged$3(VertxHandler.java:136)
at io.vertx.core.net.impl.VertxHandler$$Lambda$757/154866834.handle(Unknown Source)
at io.vertx.core.impl.ContextImpl.executeTask(ContextImpl.java:320)
at io.vertx.core.impl.EventLoopContext.execute(EventLoopContext.java:43)
at io.vertx.core.impl.ContextImpl.executeFromIO(ContextImpl.java:188)
at io.vertx.core.impl.ContextImpl.executeFromIO(ContextImpl.java:180)
at io.vertx.core.net.impl.VertxHandler.channelWritabilityChanged(VertxHandler.java:136)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelWritabilityChanged(AbstractChannelHandlerContext.java:434)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelWritabilityChanged(AbstractChannelHandlerContext.java:416)
at io.netty.channel.AbstractChannelHandlerContext.fireChannelWritabilityChanged(AbstractChannelHandlerContext.java:409)
at io.netty.channel.CombinedChannelDuplexHandler$DelegatingChannelHandlerContext.fireChannelWritabilityChanged(CombinedChannelDuplexHandler.java:450)
at io.netty.channel.ChannelInboundHandlerAdapter.channelWritabilityChanged(ChannelInboundHandlerAdapter.java:119)
at io.netty.channel.CombinedChannelDuplexHandler.channelWritabilityChanged(CombinedChannelDuplexHandler.java:273)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelWritabilityChanged(AbstractChannelHandlerContext.java:434)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelWritabilityChanged(AbstractChannelHandlerContext.java:416)
at io.netty.channel.AbstractChannelHandlerContext.fireChannelWritabilityChanged(AbstractChannelHandlerContext.java:409)
at io.netty.channel.DefaultChannelPipeline$HeadContext.channelWritabilityChanged(DefaultChannelPipeline.java:1457)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelWritabilityChanged(AbstractChannelHandlerContext.java:434)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelWritabilityChanged(AbstractChannelHandlerContext.java:416)
at io.netty.channel.DefaultChannelPipeline.fireChannelWritabilityChanged(DefaultChannelPipeline.java:977)
at io.netty.channel.ChannelOutboundBuffer.fireChannelWritabilityChanged(ChannelOutboundBuffer.java:607)
at io.netty.channel.ChannelOutboundBuffer.setWritable(ChannelOutboundBuffer.java:573)
at io.netty.channel.ChannelOutboundBuffer.decrementPendingOutboundBytes(ChannelOutboundBuffer.java:194)
at io.netty.channel.ChannelOutboundBuffer.remove(ChannelOutboundBuffer.java:259)
at io.netty.channel.ChannelOutboundBuffer.removeBytes(ChannelOutboundBuffer.java:338)
at io.netty.channel.socket.nio.NioSocketChannel.doWrite(NioSocketChannel.java:428)
at io.netty.channel.AbstractChannel$AbstractUnsafe.flush0(AbstractChannel.java:934)
at io.netty.channel.nio.AbstractNioChannel$AbstractNioUnsafe.flush0(AbstractNioChannel.java:360)
at io.netty.channel.AbstractChannel$AbstractUnsafe.flush(AbstractChannel.java:901)
at io.netty.channel.DefaultChannelPipeline$HeadContext.flush(DefaultChannelPipeline.java:1396)
at io.netty.channel.AbstractChannelHandlerContext.invokeFlush0(AbstractChannelHandlerContext.java:776)
at io.netty.channel.AbstractChannelHandlerContext.invokeFlush(AbstractChannelHandlerContext.java:768)
at io.netty.channel.AbstractChannelHandlerContext.flush(AbstractChannelHandlerContext.java:749)
at io.netty.channel.CombinedChannelDuplexHandler$DelegatingChannelHandlerContext.flush(CombinedChannelDuplexHandler.java:533)
at io.netty.channel.ChannelOutboundHandlerAdapter.flush(ChannelOutboundHandlerAdapter.java:115)
at io.netty.channel.CombinedChannelDuplexHandler.flush(CombinedChannelDuplexHandler.java:358)
at io.netty.channel.AbstractChannelHandlerContext.invokeFlush0(AbstractChannelHandlerContext.java:776)
at io.netty.channel.AbstractChannelHandlerContext.invokeWriteAndFlush(AbstractChannelHandlerContext.java:802)
at io.netty.channel.AbstractChannelHandlerContext.write(AbstractChannelHandlerContext.java:814)
at io.netty.channel.AbstractChannelHandlerContext.writeAndFlush(AbstractChannelHandlerContext.java:794)
at io.vertx.core.net.impl.ConnectionBase.write(ConnectionBase.java:102)
at io.vertx.core.net.impl.ConnectionBase.lambda$queueForWrite$0(ConnectionBase.java:123)
- locked <0x00000000c541b4b8> (a io.vertx.core.http.impl.Http1xClientConnection)
at io.vertx.core.net.impl.ConnectionBase$$Lambda$756/897948790.handle(Unknown Source)
at io.vertx.core.impl.ContextImpl.executeTask(ContextImpl.java:320)
at io.vertx.core.impl.EventLoopContext.lambda$executeAsync$0(EventLoopContext.java:38)
at io.vertx.core.impl.EventLoopContext$$Lambda$64/397318359.run(Unknown Source)
at io.netty.util.concurrent.AbstractEventExecutor.safeExecute(AbstractEventExecutor.java:163)
at io.netty.util.concurrent.SingleThreadEventExecutor.runAllTasks(SingleThreadEventExecutor.java:404)
at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:462)
at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:897)
at io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
at java.lang.Thread.run(Thread.java:748)
```
```
"vert.x-eventloop-thread-3" #13 prio=5 os_prio=0 tid=0x00007f55410b6800 nid=0x8562 waiting for monitor entry [0x00007f54fb46d000]
java.lang.Thread.State: BLOCKED (on object monitor)
at io.vertx.core.http.impl.Http1xClientConnection.handleRequestEnd(Http1xClientConnection.java:659)
- waiting to lock <0x00000000c541b4b8> (a io.vertx.core.http.impl.Http1xClientConnection)
at io.vertx.core.http.impl.Http1xClientConnection.access$1100(Http1xClientConnection.java:59)
at io.vertx.core.http.impl.Http1xClientConnection$StreamImpl.endRequest(Http1xClientConnection.java:411)
at io.vertx.core.http.impl.HttpClientRequestImpl.write(HttpClientRequestImpl.java:653)
- locked <0x00000000c5b865e0> (a io.vertx.core.http.impl.HttpClientRequestImpl)
at io.vertx.core.http.impl.HttpClientRequestImpl.end(HttpClientRequestImpl.java:576)
at io.vertx.core.streams.impl.PipeImpl.lambda$to$4(PipeImpl.java:105)
at io.vertx.core.streams.impl.PipeImpl$$Lambda$480/828512551.handle(Unknown Source)
at io.vertx.core.impl.FutureImpl.setHandler(FutureImpl.java:79)
at io.vertx.core.streams.impl.PipeImpl.to(PipeImpl.java:89)
at io.vertx.core.streams.ReadStream.pipeTo(ReadStream.java:130)
at io.vertx.ext.web.client.impl.HttpContext.handleSendRequest(HttpContext.java:439)
at io.vertx.ext.web.client.impl.HttpContext.execute(HttpContext.java:266)
at io.vertx.ext.web.client.impl.HttpContext.next(HttpContext.java:250)
at io.gravitee.elasticsearch.client.http.HttpClient$1.handle(HttpClient.java:136)
at io.gravitee.elasticsearch.client.http.HttpClient$1.handle(HttpClient.java:123)
at io.vertx.ext.web.client.impl.HttpContext.next(HttpContext.java:247)
at io.vertx.ext.web.client.impl.predicate.PredicateInterceptor.handle(PredicateInterceptor.java:69)
at io.vertx.ext.web.client.impl.predicate.PredicateInterceptor.handle(PredicateInterceptor.java:32)
at io.vertx.ext.web.client.impl.HttpContext.next(HttpContext.java:247)
at io.vertx.ext.web.client.impl.HttpContext.fire(HttpContext.java:257)
at io.vertx.ext.web.client.impl.HttpContext.sendRequest(HttpContext.java:173)
at io.vertx.ext.web.client.impl.HttpContext.handlePrepareRequest(HttpContext.java:330)
at io.vertx.ext.web.client.impl.HttpContext.execute(HttpContext.java:263)
at io.vertx.ext.web.client.impl.HttpContext.next(HttpContext.java:250)
at io.gravitee.elasticsearch.client.http.HttpClient$1.handle(HttpClient.java:136)
at io.gravitee.elasticsearch.client.http.HttpClient$1.handle(HttpClient.java:123)
at io.vertx.ext.web.client.impl.HttpContext.next(HttpContext.java:247)
at io.vertx.ext.web.client.impl.predicate.PredicateInterceptor.handle(PredicateInterceptor.java:69)
at io.vertx.ext.web.client.impl.predicate.PredicateInterceptor.handle(PredicateInterceptor.java:32)
at io.vertx.ext.web.client.impl.HttpContext.next(HttpContext.java:247)
at io.vertx.ext.web.client.impl.HttpContext.fire(HttpContext.java:257)
at io.vertx.ext.web.client.impl.HttpContext.prepareRequest(HttpContext.java:160)
at io.vertx.ext.web.client.impl.HttpRequestImpl.send(HttpRequestImpl.java:263)
at io.vertx.ext.web.client.impl.HttpRequestImpl.sendStream(HttpRequestImpl.java:228)
at io.vertx.reactivex.ext.web.client.HttpRequest.sendStream(HttpRequest.java:376)
at io.vertx.reactivex.ext.web.client.HttpRequest.lambda$rxSendStream$3(HttpRequest.java:394)
at io.vertx.reactivex.ext.web.client.HttpRequest$$Lambda$533/1751013907.accept(Unknown Source)
at io.vertx.reactivex.impl.AsyncResultSingle.subscribeActual(AsyncResultSingle.java:48)
at io.reactivex.Single.subscribe(Single.java:3495)
at io.reactivex.internal.operators.single.SingleMap.subscribeActual(SingleMap.java:34)
at io.reactivex.Single.subscribe(Single.java:3495)
at io.reactivex.internal.operators.single.SingleSubscribeOn$SubscribeOnObserver.run(SingleSubscribeOn.java:89)
at io.reactivex.Scheduler$DisposeTask.run(Scheduler.java:578)
at io.vertx.reactivex.ContextScheduler$ContextWorker$TimedAction.run(ContextScheduler.java:184)
at io.vertx.reactivex.ContextScheduler$ContextWorker$TimedAction$$Lambda$535/208536531.handle(Unknown Source)
at io.vertx.core.impl.ContextImpl.executeTask(ContextImpl.java:320)
at io.vertx.core.impl.EventLoopContext.lambda$executeAsync$0(EventLoopContext.java:38)
at io.vertx.core.impl.EventLoopContext$$Lambda$64/397318359.run(Unknown Source)
at io.netty.util.concurrent.AbstractEventExecutor.safeExecute(AbstractEventExecutor.java:163)
at io.netty.util.concurrent.SingleThreadEventExecutor.runAllTasks(SingleThreadEventExecutor.java:404)
at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:462)
at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:897)
at io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
at java.lang.Thread.run(Thread.java:748)
```
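Read together, the two dumps show a classic lock-order inversion: thread-0 holds the `Http1xClientConnection` monitor (`0x00000000c541b4b8`) while waiting for the `HttpClientRequestImpl` monitor (`0x00000000c5b865e0`), and thread-3 holds the request monitor while waiting for the connection monitor. A minimal standalone sketch of that shape (illustrative names, not Vert.x code; it deadlocks when the two threads interleave):
```
public class LockOrderInversion {
  static final Object connectionLock = new Object(); // stands in for Http1xClientConnection
  static final Object requestLock = new Object();    // stands in for HttpClientRequestImpl

  public static void main(String[] args) {
    new Thread(() -> {                     // like vert.x-eventloop-thread-0
      synchronized (connectionLock) {      // handleInterestedOpsChanged holds the connection...
        synchronized (requestLock) { }     // ...then handleDrained wants the request
      }
    }).start();
    new Thread(() -> {                     // like vert.x-eventloop-thread-3
      synchronized (requestLock) {         // write(...) holds the request...
        synchronized (connectionLock) { }  // ...then endRequest wants the connection
      }
    }).start();
  }
}
```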
I'm trying to provide a simple reproducer but it seems that the deadlock is pretty hard to reproduce.
Yes, it will likely be pretty hard to reproduce.
I think the main issue lies in:
```
synchronized (this) {
tryComplete();
s.endRequest();
if ((handler = completionHandler) == null) {
return;
}
}
```
The `endRequest()` call should not be made inside the synchronized block; it re-enters the connection's lock while the request's lock is still held.
It also likely means that you are sharing an `HttpClient` between several event loops.
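For reference, the patch above applies exactly that fix: the stream is captured under the request monitor, and `writeBuffer(...)`/`endRequest()` run only after it is released. A self-contained sketch of the shape (illustrative class names, not the actual Vert.x code):
```
class Connection {
  synchronized void endRequest() { /* takes the connection monitor */ }
}

class Request {
  private Connection conn; // guarded by the request monitor

  void end() {
    Connection c;
    synchronized (this) {
      c = conn;         // only read/update state under the request monitor
    }
    if (c != null) {
      c.endRequest();   // invoked after releasing it, so no nested locking
    }
  }
}
```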
The problem I'm seeing here is that the `HttpClient` can be created and used outside an event loop. For example, in my use case I'm working with some Rx schedulers (mainly IO), so the request is initiated from an Rx thread, not from an event-loop thread.
So, in that case, what would be the best option for creating and using the correct `HttpClient` (or `WebClient`)?
Managing a single `HttpClient` per event loop is quite easy when you're already in the context of an event loop, but that's not always the case.
The simplest option would be to encapsulate the HTTP client in a verticle and use the verticle's `context#runOnContext` method to execute any task that uses the HTTP client. | 2019-07-02T09:34:15Z | 3.7
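A minimal sketch of that suggestion (illustrative verticle and method names, using the standard Vert.x 3.x core API), so the client is only ever touched on its own event loop:
```
import io.vertx.core.AbstractVerticle;
import io.vertx.core.http.HttpClient;
import io.vertx.core.http.HttpMethod;

public class HttpClientVerticle extends AbstractVerticle {

  private HttpClient client;

  @Override
  public void start() {
    // Created on the verticle's event loop, so its internal state stays confined to it
    client = vertx.createHttpClient();
  }

  // Callers on other threads (e.g. an Rx IO scheduler) hand work to the context
  public void get(String host, int port, String uri) {
    context.runOnContext(v ->
      client.request(HttpMethod.GET, port, host, uri, resp -> {
        // handle the response on the event loop
      }).end());
  }
}
```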
eclipse-vertx/vert.x | 2,929 | eclipse-vertx__vert.x-2929 | [
"1674"
] | 510dd4b81f9df9fd3c7e2b07776e9ad977707276 | diff --git a/src/main/java/examples/CoreExamples.java b/src/main/java/examples/CoreExamples.java
--- a/src/main/java/examples/CoreExamples.java
+++ b/src/main/java/examples/CoreExamples.java
@@ -15,6 +15,8 @@
import io.vertx.core.buffer.Buffer;
import io.vertx.core.dns.AddressResolverOptions;
import io.vertx.core.file.FileSystem;
+import io.vertx.core.http.HttpClient;
+import io.vertx.core.http.HttpMethod;
import io.vertx.core.http.HttpServer;
import io.vertx.core.http.HttpServerOptions;
import io.vertx.core.http.HttpServerRequest;
@@ -394,7 +396,7 @@ public void configureBSDOptions(Vertx vertx, boolean reusePort) {
vertx.createHttpServer(new HttpServerOptions().setReusePort(reusePort));
}
- public void serverWithDomainSockets(Vertx vertx) {
+ public void tcpServerWithDomainSockets(Vertx vertx) {
// Only available on BSD and Linux
vertx.createNetServer().connectHandler(so -> {
// Handle application
@@ -413,11 +415,14 @@ public void httpServerWithDomainSockets(Vertx vertx) {
});
}
- public void clientWithDomainSockets(Vertx vertx) {
+ public void tcpClientWithDomainSockets(Vertx vertx) {
NetClient netClient = vertx.createNetClient();
// Only available on BSD and Linux
- netClient.connect(SocketAddress.domainSocketAddress("/var/tmp/myservice.sock"), ar -> {
+ SocketAddress addr = SocketAddress.domainSocketAddress("/var/tmp/myservice.sock");
+
+ // Connect to the server
+ netClient.connect(addr, ar -> {
if (ar.succeeded()) {
// Connected
} else {
@@ -425,4 +430,16 @@ public void clientWithDomainSockets(Vertx vertx) {
}
});
}
+
+ public void httpClientWithDomainSockets(Vertx vertx) {
+ HttpClient httpClient = vertx.createHttpClient();
+
+ // Only available on BSD and Linux
+ SocketAddress addr = SocketAddress.domainSocketAddress("/var/tmp/myservice.sock");
+
+ // Send request to the server
+ httpClient.request(HttpMethod.GET, addr, 8080, "localhost", "/", resp -> {
+ // Process response
+ }).end();
+ }
}
diff --git a/src/main/java/io/vertx/core/http/HttpClient.java b/src/main/java/io/vertx/core/http/HttpClient.java
--- a/src/main/java/io/vertx/core/http/HttpClient.java
+++ b/src/main/java/io/vertx/core/http/HttpClient.java
@@ -18,6 +18,7 @@
import io.vertx.core.Handler;
import io.vertx.core.MultiMap;
import io.vertx.core.metrics.Measured;
+import io.vertx.core.net.SocketAddress;
import io.vertx.core.streams.ReadStream;
import java.util.function.Function;
@@ -53,6 +54,16 @@
@VertxGen
public interface HttpClient extends Measured {
+ /**
+ * Like {@link #request(HttpMethod, RequestOptions)} using the {@code serverAddress} parameter to connect to the
+ * server instead of the {@code absoluteURI} parameter.
+ * <p>
+ * The request host header will still be created from the {@code options} parameter.
+ * <p>
+ * Use {@link SocketAddress#domainSocketAddress(String)} to connect to a unix domain socket server.
+ */
+ HttpClientRequest request(HttpMethod method, SocketAddress serverAddress, RequestOptions options);
+
/**
* Create an HTTP request to send to the server with the specified options.
*
@@ -72,6 +83,16 @@ public interface HttpClient extends Measured {
*/
HttpClientRequest request(HttpMethod method, int port, String host, String requestURI);
+ /**
+ * Like {@link #request(HttpMethod, int, String, String)} using the {@code serverAddress} parameter to connect to the
+ * server instead of the {@code absoluteURI} parameter.
+ * <p>
+ * The request host header will still be created from the {@code host} and {@code port} parameters.
+ * <p>
+ * Use {@link SocketAddress#domainSocketAddress(String)} to connect to a unix domain socket server.
+ */
+ HttpClientRequest request(HttpMethod method, SocketAddress serverAddress, int port, String host, String requestURI);
+
/**
* Create an HTTP request to send to the server at the specified host and default port.
* @param method the HTTP method
@@ -92,6 +113,16 @@ public interface HttpClient extends Measured {
@Deprecated
HttpClientRequest request(HttpMethod method, RequestOptions options, Handler<HttpClientResponse> responseHandler);
+ /**
+ * Like {@link #request(HttpMethod, RequestOptions, Handler)} using the {@code serverAddress} parameter to connect to the
+ * server instead of the {@code absoluteURI} parameter.
+ * <p>
+ * The request host header will still be created from the {@code options} parameter.
+ * <p>
+ * Use {@link SocketAddress#domainSocketAddress(String)} to connect to a unix domain socket server.
+ */
+ HttpClientRequest request(HttpMethod method, SocketAddress serverAddress, RequestOptions options, Handler<HttpClientResponse> responseHandler);
+
/**
* Create an HTTP request to send to the server at the specified host and port, specifying a response handler to receive
* the response
@@ -107,6 +138,16 @@ public interface HttpClient extends Measured {
@Deprecated
HttpClientRequest request(HttpMethod method, int port, String host, String requestURI, Handler<HttpClientResponse> responseHandler);
+ /**
+ * Like {@link #request(HttpMethod, int, String, String, Handler)} using the {@code serverAddress} parameter to connect to the
+ * server instead of the {@code absoluteURI} parameter.
+ * <p>
+ * The request host header will still be created from the {@code host} and {@code port} parameters.
+ * <p>
+ * Use {@link SocketAddress#domainSocketAddress(String)} to connect to a unix domain socket server.
+ */
+ HttpClientRequest request(HttpMethod method, SocketAddress serverAddress, int port, String host, String requestURI, Handler<HttpClientResponse> responseHandler);
+
/**
* Create an HTTP request to send to the server at the specified host and default port, specifying a response handler to receive
* the response
@@ -150,6 +191,16 @@ public interface HttpClient extends Measured {
*/
HttpClientRequest requestAbs(HttpMethod method, String absoluteURI);
+ /**
+ * Like {@link #requestAbs(HttpMethod, String)} using the {@code serverAddress} parameter to connect to the
+ * server instead of the {@code absoluteURI} parameter.
+ * <p>
+ * The request host header will still be created from the {@code absoluteURI} parameter.
+ * <p>
+ * Use {@link SocketAddress#domainSocketAddress(String)} to connect to a unix domain socket server.
+ */
+ HttpClientRequest requestAbs(HttpMethod method, SocketAddress serverAddress, String absoluteURI);
+
/**
* Create an HTTP request to send to the server using an absolute URI, specifying a response handler to receive
* the response
@@ -163,6 +214,16 @@ public interface HttpClient extends Measured {
@Deprecated
HttpClientRequest requestAbs(HttpMethod method, String absoluteURI, Handler<HttpClientResponse> responseHandler);
+ /**
+ * Like {@link #requestAbs(HttpMethod, String, Handler)} using the {@code serverAddress} parameter to connect to the
+ * server instead of the {@code absoluteURI} parameter.
+ * <p>
+ * The request host header will still be created from the {@code absoluteURI} parameter.
+ * <p>
+ * Use {@link SocketAddress#domainSocketAddress(String)} to connect to a unix domain socket server.
+ */
+ HttpClientRequest requestAbs(HttpMethod method, SocketAddress serverAddress, String absoluteURI, Handler<HttpClientResponse> responseHandler);
+
/**
* Create an HTTP GET request to send to the server with the specified options.
* @param options the request options
diff --git a/src/main/java/io/vertx/core/http/RequestOptions.java b/src/main/java/io/vertx/core/http/RequestOptions.java
--- a/src/main/java/io/vertx/core/http/RequestOptions.java
+++ b/src/main/java/io/vertx/core/http/RequestOptions.java
@@ -35,7 +35,7 @@ public class RequestOptions {
/**
* SSL enabled by default = false
*/
- public static final boolean DEFAULT_SSL = false;
+ public static final Boolean DEFAULT_SSL = null;
/**
* The default relative request URI = ""
@@ -44,7 +44,7 @@ public class RequestOptions {
private String host;
private int port;
- private boolean ssl;
+ private Boolean ssl;
private String uri;
/**
@@ -123,7 +123,7 @@ public RequestOptions setPort(int port) {
/**
* @return is SSL/TLS enabled?
*/
- public boolean isSsl() {
+ public Boolean isSsl() {
return ssl;
}
@@ -133,7 +133,7 @@ public boolean isSsl() {
* @param ssl true if enabled
* @return a reference to this, so the API can be used fluently
*/
- public RequestOptions setSsl(boolean ssl) {
+ public RequestOptions setSsl(Boolean ssl) {
this.ssl = ssl;
return this;
}
diff --git a/src/main/java/io/vertx/core/http/impl/ConnectionManager.java b/src/main/java/io/vertx/core/http/impl/ConnectionManager.java
--- a/src/main/java/io/vertx/core/http/impl/ConnectionManager.java
+++ b/src/main/java/io/vertx/core/http/impl/ConnectionManager.java
@@ -18,6 +18,7 @@
import io.vertx.core.http.HttpVersion;
import io.vertx.core.http.impl.pool.Pool;
import io.vertx.core.impl.ContextInternal;
+import io.vertx.core.net.SocketAddress;
import io.vertx.core.spi.metrics.HttpClientMetrics;
import java.util.*;
@@ -66,21 +67,19 @@ private synchronized void checkExpired(long period) {
private static final class EndpointKey {
private final boolean ssl;
- private final int port;
- private final String peerHost;
- private final String host;
+ private final SocketAddress server;
+ private final SocketAddress peerAddress;
- EndpointKey(boolean ssl, int port, String peerHost, String host) {
- if (host == null) {
+ EndpointKey(boolean ssl, SocketAddress server, SocketAddress peerAddress) {
+ if (server == null) {
throw new NullPointerException("No null host");
}
- if (peerHost == null) {
- throw new NullPointerException("No null peer host");
+ if (peerAddress == null) {
+ throw new NullPointerException("No null peer address");
}
this.ssl = ssl;
- this.peerHost = peerHost;
- this.host = host;
- this.port = port;
+ this.peerAddress = peerAddress;
+ this.server = server;
}
@Override
@@ -88,15 +87,14 @@ public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
EndpointKey that = (EndpointKey) o;
- return ssl == that.ssl && port == that.port && peerHost.equals(that.peerHost) && host.equals(that.host);
+ return ssl == that.ssl && server.equals(that.server) && peerAddress.equals(that.peerAddress);
}
@Override
public int hashCode() {
int result = ssl ? 1 : 0;
- result = 31 * result + peerHost.hashCode();
- result = 31 * result + host.hashCode();
- result = 31 * result + port;
+ result = 31 * result + peerAddress.hashCode();
+ result = 31 * result + server.hashCode();
return result;
}
}
@@ -112,13 +110,22 @@ public Endpoint(Pool<HttpClientConnection> pool, Object metric) {
}
}
- void getConnection(ContextInternal ctx, String peerHost, boolean ssl, int port, String host, Handler<AsyncResult<HttpClientConnection>> handler) {
- EndpointKey key = new EndpointKey(ssl, port, peerHost, host);
+ void getConnection(ContextInternal ctx, SocketAddress peerAddress, boolean ssl, SocketAddress server, Handler<AsyncResult<HttpClientConnection>> handler) {
+ EndpointKey key = new EndpointKey(ssl, server, peerAddress);
while (true) {
Endpoint endpoint = endpointMap.computeIfAbsent(key, targetAddress -> {
int maxPoolSize = Math.max(client.getOptions().getMaxPoolSize(), client.getOptions().getHttp2MaxPoolSize());
+ String host;
+ int port;
+ if (server.path() == null) {
+ host = server.host();
+ port = server.port();
+ } else {
+ host = server.path();
+ port = 0;
+ }
Object metric = metrics != null ? metrics.createEndpoint(host, port, maxPoolSize) : null;
- HttpChannelConnector connector = new HttpChannelConnector(client, metric, version, ssl, peerHost, host, port);
+ HttpChannelConnector connector = new HttpChannelConnector(client, metric, version, ssl, peerAddress, server);
Pool<HttpClientConnection> pool = new Pool<>(ctx, connector, maxWaitQueueSize, connector.weight(), maxSize,
v -> {
if (metrics != null) {
diff --git a/src/main/java/io/vertx/core/http/impl/Http1xClientConnection.java b/src/main/java/io/vertx/core/http/impl/Http1xClientConnection.java
--- a/src/main/java/io/vertx/core/http/impl/Http1xClientConnection.java
+++ b/src/main/java/io/vertx/core/http/impl/Http1xClientConnection.java
@@ -35,6 +35,7 @@
import io.vertx.core.logging.Logger;
import io.vertx.core.logging.LoggerFactory;
import io.vertx.core.net.NetSocket;
+import io.vertx.core.net.SocketAddress;
import io.vertx.core.net.impl.NetSocketImpl;
import io.vertx.core.net.impl.VertxHandler;
import io.vertx.core.spi.metrics.HttpClientMetrics;
@@ -63,8 +64,7 @@ class Http1xClientConnection extends Http1xConnectionBase<WebSocketImpl> impleme
private final HttpClientImpl client;
private final HttpClientOptions options;
private final boolean ssl;
- private final String host;
- private final int port;
+ private final SocketAddress server;
private final Object endpointMetric;
private final HttpClientMetrics metrics;
private final HttpVersion version;
@@ -85,8 +85,7 @@ class Http1xClientConnection extends Http1xConnectionBase<WebSocketImpl> impleme
Object endpointMetric,
ChannelHandlerContext channel,
boolean ssl,
- String host,
- int port,
+ SocketAddress server,
ContextInternal context,
HttpClientMetrics metrics) {
super(client.getVertx(), channel, context);
@@ -94,8 +93,7 @@ class Http1xClientConnection extends Http1xConnectionBase<WebSocketImpl> impleme
this.client = client;
this.options = client.getOptions();
this.ssl = ssl;
- this.host = host;
- this.port = port;
+ this.server = server;
this.metrics = metrics;
this.version = version;
this.endpointMetric = endpointMetric;
@@ -577,17 +575,19 @@ private void handleResponseBegin(HttpResponse resp) {
StreamImpl stream;
HttpClientResponseImpl response;
HttpClientRequestImpl request;
- Throwable err;
+ Exception err;
synchronized (this) {
stream = responseInProgress;
request = stream.request;
+ HttpClientResponseImpl r = null;
+ Exception t = null;
try {
- response = stream.beginResponse(resp);
- err = null;
+ r = stream.beginResponse(resp);
} catch (Exception e) {
- err = e;
- response = null;
+ t = e;
}
+ response = r;
+ err = t;
}
if (response != null) {
request.handleResponse(response);
@@ -651,7 +651,7 @@ synchronized void toWebSocket(String requestURI, MultiMap headers, WebsocketVers
URI wsuri = new URI(requestURI);
if (!wsuri.isAbsolute()) {
// Netty requires an absolute url
- wsuri = new URI((ssl ? "https:" : "http:") + "//" + host + ":" + port + requestURI);
+ wsuri = new URI((ssl ? "https:" : "http:") + "//" + server.host() + ":" + server.port() + requestURI);
}
WebSocketVersion version =
WebSocketVersion.valueOf((vers == null ?
@@ -812,8 +812,9 @@ private void handshakeComplete(ChannelHandlerContext ctx, FullHttpResponse respo
@Override
public synchronized void handleInterestedOpsChanged() {
if (!isNotWritable()) {
- if (requestInProgress != null) {
- requestInProgress.request.handleDrained();
+ StreamImpl current = requestInProgress;
+ if (current != null) {
+ current.request.handleDrained();
} else if (ws != null) {
ws.handleDrained();
}
diff --git a/src/main/java/io/vertx/core/http/impl/Http2ClientConnection.java b/src/main/java/io/vertx/core/http/impl/Http2ClientConnection.java
--- a/src/main/java/io/vertx/core/http/impl/Http2ClientConnection.java
+++ b/src/main/java/io/vertx/core/http/impl/Http2ClientConnection.java
@@ -172,10 +172,19 @@ public synchronized void onPushPromiseRead(ChannelHandlerContext ctx, int stream
String rawMethod = headers.method().toString();
HttpMethod method = HttpUtils.toVertxMethod(rawMethod);
String uri = headers.path().toString();
- String host = headers.authority() != null ? headers.authority().toString() : null;
+ String authority = headers.authority() != null ? headers.authority().toString() : null;
MultiMap headersMap = new Http2HeadersAdaptor(headers);
Http2Stream promisedStream = handler.connection().stream(promisedStreamId);
- int port = remoteAddress().port();
+ int pos = authority.indexOf(':');
+ int port;
+ String host;
+ if (pos == -1) {
+ host = authority;
+ port = 80;
+ } else {
+ host = authority.substring(0, pos);
+ port = Integer.parseInt(authority.substring(pos + 1));
+ }
HttpClientRequestPushPromise pushReq = new HttpClientRequestPushPromise(this, promisedStream, client, isSsl(), method, rawMethod, uri, host, port, headersMap);
if (metrics != null) {
pushReq.metric(metrics.responsePushed(queueMetric, metric(), localAddress(), remoteAddress(), pushReq));
@@ -420,7 +429,7 @@ public void reportBytesRead(long numberOfBytes) {
}
@Override
- public Context getContext() {
+ public ContextInternal getContext() {
return context;
}
diff --git a/src/main/java/io/vertx/core/http/impl/HttpChannelConnector.java b/src/main/java/io/vertx/core/http/impl/HttpChannelConnector.java
--- a/src/main/java/io/vertx/core/http/impl/HttpChannelConnector.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpChannelConnector.java
@@ -51,18 +51,16 @@ class HttpChannelConnector implements ConnectionProvider<HttpClientConnection> {
private final long http2Weight;
private final long http1MaxConcurrency;
private final boolean ssl;
- private final String peerHost;
- private final String host;
- private final int port;
+ private final SocketAddress peerAddress;
+ private final SocketAddress server;
private final Object endpointMetric;
HttpChannelConnector(HttpClientImpl client,
Object endpointMetric,
HttpVersion version,
boolean ssl,
- String peerHost,
- String host,
- int port) {
+ SocketAddress peerAddress,
+ SocketAddress server) {
this.client = client;
this.endpointMetric = endpointMetric;
this.options = client.getOptions();
@@ -80,9 +78,8 @@ class HttpChannelConnector implements ConnectionProvider<HttpClientConnection> {
this.weight = version == HttpVersion.HTTP_2 ? http2Weight : http1Weight;
this.http1MaxConcurrency = options.isPipelining() ? options.getPipeliningLimit() : 1;
this.ssl = ssl;
- this.peerHost = peerHost;
- this.host = host;
- this.port = port;
+ this.peerAddress = peerAddress;
+ this.server = server;
}
public long weight() {
@@ -109,20 +106,21 @@ private void doConnect(
ContextInternal context,
Future<ConnectResult<HttpClientConnection>> future) {
+ boolean domainSocket = server.path() != null;
+ boolean useAlpn = options.isUseAlpn();
+
Bootstrap bootstrap = new Bootstrap();
bootstrap.group(context.nettyEventLoop());
- bootstrap.channelFactory(client.getVertx().transport().channelFactory(false));
+ bootstrap.channelFactory(client.getVertx().transport().channelFactory(domainSocket));
- applyConnectionOptions(false, bootstrap);
-
- boolean useAlpn = options.isUseAlpn();
+ applyConnectionOptions(domainSocket, bootstrap);
ProxyOptions options = this.options.getProxyOptions();
if (options != null && !ssl && options.getType()== ProxyType.HTTP) {
// http proxy requests are handled in HttpClientImpl, everything else can use netty proxy handler
options = null;
}
- ChannelProvider channelProvider = new ChannelProvider(bootstrap, sslHelper, ssl, context, options);
+ ChannelProvider channelProvider = new ChannelProvider(bootstrap, sslHelper, context, options);
Handler<AsyncResult<Channel>> channelHandler = res -> {
if (res.succeeded()) {
@@ -137,25 +135,25 @@ private void doConnect(
applyHttp1xConnectionOptions(ch.pipeline());
HttpVersion fallbackProtocol = "http/1.0".equals(protocol) ?
HttpVersion.HTTP_1_0 : HttpVersion.HTTP_1_1;
- http1xConnected(listener, fallbackProtocol, host, port, true, context, ch, http1Weight, future);
+ http1xConnected(listener, fallbackProtocol, server, true, context, ch, http1Weight, future);
}
} else {
applyHttp1xConnectionOptions(ch.pipeline());
- http1xConnected(listener, version, host, port, true, context, ch, http1Weight, future);
+ http1xConnected(listener, version, server, true, context, ch, http1Weight, future);
}
} else {
ChannelPipeline pipeline = ch.pipeline();
if (version == HttpVersion.HTTP_2) {
if (this.options.isHttp2ClearTextUpgrade()) {
applyHttp1xConnectionOptions(pipeline);
- http1xConnected(listener, version, host, port, false, context, ch, http2Weight, future);
+ http1xConnected(listener, version, server, false, context, ch, http2Weight, future);
} else {
applyHttp2ConnectionOptions(pipeline);
http2Connected(listener, context, ch, future);
}
} else {
applyHttp1xConnectionOptions(pipeline);
- http1xConnected(listener, version, host, port, false, context, ch, http1Weight, future);
+ http1xConnected(listener, version, server, false, context, ch, http1Weight, future);
}
}
} else {
@@ -163,7 +161,8 @@ private void doConnect(
}
};
- channelProvider.connect(SocketAddress.inetSocketAddress(port, host), SocketAddress.inetSocketAddress(port, peerHost), this.options.isForceSni() ? peerHost : null, channelHandler);
+ // SocketAddress.inetSocketAddress(server.port(), peerHost)
+ channelProvider.connect(server, peerAddress, this.options.isForceSni() ? peerAddress.host() : null, ssl, channelHandler);
}
private void applyConnectionOptions(boolean domainSocket, Bootstrap bootstrap) {
@@ -197,15 +196,14 @@ private void applyHttp1xConnectionOptions(ChannelPipeline pipeline) {
private void http1xConnected(ConnectionListener<HttpClientConnection> listener,
HttpVersion version,
- String host,
- int port,
+ SocketAddress server,
boolean ssl,
ContextInternal context,
Channel ch, long weight,
Future<ConnectResult<HttpClientConnection>> future) {
boolean upgrade = version == HttpVersion.HTTP_2 && options.isHttp2ClearTextUpgrade();
VertxHandler<Http1xClientConnection> clientHandler = VertxHandler.create(context, chctx -> {
- Http1xClientConnection conn = new Http1xClientConnection(listener, upgrade ? HttpVersion.HTTP_1_1 : version, client, endpointMetric, chctx, ssl, host, port, context, metrics);
+ Http1xClientConnection conn = new Http1xClientConnection(listener, upgrade ? HttpVersion.HTTP_1_1 : version, client, endpointMetric, chctx, ssl, server, context, metrics);
if (metrics != null) {
context.executeFromIO(v -> {
Object socketMetric = metrics.connected(conn.remoteAddress(), conn.remoteName());
diff --git a/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java b/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java
--- a/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java
@@ -25,6 +25,7 @@
import io.vertx.core.logging.LoggerFactory;
import io.vertx.core.net.ProxyOptions;
import io.vertx.core.net.ProxyType;
+import io.vertx.core.net.SocketAddress;
import io.vertx.core.net.impl.SSLHelper;
import io.vertx.core.spi.metrics.HttpClientMetrics;
import io.vertx.core.spi.metrics.Metrics;
@@ -84,7 +85,7 @@ public class HttpClientImpl implements HttpClient, MetricsProvider {
if (query != null) {
requestURI += "?" + query;
}
- return Future.succeededFuture(createRequest(m, uri.getHost(), port, ssl, requestURI, null));
+ return Future.succeededFuture(createRequest(m, null, uri.getHost(), port, ssl, requestURI, null));
}
return null;
} catch (Exception e) {
@@ -452,8 +453,12 @@ public WebSocketStream websocketStream(String requestURI, MultiMap headers, Webs
@Override
public HttpClientRequest requestAbs(HttpMethod method, String absoluteURI, Handler<HttpClientResponse> responseHandler) {
- Objects.requireNonNull(responseHandler, "no null responseHandler accepted");
- return requestAbs(method, absoluteURI).handler(responseHandler);
+ return requestAbs(method, null, absoluteURI, responseHandler);
+ }
+
+ @Override
+ public HttpClientRequest requestAbs(HttpMethod method, SocketAddress serverAddress, String absoluteURI, Handler<HttpClientResponse> responseHandler) {
+ return requestAbs(method, serverAddress, absoluteURI).handler(responseHandler);
}
@Override
@@ -467,6 +472,12 @@ public HttpClientRequest request(HttpMethod method, int port, String host, Strin
return request(method, port, host, requestURI).handler(responseHandler);
}
+ @Override
+ public HttpClientRequest request(HttpMethod method, SocketAddress serverAddress, int port, String host, String requestURI, Handler<HttpClientResponse> responseHandler) {
+ Objects.requireNonNull(responseHandler, "no null responseHandler accepted");
+ return request(method, serverAddress, port, host, requestURI).handler(responseHandler);
+ }
+
@Override
public HttpClientRequest request(HttpMethod method, String host, String requestURI, Handler<HttpClientResponse> responseHandler) {
return request(method, options.getDefaultPort(), host, requestURI, responseHandler);
@@ -484,6 +495,11 @@ public HttpClientRequest request(HttpMethod method, String requestURI, Handler<H
@Override
public HttpClientRequest requestAbs(HttpMethod method, String absoluteURI) {
+ return requestAbs(method, null, absoluteURI);
+ }
+
+ @Override
+ public HttpClientRequest requestAbs(HttpMethod method, SocketAddress serverAddress, String absoluteURI) {
URL url = parseUrl(absoluteURI);
Boolean ssl = false;
int port = url.getPort();
@@ -507,12 +523,17 @@ public HttpClientRequest requestAbs(HttpMethod method, String absoluteURI) {
}
}
// if we do not know the protocol, the port still may be -1, we will handle that below
- return createRequest(method, protocol, url.getHost(), port, ssl, relativeUri, null);
+ return createRequest(method, serverAddress, protocol, url.getHost(), port, ssl, relativeUri, null);
}
@Override
public HttpClientRequest request(HttpMethod method, int port, String host, String requestURI) {
- return createRequest(method, host, port, null, requestURI, null);
+ return createRequest(method, null, host, port, null, requestURI, null);
+ }
+
+ @Override
+ public HttpClientRequest request(HttpMethod method, SocketAddress serverAddress, int port, String host, String requestURI) {
+ return createRequest(method, serverAddress, host, port, null, requestURI, null);
}
@Override
@@ -520,9 +541,19 @@ public HttpClientRequest request(HttpMethod method, RequestOptions options, Hand
return request(method, options).handler(responseHandler);
}
+ @Override
+ public HttpClientRequest request(HttpMethod method, SocketAddress serverAddress, RequestOptions options, Handler<HttpClientResponse> responseHandler) {
+ return request(method, serverAddress, options).handler(responseHandler);
+ }
+
+ @Override
+ public HttpClientRequest request(HttpMethod method, SocketAddress serverAddress, RequestOptions options) {
+ return createRequest(method, serverAddress, options.getHost(), options.getPort(), options.isSsl(), options.getURI(), null);
+ }
+
@Override
public HttpClientRequest request(HttpMethod method, RequestOptions options) {
- return createRequest(method, options.getHost(), options.getPort(), options.isSsl(), options.getURI(), null);
+ return createRequest(method, null, options.getHost(), options.getPort(), options.isSsl(), options.getURI(), null);
}
@Override
@@ -951,7 +982,7 @@ private void getConnectionForWebsocket(ContextInternal ctx,
String host,
Handler<Http1xClientConnection> handler,
Handler<Throwable> connectionExceptionHandler) {
- websocketCM.getConnection(ctx, host, ssl, port, host, ar -> {
+ websocketCM.getConnection(ctx, SocketAddress.inetSocketAddress(port, host), ssl, SocketAddress.inetSocketAddress(port, host), ar -> {
if (ar.succeeded()) {
HttpClientConnection conn = ar.result();
conn.getContext().executeFromIO(v -> {
@@ -966,12 +997,11 @@ private void getConnectionForWebsocket(ContextInternal ctx,
}
void getConnectionForRequest(ContextInternal ctx,
- String peerHost,
+ SocketAddress peerAddress,
boolean ssl,
- int port,
- String host,
+ SocketAddress server,
Handler<AsyncResult<HttpClientStream>> handler) {
- httpCM.getConnection(ctx, peerHost, ssl, port, host, ar -> {
+ httpCM.getConnection(ctx, peerAddress, ssl, server, ar -> {
if (ar.succeeded()) {
ar.result().createStream(handler);
} else {
@@ -1001,15 +1031,15 @@ private URL parseUrl(String surl) {
}
private HttpClient requestNow(HttpMethod method, RequestOptions options, Handler<HttpClientResponse> responseHandler) {
- createRequest(method, options.getHost(), options.getPort(), options.isSsl(), options.getURI(), null).handler(responseHandler).end();
+ createRequest(method, null, options.getHost(), options.getPort(), options.isSsl(), options.getURI(), null).handler(responseHandler).end();
return this;
}
- private HttpClientRequest createRequest(HttpMethod method, String host, int port, Boolean ssl, String relativeURI, MultiMap headers) {
- return createRequest(method, ssl==null || ssl==false ? "http" : "https", host, port, ssl, relativeURI, headers);
+ private HttpClientRequest createRequest(HttpMethod method, SocketAddress serverAddress, String host, int port, Boolean ssl, String relativeURI, MultiMap headers) {
+ return createRequest(method, serverAddress, ssl==null || ssl==false ? "http" : "https", host, port, ssl, relativeURI, headers);
}
- private HttpClientRequest createRequest(HttpMethod method, String protocol, String host, int port, Boolean ssl, String relativeURI, MultiMap headers) {
+ private HttpClientRequest createRequest(HttpMethod method, SocketAddress server, String protocol, String host, int port, Boolean ssl, String relativeURI, MultiMap headers) {
Objects.requireNonNull(method, "no null method accepted");
Objects.requireNonNull(protocol, "no null protocol accepted");
Objects.requireNonNull(host, "no null host accepted");
@@ -1023,8 +1053,8 @@ private HttpClientRequest createRequest(HttpMethod method, String protocol, Stri
HttpClientRequest req;
boolean useProxy = !useSSL && proxyType == ProxyType.HTTP;
if (useProxy) {
- final int defaultPort = protocol.equals("ftp") ? 21 : 80;
- final String addPort = (port != -1 && port != defaultPort) ? (":" + port) : "";
+ int defaultPort = protocol.equals("ftp") ? 21 : 80;
+ String addPort = (port != -1 && port != defaultPort) ? (":" + port) : "";
relativeURI = protocol + "://" + host + addPort + relativeURI;
ProxyOptions proxyOptions = options.getProxyOptions();
if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) {
@@ -1034,11 +1064,13 @@ private HttpClientRequest createRequest(HttpMethod method, String protocol, Stri
headers.add("Proxy-Authorization", "Basic " + Base64.getEncoder()
.encodeToString((proxyOptions.getUsername() + ":" + proxyOptions.getPassword()).getBytes()));
}
- req = new HttpClientRequestImpl(this, useSSL, method, proxyOptions.getHost(), proxyOptions.getPort(),
- relativeURI, vertx);
- req.setHost(host + addPort);
+ req = new HttpClientRequestImpl(this, useSSL, method, SocketAddress.inetSocketAddress(proxyOptions.getPort(), proxyOptions.getHost()),
+ host, port, relativeURI, vertx);
} else {
- req = new HttpClientRequestImpl(this, useSSL, method, host, port, relativeURI, vertx);
+ if (server == null) {
+ server = SocketAddress.inetSocketAddress(port, host);
+ }
+ req = new HttpClientRequestImpl(this, useSSL, method, server, host, port, relativeURI, vertx);
}
if (headers != null) {
req.headers().setAll(headers);
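For illustration only: the createRequest logic above reduces to one decision about where the TCP connection physically goes versus which host and port appear in the request line. In the sketch below, ConnectTarget and resolve are hypothetical names; SocketAddress.inetSocketAddress and ProxyOptions are the real Vert.x types.

import io.vertx.core.net.ProxyOptions;
import io.vertx.core.net.SocketAddress;

final class ConnectTarget {
  final SocketAddress address; // where the connection is actually opened
  final String logicalHost;    // what appears in the Host header / request line
  final int logicalPort;

  ConnectTarget(SocketAddress address, String logicalHost, int logicalPort) {
    this.address = address;
    this.logicalHost = logicalHost;
    this.logicalPort = logicalPort;
  }

  // Mirrors the plain-HTTP proxy branch above: proxy wins, else the explicit
  // server address, else an inet address built from host and port.
  static ConnectTarget resolve(SocketAddress server, String host, int port, ProxyOptions proxy) {
    if (proxy != null) {
      return new ConnectTarget(
          SocketAddress.inetSocketAddress(proxy.getPort(), proxy.getHost()), host, port);
    }
    if (server == null) {
      server = SocketAddress.inetSocketAddress(port, host);
    }
    return new ConnectTarget(server, host, port);
  }
}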
diff --git a/src/main/java/io/vertx/core/http/impl/HttpClientRequestBase.java b/src/main/java/io/vertx/core/http/impl/HttpClientRequestBase.java
--- a/src/main/java/io/vertx/core/http/impl/HttpClientRequestBase.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpClientRequestBase.java
@@ -16,6 +16,7 @@
import io.vertx.core.http.HttpClientRequest;
import io.vertx.core.http.HttpClientResponse;
import io.vertx.core.http.HttpMethod;
+import io.vertx.core.net.SocketAddress;
import io.vertx.core.logging.Logger;
import io.vertx.core.logging.LoggerFactory;
import io.vertx.core.streams.ReadStream;
@@ -33,9 +34,10 @@ public abstract class HttpClientRequestBase implements HttpClientRequest {
protected final io.vertx.core.http.HttpMethod method;
protected final String uri;
protected final String path;
+ protected final String query;
protected final String host;
protected final int port;
- protected final String query;
+ protected final SocketAddress server;
protected final boolean ssl;
private Handler<Throwable> exceptionHandler;
private long currentTimeoutTimerId = -1;
@@ -46,10 +48,11 @@ public abstract class HttpClientRequestBase implements HttpClientRequest {
private boolean paused;
private HttpClientResponseImpl response;
- HttpClientRequestBase(HttpClientImpl client, boolean ssl, HttpMethod method, String host, int port, String uri) {
+ HttpClientRequestBase(HttpClientImpl client, boolean ssl, HttpMethod method, SocketAddress server, String host, int port, String uri) {
this.client = client;
this.uri = uri;
this.method = method;
+ this.server = server;
this.host = host;
this.port = port;
this.path = uri.length() > 0 ? HttpUtils.parsePath(uri) : "";
@@ -94,7 +97,7 @@ public String uri() {
}
public String host() {
- return host;
+ return server.host();
}
@Override
@@ -206,7 +209,7 @@ private void handleTimeout(long timeoutMs) {
}
private void timeout(long timeoutMs) {
- String msg = "The timeout period of " + timeoutMs + "ms has been exceeded while executing " + method + " " + uri + " for host " + host;
+ String msg = "The timeout period of " + timeoutMs + "ms has been exceeded while executing " + method + " " + uri + " for server " + server;
// Use a stack-less exception
handleException(new TimeoutException(msg) {
@Override
@@ -252,5 +255,5 @@ public synchronized HttpClientRequest fetch(long amount) {
}
return this;
}
-
+
}
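The reason the request base now stores a SocketAddress rather than a (host, port) pair is that a Vert.x address may also be a Unix domain socket, which has a path instead of a port. A quick sketch of the two factory methods (real Vert.x API; by the usual convention path() is null for inet addresses and port() is -1 for domain sockets):

import io.vertx.core.net.SocketAddress;

public class AddressKinds {
  public static void main(String[] args) {
    SocketAddress inet = SocketAddress.inetSocketAddress(8080, "example.com");
    SocketAddress unix = SocketAddress.domainSocketAddress("/var/run/app.sock");
    System.out.println(inet.host() + ":" + inet.port()); // example.com:8080
    System.out.println(unix.path());                     // /var/run/app.sock
  }
}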
diff --git a/src/main/java/io/vertx/core/http/impl/HttpClientRequestImpl.java b/src/main/java/io/vertx/core/http/impl/HttpClientRequestImpl.java
--- a/src/main/java/io/vertx/core/http/impl/HttpClientRequestImpl.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpClientRequestImpl.java
@@ -28,6 +28,7 @@
import io.vertx.core.impl.VertxInternal;
import io.vertx.core.logging.Logger;
import io.vertx.core.logging.LoggerFactory;
+import io.vertx.core.net.SocketAddress;
import java.util.Objects;
@@ -73,9 +74,10 @@ public class HttpClientRequestImpl extends HttpClientRequestBase implements Http
// completed => drainHandler = null
- HttpClientRequestImpl(HttpClientImpl client, boolean ssl, HttpMethod method, String host, int port,
+ HttpClientRequestImpl(HttpClientImpl client, boolean ssl, HttpMethod method, SocketAddress server,
+ String host, int port,
String relativeURI, VertxInternal vertx) {
- super(client, ssl, method, host, port, relativeURI);
+ super(client, ssl, method, server, host, port, relativeURI);
this.chunked = false;
this.vertx = vertx;
this.priority = HttpUtils.DEFAULT_STREAM_PRIORITY;
@@ -448,16 +450,16 @@ private synchronized void connect(Handler<HttpVersion> headersHandler) {
throw new IllegalStateException("You must provide a rawMethod when using an HttpMethod.OTHER method");
}
- String peerHost;
+ SocketAddress peerAddress;
if (hostHeader != null) {
int idx = hostHeader.lastIndexOf(':');
if (idx != -1) {
- peerHost = hostHeader.substring(0, idx);
+ peerAddress = SocketAddress.inetSocketAddress(Integer.parseInt(hostHeader.substring(idx + 1)), hostHeader.substring(0, idx));
} else {
- peerHost = hostHeader;
+ peerAddress = SocketAddress.inetSocketAddress(80, hostHeader);
}
} else {
- peerHost = host;
+ peerAddress = SocketAddress.inetSocketAddress(port, host);
}
// Capture some stuff
@@ -478,11 +480,13 @@ private synchronized void connect(Handler<HttpVersion> headersHandler) {
}
ContextInternal connectCtx = vertx.getOrCreateContext();
// We defer actual connection until the first part of body is written or end is called
// This gives the user an opportunity to set an exception handler before connecting so
// they can capture any exceptions on connection
connecting = true;
- client.getConnectionForRequest(connectCtx, peerHost, ssl, port, host, ar1 -> {
+ client.getConnectionForRequest(connectCtx, peerAddress, ssl, server, ar1 -> {
if (ar1.succeeded()) {
HttpClientStream stream = ar1.result();
ContextInternal ctx = (ContextInternal) stream.getContext();
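The peer-address derivation above (split the Host header on the last ':', default to port 80, fall back to the request's own host and port) can be read in isolation. In this sketch parse is a hypothetical helper; note that a bracketed IPv6 literal such as [::1]:8080 would need extra handling that neither this sketch nor the hunk above attempts.

import io.vertx.core.net.SocketAddress;

final class PeerAddress {
  static SocketAddress parse(String hostHeader, String fallbackHost, int fallbackPort) {
    if (hostHeader == null) {
      // No explicit Host header: the request's own host and port identify the peer.
      return SocketAddress.inetSocketAddress(fallbackPort, fallbackHost);
    }
    int idx = hostHeader.lastIndexOf(':');
    if (idx != -1) {
      return SocketAddress.inetSocketAddress(
          Integer.parseInt(hostHeader.substring(idx + 1)), hostHeader.substring(0, idx));
    }
    return SocketAddress.inetSocketAddress(80, hostHeader); // no port given: assume 80
  }
}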
diff --git a/src/main/java/io/vertx/core/http/impl/HttpClientRequestPushPromise.java b/src/main/java/io/vertx/core/http/impl/HttpClientRequestPushPromise.java
--- a/src/main/java/io/vertx/core/http/impl/HttpClientRequestPushPromise.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpClientRequestPushPromise.java
@@ -17,6 +17,7 @@
import io.vertx.core.MultiMap;
import io.vertx.core.buffer.Buffer;
import io.vertx.core.http.*;
+import io.vertx.core.net.SocketAddress;
/**
* @author <a href="mailto:julien@julienviet.com">Julien Viet</a>
@@ -40,7 +41,7 @@ public HttpClientRequestPushPromise(
String host,
int port,
MultiMap headers) {
- super(client, ssl, method, host, port, uri);
+ super(client, ssl, method, SocketAddress.inetSocketAddress(port, host), host, port, uri);
this.conn = conn;
this.stream = new Http2ClientConnection.Http2ClientStream(conn, this, stream, false);
this.rawMethod = rawMethod;
@@ -117,7 +118,7 @@ public String uri() {
@Override
public String getHost() {
- return host;
+ return server.host();
}
@Override
diff --git a/src/main/java/io/vertx/core/net/impl/ChannelProvider.java b/src/main/java/io/vertx/core/net/impl/ChannelProvider.java
--- a/src/main/java/io/vertx/core/net/impl/ChannelProvider.java
+++ b/src/main/java/io/vertx/core/net/impl/ChannelProvider.java
@@ -43,7 +43,6 @@ public final class ChannelProvider {
private final Bootstrap bootstrap;
private final SSLHelper sslHelper;
- private final boolean ssl;
private final ContextInternal context;
private final ProxyOptions proxyOptions;
private String applicationProtocol;
@@ -51,12 +50,10 @@ public final class ChannelProvider {
public ChannelProvider(Bootstrap bootstrap,
SSLHelper sslHelper,
- boolean ssl,
ContextInternal context,
ProxyOptions proxyOptions) {
this.bootstrap = bootstrap;
this.context = context;
- this.ssl = ssl;
this.sslHelper = sslHelper;
this.proxyOptions = proxyOptions;
}
@@ -75,7 +72,7 @@ public Channel channel() {
return channel;
}
- public void connect(SocketAddress remoteAddress, SocketAddress peerAddress, String serverName, Handler<AsyncResult<Channel>> channelHandler) {
+ public void connect(SocketAddress remoteAddress, SocketAddress peerAddress, String serverName, boolean ssl, Handler<AsyncResult<Channel>> channelHandler) {
Handler<AsyncResult<Channel>> handler = res -> {
if (Context.isOnEventLoopThread()) {
channelHandler.handle(res);
@@ -85,13 +82,13 @@ public void connect(SocketAddress remoteAddress, SocketAddress peerAddress, Stri
}
};
if (proxyOptions != null) {
- handleProxyConnect(remoteAddress, peerAddress, serverName, handler);
+ handleProxyConnect(remoteAddress, peerAddress, serverName, ssl, handler);
} else {
- handleConnect(remoteAddress, peerAddress, serverName, handler);
+ handleConnect(remoteAddress, peerAddress, serverName, ssl, handler);
}
}
- private void initSSL(SocketAddress peerAddress, String serverName, Channel ch, Handler<AsyncResult<Channel>> channelHandler) {
+ private void initSSL(SocketAddress peerAddress, String serverName, boolean ssl, Channel ch, Handler<AsyncResult<Channel>> channelHandler) {
if (ssl) {
SslHandler sslHandler = new SslHandler(sslHelper.createEngine(context.owner(), peerAddress, serverName));
ChannelPipeline pipeline = ch.pipeline();
@@ -124,19 +121,19 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
}
- private void handleConnect(SocketAddress remoteAddress, SocketAddress peerAddress, String serverName, Handler<AsyncResult<Channel>> channelHandler) {
+ private void handleConnect(SocketAddress remoteAddress, SocketAddress peerAddress, String serverName, boolean ssl, Handler<AsyncResult<Channel>> channelHandler) {
VertxInternal vertx = context.owner();
bootstrap.resolver(vertx.nettyAddressResolverGroup());
bootstrap.handler(new ChannelInitializer<Channel>() {
@Override
protected void initChannel(Channel ch) {
- initSSL(peerAddress, serverName, ch, channelHandler);
+ initSSL(peerAddress, serverName, ssl, ch, channelHandler);
}
});
ChannelFuture fut = bootstrap.connect(vertx.transport().convert(remoteAddress, false));
fut.addListener(res -> {
if (res.isSuccess()) {
- connected(fut.channel(), channelHandler);
+ connected(fut.channel(), ssl, channelHandler);
} else {
channelHandler.handle(io.vertx.core.Future.failedFuture(res.cause()));
}
@@ -149,7 +146,7 @@ protected void initChannel(Channel ch) {
* @param channel the channel
* @param channelHandler the channel handler
*/
- private void connected(Channel channel, Handler<AsyncResult<Channel>> channelHandler) {
+ private void connected(Channel channel, boolean ssl, Handler<AsyncResult<Channel>> channelHandler) {
this.channel = channel;
if (!ssl) {
// No handshake
@@ -160,7 +157,7 @@ private void connected(Channel channel, Handler<AsyncResult<Channel>> channelHan
/**
* A channel provider that connects via a Proxy : HTTP or SOCKS
*/
- private void handleProxyConnect(SocketAddress remoteAddress, SocketAddress peerAddress, String serverName, Handler<AsyncResult<Channel>> channelHandler) {
+ private void handleProxyConnect(SocketAddress remoteAddress, SocketAddress peerAddress, String serverName, boolean ssl, Handler<AsyncResult<Channel>> channelHandler) {
final VertxInternal vertx = context.owner();
final String proxyHost = proxyOptions.getHost();
@@ -206,8 +203,8 @@ public void userEventTriggered(ChannelHandlerContext ctx, Object evt) {
if (evt instanceof ProxyConnectionEvent) {
pipeline.remove(proxy);
pipeline.remove(this);
- initSSL(peerAddress, serverName, ch, channelHandler);
- connected(ch, channelHandler);
+ initSSL(peerAddress, serverName, ssl, ch, channelHandler);
+ connected(ch, ssl, channelHandler);
}
ctx.fireUserEventTriggered(evt);
}
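Moving ssl out of ChannelProvider's constructor and into connect(...) makes one provider reusable across connections that differ only in transport security, which is what a single pool needs once requests can override SSL individually. A hedged sketch of the resulting shape (the Netty bootstrap, proxy, and handler plumbing are elided):

// Illustrative shape only; not the real class.
final class PerConnectSsl {
  interface Completion { void done(boolean succeeded); }

  void connect(String remote, boolean ssl, Completion completion) {
    // 1. open the (possibly proxied) TCP connection to `remote`
    // 2. only if `ssl` is true, add an SslHandler to this channel's pipeline
    // 3. signal completion once the optional TLS handshake has finished
    completion.done(true);
  }
}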
diff --git a/src/main/java/io/vertx/core/net/impl/ConnectionBase.java b/src/main/java/io/vertx/core/net/impl/ConnectionBase.java
--- a/src/main/java/io/vertx/core/net/impl/ConnectionBase.java
+++ b/src/main/java/io/vertx/core/net/impl/ConnectionBase.java
@@ -414,22 +414,28 @@ public ChannelPromise channelFuture() {
}
public String remoteName() {
- InetSocketAddress addr = (InetSocketAddress) chctx.channel().remoteAddress();
- if (addr == null) return null;
- // Use hostString that does not trigger a DNS resolution
- return addr.getHostString();
+ java.net.SocketAddress addr = chctx.channel().remoteAddress();
+ if (addr instanceof InetSocketAddress) {
+ // Use hostString that does not trigger a DNS resolution
+ return ((InetSocketAddress)addr).getHostString();
+ }
+ return null;
}
public SocketAddress remoteAddress() {
- InetSocketAddress addr = (InetSocketAddress) chctx.channel().remoteAddress();
- if (addr == null) return null;
- return new SocketAddressImpl(addr);
+ java.net.SocketAddress addr = chctx.channel().remoteAddress();
+ if (addr != null) {
+ return vertx.transport().convert(addr);
+ }
+ return null;
}
public SocketAddress localAddress() {
- InetSocketAddress addr = (InetSocketAddress) chctx.channel().localAddress();
- if (addr == null) return null;
- return new SocketAddressImpl(addr);
+ java.net.SocketAddress addr = chctx.channel().localAddress();
+ if (addr != null) {
+ return vertx.transport().convert(addr);
+ }
+ return null;
}
public void handleMessage(Object msg) {
diff --git a/src/main/java/io/vertx/core/net/impl/NetClientImpl.java b/src/main/java/io/vertx/core/net/impl/NetClientImpl.java
--- a/src/main/java/io/vertx/core/net/impl/NetClientImpl.java
+++ b/src/main/java/io/vertx/core/net/impl/NetClientImpl.java
@@ -113,7 +113,7 @@ public synchronized NetClient connect(int port, String host, Handler<AsyncResult
@Override
public NetClient connect(int port, String host, String serverName, Handler<AsyncResult<NetSocket>> connectHandler) {
- doConnect(SocketAddress.inetSocketAddress(port, host), serverName, connectHandler != null ? ar -> connectHandler.handle(ar.map(s -> (NetSocket) s)) : null);
+ doConnect(SocketAddress.inetSocketAddress(port, host), serverName, connectHandler);
return this;
}
@@ -180,7 +180,7 @@ protected void doConnect(SocketAddress remoteAddress, String serverName, Handler
applyConnectionOptions(remoteAddress.path() != null, bootstrap);
- ChannelProvider channelProvider = new ChannelProvider(bootstrap, sslHelper, sslHelper.isSSL(), context, options.getProxyOptions());
+ ChannelProvider channelProvider = new ChannelProvider(bootstrap, sslHelper, context, options.getProxyOptions());
Handler<AsyncResult<Channel>> channelHandler = res -> {
if (res.succeeded()) {
@@ -204,7 +204,7 @@ protected void doConnect(SocketAddress remoteAddress, String serverName, Handler
}
};
- channelProvider.connect(remoteAddress, remoteAddress, serverName, channelHandler);
+ channelProvider.connect(remoteAddress, remoteAddress, serverName, sslHelper.isSSL(), channelHandler);
}
private void connected(ContextInternal context, Channel ch, Handler<AsyncResult<NetSocket>> connectHandler, SocketAddress remoteAddress) {
diff --git a/src/main/java/io/vertx/core/net/impl/transport/EpollTransport.java b/src/main/java/io/vertx/core/net/impl/transport/EpollTransport.java
--- a/src/main/java/io/vertx/core/net/impl/transport/EpollTransport.java
+++ b/src/main/java/io/vertx/core/net/impl/transport/EpollTransport.java
@@ -28,6 +28,7 @@
import io.vertx.core.datagram.DatagramSocketOptions;
import io.vertx.core.net.ClientOptionsBase;
import io.vertx.core.net.NetServerOptions;
+import io.vertx.core.net.impl.SocketAddressImpl;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
@@ -77,6 +78,14 @@ public SocketAddress convert(io.vertx.core.net.SocketAddress address, boolean re
}
}
+ @Override
+ public io.vertx.core.net.SocketAddress convert(SocketAddress address) {
+ if (address instanceof DomainSocketAddress) {
+ return new SocketAddressImpl(((DomainSocketAddress) address).path());
+ }
+ return super.convert(address);
+ }
+
@Override
public boolean isAvailable() {
return Epoll.isAvailable();
diff --git a/src/main/java/io/vertx/core/net/impl/transport/KQueueTransport.java b/src/main/java/io/vertx/core/net/impl/transport/KQueueTransport.java
--- a/src/main/java/io/vertx/core/net/impl/transport/KQueueTransport.java
+++ b/src/main/java/io/vertx/core/net/impl/transport/KQueueTransport.java
@@ -22,6 +22,7 @@
import io.netty.channel.unix.DomainSocketAddress;
import io.vertx.core.datagram.DatagramSocketOptions;
import io.vertx.core.net.NetServerOptions;
+import io.vertx.core.net.impl.SocketAddressImpl;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
@@ -48,6 +49,14 @@ public SocketAddress convert(io.vertx.core.net.SocketAddress address, boolean re
}
}
+ @Override
+ public io.vertx.core.net.SocketAddress convert(SocketAddress address) {
+ if (address instanceof DomainSocketAddress) {
+ return new SocketAddressImpl(((DomainSocketAddress) address).path());
+ }
+ return super.convert(address);
+ }
+
@Override
public boolean isAvailable() {
return KQueue.isAvailable();
diff --git a/src/main/java/io/vertx/core/net/impl/transport/Transport.java b/src/main/java/io/vertx/core/net/impl/transport/Transport.java
--- a/src/main/java/io/vertx/core/net/impl/transport/Transport.java
+++ b/src/main/java/io/vertx/core/net/impl/transport/Transport.java
@@ -24,6 +24,7 @@
import io.vertx.core.net.ClientOptionsBase;
import io.vertx.core.net.NetServerOptions;
import io.vertx.core.net.impl.PartialPooledByteBufAllocator;
+import io.vertx.core.net.impl.SocketAddressImpl;
import java.net.InetSocketAddress;
import java.net.NetworkInterface;
@@ -114,6 +115,14 @@ public SocketAddress convert(io.vertx.core.net.SocketAddress address, boolean re
}
}
+ public io.vertx.core.net.SocketAddress convert(SocketAddress address) {
+ if (address instanceof InetSocketAddress) {
+ return new SocketAddressImpl((InetSocketAddress) address);
+ } else {
+ return null;
+ }
+ }
+
/**
* Return a channel option for the given {@code name} or null if that option does not exist
* for this transport.
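The added convert(java.net.SocketAddress) completes the round trip between Netty and Vert.x addresses: the base Transport handles InetSocketAddress, and the Epoll/KQueue overrides additionally map Netty's DomainSocketAddress. A sketch of the combined dispatch, using the public factories instead of SocketAddressImpl; toVertx is a hypothetical name:

import java.net.InetSocketAddress;

import io.netty.channel.unix.DomainSocketAddress;
import io.vertx.core.net.SocketAddress;

final class AddressConversion {
  static SocketAddress toVertx(java.net.SocketAddress addr) {
    if (addr instanceof DomainSocketAddress) {
      return SocketAddress.domainSocketAddress(((DomainSocketAddress) addr).path());
    }
    if (addr instanceof InetSocketAddress) {
      InetSocketAddress inet = (InetSocketAddress) addr;
      // getHostString() avoids the reverse DNS lookup, as in ConnectionBase.remoteName()
      return SocketAddress.inetSocketAddress(inet.getPort(), inet.getHostString());
    }
    return null; // unknown transport-specific address type
  }
}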
diff --git a/src/test/java/io/vertx/core/http/Http1xProxyTest.java b/src/test/java/io/vertx/core/http/Http1xProxyTest.java
new file mode 100644
--- /dev/null
+++ b/src/test/java/io/vertx/core/http/Http1xProxyTest.java
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2011-2017 Contributors to the Eclipse Foundation
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+ * which is available at https://www.apache.org/licenses/LICENSE-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+ */
+package io.vertx.core.http;
+
+import io.vertx.core.net.ProxyOptions;
+import io.vertx.core.net.ProxyType;
+import org.junit.Test;
+
+public class Http1xProxyTest extends HttpTestBase {
+
+ @Test
+ public void testHttpProxyRequest() throws Exception {
+ startProxy(null, ProxyType.HTTP);
+ client = vertx.createHttpClient(new HttpClientOptions()
+ .setProxyOptions(new ProxyOptions().setType(ProxyType.HTTP).setHost("localhost").setPort(proxy.getPort())));
+ testHttpProxyRequest2(client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/"));
+ }
+
+ @Test
+ public void testHttpProxyRequestOverrideClientSsl() throws Exception {
+ startProxy(null, ProxyType.HTTP);
+ client = vertx.createHttpClient(new HttpClientOptions()
+ .setSsl(true).setProxyOptions(new ProxyOptions().setType(ProxyType.HTTP).setHost("localhost").setPort(proxy.getPort())));
+ testHttpProxyRequest2(client.get(new RequestOptions().setSsl(false).setHost("localhost").setPort(8080)));
+ }
+
+ private void testHttpProxyRequest2(HttpClientRequest clientReq) throws Exception {
+ server.requestHandler(req -> {
+ req.response().end();
+ });
+
+ server.listen(onSuccess(s -> {
+ clientReq.handler(resp -> {
+ assertEquals(200, resp.statusCode());
+ assertNotNull("request did not go through proxy", proxy.getLastUri());
+ assertEquals("Host header doesn't contain target host", "localhost:8080", proxy.getLastRequestHeaders().get("Host"));
+ testComplete();
+ });
+ clientReq.exceptionHandler(this::fail);
+ clientReq.end();
+ }));
+ await();
+ }
+
+ @Test
+ public void testHttpProxyRequestAuth() throws Exception {
+ startProxy("user", ProxyType.HTTP);
+
+ client = vertx.createHttpClient(new HttpClientOptions()
+ .setProxyOptions(new ProxyOptions().setType(ProxyType.HTTP).setHost("localhost").setPort(proxy.getPort())
+ .setUsername("user").setPassword("user")));
+
+ server.requestHandler(req -> {
+ req.response().end();
+ });
+
+ server.listen(onSuccess(s -> {
+ client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
+ assertEquals(200, resp.statusCode());
+ assertNotNull("request did not go through proxy", proxy.getLastUri());
+ assertEquals("Host header doesn't contain target host", "localhost:8080", proxy.getLastRequestHeaders().get("Host"));
+ testComplete();
+ }).exceptionHandler(th -> fail(th)).end();
+ }));
+ await();
+ }
+
+ @Test
+ public void testHttpProxyFtpRequest() throws Exception {
+ startProxy(null, ProxyType.HTTP);
+ client = vertx.createHttpClient(new HttpClientOptions()
+ .setProxyOptions(new ProxyOptions().setType(ProxyType.HTTP).setHost("localhost").setPort(proxy.getPort())));
+ final String url = "ftp://ftp.gnu.org/gnu/";
+ proxy.setForceUri("http://localhost:8080/");
+ HttpClientRequest clientReq = client.getAbs(url);
+ server.requestHandler(req -> {
+ req.response().end();
+ });
+
+ server.listen(onSuccess(s -> {
+ clientReq.handler(resp -> {
+ assertEquals(200, resp.statusCode());
+      assertEquals("request did send the expected url", url, proxy.getLastUri());
+ testComplete();
+ });
+ clientReq.exceptionHandler(this::fail);
+ clientReq.end();
+ }));
+ await();
+ }
+
+ @Test
+ public void testHttpSocksProxyRequest() throws Exception {
+ startProxy(null, ProxyType.SOCKS5);
+
+ client = vertx.createHttpClient(new HttpClientOptions()
+ .setProxyOptions(new ProxyOptions().setType(ProxyType.SOCKS5).setHost("localhost").setPort(proxy.getPort())));
+
+ server.requestHandler(req -> {
+ req.response().end();
+ });
+
+ server.listen(onSuccess(s -> {
+ client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
+ assertEquals(200, resp.statusCode());
+ assertNotNull("request did not go through proxy", proxy.getLastUri());
+ testComplete();
+ }).exceptionHandler(th -> fail(th)).end();
+ }));
+ await();
+ }
+
+ @Test
+ public void testHttpSocksProxyRequestAuth() throws Exception {
+ startProxy("user", ProxyType.SOCKS5);
+
+ client = vertx.createHttpClient(new HttpClientOptions()
+ .setProxyOptions(new ProxyOptions().setType(ProxyType.SOCKS5).setHost("localhost").setPort(proxy.getPort())
+ .setUsername("user").setPassword("user")));
+
+ server.requestHandler(req -> {
+ req.response().end();
+ });
+
+ server.listen(onSuccess(s -> {
+ client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
+ assertEquals(200, resp.statusCode());
+ assertNotNull("request did not go through proxy", proxy.getLastUri());
+ testComplete();
+ }).exceptionHandler(th -> fail(th)).end();
+ }));
+ await();
+ }
+}
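As these tests exercise, routing a client through an HTTP proxy is pure configuration, and a request-level RequestOptions.setSsl(false) overrides a client-wide setSsl(true). A condensed usage sketch built from the same calls the tests use; the proxy port 13128 is illustrative:

import io.vertx.core.Vertx;
import io.vertx.core.http.HttpClient;
import io.vertx.core.http.HttpClientOptions;
import io.vertx.core.http.RequestOptions;
import io.vertx.core.net.ProxyOptions;
import io.vertx.core.net.ProxyType;

public class ProxyUsage {
  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();
    HttpClient client = vertx.createHttpClient(new HttpClientOptions()
        .setSsl(true) // client-wide default
        .setProxyOptions(new ProxyOptions()
            .setType(ProxyType.HTTP).setHost("localhost").setPort(13128)));
    // The request-level option wins over the client default:
    client.get(new RequestOptions().setSsl(false).setHost("localhost").setPort(8080))
        .handler(resp -> System.out.println("status " + resp.statusCode()))
        .end();
  }
}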
diff --git a/src/test/java/io/vertx/core/http/Http1xTLSTest.java b/src/test/java/io/vertx/core/http/Http1xTLSTest.java
--- a/src/test/java/io/vertx/core/http/Http1xTLSTest.java
+++ b/src/test/java/io/vertx/core/http/Http1xTLSTest.java
@@ -130,7 +130,7 @@ public void testRedirectFromSSL() throws Exception {
req.response().setStatusCode(303).putHeader("location", "http://" + DEFAULT_HTTP_HOST + ":4043/" + DEFAULT_TEST_URI).end();
});
startServer(redirectServer);
- RequestOptions options = new RequestOptions().setHost(DEFAULT_HTTP_HOST).setURI(DEFAULT_TEST_URI).setPort(4043);
+ RequestOptions options = new RequestOptions().setHost(DEFAULT_HTTP_HOST).setURI(DEFAULT_TEST_URI).setPort(4043).setSsl(false);
testTLS(Cert.NONE, Trust.SERVER_JKS, Cert.NONE, Trust.NONE)
.clientSSL(true)
.serverSSL(false)
diff --git a/src/test/java/io/vertx/core/http/Http1xTest.java b/src/test/java/io/vertx/core/http/Http1xTest.java
--- a/src/test/java/io/vertx/core/http/Http1xTest.java
+++ b/src/test/java/io/vertx/core/http/Http1xTest.java
@@ -28,6 +28,7 @@
import io.vertx.test.core.CheckingSender;
import io.vertx.test.verticles.SimpleServer;
import io.vertx.test.core.TestUtils;
+import org.junit.Assume;
import org.junit.Test;
import java.io.File;
@@ -46,7 +47,6 @@
import static io.vertx.test.core.TestUtils.*;
/**
- *
* @author <a href="http://tfox.org">Tim Fox</a>
* @author <a href="mailto:nscavell@redhat.com">Nick Scavelli</a>
*/
@@ -1105,6 +1105,7 @@ public void testCloseHandlerNotCalledWhenConnectionClosedAfterEnd() throws Excep
@Test
public void testTimedOutWaiterDoesntConnect() throws Exception {
+    Assume.assumeTrue("Domain sockets don't pass this test", testAddress.path() == null);
long responseDelay = 300;
int requests = 6;
client.close();
@@ -1113,10 +1114,10 @@ public void testTimedOutWaiterDoesntConnect() throws Exception {
// Make sure server is closed before continuing
awaitLatch(firstCloseLatch);
- client = vertx.createHttpClient(new HttpClientOptions().setKeepAlive(false).setMaxPoolSize(1));
+ client = vertx.createHttpClient(createBaseClientOptions().setKeepAlive(false).setMaxPoolSize(1));
AtomicInteger requestCount = new AtomicInteger(0);
// We need a net server because we need to intercept the socket connection, not just full http requests
- NetServer server = vertx.createNetServer(new NetServerOptions().setHost(DEFAULT_HTTP_HOST).setPort(DEFAULT_HTTP_PORT));
+ NetServer server = vertx.createNetServer();
server.connectHandler(socket -> {
Buffer content = Buffer.buffer();
AtomicBoolean closed = new AtomicBoolean();
@@ -1137,14 +1138,20 @@ public void testTimedOutWaiterDoesntConnect() throws Exception {
CountDownLatch latch = new CountDownLatch(requests);
- server.listen(onSuccess(s -> {
+ server.listen(testAddress, onSuccess(s -> {
for(int count = 0; count < requests; count++) {
HttpClientRequest req = client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI);
if (count % 2 == 0) {
req.handler(resp -> {
resp.bodyHandler(buff -> {
assertEquals("OK", buff.toString());
latch.countDown();
});
});
@@ -1174,7 +1181,7 @@ public void testTimedOutWaiterDoesntConnect() throws Exception {
@Test
public void testPipeliningOrder() throws Exception {
client.close();
- client = vertx.createHttpClient(new HttpClientOptions().setKeepAlive(true).setPipelining(true).setMaxPoolSize(1));
+ client = vertx.createHttpClient(createBaseClientOptions().setKeepAlive(true).setPipelining(true).setMaxPoolSize(1));
int requests = 100;
AtomicInteger reqCount = new AtomicInteger(0);
@@ -1198,11 +1205,11 @@ public void testPipeliningOrder() throws Exception {
CountDownLatch latch = new CountDownLatch(requests);
- server.listen(onSuccess(s -> {
+ server.listen(testAddress, onSuccess(s -> {
vertx.setTimer(500, id -> {
for (int count = 0; count < requests; count++) {
int theCount = count;
- HttpClientRequest req = client.request(HttpMethod.POST, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ HttpClientRequest req = client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertEquals(theCount, Integer.parseInt(resp.headers().get("count")));
resp.bodyHandler(buff -> {
assertEquals("This is content " + theCount, buff.toString());
@@ -1256,19 +1263,19 @@ public void testPipeliningLimit() throws Exception {
});
});
CountDownLatch listenLatch = new CountDownLatch(1);
- server.listen(onSuccess(v -> {
+ server.listen(testAddress, onSuccess(v -> {
listenLatch.countDown();
}));
awaitLatch(listenLatch);
AtomicInteger responses = new AtomicInteger();
for (int i = 0;i < requests;i++) {
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
assertEquals(200, resp.statusCode());
if (responses.incrementAndGet() == requests) {
testComplete();
}
- });
+ }).end();
}
await();
}
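Most of the remaining hunks in this file apply one mechanical rewrite: listen on testAddress (which the test base can point at either an inet or a domain socket address) and send requests through the SocketAddress-taking request(...) overload, where port and host survive only to shape the Host header. A sketch of the pattern; testAddress is assumed to be supplied by the test base class:

import io.vertx.core.http.HttpClient;
import io.vertx.core.http.HttpMethod;
import io.vertx.core.net.SocketAddress;

final class TestAddressPattern {
  static void get(HttpClient client, SocketAddress testAddress, int port, String host) {
    // Connects to testAddress (inet or domain socket); port and host only
    // determine the request's Host header.
    client.request(HttpMethod.GET, testAddress, port, host, "/somepath",
        resp -> System.out.println("status " + resp.statusCode()))
      .end(); // request(...) is lazy: end() actually sends it
  }
}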
@@ -1282,12 +1289,12 @@ public void testCloseServerConnectionWithPendingMessages() throws Exception {
req.response().close();
});
});
- startServer();
+ startServer(testAddress);
client.close();
- client = vertx.createHttpClient(new HttpClientOptions().setMaxPoolSize(n).setPipelining(true));
+ client = vertx.createHttpClient(createBaseClientOptions().setMaxPoolSize(n).setPipelining(true));
AtomicBoolean completed = new AtomicBoolean();
for (int i = 0;i < n * 2;i++) {
- client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
fail();
}).connectionHandler(conn -> {
conn.closeHandler(v -> {
@@ -1316,7 +1323,7 @@ public void testPipeliningFailure() throws Exception {
req.response().end();
}
});
- startServer();
+ startServer(testAddress);
AtomicInteger succeeded = new AtomicInteger();
List<HttpClientRequest> requests = new CopyOnWriteArrayList<>();
Consumer<HttpClientRequest> checkEnd = req -> {
@@ -1327,14 +1334,15 @@ public void testPipeliningFailure() throws Exception {
}
};
for (int i = 0;i < n * 2;i++) {
- HttpClientRequest req = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/" + i);
- req.handler(resp -> {
+ AtomicReference<HttpClientRequest> ref = new AtomicReference<>();
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/" + i, resp -> {
succeeded.incrementAndGet();
- checkEnd.accept(req);
+ checkEnd.accept(ref.get());
});
req.exceptionHandler(err -> {
checkEnd.accept(req);
}).end();
+ ref.set(req);
requests.add(req);
}
closeFuture.complete(null);
@@ -1359,10 +1367,10 @@ public void testPipeliningPauseRequest() throws Exception {
req.resume();
});
});
- startServer();
+ startServer(testAddress);
AtomicInteger remaining = new AtomicInteger(n);
for (int i = 0;i < n;i++) {
- HttpClientRequest req = client.put(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ HttpClientRequest req = client.request(HttpMethod.PUT, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.endHandler(v -> {
if (remaining.decrementAndGet() == 0) {
testComplete();
@@ -1392,13 +1400,13 @@ public void testServerPipeliningConnectionConcurrency() throws Exception {
}
});
});
- startServer();
+ startServer(testAddress);
Buffer requests = Buffer.buffer();
for (int i = 0;i < n;i++) {
requests.appendString("GET " + DEFAULT_TEST_URI + " HTTP/1.1\r\n\r\n");
}
NetClient client = vertx.createNetClient();
- client.connect(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, onSuccess(so -> {
+ client.connect(testAddress, onSuccess(so -> {
so.closeHandler(v -> testComplete());
so.write(requests);
}));
@@ -1422,7 +1430,7 @@ private void testKeepAlive(boolean keepAlive, int poolSize, int numServers, int
// Make sure server is closed before continuing
awaitLatch(firstCloseLatch);
- client = vertx.createHttpClient(new HttpClientOptions().setKeepAlive(keepAlive).setPipelining(false).setMaxPoolSize(poolSize));
+ client = vertx.createHttpClient(createBaseClientOptions().setKeepAlive(keepAlive).setPipelining(false).setMaxPoolSize(poolSize));
int requests = 100;
// Start the servers
@@ -1435,10 +1443,9 @@ private void testKeepAlive(boolean keepAlive, int poolSize, int numServers, int
connectedServers.add(server);
req.response().end();
});
- server.listen(ar -> {
- assertTrue(ar.succeeded());
+ server.listen(testAddress, onSuccess(s -> {
startServerLatch.countDown();
- });
+ }));
servers[i] = server;
}
@@ -1451,7 +1458,7 @@ private void testKeepAlive(boolean keepAlive, int poolSize, int numServers, int
// number of total connections being > pool size (which is correct)
vertx.runOnContext(v -> {
for (int count = 0; count < requests; count++) {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertEquals(200, resp.statusCode());
reqLatch.countDown();
}).end();
@@ -1465,10 +1472,9 @@ private void testKeepAlive(boolean keepAlive, int poolSize, int numServers, int
CountDownLatch serverCloseLatch = new CountDownLatch(numServers);
for (HttpServer server: servers) {
- server.close(ar -> {
- assertTrue(ar.succeeded());
+ server.close(onSuccess(s -> {
serverCloseLatch.countDown();
- });
+ }));
}
awaitLatch(serverCloseLatch);
@@ -1494,7 +1500,11 @@ private void testPooling(boolean keepAlive, boolean pipelining) {
int numGets = 100;
int maxPoolSize = 10;
client.close();
- client = vertx.createHttpClient(new HttpClientOptions().setKeepAlive(keepAlive).setPipelining(pipelining).setMaxPoolSize(maxPoolSize));
+ client = vertx.createHttpClient(createBaseClientOptions()
+ .setKeepAlive(keepAlive)
+ .setPipelining(pipelining)
+ .setMaxPoolSize(maxPoolSize)
+ );
server.requestHandler(req -> {
String cnt = req.headers().get("count");
@@ -1502,14 +1512,13 @@ private void testPooling(boolean keepAlive, boolean pipelining) {
req.response().end();
});
- AtomicBoolean completeAlready = new AtomicBoolean();
-
- server.listen(onSuccess(s -> {
+ server.listen(testAddress, onSuccess(s -> {
AtomicInteger cnt = new AtomicInteger(0);
for (int i = 0; i < numGets; i++) {
int theCount = i;
- HttpClientRequest req = client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, path, resp -> {
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, path, resp -> {
+ resp.exceptionHandler(this::fail);
assertEquals(200, resp.statusCode());
assertEquals(theCount, Integer.parseInt(resp.headers().get("count")));
if (cnt.incrementAndGet() == numGets) {
@@ -1540,7 +1549,7 @@ public void testPoolingNoKeepAliveAndPipelining() {
public void testMaxWaitQueueSizeIsRespected() throws Exception {
client.close();
- client = vertx.createHttpClient(new HttpClientOptions().setDefaultHost(DEFAULT_HTTP_HOST).setDefaultPort(DEFAULT_HTTP_PORT)
+ client = vertx.createHttpClient(createBaseClientOptions().setDefaultHost(DEFAULT_HTTP_HOST).setDefaultPort(DEFAULT_HTTP_PORT)
.setPipelining(false).setMaxWaitQueueSize(0).setMaxPoolSize(2));
waitFor(3);
@@ -1551,17 +1560,17 @@ public void testMaxWaitQueueSizeIsRespected() throws Exception {
complete();
});
- startServer();
+ startServer(testAddress);
- HttpClientRequest req1 = client.get("/1", resp -> {
+ HttpClientRequest req1 = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/1", resp -> {
fail("Should not be called.");
});
- HttpClientRequest req2 = client.get("/2", resp -> {
+ HttpClientRequest req2 = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/2", resp -> {
fail("Should not be called.");
});
- HttpClientRequest req3 = client.get("/3", resp -> {
+ HttpClientRequest req3 = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/3", resp -> {
fail("Should not be called.");
});
req3.exceptionHandler(t -> {
@@ -1595,8 +1604,8 @@ public void testRequestTimeoutExtendedWhenResponseChunksReceived() {
});
});
- server.listen(onSuccess(s -> {
- HttpClientRequest req = client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertEquals(200, resp.statusCode());
resp.endHandler(v -> testComplete());
});
@@ -1611,7 +1620,7 @@ public void testRequestTimeoutExtendedWhenResponseChunksReceived() {
@Test
public void testServerWebsocketIdleTimeout() {
server.close();
- server = vertx.createHttpServer(new HttpServerOptions().setIdleTimeout(1).setPort(DEFAULT_HTTP_PORT).setHost(DEFAULT_HTTP_HOST));
+ server = vertx.createHttpServer(createBaseServerOptions().setIdleTimeout(1).setPort(DEFAULT_HTTP_PORT).setHost(DEFAULT_HTTP_HOST));
server.websocketHandler(ws -> {}).listen(ar -> {
assertTrue(ar.succeeded());
client.websocket(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", ws -> {
@@ -1640,7 +1649,7 @@ public void testClientWebsocketIdleTimeout() {
public void testSharedServersRoundRobin() throws Exception {
client.close();
server.close();
- client = vertx.createHttpClient(new HttpClientOptions().setKeepAlive(false));
+ client = vertx.createHttpClient(createBaseClientOptions().setKeepAlive(false));
int numServers = VertxOptions.DEFAULT_EVENT_LOOP_POOL_SIZE / 2 - 1;
int numRequests = numServers * 100;
@@ -1670,9 +1679,11 @@ public void testSharedServersRoundRobin() throws Exception {
requestCount.put(theServer, icnt);
latchConns.countDown();
req.response().end();
- }).listen(onSuccess(s -> {
+ }).listen(testAddress, onSuccess(s -> {
+ if (s.actualPort() > 0) {
assertEquals(DEFAULT_HTTP_PORT, s.actualPort());
- latchListen.countDown();
+ }
+ latchListen.countDown();
}));
}
awaitLatch(latchListen);
@@ -1681,7 +1692,7 @@ public void testSharedServersRoundRobin() throws Exception {
// Create a bunch of connections
CountDownLatch latchClient = new CountDownLatch(numRequests);
for (int i = 0; i < numRequests; i++) {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, res -> latchClient.countDown()).end();
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, res -> latchClient.countDown()).end();
}
assertTrue(latchClient.await(10, TimeUnit.SECONDS));
@@ -1766,8 +1777,8 @@ public void testIncorrectHttpVersion() throws Exception {
so.write(Buffer.buffer("HTTP/1.2 200 OK\r\nContent-Length:5\r\n\r\nHELLO"));
so.close();
});
- startServer();
- HttpClientRequest req = client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> fail("Should not be called"));
+ startServer(testAddress);
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> fail("Should not be called"));
AtomicBoolean a = new AtomicBoolean();
req.exceptionHandler(err -> {
if (a.compareAndSet(false, true)) {
@@ -1817,9 +1828,9 @@ public void testHttp11NonPersistentConnectionClosed() throws Exception {
assertTrue(req.response().closed());
});
- server.listen(onSuccess(s -> {
- client = vertx.createHttpClient(new HttpClientOptions().setProtocolVersion(HttpVersion.HTTP_1_1).setKeepAlive(false));
- HttpClientRequest req = client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ client = vertx.createHttpClient(createBaseClientOptions().setProtocolVersion(HttpVersion.HTTP_1_1).setKeepAlive(false));
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.endHandler(v -> {
assertEquals(resp.getHeader("Connection"), "close");
testComplete();
@@ -1842,9 +1853,9 @@ public void testHttp10KeepAliveConnectionNotClosed() throws Exception {
assertFalse(req.response().closed());
});
- server.listen(onSuccess(s -> {
- client = vertx.createHttpClient(new HttpClientOptions().setProtocolVersion(HttpVersion.HTTP_1_0).setKeepAlive(true));
- HttpClientRequest req = client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ client = vertx.createHttpClient(createBaseClientOptions().setProtocolVersion(HttpVersion.HTTP_1_0).setKeepAlive(true));
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.endHandler(v -> {
assertEquals(resp.getHeader("Connection"), "keep-alive");
assertEquals(resp.getHeader("Content-Length"), "0");
@@ -1868,9 +1879,9 @@ public void testHttp10RequestNonKeepAliveConnectionClosed() throws Exception {
assertTrue(req.response().closed());
});
- server.listen(onSuccess(s -> {
- client = vertx.createHttpClient(new HttpClientOptions().setProtocolVersion(HttpVersion.HTTP_1_0).setKeepAlive(false));
- HttpClientRequest req = client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ client = vertx.createHttpClient(createBaseClientOptions().setProtocolVersion(HttpVersion.HTTP_1_0).setKeepAlive(false));
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.endHandler(v -> {
assertNull(resp.getHeader("Connection"));
testComplete();
@@ -1900,11 +1911,11 @@ public void testHttp10ResponseNonKeepAliveConnectionClosed() throws Exception {
}
});
});
- server.listen(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, onSuccess(v1 -> {
+ server.listen(testAddress, onSuccess(v1 -> {
client.close();
- client = vertx.createHttpClient(new HttpClientOptions().setKeepAlive(true).setMaxPoolSize(1));
+ client = vertx.createHttpClient(createBaseClientOptions().setKeepAlive(true).setMaxPoolSize(1));
for (int i = 0;i < 3;i++) {
- client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
resp.endHandler(v2 -> {
complete();
});
@@ -1934,8 +1945,8 @@ public void testAccessNetSocket() throws Exception {
});
});
- server.listen(onSuccess(s -> {
- HttpClientRequest req = client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.endHandler(v -> {
assertNotNull(resp.netSocket());
testComplete();
@@ -1963,10 +1974,10 @@ public void testRequestsTimeoutInQueue() {
client.close();
client = vertx.createHttpClient(new HttpClientOptions().setKeepAlive(false).setMaxPoolSize(1));
- server.listen(onSuccess(s -> {
+ server.listen(testAddress, onSuccess(s -> {
// Add a few requests that should all timeout
for (int i = 0; i < 5; i++) {
- HttpClientRequest req = client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
fail("Should not be called");
});
AtomicBoolean failed = new AtomicBoolean();
@@ -1979,7 +1990,7 @@ public void testRequestsTimeoutInQueue() {
req.end();
}
// Now another request that should not timeout
- HttpClientRequest req = client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertEquals(200, resp.statusCode());
testComplete();
});
@@ -2034,14 +2045,14 @@ public void testClientOptionsCopiedBeforeUse() {
@Test
public void testClientContextWithKeepAlive() throws Exception {
client.close();
- client = vertx.createHttpClient(new HttpClientOptions().setKeepAlive(true).setPipelining(false).setMaxPoolSize(1));
+ client = vertx.createHttpClient(createBaseClientOptions().setKeepAlive(true).setPipelining(false).setMaxPoolSize(1));
testClientContext();
}
@Test
public void testClientContextWithPipelining() throws Exception {
client.close();
- client = vertx.createHttpClient(new HttpClientOptions().setKeepAlive(true).setPipelining(true).setMaxPoolSize(1));
+ client = vertx.createHttpClient(createBaseClientOptions().setKeepAlive(true).setPipelining(true).setMaxPoolSize(1));
testClientContext();
}
@@ -2049,21 +2060,21 @@ private void testClientContext() throws Exception {
server.requestHandler(req -> {
req.response().end();
});
- startServer();
+ startServer(testAddress);
Set<Context> contexts = Collections.synchronizedSet(new HashSet<>());
Set<HttpConnection> connections = Collections.synchronizedSet(new HashSet<>());
Handler<HttpClientResponse> checker = response -> {
contexts.add(Vertx.currentContext());
connections.add(response.request().connection());
};
- HttpClientRequest req1 = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/2");
+ HttpClientRequest req1 = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/2");
req1.handler(checker).exceptionHandler(this::fail);
- HttpClientRequest req2 = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/3");
+ HttpClientRequest req2 = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/3");
req2.handler(checker).exceptionHandler(this::fail);
CompletableFuture<HttpClientRequest> fut = new CompletableFuture<>();
Context ctx = vertx.getOrCreateContext();
ctx.runOnContext(v -> {
- HttpClientRequest req3 = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/4");
+ HttpClientRequest req3 = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/4");
req3.handler(resp -> {
// This should warn in the log (console) as we are called back on the connection context
// and not on the context doing the request
@@ -2100,11 +2111,10 @@ public void testContexts() throws Exception {
});
CountDownLatch latch = new CountDownLatch(1);
AtomicReference<ContextInternal> listenContext = new AtomicReference<>();
- server.listen(ar -> {
- assertTrue(ar.succeeded());
+ server.listen(testAddress, onSuccess(s -> {
listenContext.set(((VertxInternal) vertx).getContext());
latch.countDown();
- });
+ }));
awaitLatch(latch);
CountDownLatch latch2 = new CountDownLatch(1);
int numReqs = 16;
@@ -2113,7 +2123,7 @@ public void testContexts() throws Exception {
client.close();
client = vertx.createHttpClient(new HttpClientOptions().setMaxPoolSize(numConns));
for (int i = 0; i < numReqs; i++) {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
assertEquals(200, resp.statusCode());
contexts.add(((VertxInternal) vertx).getContext());
if (cnt.incrementAndGet() == numReqs) {
@@ -2126,15 +2136,14 @@ public void testContexts() throws Exception {
}
awaitLatch(latch2);
// Close should be in own context
- server.close(ar -> {
- assertTrue(ar.succeeded());
+ server.close(onSuccess(v -> {
ContextInternal closeContext = ((VertxInternal) vertx).getContext();
assertFalse(contexts.contains(closeContext));
assertNotSame(serverRequestContext.get(), closeContext);
assertFalse(contexts.contains(listenContext.get()));
assertSame(serverRequestContext.get(), listenContext.get());
testComplete();
- });
+ }));
server = null;
await();
@@ -2145,8 +2154,8 @@ public void testRequestHandlerNotCalledInvalidRequest() {
server.requestHandler(req -> {
fail();
});
- server.listen(onSuccess(s -> {
- vertx.createNetClient(new NetClientOptions()).connect(8080, "127.0.0.1", onSuccess(socket -> {
+ server.listen(testAddress, onSuccess(s -> {
+ vertx.createNetClient(new NetClientOptions()).connect(testAddress, onSuccess(socket -> {
socket.closeHandler(r -> {
testComplete();
});
@@ -2174,9 +2183,8 @@ public void testInWorker() throws Exception {
public void start() throws Exception {
assertTrue(Vertx.currentContext().isWorkerContext());
assertTrue(Context.isOnWorkerThread());
- HttpServer server1 = vertx.createHttpServer(new HttpServerOptions()
- .setHost(HttpTestBase.DEFAULT_HTTP_HOST).setPort(HttpTestBase.DEFAULT_HTTP_PORT));
- server1.requestHandler(req -> {
+ HttpServer server = vertx.createHttpServer();
+ server.requestHandler(req -> {
assertTrue(Vertx.currentContext().isWorkerContext());
assertTrue(Context.isOnWorkerThread());
Buffer buf = Buffer.buffer();
@@ -2185,11 +2193,11 @@ public void start() throws Exception {
assertEquals("hello", buf.toString());
req.response().end("bye");
});
- }).listen(onSuccess(s -> {
+ }).listen(testAddress, onSuccess(s -> {
assertTrue(Vertx.currentContext().isWorkerContext());
assertTrue(Context.isOnWorkerThread());
HttpClient client = vertx.createHttpClient();
- client.put(HttpTestBase.DEFAULT_HTTP_PORT, HttpTestBase.DEFAULT_HTTP_HOST, "/blah", resp -> {
+ client.request(HttpMethod.PUT, testAddress, HttpTestBase.DEFAULT_HTTP_PORT, HttpTestBase.DEFAULT_HTTP_HOST, "/blah", resp -> {
assertEquals(200, resp.statusCode());
assertTrue(Vertx.currentContext().isWorkerContext());
assertTrue(Context.isOnWorkerThread());
@@ -2223,30 +2231,29 @@ public void testPauseResumeClientResponse() {
}
request.response().end();
});
- server.listen(10000, onSuccess(hs -> {
+ server.listen(testAddress, onSuccess(hs -> {
HttpClient httpClient = vertx.createHttpClient();
- HttpClientRequest clientRequest = httpClient.get(10000, "localhost", "/");
- clientRequest.handler(resp -> {
- resp.handler(b -> {
- readBuffer.appendBuffer(b);
- for (int i = 0; i < 64; i++) {
- vertx.setTimer(1, n -> {
- try {
- Thread.sleep(0);
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
+ HttpClientRequest clientRequest = httpClient.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI,
+ resp -> {
+ resp.handler(b -> {
+ readBuffer.appendBuffer(b);
+ for (int i = 0; i < 64; i++) {
+ vertx.setTimer(1, n -> {
+ try {
+ Thread.sleep(0);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ });
+ }
+ resp.endHandler(v -> {
+ byte[] expectedData = buffer.getBytes();
+ byte[] actualData = readBuffer.getBytes();
+ assertTrue(Arrays.equals(expectedData, actualData));
+ testComplete();
});
- }
- ;
- resp.endHandler(v -> {
- byte[] expectedData = buffer.getBytes();
- byte[] actualData = readBuffer.getBytes();
- assertTrue(Arrays.equals(expectedData, actualData));
- testComplete();
});
});
- });
clientRequest.end();
}));
await();
@@ -2280,8 +2287,8 @@ public void testPauseResumeServerRequestFromAnotherThread() throws Exception {
testComplete();
});
});
- startServer();
- HttpClientRequest req = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTPS_HOST, "/", resp -> fail());
+ startServer(testAddress);
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTPS_HOST, "/", resp -> fail());
req.setChunked(true);
for (int i = 0; i < buffer.length() / 8192; i++) {
req.write(buffer.slice(i * 8192, (i + 1) * 8192));
@@ -2298,18 +2305,18 @@ public void testEndServerResponseResumeTheConnection() throws Exception {
req.response().end();
});
});
- startServer();
+ startServer(testAddress);
client.close();
waitFor(2);
client = vertx.createHttpClient(new HttpClientOptions().setKeepAlive(true).setMaxPoolSize(1));
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertEquals(200, resp.statusCode());
complete();
- });
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ }).end();
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertEquals(200, resp.statusCode());
complete();
- });
+ }).end();
await();
}
@@ -2321,15 +2328,15 @@ public void testEndServerRequestResumeTheConnection() throws Exception {
req.pause();
});
});
- startServer();
+ startServer(testAddress);
client.close();
waitFor(2);
- client = vertx.createHttpClient(new HttpClientOptions().setKeepAlive(true).setMaxPoolSize(1));
- client.put(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ client = vertx.createHttpClient(createBaseClientOptions().setKeepAlive(true).setMaxPoolSize(1));
+ client.request(HttpMethod.PUT, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertEquals(200, resp.statusCode());
complete();
}).end("1");
- client.put(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ client.request(HttpMethod.PUT, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertEquals(200, resp.statusCode());
complete();
}).end("2");
@@ -2343,12 +2350,11 @@ public void testMultipleRecursiveCallsAndPipelining() throws Exception {
server.requestHandler(x -> {
x.response().end("hello");
})
- .listen(8080, r -> {
+ .listen(testAddress, r -> {
if (r.succeeded()) {
- HttpClient client = vertx.createHttpClient(new HttpClientOptions()
+ HttpClient client = vertx.createHttpClient(createBaseClientOptions()
.setKeepAlive(true)
.setPipelining(true)
- .setDefaultPort(8080)
);
IntStream.range(0, 5).forEach(i -> recursiveCall(client, receivedRequests, sendRequests));
}
@@ -2357,14 +2363,14 @@ public void testMultipleRecursiveCallsAndPipelining() throws Exception {
}
private void recursiveCall(HttpClient client, AtomicInteger receivedRequests, int sendRequests){
- client.getNow("/", r -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTPS_HOST, DEFAULT_TEST_URI, r -> {
int numRequests = receivedRequests.incrementAndGet();
if (numRequests == sendRequests) {
testComplete();
} else if (numRequests < sendRequests) {
recursiveCall(client, receivedRequests, sendRequests);
}
- });
+ }).end();
}
@Test
@@ -2386,23 +2392,21 @@ private void testUnsupported(String rawReq, boolean method) throws Exception {
// OK
}
})
- .listen(8080, r -> {
- if (r.succeeded()) {
- NetClient client = vertx.createNetClient();
- // Send a raw request
- client.connect(8080, "localhost", onSuccess(conn -> {
- conn.write(rawReq);
- Buffer respBuff = Buffer.buffer();
- conn.handler(respBuff::appendBuffer);
- conn.closeHandler(v -> {
- // Server should automatically close it after sending back 501
- assertTrue(respBuff.toString().contains("501 Not Implemented"));
- client.close();
- testComplete();
- });
- }));
- }
- });
+ .listen(testAddress, onSuccess(s -> {
+ NetClient client = vertx.createNetClient();
+ // Send a raw request
+ client.connect(testAddress, onSuccess(conn -> {
+ conn.write(rawReq);
+ Buffer respBuff = Buffer.buffer();
+ conn.handler(respBuff::appendBuffer);
+ conn.closeHandler(v -> {
+ // Server should automatically close it after sending back 501
+ assertTrue(respBuff.toString().contains("501 Not Implemented"));
+ client.close();
+ testComplete();
+ });
+ }));
+ }));
await();
}
@@ -2414,24 +2418,27 @@ public void testTwoServersDifferentEventLoopsCloseOne() throws Exception {
server.requestHandler(req -> {
server1Count.incrementAndGet();
req.response().end();
- }).listen(8080, onSuccess(s -> {
+ }).listen(testAddress, onSuccess(s -> {
latch1.countDown();
}));
HttpServer server2 = vertx.createHttpServer().requestHandler(req -> {
server2Count.incrementAndGet();
req.response().end();
- }).listen(8080, onSuccess(s -> {
+ }).listen(testAddress, onSuccess(s -> {
latch1.countDown();
}));
awaitLatch(latch1);
- HttpClient client = vertx.createHttpClient(new HttpClientOptions().setKeepAlive(false).setDefaultPort(8080));
+ HttpClient[] clients = new HttpClient[5];
+ for (int i = 0;i < 5;i++) {
+ clients[i] = vertx.createHttpClient(createBaseClientOptions());
+ }
for (int i = 0; i < 2; i++) {
CountDownLatch latch2 = new CountDownLatch(1);
- client.getNow("/", resp -> {
+ clients[i].request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertEquals(200, resp.statusCode());
latch2.countDown();
- });
+ }).end();
awaitLatch(latch2);
}
@@ -2444,10 +2451,10 @@ public void testTwoServersDifferentEventLoopsCloseOne() throws Exception {
// Send some more requests
for (int i = 0; i < 2; i++) {
CountDownLatch latch2 = new CountDownLatch(1);
- client.getNow("/", resp -> {
+ clients[2 + i].request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertEquals(200, resp.statusCode());
latch2.countDown();
- });
+ }).end();
awaitLatch(latch2);
}
@@ -2468,11 +2475,11 @@ public void testSetWriteQueueMaxSize() throws Exception {
resp.setWriteQueueMaxSize(128 * 1024);
resp.setWriteQueueMaxSize(129 * 1024);
resp.end();
- }).listen(8080, onSuccess(s -> {
- client.getNow(8080, "localhost", "/", resp -> {
+ }).listen(testAddress, onSuccess(s -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertEquals(200, resp.statusCode());
testComplete();
- });
+ }).end();
}));
await();
}
@@ -2491,24 +2498,24 @@ public void testServerMaxInitialLineLengthOption() {
private void testServerMaxInitialLineLength(int maxInitialLength) {
String longParam = TestUtils.randomAlphaString(5000);
server.close();
- server = vertx.createHttpServer(new HttpServerOptions().setMaxInitialLineLength(maxInitialLength)
- .setHost("localhost").setPort(8080)).requestHandler(req -> {
+ server = vertx.createHttpServer(createBaseServerOptions().setMaxInitialLineLength(maxInitialLength))
+ .requestHandler(req -> {
assertEquals(req.getParam("t"), longParam);
req.response().end();
- }).listen(onSuccess(res -> {
- HttpClientRequest req = vertx.createHttpClient(new HttpClientOptions())
- .request(HttpMethod.GET, 8080, "localhost", "/?t=" + longParam);
- req.handler(resp -> {
- if (maxInitialLength > HttpServerOptions.DEFAULT_MAX_INITIAL_LINE_LENGTH) {
- assertEquals(200, resp.statusCode());
- testComplete();
- } else {
- assertEquals(414, resp.statusCode());
- req.connection().closeHandler(v -> {
- testComplete();
- });
- }
- });
+ }).listen(testAddress, onSuccess(res -> {
+ HttpClientRequest req = vertx.createHttpClient()
+ .request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/?t=" + longParam,
+ resp -> {
+ if (maxInitialLength > HttpServerOptions.DEFAULT_MAX_INITIAL_LINE_LENGTH) {
+ assertEquals(200, resp.statusCode());
+ testComplete();
+ } else {
+ assertEquals(414, resp.statusCode());
+ resp.request().connection().closeHandler(v -> {
+ testComplete();
+ });
+ }
+ });
req.end();
}));
await();
@@ -2533,9 +2540,9 @@ public void testClientMaxInitialLineLengthOption() {
// 5017 = 5000 for longParam and 17 for the rest in the following line - "GET /?t=longParam HTTP/1.1"
try {
- server.listen(DEFAULT_HTTP_PORT, DEFAULT_HTTPS_HOST, onSuccess(v -> {
+ server.listen(testAddress, onSuccess(v -> {
vertx.createHttpClient(new HttpClientOptions().setMaxInitialLineLength(6000))
- .request(HttpMethod.GET, 8080, "localhost", "/?t=" + longParam, resp -> {
+ .request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/?t=" + longParam, resp -> {
resp.bodyHandler(body -> {
assertEquals("0123456789", body.toString());
testComplete();
@@ -2554,12 +2561,12 @@ public void testClientMaxHeaderSizeOption() {
String longHeader = TestUtils.randomAlphaString(9000);
// min 9023 = 9000 for longHeader and 23 for "Content-Length: 0 t: "
- vertx.createHttpServer(new HttpServerOptions().setHost(DEFAULT_HTTP_HOST).setPort(DEFAULT_HTTP_PORT)).requestHandler(req -> {
+ vertx.createHttpServer(createBaseServerOptions()).requestHandler(req -> {
// Add longHeader
req.response().putHeader("t", longHeader).end();
- }).listen(onSuccess(res -> {
+ }).listen(testAddress, onSuccess(res -> {
HttpClientRequest req = vertx.createHttpClient(new HttpClientOptions().setMaxHeaderSize(10000))
- .request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
+ .request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertEquals(200, resp.statusCode());
assertEquals(resp.getHeader("t"), longHeader);
testComplete();
@@ -2585,24 +2592,24 @@ private void testServerMaxHeaderSize(int maxHeaderSize) {
String longHeader = TestUtils.randomAlphaString(9000);
- vertx.createHttpServer(new HttpServerOptions().setMaxHeaderSize(maxHeaderSize)
- .setHost("localhost").setPort(8080)).requestHandler(req -> {
+ vertx.createHttpServer(createBaseServerOptions().setMaxHeaderSize(maxHeaderSize))
+ .requestHandler(req -> {
assertEquals(req.getHeader("t"), longHeader);
req.response().end();
- }).listen(onSuccess(res -> {
+ }).listen(testAddress, onSuccess(res -> {
HttpClientRequest req = vertx.createHttpClient(new HttpClientOptions())
- .request(HttpMethod.GET, 8080, "localhost", "/");
- req.handler(resp -> {
- if (maxHeaderSize > HttpServerOptions.DEFAULT_MAX_HEADER_SIZE) {
- assertEquals(200, resp.statusCode());
- testComplete();
- } else {
- assertEquals(400, resp.statusCode());
- req.connection().closeHandler(v -> {
- testComplete();
- });
- }
- });
+ .request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI,
+ resp -> {
+ if (maxHeaderSize > HttpServerOptions.DEFAULT_MAX_HEADER_SIZE) {
+ assertEquals(200, resp.statusCode());
+ testComplete();
+ } else {
+ assertEquals(400, resp.statusCode());
+ resp.request().connection().closeHandler(v -> {
+ testComplete();
+ });
+ }
+ });
// Add longHeader
req.putHeader("t", longHeader);
req.end();
@@ -2645,13 +2652,13 @@ public void testInvalidHttpResponse() {
}
}
});
- }).listen(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, onSuccess(s -> {
+ }).listen(testAddress, onSuccess(s -> {
      // We force two pipelined requests to check that the second request does not get stuck after the first one fails
- client = vertx.createHttpClient(new HttpClientOptions().setKeepAlive(true).setPipelining(true).setMaxPoolSize(1));
+ client = vertx.createHttpClient(createBaseClientOptions().setKeepAlive(true).setPipelining(true).setMaxPoolSize(1));
AtomicBoolean fail1 = new AtomicBoolean();
- HttpClientRequest req1 = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
+ HttpClientRequest req1 = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
fail();
}).exceptionHandler(err -> {
if (fail1.compareAndSet(false, true)) {
@@ -2661,7 +2668,7 @@ public void testInvalidHttpResponse() {
});
AtomicBoolean fail2 = new AtomicBoolean();
- HttpClientRequest req2 = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
+ HttpClientRequest req2 = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
fail();
}).exceptionHandler(err -> {
if (fail2.compareAndSet(false, true)) {
@@ -2686,15 +2693,15 @@ public void testHandleInvalid204Response() throws Exception {
server.requestHandler(r -> {
      // Generate an invalid response for the pipe-lined requests
r.response().setChunked(true).setStatusCode(204).end();
- }).listen(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, onSuccess(v1 -> {
+ }).listen(testAddress, onSuccess(v1 -> {
for (int i = 0;i < numReq;i++) {
- HttpClientRequest post = client.post(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath");
AtomicInteger count = new AtomicInteger();
- post.handler(r -> {
- r.endHandler(v2 -> {
- complete();
- });
- }).exceptionHandler(err -> {
+ client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI,
+ r -> {
+ r.endHandler(v2 -> {
+ complete();
+ });
+ }).exceptionHandler(err -> {
if (count.incrementAndGet() == 1) {
complete();
}
@@ -2713,11 +2720,11 @@ public void testConnectionCloseHttp_1_0_NoClose() throws Exception {
AtomicBoolean firstRequest = new AtomicBoolean(true);
socket.handler(RecordParser.newDelimited("\r\n\r\n", buffer -> {
if (firstRequest.getAndSet(false)) {
- socket.write("HTTP/1.0 200 OK\n" + "Content-Type: text/plain\n" + "Content-Length: 4\n"
- + "Connection: keep-alive\n" + "\n" + "xxx\n");
+ socket.write("HTTP/1.0 200 OK\r\n" + "Content-Type: text/plain\r\n" + "Content-Length: 4\r\n"
+ + "Connection: keep-alive\r\n" + "\r\n" + "xxx\n");
} else {
- socket.write("HTTP/1.0 200 OK\n" + "Content-Type: text/plain\n" + "Content-Length: 1\n"
- + "\n" + "\n");
+ socket.write("HTTP/1.0 200 OK\r\n" + "Content-Type: text/plain\r\n" + "Content-Length: 1\r\n"
+ + "\r\n" + "\n");
}
}));
});
@@ -2732,11 +2739,11 @@ public void testConnectionCloseHttp_1_0_Close() throws Exception {
AtomicBoolean firstRequest = new AtomicBoolean(true);
socket.handler(RecordParser.newDelimited("\r\n\r\n", buffer -> {
if (firstRequest.getAndSet(false)) {
- socket.write("HTTP/1.0 200 OK\n" + "Content-Type: text/plain\n" + "Content-Length: 4\n"
- + "Connection: keep-alive\n" + "\n" + "xxx\n");
+ socket.write("HTTP/1.0 200 OK\r\n" + "Content-Type: text/plain\r\n" + "Content-Length: 4\r\n"
+ + "Connection: keep-alive\r\n" + "\n" + "xxx\n");
} else {
- socket.write("HTTP/1.0 200 OK\n" + "Content-Type: text/plain\n" + "Content-Length: 1\n"
- + "\n" + "\n");
+ socket.write("HTTP/1.0 200 OK\r\n" + "Content-Type: text/plain\r\n" + "Content-Length: 1\r\n"
+ + "\r\n" + "\n");
socket.close();
}
}));
@@ -2753,7 +2760,7 @@ public void testConnectionCloseHttp_1_1_NoClose() throws Exception {
+ "\r\n" + "xxx\n");
} else {
socket.write("HTTP/1.1 200 OK\r\n" + "Content-Type: text/plain\r\n" + "Content-Length: 1\r\n"
- + "Connection: close\r\n" + "\r\n" + "\r\n");
+ + "Connection: close\r\n" + "\r\n" + "\n");
}
}));
});
@@ -2768,8 +2775,8 @@ public void testConnectionCloseHttp_1_1_Close() throws Exception {
socket.write("HTTP/1.1 200 OK\r\n" + "Content-Type: text/plain\r\n" + "Content-Length: 3\r\n"
+ "\r\n" + "xxx");
} else {
- socket.write("HTTP/1.1 200 OK\r\n" + "Content-Type: text/plain\r\n" + "Content-Length: 0\r\n"
- + "Connection: close\r\n" + "\r\n");
+ socket.write("HTTP/1.1 200 OK\r\n" + "Content-Type: text/plain\r\n" + "Content-Length: 1\r\n"
+ + "Connection: close\r\n" + "\r\n" + "\n");
socket.close();
}
}));
@@ -2780,6 +2787,8 @@ private void testConnectionClose(
Handler<HttpClientRequest> clientRequest,
Handler<NetSocket> connectHandler
) throws Exception {
+ // Cannot reliably pass due to https://github.com/netty/netty/issues/9113
+ Assume.assumeTrue(testAddress.path() == null);
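+    // testAddress.path() is non-null only for Unix domain socket addresses, which this test skips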
client.close();
server.close();
@@ -2787,7 +2796,7 @@ private void testConnectionClose(
NetServerOptions serverOptions = new NetServerOptions();
CountDownLatch serverLatch = new CountDownLatch(1);
- vertx.createNetServer(serverOptions).connectHandler(connectHandler).listen(8080, result -> {
+ vertx.createNetServer(serverOptions).connectHandler(connectHandler).listen(testAddress, result -> {
if (result.succeeded()) {
serverLatch.countDown();
} else {
@@ -2799,7 +2808,6 @@ private void testConnectionClose(
HttpClientOptions clientOptions = new HttpClientOptions()
.setDefaultHost("localhost")
- .setDefaultPort(8080)
.setKeepAlive(true)
.setPipelining(false);
client = vertx.createHttpClient(clientOptions);
@@ -2808,7 +2816,7 @@ private void testConnectionClose(
AtomicInteger count = new AtomicInteger(requests);
for (int i = 0; i < requests; i++) {
- HttpClientRequest req = client.get("/", resp -> {
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.handler(buffer -> {
// Should check
});
@@ -2817,12 +2825,8 @@ private void testConnectionClose(
complete();
}
});
- resp.exceptionHandler(th -> {
- fail();
- });
- }).exceptionHandler(th -> {
- fail();
- });
+ resp.exceptionHandler(this::fail);
+ }).exceptionHandler(this::fail);
clientRequest.handle(req);
}
@@ -2836,26 +2840,21 @@ public void testDontReuseConnectionWhenResponseEndsDuringAnOngoingRequest() thro
server.requestHandler(req -> {
req.response().end();
});
- CountDownLatch serverLatch = new CountDownLatch(1);
- server.listen(ar -> {
- assertTrue(ar.succeeded());
- serverLatch.countDown();
- });
+ startServer(testAddress);
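+    // startServer(testAddress) waits for the listen to complete, replacing the manual latch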
- awaitLatch(serverLatch);
- HttpClientRequest req1 = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/");
- req1.handler(resp -> {
- resp.endHandler(v1 -> {
- // End request after the response ended
- vertx.setTimer(100, v2 -> {
- req1.end();
+ HttpClientRequest req1 = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI,
+ resp -> {
+ resp.endHandler(v1 -> {
+ // End request after the response ended
+ vertx.setTimer(100, v2 -> {
+ resp.request().end();
+ });
});
});
- });
// Send head to the server and trigger the request handler
req1.sendHead();
- client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
testComplete();
}).end();
@@ -2872,28 +2871,28 @@ public void testRecyclePipelinedConnection() throws Exception {
req.response().end();
doneLatch.countDown();
});
- server.listen(onSuccess(s -> {
+ server.listen(testAddress, onSuccess(s -> {
listenLatch.countDown();
}));
awaitLatch(listenLatch);
client.close();
- client = vertx.createHttpClient(new HttpClientOptions().setMaxPoolSize(1).setPipelining(true).setKeepAlive(true));
+ client = vertx.createHttpClient(createBaseClientOptions().setMaxPoolSize(1).setPipelining(true).setKeepAlive(true));
AtomicInteger connCount = new AtomicInteger();
client.connectionHandler(conn -> connCount.incrementAndGet());
- HttpClientRequest req = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/first", resp -> {
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/first", resp -> {
fail();
});
// Force connect
req.sendHead(v -> {});
req.reset();
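+    // Resetting before any response arrives must leave the pooled connection usable for the following pipelined requests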
CountDownLatch respLatch = new CountDownLatch(2);
- client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/second", resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/second", resp -> {
assertEquals(200, resp.statusCode());
resp.endHandler(v -> {
respLatch.countDown();
});
}).exceptionHandler(this::fail).end();
- client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/third", resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/third", resp -> {
assertEquals(200, resp.statusCode());
resp.endHandler(v -> {
respLatch.countDown();
@@ -2912,10 +2911,8 @@ public void testClientConnectionExceptionHandler() throws Exception {
NetSocket so = req.netSocket();
so.write(Buffer.buffer(TestUtils.randomAlphaString(40) + "\r\n"));
});
- CountDownLatch listenLatch = new CountDownLatch(1);
- server.listen(onSuccess(s -> listenLatch.countDown()));
- awaitLatch(listenLatch);
- HttpClientRequest req = client.post(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
+ startServer(testAddress);
+ HttpClientRequest req = client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
});
req.connectionHandler(conn -> {
conn.exceptionHandler(err -> {
@@ -2937,17 +2934,15 @@ public void testServerConnectionExceptionHandler() throws Exception {
server.requestHandler(req -> {
req.response().end();
});
- CountDownLatch listenLatch = new CountDownLatch(1);
- server.listen(onSuccess(s -> listenLatch.countDown()));
- awaitLatch(listenLatch);
+ startServer(testAddress);
client.close();
- client = vertx.createHttpClient(new HttpClientOptions().setMaxPoolSize(1));
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp1 -> {
- HttpClientRequest req = client.post(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp2 -> {
+ client = vertx.createHttpClient(createBaseClientOptions().setMaxPoolSize(1));
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp1 -> {
+ HttpClientRequest req = client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp2 -> {
});
req.putHeader("the_header", TestUtils.randomAlphaString(10000));
req.sendHead();
- });
+ }).end();
await();
}
@@ -2960,10 +2955,8 @@ public void testServerExceptionHandler() throws Exception {
server.requestHandler(req -> {
fail();
});
- CountDownLatch listenLatch = new CountDownLatch(1);
- server.listen(onSuccess(s -> listenLatch.countDown()));
- awaitLatch(listenLatch);
- HttpClientRequest req = client.post(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
+ startServer(testAddress);
+ HttpClientRequest req = client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
});
req.putHeader("the_header", TestUtils.randomAlphaString(10000));
req.sendHead();
@@ -2971,137 +2964,7 @@ public void testServerExceptionHandler() throws Exception {
}
@Test
- public void testHttpProxyRequest() throws Exception {
- startProxy(null, ProxyType.HTTP);
- client.close();
- client = vertx.createHttpClient(new HttpClientOptions()
- .setProxyOptions(new ProxyOptions().setType(ProxyType.HTTP).setHost("localhost").setPort(proxy.getPort())));
- testHttpProxyRequest2(client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/"));
- }
-
- @Test
- public void testHttpProxyRequestOverrideClientSsl() throws Exception {
- startProxy(null, ProxyType.HTTP);
- client.close();
- client = vertx.createHttpClient(new HttpClientOptions()
- .setSsl(true).setProxyOptions(new ProxyOptions().setType(ProxyType.HTTP).setHost("localhost").setPort(proxy.getPort())));
- testHttpProxyRequest2(client.get(new RequestOptions().setSsl(false).setHost("localhost").setPort(8080)));
- }
-
- private void testHttpProxyRequest2(HttpClientRequest clientReq) throws Exception {
- server.requestHandler(req -> {
- req.response().end();
- });
-
- server.listen(onSuccess(s -> {
- clientReq.handler(resp -> {
- assertEquals(200, resp.statusCode());
- assertNotNull("request did not go through proxy", proxy.getLastUri());
- assertEquals("Host header doesn't contain target host", "localhost:8080", proxy.getLastRequestHeaders().get("Host"));
- testComplete();
- });
- clientReq.exceptionHandler(this::fail);
- clientReq.end();
- }));
- await();
- }
-
- @Test
- public void testHttpProxyRequestAuth() throws Exception {
- startProxy("user", ProxyType.HTTP);
-
- client.close();
- client = vertx.createHttpClient(new HttpClientOptions()
- .setProxyOptions(new ProxyOptions().setType(ProxyType.HTTP).setHost("localhost").setPort(proxy.getPort())
- .setUsername("user").setPassword("user")));
-
- server.requestHandler(req -> {
- req.response().end();
- });
-
- server.listen(onSuccess(s -> {
- client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
- assertEquals(200, resp.statusCode());
- assertNotNull("request did not go through proxy", proxy.getLastUri());
- assertEquals("Host header doesn't contain target host", "localhost:8080", proxy.getLastRequestHeaders().get("Host"));
- testComplete();
- }).exceptionHandler(th -> fail(th)).end();
- }));
- await();
- }
-
- @Test
- public void testHttpProxyFtpRequest() throws Exception {
- startProxy(null, ProxyType.HTTP);
- client.close();
- client = vertx.createHttpClient(new HttpClientOptions()
- .setProxyOptions(new ProxyOptions().setType(ProxyType.HTTP).setHost("localhost").setPort(proxy.getPort())));
- final String url = "ftp://ftp.gnu.org/gnu/";
- proxy.setForceUri("http://localhost:8080/");
- HttpClientRequest clientReq = client.getAbs(url);
- server.requestHandler(req -> {
- req.response().end();
- });
-
- server.listen(onSuccess(s -> {
- clientReq.handler(resp -> {
- assertEquals(200, resp.statusCode());
- assertEquals("request did sent the expected url", url, proxy.getLastUri());
- testComplete();
- });
- clientReq.exceptionHandler(this::fail);
- clientReq.end();
- }));
- await();
- }
-
- @Test
- public void testHttpSocksProxyRequest() throws Exception {
- startProxy(null, ProxyType.SOCKS5);
-
- client.close();
- client = vertx.createHttpClient(new HttpClientOptions()
- .setProxyOptions(new ProxyOptions().setType(ProxyType.SOCKS5).setHost("localhost").setPort(proxy.getPort())));
-
- server.requestHandler(req -> {
- req.response().end();
- });
-
- server.listen(onSuccess(s -> {
- client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
- assertEquals(200, resp.statusCode());
- assertNotNull("request did not go through proxy", proxy.getLastUri());
- testComplete();
- }).exceptionHandler(th -> fail(th)).end();
- }));
- await();
- }
-
- @Test
- public void testHttpSocksProxyRequestAuth() throws Exception {
- startProxy("user", ProxyType.SOCKS5);
-
- client.close();
- client = vertx.createHttpClient(new HttpClientOptions()
- .setProxyOptions(new ProxyOptions().setType(ProxyType.SOCKS5).setHost("localhost").setPort(proxy.getPort())
- .setUsername("user").setPassword("user")));
-
- server.requestHandler(req -> {
- req.response().end();
- });
-
- server.listen(onSuccess(s -> {
- client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
- assertEquals(200, resp.statusCode());
- assertNotNull("request did not go through proxy", proxy.getLastUri());
- testComplete();
- }).exceptionHandler(th -> fail(th)).end();
- }));
- await();
- }
-
- @Test
- public void testRandomPorts() throws Exception {
+ public void testRandomPorts() {
int numServers = 10;
Set<Integer> ports = Collections.synchronizedSet(new HashSet<>());
AtomicInteger count = new AtomicInteger();
@@ -3136,9 +2999,9 @@ public void testContentDecompression() throws Exception {
});
});
- server.listen(onSuccess(server -> {
+ server.listen(testAddress, onSuccess(server -> {
client
- .request(HttpMethod.POST, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "some-uri", resp -> testComplete())
+ .request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "some-uri", resp -> testComplete())
.putHeader("Content-Encoding", "gzip")
.end(Buffer.buffer(dataGzipped));
}));
@@ -3182,24 +3045,24 @@ private void testResetClientRequestNotYetSent(boolean keepAlive, boolean pipelin
});
});
CountDownLatch latch = new CountDownLatch(1);
- server.listen(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, onSuccess(v -> {
+ server.listen(testAddress, onSuccess(v -> {
latch.countDown();
}));
awaitLatch(latch);
client.close();
- client = vertx.createHttpClient(new HttpClientOptions().setMaxPoolSize(1).setKeepAlive(keepAlive).setPipelining(pipelined));
- HttpClientRequest post = client.post(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
- fail();
- });
+ client = vertx.createHttpClient(createBaseClientOptions().setMaxPoolSize(1).setKeepAlive(keepAlive).setPipelining(pipelined));
// There might be a race between the request write and the request reset
// so we do it on the context thread to avoid it
vertx.runOnContext(v -> {
+ HttpClientRequest post = client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ fail();
+ });
post.setChunked(true).write(TestUtils.randomBuffer(1024));
assertTrue(post.reset());
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertEquals(1, numReq.get());
complete();
- });
+ }).end();
});
await();
} finally {
@@ -3235,14 +3098,14 @@ public void testResetKeepAliveClientRequest() throws Exception {
});
});
CountDownLatch listenLatch = new CountDownLatch(1);
- server.listen(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, onSuccess(v -> {
+ server.listen(testAddress, onSuccess(v -> {
listenLatch.countDown();
}));
awaitLatch(listenLatch);
client.close();
client = vertx.createHttpClient(new HttpClientOptions().setMaxPoolSize(1).setPipelining(false).setKeepAlive(true));
AtomicInteger status = new AtomicInteger();
- HttpClientRequest req1 = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
+ HttpClientRequest req1 = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
assertEquals(0, status.getAndIncrement());
});
req1.connectionHandler(conn -> {
@@ -3252,7 +3115,7 @@ public void testResetKeepAliveClientRequest() throws Exception {
});
});
req1.end();
- HttpClientRequest req2 = client.post(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
+ HttpClientRequest req2 = client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
fail();
});
req2.sendHead(v -> {
@@ -3297,13 +3160,13 @@ public void testResetPipelinedClientRequest() throws Exception {
});
try {
CountDownLatch listenLatch = new CountDownLatch(1);
- server.listen(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, onSuccess(v -> {
+ server.listen(testAddress, onSuccess(v -> {
listenLatch.countDown();
}));
awaitLatch(listenLatch);
client.close();
- client = vertx.createHttpClient(new HttpClientOptions().setMaxPoolSize(1).setPipelining(true).setKeepAlive(true));
- HttpClientRequest req1 = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
+ client = vertx.createHttpClient(createBaseClientOptions().setMaxPoolSize(1).setPipelining(true).setKeepAlive(true));
+ HttpClientRequest req1 = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
      // We may or may not receive the response
});
req1.connectionHandler(conn -> {
@@ -3312,7 +3175,7 @@ public void testResetPipelinedClientRequest() throws Exception {
});
});
req1.end();
- HttpClientRequest req2 = client.post(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
+ HttpClientRequest req2 = client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
fail();
});
req2.sendHead();
@@ -3380,24 +3243,24 @@ private void testCloseTheConnectionAfterResetPersistentClientRequest(boolean pip
}
});
CountDownLatch listenLatch = new CountDownLatch(1);
- server.listen(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, onSuccess(v -> {
+ server.listen(testAddress, onSuccess(v -> {
listenLatch.countDown();
}));
awaitLatch(listenLatch);
client.close();
client = vertx.createHttpClient(new HttpClientOptions().setMaxPoolSize(1).setPipelining(pipelined).setKeepAlive(true));
- HttpClientRequest req1 = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
+ HttpClientRequest req1 = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
fail();
});
if (pipelined) {
req1.connectionHandler(conn -> conn.closeHandler(v2 -> {
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
assertEquals(200, resp.statusCode());
resp.bodyHandler(body -> {
assertEquals("Hello world", body.toString());
complete();
});
- });
+ }).end();
}));
req1.sendHead(v -> {
assertTrue(req1.reset());
@@ -3406,13 +3269,13 @@ private void testCloseTheConnectionAfterResetPersistentClientRequest(boolean pip
req1.sendHead(v -> {
assertTrue(req1.reset());
});
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
assertEquals(200, resp.statusCode());
resp.bodyHandler(body -> {
assertEquals("Hello world", body.toString());
complete();
});
- });
+ }).end();
}
await();
} finally {
@@ -3481,44 +3344,43 @@ private void testCloseTheConnectionAfterResetPersistentClientResponse(boolean pi
}
});
CountDownLatch listenLatch = new CountDownLatch(1);
- server.listen(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, onSuccess(v -> {
+ server.listen(testAddress, onSuccess(v -> {
listenLatch.countDown();
}));
awaitLatch(listenLatch);
client.close();
- client = vertx.createHttpClient(new HttpClientOptions().setMaxPoolSize(1).setPipelining(pipelined).setKeepAlive(true));
- HttpClientRequest req1 = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath");
+ client = vertx.createHttpClient(createBaseClientOptions().setMaxPoolSize(1).setPipelining(pipelined).setKeepAlive(true));
if (pipelined) {
- req1.handler(resp1 -> {
- resp1.handler(buff -> {
- req1.reset();
- // Since we pipeline we must be sure that the first request is closed before running a new one
- req1.connection().closeHandler(v -> {
- client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
- assertEquals(200, resp.statusCode());
- resp.bodyHandler(body -> {
- assertEquals("Hello world", body.toString());
- complete();
- });
- }).end();
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath",
+ resp1 -> {
+ resp1.handler(buff -> {
+ resp1.request().reset();
+ // Since we pipeline we must be sure that the first request is closed before running a new one
+ resp1.request().connection().closeHandler(v -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
+ assertEquals(200, resp.statusCode());
+ resp.bodyHandler(body -> {
+ assertEquals("Hello world", body.toString());
+ complete();
+ });
+ }).end();
+ });
});
- });
- });
- req1.end();
+ }).end();
} else {
- req1.handler(resp -> {
- resp.handler(buff -> {
- req1.reset();
- });
- });
- req1.end();
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath",
+ resp -> {
+ resp.handler(buff -> {
+ resp.request().reset();
+ });
+ }).end();
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
assertEquals(200, resp.statusCode());
resp.bodyHandler(body -> {
assertEquals("Hello world", body.toString());
complete();
});
- });
+ }).end();
}
await();
} finally {
@@ -3589,25 +3451,25 @@ private void testCloseTheConnectionAfterResetBeforeResponseReceived(boolean pipe
}
});
CountDownLatch listenLatch = new CountDownLatch(1);
- server.listen(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, onSuccess(v -> {
+ server.listen(testAddress, onSuccess(v -> {
listenLatch.countDown();
}));
awaitLatch(listenLatch);
client.close();
- client = vertx.createHttpClient(new HttpClientOptions().setMaxPoolSize(1).setPipelining(pipelined).setKeepAlive(true));
- HttpClientRequest req1 = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/1");
+ client = vertx.createHttpClient(createBaseClientOptions().setMaxPoolSize(1).setPipelining(pipelined).setKeepAlive(true));
+ HttpClientRequest req1 = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/1");
if (pipelined) {
requestReceived.thenAccept(v -> {
req1.reset();
});
req1.connectionHandler(conn -> conn.closeHandler(v2 -> {
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/2", resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/2", resp -> {
assertEquals(200, resp.statusCode());
resp.bodyHandler(body -> {
assertEquals("Hello world", body.toString());
complete();
});
- });
+ }).end();
}));
req1.handler(resp1 -> fail());
req1.end();
@@ -3617,13 +3479,13 @@ private void testCloseTheConnectionAfterResetBeforeResponseReceived(boolean pipe
});
req1.handler(resp -> fail());
req1.end();
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/2", resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/2", resp -> {
assertEquals(200, resp.statusCode());
resp.bodyHandler(body -> {
assertEquals("Hello world", body.toString());
complete();
});
- });
+ }).end();
}
await();
} finally {
@@ -3642,10 +3504,10 @@ public void testTooLongContentInHttpServerRequest() throws Exception {
testComplete();
});
});
- startServer();
+ startServer(testAddress);
NetClient client = vertx.createNetClient();
try {
- client.connect(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, onSuccess(so -> {
+ client.connect(testAddress, onSuccess(so -> {
so.write("POST / HTTP/1.1\r\nContent-Length: 4\r\n\r\ntoolong\r\n");
}));
await();
@@ -3691,9 +3553,9 @@ private void testHttpServerRequestDecodeError(Handler<NetSocket> bodySender, Han
req.exceptionHandler(handler);
bodySender.handle(current.get());
});
- startServer();
+ startServer(testAddress);
NetClient client = vertx.createNetClient();
- client.connect(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, onSuccess(so -> {
+ client.connect(testAddress, onSuccess(so -> {
current.set(so);
so.write("POST /somepath HTTP/1.1\r\n");
so.write("Transfer-Encoding: chunked\r\n");
@@ -3755,10 +3617,10 @@ public void testInvalidTrailersInHttpClientResponse() throws Exception {
}
private void testHttpClientResponseDecodeError(Handler<Throwable> errorHandler) throws Exception {
- startServer();
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
+ startServer(testAddress);
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
resp.exceptionHandler(errorHandler);
- });
+ }).end();
await();
}
@@ -3769,14 +3631,14 @@ public void testRequestTimeoutIsNotDelayedAfterResponseIsReceived() throws Excep
server.requestHandler(req -> {
req.response().end();
});
- startServer();
+ startServer(testAddress);
vertx.deployVerticle(new AbstractVerticle() {
@Override
public void start() throws Exception {
HttpClient client = vertx.createHttpClient(new HttpClientOptions().setMaxPoolSize(n));
for (int i = 0;i < n;i++) {
AtomicBoolean responseReceived = new AtomicBoolean();
- client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
try {
Thread.sleep(150);
} catch (InterruptedException e) {
@@ -3883,10 +3745,10 @@ public void testSendFileFailsWhenClientClosesConnection() throws Exception {
fail(e);
}
});
- startServer();
- vertx.createNetClient().connect(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, socket -> {
- socket.result().write("GET / HTTP/1.1\r\n\r\n").close();
- });
+ startServer(testAddress);
+ vertx.createNetClient().connect(testAddress, onSuccess(socket -> {
+ socket.write("GET / HTTP/1.1\r\n\r\n").close();
+ }));
await();
}
@@ -3898,11 +3760,11 @@ protected MultiMap checkEmptyHttpResponse(HttpMethod method, int sc, MultiMap re
resp.headers().addAll(reqHeaders);
resp.end();
});
- startServer();
+ startServer(testAddress);
NetClient client = vertx.createNetClient();
try {
CompletableFuture<MultiMap> result = new CompletableFuture<>();
- client.connect(DEFAULT_HTTP_PORT, DEFAULT_HTTPS_HOST, ar -> {
+ client.connect(testAddress, ar -> {
if (ar.succeeded()) {
NetSocket so = ar.result();
so.write(
@@ -3946,13 +3808,13 @@ public void testUnknownContentLengthIsSetToZeroWithHTTP_1_0() throws Exception {
server.requestHandler(req -> {
req.response().write("Some-String").end();
});
- startServer();
+ startServer(testAddress);
client.close();
client = vertx.createHttpClient(new HttpClientOptions().setProtocolVersion(HttpVersion.HTTP_1_0));
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertNull(resp.getHeader("Content-Length"));
testComplete();
- });
+ }).end();
await();
}
@@ -3963,9 +3825,9 @@ public void testPartialH2CAmbiguousRequest() throws Exception {
testComplete();
});
Buffer fullRequest = Buffer.buffer("POST /whatever HTTP/1.1\r\n\r\n");
- startServer();
+ startServer(testAddress);
NetClient client = vertx.createNetClient();
- client.connect(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, onSuccess(so -> {
+ client.connect(testAddress, onSuccess(so -> {
so.write(fullRequest.slice(0, 1));
vertx.setTimer(1000, id -> {
so.write(fullRequest.slice(1, fullRequest.length()));
@@ -3984,9 +3846,9 @@ public void testIdleTimeoutWithPartialH2CRequest() throws Exception {
server.requestHandler(req -> {
testComplete();
});
- startServer();
+ startServer(testAddress);
NetClient client = vertx.createNetClient();
- client.connect(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, onSuccess(so -> {
+ client.connect(testAddress, onSuccess(so -> {
so.closeHandler(v -> {
testComplete();
});
@@ -3997,16 +3859,13 @@ public void testIdleTimeoutWithPartialH2CRequest() throws Exception {
@Test
public void testIdleTimeoutInfiniteSkipOfControlCharactersState() throws Exception {
server.close();
- server = vertx.createHttpServer(new HttpServerOptions()
- .setPort(DEFAULT_HTTP_PORT)
- .setHost(DEFAULT_HTTP_HOST)
- .setIdleTimeout(1));
+ server = vertx.createHttpServer(createBaseServerOptions().setIdleTimeout(1));
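+    // createBaseServerOptions() lets transport-specific test subclasses supply their own base options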
server.requestHandler(req -> {
testComplete();
});
- startServer();
+ startServer(testAddress);
NetClient client = vertx.createNetClient();
- client.connect(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, onSuccess(so -> {
+ client.connect(testAddress, onSuccess(so -> {
long id = vertx.setPeriodic(1, timerID -> {
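+        // setInt writes four bytes ending in 0x0D (CR), all control characters that the server decoder keeps skipping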
so.write(Buffer.buffer().setInt(0, 0xD));
});
@@ -4022,15 +3881,12 @@ public void testIdleTimeoutInfiniteSkipOfControlCharactersState() throws Excepti
public void testCompressedResponseWithConnectionCloseAndNoCompressionHeader() throws Exception {
Buffer expected = Buffer.buffer(TestUtils.randomAlphaString(2048));
server.close();
- server = vertx.createHttpServer(new HttpServerOptions()
- .setPort(DEFAULT_HTTP_PORT)
- .setHost(DEFAULT_HTTP_HOST)
- .setCompressionSupported(true));
+ server = vertx.createHttpServer(createBaseServerOptions().setCompressionSupported(true));
server.requestHandler(req -> {
req.response().end(expected);
});
- startServer();
- client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ startServer(testAddress);
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.bodyHandler(buff -> {
assertEquals(expected, buff);
complete();
@@ -4091,13 +3947,13 @@ public void testKeepAliveTimeoutHeaderOverwritePrevious() throws Exception {
}
private void testKeepAliveTimeout(HttpClientOptions options, int numReqs) throws Exception {
- startServer();
+ startServer(testAddress);
client.close();
client = vertx.createHttpClient(options.setPoolCleanerPeriod(1));
AtomicInteger respCount = new AtomicInteger();
for (int i = 0;i < numReqs;i++) {
int current = 1 + i;
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
respCount.incrementAndGet();
if (current == numReqs) {
long now = System.currentTimeMillis();
@@ -4111,7 +3967,7 @@ private void testKeepAliveTimeout(HttpClientOptions options, int numReqs) throws
testComplete();
});
}
- });
+ }).end();
}
await();
}
@@ -4126,10 +3982,10 @@ public void testPoolNotExpiring() throws Exception {
req.connection().close();
});
});
- startServer();
+ startServer(testAddress);
client.close();
client = vertx.createHttpClient(new HttpClientOptions().setPoolCleanerPeriod(0).setKeepAliveTimeout(100));
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.endHandler(v1 -> {
resp.request().connection().closeHandler(v2 -> {
long time = System.currentTimeMillis() - now.get();
@@ -4137,7 +3993,7 @@ public void testPoolNotExpiring() throws Exception {
testComplete();
});
});
- });
+ }).end();
await();
}
@@ -4161,11 +4017,11 @@ public void testPausedHttpServerRequestPauseTheConnectionAtRequestEnd() throws E
req.response().end();
});
});
- startServer();
+ startServer(testAddress);
client.close();
client = vertx.createHttpClient(createBaseClientOptions().setMaxPoolSize(1));
for (int i = 0; i < numRequests; i++) {
- client.put(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/someuri", resp -> {
+ client.request(HttpMethod.PUT, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/someuri", resp -> {
complete();
}).end("small");
}
@@ -4195,19 +4051,19 @@ public void testHttpClientResponsePauseIsIgnoredAtRequestEnd() throws Exception
private void testHttpClientResponsePause(Handler<HttpClientResponse> h) throws Exception {
server.requestHandler(req -> req.response().end("ok"));
- startServer();
+ startServer(testAddress);
client.close();
- client = vertx.createHttpClient(new HttpClientOptions().setMaxPoolSize(1).setKeepAlive(true));
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp1 -> {
+ client = vertx.createHttpClient(createBaseClientOptions().setMaxPoolSize(1).setKeepAlive(true));
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp1 -> {
h.handle(resp1);
vertx.setTimer(10, timerId -> {
      // The connection should be resumed since the response has ended
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp2 -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp2 -> {
assertSame(resp1.request().connection(), resp2.request().connection());
resp2.endHandler(v -> testComplete());
- });
+ }).end();
});
- });
+ }).end();
await();
}
@@ -4225,7 +4081,7 @@ public void testPoolLIFOPolicy() throws Exception {
break;
}
});
- startServer();
+ startServer(testAddress);
client.close();
client = vertx.createHttpClient(createBaseClientOptions().setMaxPoolSize(2));
// Make two concurrent requests and finish one first
@@ -4234,7 +4090,7 @@ public void testPoolLIFOPolicy() throws Exception {
// Use one event loop to be sure about response ordering
vertx.runOnContext(v0 -> {
for (int i = 0;i < 2;i++) {
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/someuri", resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/someuri", resp -> {
resp.endHandler(v1 -> {
          // Use runOnContext to be sure the connection is put back in the pool
vertx.runOnContext(v2 -> {
@@ -4242,14 +4098,14 @@ public void testPoolLIFOPolicy() throws Exception {
latch.countDown();
});
});
- });
+ }).end();
}
});
awaitLatch(latch);
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/someuri", resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/someuri", resp -> {
assertSame(resp.request().connection(), connections.get(1));
testComplete();
- });
+ }).end();
await();
}
@@ -4304,13 +4160,13 @@ private void testHttpClientResponseThrowsExceptionInHandler(
resp.end();
}
});
- startServer();
+ startServer(testAddress);
int num = 50;
CountDownLatch latch = new CountDownLatch(num);
for (int i = 0;i < num;i++) {
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/someuri", resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/someuri", resp -> {
handler.accept(resp, latch);
- });
+ }).end();
}
awaitLatch(latch);
}
@@ -4323,8 +4179,8 @@ public void testConnectionCloseDuringShouldCallHandleExceptionOnlyOnce() throws
});
});
AtomicInteger count = new AtomicInteger();
- startServer();
- HttpClientRequest post = client.post(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", res -> fail());
+ startServer(testAddress);
+ HttpClientRequest post = client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", res -> fail());
post.setChunked(true);
post.write(TestUtils.randomBuffer(10000));
CountDownLatch latch = new CountDownLatch(1);
@@ -4352,9 +4208,9 @@ public void testDeferredRequestEnd() throws Exception {
req.resume();
});
});
- startServer();
+ startServer(testAddress);
Buffer expected = Buffer.buffer(TestUtils.randomAlphaString(1024));
- client.post(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
+ client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
resp.bodyHandler(body -> {
assertEquals(expected, body);
testComplete();
@@ -4384,12 +4240,12 @@ public void testPipelinedWithResponseSent() throws Exception {
req.resume();
});
});
- startServer();
+ startServer(testAddress);
client.close();
- client = vertx.createHttpClient(new HttpClientOptions().setPipelining(true).setMaxPoolSize(1).setKeepAlive(true));
+ client = vertx.createHttpClient(createBaseClientOptions().setPipelining(true).setMaxPoolSize(1).setKeepAlive(true));
for (int i = 0;i < numReq;i++) {
String expected = "" + i;
- client.post(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
+ client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
resp.bodyHandler(body -> {
assertEquals(expected, body.toString());
complete();
@@ -4413,12 +4269,12 @@ public void testPipelinedWithPendingResponse() throws Exception {
req.response().end("" + val);
});
});
- startServer();
+ startServer(testAddress);
client.close();
- client = vertx.createHttpClient(new HttpClientOptions().setPipelining(true).setMaxPoolSize(1).setKeepAlive(true));
+ client = vertx.createHttpClient(createBaseClientOptions().setPipelining(true).setMaxPoolSize(1).setKeepAlive(true));
for (int i = 0;i < numReq;i++) {
String expected = "" + i;
- client.post(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
+ client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
resp.bodyHandler(body -> {
assertEquals(expected, body.toString());
complete();
@@ -4454,12 +4310,12 @@ public void testPipelinedPostRequestStartedByResponseSent() throws Exception {
break;
}
});
- startServer();
+ startServer(testAddress);
client.close();
client = vertx.createHttpClient(new HttpClientOptions().setPipelining(true).setMaxPoolSize(1).setKeepAlive(true));
- client.post(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
+ client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
}).end(TestUtils.randomAlphaString(1024));
- HttpClientRequest req = client.post(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
+ HttpClientRequest req = client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
testComplete();
}).setChunked(true).write(chunk1);
awaitLatch(latch);
@@ -4480,14 +4336,14 @@ public void testBeginPipelinedRequestByResponseSentOnRequestCompletion() throws
req.response().end();
});
});
- startServer();
+ startServer(testAddress);
client.close();
- client = vertx.createHttpClient(new HttpClientOptions().setPipelining(true).setMaxPoolSize(1).setKeepAlive(true));
+ client = vertx.createHttpClient(createBaseClientOptions().setPipelining(true).setMaxPoolSize(1).setKeepAlive(true));
client.post(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
}).end(TestUtils.randomAlphaString(1024));
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
testComplete();
- });
+ }).end();
await();
}
@@ -4506,14 +4362,14 @@ public void testBeginPipelinedRequestByResponseSentBeforeRequestCompletion() thr
req.response().end();
}
});
- startServer();
+ startServer(testAddress);
client.close();
- client = vertx.createHttpClient(new HttpClientOptions().setPipelining(true).setMaxPoolSize(1).setKeepAlive(true));
- client.post(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
+ client = vertx.createHttpClient(createBaseClientOptions().setPipelining(true).setMaxPoolSize(1).setKeepAlive(true));
+ client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
}).end(TestUtils.randomAlphaString(1024));
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
testComplete();
- });
+ }).end();
await();
}
@@ -4523,15 +4379,15 @@ public void testHttpClientResponseBufferedWithPausedEnd() throws Exception {
server.requestHandler(req -> {
req.response().end("HelloWorld" + i.incrementAndGet());
});
- startServer();
+ startServer(testAddress);
client.close();
client = vertx.createHttpClient(new HttpClientOptions().setMaxPoolSize(1).setKeepAlive(true));
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp1 -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp1 -> {
    // The response is paused but the connection is put back in the pool since the HTTP response fully arrived,
    // although the response is not yet delivered to the application because we pause it
resp1.pause();
// Do a request on the same connection
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp2 -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp2 -> {
resp2.bodyHandler(body2 -> {
// When the response arrives -> resume the first request
assertEquals("HelloWorld2", body2.toString());
@@ -4541,33 +4397,33 @@ public void testHttpClientResponseBufferedWithPausedEnd() throws Exception {
});
resp1.resume();
});
- });
- });
+ }).end();
+ }).end();
await();
}
@Test
public void testHttpClientResumeConnectionOnResponseOnLastMessage() throws Exception {
server.requestHandler(req -> req.response().end("ok"));
- startServer();
+ startServer(testAddress);
client.close();
client = vertx.createHttpClient(new HttpClientOptions().setKeepAlive(true).setMaxPoolSize(1));
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp1 -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp1 -> {
resp1.pause();
// The connection resume is asynchronous and the end message will be received before connection resume happens
resp1.resume();
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp2 -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp2 -> {
testComplete();
- });
- });
+ }).end();
+ }).end();
await();
}
@Test
public void testSetChunkedToFalse() throws Exception {
server.requestHandler(req -> req.response().setChunked(false).end());
- startServer();
- client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
+ startServer(testAddress);
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
testComplete();
}).setChunked(false).end();
await();
@@ -4589,12 +4445,12 @@ public void testHttpServerRequestShouldCallExceptionHandlerWhenTheClosedHandlerI
}
});
});
- server.listen(onSuccess(s -> {
- client.getNow(DEFAULT_HTTP_PORT, HttpTestBase.DEFAULT_HTTP_HOST, "/someuri", resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, HttpTestBase.DEFAULT_HTTP_HOST, "/someuri", resp -> {
vertx.setTimer(1000, id -> {
resp.request().connection().close();
});
- });
+ }).end();
}));
await();
}
@@ -4606,8 +4462,8 @@ public void testHttpClientRequestShouldCallExceptionHandlerWhenTheClosedHandlerI
req.response().close();
});
});
- startServer();
- HttpClientRequest req = client.put(DEFAULT_HTTP_PORT, HttpTestBase.DEFAULT_HTTP_HOST, "/someuri", resp -> {
+ startServer(testAddress);
+ HttpClientRequest req = client.request(HttpMethod.PUT, testAddress, DEFAULT_HTTP_PORT, HttpTestBase.DEFAULT_HTTP_HOST, "/someuri", resp -> {
}).setChunked(true);
CheckingSender sender = new CheckingSender(vertx.getOrCreateContext(), req);
AtomicBoolean connected = new AtomicBoolean();
@@ -4666,14 +4522,14 @@ public void testChunkedServerResponse() {
vertx.setTimer(1, id -> {
resp.end();
});
- }).listen(onSuccess(server -> {
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", res -> {
+ }).listen(testAddress, onSuccess(server -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", res -> {
assertEquals("chunked", res.getHeader("transfer-encoding"));
res.bodyHandler(body -> {
assertEquals("the-chunk", body.toString());
testComplete();
});
- });
+ }).end();
}));
await();
}
@@ -4687,8 +4543,8 @@ public void testChunkedClientRequest() {
assertEquals("the-chunk", body.toString());
req.response().end();
});
- }).listen(onSuccess(server -> {
- HttpClientRequest req = client.put(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", res -> {
+ }).listen(testAddress, onSuccess(server -> {
+ HttpClientRequest req = client.request(HttpMethod.PUT, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", res -> {
testComplete();
});
req.setChunked(true);
@@ -4733,8 +4589,8 @@ public void testHttpServerWithIdleTimeoutSendChunkedFile() throws Exception {
req -> {
req.response().sendFile(sent.getAbsolutePath());
});
- startServer();
- client.getNow(8080, "localhost", "/", resp -> {
+ startServer(testAddress);
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> {
long now = System.currentTimeMillis();
int[] length = {0};
resp.handler(buff -> {
@@ -4750,7 +4606,7 @@ public void testHttpServerWithIdleTimeoutSendChunkedFile() throws Exception {
assertTrue(System.currentTimeMillis() - now > 1000);
testComplete();
});
- });
+ }).end();
await();
}
@@ -4763,16 +4619,16 @@ public void testSendFilePipelined() throws Exception {
req -> {
req.response().sendFile(sent.getAbsolutePath());
});
- startServer();
+ startServer(testAddress);
client.close();
client = vertx.createHttpClient(createBaseClientOptions().setPipelining(true).setMaxPoolSize(1));
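+    // maxPoolSize(1) with pipelining enabled funnels every request through a single pipelined connection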
for (int i = 0;i < n;i++) {
- client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.exceptionHandler(this::fail);
resp.bodyHandler(body -> {
complete();
});
- }).exceptionHandler(this::fail).end();
+ }).end();
}
await();
}
diff --git a/src/test/java/io/vertx/core/http/Http2Test.java b/src/test/java/io/vertx/core/http/Http2Test.java
--- a/src/test/java/io/vertx/core/http/Http2Test.java
+++ b/src/test/java/io/vertx/core/http/Http2Test.java
@@ -11,6 +11,7 @@
package io.vertx.core.http;
+import io.netty.channel.socket.DuplexChannel;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.codec.http2.Http2CodecUtil;
import io.vertx.core.Context;
@@ -66,8 +67,8 @@ public void testServerResponseWriteBufferFromOtherThread() throws Exception {
runAsync(() -> {
req.response().write("hello ").end("world");
});
- }).listen(onSuccess(v -> {
- client.get(8080, "localhost", "/somepath", resp -> {
+ }).listen(testAddress, onSuccess(v -> {
+ client.request(HttpMethod.GET, testAddress, 8080, "localhost", "/somepath", resp -> {
assertEquals(200, resp.statusCode());
resp.bodyHandler(buff -> {
assertEquals(Buffer.buffer("hello world"), buff);
@@ -84,8 +85,8 @@ public void testServerResponseResetFromOtherThread() throws Exception {
runAsync(() -> {
req.response().reset(0);
});
- }).listen(onSuccess(v -> {
- client.get(8080, "localhost", "/somepath", resp -> {
+ }).listen(testAddress, onSuccess(v -> {
+ client.request(HttpMethod.GET, testAddress, 8080, "localhost", "/somepath", resp -> {
fail();
}).exceptionHandler(err -> {
assertTrue(err instanceof StreamResetException);
@@ -114,11 +115,11 @@ public void testClientRequestWriteFromOtherThread() throws Exception {
req.endHandler(v -> {
req.response().end();
});
- }).listen(onSuccess(v -> {
+ }).listen(testAddress, onSuccess(v -> {
latch1.countDown();
}));
awaitLatch(latch1);
- HttpClientRequest req = client.get(8080, "localhost", "/somepath", resp -> {
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, 8080, "localhost", "/somepath", resp -> {
assertEquals(200, resp.statusCode());
testComplete();
}).setChunked(true).sendHead();
@@ -143,16 +144,8 @@ public void testServerOpenSSL() throws Exception {
server.requestHandler(req -> {
req.response().end();
});
- CountDownLatch latch = new CountDownLatch(1);
- System.out.println("starting");
- try {
- server.listen(onSuccess(v -> latch.countDown()));
- } catch (Throwable e) {
- e.printStackTrace();
- }
- System.out.println("listening");
- awaitLatch(latch);
- client.get(DEFAULT_HTTPS_PORT, DEFAULT_HTTPS_HOST, "/somepath", resp -> {
+ startServer(testAddress);
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTPS_PORT, DEFAULT_HTTPS_HOST, "/somepath", resp -> {
assertEquals(200, resp.statusCode());
testComplete();
}).exceptionHandler(this::fail).end();
@@ -187,8 +180,8 @@ public void testServerStreamPausedWhenConnectionIsPaused() throws Exception {
}
}
});
- startServer();
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/0", resp -> {
+ startServer(testAddress);
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/0", resp -> {
resp.pause();
Context ctx = vertx.getOrCreateContext();
resumeLatch.thenAccept(v1 -> {
@@ -199,13 +192,13 @@ public void testServerStreamPausedWhenConnectionIsPaused() throws Exception {
resp.resume();
});
});
- });
+ }).end();
awaitLatch(fullLatch);
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/1", resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/1", resp -> {
resp.endHandler(v -> {
complete();
});
- });
+ }).end();
resumeLatch.get(20, TimeUnit.SECONDS); // Make sure it completes
await();
}
@@ -236,15 +229,15 @@ public void testClientStreamPausedWhenConnectionIsPaused() throws Exception {
}
}
});
- startServer();
- HttpClientRequest req1 = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/0", resp -> {
+ startServer(testAddress);
+ HttpClientRequest req1 = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/0", resp -> {
complete();
}).setChunked(true);
while (!req1.writeQueueFull()) {
req1.write(Buffer.buffer(TestUtils.randomAlphaString(512)));
Thread.sleep(1);
}
- HttpClientRequest req2 = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/1", resp -> {
+ HttpClientRequest req2 = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/1", resp -> {
complete();
}).setChunked(true);
assertFalse(req2.writeQueueFull());
@@ -270,19 +263,19 @@ public void testResetClientRequestNotYetSent() throws Exception {
req.response().end();
complete();
});
- startServer();
+ startServer(testAddress);
// There might be a race between the request write and the request reset
// so we do it on the context thread to avoid it
vertx.runOnContext(v -> {
- HttpClientRequest post = client.post(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ HttpClientRequest post = client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
fail();
});
post.setChunked(true).write(TestUtils.randomBuffer(1024));
assertTrue(post.reset());
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertEquals(1, numReq.get());
complete();
- });
+ }).end();
});
await();
}
@@ -293,19 +286,19 @@ public void testDiscardConnectionWhenChannelBecomesInactive() throws Exception {
server.requestHandler(req -> {
if (count.getAndIncrement() == 0) {
Http2ServerConnection a = (Http2ServerConnection) req.connection();
- SocketChannel channel = (SocketChannel) a.channel();
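+      // DuplexChannel covers both TCP and domain socket channels, so shutdown() works for either transport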
+ DuplexChannel channel = (DuplexChannel) a.channel();
channel.shutdown();
} else {
req.response().end();
}
});
- startServer();
+ startServer(testAddress);
AtomicBoolean closed = new AtomicBoolean();
- client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
fail();
}).connectionHandler(conn -> conn.closeHandler(v -> closed.set(true))).end();
AsyncTestBase.assertWaitUntil(closed::get);
- client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
testComplete();
}).exceptionHandler(err -> {
fail();
@@ -321,10 +314,10 @@ public void testClientDoesNotSupportAlpn() throws Exception {
req.response().end();
complete();
});
- startServer();
+ startServer(testAddress);
client.close();
client = vertx.createHttpClient(createBaseClientOptions().setProtocolVersion(HttpVersion.HTTP_1_1).setUseAlpn(false));
- client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertEquals(HttpVersion.HTTP_1_1, resp.version());
complete();
}).exceptionHandler(this::fail).end();
@@ -341,8 +334,8 @@ public void testServerDoesNotSupportAlpn() throws Exception {
req.response().end();
complete();
});
- startServer();
- client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ startServer(testAddress);
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertEquals(HttpVersion.HTTP_1_1, resp.version());
complete();
}).exceptionHandler(this::fail).end();
@@ -354,7 +347,7 @@ public void testClientMakeRequestHttp2WithSSLWithoutAlpn() throws Exception {
client.close();
client = vertx.createHttpClient(createBaseClientOptions().setUseAlpn(false));
try {
- client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI);
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI);
fail();
} catch (IllegalArgumentException ignore) {
// Expected
@@ -377,9 +370,9 @@ public void testServePendingRequests() throws Exception {
}
}
});
- startServer();
+ startServer(testAddress);
for (int i = 0;i < n;i++) {
- client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> complete()).end();
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> complete()).end();
}
await();
}
@@ -396,8 +389,8 @@ public void testInitialMaxConcurrentStreamZero() throws Exception {
conn.updateSettings(new Http2Settings().setMaxConcurrentStreams(10));
});
});
- startServer();
- client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ startServer(testAddress);
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
testComplete();
}).connectionHandler(conn -> {
assertEquals(10, conn.remoteSettings().getMaxConcurrentStreams());
@@ -415,14 +408,14 @@ public void testFoo() throws Exception {
assertNull(resp.headers().get("content-length"));
complete();
});
- startServer();
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ startServer(testAddress);
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertNull(resp.getHeader("content-length"));
resp.bodyHandler(body -> {
assertEquals("HelloWorld", body.toString());
complete();
});
- });
+ }).end();
await();
}
@@ -431,10 +424,10 @@ public void testKeepAliveTimeout() throws Exception {
server.requestHandler(req -> {
req.response().end();
});
- startServer();
+ startServer(testAddress);
client.close();
client = vertx.createHttpClient(createBaseClientOptions().setHttp2KeepAliveTimeout(3).setPoolCleanerPeriod(1));
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
long now = System.currentTimeMillis();
resp.request().connection().closeHandler(v -> {
long timeout = System.currentTimeMillis() - now;
@@ -445,7 +438,7 @@ public void testKeepAliveTimeout() throws Exception {
assertTrue("Expected actual close timeout " + timeout + " to be < " + high, timeout < high);
testComplete();
});
- });
+ }).end();
await();
}
@@ -466,9 +459,9 @@ public void testStreamWeightAndDependency() throws Exception {
req.response().end();
complete();
});
- startServer();
+ startServer(testAddress);
client = vertx.createHttpClient(createBaseClientOptions().setHttp2KeepAliveTimeout(3).setPoolCleanerPeriod(1));
- HttpClientRequest request = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ HttpClientRequest request = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertEquals(responseStreamWeight, resp.request().getStreamPriority().getWeight());
assertEquals(responseStreamDependency, resp.request().getStreamPriority().getDependency());
complete();
@@ -515,9 +508,9 @@ public void testStreamWeightAndDependencyChange() throws Exception {
req.response().end("world");
complete();
});
- startServer();
+ startServer(testAddress);
client = vertx.createHttpClient(createBaseClientOptions().setHttp2KeepAliveTimeout(3).setPoolCleanerPeriod(1));
- HttpClientRequest request = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ HttpClientRequest request = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertEquals(responseStreamWeight, resp.request().getStreamPriority().getWeight());
assertEquals(responseStreamDependency, resp.request().getStreamPriority().getDependency());
resp.streamPriorityHandler( sp -> {
@@ -569,9 +562,9 @@ public void testStreamWeightAndDependencyNoChange() throws Exception {
req.response().end("world");
complete();
});
- startServer();
+ startServer(testAddress);
client = vertx.createHttpClient(createBaseClientOptions().setHttp2KeepAliveTimeout(3).setPoolCleanerPeriod(1));
- HttpClientRequest request = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ HttpClientRequest request = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertEquals(responseStreamWeight, resp.request().getStreamPriority().getWeight());
assertEquals(responseStreamDependency, resp.request().getStreamPriority().getDependency());
resp.streamPriorityHandler( sp -> {
@@ -606,9 +599,9 @@ public void testStreamWeightAndDependencyInheritance() throws Exception {
req.response().end();
complete();
});
- startServer();
+ startServer(testAddress);
client = vertx.createHttpClient(createBaseClientOptions().setHttp2KeepAliveTimeout(3).setPoolCleanerPeriod(1));
- HttpClientRequest request = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ HttpClientRequest request = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertEquals(requestStreamWeight, resp.request().getStreamPriority().getWeight());
assertEquals(requestStreamDependency, resp.request().getStreamPriority().getDependency());
complete();
@@ -632,9 +625,9 @@ public void testDefaultStreamWeightAndDependency() throws Exception {
req.response().end();
complete();
});
- startServer();
+ startServer(testAddress);
client = vertx.createHttpClient(createBaseClientOptions().setHttp2KeepAliveTimeout(3).setPoolCleanerPeriod(1));
- HttpClientRequest request = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ HttpClientRequest request = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertEquals(defaultStreamWeight, resp.request().getStreamPriority().getWeight());
assertEquals(defaultStreamDependency, resp.request().getStreamPriority().getDependency());
complete();
@@ -661,9 +654,9 @@ public void testStreamWeightAndDependencyPushPromise() throws Exception {
req.response().end();
complete();
});
- startServer();
+ startServer(testAddress);
client = vertx.createHttpClient(createBaseClientOptions().setHttp2KeepAliveTimeout(3).setPoolCleanerPeriod(1));
- HttpClientRequest request = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ HttpClientRequest request = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
complete();
});
request.pushHandler(pushReq -> {
@@ -692,9 +685,9 @@ public void testStreamWeightAndDependencyInheritancePushPromise() throws Excepti
req.response().end();
complete();
});
- startServer();
+ startServer(testAddress);
client = vertx.createHttpClient(createBaseClientOptions().setHttp2KeepAliveTimeout(3).setPoolCleanerPeriod(1));
- HttpClientRequest request = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ HttpClientRequest request = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
complete();
});
request.pushHandler(pushReq -> {
diff --git a/src/test/java/io/vertx/core/http/HttpTest.java b/src/test/java/io/vertx/core/http/HttpTest.java
--- a/src/test/java/io/vertx/core/http/HttpTest.java
+++ b/src/test/java/io/vertx/core/http/HttpTest.java
@@ -56,11 +56,17 @@ public abstract class HttpTest extends HttpTestBase {
public TemporaryFolder testFolder = new TemporaryFolder();
protected File testDir;
+ private File tmp;
@Override
public void setUp() throws Exception {
super.setUp();
testDir = testFolder.newFolder();
+ if (USE_DOMAIN_SOCKETS) {
+ assertTrue("Native transport not enabled", USE_NATIVE_TRANSPORT);
+ tmp = TestUtils.tmpFile(".sock");
+ testAddress = SocketAddress.domainSocketAddress(tmp.getAbsolutePath());
+ }
}
protected HttpServerOptions createBaseServerOptions() {
@@ -92,8 +98,8 @@ public void testClientRequestArguments() throws Exception {
public void testClientChaining() {
server.requestHandler(noOpHandler());
- server.listen(onSuccess(server -> {
- HttpClientRequest req = client.request(HttpMethod.PUT, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, noOpHandler());
+ server.listen(testAddress, onSuccess(server -> {
+ HttpClientRequest req = client.request(HttpMethod.PUT, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, noOpHandler());
assertTrue(req.setChunked(true) == req);
assertTrue(req.sendHead() == req);
assertTrue(req.write("foo", "UTF-8") == req);
@@ -131,18 +137,16 @@ public void testListenSocketAddress() {
public void testListenDomainSocketAddress() throws Exception {
Vertx vx = Vertx.vertx(new VertxOptions().setPreferNativeTransport(true));
Assume.assumeTrue("Native transport must be enabled", vx.isNativeTransportEnabled());
- NetClient netClient = vx.createNetClient();
- HttpServer httpserver = vx.createHttpServer().requestHandler(req -> req.response().end());
+ HttpServer httpserver = vx.createHttpServer(createBaseServerOptions()).requestHandler(req -> req.response().end());
File sockFile = TestUtils.tmpFile(".sock");
SocketAddress sockAddress = SocketAddress.domainSocketAddress(sockFile.getAbsolutePath());
httpserver.listen(sockAddress, onSuccess(server -> {
- netClient.connect(sockAddress, onSuccess(sock -> {
- sock.handler(buf -> {
- assertTrue("Response is not an http 200", buf.toString("UTF-8").startsWith("HTTP/1.1 200 OK"));
- testComplete();
- });
- sock.write("GET / HTTP/1.1\r\n\r\n");
- }));
+ client.request(HttpMethod.GET, sockAddress, new RequestOptions()
+ .setHost(DEFAULT_HTTP_HOST).setPort(DEFAULT_HTTP_PORT).setURI(DEFAULT_TEST_URI), resp -> {
+ resp.endHandler(v -> {
+ testComplete();
+ });
+ }).end();
}));
try {
@@ -154,7 +158,7 @@ public void testListenDomainSocketAddress() throws Exception {
@Test
- public void testLowerCaseHeaders() {
+ public void testLowerCaseHeaders() throws Exception {
server.requestHandler(req -> {
assertEquals("foo", req.headers().get("Foo"));
assertEquals("foo", req.headers().get("foo"));
@@ -175,8 +179,8 @@ public void testLowerCaseHeaders() {
req.response().end();
});
- server.listen(onSuccess(server -> {
- HttpClientRequest req = client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(server -> {
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertEquals("quux", resp.headers().get("Quux"));
assertEquals("quux", resp.headers().get("quux"));
assertEquals("quux", resp.headers().get("qUUX"));
@@ -293,8 +297,8 @@ public void testPutHeadersOnRequest() {
assertEquals("bar", req.getHeader("foo"));
req.response().end();
});
- server.listen(onSuccess(server -> {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(server -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertEquals(200, resp.statusCode());
testComplete();
}).putHeader("foo", "bar").end();
@@ -309,8 +313,8 @@ public void testPutHeaderReplacesPreviousHeaders() throws Exception {
.putHeader("Location", "http://example1.org")
.putHeader("location", "http://example2.org")
.end());
- server.listen(onSuccess(server -> {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(server -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertEquals(singletonList("http://example2.org"), resp.headers().getAll("LocatioN"));
testComplete();
}).end();
@@ -446,9 +450,9 @@ private void testSimpleRequest(String uri, HttpMethod method, boolean absolute,
boolean ssl = this instanceof Http2Test;
HttpClientRequest req;
if (absolute) {
- req = client.requestAbs(method, (ssl ? "https://" : "http://") + DEFAULT_HTTP_HOST + ":" + DEFAULT_HTTP_PORT + uri, handler);
+ req = client.requestAbs(method, testAddress, (ssl ? "https://" : "http://") + DEFAULT_HTTP_HOST + ":" + DEFAULT_HTTP_PORT + uri, handler);
} else {
- req = client.request(method, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, uri, handler);
+ req = client.request(method, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, uri, handler);
}
testSimpleRequest(uri, method, req, absolute);
}
@@ -474,7 +478,7 @@ private void testSimpleRequest(String uri, HttpMethod method, HttpClientRequest
req.response().end();
});
- server.listen(onSuccess(server -> request.end()));
+ server.listen(testAddress, onSuccess(server -> request.end()));
await();
}
@@ -488,8 +492,8 @@ public void testServerChaining() {
testComplete();
});
- server.listen(onSuccess(server -> {
- client.request(HttpMethod.PUT, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, noOpHandler()).end();
+ server.listen(testAddress, onSuccess(server -> {
+ client.request(HttpMethod.PUT, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, noOpHandler()).end();
}));
await();
@@ -505,8 +509,8 @@ public void testServerChainingSendFile() throws Exception {
testComplete();
});
- server.listen(onSuccess(server -> {
- client.request(HttpMethod.PUT, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, noOpHandler()).end();
+ server.listen(testAddress, onSuccess(server -> {
+ client.request(HttpMethod.PUT, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, noOpHandler()).end();
}));
await();
@@ -528,8 +532,8 @@ public void testResponseEndHandlers1() {
complete();
});
req.response().end();
- }).listen(onSuccess(server -> {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", res -> {
+ }).listen(testAddress, onSuccess(server -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", res -> {
assertEquals(200, res.statusCode());
assertEquals("wibble", res.headers().get("extraheader"));
complete();
@@ -555,8 +559,8 @@ public void testResponseEndHandlers2() {
complete();
});
req.response().end(content);
- }).listen(onSuccess(server -> {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", res -> {
+ }).listen(testAddress, onSuccess(server -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", res -> {
assertEquals(200, res.statusCode());
assertEquals("wibble", res.headers().get("extraheader"));
res.bodyHandler(buff -> {
@@ -592,8 +596,8 @@ public void testResponseEndHandlersChunkedResponse() {
IntStream.range(0, numChunks - 1).forEach(x -> req.response().write(chunk));
// End with a chunk to ensure size is correctly calculated
req.response().end(chunk);
- }).listen(onSuccess(server -> {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", res -> {
+ }).listen(testAddress, onSuccess(server -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", res -> {
assertEquals(200, res.statusCode());
assertEquals("wibble", res.headers().get("extraheader"));
res.bodyHandler(buff -> {
@@ -623,8 +627,8 @@ public void testResponseEndHandlersSendFile() throws Exception {
complete();
});
req.response().sendFile(toSend.getAbsolutePath());
- }).listen(onSuccess(server -> {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", res -> {
+ }).listen(testAddress, onSuccess(server -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", res -> {
assertEquals(200, res.statusCode());
assertEquals("wibble", res.headers().get("extraheader"));
res.bodyHandler(buff -> {
@@ -668,8 +672,8 @@ private void testURIAndPath(String uri, String path) {
req.response().end();
});
- server.listen(onSuccess(server -> {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, uri, resp -> testComplete()).end();
+ server.listen(testAddress, onSuccess(server -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, uri, resp -> testComplete()).end();
}));
await();
@@ -716,8 +720,8 @@ private void testParamDecoding(String value) throws UnsupportedEncodingException
req.response().end();
});
String postData = "param=" + URLEncoder.encode(value,"UTF-8");
- server.listen(onSuccess(server -> {
- client.post(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/")
+ server.listen(testAddress, onSuccess(server -> {
+ client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", resp -> testComplete())
.putHeader(HttpHeaders.CONTENT_TYPE, HttpHeaders.APPLICATION_X_WWW_FORM_URLENCODED)
.putHeader(HttpHeaders.CONTENT_LENGTH, String.valueOf(postData.length()))
.handler(resp -> {
@@ -742,7 +746,6 @@ public void testParamsSemiColon() {
private void testParams(char delim) {
Map<String, String> params = genMap(10);
String query = generateQueryString(params, delim);
-
server.requestHandler(req -> {
assertEquals(query, req.query());
assertEquals(params.size(), req.params().size());
@@ -752,8 +755,8 @@ private void testParams(char delim) {
req.response().end();
});
- server.listen(onSuccess(server -> {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "some-uri/?" + query, resp -> testComplete()).end();
+ server.listen(testAddress, onSuccess(server -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "some-uri/?" + query, resp -> testComplete()).end();
}));
await();
@@ -790,8 +793,8 @@ public void testDefaultRequestHeaders() {
req.response().end();
});
- server.listen(onSuccess(server -> {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> testComplete()).end();
+ server.listen(testAddress, onSuccess(server -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> testComplete()).end();
}));
await();
@@ -814,8 +817,8 @@ public void testRequestHeadersWithCharSequence() {
req.response().end();
});
- server.listen(onSuccess(server -> {
- HttpClientRequest req = client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> testComplete());
+ server.listen(testAddress, onSuccess(server -> {
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> testComplete());
headers.forEach((k, v) -> req.headers().add(k, v));
@@ -847,8 +850,8 @@ private void testRequestHeaders(boolean individually) {
req.response().end();
});
- server.listen(onSuccess(server -> {
- HttpClientRequest req = client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> testComplete());
+ server.listen(testAddress, onSuccess(server -> {
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> testComplete());
if (individually) {
for (Map.Entry<String, String> header : headers) {
req.headers().add(header.getKey(), header.getValue());
@@ -886,8 +889,8 @@ private void testResponseHeaders(boolean individually) {
req.response().end();
});
- server.listen(onSuccess(server -> {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(server -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertTrue(headers.size() < resp.headers().size());
for (Map.Entry<String, String> entry : headers) {
assertEquals(entry.getValue(), resp.headers().get(entry.getKey()));
@@ -912,8 +915,8 @@ public void testResponseHeadersWithCharSequence() {
req.response().end();
});
- server.listen(onSuccess(server -> {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(server -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertTrue(headers.size() < resp.headers().size());
headers.forEach((k,v) -> assertEquals(v, resp.headers().get(k)));
@@ -963,8 +966,8 @@ private void testResponseMultipleSetCookie(boolean inHeader, boolean inTrailer)
req.response().end();
});
- server.listen(onSuccess(server -> {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(server -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.endHandler(v -> {
assertEquals(cookies.size(), resp.cookies().size());
for (int i = 0; i < cookies.size(); ++i) {
@@ -982,8 +985,8 @@ private void testResponseMultipleSetCookie(boolean inHeader, boolean inTrailer)
public void testUseRequestAfterComplete() {
server.requestHandler(noOpHandler());
- server.listen(onSuccess(server -> {
- HttpClientRequest req = client.request(HttpMethod.POST, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, noOpHandler());
+ server.listen(testAddress, onSuccess(server -> {
+ HttpClientRequest req = client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, noOpHandler());
req.end();
Buffer buff = Buffer.buffer();
@@ -1016,8 +1019,8 @@ public void testRequestBodyBufferAtEnd() {
req.response().end();
}));
- server.listen(onSuccess(server -> {
- client.request(HttpMethod.POST, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> testComplete()).end(body);
+ server.listen(testAddress, onSuccess(server -> {
+ client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> testComplete()).end(body);
}));
await();
@@ -1055,8 +1058,8 @@ private void testRequestBodyStringAtEnd(String encoding) {
});
});
- server.listen(onSuccess(server -> {
- HttpClientRequest req = client.request(HttpMethod.POST, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, noOpHandler());
+ server.listen(testAddress, onSuccess(server -> {
+ HttpClientRequest req = client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, noOpHandler());
if (encoding == null) {
req.end(body);
} else {
@@ -1087,8 +1090,8 @@ private void testRequestBodyWrite(boolean chunked) {
});
});
- server.listen(onSuccess(server -> {
- HttpClientRequest req = client.request(HttpMethod.POST, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> testComplete());
+ server.listen(testAddress, onSuccess(server -> {
+ HttpClientRequest req = client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> testComplete());
int numWrites = 10;
int chunkSize = 100;
@@ -1155,8 +1158,8 @@ private void testRequestBodyWriteString(boolean chunked, String encoding) {
});
});
- server.listen(onSuccess(server -> {
- HttpClientRequest req = client.request(HttpMethod.POST, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, noOpHandler());
+ server.listen(testAddress, onSuccess(server -> {
+ HttpClientRequest req = client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, noOpHandler());
if (chunked) {
req.setChunked(true);
@@ -1189,8 +1192,8 @@ public void testRequestWrite() {
testComplete();
});
});
- server.listen(onSuccess(s -> {
- HttpClientRequest req = client.request(HttpMethod.POST, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, noOpHandler());
+ server.listen(testAddress, onSuccess(s -> {
+ HttpClientRequest req = client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, noOpHandler());
req.setChunked(true);
int padding = 5;
for (int i = 0;i < times;i++) {
@@ -1250,7 +1253,7 @@ public void testConnectWithoutResponseHandler() throws Exception {
@Test
public void testClientExceptionHandlerCalledWhenFailingToConnect() throws Exception {
- client.request(HttpMethod.GET, 9998, "255.255.255.255", DEFAULT_TEST_URI, resp -> fail("Connect should not be called")).
+ client.request(HttpMethod.GET, testAddress, 9998, "255.255.255.255", DEFAULT_TEST_URI, resp -> fail("Connect should not be called")).
exceptionHandler(error -> testComplete()).
endHandler(done -> fail()).
end();
@@ -1260,17 +1263,17 @@ public void testClientExceptionHandlerCalledWhenFailingToConnect() throws Except
@Test
public void testClientExceptionHandlerCalledWhenServerTerminatesConnection() throws Exception {
int numReqs = 10;
- CountDownLatch latch = new CountDownLatch(numReqs);
+ waitFor(numReqs);
server.requestHandler(request -> {
request.response().close();
- }).listen(DEFAULT_HTTP_PORT, onSuccess(s -> {
+ }).listen(testAddress, onSuccess(s -> {
// Exception handler should be called for any requests in the pipeline if connection is closed
for (int i = 0; i < numReqs; i++) {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> fail("Connect should not be called")).
- exceptionHandler(error -> latch.countDown()).endHandler(done -> fail()).end();
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> fail("Connect should not be called")).
+ exceptionHandler(error -> complete()).endHandler(done -> fail()).end();
}
}));
- awaitLatch(latch);
+ await();
}
@Test
@@ -1278,9 +1281,9 @@ public void testClientExceptionHandlerCalledWhenServerTerminatesConnectionAfterP
server.requestHandler(request -> {
//Write partial response then close connection before completing it
request.response().setChunked(true).write("foo").close();
- }).listen(DEFAULT_HTTP_PORT, onSuccess(s -> {
+ }).listen(testAddress, onSuccess(s -> {
// Exception handler should be called for any requests in the pipeline if connection is closed
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp ->
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp ->
resp.exceptionHandler(t -> testComplete())).exceptionHandler(error -> fail()).end();
}));
await();
@@ -1290,9 +1293,15 @@ public void testClientExceptionHandlerCalledWhenServerTerminatesConnectionAfterP
public void testClientExceptionHandlerCalledWhenExceptionOnDataHandler() throws Exception {
server.requestHandler(request -> {
request.response().end("foo");
- }).listen(DEFAULT_HTTP_PORT, onSuccess(s -> {
+ }).listen(testAddress, onSuccess(s -> {
// Exception handler should be called for any exceptions in the data handler
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ RuntimeException cause = new RuntimeException("should be caught");
+ resp.exceptionHandler(err -> {
+ if (err == cause) {
+ testComplete();
+ }
+ });
resp.handler(data -> {
- throw new RuntimeException("should be caught");
+ throw cause;
});
@@ -1306,9 +1315,15 @@ public void testClientExceptionHandlerCalledWhenExceptionOnDataHandler() throws
public void testClientExceptionHandlerCalledWhenExceptionOnBodyHandler() throws Exception {
server.requestHandler(request -> {
request.response().end("foo");
- }).listen(DEFAULT_HTTP_PORT, onSuccess(s -> {
+ }).listen(testAddress, onSuccess(s -> {
// Exception handler should be called for any exceptions in the body handler
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ RuntimeException cause = new RuntimeException("should be caught");
+ resp.exceptionHandler(err -> {
+ if (err == cause) {
+ testComplete();
+ }
+ });
resp.bodyHandler(data -> {
- throw new RuntimeException("should be caught");
+ throw cause;
});
@@ -1324,11 +1339,11 @@ public void testNoExceptionHandlerCalledWhenResponseEnded() throws Exception {
HttpServerResponse resp = req.response();
req.exceptionHandler(this::fail);
resp.exceptionHandler(err -> {
- err.printStackTrace();
+ fail(err);
});
resp.end();
- }).listen(DEFAULT_HTTP_PORT, onSuccess(s -> {
- client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ }).listen(testAddress, onSuccess(s -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.endHandler(v -> {
vertx.setTimer(100, tid -> testComplete());
});
@@ -1377,9 +1392,9 @@ public void testServerExceptionHandlerOnClose() {
assertEquals(1, respEndHandlerCount.get());
testComplete();
});
- }).listen(DEFAULT_HTTP_PORT, ar -> {
+ }).listen(testAddress, ar -> {
HttpClient client = vertx.createHttpClient();
- HttpClientRequest req = client.put(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somerui", handler -> {
+ HttpClientRequest req = client.request(HttpMethod.PUT, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somerui", handler -> {
}).setChunked(true);
req.sendHead(v -> {
@@ -1397,7 +1412,7 @@ public void testClientRequestExceptionHandlerCalledWhenConnectionClosed() throws
});
});
- startServer();
+ startServer(testAddress);
- HttpClientRequest req = client.post(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
+ HttpClientRequest req = client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
resp.handler(chunk -> {
resp.request().connection().close();
});
@@ -1416,15 +1431,15 @@ public void testClientResponseExceptionHandlerCalledWhenConnectionClosed() throw
conn.set(req.connection());
req.response().setChunked(true).write("chunk");
});
- startServer();
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
+ startServer(testAddress);
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
resp.handler(buff -> {
conn.get().close();
});
resp.exceptionHandler(err -> {
testComplete();
});
- });
+ }).end();
await();
}
@@ -1460,8 +1475,8 @@ private void testStatusCode(int code, String statusMessage) {
req.response().end();
});
- server.listen(onSuccess(s -> {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
int theCode;
if (code == -1) {
// Default code - 200
@@ -1507,8 +1522,8 @@ private void testResponseTrailers(boolean individually) {
req.response().end();
});
- server.listen(onSuccess(s -> {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.endHandler(v -> {
assertEquals(trailers.size(), resp.trailers().size());
for (Map.Entry<String, String> entry : trailers) {
@@ -1530,8 +1545,8 @@ public void testResponseNoTrailers() {
req.response().end();
});
- server.listen(onSuccess(s -> {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.endHandler(v -> {
assertTrue(resp.trailers().isEmpty());
testComplete();
@@ -1552,8 +1567,8 @@ public void testUseResponseAfterComplete() throws Exception {
checkHttpServerResponse(resp);
testComplete();
});
- startServer();
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, noOpHandler());
+ startServer(testAddress);
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, noOpHandler()).end();
await();
}
@@ -1583,8 +1598,8 @@ public void testResponseBodyBufferAtEnd() {
req.response().end(body);
});
- server.listen(onSuccess(s -> {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.bodyHandler(buff -> {
assertEquals(body, buff);
testComplete();
@@ -1629,8 +1644,8 @@ private void testResponseBodyWrite(boolean chunked) {
assertTrue(req.response().headWritten());
});
- server.listen(onSuccess(s -> {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.bodyHandler(buff -> {
assertEquals(body, buff);
testComplete();
@@ -1695,8 +1710,8 @@ private void testResponseBodyWriteString(boolean chunked, String encoding) {
req.response().end();
});
- server.listen(onSuccess(s -> {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.bodyHandler(buff -> {
assertEquals(bodyBuff, buff);
testComplete();
@@ -1717,8 +1732,8 @@ public void testResponseWrite() {
req.response().end();
});
- server.listen(onSuccess(s -> {
- client.request(HttpMethod.POST, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.bodyHandler(buff -> {
assertEquals(body, buff);
testComplete();
@@ -1733,14 +1748,14 @@ public void testResponseWrite() {
public void testSendFile() throws Exception {
String content = TestUtils.randomUnicodeString(10000);
sendFile("test-send-file.html", content, false,
- handler -> client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, handler));
+ handler -> client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, handler));
}
@Test
public void testSendFileWithHandler() throws Exception {
String content = TestUtils.randomUnicodeString(10000);
sendFile("test-send-file.html", content, true,
- handler -> client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, handler));
+ handler -> client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, handler));
}
@Test
@@ -1748,7 +1763,7 @@ public void testSendFileWithConnectionCloseHeader() throws Exception {
String content = TestUtils.randomUnicodeString(1024 * 1024 * 2);
sendFile("test-send-file.html", content, false,
handler -> client
- .get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, handler)
+ .request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, handler)
.putHeader(HttpHeaders.CONNECTION, "close"));
}
@@ -1764,7 +1779,7 @@ private void sendFile(String fileName, String contentExpected, boolean useHandle
complete();
}
});
- startServer();
+ startServer(testAddress);
requestFact.apply(resp -> {
assertEquals(200, resp.statusCode());
assertEquals("text/html", resp.headers().get("Content-Type"));
@@ -1791,8 +1806,8 @@ public void testSendNonExistingFile() throws Exception {
});
});
- server.listen(onSuccess(s -> {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.bodyHandler(buff -> {
assertEquals("failed", buff.toString());
testComplete();
@@ -1813,8 +1828,8 @@ public void testSendFileOverrideHeaders() throws Exception {
req.response().sendFile(file.getAbsolutePath());
});
- server.listen(onSuccess(s -> {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertEquals(file.length(), Long.parseLong(resp.headers().get("content-length")));
assertEquals("wibble", resp.headers().get("content-type"));
resp.bodyHandler(buff -> {
@@ -1837,7 +1852,7 @@ public void testSendFileNotFound() throws Exception {
});
- server.listen(onSuccess(s -> {
+ server.listen(testAddress, onSuccess(s -> {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
fail("Should not receive response");
}).end();
vertx.setTimer(100, tid -> testComplete());
@@ -1857,8 +1872,8 @@ public void testSendFileNotFoundWithHandler() throws Exception {
}));
});
- server.listen(onSuccess(s -> {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
fail("Should not receive response");
}).end();
}));
@@ -1879,8 +1894,8 @@ public void testSendFileDirectoryWithHandler() throws Exception {
}));
});
- server.listen(onSuccess(s -> {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
fail("Should not receive response");
}).end();
}));
@@ -1890,10 +1905,10 @@ public void testSendFileDirectoryWithHandler() throws Exception {
@Test
public void testSendOpenRangeFileFromClasspath() {
- vertx.createHttpServer(new HttpServerOptions().setPort(8080)).requestHandler(res -> {
+ server.requestHandler(res -> {
res.response().sendFile("webroot/somefile.html", 6);
- }).listen(onSuccess(res -> {
- vertx.createHttpClient(new HttpClientOptions()).request(HttpMethod.GET, 8080, "localhost", "/", resp -> {
+ }).listen(testAddress, onSuccess(res -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.bodyHandler(buff -> {
assertTrue(buff.toString().startsWith("<body>blah</body></html>"));
testComplete();
@@ -1905,10 +1920,10 @@ public void testSendOpenRangeFileFromClasspath() {
@Test
public void testSendRangeFileFromClasspath() {
- vertx.createHttpServer(new HttpServerOptions().setPort(8080)).requestHandler(res -> {
+ server.requestHandler(res -> {
res.response().sendFile("webroot/somefile.html", 6, 6);
- }).listen(onSuccess(res -> {
- vertx.createHttpClient(new HttpClientOptions()).request(HttpMethod.GET, 8080, "localhost", "/", resp -> {
+ }).listen(testAddress, onSuccess(res -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.bodyHandler(buff -> {
assertEquals("<body>", buff.toString());
testComplete();
@@ -1919,7 +1934,7 @@ public void testSendRangeFileFromClasspath() {
}
@Test
- public void test100ContinueHandledAutomatically() throws Exception {
+ public void test100ContinueHandledAutomatically() {
Buffer toSend = TestUtils.randomBuffer(1000);
server.requestHandler(req -> {
@@ -1929,8 +1944,8 @@ public void test100ContinueHandledAutomatically() throws Exception {
});
});
- server.listen(onSuccess(s -> {
- HttpClientRequest req = client.request(HttpMethod.PUT, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ HttpClientRequest req = client.request(HttpMethod.PUT, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.endHandler(v -> testComplete());
});
req.headers().set("Expect", "100-continue");
@@ -1946,7 +1961,7 @@ public void test100ContinueHandledAutomatically() throws Exception {
}
@Test
- public void test100ContinueHandledManually() throws Exception {
+ public void test100ContinueHandledManually() {
server.close();
server = vertx.createHttpServer(createBaseServerOptions());
@@ -1961,8 +1976,8 @@ public void test100ContinueHandledManually() throws Exception {
});
});
- server.listen(onSuccess(s -> {
- HttpClientRequest req = client.request(HttpMethod.PUT, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ HttpClientRequest req = client.request(HttpMethod.PUT, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.endHandler(v -> testComplete());
});
req.headers().set("Expect", "100-continue");
@@ -1978,7 +1993,7 @@ public void test100ContinueHandledManually() throws Exception {
}
@Test
- public void test100ContinueRejectedManually() throws Exception {
+ public void test100ContinueRejectedManually() {
server.close();
server = vertx.createHttpServer(createBaseServerOptions());
@@ -1990,8 +2005,8 @@ public void test100ContinueRejectedManually() throws Exception {
});
});
- server.listen(onSuccess(s -> {
- HttpClientRequest req = client.request(HttpMethod.PUT, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ HttpClientRequest req = client.request(HttpMethod.PUT, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertEquals(405, resp.statusCode());
testComplete();
});
@@ -2009,7 +2024,7 @@ public void test100ContinueRejectedManually() throws Exception {
@Test
public void testClientDrainHandler() {
pausingServer(resumeFuture -> {
- HttpClientRequest req = client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, noOpHandler());
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, noOpHandler());
req.setChunked(true);
assertFalse(req.writeQueueFull());
req.setWriteQueueMaxSize(1000);
@@ -2035,7 +2050,7 @@ public void testClientDrainHandler() {
@Test
public void testClientRequestExceptionHandlerCalledWhenExceptionOnDrainHandler() {
pausingServer(resumeFuture -> {
- HttpClientRequest req = client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, noOpHandler());
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, noOpHandler());
req.setChunked(true);
assertFalse(req.writeQueueFull());
req.setWriteQueueMaxSize(1000);
@@ -2080,13 +2095,13 @@ private void pausingServer(Consumer<Future<Void>> consumer) {
});
});
- server.listen(onSuccess(s -> consumer.accept(resumeFuture)));
+ server.listen(testAddress, onSuccess(s -> consumer.accept(resumeFuture)));
}
@Test
public void testServerDrainHandler() {
drainingServer(resumeFuture -> {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.pause();
resumeFuture.setHandler(ar -> resp.resume());
}).end();
@@ -2121,7 +2136,7 @@ private void drainingServer(Consumer<Future<Void>> consumer) {
});
});
- server.listen(onSuccess(s -> consumer.accept(resumeFuture)));
+ server.listen(testAddress, onSuccess(s -> consumer.accept(resumeFuture)));
}
@Test
@@ -2133,7 +2148,7 @@ public void testConnectionErrorsGetReportedToRequest() throws InterruptedExcepti
CountDownLatch latch = new CountDownLatch(3);
// This one should cause an error in the Client Exception handler, because it has no exception handler set specifically.
- HttpClientRequest req1 = client.request(HttpMethod.GET, 9998, DEFAULT_HTTP_HOST, "someurl1", resp -> {
+ HttpClientRequest req1 = client.request(HttpMethod.GET, testAddress, 9998, DEFAULT_HTTP_HOST, "someurl1", resp -> {
fail("Should never get a response on a bad port, if you see this message than you are running an http server on port 9998");
});
@@ -2142,7 +2157,7 @@ public void testConnectionErrorsGetReportedToRequest() throws InterruptedExcepti
latch.countDown();
});
- HttpClientRequest req2 = client.request(HttpMethod.GET, 9998, DEFAULT_HTTP_HOST, "someurl2", resp -> {
+ HttpClientRequest req2 = client.request(HttpMethod.GET, testAddress, 9998, DEFAULT_HTTP_HOST, "someurl2", resp -> {
fail("Should never get a response on a bad port, if you see this message than you are running an http server on port 9998");
});
@@ -2151,7 +2166,7 @@ public void testConnectionErrorsGetReportedToRequest() throws InterruptedExcepti
latch.countDown();
});
- HttpClientRequest req3 = client.request(HttpMethod.GET, 9998, DEFAULT_HTTP_HOST, "someurl2", resp -> {
+ HttpClientRequest req3 = client.request(HttpMethod.GET, testAddress, 9998, DEFAULT_HTTP_HOST, "someurl2", resp -> {
fail("Should never get a response on a bad port, if you see this message than you are running an http server on port 9998");
});
@@ -2172,8 +2187,8 @@ public void testConnectionErrorsGetReportedToRequest() throws InterruptedExcepti
public void testRequestTimesoutWhenIndicatedPeriodExpiresWithoutAResponseFromRemoteServer() {
server.requestHandler(noOpHandler()); // No response handler so timeout triggers
AtomicBoolean failed = new AtomicBoolean();
- server.listen(onSuccess(s -> {
- HttpClientRequest req = client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
fail("End should not be called because the request should timeout");
});
req.exceptionHandler(t -> {
@@ -2215,11 +2230,11 @@ public void testRequestTimeoutCanceledWhenRequestHasAnOtherError() {
public void testRequestTimeoutCanceledWhenRequestEndsNormally() {
server.requestHandler(req -> req.response().end());
- server.listen(onSuccess(s -> {
+ server.listen(testAddress, onSuccess(s -> {
AtomicReference<Throwable> exception = new AtomicReference<>();
// The request ends normally, so the timeout must be canceled and never fire
- HttpClientRequest req = client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, noOpHandler());
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, noOpHandler());
req.exceptionHandler(exception::set);
req.setTimeout(500);
req.end();
@@ -2244,8 +2259,8 @@ public void testHttpClientRequestTimeoutResetsTheConnection() throws Exception {
}
});
});
- startServer();
- HttpClientRequest req = client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> fail("Response should not be handled"));
+ startServer(testAddress);
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> fail("Response should not be handled"));
req.exceptionHandler(err -> {
complete();
});
@@ -2275,10 +2290,10 @@ public void testConnectInvalidHost() {
}
@Test
- public void testSetHandlersAfterListening() throws Exception {
+ public void testSetHandlersAfterListening() {
server.requestHandler(noOpHandler());
- server.listen(onSuccess(s -> {
+ server.listen(testAddress, onSuccess(s -> {
assertIllegalStateException(() -> server.requestHandler(noOpHandler()));
assertIllegalStateException(() -> server.websocketHandler(noOpHandler()));
testComplete();
@@ -2288,47 +2303,46 @@ public void testSetHandlersAfterListening() throws Exception {
}
@Test
- public void testSetHandlersAfterListening2() throws Exception {
+ public void testSetHandlersAfterListening2() {
server.requestHandler(noOpHandler());
- server.listen(onSuccess(v -> testComplete()));
+ server.listen(testAddress, onSuccess(v -> testComplete()));
assertIllegalStateException(() -> server.requestHandler(noOpHandler()));
assertIllegalStateException(() -> server.websocketHandler(noOpHandler()));
await();
}
@Test
- public void testListenNoHandlers() throws Exception {
+ public void testListenNoHandlers() {
assertIllegalStateException(() -> server.listen(ar -> {
}));
}
@Test
- public void testListenNoHandlers2() throws Exception {
+ public void testListenNoHandlers2() {
assertIllegalStateException(() -> server.listen());
}
@Test
- public void testListenTwice() throws Exception {
+ public void testListenTwice() {
server.requestHandler(noOpHandler());
- server.listen(onSuccess(v -> testComplete()));
+ server.listen(testAddress, onSuccess(v -> testComplete()));
assertIllegalStateException(() -> server.listen());
await();
}
@Test
- public void testListenTwice2() throws Exception {
+ public void testListenTwice2() {
server.requestHandler(noOpHandler());
- server.listen(ar -> {
- assertTrue(ar.succeeded());
+ server.listen(testAddress, onSuccess(s -> {
assertIllegalStateException(() -> server.listen());
testComplete();
- });
+ }));
await();
}
@Test
- public void testHeadCanSetContentLength() {
+ public void testHeadCanSetContentLength() throws Exception {
server.requestHandler(req -> {
assertEquals(HttpMethod.HEAD, req.method());
// Head never contains a body but it can contain a Content-Length header
@@ -2336,14 +2350,16 @@ public void testHeadCanSetContentLength() {
req.response().headers().set("Content-Length", String.valueOf(41));
req.response().end();
});
-
- server.listen(onSuccess(s -> {
- client.request(HttpMethod.HEAD, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
- assertEquals("41", resp.headers().get("Content-Length"));
- resp.endHandler(v -> testComplete());
- }).end();
- }));
-
+ startServer(testAddress);
+ client.request(HttpMethod.HEAD, testAddress, new RequestOptions()
+ .setHost(DEFAULT_HTTP_HOST)
+ .setPort(DEFAULT_HTTP_PORT)
+ .setURI(DEFAULT_TEST_URI), resp -> {
+ assertEquals("41", resp.headers().get("Content-Length"));
+ resp.endHandler(v -> testComplete());
+ assertEquals("41", resp.headers().get("Content-Length"));
+ resp.endHandler(v -> testComplete());
+ }).end();
await();
}
@@ -2438,10 +2454,10 @@ protected MultiMap checkEmptyHttpResponse(HttpMethod method, int sc, MultiMap re
resp.headers().addAll(reqHeaders);
resp.end();
});
- startServer();
+ startServer(testAddress);
try {
CompletableFuture<MultiMap> result = new CompletableFuture<>();
- client.request(method, DEFAULT_HTTP_PORT, DEFAULT_HTTPS_HOST, "/", resp -> {
+ client.request(method, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTPS_HOST, "/", resp -> {
Buffer body = Buffer.buffer();
resp.exceptionHandler(result::completeExceptionally);
resp.handler(body::appendBuffer);
@@ -2469,8 +2485,8 @@ public void testHeadHasNoContentLengthByDefault() {
req.response().end();
});
- server.listen(onSuccess(s -> {
- client.request(HttpMethod.HEAD, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ client.request(HttpMethod.HEAD, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertNull(resp.headers().get(HttpHeaders.CONTENT_LENGTH));
resp.endHandler(v -> testComplete());
}).end();
@@ -2488,8 +2504,8 @@ public void testHeadButCanSetContentLength() {
req.response().putHeader(HttpHeaders.CONTENT_LENGTH, "41").end();
});
- server.listen(onSuccess(s -> {
- client.request(HttpMethod.HEAD, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ client.request(HttpMethod.HEAD, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertEquals("41", resp.headers().get(HttpHeaders.CONTENT_LENGTH));
resp.endHandler(v -> testComplete());
}).end();
@@ -2501,12 +2517,16 @@ public void testHeadButCanSetContentLength() {
@Test
public void testRemoteAddress() {
server.requestHandler(req -> {
- assertEquals("127.0.0.1", req.remoteAddress().host());
+ if (testAddress.host() != null) {
+ assertEquals("127.0.0.1", req.remoteAddress().host());
+ } else {
+ // Returns null for domain sockets
+ }
req.response().end();
});
- server.listen(onSuccess(s -> {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> resp.endHandler(v -> testComplete())).end();
+ server.listen(testAddress, onSuccess(s -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> resp.endHandler(v -> testComplete())).end();
}));
await();
@@ -2519,8 +2539,8 @@ public void testGetAbsoluteURI() {
req.response().end();
});
- server.listen(onSuccess(s -> {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/foo/bar", resp -> resp.endHandler(v -> testComplete())).end();
+ server.listen(testAddress, onSuccess(s -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/foo/bar", resp -> resp.endHandler(v -> testComplete())).end();
}));
await();
@@ -2550,8 +2570,8 @@ public void testPauseResumeClientResponseWontCallEndHandlePrematurely() throws E
server.requestHandler(req -> {
req.response().end(expected);
});
- startServer();
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ startServer(testAddress);
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.bodyHandler(body -> {
assertEquals(expected, body);
testComplete();
@@ -2559,7 +2579,7 @@ public void testPauseResumeClientResponseWontCallEndHandlePrematurely() throws E
// Check that pause resume won't call the end handler prematurely
resp.pause();
resp.resume();
- });
+ }).end();
await();
}
@@ -2578,7 +2598,7 @@ public void testPauseClientResponse() {
AtomicBoolean paused = new AtomicBoolean();
Buffer totBuff = Buffer.buffer();
- HttpClientRequest clientRequest = client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ HttpClientRequest clientRequest = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.pause();
paused.set(true);
resp.handler(chunk -> {
@@ -2602,7 +2622,7 @@ public void testPauseClientResponse() {
});
});
- server.listen(onSuccess(s -> clientRequest.end()));
+ server.listen(testAddress, onSuccess(s -> clientRequest.end()));
await();
}
@@ -2646,12 +2666,12 @@ private void testDeliverPausedBufferWhenResume(Consumer<Runnable> scheduler) thr
});
resp.setChunked(true).write(data);
});
- startServer();
+ startServer(testAddress);
client.close();
client = vertx.createHttpClient(createBaseClientOptions().setMaxPoolSize(1).setKeepAlive(true));
for (int i = 0;i < num;i++) {
int idx = i;
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/" + i, resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/" + i, resp -> {
Buffer body = Buffer.buffer();
Thread t = Thread.currentThread();
resp.handler(buff -> {
@@ -2678,11 +2698,11 @@ public void testClearPausedBuffersWhenResponseEnds() throws Exception {
server.requestHandler(req -> {
req.response().end(data);
});
- startServer();
+ startServer(testAddress);
client.close();
client = vertx.createHttpClient(createBaseClientOptions().setMaxPoolSize(1).setKeepAlive(true));
for (int i = 0;i < num;i++) {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.bodyHandler(buff -> {
assertEquals(data, buff);
complete();
@@ -2717,8 +2737,8 @@ public void testPausedHttpServerRequest() throws Exception {
req.response().end();
});
});
- startServer();
- HttpClientRequest req = client.put(DEFAULT_HTTP_PORT, DEFAULT_HTTPS_HOST, DEFAULT_TEST_URI, resp -> {
+ startServer(testAddress);
+ HttpClientRequest req = client.request(HttpMethod.PUT, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTPS_HOST, DEFAULT_TEST_URI, resp -> {
resp.endHandler(v -> {
testComplete();
});
@@ -2768,10 +2788,10 @@ private void testHttpServerRequestPausedDuringLastChunk(boolean fetching) throws
req.response().end();
});
});
- startServer();
+ startServer(testAddress);
client.close();
client = vertx.createHttpClient(createBaseClientOptions().setMaxPoolSize(1));
- client.put(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/someuri", resp -> {
+ client.request(HttpMethod.PUT, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/someuri", resp -> {
complete();
}).end("small");
await();
@@ -2791,10 +2811,10 @@ private void testHttpClientResponsePausedDuringLastChunk(boolean fetching) throw
server.requestHandler(req -> {
req.response().end("small");
});
- startServer();
+ startServer(testAddress);
client.close();
client = vertx.createHttpClient(createBaseClientOptions().setMaxPoolSize(1));
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/someuri", resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/someuri", resp -> {
AtomicBoolean ended = new AtomicBoolean();
AtomicBoolean paused = new AtomicBoolean();
resp.handler(buff -> {
@@ -2816,91 +2836,91 @@ private void testHttpClientResponsePausedDuringLastChunk(boolean fetching) throw
ended.set(true);
complete();
});
- });
+ }).end();
await();
}
@Test
- public void testFormUploadEmptyFile() throws Exception {
+ public void testFormUploadEmptyFile() {
testFormUploadFile("", false, false);
}
@Test
- public void testFormUploadSmallFile() throws Exception {
+ public void testFormUploadSmallFile() {
testFormUploadFile(TestUtils.randomAlphaString(100), false, false);
}
@Test
- public void testFormUploadMediumFile() throws Exception {
+ public void testFormUploadMediumFile() {
testFormUploadFile(TestUtils.randomAlphaString(20000), false, false);
}
@Test
- public void testFormUploadLargeFile() throws Exception {
+ public void testFormUploadLargeFile() {
testFormUploadFile(TestUtils.randomAlphaString(4 * 1024 * 1024), false, false);
}
@Test
- public void testFormUploadEmptyFileStreamToDisk() throws Exception {
+ public void testFormUploadEmptyFileStreamToDisk() {
testFormUploadFile("", true, false);
}
@Test
- public void testFormUploadSmallFileStreamToDisk() throws Exception {
+ public void testFormUploadSmallFileStreamToDisk() {
testFormUploadFile(TestUtils.randomAlphaString(100), true, false);
}
@Test
- public void testFormUploadMediumFileStreamToDisk() throws Exception {
+ public void testFormUploadMediumFileStreamToDisk() {
testFormUploadFile(TestUtils.randomAlphaString(20 * 1024), true, false);
}
@Test
- public void testFormUploadLargeFileStreamToDisk() throws Exception {
+ public void testFormUploadLargeFileStreamToDisk() {
testFormUploadFile(TestUtils.randomAlphaString(4 * 1024 * 1024), true, false);
}
@Test
- public void testBrokenFormUploadEmptyFile() throws Exception {
+ public void testBrokenFormUploadEmptyFile() {
testFormUploadFile("", true, true);
}
@Test
- public void testBrokenFormUploadSmallFile() throws Exception {
+ public void testBrokenFormUploadSmallFile() {
testFormUploadFile(TestUtils.randomAlphaString(100), true, true);
}
@Test
- public void testBrokenFormUploadMediumFile() throws Exception {
+ public void testBrokenFormUploadMediumFile() {
testFormUploadFile(TestUtils.randomAlphaString(20 * 1024), true, true);
}
@Test
- public void testBrokenFormUploadLargeFile() throws Exception {
+ public void testBrokenFormUploadLargeFile() {
testFormUploadFile(TestUtils.randomAlphaString(4 * 1024 * 1024), true, true);
}
@Test
- public void testBrokenFormUploadEmptyFileStreamToDisk() throws Exception {
+ public void testBrokenFormUploadEmptyFileStreamToDisk() {
testFormUploadFile("", true, true);
}
@Test
- public void testBrokenFormUploadSmallFileStreamToDisk() throws Exception {
+ public void testBrokenFormUploadSmallFileStreamToDisk() {
testFormUploadFile(TestUtils.randomAlphaString(100), true, true);
}
@Test
- public void testBrokenFormUploadMediumFileStreamToDisk() throws Exception {
+ public void testBrokenFormUploadMediumFileStreamToDisk() {
testFormUploadFile(TestUtils.randomAlphaString(20 * 1024), true, true);
}
@Test
- public void testBrokenFormUploadLargeFileStreamToDisk() throws Exception {
+ public void testBrokenFormUploadLargeFileStreamToDisk() {
testFormUploadFile(TestUtils.randomAlphaString(4 * 1024 * 1024), true, true);
}
- private void testFormUploadFile(String contentStr, boolean streamToDisk, boolean abortClient) throws Exception {
+ private void testFormUploadFile(String contentStr, boolean streamToDisk, boolean abortClient) {
waitFor(2);
@@ -2972,9 +2992,9 @@ private void testFormUploadFile(String contentStr, boolean streamToDisk, boolean
}
});
- server.listen(onSuccess(s -> {
+ server.listen(testAddress, onSuccess(s -> {
AtomicBoolean failed = new AtomicBoolean();
- HttpClientRequest req = client.request(HttpMethod.POST, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/form", resp -> {
+ HttpClientRequest req = client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/form", resp -> {
assertFalse(abortClient);
// assert the response
assertEquals(200, resp.statusCode());
@@ -3015,7 +3035,7 @@ private void testFormUploadFile(String contentStr, boolean streamToDisk, boolean
}
@Test
- public void testFormUploadAttributes() throws Exception {
+ public void testFormUploadAttributes() {
AtomicInteger attributeCount = new AtomicInteger();
server.requestHandler(req -> {
if (req.method() == HttpMethod.POST) {
@@ -3037,8 +3057,8 @@ public void testFormUploadAttributes() throws Exception {
}
});
- server.listen(onSuccess(s -> {
- HttpClientRequest req = client.request(HttpMethod.POST, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/form", resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ HttpClientRequest req = client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/form", resp -> {
// assert the response
assertEquals(200, resp.statusCode());
resp.bodyHandler(body -> {
@@ -3063,7 +3083,7 @@ public void testFormUploadAttributes() throws Exception {
}
@Test
- public void testFormUploadAttributes2() throws Exception {
+ public void testFormUploadAttributes2() {
AtomicInteger attributeCount = new AtomicInteger();
server.requestHandler(req -> {
if (req.method() == HttpMethod.POST) {
@@ -3083,8 +3103,8 @@ public void testFormUploadAttributes2() throws Exception {
}
});
- server.listen(onSuccess(s -> {
- HttpClientRequest req = client.request(HttpMethod.POST, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/form", resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ HttpClientRequest req = client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/form", resp -> {
// assert the response
assertEquals(200, resp.statusCode());
resp.bodyHandler(body -> {
@@ -3110,8 +3130,8 @@ public void testHostHeaderOverridePossible() {
req.response().end();
});
- server.listen(onSuccess(s -> {
- HttpClientRequest req = client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> testComplete());
+ server.listen(testAddress, onSuccess(s -> {
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> testComplete());
req.setHost("localhost:4444");
req.end();
}));
@@ -3130,8 +3150,8 @@ public void testResponseBodyWriteFixedString() {
req.response().end();
});
- server.listen(onSuccess(s -> {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.bodyHandler(buff -> {
assertEquals(bodyBuff, buff);
testComplete();
@@ -3148,11 +3168,10 @@ public void testResponseDataTimeout() {
server.requestHandler(req -> {
req.response().setChunked(true).write(expected);
});
- server.listen(onSuccess(s -> {
- HttpClientRequest req = client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI);
+ server.listen(testAddress, onSuccess(s -> {
Buffer received = Buffer.buffer();
- req.handler(resp -> {
- req.setTimeout(500);
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ resp.request().setTimeout(500);
resp.handler(received::appendBuffer);
});
AtomicInteger count = new AtomicInteger();
@@ -3176,13 +3195,12 @@ public void testClientMultiThreaded() throws Exception {
server.requestHandler(req -> {
req.response().putHeader("count", req.headers().get("count"));
req.response().end();
- }).listen(ar -> {
- assertTrue(ar.succeeded());
+ }).listen(testAddress, onSuccess(s -> {
for (int i = 0; i < numThreads; i++) {
int index = i;
threads[i] = new Thread() {
public void run() {
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", res -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", res -> {
assertEquals(200, res.statusCode());
assertEquals(String.valueOf(index), res.headers().get("count"));
latch.countDown();
@@ -3191,7 +3209,7 @@ public void run() {
};
threads[i].start();
}
- });
+ }));
awaitLatch(latch);
for (int i = 0; i < numThreads; i++) {
threads[i].join();
@@ -3203,7 +3221,7 @@ public void testInVerticle() throws Exception {
testInVerticle(false);
}
- private void testInVerticle(boolean worker) throws Exception {
+ private void testInVerticle(boolean worker) {
client.close();
server.close();
class MyVerticle extends AbstractVerticle {
@@ -3225,14 +3243,13 @@ public void start() {
assertSame(thr, Thread.currentThread());
}
});
- server.listen(ar -> {
- assertTrue(ar.succeeded());
+ server.listen(testAddress, onSuccess(s -> {
assertSame(ctx, Vertx.currentContext());
if (!worker) {
assertSame(thr, Thread.currentThread());
}
client = vertx.createHttpClient(new HttpClientOptions());
- client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", res -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", res -> {
assertSame(ctx, Vertx.currentContext());
if (!worker) {
assertSame(thr, Thread.currentThread());
@@ -3240,7 +3257,7 @@ public void start() {
assertEquals(200, res.statusCode());
testComplete();
}).end();
- });
+ }));
}
}
MyVerticle verticle = new MyVerticle();
@@ -3429,13 +3446,12 @@ public void testRequestEnded() {
req.response().setStatusCode(200).end();
});
});
- server.listen(ar -> {
- assertTrue(ar.succeeded());
- client.getNow(HttpTestBase.DEFAULT_HTTP_PORT, HttpTestBase.DEFAULT_HTTP_HOST, "/blah", resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ client.request(HttpMethod.GET, testAddress, HttpTestBase.DEFAULT_HTTP_PORT, HttpTestBase.DEFAULT_HTTP_HOST, "/blah", resp -> {
assertEquals(200, resp.statusCode());
testComplete();
- });
- });
+ }).end();
+ }));
await();
}
@@ -3476,12 +3492,11 @@ public void testRequestEndedNoEndHandler() {
testComplete();
});
});
- server.listen(ar -> {
- assertTrue(ar.succeeded());
- client.getNow(HttpTestBase.DEFAULT_HTTP_PORT, HttpTestBase.DEFAULT_HTTP_HOST, "/blah", resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ client.request(HttpMethod.GET, testAddress, HttpTestBase.DEFAULT_HTTP_PORT, HttpTestBase.DEFAULT_HTTP_HOST, "/blah", resp -> {
assertEquals(200, resp.statusCode());
- });
- });
+ }).end();
+ }));
await();
}
@@ -3521,14 +3536,14 @@ public void testAbsoluteURIServer() {
assertEquals(req.scheme() + "://localhost:8080/path", absURI);
req.response().end();
});
- server.listen(onSuccess(s -> {
+ server.listen(testAddress, onSuccess(s -> {
String host = "localhost";
String path = "/path";
int port = 8080;
- client.getNow(port, host, path, resp -> {
+ client.request(HttpMethod.GET, testAddress, port, host, path, resp -> {
assertEquals(200, resp.statusCode());
testComplete();
- });
+ }).end();
}));
await();
@@ -3538,26 +3553,27 @@ public void testAbsoluteURIServer() {
public void testDumpManyRequestsOnQueue() throws Exception {
int sendRequests = 10000;
AtomicInteger receivedRequests = new AtomicInteger();
+ HttpClientOptions ops = createBaseClientOptions()
+ .setDefaultPort(DEFAULT_HTTP_PORT)
+ .setPipelining(true)
+ .setKeepAlive(true);
+ client.close();
+ client = vertx.createHttpClient(ops);
vertx.createHttpServer(createBaseServerOptions()).requestHandler(r-> {
r.response().end();
if (receivedRequests.incrementAndGet() == sendRequests) {
testComplete();
}
- }).listen(onSuccess(s -> {
- HttpClientOptions ops = createBaseClientOptions()
- .setDefaultPort(DEFAULT_HTTP_PORT)
- .setPipelining(true)
- .setKeepAlive(true);
- HttpClient client = vertx.createHttpClient(ops);
- IntStream.range(0, sendRequests).forEach(x -> client.getNow("/", r -> {}));
+ }).listen(testAddress, onSuccess(s -> {
+ IntStream.range(0, sendRequests).forEach(x -> client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, r -> {}).end());
}));
await();
}
@Test
- public void testOtherMethodWithRawMethod() throws Exception {
+ public void testOtherMethodWithRawMethod() {
try {
- client.request(HttpMethod.OTHER, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
+ client.request(HttpMethod.OTHER, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
}).end();
fail();
} catch (IllegalStateException expected) {
@@ -3565,13 +3581,13 @@ public void testOtherMethodWithRawMethod() throws Exception {
}
@Test
- public void testOtherMethodRequest() throws Exception {
+ public void testOtherMethodRequest() {
server.requestHandler(r -> {
assertEquals(HttpMethod.OTHER, r.method());
assertEquals("COPY", r.rawMethod());
r.response().end();
- }).listen(onSuccess(s -> {
- client.request(HttpMethod.OTHER, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
+ }).listen(testAddress, onSuccess(s -> {
+ client.request(HttpMethod.OTHER, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
testComplete();
}).setRawMethod("COPY").end();
}));
@@ -3597,15 +3613,13 @@ private void testClientConnectionHandler(boolean local, boolean global) throws E
server.requestHandler(req -> {
req.response().end();
});
- CountDownLatch listenLatch = new CountDownLatch(1);
- server.listen(onSuccess(s -> listenLatch.countDown()));
- awaitLatch(listenLatch);
+ startServer(testAddress);
AtomicInteger status = new AtomicInteger();
Handler<HttpConnection> handler = conn -> status.getAndIncrement();
if (global) {
client.connectionHandler(handler);
}
- HttpClientRequest req = client.post(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
+ HttpClientRequest req = client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
assertEquals((local ? 1 : 0) + (global ? 1 : 0), status.getAndIncrement());
testComplete();
});
@@ -3620,6 +3634,7 @@ private void testClientConnectionHandler(boolean local, boolean global) throws E
public void testServerConnectionHandler() throws Exception {
AtomicInteger status = new AtomicInteger();
AtomicReference<HttpConnection> connRef = new AtomicReference<>();
+ Context serverCtx = vertx.getOrCreateContext();
server.connectionHandler(conn -> {
assertEquals(0, status.getAndIncrement());
assertNull(connRef.getAndSet(conn));
@@ -3629,12 +3644,10 @@ public void testServerConnectionHandler() throws Exception {
assertSame(connRef.get(), req.connection());
req.response().end();
});
- CountDownLatch listenLatch = new CountDownLatch(1);
- server.listen(onSuccess(s -> listenLatch.countDown()));
- awaitLatch(listenLatch);
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
+ startServer(testAddress, serverCtx, server);
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
testComplete();
- });
+ }).end();
await();
}
@@ -3653,10 +3666,8 @@ public void testClientConnectionClose() throws Exception {
testComplete();
});
});
- CountDownLatch listenLatch = new CountDownLatch(1);
- server.listen(onSuccess(s -> listenLatch.countDown()));
- awaitLatch(listenLatch);
- HttpClientRequest req = client.post(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
+ startServer(testAddress);
+ HttpClientRequest req = client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
fail();
});
req.setChunked(true);
@@ -3672,10 +3683,8 @@ public void testServerConnectionClose() throws Exception {
server.requestHandler(req -> {
req.connection().close();
});
- CountDownLatch listenLatch = new CountDownLatch(1);
- server.listen(onSuccess(s -> listenLatch.countDown()));
- awaitLatch(listenLatch);
- HttpClientRequest req = client.post(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
+ startServer(testAddress);
+ HttpClientRequest req = client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
fail();
});
req.connectionHandler(conn -> {
@@ -3727,10 +3736,10 @@ public void testClientLocalAddress() throws Exception {
req.response().end();
});
startServer();
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
+ client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
assertEquals(200, resp.statusCode());
testComplete();
- });
+ }).end();
await();
}
@@ -4316,9 +4325,9 @@ public void testEventHandlersNotHoldingLock() throws Exception {
break;
}
});
- startServer();
+ startServer(testAddress);
for (int i = 0;i < 2;i++) {
- client.post(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/" + i, resp -> {
+ client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/" + i, resp -> {
assertEquals(200, resp.statusCode());
HttpConnection conn = resp.request().connection();
switch (resp.request().path()) {
@@ -4385,8 +4394,8 @@ public void testEventHandlersNotHoldingLockOnClose() throws Exception {
});
resp.setChunked(true).write("hello");
});
- startServer();
- HttpClientRequest req = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
+ startServer(testAddress);
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
assertEquals(200, resp.statusCode());
HttpConnection conn = resp.request().connection();
resp.exceptionHandler(err -> {
@@ -4415,12 +4424,12 @@ public void testCloseHandlerWhenConnectionEnds() throws Exception {
});
req.response().setChunked(true).write("some-data");
});
- startServer();
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
+ startServer(testAddress);
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
resp.handler(v -> {
resp.request().connection().close();
});
- });
+ }).end();
await();
}
@@ -4463,12 +4472,12 @@ protected void testCloseHandlerNotCalledWhenConnectionClosedAfterEnd(int expecte
});
req.response().end("some-data");
});
- startServer();
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
+ startServer(testAddress);
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
resp.endHandler(v -> {
resp.request().connection().close();
});
- });
+ }).end();
await();
}
@@ -4497,11 +4506,11 @@ public void testClientDecompressionError() throws Exception {
.putHeader("Content-Encoding", "gzip")
.end("long response with mismatched encoding causes connection leaks");
});
- startServer();
+ startServer(testAddress);
AtomicInteger exceptionCount = new AtomicInteger();
client.close();
client = vertx.createHttpClient(createBaseClientOptions().setTryUseCompression(true));
- client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.exceptionHandler(err -> {
if (exceptionCount.incrementAndGet() == 1) {
if (err instanceof Http2Exception) {
@@ -4531,8 +4540,8 @@ public void testContainsValueString() {
req.response().putHeader("quux", "quux");
req.response().end();
});
- server.listen(onSuccess(server -> {
- HttpClientRequest req = client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(server -> {
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertTrue(resp.headers().contains("Quux", "quux", false));
assertFalse(resp.headers().contains("Quux", "quUx", false));
testComplete();
@@ -4551,8 +4560,8 @@ public void testContainsValueStringIgnoreCase() {
req.response().putHeader("quux", "quux");
req.response().end();
});
- server.listen(onSuccess(server -> {
- HttpClientRequest req = client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(server -> {
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertTrue(resp.headers().contains("Quux", "quux", true));
assertTrue(resp.headers().contains("Quux", "quUx", true));
testComplete();
@@ -4579,8 +4588,8 @@ public void testContainsValueCharSequence() {
req.response().putHeader(quux, quux);
req.response().end();
});
- server.listen(onSuccess(server -> {
- HttpClientRequest req = client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(server -> {
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertTrue(resp.headers().contains(Quux, quux, false));
assertFalse(resp.headers().contains(Quux, quUx, false));
testComplete();
@@ -4607,8 +4616,8 @@ public void testContainsValueCharSequenceIgnoreCase() {
req.response().putHeader(quux, quux);
req.response().end();
});
- server.listen(onSuccess(server -> {
- HttpClientRequest req = client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(server -> {
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertTrue(resp.headers().contains(Quux, quux, true));
assertTrue(resp.headers().contains(Quux, quUx, true));
testComplete();
@@ -4629,8 +4638,8 @@ public void testBytesReadRequest() throws Exception {
req.response().end();
});
});
- startServer();
- client.post(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ startServer(testAddress);
+ client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.bodyHandler(buff -> {
testComplete();
});
@@ -4672,14 +4681,12 @@ public void testClientSynchronousConnectFailures() {
@Test
public void testClientConnectInvalidPort() {
- client.get(-1, "localhost", "/someuri", resp -> {
- fail();
- }).exceptionHandler(err -> {
- assertEquals(err.getClass(), IllegalArgumentException.class);
- assertEquals(err.getMessage(), "port p must be in range 0 <= p <= 65535");
- testComplete();
- }).end();
- await();
+ try {
+ client.request(HttpMethod.GET, testAddress, -1, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, ar -> fail());
+ } catch (Exception e) {
+ assertEquals(e.getClass(), IllegalArgumentException.class);
+ assertEquals(e.getMessage(), "port p must be in range 0 <= p <= 65535");
+ }
}
protected File setupFile(String fileName, String content) throws Exception {
@@ -4744,8 +4751,8 @@ public void testHttpClientRequestHeadersDontContainCROrLF() throws Exception {
});
testComplete();
});
- startServer();
- HttpClientRequest req = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {});
+ startServer(testAddress);
+ HttpClientRequest req = client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {});
List<BiConsumer<String, String>> list = Arrays.asList(
req::putHeader,
req.headers()::set,
@@ -4781,8 +4788,8 @@ public void testHttpServerResponseHeadersDontContainCROrLF() throws Exception {
assertEquals(Collections.emptySet(), req.response().headers().names());
req.response().end();
});
- startServer();
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ startServer(testAddress);
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.headers().forEach(header -> {
String name = header.getKey();
switch (name.toLowerCase()) {
@@ -4794,7 +4801,7 @@ public void testHttpServerResponseHeadersDontContainCROrLF() throws Exception {
}
});
testComplete();
- });
+ }).end();
await();
}
@@ -4803,14 +4810,14 @@ public void testDisableIdleTimeoutInPool() throws Exception {
server.requestHandler(req -> {
req.response().end();
});
- startServer();
+ startServer(testAddress);
client.close();
client = vertx.createHttpClient(createBaseClientOptions()
.setIdleTimeout(1)
.setMaxPoolSize(1)
.setKeepAliveTimeout(10)
);
- client.getNow(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.endHandler(v1 -> {
AtomicBoolean closed = new AtomicBoolean();
resp.request().connection().closeHandler(v2 -> {
@@ -4821,7 +4828,7 @@ public void testDisableIdleTimeoutInPool() throws Exception {
testComplete();
});
});
- });
+ }).end();
await();
}
@@ -4830,14 +4837,14 @@ public void testHttpConnect() {
Buffer buffer = TestUtils.randomBuffer(128);
Buffer received = Buffer.buffer();
CompletableFuture<Void> closeSocket = new CompletableFuture<>();
- vertx.createNetServer(new NetServerOptions().setPort(1235)).connectHandler(socket -> {
+ vertx.createNetServer(new NetServerOptions().setPort(1235).setHost("localhost")).connectHandler(socket -> {
socket.handler(socket::write);
closeSocket.thenAccept(v -> {
socket.close();
});
}).listen(onSuccess(netServer -> {
server.requestHandler(req -> {
- vertx.createNetClient(new NetClientOptions()).connect(netServer.actualPort(), "localhost", onSuccess(dst -> {
+ vertx.createNetClient(new NetClientOptions()).connect(1235, "localhost", onSuccess(dst -> {
req.response().setStatusCode(200);
req.response().setStatusMessage("Connection established");
@@ -4855,8 +4862,8 @@ public void testHttpConnect() {
});
}));
});
- server.listen(onSuccess(s -> {
- client.request(HttpMethod.CONNECT, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ client.request(HttpMethod.CONNECT, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertEquals(200, resp.statusCode());
NetSocket socket = resp.netSocket();
socket.handler(buff -> {
@@ -4893,8 +4900,8 @@ private void testAccessNetSocketPendingResponseData(boolean pause) {
so.write("hello");
});
- server.listen(onSuccess(s -> {
- HttpClientRequest req = client.request(HttpMethod.CONNECT, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ HttpClientRequest req = client.request(HttpMethod.CONNECT, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
NetSocket so = resp.netSocket();
assertNotNull(so);
so.handler(buff -> {
@@ -4927,8 +4934,8 @@ public void testHttpInvalidConnectResponseEnded() {
complete();
}
});
- server.listen(onSuccess(s -> {
- client.request(HttpMethod.CONNECT, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ client.request(HttpMethod.CONNECT, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertEquals(200, resp.statusCode());
complete();
}).end();
@@ -4949,8 +4956,8 @@ public void testHttpInvalidConnectResponseChunked() {
complete();
}
});
- server.listen(onSuccess(s -> {
- client.request(HttpMethod.CONNECT, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ server.listen(testAddress, onSuccess(s -> {
+ client.request(HttpMethod.CONNECT, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
assertEquals(200, resp.statusCode());
complete();
}).end();
@@ -4959,6 +4966,27 @@ public void testHttpInvalidConnectResponseChunked() {
await();
}
+ @Test
+ public void testEndFromAnotherThread() throws Exception {
+ waitFor(2);
+ disableThreadChecks();
+ server.requestHandler(req -> {
+ req.response().endHandler(v -> {
+ complete();
+ });
+ new Thread(() -> {
+ req.response().end();
+ }).start();
+ });
+ startServer(testAddress);
+ client.request(HttpMethod.GET, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ assertEquals(200, resp.statusCode());
+ complete();
+ }).end();
+
+ await();
+ }
+
  // This test checks that ending an HttpClientRequest will not hold a lock when sending Netty messages:
  // holding such a lock might deadlock when the ChannelOutboundBuffer is full and becomes drained,
  // triggering a reentrant HttpClientRequest call during the drain
@@ -4966,10 +4994,10 @@ public void testHttpInvalidConnectResponseChunked() {
@Test
public void testClientRequestEndDeadlock() throws Exception {
server.requestHandler(req -> req.endHandler(v -> req.response().end()));
- startServer();
+ startServer(testAddress);
Context ctx = vertx.getOrCreateContext();
ctx.runOnContext(v1 -> {
- HttpClientRequest request = client.post(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ HttpClientRequest request = client.request(HttpMethod.POST, testAddress, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
resp.endHandler(v2 -> {
testComplete();
});
diff --git a/src/test/java/io/vertx/core/http/HttpTestBase.java b/src/test/java/io/vertx/core/http/HttpTestBase.java
--- a/src/test/java/io/vertx/core/http/HttpTestBase.java
+++ b/src/test/java/io/vertx/core/http/HttpTestBase.java
@@ -11,9 +11,11 @@
package io.vertx.core.http;
+import io.vertx.core.AsyncResult;
import io.vertx.core.Context;
import io.vertx.core.Handler;
import io.vertx.core.net.ProxyType;
+import io.vertx.core.net.SocketAddress;
import io.vertx.test.proxy.HttpProxy;
import io.vertx.test.proxy.SocksProxy;
import io.vertx.test.proxy.TestProxyBase;
@@ -37,10 +39,12 @@ public class HttpTestBase extends VertxTestBase {
protected HttpServer server;
protected HttpClient client;
protected TestProxyBase proxy;
+ protected SocketAddress testAddress;
public void setUp() throws Exception {
super.setUp();
server = vertx.createHttpServer(new HttpServerOptions().setPort(DEFAULT_HTTP_PORT).setHost(DEFAULT_HTTP_HOST));
+ testAddress = SocketAddress.inetSocketAddress(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST);
}
protected void tearDown() throws Exception {
@@ -77,18 +81,39 @@ protected void startServer() throws Exception {
startServer(vertx.getOrCreateContext());
}
+ protected void startServer(SocketAddress bindAddress) throws Exception {
+ startServer(bindAddress, vertx.getOrCreateContext());
+ }
+
protected void startServer(HttpServer server) throws Exception {
startServer(vertx.getOrCreateContext(), server);
}
+ protected void startServer(SocketAddress bindAddress, HttpServer server) throws Exception {
+ startServer(bindAddress, vertx.getOrCreateContext(), server);
+ }
+
protected void startServer(Context context) throws Exception {
startServer(context, server);
}
+ protected void startServer(SocketAddress bindAddress, Context context) throws Exception {
+ startServer(bindAddress, context, server);
+ }
+
protected void startServer(Context context, HttpServer server) throws Exception {
+ startServer(null, context, server);
+ }
+
+ protected void startServer(SocketAddress bindAddress, Context context, HttpServer server) throws Exception {
CountDownLatch latch = new CountDownLatch(1);
context.runOnContext(v -> {
- server.listen(onSuccess(s -> latch.countDown()));
+ Handler<AsyncResult<HttpServer>> onListen = onSuccess(s -> latch.countDown());
+ if (bindAddress != null) {
+ server.listen(bindAddress, onListen);
+ } else {
+ server.listen(onListen);
+ }
});
awaitLatch(latch);
}
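The dispatch on bindAddress above is what lets the same test suite run over either an inet socket or a domain socket. A minimal sketch of a server bound to a domain socket, in the style of these tests (the socket path is illustrative, onSuccess is the test-base helper, and SocketAddress is io.vertx.core.net.SocketAddress):

    SocketAddress testAddress = SocketAddress.domainSocketAddress("/tmp/vertx-test.sock");
    server.requestHandler(req -> req.response().end("ok"));
    // listen(SocketAddress, handler) binds to the domain socket instead of host/port
    server.listen(testAddress, onSuccess(s -> {
      // requests are then issued with the client.request(HttpMethod, testAddress, ...)
      // overloads used throughout the diff above
    }));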
| Support UNIX Domain Sockets in HttpClient
Netty already supports DomainSockets/DomainChannels, so I was hoping it might be possible to add support (via HttpOptions/SocketOptions) for UNIX domain sockets (AF_UNIX). We do this quite often for secured APIs across docker containers: we mount the socket file as a volume in the container, and the different applications then communicate via the UNIX domain socket. We have also taken to communicating with the Docker daemon itself in this manner instead of exposing the Docker HTTP API.
My suggestion for implementation would be one of the following:
Add an overloaded method for Vertx.create(Net/Http)(Client/Server) which could take a "Path" object pointing to the location of the UNIX domain socket (a rough sketch follows below).
Add logic to the Net/HTTP Client/Server implementations to parse a URL of the format "unix+http://" or "unix+socket://"
Probably many more questions to be answered, but that should be a good start.
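A rough sketch of the first suggestion's end state, using a SocketAddress rather than a Path (the overload shape follows the client.request(HttpMethod, SocketAddress, port, host, uri, handler) calls in the patch above; the /containers/json path is illustrative, and a native transport is assumed to be available):

    HttpClient client = vertx.createHttpClient();
    SocketAddress daemon = SocketAddress.domainSocketAddress("/var/run/docker.sock");
    // The SocketAddress selects the actual transport endpoint; port and host are
    // still used to build the request line and Host header.
    client.request(HttpMethod.GET, daemon, 80, "localhost", "/containers/json", resp ->
      resp.bodyHandler(body -> System.out.println(body))
    ).end();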
| @InfoSec812 where is this unix+http:// format described?
I actually talked about this last week with someone because of the docker client use case
see https://github.com/vert-x3/issues/issues/188 also
ah but it's you :-)
so it's expected to be discussed at the F2F meeting next week; is that something you could contribute?
and I told you to open an issue here :-)
see also https://github.com/eclipse/vert.x/pull/1388
@vietj The format I was conceptualizing was the same one used by Twisted (the Python NIO framework). It is described [HERE](http://twistedmatrix.com/documents/13.1.0/core/howto/endpoints.html#auto9) under Server endpoint types.
@vietj Is the F2F meeting going to be on-line via something like a Google Hangout? I'd be happy to attend depending on the timing.
@InfoSec812 we will for sure take minutes and make slides available; I can't promise a hangout
https://docs.docker.com/engine/reference/api/docker_remote_api/
that's my use case for this.
@samart yes that's a very valid use case
@vietj, we plan to look at implementations like https://github.com/gesellix/unix-socket-factory with the apache http client :( until something emerges natively for the vert.x http client
note that you can work around this and use the docker registry via domain sockets with proper configuration.
You mean http? Yes, but we're trying to avoid exposing http for our docker daemons as it's a security and certificate mgmt. issue. Also, we don't want to muck w/iptables to effect local access only, hence the domain socket preference. The other option is to write this service in golang, but we're trying to stay w/vert.x for now.
https://netty.io/4.0/api/io/netty/channel/unix/package-summary.html
Any plans on adding this support in vert.x http client?
we might support epoll for 3.5
Domain socket support will be provided by https://github.com/eclipse/vert.x/issues/2135, I will rename this issue so it only scopes HTTP server and client
http server supports it
Is there any update on this?
not yet, but it is still an important feature. I've done some experiments locally and it might be back-portable to 3.x | 2019-05-01T18:51:11Z | 3.7 |
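On the epoll remark above: domain sockets need a native transport, which is opted into through VertxOptions. A minimal sketch, assuming the matching netty native-transport artifact (epoll on Linux, kqueue on macOS) is on the classpath:

    Vertx vertx = Vertx.vertx(new VertxOptions().setPreferNativeTransport(true));
    // true only if the native transport actually loaded; domain sockets require it
    boolean nativeOk = vertx.isNativeTransportEnabled();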
eclipse-vertx/vert.x | 2,883 | eclipse-vertx__vert.x-2883 | [
"2873"
] | bdd603e6fd13fe1d9b8398be72d2c4c18854d93e | diff --git a/src/main/java/io/vertx/core/Starter.java b/src/main/java/io/vertx/core/Starter.java
--- a/src/main/java/io/vertx/core/Starter.java
+++ b/src/main/java/io/vertx/core/Starter.java
@@ -211,7 +211,7 @@ private Vertx startVertx(boolean clustered, boolean ha, Args args) {
CountDownLatch latch = new CountDownLatch(1);
AtomicReference<AsyncResult<Vertx>> result = new AtomicReference<>();
- options.setClusterHost(clusterHost).setClusterPort(clusterPort).setClustered(true);
+ options.getEventBusOptions().setClustered(true).setHost(clusterHost).setPort(clusterPort);
if (ha) {
String haGroup = args.map.get("-hagroup");
int quorumSize = args.getInt("-quorum");
diff --git a/src/main/java/io/vertx/core/VertxOptions.java b/src/main/java/io/vertx/core/VertxOptions.java
--- a/src/main/java/io/vertx/core/VertxOptions.java
+++ b/src/main/java/io/vertx/core/VertxOptions.java
@@ -50,37 +50,58 @@ public class VertxOptions {
/**
* The default value of whether Vert.x is clustered = false.
+ *
+ * @deprecated as of 3.7, use {@link EventBusOptions#DEFAULT_CLUSTERED} instead
*/
+ @Deprecated
public static final boolean DEFAULT_CLUSTERED = false;
/**
* The default hostname to use when clustering = "localhost"
+ *
+ * @deprecated as of 3.7, use {@link EventBusOptions#DEFAULT_CLUSTER_HOST} instead
*/
+ @Deprecated
public static final String DEFAULT_CLUSTER_HOST = "localhost";
/**
* The default port to use when clustering = 0 (meaning assign a random port)
+ *
+ * @deprecated as of 3.7, use {@link EventBusOptions#DEFAULT_CLUSTER_PORT} instead
*/
+ @Deprecated
public static final int DEFAULT_CLUSTER_PORT = 0;
/**
* The default cluster public host to use = null which means use the same as the cluster host
+ *
+ * @deprecated as of 3.7, use {@link EventBusOptions#DEFAULT_CLUSTER_PUBLIC_HOST} instead
*/
+ @Deprecated
public static final String DEFAULT_CLUSTER_PUBLIC_HOST = null;
/**
* The default cluster public port to use = -1 which means use the same as the cluster port
+ *
+ * @deprecated as of 3.7, use {@link EventBusOptions#DEFAULT_CLUSTER_PUBLIC_PORT} instead
*/
+ @Deprecated
public static final int DEFAULT_CLUSTER_PUBLIC_PORT = -1;
/**
* The default value of cluster ping interval = 20000 ms.
+ *
+ * @deprecated as of 3.7, use {@link EventBusOptions#DEFAULT_CLUSTER_PING_INTERVAL} instead
*/
+ @Deprecated
public static final long DEFAULT_CLUSTER_PING_INTERVAL = 20000;
/**
* The default value of cluster ping reply interval = 20000 ms.
+ *
+ * @deprecated as of 3.7, use {@link EventBusOptions#DEFAULT_CLUSTER_PING_REPLY_INTERVAL} instead
*/
+ @Deprecated
public static final long DEFAULT_CLUSTER_PING_REPLY_INTERVAL = 20000;
/**
@@ -261,7 +282,10 @@ public VertxOptions setWorkerPoolSize(int workerPoolSize) {
* Is the Vert.x instance clustered?
*
* @return true if clustered, false if not
+ *
+ * @deprecated as of 3.7, use {@link #getEventBusOptions()} and then {@link EventBusOptions#isClustered()} instead
*/
+ @Deprecated
public boolean isClustered() {
return eventBusOptions.isClustered();
}
@@ -271,7 +295,10 @@ public boolean isClustered() {
*
* @param clustered if true, the Vert.x instance will be clustered, otherwise not
* @return a reference to this, so the API can be used fluently
+ *
+ * @deprecated as of 3.7, use {@link #getEventBusOptions()} and then {@link EventBusOptions#setClustered(boolean)} instead
*/
+ @Deprecated
public VertxOptions setClustered(boolean clustered) {
eventBusOptions.setClustered(clustered);
return this;
@@ -281,7 +308,10 @@ public VertxOptions setClustered(boolean clustered) {
* Get the host name to be used for clustering.
*
* @return The host name
+ *
+ * @deprecated as of 3.7, use {@link #getEventBusOptions()} and then {@link EventBusOptions#getHost()} instead
*/
+ @Deprecated
public String getClusterHost() {
return eventBusOptions.getHost();
}
@@ -291,7 +321,10 @@ public String getClusterHost() {
*
* @param clusterHost the host name to use
* @return a reference to this, so the API can be used fluently
+ *
+ * @deprecated as of 3.7, use {@link #getEventBusOptions()} and then {@link EventBusOptions#setHost(String)} instead
*/
+ @Deprecated
public VertxOptions setClusterHost(String clusterHost) {
this.eventBusOptions.setHost(clusterHost);
return this;
@@ -301,7 +334,10 @@ public VertxOptions setClusterHost(String clusterHost) {
* Get the public facing hostname to be used when clustering.
*
* @return the public facing hostname
+ *
+ * @deprecated as of 3.7, use {@link #getEventBusOptions()} and then {@link EventBusOptions#getClusterPublicHost()} instead
*/
+ @Deprecated
public String getClusterPublicHost() {
return getEventBusOptions().getClusterPublicHost();
}
@@ -315,7 +351,10 @@ public String getClusterPublicHost() {
*
* @param clusterPublicHost the public host name to use
* @return a reference to this, so the API can be used fluently
+ *
+ * @deprecated as of 3.7, use {@link #getEventBusOptions()} and then {@link EventBusOptions#setClusterPublicHost(String)} instead
*/
+ @Deprecated
public VertxOptions setClusterPublicHost(String clusterPublicHost) {
getEventBusOptions().setClusterPublicHost(clusterPublicHost);
return this;
@@ -325,7 +364,10 @@ public VertxOptions setClusterPublicHost(String clusterPublicHost) {
* Get the port to be used for clustering
*
* @return the port
+ *
+ * @deprecated as of 3.7, use {@link #getEventBusOptions()} and then {@link EventBusOptions#getPort()} instead
*/
+ @Deprecated
public int getClusterPort() {
return eventBusOptions.getPort();
}
@@ -335,7 +377,10 @@ public int getClusterPort() {
*
* @param clusterPort the port
* @return a reference to this, so the API can be used fluently
+ *
+ * @deprecated as of 3.7, use {@link #getEventBusOptions()} and then {@link EventBusOptions#setPort(int)} instead
*/
+ @Deprecated
public VertxOptions setClusterPort(int clusterPort) {
eventBusOptions.setPort(clusterPort);
return this;
@@ -345,7 +390,10 @@ public VertxOptions setClusterPort(int clusterPort) {
* Get the public facing port to be used when clustering.
*
* @return the public facing port
+ *
+ * @deprecated as of 3.7, use {@link #getEventBusOptions()} and then {@link EventBusOptions#getClusterPublicPort()} instead
*/
+ @Deprecated
public int getClusterPublicPort() {
return eventBusOptions.getClusterPublicPort();
}
@@ -355,7 +403,10 @@ public int getClusterPublicPort() {
*
* @param clusterPublicPort the public port to use
* @return a reference to this, so the API can be used fluently
+ *
+ * @deprecated as of 3.7, use {@link #getEventBusOptions()} and then {@link EventBusOptions#setClusterPublicPort(int)} instead
*/
+ @Deprecated
public VertxOptions setClusterPublicPort(int clusterPublicPort) {
getEventBusOptions().setClusterPublicPort(clusterPublicPort);
return this;
@@ -367,7 +418,10 @@ public VertxOptions setClusterPublicPort(int clusterPublicPort) {
* Nodes in the cluster ping each other at this interval to determine whether they are still running.
*
* @return The value of cluster ping interval
+ *
+ * @deprecated as of 3.7, use {@link #getEventBusOptions()} and then {@link EventBusOptions#getClusterPingInterval()} instead
*/
+ @Deprecated
public long getClusterPingInterval() {
return getEventBusOptions().getClusterPingInterval();
}
@@ -377,7 +431,10 @@ public long getClusterPingInterval() {
*
* @param clusterPingInterval The value of cluster ping interval, in ms.
* @return a reference to this, so the API can be used fluently
+ *
+ * @deprecated as of 3.7, use {@link #getEventBusOptions()} and then {@link EventBusOptions#setClusterPingInterval(long)} instead
*/
+ @Deprecated
public VertxOptions setClusterPingInterval(long clusterPingInterval) {
eventBusOptions.setClusterPingInterval(clusterPingInterval);
return this;
@@ -389,7 +446,9 @@ public VertxOptions setClusterPingInterval(long clusterPingInterval) {
* After sending a ping, if a pong is not received in this time, the node will be considered dead.
*
* @return the value of cluster ping reply interval
+ * @deprecated as of 3.7, use {@link #getEventBusOptions()} and then {@link EventBusOptions#getClusterPingReplyInterval()} instead
*/
+ @Deprecated
public long getClusterPingReplyInterval() {
return eventBusOptions.getClusterPingReplyInterval();
}
@@ -399,7 +458,10 @@ public long getClusterPingReplyInterval() {
*
* @param clusterPingReplyInterval The value of cluster ping reply interval, in ms.
* @return a reference to this, so the API can be used fluently
+ *
+ * @deprecated as of 3.7, use {@link #getEventBusOptions()} and then {@link EventBusOptions#setClusterPingReplyInterval(long)} instead
*/
+ @Deprecated
public VertxOptions setClusterPingReplyInterval(long clusterPingReplyInterval) {
eventBusOptions.setClusterPingReplyInterval(clusterPingReplyInterval);
return this;
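Taken together, these deprecations route every cluster transport setting through the nested EventBusOptions. A before/after sketch of the migration (host and port values are illustrative):

    // Deprecated as of 3.7:
    VertxOptions legacy = new VertxOptions()
        .setClustered(true)
        .setClusterHost("10.0.0.1")
        .setClusterPort(15701);

    // Preferred replacement:
    VertxOptions options = new VertxOptions();
    options.getEventBusOptions()
        .setClustered(true)
        .setHost("10.0.0.1")
        .setPort(15701);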
diff --git a/src/main/java/io/vertx/core/eventbus/EventBusOptions.java b/src/main/java/io/vertx/core/eventbus/EventBusOptions.java
--- a/src/main/java/io/vertx/core/eventbus/EventBusOptions.java
+++ b/src/main/java/io/vertx/core/eventbus/EventBusOptions.java
@@ -29,23 +29,58 @@
@DataObject(generateConverter = true, inheritConverter = true, publicConverter = false)
public class EventBusOptions extends TCPSSLOptions {
- private boolean clustered = VertxOptions.DEFAULT_CLUSTERED;
- private String clusterPublicHost = VertxOptions.DEFAULT_CLUSTER_PUBLIC_HOST;
- private int clusterPublicPort = VertxOptions.DEFAULT_CLUSTER_PUBLIC_PORT;
- private long clusterPingInterval = VertxOptions.DEFAULT_CLUSTER_PING_INTERVAL;
- private long clusterPingReplyInterval = VertxOptions.DEFAULT_CLUSTER_PING_REPLY_INTERVAL;
+ /**
+ * The default value of whether Vert.x is clustered = false.
+ */
+ public static final boolean DEFAULT_CLUSTERED = VertxOptions.DEFAULT_CLUSTERED;
+
+ /**
+ * The default hostname to use when clustering = "localhost"
+ */
+ public static final String DEFAULT_CLUSTER_HOST = VertxOptions.DEFAULT_CLUSTER_HOST;
+
+ /**
+ * The default port to use when clustering = 0 (meaning assign a random port)
+ */
+ public static final int DEFAULT_CLUSTER_PORT = VertxOptions.DEFAULT_CLUSTER_PORT;
+
+ /**
+ * The default cluster public host to use = null which means use the same as the cluster host
+ */
+ public static final String DEFAULT_CLUSTER_PUBLIC_HOST = VertxOptions.DEFAULT_CLUSTER_PUBLIC_HOST;
+
+ /**
+ * The default cluster public port to use = -1 which means use the same as the cluster port
+ */
+ public static final int DEFAULT_CLUSTER_PUBLIC_PORT = VertxOptions.DEFAULT_CLUSTER_PUBLIC_PORT;
+
+ /**
+ * The default value of cluster ping interval = 20000 ms.
+ */
+ public static final long DEFAULT_CLUSTER_PING_INTERVAL = VertxOptions.DEFAULT_CLUSTER_PING_INTERVAL;
+
+ /**
+ * The default value of cluster ping reply interval = 20000 ms.
+ */
+ public static final long DEFAULT_CLUSTER_PING_REPLY_INTERVAL = VertxOptions.DEFAULT_CLUSTER_PING_REPLY_INTERVAL;
+
+ private boolean clustered = DEFAULT_CLUSTERED;
+ private String clusterPublicHost = DEFAULT_CLUSTER_PUBLIC_HOST;
+ private int clusterPublicPort = DEFAULT_CLUSTER_PUBLIC_PORT;
+ private long clusterPingInterval = DEFAULT_CLUSTER_PING_INTERVAL;
+ private long clusterPingReplyInterval = DEFAULT_CLUSTER_PING_REPLY_INTERVAL;
// Attributes used to configure the server of the event bus when the event bus is clustered.
/**
* The default port to listen on = 0 (meaning a random ephemeral free port will be chosen)
*/
- public static final int DEFAULT_PORT = VertxOptions.DEFAULT_CLUSTER_PORT;
+ public static final int DEFAULT_PORT = DEFAULT_CLUSTER_PORT;
/**
* The default host to listen on = "0.0.0.0" (meaning listen on all available interfaces).
*/
- public static final String DEFAULT_HOST = VertxOptions.DEFAULT_CLUSTER_HOST;
+ public static final String DEFAULT_HOST = DEFAULT_CLUSTER_HOST;
/**
* The default accept backlog = 1024
@@ -96,7 +131,7 @@ public class EventBusOptions extends TCPSSLOptions {
public EventBusOptions() {
super();
- clustered = VertxOptions.DEFAULT_CLUSTERED;
+ clustered = DEFAULT_CLUSTERED;
port = DEFAULT_PORT;
host = DEFAULT_HOST;
@@ -201,9 +236,7 @@ public EventBusOptions setAcceptBacklog(int acceptBacklog) {
}
/**
- * @return the host, which can be configured from the {@link VertxOptions#setClusterHost(String)}, or using
- * the {@code --cluster-host} command line option.
- * @see NetServerOptions#getHost()
+ * @return the host
*/
public String getHost() {
return host;
@@ -476,9 +509,7 @@ public EventBusOptions setLogActivity(boolean logEnabled) {
}
/**
- * @return whether or not the event bus is clustered. This can be configured from the
- * {@link VertxOptions#setClustered(boolean)} method or from the {@code --cluster} option from the command
- * line.
+ * @return whether or not the event bus is clustered
*/
public boolean isClustered() {
return clustered;
@@ -538,8 +569,6 @@ public EventBusOptions setConnectTimeout(int connectTimeout) {
/**
 * Get the value of cluster ping interval, in ms.
 * Nodes in the cluster ping each other at this interval to determine whether they are still running.
- * <p>
- * The value can be configured from {@link VertxOptions#setClusterPingInterval(long)}.
*
 * @return the value of cluster ping interval
*/
@@ -564,8 +593,6 @@ public EventBusOptions setClusterPingInterval(long clusterPingInterval) {
/**
* Get the value of cluster ping reply interval, in ms.
* After sending a ping, if a pong is not received in this time, the node will be considered dead.
- * <p>
- * The value can be configured from {@link VertxOptions#setClusterPingReplyInterval(long)}}.
*
* @return the value of cluster ping reply interval
*/
@@ -588,9 +615,7 @@ public EventBusOptions setClusterPingReplyInterval(long clusterPingReplyInterval
}
/**
- * Get the public facing port to be used when clustering.
- * <p>
- * It can be configured using {@link VertxOptions#setClusterPublicHost(String)}
+ * Get the public facing host to be used when clustering.
*
 * @return the public facing host
*/
@@ -617,8 +642,6 @@ public EventBusOptions setClusterPublicHost(String clusterPublicHost) {
/**
* Gets the public facing port to be used when clustering.
- * <p>
- * This can be configured from {@link VertxOptions#setClusterPublicPort(int)}.
*
* @return the public facing port
*/
diff --git a/src/main/java/io/vertx/core/impl/VertxFactoryImpl.java b/src/main/java/io/vertx/core/impl/VertxFactoryImpl.java
--- a/src/main/java/io/vertx/core/impl/VertxFactoryImpl.java
+++ b/src/main/java/io/vertx/core/impl/VertxFactoryImpl.java
@@ -11,11 +11,7 @@
package io.vertx.core.impl;
-import io.vertx.core.AsyncResult;
-import io.vertx.core.Context;
-import io.vertx.core.Handler;
-import io.vertx.core.Vertx;
-import io.vertx.core.VertxOptions;
+import io.vertx.core.*;
import io.vertx.core.spi.VertxFactory;
/**
@@ -32,7 +28,7 @@ public Vertx vertx() {
@Override
public Vertx vertx(VertxOptions options) {
- if (options.isClustered()) {
+ if (options.getEventBusOptions().isClustered()) {
throw new IllegalArgumentException("Please use Vertx.clusteredVertx() to create a clustered Vert.x instance");
}
return VertxImpl.vertx(options);
@@ -41,7 +37,7 @@ public Vertx vertx(VertxOptions options) {
@Override
public void clusteredVertx(VertxOptions options, final Handler<AsyncResult<Vertx>> resultHandler) {
// We don't require the user to set clustered to true if they use this method
- options.setClustered(true);
+ options.getEventBusOptions().setClustered(true);
VertxImpl.clusteredVertx(options, resultHandler);
}
diff --git a/src/main/java/io/vertx/core/impl/VertxImpl.java b/src/main/java/io/vertx/core/impl/VertxImpl.java
--- a/src/main/java/io/vertx/core/impl/VertxImpl.java
+++ b/src/main/java/io/vertx/core/impl/VertxImpl.java
@@ -16,8 +16,8 @@
import io.netty.resolver.AddressResolverGroup;
import io.netty.util.ResourceLeakDetector;
import io.netty.util.concurrent.GenericFutureListener;
-import io.vertx.core.*;
import io.vertx.core.Future;
+import io.vertx.core.*;
import io.vertx.core.datagram.DatagramSocket;
import io.vertx.core.datagram.DatagramSocketOptions;
import io.vertx.core.datagram.impl.DatagramSocketImpl;
@@ -169,7 +169,7 @@ private VertxImpl(VertxOptions options, Transport transport) {
this.addressResolverOptions = options.getAddressResolverOptions();
this.addressResolver = new AddressResolver(this, options.getAddressResolverOptions());
this.deploymentManager = new DeploymentManager(this);
- if (options.isClustered()) {
+ if (options.getEventBusOptions().isClustered()) {
this.clusterManager = getClusterManager(options);
this.eventBus = new ClusteredEventBus(this, options, clusterManager);
} else {
diff --git a/src/main/java/io/vertx/core/impl/launcher/commands/BareCommand.java b/src/main/java/io/vertx/core/impl/launcher/commands/BareCommand.java
--- a/src/main/java/io/vertx/core/impl/launcher/commands/BareCommand.java
+++ b/src/main/java/io/vertx/core/impl/launcher/commands/BareCommand.java
@@ -13,6 +13,7 @@
import io.vertx.core.*;
import io.vertx.core.cli.annotations.*;
+import io.vertx.core.eventbus.EventBusOptions;
import io.vertx.core.impl.launcher.VertxLifecycleHooks;
import io.vertx.core.json.DecodeException;
import io.vertx.core.json.JsonObject;
@@ -212,17 +213,18 @@ protected Vertx startVertx() {
Vertx instance;
if (isClustered()) {
log.info("Starting clustering...");
- if (!Objects.equals(options.getClusterHost(), VertxOptions.DEFAULT_CLUSTER_HOST)) {
- clusterHost = options.getClusterHost();
+ EventBusOptions eventBusOptions = options.getEventBusOptions();
+ if (!Objects.equals(eventBusOptions.getHost(), EventBusOptions.DEFAULT_CLUSTER_HOST)) {
+ clusterHost = eventBusOptions.getHost();
}
- if (options.getClusterPort() != VertxOptions.DEFAULT_CLUSTER_PORT) {
- clusterPort = options.getClusterPort();
+ if (eventBusOptions.getPort() != EventBusOptions.DEFAULT_CLUSTER_PORT) {
+ clusterPort = eventBusOptions.getPort();
}
- if (!Objects.equals(options.getClusterPublicHost(), VertxOptions.DEFAULT_CLUSTER_PUBLIC_HOST)) {
- clusterPublicHost = options.getClusterPublicHost();
+ if (!Objects.equals(eventBusOptions.getClusterPublicHost(), EventBusOptions.DEFAULT_CLUSTER_PUBLIC_HOST)) {
+ clusterPublicHost = eventBusOptions.getClusterPublicHost();
}
- if (options.getClusterPublicPort() != VertxOptions.DEFAULT_CLUSTER_PUBLIC_PORT) {
- clusterPublicPort = options.getClusterPublicPort();
+ if (eventBusOptions.getClusterPublicPort() != EventBusOptions.DEFAULT_CLUSTER_PUBLIC_PORT) {
+ clusterPublicPort = eventBusOptions.getClusterPublicPort();
}
if (clusterHost == null) {
clusterHost = getDefaultAddress();
@@ -236,11 +238,11 @@ protected Vertx startVertx() {
CountDownLatch latch = new CountDownLatch(1);
AtomicReference<AsyncResult<Vertx>> result = new AtomicReference<>();
- options.setClustered(true)
- .setClusterHost(clusterHost).setClusterPort(clusterPort)
+ eventBusOptions.setClustered(true)
+ .setHost(clusterHost).setPort(clusterPort)
.setClusterPublicHost(clusterPublicHost);
if (clusterPublicPort != -1) {
- options.setClusterPublicPort(clusterPublicPort);
+ eventBusOptions.setClusterPublicPort(clusterPublicPort);
}
if (getHA()) {
options.setHAEnabled(true);
diff --git a/src/main/java/io/vertx/core/impl/launcher/commands/RunCommand.java b/src/main/java/io/vertx/core/impl/launcher/commands/RunCommand.java
--- a/src/main/java/io/vertx/core/impl/launcher/commands/RunCommand.java
+++ b/src/main/java/io/vertx/core/impl/launcher/commands/RunCommand.java
@@ -226,7 +226,7 @@ public void setUp(ExecutionContext context) throws CLIException {
*/
@Override
public boolean isClustered() {
- return cluster || ha || (options != null && options.isClustered());
+ return cluster || ha || (options != null && options.getEventBusOptions().isClustered());
}
@Override
| diff --git a/src/test/java/io/vertx/core/CreateVertxTest.java b/src/test/java/io/vertx/core/CreateVertxTest.java
--- a/src/test/java/io/vertx/core/CreateVertxTest.java
+++ b/src/test/java/io/vertx/core/CreateVertxTest.java
@@ -12,10 +12,8 @@
package io.vertx.core;
import io.vertx.test.core.VertxTestBase;
-import org.junit.Test;
-
-import io.vertx.core.*;
import io.vertx.test.fakecluster.FakeClusterManager;
+import org.junit.Test;
/**
* @author <a href="http://tfox.org">Tim Fox</a>
@@ -39,7 +37,7 @@ public void testCreateVertxWithOptions() {
@Test
public void testFailCreateClusteredVertxSynchronously() {
VertxOptions options = new VertxOptions();
- options.setClustered(true);
+ options.getEventBusOptions().setClustered(true);
try {
Vertx.vertx(options);
fail("Should throw exception");
@@ -51,7 +49,7 @@ public void testFailCreateClusteredVertxSynchronously() {
@Test
public void testCreateClusteredVertxAsync() {
VertxOptions options = new VertxOptions();
- options.setClustered(true);
+ options.getEventBusOptions().setClustered(true);
clusteredVertx(options, ar -> {
assertTrue(ar.succeeded());
assertNotNull(ar.result());
@@ -74,7 +72,7 @@ public void testCreateClusteredVertxAsyncDontSetClustered() {
clusteredVertx(options, ar -> {
assertTrue(ar.succeeded());
assertNotNull(ar.result());
- assertTrue(options.isClustered());
+ assertTrue(options.getEventBusOptions().isClustered());
assertTrue(ar.result().isClustered());
Vertx v = ar.result();
v.close(ar2 -> {
diff --git a/src/test/java/io/vertx/core/HATest.java b/src/test/java/io/vertx/core/HATest.java
--- a/src/test/java/io/vertx/core/HATest.java
+++ b/src/test/java/io/vertx/core/HATest.java
@@ -15,10 +15,10 @@
import io.vertx.core.impl.VertxInternal;
import io.vertx.core.json.JsonObject;
import io.vertx.core.spi.cluster.ClusterManager;
-import io.vertx.test.verticles.HAVerticle1;
-import io.vertx.test.verticles.HAVerticle2;
import io.vertx.test.core.VertxTestBase;
import io.vertx.test.fakecluster.FakeClusterManager;
+import io.vertx.test.verticles.HAVerticle1;
+import io.vertx.test.verticles.HAVerticle2;
import org.junit.Test;
import java.util.concurrent.CountDownLatch;
@@ -380,8 +380,11 @@ protected Vertx startVertx(String haGroup, int quorumSize) throws Exception {
}
protected Vertx startVertx(String haGroup, int quorumSize, boolean ha) throws Exception {
- VertxOptions options = new VertxOptions().setHAEnabled(ha).setClustered(true).
- setClusterHost("localhost").setClusterManager(getClusterManager());
+ VertxOptions options = new VertxOptions()
+ .setHAEnabled(ha)
+ .setClusterManager(getClusterManager());
+ options.getEventBusOptions()
+ .setClustered(true).setHost("localhost");
if (ha) {
options.setQuorumSize(quorumSize);
if (haGroup != null) {
diff --git a/src/test/java/io/vertx/core/LauncherTest.java b/src/test/java/io/vertx/core/LauncherTest.java
--- a/src/test/java/io/vertx/core/LauncherTest.java
+++ b/src/test/java/io/vertx/core/LauncherTest.java
@@ -492,8 +492,8 @@ private void testConfigureFromJson(boolean jsonFile) throws Exception {
assertEquals(123, opts.getEventLoopPoolSize(), 0);
assertEquals(123767667L, opts.getMaxEventLoopExecuteTime());
assertEquals(true, opts.getMetricsOptions().isEnabled());
- assertEquals(true, opts.isClustered());
- assertEquals("mars", opts.getClusterPublicHost());
+ assertEquals(true, opts.getEventBusOptions().isClustered());
+ assertEquals("mars", opts.getEventBusOptions().getClusterPublicHost());
assertEquals("somegroup", opts.getHAGroup());
assertEquals(TimeUnit.SECONDS, opts.getMaxEventLoopExecuteTimeUnit());
}
@@ -632,10 +632,10 @@ public void testConfigureClusterHostPortFromProperties() throws Exception {
String[] args = {"run", "java:" + TestVerticle.class.getCanonicalName(), "-cluster"};
launcher.dispatch(args);
assertWaitUntil(() -> TestVerticle.instanceCount.get() == 1);
- assertEquals("127.0.0.1", launcher.options.getClusterHost());
- assertEquals(clusterPort, launcher.options.getClusterPort());
- assertNull(launcher.options.getClusterPublicHost());
- assertEquals(-1, launcher.options.getClusterPublicPort());
+ assertEquals("127.0.0.1", launcher.options.getEventBusOptions().getHost());
+ assertEquals(clusterPort, launcher.options.getEventBusOptions().getPort());
+ assertNull(launcher.options.getEventBusOptions().getClusterPublicHost());
+ assertEquals(-1, launcher.options.getEventBusOptions().getClusterPublicPort());
}
@Test
@@ -645,10 +645,10 @@ public void testConfigureClusterHostPortFromCommandLine() throws Exception {
String[] args = {"run", "java:" + TestVerticle.class.getCanonicalName(), "-cluster", "--cluster-host", "127.0.0.1", "--cluster-port", Integer.toString(clusterPort)};
launcher.dispatch(args);
assertWaitUntil(() -> TestVerticle.instanceCount.get() == 1);
- assertEquals("127.0.0.1", launcher.options.getClusterHost());
- assertEquals(clusterPort, launcher.options.getClusterPort());
- assertNull(launcher.options.getClusterPublicHost());
- assertEquals(-1, launcher.options.getClusterPublicPort());
+ assertEquals("127.0.0.1", launcher.options.getEventBusOptions().getHost());
+ assertEquals(clusterPort, launcher.options.getEventBusOptions().getPort());
+ assertNull(launcher.options.getEventBusOptions().getClusterPublicHost());
+ assertEquals(-1, launcher.options.getEventBusOptions().getClusterPublicPort());
}
@Test
@@ -658,8 +658,8 @@ public void testConfigureClusterPublicHostPortFromCommandLine() throws Exception
String[] args = {"run", "java:" + TestVerticle.class.getCanonicalName(), "-cluster", "--cluster-public-host", "127.0.0.1", "--cluster-public-port", Integer.toString(clusterPublicPort)};
launcher.dispatch(args);
assertWaitUntil(() -> TestVerticle.instanceCount.get() == 1);
- assertEquals("127.0.0.1", launcher.options.getClusterPublicHost());
- assertEquals(clusterPublicPort, launcher.options.getClusterPublicPort());
+ assertEquals("127.0.0.1", launcher.options.getEventBusOptions().getClusterPublicHost());
+ assertEquals(clusterPublicPort, launcher.options.getEventBusOptions().getClusterPublicPort());
}
@Test
@@ -677,10 +677,10 @@ public void testOverrideClusterHostPortFromProperties() throws Exception {
String[] args = {"run", "java:" + TestVerticle.class.getCanonicalName(), "-cluster"};
launcher.dispatch(args);
assertWaitUntil(() -> TestVerticle.instanceCount.get() == 1);
- assertEquals("127.0.0.1", launcher.options.getClusterHost());
- assertEquals(newClusterPort, launcher.options.getClusterPort());
- assertEquals("127.0.0.3", launcher.options.getClusterPublicHost());
- assertEquals(newClusterPublicPort, launcher.options.getClusterPublicPort());
+ assertEquals("127.0.0.1", launcher.options.getEventBusOptions().getHost());
+ assertEquals(newClusterPort, launcher.options.getEventBusOptions().getPort());
+ assertEquals("127.0.0.3", launcher.options.getEventBusOptions().getClusterPublicHost());
+ assertEquals(newClusterPublicPort, launcher.options.getEventBusOptions().getClusterPublicPort());
}
@Test
@@ -702,10 +702,10 @@ public void testOverrideClusterHostPortFromCommandLine() throws Exception {
};
launcher.dispatch(args);
assertWaitUntil(() -> TestVerticle.instanceCount.get() == 1);
- assertEquals("127.0.0.1", launcher.options.getClusterHost());
- assertEquals(newClusterPort, launcher.options.getClusterPort());
- assertEquals("127.0.0.3", launcher.options.getClusterPublicHost());
- assertEquals(newClusterPublicPort, launcher.options.getClusterPublicPort());
+ assertEquals("127.0.0.1", launcher.options.getEventBusOptions().getHost());
+ assertEquals(newClusterPort, launcher.options.getEventBusOptions().getPort());
+ assertEquals("127.0.0.3", launcher.options.getEventBusOptions().getClusterPublicHost());
+ assertEquals(newClusterPublicPort, launcher.options.getEventBusOptions().getClusterPublicPort());
}
class MyLauncher extends Launcher {
@@ -751,10 +751,11 @@ public void beforeStartingVertx(VertxOptions options) {
beforeStartingVertxInvoked = true;
this.options = options;
if (clusterHost != null) {
- options.setClusterHost(clusterHost);
- options.setClusterPort(clusterPort);
- options.setClusterPublicHost(clusterPublicHost);
- options.setClusterPublicPort(clusterPublicPort);
+ options.getEventBusOptions()
+ .setHost(clusterHost)
+ .setPort(clusterPort)
+ .setClusterPublicHost(clusterPublicHost)
+ .setClusterPublicPort(clusterPublicPort);
super.beforeStartingVertx(options);
}
}
diff --git a/src/test/java/io/vertx/core/VertxOptionsTest.java b/src/test/java/io/vertx/core/VertxOptionsTest.java
--- a/src/test/java/io/vertx/core/VertxOptionsTest.java
+++ b/src/test/java/io/vertx/core/VertxOptionsTest.java
@@ -61,66 +61,66 @@ public void testOptions() {
} catch (IllegalArgumentException e) {
// OK
}
- assertFalse(options.isClustered());
- assertEquals(options, options.setClustered(true));
- assertTrue(options.isClustered());
- assertEquals(0, options.getClusterPort());
- assertEquals(options, options.setClusterPort(1234));
- assertEquals(1234, options.getClusterPort());
+ assertFalse(options.getEventBusOptions().isClustered());
+ options.getEventBusOptions().setClustered(true);
+ assertTrue(options.getEventBusOptions().isClustered());
+ assertEquals(0, options.getEventBusOptions().getPort());
+ options.getEventBusOptions().setPort(1234);
+ assertEquals(1234, options.getEventBusOptions().getPort());
try {
- options.setClusterPort(-1);
+ options.getEventBusOptions().setPort(-1);
fail("Should throw exception");
} catch (IllegalArgumentException e) {
// OK
}
try {
- options.setClusterPort(65536);
+ options.getEventBusOptions().setPort(65536);
fail("Should throw exception");
} catch (IllegalArgumentException e) {
// OK
}
- assertEquals(-1, options.getClusterPublicPort());
- assertEquals(options, options.setClusterPublicPort(1234));
- assertEquals(1234, options.getClusterPublicPort());
+ assertEquals(-1, options.getEventBusOptions().getClusterPublicPort());
+ options.getEventBusOptions().setClusterPublicPort(1234);
+ assertEquals(1234, options.getEventBusOptions().getClusterPublicPort());
try {
- options.setClusterPublicPort(-1);
+ options.getEventBusOptions().setClusterPublicPort(-1);
fail("Should throw exception");
} catch (IllegalArgumentException e) {
// OK
}
try {
- options.setClusterPublicPort(65536);
+ options.getEventBusOptions().setClusterPublicPort(65536);
fail("Should throw exception");
} catch (IllegalArgumentException e) {
// OK
}
- assertEquals("localhost", options.getClusterHost());
+ assertEquals("localhost", options.getEventBusOptions().getHost());
String randString = TestUtils.randomUnicodeString(100);
- assertEquals(options, options.setClusterHost(randString));
- assertEquals(randString, options.getClusterHost());
- assertEquals(null, options.getClusterPublicHost());
+ options.getEventBusOptions().setHost(randString);
+ assertEquals(randString, options.getEventBusOptions().getHost());
+ assertEquals(null, options.getEventBusOptions().getClusterPublicHost());
randString = TestUtils.randomUnicodeString(100);
- assertEquals(options, options.setClusterPublicHost(randString));
- assertEquals(randString, options.getClusterPublicHost());
- assertEquals(20000, options.getClusterPingInterval());
+ options.getEventBusOptions().setClusterPublicHost(randString);
+ assertEquals(randString, options.getEventBusOptions().getClusterPublicHost());
+ assertEquals(20000, options.getEventBusOptions().getClusterPingInterval());
long randomLong = TestUtils.randomPositiveLong();
- assertEquals(options, options.setClusterPingInterval(randomLong));
- assertEquals(randomLong, options.getClusterPingInterval());
+ options.getEventBusOptions().setClusterPingInterval(randomLong);
+ assertEquals(randomLong, options.getEventBusOptions().getClusterPingInterval());
try {
- options.setClusterPingInterval(-1);
+ options.getEventBusOptions().setClusterPingInterval(-1);
fail("Should throw exception");
} catch (IllegalArgumentException e) {
- assertEquals(randomLong, options.getClusterPingInterval());
+ assertEquals(randomLong, options.getEventBusOptions().getClusterPingInterval());
}
- assertEquals(20000, options.getClusterPingReplyInterval());
+ assertEquals(20000, options.getEventBusOptions().getClusterPingReplyInterval());
randomLong = TestUtils.randomPositiveLong();
- assertEquals(options, options.setClusterPingReplyInterval(randomLong));
- assertEquals(randomLong, options.getClusterPingReplyInterval());
+ options.getEventBusOptions().setClusterPingReplyInterval(randomLong);
+ assertEquals(randomLong, options.getEventBusOptions().getClusterPingReplyInterval());
try {
- options.setClusterPingReplyInterval(-1);
+ options.getEventBusOptions().setClusterPingReplyInterval(-1);
fail("Should throw exception");
} catch (IllegalArgumentException e) {
- assertEquals(randomLong, options.getClusterPingReplyInterval());
+ assertEquals(randomLong, options.getEventBusOptions().getClusterPingReplyInterval());
}
assertEquals(1000, options.getBlockedThreadCheckInterval());
rand = TestUtils.randomPositiveInt();
@@ -234,16 +234,16 @@ public void testCopyOptions() {
TimeUnit maxWorkerExecuteTimeUnit = TimeUnit.MILLISECONDS;
TimeUnit warningExceptionTimeUnit = TimeUnit.MINUTES;
TimeUnit blockedThreadCheckIntervalUnit = TimeUnit.MINUTES;
- options.setClusterPort(clusterPort);
- options.setClusterPublicPort(clusterPublicPort);
+ options.getEventBusOptions().setPort(clusterPort);
+ options.getEventBusOptions().setClusterPublicPort(clusterPublicPort);
options.setEventLoopPoolSize(eventLoopPoolSize);
options.setInternalBlockingPoolSize(internalBlockingPoolSize);
options.setWorkerPoolSize(workerPoolSize);
options.setBlockedThreadCheckInterval(blockedThreadCheckInterval);
- options.setClusterHost(clusterHost);
- options.setClusterPublicHost(clusterPublicHost);
- options.setClusterPingInterval(clusterPingInterval);
- options.setClusterPingReplyInterval(clusterPingReplyInterval);
+ options.getEventBusOptions().setHost(clusterHost);
+ options.getEventBusOptions().setClusterPublicHost(clusterPublicHost);
+ options.getEventBusOptions().setClusterPingInterval(clusterPingInterval);
+ options.getEventBusOptions().setClusterPingReplyInterval(clusterPingReplyInterval);
options.setMaxEventLoopExecuteTime(maxEventLoopExecuteTime);
options.setMaxWorkerExecuteTime(maxWorkerExecuteTime);
options.setHAEnabled(haEnabled);
@@ -260,16 +260,16 @@ public void testCopyOptions() {
options.setBlockedThreadCheckIntervalUnit(blockedThreadCheckIntervalUnit);
options = new VertxOptions(options);
- assertEquals(clusterPort, options.getClusterPort());
- assertEquals(clusterPublicPort, options.getClusterPublicPort());
- assertEquals(clusterPingInterval, options.getClusterPingInterval());
- assertEquals(clusterPingReplyInterval, options.getClusterPingReplyInterval());
+ assertEquals(clusterPort, options.getEventBusOptions().getPort());
+ assertEquals(clusterPublicPort, options.getEventBusOptions().getClusterPublicPort());
+ assertEquals(clusterPingInterval, options.getEventBusOptions().getClusterPingInterval());
+ assertEquals(clusterPingReplyInterval, options.getEventBusOptions().getClusterPingReplyInterval());
assertEquals(eventLoopPoolSize, options.getEventLoopPoolSize());
assertEquals(internalBlockingPoolSize, options.getInternalBlockingPoolSize());
assertEquals(workerPoolSize, options.getWorkerPoolSize());
assertEquals(blockedThreadCheckInterval, options.getBlockedThreadCheckInterval());
- assertEquals(clusterHost, options.getClusterHost());
- assertEquals(clusterPublicHost, options.getClusterPublicHost());
+ assertEquals(clusterHost, options.getEventBusOptions().getHost());
+ assertEquals(clusterPublicHost, options.getEventBusOptions().getClusterPublicHost());
assertEquals(maxEventLoopExecuteTime, options.getMaxEventLoopExecuteTime());
assertEquals(maxWorkerExecuteTime, options.getMaxWorkerExecuteTime());
assertEquals(haEnabled, options.isHAEnabled());
@@ -292,12 +292,12 @@ public void testDefaultJsonOptions() {
VertxOptions json = new VertxOptions(new JsonObject());
assertEquals(def.getEventLoopPoolSize(), json.getEventLoopPoolSize());
assertEquals(def.getWorkerPoolSize(), json.getWorkerPoolSize());
- assertEquals(def.isClustered(), json.isClustered());
- assertEquals(def.getClusterHost(), json.getClusterHost());
- assertEquals(def.getClusterPublicHost(), json.getClusterPublicHost());
- assertEquals(def.getClusterPublicPort(), json.getClusterPublicPort());
- assertEquals(def.getClusterPingInterval(), json.getClusterPingInterval());
- assertEquals(def.getClusterPingReplyInterval(), json.getClusterPingReplyInterval());
+ assertEquals(def.getEventBusOptions().isClustered(), json.getEventBusOptions().isClustered());
+ assertEquals(def.getEventBusOptions().getHost(), json.getEventBusOptions().getHost());
+ assertEquals(def.getEventBusOptions().getClusterPublicHost(), json.getEventBusOptions().getClusterPublicHost());
+ assertEquals(def.getEventBusOptions().getClusterPublicPort(), json.getEventBusOptions().getClusterPublicPort());
+ assertEquals(def.getEventBusOptions().getClusterPingInterval(), json.getEventBusOptions().getClusterPingInterval());
+ assertEquals(def.getEventBusOptions().getClusterPingReplyInterval(), json.getEventBusOptions().getClusterPingReplyInterval());
assertEquals(def.getBlockedThreadCheckInterval(), json.getBlockedThreadCheckInterval());
assertEquals(def.getMaxEventLoopExecuteTime(), json.getMaxEventLoopExecuteTime());
assertEquals(def.getMaxWorkerExecuteTime(), json.getMaxWorkerExecuteTime());
@@ -317,16 +317,16 @@ public void testDefaultJsonOptions() {
public void testJsonOptions() {
VertxOptions options = new VertxOptions(new JsonObject());
- assertEquals(0, options.getClusterPort());
- assertEquals(-1, options.getClusterPublicPort());
- assertEquals(20000, options.getClusterPingInterval());
- assertEquals(20000, options.getClusterPingReplyInterval());
+ assertEquals(0, options.getEventBusOptions().getPort());
+ assertEquals(-1, options.getEventBusOptions().getClusterPublicPort());
+ assertEquals(20000, options.getEventBusOptions().getClusterPingInterval());
+ assertEquals(20000, options.getEventBusOptions().getClusterPingReplyInterval());
assertEquals(2 * Runtime.getRuntime().availableProcessors(), options.getEventLoopPoolSize());
assertEquals(20, options.getInternalBlockingPoolSize());
assertEquals(20, options.getWorkerPoolSize());
assertEquals(1000, options.getBlockedThreadCheckInterval());
- assertEquals("localhost", options.getClusterHost());
- assertNull(options.getClusterPublicHost());
+ assertEquals("localhost", options.getEventBusOptions().getHost());
+ assertNull(options.getEventBusOptions().getClusterPublicHost());
assertEquals(null, options.getClusterManager());
assertEquals(2000l * 1000000, options.getMaxEventLoopExecuteTime());
assertEquals(1l * 60 * 1000 * 1000000, options.getMaxWorkerExecuteTime());
@@ -397,16 +397,16 @@ public void testJsonOptions() {
put("warningExceptionTimeUnit", warningExceptionTimeUnit).
put("blockedThreadCheckIntervalUnit", blockedThreadCheckIntervalUnit)
);
- assertEquals(clusterPort, options.getClusterPort());
- assertEquals(clusterPublicPort, options.getClusterPublicPort());
- assertEquals(clusterPublicHost, options.getClusterPublicHost());
- assertEquals(clusterPingInterval, options.getClusterPingInterval());
- assertEquals(clusterPingReplyInterval, options.getClusterPingReplyInterval());
+ assertEquals(clusterPort, options.getEventBusOptions().getPort());
+ assertEquals(clusterPublicPort, options.getEventBusOptions().getClusterPublicPort());
+ assertEquals(clusterPublicHost, options.getEventBusOptions().getClusterPublicHost());
+ assertEquals(clusterPingInterval, options.getEventBusOptions().getClusterPingInterval());
+ assertEquals(clusterPingReplyInterval, options.getEventBusOptions().getClusterPingReplyInterval());
assertEquals(eventLoopPoolSize, options.getEventLoopPoolSize());
assertEquals(internalBlockingPoolSize, options.getInternalBlockingPoolSize());
assertEquals(workerPoolSize, options.getWorkerPoolSize());
assertEquals(blockedThreadCheckInterval, options.getBlockedThreadCheckInterval());
- assertEquals(clusterHost, options.getClusterHost());
+ assertEquals(clusterHost, options.getEventBusOptions().getHost());
assertEquals(null, options.getClusterManager());
assertEquals(maxEventLoopExecuteTime, options.getMaxEventLoopExecuteTime());
assertEquals(maxWorkerExecuteTime, options.getMaxWorkerExecuteTime());
diff --git a/src/test/java/io/vertx/core/eventbus/ClusteredEventBusStartFailureTest.java b/src/test/java/io/vertx/core/eventbus/ClusteredEventBusStartFailureTest.java
--- a/src/test/java/io/vertx/core/eventbus/ClusteredEventBusStartFailureTest.java
+++ b/src/test/java/io/vertx/core/eventbus/ClusteredEventBusStartFailureTest.java
@@ -34,8 +34,8 @@ public void testCallbackInvokedOnFailure() throws Exception {
String hostName = "zoom.zoom.zen.tld";
VertxOptions options = new VertxOptions()
- .setClusterManager(new FakeClusterManager())
- .setClusterHost(hostName);
+ .setClusterManager(new FakeClusterManager());
+ options.getEventBusOptions().setHost(hostName);
AtomicReference<AsyncResult<Vertx>> resultRef = new AtomicReference<>();
diff --git a/src/test/java/io/vertx/core/eventbus/ClusteredEventBusTest.java b/src/test/java/io/vertx/core/eventbus/ClusteredEventBusTest.java
--- a/src/test/java/io/vertx/core/eventbus/ClusteredEventBusTest.java
+++ b/src/test/java/io/vertx/core/eventbus/ClusteredEventBusTest.java
@@ -147,7 +147,9 @@ public void testDefaultCodecReplyExceptionSubclass() throws Exception {
// Make sure ping/pong works ok
@Test
public void testClusteredPong() throws Exception {
- startNodes(2, new VertxOptions().setClusterPingInterval(500).setClusterPingReplyInterval(500));
+ VertxOptions options = new VertxOptions();
+ options.getEventBusOptions().setClusterPingInterval(500).setClusterPingReplyInterval(500);
+ startNodes(2, options);
AtomicBoolean sending = new AtomicBoolean();
MessageConsumer<String> consumer = vertices[0].eventBus().<String>consumer("foobar").handler(msg -> {
if (!sending.get()) {
diff --git a/src/test/java/io/vertx/core/impl/launcher/LauncherExtensibilityTest.java b/src/test/java/io/vertx/core/impl/launcher/LauncherExtensibilityTest.java
--- a/src/test/java/io/vertx/core/impl/launcher/LauncherExtensibilityTest.java
+++ b/src/test/java/io/vertx/core/impl/launcher/LauncherExtensibilityTest.java
@@ -125,7 +125,7 @@ public void afterStartingVertx(Vertx vertx) {
@Override
public void beforeStartingVertx(VertxOptions options) {
- options.setClustered(true);
+ options.getEventBusOptions().setClustered(true);
}
@Override
diff --git a/src/test/java/io/vertx/core/spi/metrics/MetricsContextTest.java b/src/test/java/io/vertx/core/spi/metrics/MetricsContextTest.java
--- a/src/test/java/io/vertx/core/spi/metrics/MetricsContextTest.java
+++ b/src/test/java/io/vertx/core/spi/metrics/MetricsContextTest.java
@@ -11,41 +11,18 @@
package io.vertx.core.spi.metrics;
-import io.vertx.core.AbstractVerticle;
-import io.vertx.core.Context;
-import io.vertx.core.DeploymentOptions;
-import io.vertx.core.Handler;
-import io.vertx.core.Verticle;
-import io.vertx.core.Vertx;
-import io.vertx.core.VertxOptions;
+import io.vertx.core.*;
import io.vertx.core.buffer.Buffer;
import io.vertx.core.datagram.DatagramSocket;
import io.vertx.core.datagram.DatagramSocketOptions;
import io.vertx.core.eventbus.EventBus;
+import io.vertx.core.eventbus.EventBusOptions;
import io.vertx.core.eventbus.MessageConsumer;
-import io.vertx.core.http.HttpClient;
-import io.vertx.core.http.HttpClientOptions;
-import io.vertx.core.http.HttpClientRequest;
-import io.vertx.core.http.HttpClientResponse;
-import io.vertx.core.http.HttpServer;
-import io.vertx.core.http.HttpServerOptions;
-import io.vertx.core.http.HttpServerRequest;
-import io.vertx.core.http.HttpServerResponse;
-import io.vertx.core.http.ServerWebSocket;
-import io.vertx.core.http.WebSocket;
+import io.vertx.core.http.*;
import io.vertx.core.metrics.MetricsOptions;
import io.vertx.core.metrics.impl.DummyVertxMetrics;
-import io.vertx.core.net.NetClient;
-import io.vertx.core.net.NetClientOptions;
-import io.vertx.core.net.NetServer;
-import io.vertx.core.net.NetServerOptions;
-import io.vertx.core.net.SocketAddress;
+import io.vertx.core.net.*;
import io.vertx.core.spi.VertxMetricsFactory;
-import io.vertx.core.spi.metrics.DatagramSocketMetrics;
-import io.vertx.core.spi.metrics.EventBusMetrics;
-import io.vertx.core.spi.metrics.HttpClientMetrics;
-import io.vertx.core.spi.metrics.HttpServerMetrics;
-import io.vertx.core.spi.metrics.TCPMetrics;
import io.vertx.test.core.VertxTestBase;
import org.junit.Ignore;
import org.junit.Test;
@@ -85,7 +62,10 @@ public void testFactoryInCluster() throws Exception {
metricsContext.set(Vertx.currentContext());
return DummyVertxMetrics.INSTANCE;
};
- clusteredVertx(new VertxOptions().setClustered(true).setMetricsOptions(new MetricsOptions().setEnabled(true).setFactory(factory)), onSuccess(vertx -> {
+ VertxOptions options = new VertxOptions()
+ .setMetricsOptions(new MetricsOptions().setEnabled(true).setFactory(factory))
+ .setEventBusOptions(new EventBusOptions().setClustered(true));
+ clusteredVertx(options, onSuccess(vertx -> {
assertSame(testThread, metricsThread.get());
assertNull(metricsContext.get());
testComplete();
diff --git a/src/test/java/io/vertx/test/core/VertxTestBase.java b/src/test/java/io/vertx/test/core/VertxTestBase.java
--- a/src/test/java/io/vertx/test/core/VertxTestBase.java
+++ b/src/test/java/io/vertx/test/core/VertxTestBase.java
@@ -11,26 +11,15 @@
package io.vertx.test.core;
-import io.vertx.core.AbstractVerticle;
-import io.vertx.core.AsyncResult;
-import io.vertx.core.Context;
-import io.vertx.core.DeploymentOptions;
-import io.vertx.core.Handler;
-import io.vertx.core.Vertx;
-import io.vertx.core.VertxOptions;
+import io.vertx.core.*;
import io.vertx.core.logging.Logger;
import io.vertx.core.logging.LoggerFactory;
-import io.vertx.core.net.JksOptions;
-import io.vertx.core.net.KeyCertOptions;
-import io.vertx.core.net.PemKeyCertOptions;
-import io.vertx.core.net.PfxOptions;
-import io.vertx.core.net.TCPSSLOptions;
+import io.vertx.core.net.*;
import io.vertx.core.spi.cluster.ClusterManager;
import io.vertx.test.fakecluster.FakeClusterManager;
import org.junit.Rule;
import javax.net.ssl.SSLContext;
-import javax.net.ssl.SSLSocketFactory;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.Collections;
@@ -156,8 +145,9 @@ protected void startNodes(int numNodes, VertxOptions options) {
vertices = new Vertx[numNodes];
for (int i = 0; i < numNodes; i++) {
int index = i;
- clusteredVertx(options.setClusterHost("localhost").setClusterPort(0).setClustered(true)
- .setClusterManager(getClusterManager()), ar -> {
+ options.setClusterManager(getClusterManager())
+ .getEventBusOptions().setHost("localhost").setPort(0).setClustered(true);
+ clusteredVertx(options, ar -> {
try {
if (ar.failed()) {
ar.cause().printStackTrace();
| Deprecate EventBusOptions delegates in VertxOptions
We spent a two-day debugging effort because this API is misleading:
VertxOptions options = new VertxOptions()
.setClustered(true)
.setClusterHost("10.x.y.z")
.setClusterPort(60000);
System.out.println(options);
options
.setEventBusOptions(new EventBusOptions()
.setSsl(true)
.setKeyStoreOptions(new JksOptions())
);
System.out.println(options);
This will result in:
VertxOptions{..., eventbus={"clustered":true,"host":"10.x.y.z","port":60000,"ssl":false, ...}
VertxOptions{..., eventbus={"clustered":false,"host":"localhost","port":0,"ssl":true, ...}
But I expected:
VertxOptions{..., eventbus={"clustered":true,"host":"10.x.y.z","port":60000,"ssl":false, ...}
VertxOptions{..., eventbus={"clustered":true,"host":"10.x.y.z","port":60000,"ssl":true, ...}
In my opinion, the API should be changed to reduce this confusion: once an option has been configured, it should not be altered by setting other options afterwards.
Workaround #1:
VertxOptions options = new VertxOptions()
// set the event bus options first
.setEventBusOptions(new EventBusOptions()
.setSsl(true)
.setKeyStoreOptions(new JksOptions()))
.setClustered(true)
.setClusterHost("10.x.y.z")
.setClusterPort(60000);
Workaround #2:
VertxOptions options = new VertxOptions()
.setClustered(true)
.setClusterHost("10.x.y.z")
.setClusterPort(60000);
// do not overwrite the event bus options; mutate the default instance instead
options.getEventBusOptions()
.setSsl(true)
.setKeyStoreOptions(new JksOptions());
| @gofabian I agree with you, this should be changed in Vert.x 4, and perhaps we also need to remove the top-level setters on VertxOptions
we do have something similar with metrics and file system options
+1
I believe this was added because initially we didn't have the -options arg
on the command line (externalized options).
It's less useful now. If we do this we should deprecate the corresponding
options in 3.7 though
yes pretty much like FileSystemOptions
I'll take care of it then.
What do you mean?
We can't remove that in 3.7, only mark it as deprecated and remove it in master.
Yes, I didn't mean anything else.
😅 that was not clear enough to me :-) | 2019-03-19T15:34:49Z | 3.6 |
eclipse-vertx/vert.x | 2,726 | eclipse-vertx__vert.x-2726 | [
"2725"
] | 3f5b55b80093d79775fb8b5cc2f00410aee2ef6e | diff --git a/src/main/java/io/vertx/core/impl/launcher/commands/BareCommand.java b/src/main/java/io/vertx/core/impl/launcher/commands/BareCommand.java
--- a/src/main/java/io/vertx/core/impl/launcher/commands/BareCommand.java
+++ b/src/main/java/io/vertx/core/impl/launcher/commands/BareCommand.java
@@ -29,6 +29,7 @@
import java.net.NetworkInterface;
import java.net.SocketException;
import java.util.Enumeration;
+import java.util.Objects;
import java.util.Properties;
import java.util.Scanner;
import java.util.concurrent.CountDownLatch;
@@ -51,11 +52,13 @@ public class BareCommand extends ClasspathHandler {
public static final String METRICS_OPTIONS_PROP_PREFIX = "vertx.metrics.options.";
protected Vertx vertx;
- protected int clusterPort;
+ protected int clusterPort;
protected String clusterHost;
- protected int quorum;
+ protected int clusterPublicPort;
+ protected String clusterPublicHost;
+ protected int quorum;
protected String haGroup;
protected String vertxOptions;
@@ -113,6 +116,29 @@ public void setClusterHost(String host) {
this.clusterHost = host;
}
+ /**
+ * Sets the cluster public port.
+ *
+ * @param port the port
+ */
+ @Option(longName = "cluster-public-port", argName = "public-port")
+ @Description("Public port to use for cluster communication. Default is -1 which means same as cluster port.")
+ @DefaultValue("-1")
+ public void setClusterPublicPort(int port) {
+ this.clusterPublicPort = port;
+ }
+
+ /**
+ * Sets the cluster public host.
+ *
+ * @param host the host
+ */
+ @Option(longName = "cluster-public-host", argName = "public-host")
+ @Description("Public host to bind to for cluster communication. If not specified, Vert.x will use the same as cluster host.")
+ public void setClusterPublicHost(String host) {
+ this.clusterPublicHost = host;
+ }
+
/**
* The Vert.x options, it can be a json file or a json string.
*
@@ -186,12 +212,18 @@ protected Vertx startVertx() {
Vertx instance;
if (isClustered()) {
log.info("Starting clustering...");
- if (!options.getClusterHost().equals(VertxOptions.DEFAULT_CLUSTER_HOST)) {
+ if (!Objects.equals(options.getClusterHost(), VertxOptions.DEFAULT_CLUSTER_HOST)) {
clusterHost = options.getClusterHost();
}
if (options.getClusterPort() != VertxOptions.DEFAULT_CLUSTER_PORT) {
clusterPort = options.getClusterPort();
}
+ if (!Objects.equals(options.getClusterPublicHost(), VertxOptions.DEFAULT_CLUSTER_PUBLIC_HOST)) {
+ clusterPublicHost = options.getClusterPublicHost();
+ }
+ if (options.getClusterPublicPort() != VertxOptions.DEFAULT_CLUSTER_PUBLIC_PORT) {
+ clusterPublicPort = options.getClusterPublicPort();
+ }
if (clusterHost == null) {
clusterHost = getDefaultAddress();
if (clusterHost == null) {
@@ -204,7 +236,12 @@ protected Vertx startVertx() {
CountDownLatch latch = new CountDownLatch(1);
AtomicReference<AsyncResult<Vertx>> result = new AtomicReference<>();
- options.setClusterHost(clusterHost).setClusterPort(clusterPort).setClustered(true);
+ options.setClustered(true)
+ .setClusterHost(clusterHost).setClusterPort(clusterPort)
+ .setClusterPublicHost(clusterPublicHost);
+ if (clusterPublicPort != -1) {
+ options.setClusterPublicPort(clusterPublicPort);
+ }
if (getHA()) {
options.setHAEnabled(true);
if (haGroup != null) {
diff --git a/src/main/java/io/vertx/core/impl/launcher/commands/RunCommand.java b/src/main/java/io/vertx/core/impl/launcher/commands/RunCommand.java
--- a/src/main/java/io/vertx/core/impl/launcher/commands/RunCommand.java
+++ b/src/main/java/io/vertx/core/impl/launcher/commands/RunCommand.java
@@ -201,14 +201,14 @@ public void setRedeployStopWaitingTime(long period) {
public void setUp(ExecutionContext context) throws CLIException {
super.setUp(context);
- // If cluster-host and / or port is set, cluster need to have been explicitly set
- io.vertx.core.cli.Option clusterHostOption = executionContext.cli().getOption("cluster-host");
- io.vertx.core.cli.Option clusterPortOption = executionContext.cli().getOption("cluster-port");
CommandLine commandLine = executionContext.commandLine();
- if ((!isClustered()) &&
- (commandLine.isOptionAssigned(clusterHostOption)
- || commandLine.isOptionAssigned(clusterPortOption))) {
- throw new CLIException("The option -cluster-host and -cluster-port requires -cluster to be enabled");
+ if (!isClustered() && (
+ commandLine.isOptionAssigned(executionContext.cli().getOption("cluster-host"))
+ || commandLine.isOptionAssigned(executionContext.cli().getOption("cluster-port"))
+ || commandLine.isOptionAssigned(executionContext.cli().getOption("cluster-public-host"))
+ || commandLine.isOptionAssigned(executionContext.cli().getOption("cluster-public-port"))
+ )) {
+ throw new CLIException("The -cluster-xxx options require -cluster to be enabled");
}
// If quorum and / or ha-group, ha need to have been explicitly set
@@ -357,6 +357,12 @@ protected void startAsBackgroundApplication(Handler<Void> onCompletion) {
if (clusterPort != 0) {
args.add("--cluster-port=" + clusterPort);
}
+ if (clusterPublicHost != null) {
+ args.add("--cluster-public-host=" + clusterPublicHost);
+ }
+ if (clusterPublicPort != -1) {
+ args.add("--cluster-public-port=" + clusterPublicPort);
+ }
if (ha) {
args.add("--ha");
}
| diff --git a/src/test/java/io/vertx/core/LauncherTest.java b/src/test/java/io/vertx/core/LauncherTest.java
--- a/src/test/java/io/vertx/core/LauncherTest.java
+++ b/src/test/java/io/vertx/core/LauncherTest.java
@@ -651,6 +651,17 @@ public void testConfigureClusterHostPortFromCommandLine() throws Exception {
assertEquals(-1, launcher.options.getClusterPublicPort());
}
+ @Test
+ public void testConfigureClusterPublicHostPortFromCommandLine() throws Exception {
+ int clusterPublicPort = TestUtils.randomHighPortInt();
+ MyLauncher launcher = new MyLauncher();
+ String[] args = {"run", "java:" + TestVerticle.class.getCanonicalName(), "-cluster", "--cluster-public-host", "127.0.0.1", "--cluster-public-port", Integer.toString(clusterPublicPort)};
+ launcher.dispatch(args);
+ assertWaitUntil(() -> TestVerticle.instanceCount.get() == 1);
+ assertEquals("127.0.0.1", launcher.options.getClusterPublicHost());
+ assertEquals(clusterPublicPort, launcher.options.getClusterPublicPort());
+ }
+
@Test
public void testOverrideClusterHostPortFromProperties() throws Exception {
int clusterPort = TestUtils.randomHighPortInt();
@@ -675,6 +686,7 @@ public void testOverrideClusterHostPortFromProperties() throws Exception {
@Test
public void testOverrideClusterHostPortFromCommandLine() throws Exception {
int clusterPort = TestUtils.randomHighPortInt();
+ int clusterPublicPort = TestUtils.randomHighPortInt();
int newClusterPort = TestUtils.randomHighPortInt();
int newClusterPublicPort = TestUtils.randomHighPortInt();
MyLauncher launcher = new MyLauncher();
@@ -682,7 +694,12 @@ public void testOverrideClusterHostPortFromCommandLine() throws Exception {
launcher.clusterPort = newClusterPort;
launcher.clusterPublicHost = "127.0.0.3";
launcher.clusterPublicPort = newClusterPublicPort;
- String[] args = {"run", "java:" + TestVerticle.class.getCanonicalName(), "-cluster", "--cluster-host", "127.0.0.2", "--cluster-port", Integer.toString(clusterPort)};
+ String[] args = {
+ "run", "java:" + TestVerticle.class.getCanonicalName(),
+ "-cluster",
+ "--cluster-host", "127.0.0.2", "--cluster-port", Integer.toString(clusterPort),
+ "--cluster-public-host", "127.0.0.4", "--cluster-public-port", Integer.toString(clusterPublicPort)
+ };
launcher.dispatch(args);
assertWaitUntil(() -> TestVerticle.instanceCount.get() == 1);
assertEquals("127.0.0.1", launcher.options.getClusterHost());
| Configure cluster public host and port from command line
Currently, the cluster public host and port can only be configured with system properties.
Adding command-line options will provide a consistent user experience (the cluster host and port can already be configured via the command line).
| 2018-11-20T14:44:18Z | 3.6 |
|
eclipse-vertx/vert.x | 2,723 | eclipse-vertx__vert.x-2723 | [
"2722"
] | 25565f6a69af53dbe4ff92e41d115a4188de11ee | diff --git a/src/main/java/io/vertx/core/http/impl/HttpHandlers.java b/src/main/java/io/vertx/core/http/impl/HttpHandlers.java
--- a/src/main/java/io/vertx/core/http/impl/HttpHandlers.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpHandlers.java
@@ -25,16 +25,19 @@
*/
public class HttpHandlers {
+ final HttpServerImpl server;
final Handler<HttpServerRequest> requestHandler;
final Handler<ServerWebSocket> wsHandler;
final Handler<HttpConnection> connectionHandler;
final Handler<Throwable> exceptionHandler;
public HttpHandlers(
+ HttpServerImpl server,
Handler<HttpServerRequest> requestHandler,
Handler<ServerWebSocket> wsHandler,
Handler<HttpConnection> connectionHandler,
Handler<Throwable> exceptionHandler) {
+ this.server = server;
this.requestHandler = requestHandler;
this.wsHandler = wsHandler;
this.connectionHandler = connectionHandler;
diff --git a/src/main/java/io/vertx/core/http/impl/HttpServerImpl.java b/src/main/java/io/vertx/core/http/impl/HttpServerImpl.java
--- a/src/main/java/io/vertx/core/http/impl/HttpServerImpl.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpServerImpl.java
@@ -31,10 +31,7 @@
import io.netty.handler.timeout.IdleStateEvent;
import io.netty.handler.timeout.IdleStateHandler;
import io.netty.util.concurrent.GlobalEventExecutor;
-import io.vertx.core.AsyncResult;
-import io.vertx.core.Closeable;
-import io.vertx.core.Future;
-import io.vertx.core.Handler;
+import io.vertx.core.*;
import io.vertx.core.buffer.Buffer;
import io.vertx.core.http.*;
import io.vertx.core.http.HttpVersion;
@@ -422,6 +419,7 @@ private void configureHttp1(ChannelPipeline pipeline, HandlerHolder<HttpHandlers
// some casting and a header check
} else {
holder = new HandlerHolder<>(holder.context, new HttpHandlers(
+ this,
new WebSocketRequestHandler(metrics, holder.handler),
holder.handler.wsHandler,
holder.handler.connectionHandler,
@@ -504,6 +502,18 @@ void configureHttp2(ChannelPipeline pipeline) {
}
}
+ /**
+ * Internal method that closes all servers when Vert.x is closing
+ */
+ public void closeAll(Handler<AsyncResult<Void>> handler) {
+ List<HttpHandlers> list = httpHandlerMgr.handlers();
+ List<Future> futures = list.stream()
+ .<Future<Void>>map(handlers -> Future.future(handlers.server::close))
+ .collect(Collectors.toList());
+ CompositeFuture fut = CompositeFuture.all(futures);
+ fut.setHandler(ar -> handler.handle(ar.mapEmpty()));
+ }
+
@Override
public void close() {
close(null);
@@ -545,6 +555,7 @@ public synchronized void close(Handler<AsyncResult<Void>> done) {
actualServer.httpHandlerMgr.removeHandler(
new HttpHandlers(
+ this,
requestStream.handler(),
wsStream.handler(),
connectionHandler,
@@ -569,6 +580,10 @@ public synchronized void close(Handler<AsyncResult<Void>> done) {
}
}
+ public synchronized boolean isClosed() {
+ return !listening;
+ }
+
@Override
public Metrics getMetrics() {
return metrics;
@@ -591,6 +606,7 @@ private void applyConnectionOptions(boolean domainSocket, ServerBootstrap bootst
private void addHandlers(HttpServerImpl server, ContextInternal context) {
server.httpHandlerMgr.addHandler(
new HttpHandlers(
+ this,
requestStream.handler(),
wsStream.handler(),
connectionHandler,
diff --git a/src/main/java/io/vertx/core/impl/VertxImpl.java b/src/main/java/io/vertx/core/impl/VertxImpl.java
--- a/src/main/java/io/vertx/core/impl/VertxImpl.java
+++ b/src/main/java/io/vertx/core/impl/VertxImpl.java
@@ -554,7 +554,7 @@ public synchronized void close(Handler<AsyncResult<Void>> completionHandler) {
eventBus.close(ar4 -> {
closeClusterManager(ar5 -> {
// Copy set to prevent ConcurrentModificationException
- Set<HttpServer> httpServers = new HashSet<>(sharedHttpServers.values());
+ Set<HttpServerImpl> httpServers = new HashSet<>(sharedHttpServers.values());
Set<NetServerImpl> netServers = new HashSet<>(sharedNetServers.values());
sharedHttpServers.clear();
sharedNetServers.clear();
@@ -572,11 +572,11 @@ public synchronized void close(Handler<AsyncResult<Void>> completionHandler) {
}
};
- for (HttpServer server : httpServers) {
- server.close(serverCloseHandler);
+ for (HttpServerImpl server : httpServers) {
+ server.closeAll(serverCloseHandler);
}
for (NetServerImpl server : netServers) {
- server.close(serverCloseHandler);
+ server.closeAll(serverCloseHandler);
}
if (serverCount == 0) {
deleteCacheDirAndShutdown(completionHandler);
diff --git a/src/main/java/io/vertx/core/net/impl/HandlerManager.java b/src/main/java/io/vertx/core/net/impl/HandlerManager.java
--- a/src/main/java/io/vertx/core/net/impl/HandlerManager.java
+++ b/src/main/java/io/vertx/core/net/impl/HandlerManager.java
@@ -20,6 +20,7 @@
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.stream.Collectors;
/**
* @author <a href="http://tfox.org">Tim Fox</a>
@@ -44,6 +45,13 @@ public boolean hasHandlers() {
return hasHandlers;
}
+ public synchronized List<T> handlers() {
+ return handlerMap.values().stream()
+ .flatMap(handlers -> handlers.list.stream())
+ .map(holder -> holder.handler)
+ .collect(Collectors.toList());
+ }
+
public HandlerHolder<T> chooseHandler(EventLoop worker) {
Handlers<T> handlers = handlerMap.get(worker);
return handlers == null ? null : handlers.chooseHandler();
diff --git a/src/main/java/io/vertx/core/net/impl/NetServerImpl.java b/src/main/java/io/vertx/core/net/impl/NetServerImpl.java
--- a/src/main/java/io/vertx/core/net/impl/NetServerImpl.java
+++ b/src/main/java/io/vertx/core/net/impl/NetServerImpl.java
@@ -22,10 +22,7 @@
import io.netty.handler.stream.ChunkedWriteHandler;
import io.netty.handler.timeout.IdleStateHandler;
import io.netty.util.concurrent.GlobalEventExecutor;
-import io.vertx.core.AsyncResult;
-import io.vertx.core.Closeable;
-import io.vertx.core.Future;
-import io.vertx.core.Handler;
+import io.vertx.core.*;
import io.vertx.core.impl.ContextInternal;
import io.vertx.core.impl.VertxInternal;
import io.vertx.core.logging.Logger;
@@ -41,9 +38,12 @@
import io.vertx.core.streams.ReadStream;
import java.net.InetSocketAddress;
+import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
/**
*
@@ -221,7 +221,7 @@ protected void initChannel(Channel ch) throws Exception {
applyConnectionOptions(socketAddress.path() != null, bootstrap);
- handlerManager.addHandler(new Handlers(handler, exceptionHandler), listenContext);
+ handlerManager.addHandler(new Handlers(this, handler, exceptionHandler), listenContext);
try {
bindFuture = AsyncResolveConnectHelper.doBind(vertx, socketAddress, bootstrap);
@@ -266,7 +266,7 @@ protected void initChannel(Channel ch) throws Exception {
this.actualPort = shared.actualPort();
VertxMetrics metrics = vertx.metricsSPI();
this.metrics = metrics != null ? metrics.createNetServerMetrics(options, new SocketAddressImpl(id.port, id.host)) : null;
- actualServer.handlerManager.addHandler(new Handlers(handler, exceptionHandler), listenContext);
+ actualServer.handlerManager.addHandler(new Handlers(this, handler, exceptionHandler), listenContext);
}
// just add it to the future so it gets notified once the bind is complete
@@ -348,6 +348,18 @@ public ReadStream<NetSocket> connectStream() {
return connectStream;
}
+ /**
+ * Internal method that closes all servers when Vert.x is closing
+ */
+ public void closeAll(Handler<AsyncResult<Void>> handler) {
+ List<Handlers> list = handlerManager.handlers();
+ List<Future> futures = list.stream()
+ .<Future<Void>>map(handlers -> Future.future(handlers.server::close))
+ .collect(Collectors.toList());
+ CompositeFuture fut = CompositeFuture.all(futures);
+ fut.setHandler(ar -> handler.handle(ar.mapEmpty()));
+ }
+
@Override
public synchronized void close(Handler<AsyncResult<Void>> completionHandler) {
if (creatingContext != null) {
@@ -379,7 +391,7 @@ public synchronized void close(Handler<AsyncResult<Void>> completionHandler) {
synchronized (vertx.sharedNetServers()) {
if (actualServer != null) {
- actualServer.handlerManager.removeHandler(new Handlers(registeredHandler, exceptionHandler), listenContext);
+ actualServer.handlerManager.removeHandler(new Handlers(this, registeredHandler, exceptionHandler), listenContext);
if (actualServer.handlerManager.hasHandlers()) {
// The actual server still has handlers so we don't actually close it
@@ -400,6 +412,10 @@ public synchronized void close(Handler<AsyncResult<Void>> completionHandler) {
}
}
+ public synchronized boolean isClosed() {
+ return !listening;
+ }
+
public synchronized int actualPort() {
return actualPort;
}
@@ -532,9 +548,11 @@ public NetSocketStream exceptionHandler(Handler<Throwable> handler) {
}
static class Handlers {
+ final NetServer server;
final Handler<NetSocket> connectionHandler;
final Handler<Throwable> exceptionHandler;
- public Handlers(Handler<NetSocket> connectionHandler, Handler<Throwable> exceptionHandler) {
+ public Handlers(NetServer server, Handler<NetSocket> connectionHandler, Handler<Throwable> exceptionHandler) {
+ this.server = server;
this.connectionHandler = connectionHandler;
this.exceptionHandler = exceptionHandler;
}
| diff --git a/src/test/benchmarks/io/vertx/benchmarks/HttpServerHandlerBenchmark.java b/src/test/benchmarks/io/vertx/benchmarks/HttpServerHandlerBenchmark.java
--- a/src/test/benchmarks/io/vertx/benchmarks/HttpServerHandlerBenchmark.java
+++ b/src/test/benchmarks/io/vertx/benchmarks/HttpServerHandlerBenchmark.java
@@ -233,7 +233,7 @@ protected void encodeHeaders(HttpHeaders headers, ByteBuf buf) {
.add(HEADER_CONTENT_LENGTH, HELLO_WORLD_LENGTH);
response.end(HELLO_WORLD_BUFFER);
};
- HandlerHolder<HttpHandlers> holder = new HandlerHolder<>(context, new HttpHandlers(app, null, null, null));
+ HandlerHolder<HttpHandlers> holder = new HandlerHolder<>(context, new HttpHandlers(null, app, null, null, null));
VertxHandler<Http1xServerConnection> handler = VertxHandler.create(holder.context, chctx -> new Http1xServerConnection(
holder.context.owner(),
null,
diff --git a/src/test/java/io/vertx/core/http/Http1xTest.java b/src/test/java/io/vertx/core/http/Http1xTest.java
--- a/src/test/java/io/vertx/core/http/Http1xTest.java
+++ b/src/test/java/io/vertx/core/http/Http1xTest.java
@@ -15,6 +15,7 @@
import io.vertx.core.*;
import io.vertx.core.buffer.Buffer;
import io.vertx.core.http.impl.HttpClientRequestImpl;
+import io.vertx.core.http.impl.HttpServerImpl;
import io.vertx.core.http.impl.HttpUtils;
import io.vertx.core.impl.ConcurrentHashSet;
import io.vertx.core.impl.ContextInternal;
@@ -4639,4 +4640,26 @@ public void testChunkedClientRequest() {
}));
await();
}
+
+ @Test
+ public void testClosingVertxCloseSharedServers() throws Exception {
+ int numServers = 2;
+ Vertx vertx = Vertx.vertx();
+ List<HttpServerImpl> servers = new ArrayList<>();
+ for (int i = 0;i < numServers;i++) {
+ HttpServer server = vertx.createHttpServer(createBaseServerOptions()).requestHandler(req -> {
+
+ });
+ startServer(server);
+ servers.add((HttpServerImpl) server);
+ }
+ CountDownLatch latch = new CountDownLatch(1);
+ vertx.close(onSuccess(v -> {
+ latch.countDown();
+ }));
+ awaitLatch(latch);
+ servers.forEach(server -> {
+ assertTrue(server.isClosed());
+ });
+ }
}
diff --git a/src/test/java/io/vertx/core/net/NetTest.java b/src/test/java/io/vertx/core/net/NetTest.java
--- a/src/test/java/io/vertx/core/net/NetTest.java
+++ b/src/test/java/io/vertx/core/net/NetTest.java
@@ -30,6 +30,7 @@
import io.vertx.core.json.JsonObject;
import io.vertx.core.logging.Logger;
import io.vertx.core.logging.LoggerFactory;
+import io.vertx.core.net.impl.NetServerImpl;
import io.vertx.core.net.impl.SocketAddressImpl;
import io.vertx.core.streams.ReadStream;
import io.vertx.test.core.*;
@@ -1817,6 +1818,28 @@ public void testSharedServersRoundRobinButFirstStartAndStopServer() throws Excep
testSharedServersRoundRobin();
}
+ @Test
+ public void testClosingVertxCloseSharedServers() throws Exception {
+ int numServers = 2;
+ Vertx vertx = Vertx.vertx();
+ List<NetServerImpl> servers = new ArrayList<>();
+ for (int i = 0;i < numServers;i++) {
+ NetServer server = vertx.createNetServer().connectHandler(so -> {
+ fail();
+ });
+ startServer(server);
+ servers.add((NetServerImpl) server);
+ }
+ CountDownLatch latch = new CountDownLatch(1);
+ vertx.close(onSuccess(v -> {
+ latch.countDown();
+ }));
+ awaitLatch(latch);
+ servers.forEach(server -> {
+ assertTrue(server.isClosed());
+ });
+ }
+
@Test
// This tests using NetSocket.writeHandlerID (on the server side)
// Send some data and make sure it is fanned out to all connections
| Close all shared TCP based servers when Vert.x is closing
When Vert.x is closed, only the first shared server is closed, not the others; since the remaining shared servers are never closed, the actual underlying server is never shut down and the close cannot complete.
On Vert.x close we should instead close all the servers through a dedicated method, as this is not a regular close.
| 2018-11-20T09:11:04Z | 3.6 |
|
eclipse-vertx/vert.x | 2,724 | eclipse-vertx__vert.x-2724 | [
"1720"
] | 4ab19ce106dd4bca6da52529c6bac7a6a8935d02 | diff --git a/src/main/java/io/vertx/core/http/impl/HttpServerResponseImpl.java b/src/main/java/io/vertx/core/http/impl/HttpServerResponseImpl.java
--- a/src/main/java/io/vertx/core/http/impl/HttpServerResponseImpl.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpServerResponseImpl.java
@@ -13,8 +13,10 @@
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
+import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelPromise;
import io.netty.handler.codec.http.*;
+import io.netty.util.concurrent.GenericFutureListener;
import io.vertx.codegen.annotations.Nullable;
import io.vertx.core.AsyncResult;
import io.vertx.core.Future;
@@ -449,11 +451,12 @@ private void doSendFile(String filename, long offset, long length, Handler<Async
}
prepareHeaders(bytesWritten);
+ ChannelFuture channelFuture;
RandomAccessFile raf = null;
try {
raf = new RandomAccessFile(file, "r");
conn.writeToChannel(new AssembledHttpResponse(head, version, status, headers));
- conn.sendFile(raf, Math.min(offset, file.length()), contentLength);
+ channelFuture = conn.sendFile(raf, Math.min(offset, file.length()), contentLength);
} catch (IOException e) {
try {
if (raf != null) {
@@ -469,12 +472,14 @@ private void doSendFile(String filename, long offset, long length, Handler<Async
}
return;
}
+ written = true;
// write an empty last content to let the http encoder know the response is complete
- ChannelPromise channelFuture = conn.channelFuture();
- conn.writeToChannel(LastHttpContent.EMPTY_LAST_CONTENT, channelFuture);
- written = true;
+ channelFuture.addListener(future -> {
+ conn.writeToChannel(LastHttpContent.EMPTY_LAST_CONTENT);
+ });
+ // signal completion handler when there is one
if (resultHandler != null) {
ContextInternal ctx = vertx.getOrCreateContext();
channelFuture.addListener(future -> {
diff --git a/src/main/java/io/vertx/core/net/NetSocket.java b/src/main/java/io/vertx/core/net/NetSocket.java
--- a/src/main/java/io/vertx/core/net/NetSocket.java
+++ b/src/main/java/io/vertx/core/net/NetSocket.java
@@ -53,6 +53,12 @@ public interface NetSocket extends ReadStream<Buffer>, WriteStream<Buffer> {
@Override
NetSocket fetch(long amount);
+ /**
+ * {@inheritDoc}
+ * <p>
+ * This handler might be called after the close handler when the socket is paused and there are still
+ * buffers to deliver.
+ */
@Override
NetSocket endHandler(Handler<Void> endHandler);
diff --git a/src/main/java/io/vertx/core/net/impl/ConnectionBase.java b/src/main/java/io/vertx/core/net/impl/ConnectionBase.java
--- a/src/main/java/io/vertx/core/net/impl/ConnectionBase.java
+++ b/src/main/java/io/vertx/core/net/impl/ConnectionBase.java
@@ -49,6 +49,7 @@ public abstract class ConnectionBase {
*/
public static final VertxException CLOSED_EXCEPTION = new VertxException("Connection was closed", true);
private static final Logger log = LoggerFactory.getLogger(ConnectionBase.class);
+ private static final int MAX_REGION_SIZE = 1024 * 1024;
private final VoidChannelPromise voidPromise;
protected final VertxInternal vertx;
@@ -271,6 +272,38 @@ public boolean isSSL() {
return chctx.pipeline().get(SslHandler.class) != null;
}
+ /**
+ * Send a file as a file region for zero copy transfer to the socket.
+ *
+ * The implementation splits the file into multiple regions to avoid stalling the pipeline
+ * and producing idle timeouts for very large files.
+ *
+ * @param file the file to send
+ * @param offset the file offset
+ * @param length the file length
+ * @param writeFuture the write future to be completed when the transfer is done or failed
+ */
+ private void sendFileRegion(RandomAccessFile file, long offset, long length, ChannelPromise writeFuture) {
+ if (length < MAX_REGION_SIZE) {
+ writeToChannel(new DefaultFileRegion(file.getChannel(), offset, length), writeFuture);
+ } else {
+ ChannelPromise promise = chctx.newPromise();
+ FileRegion region = new DefaultFileRegion(file.getChannel(), offset, MAX_REGION_SIZE);
+      // Explicitly retain this file region so the underlying channel is not closed by the NIO channel when it
+      // has been sent, as we need it again
+ region.retain();
+ writeToChannel(region, promise);
+ promise.addListener(future -> {
+ if (future.isSuccess()) {
+ sendFileRegion(file, offset + MAX_REGION_SIZE, length - MAX_REGION_SIZE, writeFuture);
+ } else {
+ future.cause().printStackTrace();
+ writeFuture.setFailure(future.cause());
+ }
+ });
+ }
+ }
+
protected ChannelFuture sendFile(RandomAccessFile raf, long offset, long length) throws IOException {
// Write the content.
ChannelPromise writeFuture = chctx.newPromise();
@@ -279,8 +312,7 @@ protected ChannelFuture sendFile(RandomAccessFile raf, long offset, long length)
writeToChannel(new ChunkedFile(raf, offset, length, 8192), writeFuture);
} else {
// No encryption - use zero-copy.
- FileRegion region = new DefaultFileRegion(raf.getChannel(), offset, length);
- writeToChannel(region, writeFuture);
+ sendFileRegion(raf, offset, length, writeFuture);
}
if (writeFuture != null) {
writeFuture.addListener(fut -> raf.close());
diff --git a/src/main/java/io/vertx/core/net/impl/NetSocketImpl.java b/src/main/java/io/vertx/core/net/impl/NetSocketImpl.java
--- a/src/main/java/io/vertx/core/net/impl/NetSocketImpl.java
+++ b/src/main/java/io/vertx/core/net/impl/NetSocketImpl.java
@@ -90,6 +90,7 @@ public NetSocketImpl(VertxInternal vertx, ChannelHandlerContext channel, SocketA
pending = new InboundBuffer<>(context);
pending.drainHandler(v -> doResume());
pending.handler(NULL_MSG_HANDLER);
+ pending.emptyHandler(v -> checkEnd());
}
synchronized void registerEventBusHandler() {
@@ -337,7 +338,6 @@ public void end() {
@Override
protected void handleClosed() {
- Handler<Void> handler;
MessageConsumer consumer;
synchronized (this) {
if (closed) {
@@ -346,17 +346,24 @@ protected void handleClosed() {
closed = true;
consumer = registration;
registration = null;
- handler = endHandler;
- }
- if (handler != null) {
- handler.handle(null);
}
+ checkEnd();
super.handleClosed();
if (consumer != null) {
consumer.unregister();
}
}
+ private void checkEnd() {
+ Handler<Void> handler;
+ synchronized (this) {
+ if (!closed || pending.size() > 0 || (handler = endHandler) == null) {
+ return;
+ }
+ }
+ handler.handle(null);
+ }
+
public synchronized void handleMessage(Object msg) {
checkContext();
if (!pending.write(msg)) {
| diff --git a/src/test/java/io/vertx/core/http/Http1xTest.java b/src/test/java/io/vertx/core/http/Http1xTest.java
--- a/src/test/java/io/vertx/core/http/Http1xTest.java
+++ b/src/test/java/io/vertx/core/http/Http1xTest.java
@@ -31,6 +31,7 @@
import org.junit.Test;
import java.io.File;
+import java.io.RandomAccessFile;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -4662,4 +4663,36 @@ public void testClosingVertxCloseSharedServers() throws Exception {
assertTrue(server.isClosed());
});
}
+
+ @Test
+ public void testHttpServerWithIdleTimeoutSendChunkedFile() throws Exception {
+ int expected = 16 * 1024 * 1024; // We estimate this will take more than 200ms to transfer with a 1ms pause in chunks
+ File sent = TestUtils.tmpFile(".dat", expected);
+ server.close();
+ server = vertx
+ .createHttpServer(createBaseServerOptions().setIdleTimeout(400).setIdleTimeoutUnit(TimeUnit.MILLISECONDS))
+ .requestHandler(
+ req -> {
+ req.response().sendFile(sent.getAbsolutePath());
+ });
+ startServer();
+ client.getNow(8080, "localhost", "/", resp -> {
+ long now = System.currentTimeMillis();
+ int[] length = {0};
+ resp.handler(buff -> {
+ length[0] += buff.length();
+ resp.pause();
+ vertx.setTimer(1, id -> {
+ resp.resume();
+ });
+ });
+ resp.exceptionHandler(this::fail);
+ resp.endHandler(v -> {
+ assertEquals(expected, length[0]);
+ assertTrue(System.currentTimeMillis() - now > 1000);
+ testComplete();
+ });
+ });
+ await();
+ }
}
diff --git a/src/test/java/io/vertx/core/http/HttpTest.java b/src/test/java/io/vertx/core/http/HttpTest.java
--- a/src/test/java/io/vertx/core/http/HttpTest.java
+++ b/src/test/java/io/vertx/core/http/HttpTest.java
@@ -131,7 +131,7 @@ public void testListenDomainSocketAddress() throws Exception {
Assume.assumeTrue("Native transport must be enabled", vx.isNativeTransportEnabled());
NetClient netClient = vx.createNetClient();
HttpServer httpserver = vx.createHttpServer().requestHandler(req -> req.response().end());
- File sockFile = TestUtils.tmpFile("vertx", ".sock");
+ File sockFile = TestUtils.tmpFile(".sock");
SocketAddress sockAddress = SocketAddress.domainSocketAddress(sockFile.getAbsolutePath());
httpserver.listen(sockAddress, onSuccess(server -> {
netClient.connect(sockAddress, onSuccess(sock -> {
diff --git a/src/test/java/io/vertx/core/net/NetTest.java b/src/test/java/io/vertx/core/net/NetTest.java
--- a/src/test/java/io/vertx/core/net/NetTest.java
+++ b/src/test/java/io/vertx/core/net/NetTest.java
@@ -49,10 +49,7 @@
import javax.net.ssl.SSLPeerUnverifiedException;
import javax.security.cert.X509Certificate;
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.OutputStreamWriter;
+import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.concurrent.*;
@@ -83,7 +80,7 @@ public void setUp() throws Exception {
super.setUp();
if (USE_DOMAIN_SOCKETS) {
assertTrue("Native transport not enabled", USE_NATIVE_TRANSPORT);
- tmp = TestUtils.tmpFile("vertx", ".sock");
+ tmp = TestUtils.tmpFile(".sock");
testAddress = SocketAddress.domainSocketAddress(tmp.getAbsolutePath());
} else {
testAddress = SocketAddress.inetSocketAddress(1234, "localhost");
@@ -1821,7 +1818,7 @@ public void testSharedServersRoundRobinButFirstStartAndStopServer() throws Excep
@Test
public void testClosingVertxCloseSharedServers() throws Exception {
int numServers = 2;
- Vertx vertx = Vertx.vertx();
+ Vertx vertx = Vertx.vertx(getOptions());
List<NetServerImpl> servers = new ArrayList<>();
for (int i = 0;i < numServers;i++) {
NetServer server = vertx.createNetServer().connectHandler(so -> {
@@ -3356,6 +3353,91 @@ public void testServerNetSocketShouldBeClosedWhenTheClosedHandlerIsCalled() thro
await();
}
+ @Test
+ public void testServerWithIdleTimeoutSendChunkedFile() throws Exception {
+    testIdleTimeoutSendChunkedFile(true);
+ }
+
+ @Test
+ public void testClientWithIdleTimeoutSendChunkedFile() throws Exception {
+    testIdleTimeoutSendChunkedFile(false);
+ }
+
+  private void testIdleTimeoutSendChunkedFile(boolean idleOnServer) throws Exception {
+    int expected = 16 * 1024 * 1024; // We estimate this will take more than 200ms to transfer with a 1ms pause between chunks
+ File sent = TestUtils.tmpFile(".dat", expected);
+ server.close();
+ Consumer<NetSocket> sender = so -> {
+ so.sendFile(sent.getAbsolutePath());
+ };
+ Consumer<NetSocket> receiver = so -> {
+ int[] len = { 0 };
+ long now = System.currentTimeMillis();
+ so.handler(buff -> {
+ len[0] += buff.length();
+ so.pause();
+ vertx.setTimer(1, id -> {
+ so.resume();
+ });
+ });
+ so.exceptionHandler(this::fail);
+ so.endHandler(v -> {
+ assertEquals(0, expected - len[0]);
+ assertTrue(System.currentTimeMillis() - now > 200);
+ testComplete();
+ });
+ };
+ server = vertx
+ .createNetServer(new NetServerOptions().setIdleTimeout(200).setIdleTimeoutUnit(TimeUnit.MILLISECONDS))
+ .connectHandler((idleOnServer ? sender : receiver)::accept);
+ startServer();
+ client.close();
+ client = vertx.createNetClient(new NetClientOptions().setIdleTimeout(200).setIdleTimeoutUnit(TimeUnit.MILLISECONDS));
+ client.connect(testAddress, onSuccess(idleOnServer ? receiver : sender));
+ await();
+ }
+
+ @Test
+ public void testHalfCloseCallsEndHandlerAfterBuffersAreDelivered() throws Exception {
+ // Synchronized on purpose
+ StringBuffer expected = new StringBuffer();
+ server.connectHandler(so -> {
+ Context ctx = vertx.getOrCreateContext();
+ for (int i = 0;i < 8;i++) {
+ int val = i;
+ ctx.runOnContext(v -> {
+ String chunk = "chunk-" + val + "\r\n";
+ so.write(chunk);
+ expected.append(chunk);
+ });
+ }
+ ctx.runOnContext(v -> {
+ // This will half close the connection
+ so.close();
+ });
+ });
+ startServer();
+ client.connect(testAddress, "localhost", onSuccess(so -> {
+ so.pause();
+ AtomicBoolean closed = new AtomicBoolean();
+ AtomicBoolean ended = new AtomicBoolean();
+ Buffer received = Buffer.buffer();
+ so.handler(received::appendBuffer);
+ so.closeHandler(v -> {
+ assertFalse(ended.get());
+ assertEquals(Buffer.buffer(), received);
+ closed.set(true);
+ so.resume();
+ });
+ so.endHandler(v -> {
+ assertEquals(expected.toString(), received.toString());
+ ended.set(true);
+ testComplete();
+ });
+ }));
+ await();
+ }
+
protected void startServer(SocketAddress remoteAddress) throws Exception {
startServer(remoteAddress, vertx.getOrCreateContext());
}
diff --git a/src/test/java/io/vertx/core/streams/InboundBufferTest.java b/src/test/java/io/vertx/core/streams/InboundBufferTest.java
--- a/src/test/java/io/vertx/core/streams/InboundBufferTest.java
+++ b/src/test/java/io/vertx/core/streams/InboundBufferTest.java
@@ -17,6 +17,7 @@
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
@@ -678,4 +679,23 @@ public void testCheckThatPauseAfterResumeWontDoAnyEmission() {
});
await();
}
+
+ @Test
+ public void testBufferSignalingFullImmediately() {
+ context.runOnContext(v1 -> {
+ buffer = new InboundBuffer<>(context, 0L);
+ List<Integer> emitted = new ArrayList<>();
+ buffer.drainHandler(v -> {
+ assertEquals(Arrays.asList(0, 1), emitted);
+ testComplete();
+ });
+ buffer.handler(emitted::add);
+ assertTrue(emit());
+ assertEquals(Collections.singletonList(0), emitted);
+ buffer.pause();
+ assertFalse(emit());
+ buffer.resume();
+ });
+ await();
+ }
}
diff --git a/src/test/java/io/vertx/it/TransportTest.java b/src/test/java/io/vertx/it/TransportTest.java
--- a/src/test/java/io/vertx/it/TransportTest.java
+++ b/src/test/java/io/vertx/it/TransportTest.java
@@ -87,7 +87,7 @@ private void testNetServer(VertxOptions options) {
@Test
public void testDomainSocketServer() throws Exception {
- File sock = TestUtils.tmpFile("vertx", ".sock");
+ File sock = TestUtils.tmpFile(".sock");
vertx = Vertx.vertx();
NetServer server = vertx.createNetServer();
server.connectHandler(so -> {});
@@ -100,7 +100,7 @@ public void testDomainSocketServer() throws Exception {
@Test
public void testDomainSocketClient() throws Exception {
- File sock = TestUtils.tmpFile("vertx", ".sock");
+ File sock = TestUtils.tmpFile(".sock");
vertx = Vertx.vertx();
NetClient client = vertx.createNetClient();
client.connect(SocketAddress.domainSocketAddress(sock.getAbsolutePath()), onFailure(err -> {
diff --git a/src/test/java/io/vertx/test/core/TestUtils.java b/src/test/java/io/vertx/test/core/TestUtils.java
--- a/src/test/java/io/vertx/test/core/TestUtils.java
+++ b/src/test/java/io/vertx/test/core/TestUtils.java
@@ -24,6 +24,7 @@
import javax.security.cert.X509Certificate;
import java.io.ByteArrayOutputStream;
import java.io.File;
+import java.io.RandomAccessFile;
import java.nio.file.Files;
import java.util.EnumSet;
import java.util.List;
@@ -409,12 +410,23 @@ public static String loopbackAddress() {
/**
* Create a temp file that does not exists.
*/
- public static File tmpFile(String prefix, String suffix) throws Exception {
- File tmp = Files.createTempFile(prefix, suffix).toFile();
+ public static File tmpFile(String suffix) throws Exception {
+ File tmp = Files.createTempFile("vertx", suffix).toFile();
assertTrue(tmp.delete());
return tmp;
}
+ /**
+ * Create a temp file that exists and with a specified {@code length}. The file will be deleted at VM exit.
+ */
+ public static File tmpFile(String suffix, long length) throws Exception {
+ File tmp = File.createTempFile("vertx", suffix);
+ tmp.deleteOnExit();
+ RandomAccessFile f = new RandomAccessFile(tmp, "rw");
+ f.setLength(length);
+ return tmp;
+ }
+
public static TestLoggerFactory testLogging(Runnable runnable) {
InternalLoggerFactory prev = InternalLoggerFactory.getDefaultFactory();
TestLoggerFactory factory = new TestLoggerFactory();
| Socket idle-closed in middle of slowly progressing sendFile() response
I tried streaming music to a music player from vertx using `sendFile()`, but the connection is closed once the idleTimeout (60 seconds in my case) configured in HttpServerOptions has elapsed after the request started.
I tracked the socket closing down, and it seems this is what triggers the close:
`channelIdle(ctx, event);` in `io.netty.handler.timeout.IdleStateHandler.AllIdleTimeoutTask`. I guess some other piece of code forgets to mark the connection "busy" while sendFile is active.
vert.x 3.3.3, netty 4.1.1.Final
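A minimal sketch of the setup described above (hypothetical: the port, file path and handler wiring are illustrative, not taken from the reporter's application):

```java
import io.vertx.core.Vertx;
import io.vertx.core.http.HttpServerOptions;

// Hypothetical reproducer: an HTTP server with a 60s idle timeout serving a
// large file via sendFile(). If the transfer outlives the idle timeout, the
// connection is closed mid-response, as described above.
public class SendFileIdleTimeoutRepro {
  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();
    vertx.createHttpServer(new HttpServerOptions().setIdleTimeout(60)) // seconds
      .requestHandler(req -> req.response().sendFile("/path/to/large-file.mp3"))
      .listen(8200);
  }
}
```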
| Can you run Wireshark to monitor the TCP activity over the wire?
It could also be that a lot of data is sent to the client at once, the client buffers more than 60 seconds of music, and then the buffer fills up, which causes no more data to be sent.
Ok, wireshark showed a lot of data at the beginning and then got throttled by "tcp window full", but kept sending packets of 65kB (local TCP connections allow big packets) every 3.5 seconds or so. That fits the ~140kbit/s quality of the music. At 60.015 seconds after the response was sent I got a ClosedChannelException in the sendFile result handler. The data kept flowing on wireshark, though. At 290.893 seconds the FIN flag was finally raised in a packet sent from the server and the data flow stopped. Finally, at 341.009 the buffer was empty in the music player and the client responded with FIN, completing the closure of the connection.
So I did a new test. I created a 120G sparse file using the command `truncate -s 120000000000 zero`. I'm testing on the same machine, so throughput is about 1GB/s; 120G is enough for >100 seconds, well above the 60-second idle timeout.
I then ran wget against the url serving that file, downloading at maximum speed all the time. Here's the result:
```
--2016-11-20 19:21:17-- http://localhost:8200/zero
Resolving localhost (localhost)... 127.0.0.1
Connecting to localhost (localhost)|127.0.0.1|:8200... connected.
HTTP request sent, awaiting response... 200 OK
Length: 120000000000 (112G) [application/octet-stream]
Saving to: ‘/dev/null’
/dev/null 70%[=============> ] 78.56G 1.09GB/s in 79s
2016-11-20 19:22:36 (1023 MB/s) - Connection closed at byte 84356054245. Retrying.
```
So the connection still breaks even when sending at full speed.
For reference, the documentation of `idleTimeout` is:
> Set the idle timeout, in seconds. zero means don't timeout. This determines if a connection will timeout and be closed if no data is received within the timeout.
If the Linux sendfile syscall is used to send the file (I didn't manage to find out), then it means that it's not actually possible to monitor in software whether there is activity on the socket or not, since the operating system is transferring the whole file in a single call.
I don't know if the bug is in vertx or in netty, but the idle timeout originated from netty. If either is indeed using syscalls for the transfer, then I guess the idle timeout should be disabled for the duration of the syscall.
So I wrote a workaround to use AsyncFile + pump instead (sketched below) -- speed dropped to 300MB/s but no more idle problems.
Another workaround would be to disable the idle timeout and track the idle state myself, then I could still use sendFile. That would allow me to have separate timeouts for "no request active" and "request active".
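A minimal sketch of the AsyncFile + Pump workaround mentioned above; the file path and port are placeholders, not the reporter's actual values:

```java
import io.vertx.core.Vertx;
import io.vertx.core.file.AsyncFile;
import io.vertx.core.file.OpenOptions;
import io.vertx.core.http.HttpServerOptions;
import io.vertx.core.streams.Pump;

// Hypothetical sketch: pumping through an AsyncFile issues regular userspace
// writes, so the connection never looks idle, at some cost in throughput.
public class AsyncFilePumpWorkaround {
  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();
    vertx.createHttpServer(new HttpServerOptions().setIdleTimeout(60))
      .requestHandler(req -> vertx.fileSystem().open("/path/to/large-file.mp3",
        new OpenOptions().setRead(true), ar -> {
          if (ar.succeeded()) {
            AsyncFile file = ar.result();
            req.response().setChunked(true); // no content-length known up front
            Pump.pump(file, req.response()).start();
            file.endHandler(v -> req.response().end());
          } else {
            req.response().setStatusCode(500).end();
          }
        }))
      .listen(8200);
  }
}
```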
Can you provide a reproducer for this case, so one can have a look at it?
@vietj sure, here you go: https://github.com/xkr47/vertx-i1720
See [MainVerticle.java](https://github.com/xkr47/vertx-i1720/blob/master/src/main/java/io/vertx/i1720/MainVerticle.java) for instructions.
Netty apparently implements sendFile with `java.nio.channels.FileChannel.transferTo(..., socket);`, whose implementation calls a native function with the file descriptor of the socket, so it very probably uses the sendfile operating system call.
Yes indeed, you are right.
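For illustration, the zero-copy path under discussion boils down to something like this schematic sketch (not Netty's actual code):

```java
import java.io.RandomAccessFile;
import java.nio.channels.FileChannel;
import java.nio.channels.WritableByteChannel;

// Schematic sketch: a transferTo() call hands data to the kernel directly
// (typically via sendfile(2) on Linux), so userspace observes no per-chunk
// socket activity that an idle-state handler could count.
class ZeroCopySketch {
  static void send(RandomAccessFile file, WritableByteChannel socket) throws Exception {
    FileChannel ch = file.getChannel();
    long pos = 0, size = ch.size();
    while (pos < size) {
      pos += ch.transferTo(pos, size - pos, socket);
    }
  }
}
```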
So, any solutions? I am also facing a similar issue.
someone needs to investigate it! | 2018-11-20T12:50:35Z | 3.6 |
eclipse-vertx/vert.x | 2,631 | eclipse-vertx__vert.x-2631 | [
"2607"
] | 62023b832614a6257b88639e3419f42a8136e9b8 | diff --git a/src/main/java/io/vertx/core/http/impl/Http2ServerConnection.java b/src/main/java/io/vertx/core/http/impl/Http2ServerConnection.java
--- a/src/main/java/io/vertx/core/http/impl/Http2ServerConnection.java
+++ b/src/main/java/io/vertx/core/http/impl/Http2ServerConnection.java
@@ -34,6 +34,8 @@
import java.net.URISyntaxException;
import java.util.ArrayDeque;
+import static io.vertx.core.spi.metrics.Metrics.METRICS_ENABLED;
+
/**
* @author <a href="mailto:julien@julienviet.com">Julien Viet</a>
*/
@@ -278,7 +280,10 @@ void handleClose() {
void complete() {
synchronized (Http2ServerConnection.this) {
- response = new Http2ServerResponseImpl(Http2ServerConnection.this, this, method, uri, true, contentEncoding);
+ response = new Http2ServerResponseImpl(Http2ServerConnection.this, this, method, true, contentEncoding, null);
+ if (METRICS_ENABLED && metrics != null) {
+ response.metric(metrics.responsePushed(conn.metric(), method, uri, response));
+ }
completionHandler.complete(response);
}
}
diff --git a/src/main/java/io/vertx/core/http/impl/Http2ServerRequestImpl.java b/src/main/java/io/vertx/core/http/impl/Http2ServerRequestImpl.java
--- a/src/main/java/io/vertx/core/http/impl/Http2ServerRequestImpl.java
+++ b/src/main/java/io/vertx/core/http/impl/Http2ServerRequestImpl.java
@@ -92,8 +92,10 @@ public Http2ServerRequestImpl(Http2ServerConnection conn, Http2Stream stream, Ht
int idx = serverOrigin.indexOf("://");
host = serverOrigin.substring(idx + 3);
}
- Object metric = (METRICS_ENABLED && metrics != null) ? metrics.requestBegin(conn.metric(), this) : null;
- this.response = new Http2ServerResponseImpl(conn, this, metric, false, contentEncoding, host);
+ this.response = new Http2ServerResponseImpl(conn, this, method(), false, contentEncoding, host);
+ if (METRICS_ENABLED && metrics != null) {
+ response.metric(metrics.requestBegin(conn.metric(), this));
+ }
}
@Override
diff --git a/src/main/java/io/vertx/core/http/impl/Http2ServerResponseImpl.java b/src/main/java/io/vertx/core/http/impl/Http2ServerResponseImpl.java
--- a/src/main/java/io/vertx/core/http/impl/Http2ServerResponseImpl.java
+++ b/src/main/java/io/vertx/core/http/impl/Http2ServerResponseImpl.java
@@ -17,6 +17,7 @@
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.handler.codec.http.HttpResponseStatus;
+import io.netty.handler.codec.http.HttpStatusClass;
import io.netty.handler.codec.http2.DefaultHttp2Headers;
import io.netty.handler.codec.http2.Http2Headers;
import io.vertx.codegen.annotations.Nullable;
@@ -26,6 +27,7 @@
import io.vertx.core.Handler;
import io.vertx.core.MultiMap;
import io.vertx.core.buffer.Buffer;
+import io.vertx.core.http.HttpHeaders;
import io.vertx.core.http.HttpMethod;
import io.vertx.core.http.HttpServerResponse;
import io.vertx.core.http.StreamResetException;
@@ -53,17 +55,18 @@ public class Http2ServerResponseImpl implements HttpServerResponse {
private final VertxHttp2Stream stream;
private final ChannelHandlerContext ctx;
private final Http2ServerConnection conn;
+ private final boolean head;
private final boolean push;
- private final Object metric;
private final String host;
private Http2Headers headers = new DefaultHttp2Headers();
+ private Object metric;
private Http2HeadersAdaptor headersMap;
private Http2Headers trailers;
private Http2HeadersAdaptor trailedMap;
private boolean chunked;
private boolean headWritten;
private boolean ended;
- private int statusCode = 200;
+ private HttpResponseStatus status = HttpResponseStatus.OK;
private String statusMessage; // Not really used but we keep the message for the getStatusMessage()
private Handler<Void> drainHandler;
private Handler<Throwable> exceptionHandler;
@@ -76,12 +79,16 @@ public class Http2ServerResponseImpl implements HttpServerResponse {
private boolean inHandler;
private NetSocket netSocket;
- public Http2ServerResponseImpl(Http2ServerConnection conn, VertxHttp2Stream stream, Object metric, boolean push, String contentEncoding, String host) {
-
- this.metric = metric;
+ public Http2ServerResponseImpl(Http2ServerConnection conn,
+ VertxHttp2Stream stream,
+ HttpMethod method,
+ boolean push,
+ String contentEncoding,
+ String host) {
this.stream = stream;
this.ctx = conn.handlerContext;
this.conn = conn;
+ this.head = method == HttpMethod.HEAD;
this.push = push;
this.host = host;
@@ -90,25 +97,8 @@ public Http2ServerResponseImpl(Http2ServerConnection conn, VertxHttp2Stream stre
}
}
- public Http2ServerResponseImpl(
- Http2ServerConnection conn,
- VertxHttp2Stream stream,
- HttpMethod method,
- String path,
- boolean push,
- String contentEncoding) {
- this.stream = stream;
- this.ctx = conn.handlerContext;
- this.conn = conn;
- this.push = push;
- this.host = null;
-
- if (contentEncoding != null) {
- putHeader(HttpHeaderNames.CONTENT_ENCODING, contentEncoding);
- }
-
- HttpServerMetrics metrics = conn.metrics();
- this.metric = (METRICS_ENABLED && metrics != null) ? metrics.responsePushed(conn.metric(), method, path, this) : null;
+ void metric(Object metric) {
+ this.metric = metric;
}
synchronized void beginRequest() {
@@ -155,7 +145,7 @@ public HttpServerResponse exceptionHandler(Handler<Throwable> handler) {
@Override
public int getStatusCode() {
synchronized (conn) {
- return statusCode;
+ return status.code();
}
}
@@ -166,7 +156,7 @@ public HttpServerResponse setStatusCode(int statusCode) {
}
synchronized (conn) {
checkHeadWritten();
- this.statusCode = statusCode;
+ this.status = HttpResponseStatus.valueOf(statusCode);
return this;
}
}
@@ -175,7 +165,7 @@ public HttpServerResponse setStatusCode(int statusCode) {
public String getStatusMessage() {
synchronized (conn) {
if (statusMessage == null) {
- return HttpResponseStatus.valueOf(statusCode).reasonPhrase();
+ return status.reasonPhrase();
}
return statusMessage;
}
@@ -374,7 +364,7 @@ public void end() {
NetSocket netSocket() {
checkEnded();
if (netSocket == null) {
- statusCode = 200;
+ status = HttpResponseStatus.OK;
if (!checkSendHeaders(false)) {
throw new IllegalStateException("Response for CONNECT already sent");
}
@@ -391,16 +381,29 @@ private void end(ByteBuf chunk) {
}
}
+ private void sanitizeHeaders() {
+ if (head || status == HttpResponseStatus.NOT_MODIFIED) {
+ headers.remove(HttpHeaders.TRANSFER_ENCODING);
+ } else if (status == HttpResponseStatus.RESET_CONTENT) {
+ headers.remove(HttpHeaders.TRANSFER_ENCODING);
+ headers.set(HttpHeaders.CONTENT_LENGTH, "0");
+ } else if (status.codeClass() == HttpStatusClass.INFORMATIONAL || status == HttpResponseStatus.NO_CONTENT) {
+ headers.remove(HttpHeaders.TRANSFER_ENCODING);
+ headers.remove(HttpHeaders.CONTENT_LENGTH);
+ }
+ }
+
private boolean checkSendHeaders(boolean end) {
if (!headWritten) {
if (headersEndHandler != null) {
headersEndHandler.handle(null);
}
+ sanitizeHeaders();
if (Metrics.METRICS_ENABLED && metric != null) {
conn.metrics().responseBegin(metric, this);
}
headWritten = true;
- headers.status(Integer.toString(statusCode));
+      headers.status(Integer.toString(status.code())); // Could be optimized for the usual case?
stream.writeHeaders(headers, end);
if (end) {
ctx.flush();
@@ -422,7 +425,7 @@ void write(ByteBuf chunk, boolean end) {
chunk = Unpooled.EMPTY_BUFFER;
}
if (end) {
- if (!headWritten && !headers.contains(HttpHeaderNames.CONTENT_LENGTH)) {
+ if (!headWritten && !head && status != HttpResponseStatus.NOT_MODIFIED && !headers.contains(HttpHeaderNames.CONTENT_LENGTH)) {
headers().set(HttpHeaderNames.CONTENT_LENGTH, String.valueOf(chunk.readableBytes()));
}
handleEnded(false);
diff --git a/src/main/java/io/vertx/core/http/impl/HttpServerResponseImpl.java b/src/main/java/io/vertx/core/http/impl/HttpServerResponseImpl.java
--- a/src/main/java/io/vertx/core/http/impl/HttpServerResponseImpl.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpServerResponseImpl.java
@@ -70,7 +70,6 @@ public class HttpServerResponseImpl implements HttpServerResponse {
private Handler<Void> endHandler;
private Handler<Void> headersEndHandler;
private Handler<Void> bodyEndHandler;
- private boolean chunked;
private boolean closed;
private final VertxHttpHeaders headers;
private MultiMap trailers;
@@ -137,7 +136,7 @@ public HttpServerResponseImpl setChunked(boolean chunked) {
checkValid();
// HTTP 1.0 does not support chunking so we ignore this if HTTP 1.0
if (version != HttpVersion.HTTP_1_0) {
- this.chunked = chunked;
+ headers.set(HttpHeaders.TRANSFER_ENCODING, chunked ? "chunked" : null);
}
return this;
}
@@ -146,7 +145,7 @@ public HttpServerResponseImpl setChunked(boolean chunked) {
@Override
public boolean isChunked() {
synchronized (conn) {
- return chunked;
+ return HttpHeaders.CHUNKED.equals(headers.get(HttpHeaders.TRANSFER_ENCODING));
}
}
@@ -446,10 +445,10 @@ private void doSendFile(String filename, long offset, long length, Handler<Async
long contentLength = Math.min(length, file.length() - offset);
bytesWritten = contentLength;
- if (!headers.contentTypeSet()) {
+ if (!headers.contains(HttpHeaders.CONTENT_TYPE)) {
String contentType = MimeMapping.getMimeTypeForFilename(filename);
if (contentType != null) {
- putHeader(HttpHeaders.CONTENT_TYPE, contentType);
+ headers.set(HttpHeaders.CONTENT_TYPE, contentType);
}
}
prepareHeaders(bytesWritten);
@@ -560,10 +559,14 @@ private void prepareHeaders(long contentLength) {
} else if (version == HttpVersion.HTTP_1_1 && !keepAlive) {
headers.set(HttpHeaders.CONNECTION, HttpHeaders.CLOSE);
}
- if (!head) {
- if (chunked) {
- headers.set(HttpHeaders.TRANSFER_ENCODING, HttpHeaders.CHUNKED);
- } else if (!headers.contentLengthSet() && contentLength >= 0) {
+ if (head || status == HttpResponseStatus.NOT_MODIFIED) {
+      // For a HEAD request or a NOT_MODIFIED response,
+      // don't automatically set the content-length
+      // and remove the transfer-encoding
+ headers.remove(HttpHeaders.TRANSFER_ENCODING);
+ } else {
+ // Set content-length header automatically
+ if (!headers.contains(HttpHeaders.TRANSFER_ENCODING) && !headers.contains(HttpHeaders.CONTENT_LENGTH) && contentLength >= 0) {
String value = contentLength == 0 ? "0" : String.valueOf(contentLength);
headers.set(HttpHeaders.CONTENT_LENGTH, value);
}
@@ -586,7 +589,7 @@ private void reportResponseBegin() {
private HttpServerResponseImpl write(ByteBuf chunk) {
synchronized (conn) {
checkValid();
- if (!headWritten && !chunked && !headers.contentLengthSet()) {
+ if (!headWritten && !headers.contains(HttpHeaders.TRANSFER_ENCODING) && !headers.contains(HttpHeaders.CONTENT_LENGTH)) {
if (version != HttpVersion.HTTP_1_0) {
throw new IllegalStateException("You must set the Content-Length header to be the total size of the message "
+ "body BEFORE sending any data if you are not using HTTP chunked encoding.");
diff --git a/src/main/java/io/vertx/core/http/impl/VertxHttpResponseEncoder.java b/src/main/java/io/vertx/core/http/impl/VertxHttpResponseEncoder.java
--- a/src/main/java/io/vertx/core/http/impl/VertxHttpResponseEncoder.java
+++ b/src/main/java/io/vertx/core/http/impl/VertxHttpResponseEncoder.java
@@ -42,6 +42,6 @@ public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
protected boolean isContentAlwaysEmpty(HttpResponse msg) {
// In HttpServerCodec this is tracked via a FIFO queue of HttpMethod
// here we track it in the assembled response as we don't use HttpServerCodec
- return msg instanceof AssembledHttpResponse && ((AssembledHttpResponse) msg).head();
+ return (msg instanceof AssembledHttpResponse && ((AssembledHttpResponse) msg).head()) || super.isContentAlwaysEmpty(msg);
}
}
diff --git a/src/main/java/io/vertx/core/http/impl/headers/VertxHttpHeaders.java b/src/main/java/io/vertx/core/http/impl/headers/VertxHttpHeaders.java
--- a/src/main/java/io/vertx/core/http/impl/headers/VertxHttpHeaders.java
+++ b/src/main/java/io/vertx/core/http/impl/headers/VertxHttpHeaders.java
@@ -61,14 +61,6 @@ public VertxHttpHeaders() {
head.before = head.after = head;
}
- public boolean contentLengthSet() {
- return contains(io.vertx.core.http.HttpHeaders.CONTENT_LENGTH);
- }
-
- public boolean contentTypeSet() {
- return contains(io.vertx.core.http.HttpHeaders.CONTENT_TYPE);
- }
-
@Override
public VertxHttpHeaders add(CharSequence name, CharSequence value) {
int h = AsciiString.hashCode(name);
@@ -383,7 +375,7 @@ public HttpHeaders addShort(CharSequence name, short value) {
@Override
public HttpHeaders setInt(CharSequence name, int value) {
- throw new UnsupportedOperationException();
+ return set(name, Integer.toString(value));
}
@Override
@@ -503,7 +495,10 @@ private VertxHttpHeaders set0(final CharSequence name, final CharSequence strVal
int h = AsciiString.hashCode(name);
int i = index(h);
remove0(h, i, name);
- add0(h, i, name, strVal);
+ if (strVal != null) {
+      // a null value acts as a removal: the previous mapping was already removed above
+ add0(h, i, name, strVal);
+ }
return this;
}
| diff --git a/src/test/java/io/vertx/core/http/Http1xTest.java b/src/test/java/io/vertx/core/http/Http1xTest.java
--- a/src/test/java/io/vertx/core/http/Http1xTest.java
+++ b/src/test/java/io/vertx/core/http/Http1xTest.java
@@ -3835,66 +3835,55 @@ public void testSendFileFailsWhenClientClosesConnection() throws Exception {
await();
}
- @Test
- public void testHeadMustNotAutomaticallySetContentHeaders() throws Exception {
- testHeadAutomaticallySet(MultiMap.caseInsensitiveMultiMap(), respHeaders -> {
- assertFalse(respHeaders.contains("Content-Length"));
- assertFalse(respHeaders.contains("Transfer-Encoding"));
- });
- }
-
- @Test
- public void testHeadMustNotSendBodyWhenContentLengthSet() throws Exception {
- MultiMap reqHeaders = MultiMap.caseInsensitiveMultiMap();
- reqHeaders.set("Content-Length", "10");
- testHeadAutomaticallySet(reqHeaders, respHeaders -> {
- assertEquals(" 10", respHeaders.get("Content-Length"));
- assertNull(respHeaders.get("Transfer-Encoding"));
- });
- }
-
- @Test
- public void testHeadMustNotSendBodyWhenTransferEncodingSet() throws Exception {
- MultiMap reqHeaders = MultiMap.caseInsensitiveMultiMap();
- reqHeaders.set("Transfer-Encoding", "chunked");
- testHeadAutomaticallySet(reqHeaders, respHeaders -> {
- assertNull(respHeaders.get("Content-Length"));
- assertEquals(" chunked", respHeaders.get("Transfer-Encoding"));
- });
- }
-
- private void testHeadAutomaticallySet(MultiMap reqHeaders, Consumer<MultiMap> headersChecker) throws Exception {
+  // Use a raw socket to check that the response body is actually empty (it could otherwise be an empty chunk)
+ protected MultiMap checkEmptyHttpResponse(HttpMethod method, int sc, MultiMap reqHeaders) throws Exception {
server.requestHandler(req -> {
HttpServerResponse resp = req.response();
+ resp.setStatusCode(sc);
resp.headers().addAll(reqHeaders);
resp.end();
});
startServer();
NetClient client = vertx.createNetClient();
- client.connect(DEFAULT_HTTP_PORT, DEFAULT_HTTPS_HOST, onSuccess(so -> {
- so.write(
- "HEAD / HTTP/1.1\r\n" +
- "Connection: close\r\n" +
- "\r\n");
- Buffer buff = Buffer.buffer();
- so.handler(buff::appendBuffer);
- so.endHandler(v -> {
- String content = buff.toString();
- int idx = content.indexOf("\r\n\r\n");
- LinkedList<String> records = new LinkedList<String>(Arrays.asList(content.substring(0, idx).split("\\r\\n")));
- assertEquals("HTTP/1.1 200 OK", records.removeFirst());
- assertEquals("", content.substring(idx + 4));
- MultiMap respHeaders = MultiMap.caseInsensitiveMultiMap();
- records.forEach(record -> {
- int index = record.indexOf(":");
- String value = record.substring(0, index);
- respHeaders.add(value, record.substring(index + 1));
- });
- headersChecker.accept(respHeaders);
- testComplete();
+ try {
+ CompletableFuture<MultiMap> result = new CompletableFuture<>();
+ client.connect(DEFAULT_HTTP_PORT, DEFAULT_HTTPS_HOST, ar -> {
+ if (ar.succeeded()) {
+ NetSocket so = ar.result();
+ so.write(
+ method.name() + " / HTTP/1.1\r\n" +
+ "Connection: close\r\n" +
+ "\r\n");
+ Buffer body = Buffer.buffer();
+ so.exceptionHandler(result::completeExceptionally);
+ so.handler(body::appendBuffer);
+ so.endHandler(v -> {
+ String content = body.toString();
+ int idx = content.indexOf("\r\n\r\n");
+ if (idx == content.length() - 4) {
+ LinkedList<String> records = new LinkedList<>(Arrays.asList(content.substring(0, idx).split("\\r\\n")));
+ String statusLine = records.removeFirst();
+ assertEquals("HTTP/1.1 " + sc, statusLine.substring(0, statusLine.indexOf(' ', 9)));
+ assertEquals("", content.substring(idx + 4));
+ MultiMap respHeaders = MultiMap.caseInsensitiveMultiMap();
+ records.forEach(record -> {
+ int index = record.indexOf(":");
+ String value = record.substring(0, index);
+ respHeaders.add(value.trim(), record.substring(index + 1).trim());
+ });
+ result.complete(respHeaders);
+ } else {
+ result.completeExceptionally(new Exception());
+ }
+ });
+ } else {
+ result.completeExceptionally(ar.cause());
+ }
});
- }));
- await();
+ return result.get(20, TimeUnit.SECONDS);
+ } finally {
+ client.close();
+ }
}
@Test
diff --git a/src/test/java/io/vertx/core/http/HttpTest.java b/src/test/java/io/vertx/core/http/HttpTest.java
--- a/src/test/java/io/vertx/core/http/HttpTest.java
+++ b/src/test/java/io/vertx/core/http/HttpTest.java
@@ -2320,7 +2320,7 @@ public void testListenTwice2() throws Exception {
}
@Test
- public void testHeadNoBody() {
+ public void testHeadCanSetContentLength() {
server.requestHandler(req -> {
assertEquals(HttpMethod.HEAD, req.method());
// Head never contains a body but it can contain a Content-Length header
@@ -2331,7 +2331,158 @@ public void testHeadNoBody() {
server.listen(onSuccess(s -> {
client.request(HttpMethod.HEAD, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
- assertEquals(41, Integer.parseInt(resp.headers().get("Content-Length")));
+ assertEquals("41", resp.headers().get("Content-Length"));
+ resp.endHandler(v -> testComplete());
+ }).end();
+ }));
+
+ await();
+ }
+
+ @Test
+ public void testHeadDoesNotSetAutomaticallySetContentLengthHeader() throws Exception {
+ MultiMap respHeaders = checkEmptyHttpResponse(HttpMethod.HEAD, 200, MultiMap.caseInsensitiveMultiMap());
+ assertNull(respHeaders.get("content-length"));
+ assertNull(respHeaders.get("transfer-encoding"));
+ }
+
+ @Test
+ public void testHeadAllowsContentLengthHeader() throws Exception {
+ MultiMap respHeaders = checkEmptyHttpResponse(HttpMethod.HEAD, 200, MultiMap.caseInsensitiveMultiMap().set("content-length", "34"));
+ assertEquals("34", respHeaders.get("content-length"));
+ assertNull(respHeaders.get("transfer-encoding"));
+ }
+
+ @Test
+ public void testHeadRemovesTransferEncodingHeader() throws Exception {
+ MultiMap respHeaders = checkEmptyHttpResponse(HttpMethod.HEAD, 200, MultiMap.caseInsensitiveMultiMap().set("transfer-encoding", "chunked"));
+ assertNull(respHeaders.get("content-length"));
+ assertNull(respHeaders.get("transfer-encoding"));
+ }
+
+ @Test
+ public void testNoContentRemovesContentLengthHeader() throws Exception {
+ MultiMap respHeaders = checkEmptyHttpResponse(HttpMethod.GET, 204, MultiMap.caseInsensitiveMultiMap().set("content-length", "34"));
+ assertNull(respHeaders.get("content-length"));
+ assertNull(respHeaders.get("transfer-encoding"));
+ }
+
+ @Test
+ public void testNoContentRemovesTransferEncodingHeader() throws Exception {
+ MultiMap respHeaders = checkEmptyHttpResponse(HttpMethod.GET, 204, MultiMap.caseInsensitiveMultiMap().set("transfer-encoding", "chunked"));
+ assertNull(respHeaders.get("content-length"));
+ assertNull(respHeaders.get("transfer-encoding"));
+ }
+
+ @Test
+ public void testResetContentSetsContentLengthHeader() throws Exception {
+ MultiMap respHeaders = checkEmptyHttpResponse(HttpMethod.GET, 205, MultiMap.caseInsensitiveMultiMap());
+ assertEquals("0", respHeaders.get("content-length"));
+ assertNull(respHeaders.get("transfer-encoding"));
+ }
+
+ @Test
+ public void testResetContentRemovesTransferEncodingHeader() throws Exception {
+ MultiMap respHeaders = checkEmptyHttpResponse(HttpMethod.GET, 205, MultiMap.caseInsensitiveMultiMap().set("transfer-encoding", "chunked"));
+ assertEquals("0", respHeaders.get("content-length"));
+ assertNull(respHeaders.get("transfer-encoding"));
+ }
+
+ @Test
+ public void testNotModifiedDoesNotSetAutomaticallySetContentLengthHeader() throws Exception {
+ MultiMap respHeaders = checkEmptyHttpResponse(HttpMethod.GET, 304, MultiMap.caseInsensitiveMultiMap());
+ assertNull(respHeaders.get("content-length"));
+ assertNull(respHeaders.get("transfer-encoding"));
+ }
+
+ @Test
+ public void testNotModifiedAllowsContentLengthHeader() throws Exception {
+ MultiMap respHeaders = checkEmptyHttpResponse(HttpMethod.GET, 304, MultiMap.caseInsensitiveMultiMap().set("content-length", "34"));
+ assertEquals("34", respHeaders.get("Content-Length"));
+ assertNull(respHeaders.get("transfer-encoding"));
+ }
+
+ @Test
+ public void testNotModifiedRemovesTransferEncodingHeader() throws Exception {
+ MultiMap respHeaders = checkEmptyHttpResponse(HttpMethod.GET, 304, MultiMap.caseInsensitiveMultiMap().set("transfer-encoding", "chunked"));
+ assertNull(respHeaders.get("content-length"));
+ assertNull(respHeaders.get("transfer-encoding"));
+ }
+
+ @Test
+ public void test1xxRemovesContentLengthHeader() throws Exception {
+ MultiMap respHeaders = checkEmptyHttpResponse(HttpMethod.GET, 102, MultiMap.caseInsensitiveMultiMap().set("content-length", "34"));
+ assertNull(respHeaders.get("content-length"));
+ assertNull(respHeaders.get("transfer-encoding"));
+ }
+
+ @Test
+ public void test1xxRemovesTransferEncodingHeader() throws Exception {
+ MultiMap respHeaders = checkEmptyHttpResponse(HttpMethod.GET, 102, MultiMap.caseInsensitiveMultiMap().set("transfer-encoding", "chunked"));
+ assertNull(respHeaders.get("content-length"));
+ assertNull(respHeaders.get("transfer-encoding"));
+ }
+
+ protected MultiMap checkEmptyHttpResponse(HttpMethod method, int sc, MultiMap reqHeaders) throws Exception {
+ server.requestHandler(req -> {
+ HttpServerResponse resp = req.response();
+ resp.setStatusCode(sc);
+ resp.headers().addAll(reqHeaders);
+ resp.end();
+ });
+ startServer();
+ try {
+ CompletableFuture<MultiMap> result = new CompletableFuture<>();
+ client.request(method, DEFAULT_HTTP_PORT, DEFAULT_HTTPS_HOST, "/", resp -> {
+ Buffer body = Buffer.buffer();
+ resp.exceptionHandler(result::completeExceptionally);
+ resp.handler(body::appendBuffer);
+ resp.endHandler(v -> {
+ if (body.length() > 0) {
+ result.completeExceptionally(new Exception());
+ } else {
+ result.complete(resp.headers());
+ }
+ });
+ }).setFollowRedirects(false)
+ .exceptionHandler(result::completeExceptionally)
+ .end();
+ return result.get(20, TimeUnit.SECONDS);
+ } finally {
+ client.close();
+ }
+ }
+
+ @Test
+ public void testHeadHasNoContentLengthByDefault() {
+ server.requestHandler(req -> {
+ assertEquals(HttpMethod.HEAD, req.method());
+ // By default HEAD does not have a content-length header
+ req.response().end();
+ });
+
+ server.listen(onSuccess(s -> {
+ client.request(HttpMethod.HEAD, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ assertNull(resp.headers().get(HttpHeaders.CONTENT_LENGTH));
+ resp.endHandler(v -> testComplete());
+ }).end();
+ }));
+
+ await();
+ }
+
+ @Test
+ public void testHeadButCanSetContentLength() {
+ server.requestHandler(req -> {
+ assertEquals(HttpMethod.HEAD, req.method());
+ // By default HEAD does not have a content-length header but it can contain a content-length header
+ // if explicitly set
+ req.response().putHeader(HttpHeaders.CONTENT_LENGTH, "41").end();
+ });
+
+ server.listen(onSuccess(s -> {
+ client.request(HttpMethod.HEAD, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ assertEquals("41", resp.headers().get(HttpHeaders.CONTENT_LENGTH));
resp.endHandler(v -> testComplete());
}).end();
}));
| Extra chunk for request with no content.
The following code produces an extra terminating `0` chunk (the chunked-encoding trailer), which HTTP clients parse as the start of a second, invalid response:
```java
Vertx vertx = Vertx.vertx();
HttpServer server = vertx.createHttpServer();
server.requestStream().toObservable().subscribe(req -> {
System.err.println("Got request");
req.response().setChunked(true).setStatusCode(204).end();
});
server.rxListen(9001, "localhost").subscribe(s -> System.err.println("started"), Throwable::printStackTrace);
```
My guess is that if there is no body, there should not be any trailer end chunk.
Related to #1986 (was only for HEAD).
https://tools.ietf.org/html/rfc7231#section-6.3.5 indicates:
A 204 response is terminated by the first empty line after the header
fields because it cannot contain a message body.
Perhaps there are other HTTP status codes that can't contain a message body?
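For reference, a hedged sketch (not from the PR) of the cases where the spec forbids a message body; the class and helper names are illustrative:

```java
import io.vertx.core.http.HttpMethod;

// Illustrative helper: responses to HEAD requests, all 1xx responses,
// 204 (No Content) and 304 (Not Modified) never carry a message body.
// (RFC 7230 §3.3.3 also covers 2xx responses to CONNECT, omitted here.)
class BodyRules {
  static boolean bodyForbidden(HttpMethod method, int status) {
    return method == HttpMethod.HEAD
      || (status >= 100 && status < 200)
      || status == 204
      || status == 304;
  }
}
```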
| that should be easy to fix
1xx responses should not contain a response body
I found the following conditions where the spec forbids a body:
```
(status >= 100 && status < 200)
|| status == Status.NO_CONTENT.getStatusCode()
|| status == Status.NOT_MODIFIED.getStatusCode()
```
currently I do have:
```
boolean isChunked = chunked && !head
&& status != HttpResponseStatus.NO_CONTENT
&& status.code() / 100 > 1;
``` | 2018-09-19T08:14:33Z | 3.5 |
eclipse-vertx/vert.x | 2,458 | eclipse-vertx__vert.x-2458 | [
"2456"
] | 47d15d66dcd56381ee07754545258d3c48537784 | diff --git a/src/main/java/io/vertx/core/dns/impl/DnsClientImpl.java b/src/main/java/io/vertx/core/dns/impl/DnsClientImpl.java
--- a/src/main/java/io/vertx/core/dns/impl/DnsClientImpl.java
+++ b/src/main/java/io/vertx/core/dns/impl/DnsClientImpl.java
@@ -82,11 +82,14 @@ public DnsClientImpl(VertxInternal vertx, DnsClientOptions options) {
}
this.dnsServer = new InetSocketAddress(options.getHost(), options.getPort());
+ if (this.dnsServer.isUnresolved()) {
+ throw new IllegalArgumentException("Cannot resolve the host to a valid ip address");
+ }
this.vertx = vertx;
Transport transport = vertx.transport();
actualCtx = vertx.getOrCreateContext();
- channel = transport.datagramChannel(InternetProtocolFamily.IPv4);
+ channel = transport.datagramChannel(this.dnsServer.getAddress() instanceof Inet4Address ? InternetProtocolFamily.IPv4 : InternetProtocolFamily.IPv6);
channel.config().setOption(ChannelOption.DATAGRAM_CHANNEL_ACTIVE_ON_REGISTRATION, true);
channel.config().setMaxMessagesPerRead(1);
channel.config().setAllocator(PartialPooledByteBufAllocator.INSTANCE);
| diff --git a/src/test/java/io/vertx/test/core/DNSTest.java b/src/test/java/io/vertx/test/core/DNSTest.java
--- a/src/test/java/io/vertx/test/core/DNSTest.java
+++ b/src/test/java/io/vertx/test/core/DNSTest.java
@@ -110,6 +110,36 @@ public void testResolveA() throws Exception {
dnsServer.stop();
}
+ @Test
+ public void testUnresolvedDnsServer() throws Exception {
+ final String ip = "10.0.0.1";
+ try {
+ DnsClient dns = vertx.createDnsClient(new DnsClientOptions().setHost("iamanunresolvablednsserver.com").setPort(53));
+ fail();
+ } catch (Exception e) {
+ assertTrue(e instanceof IllegalArgumentException);
+ assertEquals("Cannot resolve the host to a valid ip address", e.getMessage());
+ }
+ }
+
+ @Test
+ public void testResolveAIpV6() throws Exception {
+ final String ip = "10.0.0.1";
+ // force the fake dns server to Ipv6
+ DnsClient dns = prepareDns(FakeDNSServer.testResolveA(ip).ipAddress("::1"));
+ dns.resolveA("vertx.io", onSuccess(result -> {
+ assertFalse(result.isEmpty());
+ assertEquals(1, result.size());
+ assertEquals(ip, result.get(0));
+ ((DnsClientImpl) dns).inProgressQueries(num -> {
+ assertEquals(0, (int)num);
+ testComplete();
+ });
+ }));
+ await();
+ dnsServer.stop();
+ }
+
@Test
public void testResolveAAAA() throws Exception {
DnsClient dns = prepareDns(FakeDNSServer.testResolveAAAA("::1"));
diff --git a/src/test/java/io/vertx/test/fakedns/FakeDNSServer.java b/src/test/java/io/vertx/test/fakedns/FakeDNSServer.java
--- a/src/test/java/io/vertx/test/fakedns/FakeDNSServer.java
+++ b/src/test/java/io/vertx/test/fakedns/FakeDNSServer.java
@@ -59,7 +59,9 @@ public static RecordStore A_store(Map<String, String> entries) {
}
public static final int PORT = 53530;
+ public static final String IP_ADDRESS = "127.0.0.1";
+ private String ipAddress = IP_ADDRESS;
private int port = PORT;
private final RecordStore store;
private DatagramAcceptor acceptor;
@@ -77,6 +79,11 @@ public InetSocketAddress localAddress() {
return (InetSocketAddress) getTransports()[0].getAcceptor().getLocalAddress();
}
+ public FakeDNSServer ipAddress(String ipAddress) {
+ this.ipAddress = ipAddress;
+ return this;
+ }
+
public FakeDNSServer port(int p) {
port = p;
return this;
@@ -325,7 +332,7 @@ public Set<ResourceRecord> getRecords(QuestionRecord questionRecord)
@Override
public void start() throws IOException {
- UdpTransport transport = new UdpTransport("127.0.0.1", port);
+ UdpTransport transport = new UdpTransport(ipAddress, port);
setTransports( transport );
acceptor = transport.getAcceptor();
| DNS client should support calling a DNS server over IpV6
Step to reproduce:
`vertx.createDnsClient(53, "2001:4860:4860::8888").lookup4("vertx.io", res -> {});`
=> the result fails with the following exception: 'java.net.SocketException: Address family not supported by protocol'
Expected: The DnsClient should be able to call a Dns server over IpV4 or IpV6
As a sidenote, I also tried using the full IpV6 address "2001:4860:4860:0:0:0:0:8888" but it doesn't work either.
If time permits, I'll investigate this issue and provide a fix for it
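A minimal sketch of one possible direction for a fix, mirroring the patch above: select the datagram channel's protocol family from the resolved server address instead of hardcoding IPv4 (the helper name is illustrative):

```java
import java.net.Inet4Address;
import java.net.InetSocketAddress;
import io.netty.channel.socket.InternetProtocolFamily;

// Sketch: resolve the DNS server address first, then pick IPv4 or IPv6
// for the datagram channel based on the resolved address type.
class DnsFamilySketch {
  static InternetProtocolFamily familyFor(String host, int port) {
    InetSocketAddress dnsServer = new InetSocketAddress(host, port);
    if (dnsServer.isUnresolved()) {
      throw new IllegalArgumentException("Cannot resolve the host to a valid ip address");
    }
    return dnsServer.getAddress() instanceof Inet4Address
      ? InternetProtocolFamily.IPv4
      : InternetProtocolFamily.IPv6;
  }
}
```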
| 2018-05-16T15:33:07Z | 3.5 |
|
eclipse-vertx/vert.x | 2,392 | eclipse-vertx__vert.x-2392 | [
"1531",
"1531"
] | 47bc80cd615dab4c5980e4d6d0015f64f4d41d81 | diff --git a/src/main/java/io/vertx/core/eventbus/impl/EventBusImpl.java b/src/main/java/io/vertx/core/eventbus/impl/EventBusImpl.java
--- a/src/main/java/io/vertx/core/eventbus/impl/EventBusImpl.java
+++ b/src/main/java/io/vertx/core/eventbus/impl/EventBusImpl.java
@@ -28,6 +28,7 @@
import io.vertx.core.eventbus.ReplyFailure;
import io.vertx.core.eventbus.SendContext;
import io.vertx.core.impl.VertxInternal;
+import io.vertx.core.impl.utils.ConcurrentCyclicSequence;
import io.vertx.core.logging.Logger;
import io.vertx.core.logging.LoggerFactory;
import io.vertx.core.spi.metrics.EventBusMetrics;
@@ -55,7 +56,7 @@ public class EventBusImpl implements EventBus, MetricsProvider {
private final AtomicLong replySequence = new AtomicLong(0);
protected final VertxInternal vertx;
protected final EventBusMetrics metrics;
- protected final ConcurrentMap<String, Handlers> handlerMap = new ConcurrentHashMap<>();
+ protected final ConcurrentMap<String, ConcurrentCyclicSequence<HandlerHolder>> handlerMap = new ConcurrentHashMap<>();
protected final CodecManager codecManager = new CodecManager();
protected volatile boolean started;
@@ -227,11 +228,12 @@ protected MessageImpl createMessage(boolean send, String address, MultiMap heade
return msg;
}
- protected <T> void addRegistration(String address, HandlerRegistration<T> registration,
+ protected <T> HandlerHolder<T> addRegistration(String address, HandlerRegistration<T> registration,
boolean replyHandler, boolean localOnly) {
Objects.requireNonNull(registration.getHandler(), "handler");
- boolean newAddress = addLocalRegistration(address, registration, replyHandler, localOnly);
- addRegistration(newAddress, address, replyHandler, localOnly, registration::setResult);
+ LocalRegistrationResult<T> result = addLocalRegistration(address, registration, replyHandler, localOnly);
+ addRegistration(result.newAddress, address, replyHandler, localOnly, registration::setResult);
+ return result.holder;
}
protected <T> void addRegistration(boolean newAddress, String address,
@@ -240,8 +242,17 @@ protected <T> void addRegistration(boolean newAddress, String address,
completionHandler.handle(Future.succeededFuture());
}
- protected <T> boolean addLocalRegistration(String address, HandlerRegistration<T> registration,
- boolean replyHandler, boolean localOnly) {
+ private static class LocalRegistrationResult<T> {
+ final HandlerHolder<T> holder;
+ final boolean newAddress;
+ LocalRegistrationResult(HandlerHolder<T> holder, boolean newAddress) {
+ this.holder = holder;
+ this.newAddress = newAddress;
+ }
+ }
+
+ private <T> LocalRegistrationResult<T> addLocalRegistration(String address, HandlerRegistration<T> registration,
+ boolean replyHandler, boolean localOnly) {
Objects.requireNonNull(address, "address");
Context context = Vertx.currentContext();
@@ -252,63 +263,46 @@ protected <T> boolean addLocalRegistration(String address, HandlerRegistration<T
}
registration.setHandlerContext(context);
- boolean newAddress = false;
-
- HandlerHolder holder = new HandlerHolder<>(metrics, registration, replyHandler, localOnly, context);
+ HandlerHolder<T> holder = new HandlerHolder<>(metrics, registration, replyHandler, localOnly, context);
- Handlers handlers = handlerMap.get(address);
- if (handlers == null) {
- handlers = new Handlers();
- Handlers prevHandlers = handlerMap.putIfAbsent(address, handlers);
- if (prevHandlers != null) {
- handlers = prevHandlers;
- }
- newAddress = true;
- }
- handlers.list.add(holder);
+ ConcurrentCyclicSequence<HandlerHolder> handlers = new ConcurrentCyclicSequence<HandlerHolder>().add(holder);
+ ConcurrentCyclicSequence<HandlerHolder> actualHandlers = handlerMap.merge(
+ address,
+ handlers,
+ (old, prev) -> old.add(prev.first()));
if (hasContext) {
HandlerEntry entry = new HandlerEntry<>(address, registration);
context.addCloseHook(entry);
}
- return newAddress;
+ boolean newAddress = handlers == actualHandlers;
+ return new LocalRegistrationResult<>(holder, newAddress);
}
- protected <T> void removeRegistration(String address, HandlerRegistration<T> handler, Handler<AsyncResult<Void>> completionHandler) {
- HandlerHolder holder = removeLocalRegistration(address, handler);
- removeRegistration(holder, address, completionHandler);
+ protected <T> void removeRegistration(HandlerHolder<T> holder, Handler<AsyncResult<Void>> completionHandler) {
+ boolean last = removeLocalRegistration(holder);
+ removeRegistration(last ? holder : null, holder.getHandler().address(), completionHandler);
}
- protected <T> void removeRegistration(HandlerHolder handlerHolder, String address,
+ protected <T> void removeRegistration(HandlerHolder<T> handlerHolder, String address,
Handler<AsyncResult<Void>> completionHandler) {
callCompletionHandlerAsync(completionHandler);
}
- protected <T> HandlerHolder removeLocalRegistration(String address, HandlerRegistration<T> handler) {
- Handlers handlers = handlerMap.get(address);
- HandlerHolder lastHolder = null;
- if (handlers != null) {
- synchronized (handlers) {
- int size = handlers.list.size();
- // Requires a list traversal. This is tricky to optimise since we can't use a set since
- // we need fast ordered traversal for the round robin
- for (int i = 0; i < size; i++) {
- HandlerHolder holder = handlers.list.get(i);
- if (holder.getHandler() == handler) {
- handlers.list.remove(i);
- holder.setRemoved();
- if (handlers.list.isEmpty()) {
- handlerMap.remove(address);
- lastHolder = holder;
- }
- holder.getContext().removeCloseHook(new HandlerEntry<>(address, holder.getHandler()));
- break;
- }
- }
+ private <T> boolean removeLocalRegistration(HandlerHolder<T> holder) {
+ String address = holder.getHandler().address();
+ boolean last = handlerMap.compute(address, (key, val) -> {
+ if (val == null) {
+ return null;
}
+ ConcurrentCyclicSequence<HandlerHolder> next = val.remove(holder);
+ return next.size() == 0 ? null : next;
+ }) == null;
+ if (holder.setRemoved()) {
+ holder.getContext().removeCloseHook(new HandlerEntry<>(address, holder.getHandler()));
}
- return lastHolder;
+ return last;
}
protected <T> void sendReply(MessageImpl replyMessage, MessageImpl replierMessage, DeliveryOptions options,
@@ -377,11 +371,11 @@ protected boolean isMessageLocal(MessageImpl msg) {
protected <T> boolean deliverMessageLocally(MessageImpl msg) {
msg.setBus(this);
- Handlers handlers = handlerMap.get(msg.address());
+ ConcurrentCyclicSequence<HandlerHolder> handlers = handlerMap.get(msg.address());
if (handlers != null) {
if (msg.isSend()) {
//Choose one
- HandlerHolder holder = handlers.choose();
+ HandlerHolder holder = handlers.next();
if (metrics != null) {
metrics.messageReceived(msg.address(), !msg.isSend(), isMessageLocal(msg), holder != null ? 1 : 0);
}
@@ -391,9 +385,9 @@ protected <T> boolean deliverMessageLocally(MessageImpl msg) {
} else {
// Publish
if (metrics != null) {
- metrics.messageReceived(msg.address(), !msg.isSend(), isMessageLocal(msg), handlers.list.size());
+ metrics.messageReceived(msg.address(), !msg.isSend(), isMessageLocal(msg), handlers.size());
}
- for (HandlerHolder holder: handlers.list) {
+ for (HandlerHolder holder: handlers) {
deliverToHandler(msg, holder);
}
}
@@ -509,8 +503,8 @@ public void next() {
private void unregisterAll() {
// Unregister all handlers explicitly - don't rely on context hooks
- for (Handlers handlers: handlerMap.values()) {
- for (HandlerHolder holder: handlers.list) {
+ for (ConcurrentCyclicSequence<HandlerHolder> handlers: handlerMap.values()) {
+ for (HandlerHolder holder: handlers) {
holder.getHandler().unregister();
}
}
diff --git a/src/main/java/io/vertx/core/eventbus/impl/HandlerHolder.java b/src/main/java/io/vertx/core/eventbus/impl/HandlerHolder.java
--- a/src/main/java/io/vertx/core/eventbus/impl/HandlerHolder.java
+++ b/src/main/java/io/vertx/core/eventbus/impl/HandlerHolder.java
@@ -36,17 +36,18 @@ public HandlerHolder(EventBusMetrics metrics, HandlerRegistration<T> handler, bo
}
// We use a synchronized block to protect removed as it can be unregistered from a different thread
- public void setRemoved() {
- boolean unregisterMetric = false;
+ public boolean setRemoved() {
+ boolean unregistered = false;
synchronized (this) {
if (!removed) {
removed = true;
- unregisterMetric = true;
+ unregistered = true;
}
}
- if (metrics != null && unregisterMetric) {
+ if (metrics != null && unregistered) {
metrics.handlerUnregistered(handler.getMetric());
}
+ return unregistered;
}
// Because of biased locks the overhead of the synchronized lock should be very low as it's almost always
diff --git a/src/main/java/io/vertx/core/eventbus/impl/HandlerRegistration.java b/src/main/java/io/vertx/core/eventbus/impl/HandlerRegistration.java
--- a/src/main/java/io/vertx/core/eventbus/impl/HandlerRegistration.java
+++ b/src/main/java/io/vertx/core/eventbus/impl/HandlerRegistration.java
@@ -48,7 +48,7 @@ public class HandlerRegistration<T> implements MessageConsumer<T>, Handler<Messa
private final boolean localOnly;
private final Handler<AsyncResult<Message<T>>> asyncResultHandler;
private long timeoutID = -1;
- private boolean registered;
+ private HandlerHolder<T> registered;
private Handler<Message<T>> handler;
private Context handlerContext;
private AsyncResult<Void> result;
@@ -141,9 +141,10 @@ private void doUnregister(Handler<AsyncResult<Void>> completionHandler) {
}
};
}
- if (registered) {
- registered = false;
- eventBus.removeRegistration(address, this, completionHandler);
+ HandlerHolder<T> holder = registered;
+ if (holder != null) {
+ registered = null;
+ eventBus.removeRegistration(holder, completionHandler);
} else {
callCompletionHandlerAsync(completionHandler);
}
@@ -260,10 +261,9 @@ public synchronized void discardHandler(Handler<Message<T>> handler) {
@Override
public synchronized MessageConsumer<T> handler(Handler<Message<T>> handler) {
this.handler = handler;
- if (this.handler != null && !registered) {
- registered = true;
- eventBus.addRegistration(address, this, repliedAddress != null, localOnly);
- } else if (this.handler == null && registered) {
+ if (this.handler != null && registered == null) {
+ registered = eventBus.addRegistration(address, this, repliedAddress != null, localOnly);
+ } else if (this.handler == null && registered != null) {
// This will set registered to false
this.unregister();
}
@@ -277,7 +277,7 @@ public ReadStream<T> bodyStream() {
@Override
public synchronized boolean isRegistered() {
- return registered;
+ return registered != null;
}
@Override
diff --git a/src/main/java/io/vertx/core/eventbus/impl/Handlers.java b/src/main/java/io/vertx/core/eventbus/impl/Handlers.java
deleted file mode 100644
--- a/src/main/java/io/vertx/core/eventbus/impl/Handlers.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2011-2017 Contributors to the Eclipse Foundation
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License 2.0 which is available at
- * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
- * which is available at https://www.apache.org/licenses/LICENSE-2.0.
- *
- * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
- */
-
-package io.vertx.core.eventbus.impl;
-
-import java.util.List;
-import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.concurrent.atomic.AtomicInteger;
-
-/**
- * @author <a href="http://tfox.org">Tim Fox</a>
- */
-public class Handlers {
-
- private final AtomicInteger pos = new AtomicInteger(0);
- public final List<HandlerHolder> list = new CopyOnWriteArrayList<>();
-
- public HandlerHolder choose() {
- while (true) {
- int size = list.size();
- if (size == 0) {
- return null;
- }
- int p = pos.getAndIncrement();
- if (p >= size - 1) {
- pos.set(0);
- }
- try {
- return list.get(p);
- } catch (IndexOutOfBoundsException e) {
- // Can happen
- pos.set(0);
- }
- }
- }
-}
-
diff --git a/src/main/java/io/vertx/core/eventbus/impl/clustered/ClusteredEventBus.java b/src/main/java/io/vertx/core/eventbus/impl/clustered/ClusteredEventBus.java
--- a/src/main/java/io/vertx/core/eventbus/impl/clustered/ClusteredEventBus.java
+++ b/src/main/java/io/vertx/core/eventbus/impl/clustered/ClusteredEventBus.java
@@ -215,7 +215,7 @@ protected <T> void addRegistration(boolean newAddress, String address,
}
@Override
- protected <T> void removeRegistration(HandlerHolder lastHolder, String address,
+ protected <T> void removeRegistration(HandlerHolder<T> lastHolder, String address,
Handler<AsyncResult<Void>> completionHandler) {
if (lastHolder != null && subs != null && !lastHolder.isLocalOnly()) {
ownSubs.remove(address);
diff --git a/src/main/java/io/vertx/core/impl/utils/ConcurrentCyclicSequence.java b/src/main/java/io/vertx/core/impl/utils/ConcurrentCyclicSequence.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/io/vertx/core/impl/utils/ConcurrentCyclicSequence.java
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2011-2018 Contributors to the Eclipse Foundation
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+ * which is available at https://www.apache.org/licenses/LICENSE-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+ */
+
+package io.vertx.core.impl.utils;
+
+import java.util.*;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * A concurrent cyclic sequence of elements that can be used for round robin.
+ * <p/>
+ * The sequence is immutable and modifications are done with copy-on-write using
+ * {@link #add(Object)} and {@link #remove(Object)} to return a modified copy of the current instance.
+ * <p/>
+ * The internal counter uses a volatile index, so it can be incremented concurrently by several
+ * threads without locking.
+ *
+ * @author <a href="http://tfox.org">Tim Fox</a>
+ * @author <a href="mailto:julien@julienviet.com">Julien Viet</a>
+ */
+public class ConcurrentCyclicSequence<T> implements Iterable<T>, Iterator<T> {
+
+ private static final Object[] EMPTY_ARRAY = new Object[0];
+
+ private final AtomicInteger pos;
+ private final Object[] elements;
+
+ /**
+ * Create a new empty sequence.
+ */
+ public ConcurrentCyclicSequence() {
+ this(0, EMPTY_ARRAY);
+ }
+
+ /**
+   * Create a new sequence initialized with the given {@code elements}.
+ */
+ @SafeVarargs
+ public ConcurrentCyclicSequence(T... elements) {
+    this(0, Arrays.copyOf(elements, elements.length, Object[].class));
+ }
+
+ private ConcurrentCyclicSequence(int pos, Object[] elements) {
+ this.pos = new AtomicInteger(pos);
+ this.elements = elements;
+ }
+
+ /**
+ * @return the current index
+ */
+ public int index() {
+ return elements.length > 0 ? pos.get() % elements.length : 0;
+ }
+
+ /**
+ * @return the first element or {@code null} when the sequence is empty
+ */
+ @SuppressWarnings("unchecked")
+ public T first() {
+ return (T) (elements.length > 0 ? elements[0] : null);
+ }
+
+ /**
+   * Copy the current sequence, add {@code element} at the tail of the copy and return it.
+ * @param element the element to add
+ * @return the resulting sequence
+ */
+ public ConcurrentCyclicSequence<T> add(T element) {
+ int len = elements.length;
+ Object[] copy = Arrays.copyOf(elements, len + 1);
+ copy[len] = element;
+ return new ConcurrentCyclicSequence<>(pos.get(), copy);
+ }
+
+ /**
+   * Return a copy of this sequence with the first occurrence of {@code element} removed.
+   * <p/>
+   * If the sequence does not contain {@code element}, this instance is returned instead.
+ *
+ * @param element the element to remove
+ * @return the resulting sequence
+ */
+ public ConcurrentCyclicSequence<T> remove(T element) {
+ int len = elements.length;
+ for (int i = 0;i < len;i++) {
+ if (Objects.equals(element, elements[i])) {
+ if (len > 1) {
+ Object[] copy = new Object[len - 1];
+ System.arraycopy(elements,0, copy, 0, i);
+ System.arraycopy(elements, i + 1, copy, i, len - i - 1);
+ return new ConcurrentCyclicSequence<>(pos.get() % copy.length, copy);
+ } else {
+ return new ConcurrentCyclicSequence<>();
+ }
+ }
+ }
+ return this;
+ }
+
+ /**
+ * @return always {@code true}
+ */
+ @Override
+ public boolean hasNext() {
+ return true;
+ }
+
+ /**
+ * @return the next element in the sequence
+ */
+ @SuppressWarnings("unchecked")
+ @Override
+ public T next() {
+ int len = elements.length;
+ switch (len) {
+ case 0:
+ return null;
+ case 1:
+ return (T) elements[0];
+ default:
+ int p;
+ p = pos.getAndIncrement();
+ return (T) elements[Math.abs(p % len)];
+ }
+ }
+
+ /**
+ * @return the size of this sequence
+ */
+ public int size() {
+ return elements.length;
+ }
+
+ /**
+ * @return an iterator starting at the first element of the sequence, the iterator will not throw {@link ConcurrentModificationException}
+ */
+ @Override
+ @SuppressWarnings("unchecked")
+ public Iterator<T> iterator() {
+ return Arrays.asList((T[]) elements).iterator();
+ }
+}
+
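To make the copy-on-write contract above concrete, here is a short usage sketch (editor's illustration, not part of the patch; the handler names are made up):

```java
import io.vertx.core.impl.utils.ConcurrentCyclicSequence;

public class RoundRobinSketch {
  public static void main(String[] args) {
    ConcurrentCyclicSequence<String> seq = new ConcurrentCyclicSequence<>("h0", "h1");
    seq = seq.add("h2");             // returns a modified copy, the original is untouched
    System.out.println(seq.next());  // h0
    System.out.println(seq.next());  // h1
    System.out.println(seq.next());  // h2
    System.out.println(seq.next());  // h0, the counter wraps around
    seq = seq.remove("h1");          // again a copy, safe for readers of the old instance
    System.out.println(seq.size());  // 2
  }
}
```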
| diff --git a/src/test/benchmarks/io/vertx/benchmarks/ConcurrentCyclicSequenceBenchmark.java b/src/test/benchmarks/io/vertx/benchmarks/ConcurrentCyclicSequenceBenchmark.java
new file mode 100644
--- /dev/null
+++ b/src/test/benchmarks/io/vertx/benchmarks/ConcurrentCyclicSequenceBenchmark.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2011-2018 Contributors to the Eclipse Foundation
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+ * which is available at https://www.apache.org/licenses/LICENSE-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+ */
+package io.vertx.benchmarks;
+
+import io.vertx.core.impl.utils.ConcurrentCyclicSequence;
+import org.openjdk.jmh.annotations.*;
+
+import java.util.stream.IntStream;
+
+@State(Scope.Benchmark)
+@Threads(8)
+public class ConcurrentCyclicSequenceBenchmark extends BenchmarkBase {
+
+ private ConcurrentCyclicSequence<String> seq1;
+ private ConcurrentCyclicSequence<String> seq2;
+ private ConcurrentCyclicSequence<String> seq4;
+ private ConcurrentCyclicSequence<String> seq8;
+ private ConcurrentCyclicSequence<String> seq16;
+
+ private static ConcurrentCyclicSequence<String> gen(int size) {
+    return new ConcurrentCyclicSequence<>(IntStream.range(0, size).mapToObj(i -> "" + i).toArray(String[]::new));
+ }
+
+ @Setup
+ public void setup() {
+ seq1 = gen(1);
+ seq2 = gen(2);
+ seq4 = gen(4);
+ seq8 = gen(8);
+ seq16 = gen(16);
+ }
+
+ @Benchmark
+ public String size1() {
+ return seq1.next();
+ }
+
+ @Benchmark
+ public String size2() {
+ return seq2.next();
+ }
+
+ @Benchmark
+ public String size4() {
+ return seq4.next();
+ }
+
+ @Benchmark
+ public String size8() {
+ return seq8.next();
+ }
+
+ @Benchmark
+ public String size16() {
+ return seq16.next();
+ }
+}
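For readers unfamiliar with JMH, a benchmark like the one above is normally launched through the JMH runner. A sketch, assuming the standard `org.openjdk.jmh.runner` API is on the classpath and that this launcher sits in the same package as the benchmark:

```java
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;

public class RunCyclicSequenceBenchmark {
  public static void main(String[] args) throws Exception {
    Options opts = new OptionsBuilder()
        .include(ConcurrentCyclicSequenceBenchmark.class.getSimpleName())
        .forks(1)                 // single fork for a quick local run
        .build();
    new Runner(opts).run();       // runs size1..size16 and prints throughput per size
  }
}
```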
diff --git a/src/test/java/io/vertx/test/core/EventBusRegistrationRaceTest.java b/src/test/java/io/vertx/test/core/EventBusRegistrationRaceTest.java
new file mode 100644
--- /dev/null
+++ b/src/test/java/io/vertx/test/core/EventBusRegistrationRaceTest.java
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2011-2018 Contributors to the Eclipse Foundation
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+ * which is available at https://www.apache.org/licenses/LICENSE-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+ */
+package io.vertx.test.core;
+
+import io.vertx.core.eventbus.EventBus;
+import io.vertx.core.eventbus.MessageConsumer;
+import org.junit.Test;
+
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * This test checks race conditions in EventBus registration and unregistration.
+ *
+ * We use two separate threads to register and unregister consumers on the same address.
+ *
+ * Threads are coordinated by an atomic sequence, and we check at each iteration that Thread-A's
+ * registration / un-registration of a consumer does not prevent Thread-B's registration
+ * from receiving the message it sends to its consumer, since both threads access the same
+ * handlers list concurrently.
+ */
+public class EventBusRegistrationRaceTest extends VertxTestBase {
+
+ private static final int NUM_MSG = 300_000;
+  private static final String TEST_ADDR = "the-addr";
+
+ @Test
+ public void theTest() throws Exception {
+ AtomicInteger seq = new AtomicInteger();
+ Thread threadA = new Thread(() -> threadA(seq));
+ threadA.setName("Thread-A");
+ Thread threadB = new Thread(() -> threadB(seq));
+ threadB.setName("Thread-B");
+ threadA.start();
+ threadB.start();
+ threadA.join(20 * 1000);
+ threadB.join(20 * 1000);
+ }
+
+ private void threadA(AtomicInteger seq) {
+ EventBus eventBus = vertx.eventBus();
+ int count = 0;
+ while (count < NUM_MSG) {
+ while (count > seq.get()) {
+ Thread.yield();
+ }
+ count++;
+ MessageConsumer<Object> consumer = eventBus.consumer(TEST_ADDR, msg -> { });
+ consumer.unregister();
+ }
+ }
+
+ private void threadB(AtomicInteger seq) {
+ EventBus eventBus = vertx.eventBus();
+ MessageConsumer<Object> consumer = null;
+ int count = 0;
+ while (count < NUM_MSG) {
+ while (count > seq.get()) {
+ Thread.yield();
+ }
+ count++;
+ if (consumer != null) {
+ consumer.unregister();
+ }
+ consumer = eventBus.consumer(TEST_ADDR);
+ consumer.handler(event -> {
+ // Missing a message prevents the progression of the test
+ seq.incrementAndGet();
+ });
+ // We use publish because send might deliver the message to a Thread-A's consumer
+ // so we are sure we always get a message
+ eventBus.publish(TEST_ADDR, count);
+ }
+ }
+}
diff --git a/src/test/java/io/vertx/test/core/utils/ConcurrentCyclicSequenceTest.java b/src/test/java/io/vertx/test/core/utils/ConcurrentCyclicSequenceTest.java
new file mode 100644
--- /dev/null
+++ b/src/test/java/io/vertx/test/core/utils/ConcurrentCyclicSequenceTest.java
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2011-2018 Contributors to the Eclipse Foundation
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License 2.0 which is available at
+ * http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
+ * which is available at https://www.apache.org/licenses/LICENSE-2.0.
+ *
+ * SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
+ */
+package io.vertx.test.core.utils;
+
+import io.vertx.core.impl.utils.ConcurrentCyclicSequence;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class ConcurrentCyclicSequenceTest {
+
+ @Test
+ public void testEmpty() {
+ ConcurrentCyclicSequence<String> empty = new ConcurrentCyclicSequence<>();
+    for (int j = 0;j < 2;j++) {
+ for (int i = 0;i < 3;i++) {
+ assertEquals(0, empty.size());
+ assertEquals(0, empty.index());
+ assertEquals(null, empty.first());
+ empty.next();
+ }
+ empty = empty.remove("does-not-exist");
+ }
+ }
+
+ @Test
+ public void testAdd() {
+ ConcurrentCyclicSequence<String> seq = new ConcurrentCyclicSequence<String>().add("s1");
+ assertEquals(Collections.singletonList("s1"), toList(seq));
+ assertEquals(Arrays.asList("s1", "s2"), toList(seq.add("s2")));
+ assertEquals(Collections.singletonList("s1"), toList(seq));
+ }
+
+ @Test
+ public void testRemove() {
+ ConcurrentCyclicSequence<String> seq = new ConcurrentCyclicSequence<String>().add("s1").add("s2").add("s1").add("s2");
+ assertEquals(Arrays.asList("s1", "s2", "s1", "s2"), toList(seq));
+ assertEquals(Arrays.asList("s1", "s1", "s2"), toList(seq.remove("s2")));
+ assertEquals(Arrays.asList("s1", "s1"), toList(seq.remove("s2").remove("s2")));
+ assertEquals(Arrays.asList("s2", "s1", "s2"), toList(seq.remove("s1")));
+ assertEquals(Arrays.asList("s2", "s2"), toList(seq.remove("s1").remove("s1")));
+ assertEquals(Arrays.asList("s1", "s2"), toList(seq.remove("s1").remove("s2")));
+ assertEquals(Collections.emptyList(), toList(seq.remove("s1").remove("s2").remove("s1").remove("s2")));
+ assertEquals(Arrays.asList("s1", "s2", "s1", "s2"), toList(seq));
+ }
+
+ @Test
+ public void testNullElement() {
+ ConcurrentCyclicSequence<String> seq = new ConcurrentCyclicSequence<>("s1", null, "s2", null);
+ assertEquals(Arrays.asList("s1", null, "s2", null), toList(seq));
+ assertEquals(Arrays.asList("s1", "s2", null), toList(seq.remove(null)));
+ }
+
+ @Test
+ public void testRoundRobin() throws Exception {
+ int iter = 1_000_000;
+ int range = 10;
+ ConcurrentCyclicSequence<AtomicInteger> tmp = new ConcurrentCyclicSequence<>();
+ for (int i = 0; i < range; i++) {
+ tmp = tmp.add(new AtomicInteger());
+ }
+ ConcurrentCyclicSequence<AtomicInteger> handlers = tmp;
+ AtomicBoolean failed = new AtomicBoolean();
+ int numThreads = 10;
+ Thread[] threads = new Thread[numThreads];
+ for (int i = 0;i < numThreads;i++) {
+ threads[i] = new Thread(() -> {
+ try {
+ for (int j = 0;j < iter;j++) {
+ handlers.next().incrementAndGet();
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ failed.set(true);
+ }
+ });
+ }
+ for (Thread thread : threads) {
+ thread.start();
+ }
+ for (Thread thread : threads) {
+ thread.join();
+ }
+ for (AtomicInteger i : handlers) {
+ assertEquals(iter, i.get());
+ }
+ assertFalse(failed.get());
+ int pos = handlers.index();
+ assertTrue("Incorrect pos value " + pos, pos <= range);
+ }
+
+ private static <T> List<T> toList(ConcurrentCyclicSequence<T> seq) {
+ ArrayList<T> ret = new ArrayList<>();
+ for (T elt : seq) {
+ ret.add(elt);
+ }
+ return ret;
+ }
+}
| Possible concurrency issue in Event Bus
It looks like there is a possible concurrency issue in the Event Bus when we're adding and removing local registrations.
**addLocalRegistration**: https://github.com/eclipse/vert.x/blob/5338f6353a49a4e7e9a6350e4878bcc5f11028a0/src/main/java/io/vertx/core/eventbus/impl/EventBusImpl.java#L232
**removeLocalRegistration**: https://github.com/eclipse/vert.x/blob/5338f6353a49a4e7e9a6350e4878bcc5f11028a0/src/main/java/io/vertx/core/eventbus/impl/EventBusImpl.java#L277
There is a very small (but non-zero) chance that, when we're removing the last handler in one thread and adding a new one for the same address in another, we can lose the handlers list for that address.
Is this a real issue, despite the very small probability, or am I missing some additional synchronization?
Topic in user group: https://groups.google.com/forum/#!topic/vertx/T3F3CuG1n3w
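To illustrate the suspected race (editor's sketch with hypothetical names; the real logic lives in EventBusImpl): the check-then-act pattern below can drop a registration when removal of the last handler for an address races with a new registration on the same address.

```java
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;

class LostRegistrationSketch {
  final Map<String, List<String>> handlerMap = new ConcurrentHashMap<>();

  void register(String address, String handler) {
    List<String> handlers = handlerMap.get(address);
    if (handlers == null) {
      List<String> fresh = new CopyOnWriteArrayList<>();
      List<String> prev = handlerMap.putIfAbsent(address, fresh);
      handlers = prev != null ? prev : fresh;
    }
    handlers.add(handler);          // may add to a list a concurrent unregister() is discarding
  }

  void unregister(String address, String handler) {
    List<String> handlers = handlerMap.get(address);
    if (handlers != null) {
      handlers.remove(handler);
      if (handlers.isEmpty()) {
        handlerMap.remove(address); // races with the add() above: the new registration is lost
      }
    }
  }
}
```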
| 2018-04-16T09:06:31Z | 3.5 |
|
eclipse-vertx/vert.x | 2,366 | eclipse-vertx__vert.x-2366 | [
"2365"
] | 354abf0332a5218959c32f237b622bdfcd26faae | diff --git a/src/main/java/io/vertx/core/http/impl/Http1xServerConnection.java b/src/main/java/io/vertx/core/http/impl/Http1xServerConnection.java
--- a/src/main/java/io/vertx/core/http/impl/Http1xServerConnection.java
+++ b/src/main/java/io/vertx/core/http/impl/Http1xServerConnection.java
@@ -81,7 +81,6 @@ public class Http1xServerConnection extends Http1xConnectionBase implements Http
private static final int CHANNEL_PAUSE_QUEUE_SIZE = 5;
- private final Deque<Object> pending = new ArrayDeque<>(8);
private final String serverOrigin;
private final SSLHelper sslHelper;
final HttpServerOptions options;
@@ -94,12 +93,15 @@ public class Http1xServerConnection extends Http1xConnectionBase implements Http
private HttpServerRequestImpl currentRequest;
private HttpServerResponseImpl pendingResponse;
private ServerWebSocketImpl ws;
- private boolean channelPaused;
- private boolean paused;
- private boolean sentCheck;
private long bytesRead;
private long bytesWritten;
+
+  // (queueing == true) <=> (paused && pending.size() > 0)
+ private final Deque<Object> pending = new ArrayDeque<>(8);
+ private boolean paused;
+ private boolean sentCheck;
private boolean queueing;
+ private boolean channelPaused;
public Http1xServerConnection(VertxInternal vertx,
SSLHelper sslHelper,
@@ -130,30 +132,40 @@ synchronized void pause() {
synchronized void resume() {
if (paused) {
paused = false;
- checkNextTick();
+ if (pending.isEmpty()) {
+ unsetQueueing();
+ } else {
+ checkNextTick();
+ }
}
}
synchronized void handleMessage(Object msg) {
- if (queueing) {
+ if (queueing || !processMessage(msg)) {
enqueue(msg);
- } else {
- if (processMessage(msg)) {
- checkNextTick();
- } else {
- enqueue(msg);
- }
+ }
+ }
+
+ /**
+   * Set the connection in non-queueing mode, i.e. (pending.size() == 0 && !paused)
+ */
+ private void unsetQueueing() {
+ queueing = false;
+ if (channelPaused) {
+ // Resume the actual channel
+ channelPaused = false;
+ doResume();
}
}
private void enqueue(Object msg) {
- //We queue requests if paused or a request is in progress to prevent responses being written in the wrong order
+ // We queue requests if paused or a request is in progress to prevent responses being written in the wrong order
queueing = true;
pending.add(msg);
if (pending.size() == CHANNEL_PAUSE_QUEUE_SIZE) {
- //We pause the channel too, to prevent the queue growing too large, but we don't do this
- //until the queue reaches a certain size, to avoid pausing it too often
- super.doPause();
+ // We pause the channel too, to prevent the queue growing too large, but we don't do this
+ // until the queue reaches a certain size, to avoid pausing it too often
+ doPause();
channelPaused = true;
}
}
@@ -475,30 +487,23 @@ private void handleOther(Object msg) {
private void checkNextTick() {
// Check if there are more pending messages in the queue that can be processed next time around
- if (!paused && !sentCheck) {
+ if (!paused && queueing && !sentCheck) {
sentCheck = true;
- vertx.runOnContext(v -> {
+ context.runOnContext(v -> {
synchronized (Http1xServerConnection.this) {
sentCheck = false;
if (!paused) {
+            // This is the only place where we poll the pending queue, so we are sure that pending.size() > 0
+            // since we get here only when queueing is true
Object msg = pending.poll();
- if (msg != null) {
- if (processMessage(msg)) {
- checkNextTick();
+ if (processMessage(msg)) {
+ if (pending.isEmpty()) {
+ unsetQueueing();
} else {
- pending.addFirst(msg);
- }
- }
- if (pending.isEmpty()) {
- queueing = false;
- if (channelPaused) {
- // Resume the actual channel
- channelPaused = false;
- Http1xServerConnection.super.doResume();
+ checkNextTick();
}
} else {
- queueing = true;
- checkNextTick();
+ pending.addFirst(msg);
}
}
}
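The essence of the patch, as a sketch (editor's illustration, not vert.x API; synchronization omitted): the drain task reschedules itself only when a message was actually processed, so a connection whose pending pipelined messages can no longer be processed stops checking instead of looping.

```java
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.concurrent.Executor;
import java.util.function.Predicate;

class DrainSketch {
  private final Deque<Object> pending = new ArrayDeque<>();

  void drain(Predicate<Object> processMessage, Executor context) {
    context.execute(() -> {
      Object msg = pending.poll();
      if (msg == null) {
        return;                         // drained: leave queueing mode
      }
      if (processMessage.test(msg)) {
        drain(processMessage, context); // progress was made: schedule the next check
      } else {
        pending.addFirst(msg);          // no progress: stop, a later resume() restarts the drain
      }
    });
  }
}
```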
| diff --git a/src/test/java/io/vertx/test/core/Http1xTest.java b/src/test/java/io/vertx/test/core/Http1xTest.java
--- a/src/test/java/io/vertx/test/core/Http1xTest.java
+++ b/src/test/java/io/vertx/test/core/Http1xTest.java
@@ -1233,6 +1233,33 @@ public void testPipeliningLimit() throws Exception {
await();
}
+ @Repeat(times = 10)
+ @Test
+ public void testCloseServerConnectionWithPendingMessages() throws Exception {
+ int n = 5;
+ server.requestHandler(req -> {
+ vertx.setTimer(100, id -> {
+ req.response().close();
+ });
+ });
+ startServer();
+ client.close();
+ client = vertx.createHttpClient(new HttpClientOptions().setMaxPoolSize(n).setPipelining(true));
+ AtomicBoolean completed = new AtomicBoolean();
+ for (int i = 0;i < n * 2;i++) {
+ client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ fail();
+ }).connectionHandler(conn -> {
+ conn.closeHandler(v -> {
+ if (completed.compareAndSet(false, true)) {
+ testComplete();
+ }
+ });
+ }).end();
+ }
+ await();
+ }
+
@Test
public void testPipeliningFailure() throws Exception {
int n = 5;
| HTTP server connection keeps processing pending pipelined requests when connection closed
Currently the HTTP server keeps processing pipelined HTTP messages when the connection is closed during the processing of a request and there are pending HTTP requests in the server pipeline (i.e. their responses have not been sent yet). This is due to a bug: when a message has not been processed during a check, the connection redoes the check, which it must not do.
Changes: we only trigger a message processing check from the check itself when a message has been effectively processed.
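A sketch of the scenario this targets (editor's illustration with a hypothetical port; the regression test in the diff below is authoritative): a pipelining client queues several requests on one connection while the server closes it mid-request.

```java
import io.vertx.core.Vertx;
import io.vertx.core.http.HttpClient;
import io.vertx.core.http.HttpClientOptions;

public class PipelinedCloseSketch {
  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();
    HttpClient client = vertx.createHttpClient(
        new HttpClientOptions().setPipelining(true).setMaxPoolSize(1));
    vertx.createHttpServer()
        .requestHandler(req -> req.response().close())  // close instead of responding
        .listen(8080, "localhost", ar -> {
          for (int i = 0; i < 10; i++) {
            // the later requests sit pipelined behind the first one; with the fix the
            // server no longer keeps draining them once the connection has been closed
            client.get(8080, "localhost", "/", resp -> {}).end();
          }
        });
  }
}
```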
| 2018-04-04T04:46:16Z | 3.5 |
|
eclipse-vertx/vert.x | 2,309 | eclipse-vertx__vert.x-2309 | [
"2263"
] | 37d66d54317958c6c0b59436410f336f09a935fd | diff --git a/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java b/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java
--- a/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java
@@ -11,8 +11,23 @@
package io.vertx.core.http.impl;
-import io.vertx.core.*;
-import io.vertx.core.http.*;
+import io.vertx.core.Closeable;
+import io.vertx.core.Context;
+import io.vertx.core.Future;
+import io.vertx.core.Handler;
+import io.vertx.core.MultiMap;
+import io.vertx.core.VertxException;
+import io.vertx.core.http.HttpClient;
+import io.vertx.core.http.HttpClientOptions;
+import io.vertx.core.http.HttpClientRequest;
+import io.vertx.core.http.HttpClientResponse;
+import io.vertx.core.http.HttpConnection;
+import io.vertx.core.http.HttpHeaders;
+import io.vertx.core.http.HttpMethod;
+import io.vertx.core.http.HttpVersion;
+import io.vertx.core.http.RequestOptions;
+import io.vertx.core.http.WebSocket;
+import io.vertx.core.http.WebsocketVersion;
import io.vertx.core.impl.ContextImpl;
import io.vertx.core.impl.ContextInternal;
import io.vertx.core.impl.VertxInternal;
@@ -75,8 +90,9 @@ public class HttpClientImpl implements HttpClient, MetricsProvider {
return null;
}
String requestURI = uri.getPath();
- if (uri.getQuery() != null) {
- requestURI += "?" + uri.getQuery();
+ String query = uri.getQuery();
+ if (query != null) {
+ requestURI += "?" + query;
}
return Future.succeededFuture(createRequest(m, uri.getHost(), port, ssl, requestURI, null));
}
diff --git a/src/main/java/io/vertx/core/http/impl/HttpUtils.java b/src/main/java/io/vertx/core/http/impl/HttpUtils.java
--- a/src/main/java/io/vertx/core/http/impl/HttpUtils.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpUtils.java
@@ -34,12 +34,7 @@
import java.util.List;
import java.util.Map;
-import static io.vertx.core.http.Http2Settings.DEFAULT_ENABLE_PUSH;
-import static io.vertx.core.http.Http2Settings.DEFAULT_HEADER_TABLE_SIZE;
-import static io.vertx.core.http.Http2Settings.DEFAULT_INITIAL_WINDOW_SIZE;
-import static io.vertx.core.http.Http2Settings.DEFAULT_MAX_CONCURRENT_STREAMS;
-import static io.vertx.core.http.Http2Settings.DEFAULT_MAX_FRAME_SIZE;
-import static io.vertx.core.http.Http2Settings.DEFAULT_MAX_HEADER_LIST_SIZE;
+import static io.vertx.core.http.Http2Settings.*;
/**
* Various http utils.
@@ -175,19 +170,19 @@ public static URI resolveURIReference(URI base, String ref) throws URISyntaxExce
scheme = _ref.getScheme();
authority = _ref.getAuthority();
path = removeDots(_ref.getPath());
- query = _ref.getQuery();
+ query = _ref.getRawQuery();
} else {
if (_ref.getAuthority() != null) {
authority = _ref.getAuthority();
path = _ref.getPath();
- query = _ref.getQuery();
+ query = _ref.getRawQuery();
} else {
if (_ref.getPath().length() == 0) {
path = base.getPath();
- if (_ref.getQuery() != null) {
- query = _ref.getQuery();
+ if (_ref.getRawQuery() != null) {
+ query = _ref.getRawQuery();
} else {
- query = base.getQuery();
+ query = base.getRawQuery();
}
} else {
if (_ref.getPath().startsWith("/")) {
@@ -208,7 +203,7 @@ public static URI resolveURIReference(URI base, String ref) throws URISyntaxExce
}
path = removeDots(mergedPath);
}
- query = _ref.getQuery();
+ query = _ref.getRawQuery();
}
authority = base.getAuthority();
}
| diff --git a/src/test/java/io/vertx/test/core/HttpTest.java b/src/test/java/io/vertx/test/core/HttpTest.java
--- a/src/test/java/io/vertx/test/core/HttpTest.java
+++ b/src/test/java/io/vertx/test/core/HttpTest.java
@@ -644,7 +644,7 @@ private void testURIAndPath(String uri, String path) {
@Test
public void testParamUmlauteDecoding() throws UnsupportedEncodingException {
- testParamDecoding("äüö");
+ testParamDecoding("\u00e4\u00fc\u00f6");
}
@Test
@@ -669,7 +669,7 @@ public void testParamNormalDecoding() throws UnsupportedEncodingException {
@Test
public void testParamAltogetherDecoding() throws UnsupportedEncodingException {
- testParamDecoding("äüö+% hello");
+ testParamDecoding("\u00e4\u00fc\u00f6+% hello");
}
private void testParamDecoding(String value) throws UnsupportedEncodingException {
@@ -3813,6 +3813,46 @@ class MockResp implements HttpClientResponse {
}
}
+ @Test
+ public void testFollowRedirectEncodedParams() throws Exception {
+ String value1 = "\ud55c\uae00", value2 = "A B+C", value3 = "123 \u20ac";
+ server.requestHandler(req -> {
+ switch (req.path()) {
+ case "/first/call/from/client":
+ StringBuilder location = null;
+ try {
+ location = new StringBuilder()
+ .append(req.scheme()).append("://").append(DEFAULT_HTTP_HOST).append(':').append(DEFAULT_HTTP_PORT)
+ .append("/redirected/from/client?")
+ .append("encoded1=").append(URLEncoder.encode(value1, "UTF-8")).append('&')
+ .append("encoded2=").append(URLEncoder.encode(value2, "UTF-8")).append('&')
+ .append("encoded3=").append(URLEncoder.encode(value3, "UTF-8"));
+ } catch (UnsupportedEncodingException e) {
+ fail(e);
+ }
+ req.response()
+ .setStatusCode(302)
+ .putHeader("location", location.toString())
+ .end();
+ break;
+ case "/redirected/from/client":
+ assertEquals(value1, req.params().get("encoded1"));
+ assertEquals(value2, req.params().get("encoded2"));
+ assertEquals(value3, req.params().get("encoded3"));
+ req.response().end();
+ break;
+ default:
+ fail("Unknown path: " + req.path());
+ }
+ });
+ startServer();
+ client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/first/call/from/client", resp -> {
+ assertEquals(200, resp.statusCode());
+ testComplete();
+ }).setFollowRedirects(true).end();
+ await();
+ }
+
@Test
public void testServerResponseCloseHandlerNotHoldingLock() throws Exception {
server.requestHandler(req -> {
| HttpClientImpl DEFAULT_HANDLER uses uri.getQuery instead of getRawQuery when extracting the redirection URI from the response location header
Hi,
I found that the Vert.x HttpClient DEFAULT_HANDLER uses java.net.URI.getQuery instead of getRawQuery when it extracts a redirection URI from the response location header.
Even Vert.x HttpUtils.resolveURIReference uses java.net.URI.getQuery and then redirects with the decoded query.
java.net.URI.getQuery returns the decoded query, according to its documentation:
```
String java.net.URI.getQuery()
Returns the decoded query component of this URI.
The string returned by this method is equal to that returned by the getRawQuery method except that all sequences of escaped octets are decoded.
```
In my case, the location header value often includes a URL-encoded query, because my service covers not only the English alphabet but also ASCII-unsafe languages.
The redirection target server still needs the URL-encoded query for ASCII-unsafe languages.
I've made a patch for that, referring to Apache HttpClient, which uses getRawQuery, and applied it through HttpClient's redirect handler.
I guess this issue is not only mine.
I think java.net.URI.getRawQuery is the proper choice rather than getQuery.
Do you have any reason for using getQuery that I'm not aware of?
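The difference is easy to demonstrate with plain java.net.URI (editor's sketch; the URL is made up):

```java
import java.net.URI;

public class QueryDecodingSketch {
  public static void main(String[] args) throws Exception {
    URI uri = new URI("http://example.com/redirect?v=A%20B%2BC");
    System.out.println(uri.getQuery());    // v=A B+C      (decoded, the escaping is lost)
    System.out.println(uri.getRawQuery()); // v=A%20B%2BC  (exactly what should be forwarded)
  }
}
```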
| I don't think there is a specific reason to use getQuery. If you provide a patch with a test which fails before the fix and passes after, it would most certainly be merged.
Can you give an example where this raises an issue?
Here is sample code that raises this issue.
You can just run main; the console output then shows what happens with each redirection handler: one is DEFAULT_HANDLER and the other is my patched handler.
You just change the QUERY_WITH_ASCII_UNSAFE variable assignment between the 2 cases:
CASE 1. query is "한글", Korean language
CASE 2. query is "A B+C", which has '+'
The console output shows the result for each case:
```
DEFAULT_HANDLER
ORIGINAL TEXT [한글] TARGET SERVER RECEIVED TEXT [■ユワ↑ᄌタ]
ORIGINAL TEXT [A B+C] TARGET SERVER RECEIVED TEXT [A B C]
PATCH_HANDLER
ORIGINAL TEXT [한글] TARGET SERVER RECEIVED TEXT [한글]
ORIGINAL TEXT [A B+C] TARGET SERVER RECEIVED TEXT [A B+C]
```
```
#DELETE CODE
```
I'm sorry for the overly long comment. If it's not appropriate, please let me know and I will attach the file instead.
--------------------- UPDATED
I just attached a small reproducer in a new comment:
https://github.com/eclipse/vert.x/issues/2263#issuecomment-356842787
Can you just provide a small reproducer of the issue?
After the server below has started, call
```java
vertx.createHttpClient().getAbs("http://localhost:7002/first/call/from/client", resp->{}).setFollowRedirects(true).end();
```
and then check what the target receives for the query params.
### server
```java
Router router = Router.router(vertx);
//------------------------------------------
// path for sending response with location to client
//------------------------------------------
router.route("/first/call/from/client").handler(rc ->{
rc.request().bodyHandler(buf -> {
String encoded1 = "한글";
String encoded2 = "A B+C";
try {
encoded1 = URLEncoder.encode(encoded1, "UTF-8");
encoded2 = URLEncoder.encode(encoded2, "UTF-8");
} catch (UnsupportedEncodingException e) {
e.printStackTrace();
}
rc.response()
            .putHeader("location", "http://localhost:7002/redirected/from/client?encoded1=" + encoded1 + "&encoded2=" + encoded2)
.setStatusCode(302)
.end();
});
});
//------------------------------------------
// path for redirection target
//------------------------------------------
router.route("/redirected/from/client").handler(rc ->{
String encoded1 = rc.request().params().get("encoded1");
String encoded2 = rc.request().params().get("encoded2");
Buffer body = Buffer.buffer();
// what the redirection target have received compared with original query of location header value
body.appendString("TARGET SERVER RECEIVED TEXT [" + encoded1 + "] [" + encoded2 + "]");
rc.response()
.putHeader("Content-Length", String.valueOf(body.length()))
.write(body.toString())
.end();
});
//------------------------------------------
// start server with port 7002
//------------------------------------------
Future<Void> started = Future.<Void>future();
vertx.createHttpServer()
.requestHandler(router::accept)
.listen(7002);
``` | 2018-01-30T16:26:08Z | 3.5 |
eclipse-vertx/vert.x | 2,354 | eclipse-vertx__vert.x-2354 | [
"2353"
] | 087d9f48420ff657ca67cd6a36ce76c845abcac2 | diff --git a/src/main/java/io/vertx/core/buffer/Buffer.java b/src/main/java/io/vertx/core/buffer/Buffer.java
--- a/src/main/java/io/vertx/core/buffer/Buffer.java
+++ b/src/main/java/io/vertx/core/buffer/Buffer.java
@@ -19,6 +19,7 @@
import io.vertx.core.ServiceHelper;
import io.vertx.core.json.JsonArray;
import io.vertx.core.json.JsonObject;
+import io.vertx.core.shareddata.Shareable;
import io.vertx.core.shareddata.impl.ClusterSerializable;
import io.vertx.core.spi.BufferFactory;
@@ -36,7 +37,7 @@
* @author <a href="http://tfox.org">Tim Fox</a>
*/
@VertxGen
-public interface Buffer extends ClusterSerializable {
+public interface Buffer extends ClusterSerializable, Shareable {
/**
* Create a new, empty buffer.
diff --git a/src/main/java/io/vertx/core/json/JsonArray.java b/src/main/java/io/vertx/core/json/JsonArray.java
--- a/src/main/java/io/vertx/core/json/JsonArray.java
+++ b/src/main/java/io/vertx/core/json/JsonArray.java
@@ -12,6 +12,7 @@
package io.vertx.core.json;
import io.vertx.core.buffer.Buffer;
+import io.vertx.core.shareddata.Shareable;
import io.vertx.core.shareddata.impl.ClusterSerializable;
import java.time.Instant;
@@ -33,7 +34,7 @@
*
* @author <a href="http://tfox.org">Tim Fox</a>
*/
-public class JsonArray implements Iterable<Object>, ClusterSerializable {
+public class JsonArray implements Iterable<Object>, ClusterSerializable, Shareable {
private List<Object> list;
@@ -571,6 +572,7 @@ public String encodePrettily() {
*
* @return a copy
*/
+ @Override
public JsonArray copy() {
List<Object> copiedList = new ArrayList<>(list.size());
for (Object val: list) {
diff --git a/src/main/java/io/vertx/core/json/JsonObject.java b/src/main/java/io/vertx/core/json/JsonObject.java
--- a/src/main/java/io/vertx/core/json/JsonObject.java
+++ b/src/main/java/io/vertx/core/json/JsonObject.java
@@ -12,6 +12,7 @@
import io.vertx.codegen.annotations.Fluent;
import io.vertx.core.buffer.Buffer;
+import io.vertx.core.shareddata.Shareable;
import io.vertx.core.shareddata.impl.ClusterSerializable;
import java.nio.charset.StandardCharsets;
@@ -34,7 +35,7 @@
*
* @author <a href="http://tfox.org">Tim Fox</a>
*/
-public class JsonObject implements Iterable<Map.Entry<String, Object>>, ClusterSerializable {
+public class JsonObject implements Iterable<Map.Entry<String, Object>>, ClusterSerializable, Shareable {
private Map<String, Object> map;
@@ -784,6 +785,7 @@ public Buffer toBuffer() {
*
* @return a copy of the object
*/
+ @Override
public JsonObject copy() {
Map<String, Object> copiedMap;
if (map instanceof LinkedHashMap) {
diff --git a/src/main/java/io/vertx/core/shareddata/LocalMap.java b/src/main/java/io/vertx/core/shareddata/LocalMap.java
--- a/src/main/java/io/vertx/core/shareddata/LocalMap.java
+++ b/src/main/java/io/vertx/core/shareddata/LocalMap.java
@@ -24,13 +24,12 @@
/**
* Local maps can be used to share data safely in a single Vert.x instance.
* <p>
- * The map only allows immutable keys and values in the map, OR certain mutable objects such as {@link io.vertx.core.buffer.Buffer}
- * instances which will be copied when they are added to the map.
+ * By default the map allows immutable keys and values.
+ * Custom keys and values should implement the {@link Shareable} interface; the map returns copies of them.
* <p>
* This ensures there is no shared access to mutable state from different threads (e.g. different event loops) in the
* Vert.x instance, and means you don't have to protect access to that state using synchronization or locks.
* <p>
- * Your own objects can be marked as immutable by implementing the {@link Shareable} interface.
*
* Since the version 3.4, this class extends the {@link Map} interface. However some methods are only accessible in Java.
*
diff --git a/src/main/java/io/vertx/core/shareddata/Shareable.java b/src/main/java/io/vertx/core/shareddata/Shareable.java
--- a/src/main/java/io/vertx/core/shareddata/Shareable.java
+++ b/src/main/java/io/vertx/core/shareddata/Shareable.java
@@ -12,18 +12,28 @@
package io.vertx.core.shareddata;
/**
- * A marker interface which allows you to put arbitrary objects into a {@link io.vertx.core.shareddata.LocalMap}.
+ * An interface which allows you to put arbitrary objects into a {@link io.vertx.core.shareddata.LocalMap}.
* <p>
- * Normally local maps only allow immutable objects or other copiable objects such as {@link io.vertx.core.buffer.Buffer}
- * instances in order to avoid shared access to mutable state.
+ * Normally local maps only allow immutable or copiable objects in order to avoid shared access to mutable state.
* <p>
* However if you have an object that you know is thread-safe you can mark it with this interface and then you
* will be able to add it to {@link io.vertx.core.shareddata.LocalMap} instances.
* <p>
+ * Mutable objects that you want to store in a {@link io.vertx.core.shareddata.LocalMap}
+ * should override the {@link Shareable#copy()} method.
+ * <p>
* Use this interface with caution.
* <p>
*
* @author <a href="http://tfox.org">Tim Fox</a>
*/
public interface Shareable {
+
+ /**
+ * Returns a copy of the object.
+ * Only mutable objects should provide a custom implementation of the method.
+ */
+ default Shareable copy() {
+ return this;
+ }
}
diff --git a/src/main/java/io/vertx/core/shareddata/impl/Checker.java b/src/main/java/io/vertx/core/shareddata/impl/Checker.java
--- a/src/main/java/io/vertx/core/shareddata/impl/Checker.java
+++ b/src/main/java/io/vertx/core/shareddata/impl/Checker.java
@@ -32,9 +32,6 @@ static void checkType(Object obj) {
obj instanceof Byte ||
obj instanceof Character ||
obj instanceof byte[] ||
- obj instanceof Buffer ||
- obj instanceof JsonObject ||
- obj instanceof JsonArray ||
obj instanceof Shareable) {
} else {
throw new IllegalArgumentException("Invalid type for shareddata data structure: " + obj.getClass().getName());
@@ -42,19 +39,14 @@ static void checkType(Object obj) {
}
static <T> T copyIfRequired(T obj) {
- if (obj instanceof JsonObject) {
- return (T)((JsonObject)obj).copy();
- } else if (obj instanceof JsonArray) {
- return (T) ((JsonArray) obj).copy();
- } else if (obj instanceof byte[]) {
+ if (obj instanceof byte[]) {
//Copy it
byte[] bytes = (byte[]) obj;
byte[] copy = new byte[bytes.length];
System.arraycopy(bytes, 0, copy, 0, bytes.length);
return (T) copy;
- } else if (obj instanceof Buffer) {
- //Copy it
- return (T) ((Buffer) obj).copy();
+ } else if (obj instanceof Shareable) {
+ return (T) ((Shareable) obj).copy();
} else {
return obj;
}
| diff --git a/src/test/java/io/vertx/test/core/LocalSharedDataTest.java b/src/test/java/io/vertx/test/core/LocalSharedDataTest.java
--- a/src/test/java/io/vertx/test/core/LocalSharedDataTest.java
+++ b/src/test/java/io/vertx/test/core/LocalSharedDataTest.java
@@ -15,11 +15,13 @@
import io.vertx.core.json.JsonArray;
import io.vertx.core.json.JsonObject;
import io.vertx.core.shareddata.LocalMap;
+import io.vertx.core.shareddata.Shareable;
import io.vertx.core.shareddata.SharedData;
import org.junit.Test;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.BiFunction;
import static io.vertx.test.core.TestUtils.*;
@@ -326,7 +328,58 @@ public void testValuesCopied() {
assertFalse(containsExact(values, json3));
}
+ @Test
+ public void testCopyOnGet() {
+ testMapOperationResult(LocalMap::get);
+ }
+
+ @Test
+ public void testCopyOnPutIfAbsent() {
+ testMapOperationResult((map, key) -> map.putIfAbsent(key, new ShareableObject("some other test data")));
+ }
+ @Test
+ public void testCopyOnRemove() {
+ testMapOperationResult(LocalMap::remove);
+ }
+
+ private void testMapOperationResult(BiFunction<LocalMap<String, ShareableObject>, String, ShareableObject> operation) {
+ final String key = "key";
+ final ShareableObject value = new ShareableObject("some test data");
+ final LocalMap<String, ShareableObject> map = vertx.sharedData().getLocalMap("foo");
+ assertNull(map.put(key, value));
+
+ final ShareableObject result = operation.apply(map, key);
+
+ assertEquals(value, result);
+ assertNotSame(value, result);
+ }
+
+ private static class ShareableObject implements Shareable {
+ private final String data;
+
+ public ShareableObject(String data) {
+ this.data = data;
+ }
+
+ @Override
+ public Shareable copy() {
+ return new ShareableObject(data);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ final ShareableObject another = (ShareableObject) o;
+ return data.equals(another.data);
+ }
+
+ @Override
+ public int hashCode() {
+ return data.hashCode();
+ }
+ }
class SomeOtherClass {
}
| There is no proper way to store mutable objects in a local map
It is possible only for JsonObject, byte arrays, and some other copyable objects known to Vert.x.
It would be useful to have a mechanism to store mutable user objects in a local map if they can be copied.
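For illustration, a sketch of what such a mechanism looks like with the `Shareable#copy()` approach taken in this PR (the `Counter` class is hypothetical):

```java
import io.vertx.core.Vertx;
import io.vertx.core.shareddata.LocalMap;
import io.vertx.core.shareddata.Shareable;

public class Counter implements Shareable {
  private int value;

  public Counter(int value) { this.value = value; }

  public void increment() { value++; }

  @Override
  public Shareable copy() {            // the map stores and returns copies, never the shared instance
    return new Counter(value);
  }

  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();
    LocalMap<String, Counter> map = vertx.sharedData().getLocalMap("counters");
    map.put("c", new Counter(0));
    Counter c = map.get("c");          // a copy: mutating it cannot corrupt the shared state
    c.increment();
  }
}
```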
| 2018-03-22T16:22:32Z | 3.5 |
|
eclipse-vertx/vert.x | 2,209 | eclipse-vertx__vert.x-2209 | [
"2218"
] | fb75176407134802a23cb1f165dc0743ba308f9e | diff --git a/src/main/java/io/vertx/core/http/HttpClientRequest.java b/src/main/java/io/vertx/core/http/HttpClientRequest.java
--- a/src/main/java/io/vertx/core/http/HttpClientRequest.java
+++ b/src/main/java/io/vertx/core/http/HttpClientRequest.java
@@ -328,7 +328,7 @@ default boolean reset() {
* <p/>
* <ul>
   * <li>for HTTP/2, this sends an HTTP/2 reset frame with the specified error {@code code}</li>
- * <li>for HTTP/1.x, this closes the connection after the current in-flight requests are ended</li>
+ * <li>for HTTP/1.x, this closes the connection when the current request is inflight</li>
* </ul>
* <p/>
   * When the request has not yet been sent, the request will be aborted and false is returned as an indicator.
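In practice the documented behaviour looks like this (editor's sketch; assumes an existing `HttpClient` named `client` and a hypothetical endpoint):

```java
HttpClientRequest request = client.get(8080, "localhost", "/slow", resp -> {});
request.exceptionHandler(err -> System.out.println("request failed: " + err));
request.end();
// HTTP/2: sends a reset frame with code 0; HTTP/1.x: closes the connection while the request is in flight
boolean resetSent = request.reset(0L); // false if the request had not been sent yet
```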
diff --git a/src/main/java/io/vertx/core/http/HttpConnection.java b/src/main/java/io/vertx/core/http/HttpConnection.java
--- a/src/main/java/io/vertx/core/http/HttpConnection.java
+++ b/src/main/java/io/vertx/core/http/HttpConnection.java
@@ -68,11 +68,11 @@ default HttpConnection setWindowSize(int windowSize) {
}
/**
- * Like {@link #goAway(long, int)} with a last stream id {@code 2^31-1}.
+  * Like {@link #goAway(long, int)} with a last stream id {@code -1}, which disallows any new stream creation.
*/
@Fluent
default HttpConnection goAway(long errorCode) {
- return goAway(errorCode, Integer.MAX_VALUE);
+ return goAway(errorCode, -1);
}
/**
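The practical effect of the new default (editor's sketch; assumes an HTTP/2 client request named `request` with a connection handler attached):

```java
request.connectionHandler(conn -> {
  // previously goAway(errorCode) advertised last-stream-id 2^31-1, still allowing new streams;
  // it now advertises -1, telling the peer not to create any new stream
  conn.goAway(0);
});
```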
diff --git a/src/main/java/io/vertx/core/http/impl/ClientConnection.java b/src/main/java/io/vertx/core/http/impl/ClientConnection.java
deleted file mode 100644
--- a/src/main/java/io/vertx/core/http/impl/ClientConnection.java
+++ /dev/null
@@ -1,623 +0,0 @@
-/*
- * Copyright (c) 2011-2013 The original author or authors
- * ------------------------------------------------------
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Apache License v2.0 which accompanies this distribution.
- *
- * The Eclipse Public License is available at
- * http://www.eclipse.org/legal/epl-v10.html
- *
- * The Apache License v2.0 is available at
- * http://www.opensource.org/licenses/apache2.0.php
- *
- * You may elect to redistribute this code under either of these licenses.
- */
-
-package io.vertx.core.http.impl;
-
-import io.netty.buffer.ByteBuf;
-import io.netty.channel.*;
-import io.netty.handler.codec.http.*;
-import io.netty.handler.codec.http.HttpHeaders;
-import io.netty.handler.codec.http.websocketx.*;
-import io.netty.util.ReferenceCountUtil;
-import io.vertx.core.Handler;
-import io.vertx.core.MultiMap;
-import io.vertx.core.VertxException;
-import io.vertx.core.buffer.Buffer;
-import io.vertx.core.http.*;
-import io.vertx.core.http.HttpMethod;
-import io.vertx.core.http.HttpVersion;
-import io.vertx.core.http.impl.ws.WebSocketFrameInternal;
-import io.vertx.core.impl.ContextImpl;
-import io.vertx.core.logging.Logger;
-import io.vertx.core.logging.LoggerFactory;
-import io.vertx.core.net.NetSocket;
-import io.vertx.core.net.impl.NetSocketImpl;
-import io.vertx.core.net.impl.VertxNetHandler;
-import io.vertx.core.spi.metrics.HttpClientMetrics;
-
-import java.net.URI;
-import java.util.ArrayDeque;
-import java.util.Deque;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Queue;
-
-import static io.vertx.core.http.HttpHeaders.ACCEPT_ENCODING;
-import static io.vertx.core.http.HttpHeaders.CLOSE;
-import static io.vertx.core.http.HttpHeaders.CONNECTION;
-import static io.vertx.core.http.HttpHeaders.DEFLATE_GZIP;
-import static io.vertx.core.http.HttpHeaders.HOST;
-import static io.vertx.core.http.HttpHeaders.KEEP_ALIVE;
-import static io.vertx.core.http.HttpHeaders.TRANSFER_ENCODING;
-
-/**
- *
- * This class is optimised for performance when used on the same event loop. However it can be used safely from other threads.
- *
- * The internal state is protected using the synchronized keyword. If always used on the same event loop, then
- * we benefit from biased locking which makes the overhead of synchronized near zero.
- *
- * @author <a href="http://tfox.org">Tim Fox</a>
- */
-class ClientConnection extends Http1xConnectionBase implements HttpClientConnection, HttpClientStream {
-
- private static final Logger log = LoggerFactory.getLogger(ClientConnection.class);
-
- private final HttpClientImpl client;
- private final boolean ssl;
- private final String host;
- private final int port;
- private final Http1xPool pool;
- private final Object endpointMetric;
- // Requests can be pipelined so we need a queue to keep track of requests
- private final Deque<HttpClientRequestImpl> requests = new ArrayDeque<>();
- private final HttpClientMetrics metrics;
- private final HttpVersion version;
-
- private WebSocketClientHandshaker handshaker;
- private HttpClientRequestImpl currentRequest;
- private HttpClientResponseImpl currentResponse;
- private HttpClientRequestImpl requestForResponse;
- private WebSocketImpl ws;
-
- private boolean reset;
- private boolean paused;
- private Buffer pausedChunk;
-
- ClientConnection(HttpVersion version, HttpClientImpl client, Object endpointMetric, ChannelHandlerContext channel, boolean ssl, String host,
- int port, ContextImpl context, Http1xPool pool, HttpClientMetrics metrics) {
- super(client.getVertx(), channel, context);
- this.client = client;
- this.ssl = ssl;
- this.host = host;
- this.port = port;
- this.pool = pool;
- this.metrics = metrics;
- this.version = version;
- this.endpointMetric = endpointMetric;
- }
-
- public HttpClientMetrics metrics() {
- return metrics;
- }
-
- synchronized HttpClientRequestImpl getCurrentRequest() {
- return currentRequest;
- }
-
- synchronized void toWebSocket(String requestURI, MultiMap headers, WebsocketVersion vers, String subProtocols,
- int maxWebSocketFrameSize, Handler<WebSocket> wsConnect) {
- if (ws != null) {
- throw new IllegalStateException("Already websocket");
- }
-
- try {
- URI wsuri = new URI(requestURI);
- if (!wsuri.isAbsolute()) {
- // Netty requires an absolute url
- wsuri = new URI((ssl ? "https:" : "http:") + "//" + host + ":" + port + requestURI);
- }
- WebSocketVersion version =
- WebSocketVersion.valueOf((vers == null ?
- WebSocketVersion.V13 : vers).toString());
- HttpHeaders nettyHeaders;
- if (headers != null) {
- nettyHeaders = new DefaultHttpHeaders();
- for (Map.Entry<String, String> entry: headers) {
- nettyHeaders.add(entry.getKey(), entry.getValue());
- }
- } else {
- nettyHeaders = null;
- }
- handshaker = WebSocketClientHandshakerFactory.newHandshaker(wsuri, version, subProtocols, false,
- nettyHeaders, maxWebSocketFrameSize,!client.getOptions().isSendUnmaskedFrames(),false);
- ChannelPipeline p = chctx.pipeline();
- p.addBefore("handler", "handshakeCompleter", new HandshakeInboundHandler(wsConnect, version != WebSocketVersion.V00));
- handshaker.handshake(chctx.channel()).addListener(future -> {
- Handler<Throwable> handler = exceptionHandler();
- if (!future.isSuccess() && handler != null) {
- handler.handle(future.cause());
- }
- });
- } catch (Exception e) {
- handleException(e);
- }
- }
-
- private final class HandshakeInboundHandler extends ChannelInboundHandlerAdapter {
-
- private final boolean supportsContinuation;
- private final Handler<WebSocket> wsConnect;
- private final ContextImpl context;
- private final Queue<Object> buffered = new ArrayDeque<>();
- private FullHttpResponse response;
- private boolean handshaking = true;
-
- public HandshakeInboundHandler(Handler<WebSocket> wsConnect, boolean supportsContinuation) {
- this.supportsContinuation = supportsContinuation;
- this.wsConnect = wsConnect;
- this.context = vertx.getContext();
- }
-
- @Override
- public void channelInactive(ChannelHandlerContext ctx) throws Exception {
- super.channelInactive(ctx);
- // if still handshaking this means we not got any response back from the server and so need to notify the client
- // about it as otherwise the client would never been notified.
- if (handshaking) {
- handleException(new WebSocketHandshakeException("Connection closed while handshake in process"));
- }
- }
-
- @Override
- public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
- if (handshaker != null && handshaking) {
- if (msg instanceof HttpResponse) {
- HttpResponse resp = (HttpResponse) msg;
- HttpResponseStatus status = resp.status();
- if (status.code() != 101) {
- handshaker = null;
- close();
- handleException(new WebsocketRejectedException(status.code()));
- return;
- }
- response = new DefaultFullHttpResponse(resp.protocolVersion(), status);
- response.headers().add(resp.headers());
- }
-
- if (msg instanceof HttpContent) {
- if (response != null) {
- response.content().writeBytes(((HttpContent) msg).content());
- if (msg instanceof LastHttpContent) {
- response.trailingHeaders().add(((LastHttpContent) msg).trailingHeaders());
- try {
- handshakeComplete(ctx, response);
- chctx.pipeline().remove(HandshakeInboundHandler.this);
- for (; ; ) {
- Object m = buffered.poll();
- if (m == null) {
- break;
- }
- ctx.fireChannelRead(m);
- }
- } catch (WebSocketHandshakeException e) {
- close();
- handleException(e);
- }
- }
- }
- }
- } else {
- buffered.add(msg);
- }
- }
-
- private void handleException(Exception e) {
- handshaking = false;
- buffered.clear();
- Handler<Throwable> handler = exceptionHandler();
- if (handler != null) {
- context.executeFromIO(() -> {
- handler.handle(e);
- });
- } else {
- log.error("Error in websocket handshake", e);
- }
- }
-
- private void handshakeComplete(ChannelHandlerContext ctx, FullHttpResponse response) {
- handshaking = false;
- ChannelHandler handler = ctx.pipeline().get(HttpContentDecompressor.class);
- if (handler != null) {
- // remove decompressor as its not needed anymore once connection was upgraded to websockets
- ctx.pipeline().remove(handler);
- }
- // Need to set context before constructor is called as writehandler registration needs this
- ContextImpl.setContext(context);
- WebSocketImpl webSocket = new WebSocketImpl(vertx, ClientConnection.this, supportsContinuation,
- client.getOptions().getMaxWebsocketFrameSize(),
- client.getOptions().getMaxWebsocketMessageSize());
- ws = webSocket;
- handshaker.finishHandshake(chctx.channel(), response);
- ws.subProtocol(handshaker.actualSubprotocol());
- context.executeFromIO(() -> {
- log.debug("WebSocket handshake complete");
- if (metrics != null ) {
- webSocket.setMetric(metrics.connected(endpointMetric, metric(), webSocket));
- }
- wsConnect.handle(webSocket);
- });
- }
- }
-
- public boolean isValid() {
- return !reset && chctx.channel().isOpen();
- }
-
- int getOutstandingRequestCount() {
- return requests.size();
- }
-
- @Override
- public void checkDrained() {
- handleInterestedOpsChanged();
- }
-
- @Override
- public synchronized void handleInterestedOpsChanged() {
- if (!isNotWritable()) {
- if (currentRequest != null) {
- currentRequest.handleDrained();
- } else if (ws != null) {
- ws.writable();
- }
- }
- }
-
- void handleResponse(HttpResponse resp) {
- if (resp.status().code() == 100) {
- //If we get a 100 continue it will be followed by the real response later, so we don't remove it yet
- requestForResponse = requests.peek();
- } else {
- requestForResponse = requests.poll();
- }
- if (requestForResponse == null) {
- throw new IllegalStateException("No response handler");
- }
- io.netty.handler.codec.http.HttpVersion nettyVersion = resp.protocolVersion();
- HttpVersion vertxVersion;
- if (nettyVersion == io.netty.handler.codec.http.HttpVersion.HTTP_1_0) {
- vertxVersion = HttpVersion.HTTP_1_0;
- } else if (nettyVersion == io.netty.handler.codec.http.HttpVersion.HTTP_1_1) {
- vertxVersion = HttpVersion.HTTP_1_1;
- } else {
- vertxVersion = null;
- }
- HttpClientResponseImpl nResp = new HttpClientResponseImpl(requestForResponse, vertxVersion, this, resp.status().code(), resp.status().reasonPhrase(), new HeadersAdaptor(resp.headers()));
- currentResponse = nResp;
- if (metrics != null) {
- metrics.responseBegin(requestForResponse.metric(), nResp);
- }
- if (vertxVersion != null) {
- requestForResponse.handleResponse(nResp);
- } else {
- requestForResponse.handleException(new IllegalStateException("Unsupported HTTP version: " + nettyVersion));
- }
- }
-
- public void doPause() {
- super.doPause();
- paused = true;
- }
-
- public void doResume() {
- super.doResume();
- paused = false;
- if (pausedChunk != null) {
- vertx.runOnContext(v -> {
- if (pausedChunk != null) {
- Buffer chunk = pausedChunk;
- pausedChunk = null;
- currentResponse.handleChunk(chunk);
- }
- });
- }
- }
-
- void handleResponseChunk(Buffer buff) {
- if (paused) {
- if (pausedChunk == null) {
- pausedChunk = buff.copy();
- } else {
- pausedChunk.appendBuffer(buff);
- }
- } else {
- if (pausedChunk != null) {
- buff = pausedChunk.appendBuffer(buff);
- pausedChunk = null;
- }
- currentResponse.handleChunk(buff);
- }
- }
-
- void handleResponseEnd(LastHttpContent trailer) {
- if (metrics != null) {
- HttpClientRequestBase req = currentResponse.request();
- Object reqMetric = req.metric();
- if (req.exceptionOccurred != null) {
- metrics.requestReset(reqMetric);
- } else {
- metrics.responseEnd(reqMetric, currentResponse);
- }
- }
- Buffer last = pausedChunk;
- pausedChunk = null;
- currentResponse.handleEnd(last, new HeadersAdaptor(trailer.trailingHeaders()));
-
- // We don't signal response end for a 100-continue response as a real response will follow
- // Also we keep the connection open for an HTTP CONNECT
- if (currentResponse.statusCode() != 100 && requestForResponse.method() != io.vertx.core.http.HttpMethod.CONNECT) {
-
- boolean close = false;
- // See https://tools.ietf.org/html/rfc7230#section-6.3
- String responseConnectionHeader = currentResponse.getHeader(HttpHeaders.Names.CONNECTION);
- io.vertx.core.http.HttpVersion protocolVersion = client.getOptions().getProtocolVersion();
- String requestConnectionHeader = requestForResponse.headers().get(HttpHeaders.Names.CONNECTION);
- // We don't need to protect against concurrent changes on forceClose as it only goes from false -> true
- if (HttpHeaders.Values.CLOSE.equalsIgnoreCase(responseConnectionHeader) || HttpHeaders.Values.CLOSE.equalsIgnoreCase(requestConnectionHeader)) {
- // In all cases, if we have a close connection option then we SHOULD NOT treat the connection as persistent
- close = true;
- } else if (protocolVersion == io.vertx.core.http.HttpVersion.HTTP_1_0 && !HttpHeaders.Values.KEEP_ALIVE.equalsIgnoreCase(responseConnectionHeader)) {
- // In the HTTP/1.0 case both request/response need a keep-alive connection header the connection to be persistent
- // currently Vertx forces the Connection header if keepalive is enabled for 1.0
- close = true;
- }
-
- if (close) {
- pool.responseEnded(this, true);
- } else {
- if (reset) {
- if (requests.isEmpty()) {
- pool.responseEnded(this, true);
- }
- } else {
- pool.responseEnded(this, false);
- }
- }
- }
- requestForResponse = null;
- currentResponse = null;
- }
-
- synchronized void handleWsFrame(WebSocketFrameInternal frame) {
- if (ws != null) {
- ws.handleFrame(frame);
- }
- }
-
- protected synchronized void handleClosed() {
- super.handleClosed();
- if (ws != null) {
- ws.handleClosed();
- }
- Exception e = new VertxException("Connection was closed");
-
- // Signal requests failed
- if (metrics != null) {
- for (HttpClientRequestImpl req: requests) {
- metrics.requestReset(req.metric());
- }
- if (currentResponse != null) {
- metrics.requestReset(currentResponse.request().metric());
- }
- }
-
- // Connection was closed - call exception handlers for any requests in the pipeline or one being currently written
- for (HttpClientRequestImpl req: requests) {
- if (req != currentRequest) {
- req.handleException(e);
- }
- }
- if (currentRequest != null) {
- currentRequest.handleException(e);
- } else if (currentResponse != null) {
- currentResponse.handleException(e);
- }
- }
-
- public ContextImpl getContext() {
- return super.getContext();
- }
-
- @Override
- public void reset(long code) {
- if (!reset) {
- reset = true;
- if (currentRequest != null) {
- requests.removeLast();
- }
- if (requests.size() == 0) {
- pool.responseEnded(this, true);
- }
- }
- }
-
- private HttpRequest createRequest(HttpVersion version, HttpMethod method, String rawMethod, String uri, MultiMap headers) {
- DefaultHttpRequest request = new DefaultHttpRequest(HttpUtils.toNettyHttpVersion(version), HttpUtils.toNettyHttpMethod(method, rawMethod), uri, false);
- if (headers != null) {
- for (Map.Entry<String, String> header : headers) {
- // Todo : multi valued headers
- request.headers().add(header.getKey(), header.getValue());
- }
- }
- return request;
- }
-
- private void prepareHeaders(HttpRequest request, String hostHeader, boolean chunked) {
- HttpHeaders headers = request.headers();
- headers.remove(TRANSFER_ENCODING);
- if (!headers.contains(HOST)) {
- request.headers().set(HOST, hostHeader);
- }
- if (chunked) {
- HttpHeaders.setTransferEncodingChunked(request);
- }
- if (client.getOptions().isTryUseCompression() && request.headers().get(ACCEPT_ENCODING) == null) {
- // if compression should be used but nothing is specified by the user support deflate and gzip.
- request.headers().set(ACCEPT_ENCODING, DEFLATE_GZIP);
- }
- if (!client.getOptions().isKeepAlive() && client.getOptions().getProtocolVersion() == io.vertx.core.http.HttpVersion.HTTP_1_1) {
- request.headers().set(CONNECTION, CLOSE);
- } else if (client.getOptions().isKeepAlive() && client.getOptions().getProtocolVersion() == io.vertx.core.http.HttpVersion.HTTP_1_0) {
- request.headers().set(CONNECTION, KEEP_ALIVE);
- }
- }
-
- public void writeHead(HttpMethod method, String rawMethod, String uri, MultiMap headers, String hostHeader, boolean chunked) {
- HttpRequest request = createRequest(version, method, rawMethod, uri, headers);
- prepareHeaders(request, hostHeader, chunked);
- writeToChannel(request);
- }
-
- public void writeHeadWithContent(HttpMethod method, String rawMethod, String uri, MultiMap headers, String hostHeader, boolean chunked, ByteBuf buf, boolean end) {
- HttpRequest request = createRequest(version, method, rawMethod, uri, headers);
- prepareHeaders(request, hostHeader, chunked);
- if (end) {
- if (buf != null) {
- writeToChannel(new AssembledFullHttpRequest(request, buf));
- } else {
- writeToChannel(new AssembledFullHttpRequest(request));
- }
- } else {
- writeToChannel(new AssembledHttpRequest(request, buf));
- }
- }
-
- @Override
- public void writeBuffer(ByteBuf buff, boolean end) {
- if (end) {
- if (buff != null && buff.isReadable()) {
- writeToChannel(new DefaultLastHttpContent(buff, false));
- } else {
- writeToChannel(LastHttpContent.EMPTY_LAST_CONTENT);
- }
- } else if (buff != null) {
- writeToChannel(new DefaultHttpContent(buff));
- }
- }
-
- @Override
- public void writeFrame(int type, int flags, ByteBuf payload) {
- throw new IllegalStateException("Cannot write an HTTP/2 frame over an HTTP/1.x connection");
- }
-
- @Override
- protected synchronized void handleException(Throwable e) {
- super.handleException(e);
- if (currentRequest != null) {
- currentRequest.handleException(e);
- } else {
- HttpClientRequestImpl req = requests.poll();
- if (req != null) {
- req.handleException(e);
- } else if (currentResponse != null) {
- currentResponse.handleException(e);
- }
- }
- }
-
- public synchronized void beginRequest(HttpClientRequestImpl req) {
- if (currentRequest != null) {
- throw new IllegalStateException("Connection is already writing a request");
- }
- if (metrics != null) {
- Object reqMetric = metrics.requestBegin(endpointMetric, metric(), localAddress(), remoteAddress(), req);
- req.metric(reqMetric);
- }
- this.currentRequest = req;
- this.requests.add(req);
- }
-
- public synchronized void endRequest() {
- if (currentRequest == null) {
- throw new IllegalStateException("No write in progress");
- }
- if (metrics != null) {
- metrics.requestEnd(currentRequest.metric());
- }
- currentRequest = null;
- pool.requestEnded(this);
- }
-
- @Override
- public synchronized void close() {
- if (handshaker == null) {
- super.close();
- } else {
- // make sure everything is flushed out on close
- endReadAndFlush();
- // close the websocket connection by sending a close frame.
- handshaker.close(chctx.channel(), new CloseWebSocketFrame(1000, null));
- }
- }
-
- public NetSocket createNetSocket() {
- // connection was upgraded to raw TCP socket
- NetSocketImpl socket = new NetSocketImpl(vertx, chctx, context, client.getSslHelper(), metrics);
- socket.metric(metric());
- Map<Channel, NetSocketImpl> connectionMap = new HashMap<>(1);
- connectionMap.put(chctx.channel(), socket);
-
- // Flush out all pending data
- endReadAndFlush();
-
- // remove old http handlers and replace the old handler with one that handle plain sockets
- ChannelPipeline pipeline = chctx.pipeline();
- ChannelHandler inflater = pipeline.get(HttpContentDecompressor.class);
- if (inflater != null) {
- pipeline.remove(inflater);
- }
- pipeline.remove("codec");
- pipeline.replace("handler", "handler", new VertxNetHandler(socket) {
- @Override
- public void channelRead(ChannelHandlerContext chctx, Object msg) throws Exception {
- if (msg instanceof HttpContent) {
- if (msg instanceof LastHttpContent) {
- handleResponseEnd((LastHttpContent) msg);
- }
- ReferenceCountUtil.release(msg);
- return;
- }
- super.channelRead(chctx, msg);
- }
- @Override
- protected void handleMessage(NetSocketImpl connection, ContextImpl context, ChannelHandlerContext chctx, Object msg) throws Exception {
- ByteBuf buf = (ByteBuf) msg;
- connection.handleMessageReceived(buf);
- }
- }.removeHandler(sock -> {
- pool.removeChannel(chctx.channel());
- }));
- return socket;
- }
-
- @Override
- public HttpClientConnection connection() {
- return this;
- }
-
- @Override
- public HttpVersion version() {
- // Used to determine the http version in the HttpClientRequest#sendHead handler , for HTTP/1.1 it will
- // not yet know but it will for HTTP/2
- return null;
- }
-
- @Override
- public int id() {
- return -1;
- }
-}
diff --git a/src/main/java/io/vertx/core/http/impl/ConnectionManager.java b/src/main/java/io/vertx/core/http/impl/ConnectionManager.java
--- a/src/main/java/io/vertx/core/http/impl/ConnectionManager.java
+++ b/src/main/java/io/vertx/core/http/impl/ConnectionManager.java
@@ -16,93 +16,55 @@
package io.vertx.core.http.impl;
-import io.netty.bootstrap.Bootstrap;
import io.netty.channel.Channel;
-import io.netty.channel.ChannelHandlerContext;
-import io.netty.channel.ChannelInboundHandlerAdapter;
-import io.netty.channel.ChannelPipeline;
-import io.netty.handler.codec.http.DefaultFullHttpRequest;
-import io.netty.handler.codec.http.FullHttpResponse;
-import io.netty.handler.codec.http.HttpClientCodec;
-import io.netty.handler.codec.http.HttpClientUpgradeHandler;
-import io.netty.handler.codec.http.HttpContentDecompressor;
-import io.netty.handler.codec.http.HttpHeaderNames;
-import io.netty.handler.codec.http.HttpMethod;
-import io.netty.handler.codec.http.LastHttpContent;
-import io.netty.handler.codec.http2.Http2Exception;
-import io.netty.handler.logging.LoggingHandler;
-import io.netty.handler.ssl.SslHandler;
-import io.netty.handler.timeout.IdleStateHandler;
-import io.vertx.core.AsyncResult;
import io.vertx.core.Handler;
-import io.vertx.core.http.ConnectionPoolTooBusyException;
-import io.vertx.core.http.HttpClientOptions;
+import io.vertx.core.http.HttpConnection;
import io.vertx.core.http.HttpVersion;
-import io.vertx.core.impl.ContextImpl;
-import io.vertx.core.impl.VertxInternal;
-import io.vertx.core.logging.Logger;
-import io.vertx.core.logging.LoggerFactory;
-import io.vertx.core.net.ProxyType;
-import io.vertx.core.net.SocketAddress;
-import io.vertx.core.net.impl.ChannelProvider;
-import io.vertx.core.net.impl.ProxyChannelProvider;
-import io.vertx.core.net.impl.SSLHelper;
+import io.vertx.core.http.impl.pool.Pool;
+import io.vertx.core.http.impl.pool.Waiter;
+import io.vertx.core.impl.ContextInternal;
import io.vertx.core.spi.metrics.HttpClientMetrics;
-import javax.net.ssl.SSLHandshakeException;
-import java.util.ArrayDeque;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Queue;
+import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.BiConsumer;
+import java.util.function.BiFunction;
/**
+ * The connection manager associates remote hosts with pools; it also tracks all connections so they can be closed
+ * when the manager is closed.
*
* @author <a href="http://tfox.org">Tim Fox</a>
*/
-public class ConnectionManager {
+class ConnectionManager {
- static final Logger log = LoggerFactory.getLogger(ConnectionManager.class);
-
- private final QueueManager wsQM = new QueueManager(); // The queue manager for websockets
- private final QueueManager requestQM = new QueueManager(); // The queue manager for requests
- private final VertxInternal vertx;
- private final SSLHelper sslHelper;
- private final HttpClientOptions options;
- private final HttpClientImpl client;
- private final boolean keepAlive;
- private final boolean pipelining;
private final int maxWaitQueueSize;
- private final int http2MaxConcurrency;
- private final boolean logEnabled;
- private final ChannelConnector connector;
- private final HttpClientMetrics metrics;
-
- ConnectionManager(HttpClientImpl client, HttpClientMetrics metrics) {
+ private final HttpClientMetrics metrics; // To be removed later when PoolMetrics is combined with HttpClientMetrics
+ private final HttpClientImpl client;
+ private final Map<Channel, HttpClientConnection> connectionMap = new ConcurrentHashMap<>();
+ private final Map<EndpointKey, Endpoint> endpointMap = new ConcurrentHashMap<>();
+ private final HttpVersion version;
+ private final long maxSize;
+
+ ConnectionManager(HttpClientImpl client,
+ HttpClientMetrics metrics,
+ HttpVersion version,
+ long maxSize,
+ int maxWaitQueueSize) {
this.client = client;
- this.sslHelper = client.getSslHelper();
- this.options = client.getOptions();
- this.vertx = client.getVertx();
- this.keepAlive = client.getOptions().isKeepAlive();
- this.pipelining = client.getOptions().isPipelining();
- this.maxWaitQueueSize = client.getOptions().getMaxWaitQueueSize();
- this.http2MaxConcurrency = options.getHttp2MultiplexingLimit() < 1 ? Integer.MAX_VALUE : options.getHttp2MultiplexingLimit();
- this.logEnabled = client.getOptions().getLogActivity();
- this.connector = new ChannelConnector();
+ this.maxWaitQueueSize = maxWaitQueueSize;
this.metrics = metrics;
+ this.maxSize = maxSize;
+ this.version = version;
}
- HttpClientMetrics metrics() {
- return metrics;
- }
-
- static final class ConnectionKey {
+ private static final class EndpointKey {
private final boolean ssl;
private final int port;
private final String host;
- public ConnectionKey(boolean ssl, int port, String host) {
+ EndpointKey(boolean ssl, int port, String host) {
this.ssl = ssl;
this.host = host;
this.port = port;
@@ -113,7 +75,7 @@ public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
- ConnectionKey that = (ConnectionKey) o;
+ EndpointKey that = (EndpointKey) o;
if (ssl != that.ssl) return false;
if (port != that.port) return false;
@@ -131,434 +93,77 @@ public int hashCode() {
}
}
- /**
- * The queue manager manages the connection queues for a given usage, the idea is to split
- * queues for HTTP requests and websockets. A websocket uses a pool of connections
- * usually ugpraded from HTTP/1.1, HTTP requests may ask for HTTP/2 connections but obtain
- * only HTTP/1.1 connections.
- */
- private class QueueManager {
-
- private final Map<Channel, HttpClientConnection> connectionMap = new ConcurrentHashMap<>();
- private final Map<ConnectionKey, ConnQueue> queueMap = new ConcurrentHashMap<>();
+ class Endpoint {
- ConnQueue getConnQueue(String peerHost, boolean ssl, int port, String host, HttpVersion version) {
- ConnectionKey key = new ConnectionKey(ssl, port, peerHost);
- return queueMap.computeIfAbsent(key, targetAddress -> new ConnQueue(version, this, peerHost, host, port, ssl, key));
- }
+ private final Pool<HttpClientConnection> pool;
+ private final Object metric;
- public void close() {
- for (ConnQueue queue: queueMap.values()) {
- queue.closeAllConnections();
- }
- queueMap.clear();
- for (HttpClientConnection conn : connectionMap.values()) {
- conn.close();
- }
+ public Endpoint(Pool<HttpClientConnection> pool, Object metric) {
+ this.pool = pool;
+ this.metric = metric;
}
}
- public void getConnectionForWebsocket(boolean ssl, int port, String host, Waiter waiter) {
- ConnQueue connQueue = wsQM.getConnQueue(host, ssl, port, host, HttpVersion.HTTP_1_1);
- connQueue.getConnection(waiter);
- }
-
- public void getConnectionForRequest(HttpVersion version, String peerHost, boolean ssl, int port, String host, Waiter waiter) {
- if (!keepAlive && pipelining) {
- waiter.handleFailure(new IllegalStateException("Cannot have pipelining with no keep alive"));
- } else {
- ConnQueue connQueue = requestQM.getConnQueue(peerHost, ssl, port, host, version);
- connQueue.getConnection(waiter);
- }
- }
-
- public void close() {
- wsQM.close();
- requestQM.close();
- if (metrics != null) {
- metrics.close();
- }
- }
-
- /**
- * The connection queue delegates to the connection pool, the pooling strategy.
- *
- * - HTTP/1.x pools several connections
- * - HTTP/2 uses a single connection
- *
- * After a queue is initialized with an HTTP/2 pool, this pool changed to an HTTP/1/1
- * pool if the server does not support HTTP/2 or after negotiation. In this situation
- * all waiters on this queue will use HTTP/1.1 connections.
- */
- public class ConnQueue {
-
- private final QueueManager mgr;
- private final String peerHost;
- private final boolean ssl;
- private final int port;
- private final String host;
- private final ConnectionKey key;
- private final Queue<Waiter> waiters = new ArrayDeque<>();
- private Pool<HttpClientConnection> pool;
- private int connCount;
- private final int maxSize;
- final Object metric;
-
- ConnQueue(HttpVersion version, QueueManager mgr, String peerHost, String host, int port, boolean ssl, ConnectionKey key) {
- this.key = key;
- this.host = host;
- this.port = port;
- this.ssl = ssl;
- this.peerHost = peerHost;
- this.mgr = mgr;
- if (version == HttpVersion.HTTP_2) {
- maxSize = options.getHttp2MaxPoolSize();
- pool = (Pool)new Http2Pool(this, client, metrics, mgr.connectionMap, http2MaxConcurrency, logEnabled, options.getHttp2MaxPoolSize(), options.getHttp2ConnectionWindowSize());
- } else {
- maxSize = options.getMaxPoolSize();
- pool = (Pool)new Http1xPool(client, metrics, options, this, mgr.connectionMap, version, options.getMaxPoolSize(), host, port);
- }
- this.metric = metrics != null ? metrics.createEndpoint(host, port, maxSize) : null;
- }
-
- public synchronized void getConnection(Waiter waiter) {
- HttpClientConnection conn = pool.pollConnection();
- if (conn != null && conn.isValid()) {
- ContextImpl context = waiter.context;
- if (context == null) {
- context = conn.getContext();
- } else if (context != conn.getContext()) {
- ConnectionManager.log.warn("Reusing a connection with a different context: an HttpClient is probably shared between different Verticles");
- }
- context.runOnContext(v -> deliverStream(conn, waiter));
- } else {
- if (pool.canCreateConnection(connCount)) {
- // Create a new connection
- createNewConnection(waiter);
- } else {
- // Wait in queue
- if (maxWaitQueueSize < 0 || waiters.size() < maxWaitQueueSize) {
+ void getConnection(String peerHost, boolean ssl, int port, String host,
+ Handler<HttpConnection> connectionHandler,
+ BiFunction<ContextInternal, HttpClientConnection, Boolean> onSuccess,
+ BiConsumer<ContextInternal, Throwable> onFailure) {
+ EndpointKey key = new EndpointKey(ssl, port, peerHost);
+ while (true) {
+ Endpoint endpoint = endpointMap.computeIfAbsent(key, targetAddress -> {
+ int maxPoolSize = Math.max(client.getOptions().getMaxPoolSize(), client.getOptions().getHttp2MaxPoolSize());
+ Object metric = metrics != null ? metrics.createEndpoint(host, port, maxPoolSize) : null;
+ HttpChannelConnector connector = new HttpChannelConnector(client, metric, version, ssl, peerHost, host, port);
+ Pool<HttpClientConnection> pool = new Pool<>(connector, maxWaitQueueSize, maxSize,
+ v -> {
if (metrics != null) {
- waiter.metric = metrics.enqueueRequest(metric);
+ metrics.closeEndpoint(host, port, metric);
}
- waiters.add(waiter);
- } else {
- waiter.handleFailure(new ConnectionPoolTooBusyException("Connection pool reached max wait queue size of " + maxWaitQueueSize));
- }
- }
- }
- }
-
- /**
- * Handle the connection if the waiter is not cancelled, otherwise recycle the connection.
- *
- * @param conn the connection
- */
- void deliverStream(HttpClientConnection conn, Waiter waiter) {
- if (!conn.isValid()) {
- // The connection has been closed - closed connections can be in the pool
- // Get another connection - Note that we DO NOT call connectionClosed() on the pool at this point
- // that is done asynchronously in the connection closeHandler()
- getConnection(waiter);
- } else if (waiter.isCancelled()) {
- pool.recycle(conn);
- } else {
- HttpClientStream stream;
- try {
- stream = pool.createStream(conn);
- } catch (Exception e) {
- getConnection(waiter);
- return;
- }
- waiter.handleStream(stream);
- }
- }
-
- void closeAllConnections() {
- pool.closeAllConnections();
- }
-
- private void createNewConnection(Waiter waiter) {
- connCount++;
- ContextImpl context;
- if (waiter.context == null) {
- // Embedded
- context = vertx.getOrCreateContext();
- } else {
- context = waiter.context;
- }
- sslHelper.validate(vertx);
- Bootstrap bootstrap = new Bootstrap();
- bootstrap.group(context.nettyEventLoop());
- bootstrap.channel(vertx.transport().channelType(false));
- connector.connect(this, bootstrap, context, peerHost, ssl, pool.version(), host, port, waiter);
- }
-
- /**
- * @return the next non-canceled waiters in the queue
- */
- Waiter getNextWaiter() {
- Waiter waiter = waiters.poll();
- if (metrics != null && waiter != null) {
- metrics.dequeueRequest(metric, waiter.metric);
- }
- while (waiter != null && waiter.isCancelled()) {
- waiter = waiters.poll();
- if (metrics != null && waiter != null) {
- metrics.dequeueRequest(metric, waiter.metric);
- }
- }
- return waiter;
- }
-
- // Called if the connection is actually closed OR the connection attempt failed
- public synchronized void connectionClosed() {
- connCount--;
- Waiter waiter = getNextWaiter();
- if (waiter != null) {
- // There's a waiter - so it can have a new connection
- createNewConnection(waiter);
- } else if (connCount == 0) {
- // No waiters and no connections - remove the ConnQueue
- mgr.queueMap.remove(key);
- if (metrics != null) {
- metrics.closeEndpoint(host, port, metric);
- }
- }
- }
-
- private void handshakeFailure(ContextImpl context, Channel ch, Throwable cause, Waiter waiter) {
- SSLHandshakeException sslException = new SSLHandshakeException("Failed to create SSL connection");
- if (cause != null) {
- sslException.initCause(cause);
- }
- connectionFailed(context, ch, waiter::handleFailure, sslException);
- }
-
- private void fallbackToHttp1x(Channel ch, ContextImpl context, HttpVersion fallbackVersion, int port, String host, Waiter waiter) {
- // change the pool to Http1xPool
- synchronized (this) {
- pool = (Pool)new Http1xPool(client, ConnectionManager.this.metrics, options, this, mgr.connectionMap, fallbackVersion, options.getMaxPoolSize(), host, port);
- }
- http1xConnected(fallbackVersion, context, port, host, ch, waiter);
- }
-
- private void http1xConnected(HttpVersion version, ContextImpl context, int port, String host, Channel ch, Waiter waiter) {
- ((Http1xPool)(Pool)pool).createConn(context, ch, waiter);
- }
-
- private void http2Connected(ContextImpl context, Channel ch, Waiter waiter, boolean upgrade) {
- context.executeFromIO(() -> {
- try {
- ((Http2Pool)(Pool)pool).createConn(context, ch, waiter, upgrade);
- } catch (Http2Exception e) {
- connectionFailed(context, ch, waiter::handleFailure, e);
- }
+ endpointMap.remove(key);
+ },
+ connectionMap::put,
+ connectionMap::remove);
+ return new Endpoint(pool, metric);
});
- }
-
- private void connectionFailed(ContextImpl context, Channel ch, Handler<Throwable> connectionExceptionHandler,
- Throwable t) {
- // If no specific exception handler is provided, fall back to the HttpClient's exception handler.
- // If that doesn't exist just log it
- Handler<Throwable> exHandler =
- connectionExceptionHandler == null ? log::error : connectionExceptionHandler;
-
- context.executeFromIO(() -> {
- connectionClosed();
- try {
- ch.close();
- } catch (Exception ignore) {
- }
- exHandler.handle(t);
- });
- }
- }
-
- /**
- * The logic for the connection pool because HTTP/1 and HTTP/2 have different pooling logics.
- */
- interface Pool<C extends HttpClientConnection> {
-
- HttpVersion version();
-
- C pollConnection();
-
- /**
- * Determine when a new connection should be created
- *
- * @param connCount the actual connection count including the one being created
- * @return true whether or not a new connection can be created
- */
- boolean canCreateConnection(int connCount);
-
- void closeAllConnections();
-
- void recycle(C conn);
-
- HttpClientStream createStream(C conn) throws Exception;
-
- }
-
- /**
- * The ChannelConnector performs the channel configuration and connection according to the
- * client options and the protocol version.
- * When the channel connects or fails to connect, it calls back the ConnQueue that initiated the
- * connection.
- */
- private class ChannelConnector {
-
- protected void connect(
- ConnQueue queue,
- Bootstrap bootstrap,
- ContextImpl context,
- String peerHost,
- boolean ssl,
- HttpVersion version,
- String host,
- int port,
- Waiter waiter) {
-
- applyConnectionOptions(options, bootstrap);
-
- ChannelProvider channelProvider;
- // http proxy requests are handled in HttpClientImpl, everything else can use netty proxy handler
- if (options.getProxyOptions() == null || !ssl && options.getProxyOptions().getType()==ProxyType.HTTP ) {
- channelProvider = ChannelProvider.INSTANCE;
+ Object metric;
+ if (metrics != null) {
+ metric = metrics.enqueueRequest(endpoint.metric);
} else {
- channelProvider = ProxyChannelProvider.INSTANCE;
- }
-
- boolean useAlpn = options.isUseAlpn();
- Handler<Channel> channelInitializer = ch -> {
-
- // Configure pipeline
- ChannelPipeline pipeline = ch.pipeline();
- if (ssl) {
- SslHandler sslHandler = new SslHandler(sslHelper.createEngine(client.getVertx(), peerHost, port, options.isForceSni() ? peerHost : null));
- ch.pipeline().addLast("ssl", sslHandler);
- // TCP connected, so now we must do the SSL handshake
- sslHandler.handshakeFuture().addListener(fut -> {
- if (fut.isSuccess()) {
- String protocol = sslHandler.applicationProtocol();
- if (useAlpn) {
- if ("h2".equals(protocol)) {
- applyHttp2ConnectionOptions(ch.pipeline());
- queue.http2Connected(context, ch, waiter, false);
- } else {
- applyHttp1xConnectionOptions(ch.pipeline(), context);
- HttpVersion fallbackProtocol = "http/1.0".equals(protocol) ?
- HttpVersion.HTTP_1_0 : HttpVersion.HTTP_1_1;
- queue.fallbackToHttp1x(ch, context, fallbackProtocol, port, host, waiter);
- }
- } else {
- applyHttp1xConnectionOptions(ch.pipeline(), context);
- queue.http1xConnected(version, context, port, host, ch, waiter);
- }
- } else {
- queue.handshakeFailure(context, ch, fut.cause(), waiter);
- }
- });
- } else {
- if (version == HttpVersion.HTTP_2) {
- if (options.isHttp2ClearTextUpgrade()) {
- HttpClientCodec httpCodec = new HttpClientCodec();
- class UpgradeRequestHandler extends ChannelInboundHandlerAdapter {
- @Override
- public void channelActive(ChannelHandlerContext ctx) throws Exception {
- DefaultFullHttpRequest upgradeRequest =
- new DefaultFullHttpRequest(io.netty.handler.codec.http.HttpVersion.HTTP_1_1, HttpMethod.GET, "/");
- String hostHeader = peerHost;
- if (port != 80) {
- hostHeader += ":" + port;
- }
- upgradeRequest.headers().set(HttpHeaderNames.HOST, hostHeader);
- ctx.writeAndFlush(upgradeRequest);
- ctx.fireChannelActive();
- }
- @Override
- public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
- if (msg instanceof LastHttpContent) {
- ChannelPipeline p = ctx.pipeline();
- p.remove(httpCodec);
- p.remove(this);
- // Upgrade handler will remove itself
- applyHttp1xConnectionOptions(ch.pipeline(), context);
- queue.fallbackToHttp1x(ch, context, HttpVersion.HTTP_1_1, port, host, waiter);
- }
- }
- @Override
- public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
- super.userEventTriggered(ctx, evt);
- if (evt == HttpClientUpgradeHandler.UpgradeEvent.UPGRADE_SUCCESSFUL) {
- ctx.pipeline().remove(this);
- // Upgrade handler will remove itself
- }
- }
- }
- VertxHttp2ClientUpgradeCodec upgradeCodec = new VertxHttp2ClientUpgradeCodec(client.getOptions().getInitialSettings()) {
- @Override
- public void upgradeTo(ChannelHandlerContext ctx, FullHttpResponse upgradeResponse) throws Exception {
- applyHttp2ConnectionOptions(pipeline);
- queue.http2Connected(context, ch, waiter, true);
- }
- };
- HttpClientUpgradeHandler upgradeHandler = new HttpClientUpgradeHandler(httpCodec, upgradeCodec, 65536);
- ch.pipeline().addLast(httpCodec, upgradeHandler, new UpgradeRequestHandler());
- } else {
- applyHttp2ConnectionOptions(pipeline);
- }
- } else {
- applyHttp1xConnectionOptions(pipeline, context);
+ metric = null;
+ }
+ if (endpoint.pool.getConnection(new Waiter<HttpClientConnection>(client.getVertx().getOrCreateContext()) {
+ @Override
+ public void initConnection(ContextInternal ctx, HttpClientConnection conn) {
+ if (connectionHandler != null) {
+ ctx.executeFromIO(() -> {
+ connectionHandler.handle(conn);
+ });
}
}
- };
-
- Handler<AsyncResult<Channel>> channelHandler = res -> {
-
- if (res.succeeded()) {
- Channel ch = res.result();
- if (!ssl) {
- if (ch.pipeline().get(HttpClientUpgradeHandler.class) != null) {
- // Upgrade handler do nothing
- } else {
- if (version == HttpVersion.HTTP_2 && !options.isHttp2ClearTextUpgrade()) {
- queue.http2Connected(context, ch, waiter, false);
- } else {
- queue.http1xConnected(version, context, port, host, ch, waiter);
- }
- }
+ @Override
+ public void handleFailure(ContextInternal ctx, Throwable failure) {
+ if (metrics != null) {
+ metrics.dequeueRequest(endpoint.metric, metric);
}
- } else {
- queue.connectionFailed(context, null, waiter::handleFailure, res.cause());
+ onFailure.accept(ctx, failure);
}
- };
-
- channelProvider.connect(vertx, bootstrap, options.getProxyOptions(), SocketAddress.inetSocketAddress(port, host), channelInitializer, channelHandler);
- }
-
- void applyConnectionOptions(HttpClientOptions options, Bootstrap bootstrap) {
- vertx.transport().configure(options, bootstrap);
- }
-
- void applyHttp2ConnectionOptions(ChannelPipeline pipeline) {
- if (options.getIdleTimeout() > 0) {
- pipeline.addLast("idle", new IdleStateHandler(0, 0, options.getIdleTimeout()));
+ @Override
+ public boolean handleConnection(ContextInternal ctx, HttpClientConnection conn) throws Exception {
+ if (metrics != null) {
+ metrics.dequeueRequest(endpoint.metric, metric);
+ }
+ return onSuccess.apply(ctx, conn);
+ }
+ })) {
+ break;
}
}
+ }
- void applyHttp1xConnectionOptions(ChannelPipeline pipeline, ContextImpl context) {
- if (logEnabled) {
- pipeline.addLast("logging", new LoggingHandler());
- }
- pipeline.addLast("codec", new HttpClientCodec(options.getMaxInitialLineLength(), options.getMaxHeaderSize(),
- options.getMaxChunkSize(), false, false, options.getDecoderInitialBufferSize()));
- if (options.isTryUseCompression()) {
- pipeline.addLast("inflater", new HttpContentDecompressor(true));
- }
- if (options.getIdleTimeout() > 0) {
- pipeline.addLast("idle", new IdleStateHandler(0, 0, options.getIdleTimeout()));
- }
+ public void close() {
+ endpointMap.clear();
+ for (HttpClientConnection conn : connectionMap.values()) {
+ conn.close();
}
}
}
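The new getConnection above pairs ConcurrentHashMap.computeIfAbsent with an outer while (true): a pool can shut down and remove its endpoint entry concurrently with a lookup, in which case acquiring from the stale pool fails and the caller retries against a freshly created endpoint. A minimal sketch of that pattern, assuming a hypothetical SimplePool with an acquire method standing in for the real Pool/Waiter API:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Sketch of the computeIfAbsent + retry pattern used by ConnectionManager.getConnection.
// SimplePool and acquire(...) are hypothetical stand-ins for the real Pool/Waiter API.
final class EndpointRegistry {

  static final class SimplePool {
    private boolean closed;

    // Returns false when the pool has already shut down,
    // signalling the caller to retry against a freshly created pool.
    synchronized boolean acquire(Runnable onConnection) {
      if (closed) {
        return false;
      }
      onConnection.run();
      return true;
    }

    synchronized void close() {
      closed = true;
    }
  }

  private final Map<String, SimplePool> endpoints = new ConcurrentHashMap<>();

  void getConnection(String endpointKey, Runnable onConnection) {
    while (true) {
      SimplePool pool = endpoints.computeIfAbsent(endpointKey, k -> new SimplePool());
      if (pool.acquire(onConnection)) {
        break; // acquired from a live pool
      }
      // The pool closed concurrently; remove the stale entry (if still present) and retry.
      endpoints.remove(endpointKey, pool);
    }
  }

  void closeEndpoint(String endpointKey) {
    SimplePool pool = endpoints.remove(endpointKey);
    if (pool != null) {
      pool.close();
    }
  }
}

Using the two-argument endpoints.remove(endpointKey, pool) in the retry path ensures only the stale pool instance is evicted, never a newer replacement created by another thread.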
diff --git a/src/main/java/io/vertx/core/http/impl/Http1xClientConnection.java b/src/main/java/io/vertx/core/http/impl/Http1xClientConnection.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/io/vertx/core/http/impl/Http1xClientConnection.java
@@ -0,0 +1,735 @@
+/*
+ * Copyright (c) 2011-2013 The original author or authors
+ * ------------------------------------------------------
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * and Apache License v2.0 which accompanies this distribution.
+ *
+ * The Eclipse Public License is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * The Apache License v2.0 is available at
+ * http://www.opensource.org/licenses/apache2.0.php
+ *
+ * You may elect to redistribute this code under either of these licenses.
+ */
+
+package io.vertx.core.http.impl;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.channel.*;
+import io.netty.handler.codec.http.*;
+import io.netty.handler.codec.http.HttpHeaders;
+import io.netty.handler.codec.http.websocketx.*;
+import io.netty.util.ReferenceCountUtil;
+import io.vertx.core.*;
+import io.vertx.core.buffer.Buffer;
+import io.vertx.core.http.*;
+import io.vertx.core.http.HttpMethod;
+import io.vertx.core.http.HttpVersion;
+import io.vertx.core.http.impl.pool.ConnectionListener;
+import io.vertx.core.http.impl.ws.WebSocketFrameInternal;
+import io.vertx.core.impl.ContextImpl;
+import io.vertx.core.logging.Logger;
+import io.vertx.core.logging.LoggerFactory;
+import io.vertx.core.net.NetSocket;
+import io.vertx.core.net.impl.NetSocketImpl;
+import io.vertx.core.net.impl.VertxNetHandler;
+import io.vertx.core.spi.metrics.HttpClientMetrics;
+
+import java.net.URI;
+import java.util.ArrayDeque;
+import java.util.Deque;
+import java.util.Map;
+import java.util.Queue;
+
+import static io.vertx.core.http.HttpHeaders.ACCEPT_ENCODING;
+import static io.vertx.core.http.HttpHeaders.CLOSE;
+import static io.vertx.core.http.HttpHeaders.CONNECTION;
+import static io.vertx.core.http.HttpHeaders.DEFLATE_GZIP;
+import static io.vertx.core.http.HttpHeaders.HOST;
+import static io.vertx.core.http.HttpHeaders.KEEP_ALIVE;
+import static io.vertx.core.http.HttpHeaders.TRANSFER_ENCODING;
+
+/**
+ *
+ * This class is optimised for performance when used on the same event loop. However, it can be used safely from other threads.
+ *
+ * The internal state is protected using the synchronized keyword. If it is always used on the same event loop, then
+ * we benefit from biased locking, which makes the overhead of synchronized near zero.
+ *
+ * @author <a href="http://tfox.org">Tim Fox</a>
+ */
+class Http1xClientConnection extends Http1xConnectionBase implements HttpClientConnection {
+
+ private static final Logger log = LoggerFactory.getLogger(Http1xClientConnection.class);
+
+ private final ConnectionListener<HttpClientConnection> listener;
+ private final HttpClientImpl client;
+ private final boolean ssl;
+ private final String host;
+ private final int port;
+ private final Object endpointMetric;
+ private final HttpClientMetrics metrics;
+ private final HttpVersion version;
+
+ private WebSocketClientHandshaker handshaker;
+ private WebSocketImpl ws;
+
+ private final Deque<StreamImpl> pending = new ArrayDeque<>();
+ private final Deque<StreamImpl> inflight = new ArrayDeque<>();
+ private StreamImpl currentRequest;
+ private StreamImpl currentResponse;
+
+ private boolean paused;
+ private Buffer pausedChunk;
+
+ Http1xClientConnection(ConnectionListener<HttpClientConnection> listener,
+ HttpVersion version,
+ HttpClientImpl client,
+ Object endpointMetric,
+ ChannelHandlerContext channel,
+ boolean ssl,
+ String host,
+ int port,
+ ContextImpl context,
+ HttpClientMetrics metrics) {
+ super(client.getVertx(), channel, context);
+ this.listener = listener;
+ this.client = client;
+ this.ssl = ssl;
+ this.host = host;
+ this.port = port;
+ this.metrics = metrics;
+ this.version = version;
+ this.endpointMetric = endpointMetric;
+ }
+
+ private static class StreamImpl implements HttpClientStream {
+
+ private final Http1xClientConnection conn;
+ private final Handler<AsyncResult<HttpClientStream>> handler;
+ private final HttpClientRequestImpl request;
+ private HttpClientResponseImpl response;
+ private boolean requestEnded;
+ private boolean responseEnded;
+ private boolean reset;
+ private boolean close;
+ private boolean upgraded;
+
+ StreamImpl(Http1xClientConnection conn, HttpClientRequestImpl request, Handler<AsyncResult<HttpClientStream>> handler) {
+ this.request = request;
+ this.conn = conn;
+ this.handler = handler;
+ }
+
+ @Override
+ public int id() {
+ return -1;
+ }
+
+ @Override
+ public HttpVersion version() {
+ return conn.version;
+ }
+
+ @Override
+ public HttpClientConnection connection() {
+ return conn;
+ }
+
+ @Override
+ public Context getContext() {
+ return conn.context;
+ }
+
+ private HttpRequest createRequest(HttpVersion version, HttpMethod method, String rawMethod, String uri, MultiMap headers) {
+ DefaultHttpRequest request = new DefaultHttpRequest(HttpUtils.toNettyHttpVersion(version), HttpUtils.toNettyHttpMethod(method, rawMethod), uri, false);
+ if (headers != null) {
+ for (Map.Entry<String, String> header : headers) {
+ // TODO: multi-valued headers
+ request.headers().add(header.getKey(), header.getValue());
+ }
+ }
+ return request;
+ }
+
+ private void prepareHeaders(HttpRequest request, String hostHeader, boolean chunked) {
+ HttpHeaders headers = request.headers();
+ headers.remove(TRANSFER_ENCODING);
+ if (!headers.contains(HOST)) {
+ request.headers().set(HOST, hostHeader);
+ }
+ if (chunked) {
+ HttpUtil.setTransferEncodingChunked(request, true);
+ }
+ if (conn.client.getOptions().isTryUseCompression() && request.headers().get(ACCEPT_ENCODING) == null) {
+ // if compression should be used but nothing is specified by the user support deflate and gzip.
+ request.headers().set(ACCEPT_ENCODING, DEFLATE_GZIP);
+ }
+ if (!conn.client.getOptions().isKeepAlive() && conn.client.getOptions().getProtocolVersion() == io.vertx.core.http.HttpVersion.HTTP_1_1) {
+ request.headers().set(CONNECTION, CLOSE);
+ } else if (conn.client.getOptions().isKeepAlive() && conn.client.getOptions().getProtocolVersion() == io.vertx.core.http.HttpVersion.HTTP_1_0) {
+ request.headers().set(CONNECTION, KEEP_ALIVE);
+ }
+ }
+
+ public void writeHead(HttpMethod method, String rawMethod, String uri, MultiMap headers, String hostHeader, boolean chunked) {
+ HttpRequest request = createRequest(conn.version, method, rawMethod, uri, headers);
+ prepareHeaders(request, hostHeader, chunked);
+ conn.writeToChannel(request);
+ }
+
+ public void writeHeadWithContent(HttpMethod method, String rawMethod, String uri, MultiMap headers, String hostHeader, boolean chunked, ByteBuf buf, boolean end) {
+ HttpRequest request = createRequest(conn.version, method, rawMethod, uri, headers);
+ prepareHeaders(request, hostHeader, chunked);
+ if (end) {
+ if (buf != null) {
+ conn.writeToChannel(new AssembledFullHttpRequest(request, buf));
+ } else {
+ conn.writeToChannel(new AssembledFullHttpRequest(request));
+ }
+ } else {
+ conn.writeToChannel(new AssembledHttpRequest(request, buf));
+ }
+ }
+
+ @Override
+ public void writeBuffer(ByteBuf buff, boolean end) {
+ if (end) {
+ if (buff != null && buff.isReadable()) {
+ conn.writeToChannel(new DefaultLastHttpContent(buff, false));
+ } else {
+ conn.writeToChannel(LastHttpContent.EMPTY_LAST_CONTENT);
+ }
+ } else if (buff != null) {
+ conn.writeToChannel(new DefaultHttpContent(buff));
+ }
+ }
+
+ @Override
+ public void writeFrame(int type, int flags, ByteBuf payload) {
+ throw new IllegalStateException("Cannot write an HTTP/2 frame over an HTTP/1.x connection");
+ }
+
+ @Override
+ public void doSetWriteQueueMaxSize(int size) {
+ conn.doSetWriteQueueMaxSize(size);
+ }
+
+ @Override
+ public boolean isNotWritable() {
+ return conn.isNotWritable();
+ }
+
+ @Override
+ public void checkDrained() {
+ conn.handleInterestedOpsChanged();
+ }
+
+ @Override
+ public void doPause() {
+ conn.doPause();
+ }
+
+ @Override
+ public void doResume() {
+ conn.doResume();
+ }
+
+ @Override
+ public void reset(long code) {
+ synchronized (conn) {
+ if (request == null) {
+ throw new IllegalStateException("Sanity check");
+ }
+ if (!reset) {
+ reset = true;
+ if (!requestEnded || !responseEnded) {
+ conn.close();
+ }
+ }
+ }
+ }
+
+ public void beginRequest() {
+ synchronized (conn) {
+ if (conn.currentRequest != this) {
+ throw new IllegalStateException("Connection is already writing another request");
+ }
+ if (conn.metrics != null) {
+ Object reqMetric = conn.metrics.requestBegin(conn.endpointMetric, conn.metric(), conn.localAddress(), conn.remoteAddress(), request);
+ request.metric(reqMetric);
+ }
+ conn.inflight.add(conn.currentRequest);
+ }
+ }
+
+ public void endRequest() {
+ StreamImpl next;
+ synchronized (conn) {
+ if (conn.currentRequest != this) {
+ throw new IllegalStateException("No write in progress");
+ }
+ if (conn.metrics != null) {
+ conn.metrics.requestEnd(conn.currentRequest.request.metric());
+ }
+ requestEnded = true;
+ // TODO: should this also take care of the pending list?
+ checkLifecycle();
+ // Check pipelined pending request
+ next = conn.currentRequest = conn.pending.poll();
+ if (next == null) {
+ return;
+ }
+ }
+ // TODO: should this call be trampolined?
+ next.handler.handle(Future.succeededFuture(next));
+ }
+
+ @Override
+ public NetSocket createNetSocket() {
+ synchronized (conn) {
+ if (responseEnded) {
+ throw new IllegalStateException("Request already ended");
+ }
+ if (upgraded) {
+ throw new IllegalStateException("Request already upgraded to NetSocket");
+ }
+ upgraded = true;
+
+
+ // connection was upgraded to raw TCP socket
+ NetSocketImpl socket = new NetSocketImpl(conn.vertx, conn.chctx, conn.context, conn.client.getSslHelper(), conn.metrics);
+ socket.metric(conn.metric());
+
+ // Flush out all pending data
+ conn.endReadAndFlush();
+
+ // remove old http handlers and replace the old handler with one that handle plain sockets
+ ChannelPipeline pipeline = conn.chctx.pipeline();
+ ChannelHandler inflater = pipeline.get(HttpContentDecompressor.class);
+ if (inflater != null) {
+ pipeline.remove(inflater);
+ }
+ pipeline.remove("codec");
+ pipeline.replace("handler", "handler", new VertxNetHandler(socket) {
+ @Override
+ public void channelRead(ChannelHandlerContext chctx, Object msg) throws Exception {
+ if (msg instanceof HttpContent) {
+ if (msg instanceof LastHttpContent) {
+ endResponse((LastHttpContent) msg);
+ }
+ ReferenceCountUtil.release(msg);
+ return;
+ }
+ super.channelRead(chctx, msg);
+ }
+ @Override
+ protected void handleMessage(NetSocketImpl connection, ContextImpl context, ChannelHandlerContext chctx, Object msg) throws Exception {
+ ByteBuf buf = (ByteBuf) msg;
+ connection.handleMessageReceived(buf);
+ }
+ }.removeHandler(sock -> conn.listener.onDiscard()));
+
+ return socket;
+ }
+ }
+
+ HttpClientResponseImpl beginResponse(HttpResponse resp) {
+ if (conn.metrics != null) {
+ conn.metrics.responseBegin(request.metric(), response);
+ }
+ if (resp.status().code() != 100 && request.method() != io.vertx.core.http.HttpMethod.CONNECT) {
+ // See https://tools.ietf.org/html/rfc7230#section-6.3
+ String responseConnectionHeader = resp.headers().get(HttpHeaders.Names.CONNECTION);
+ io.vertx.core.http.HttpVersion protocolVersion = conn.client.getOptions().getProtocolVersion();
+ String requestConnectionHeader = request.headers().get(HttpHeaders.Names.CONNECTION);
+ // We don't need to protect against concurrent changes on forceClose as it only goes from false -> true
+ if (HttpHeaders.Values.CLOSE.equalsIgnoreCase(responseConnectionHeader) || HttpHeaders.Values.CLOSE.equalsIgnoreCase(requestConnectionHeader)) {
+ // In all cases, if we have a close connection option then we SHOULD NOT treat the connection as persistent
+ close = true;
+ } else if (protocolVersion == io.vertx.core.http.HttpVersion.HTTP_1_0 && !HttpHeaders.Values.KEEP_ALIVE.equalsIgnoreCase(responseConnectionHeader)) {
+ // In the HTTP/1.0 case both the request and the response need a keep-alive Connection header for the connection to be persistent;
+ // currently Vert.x forces the Connection header if keep-alive is enabled for 1.0
+ close = true;
+ }
+ }
+ HttpVersion version = HttpUtils.toVertxHttpVersion(resp.protocolVersion());
+ if (version != null) {
+ return response = new HttpClientResponseImpl(request, version, this, resp.status().code(), resp.status().reasonPhrase(), new HeadersAdaptor(resp.headers()));
+ } else {
+ return null;
+ }
+ }
+
+ void endResponse(LastHttpContent trailer) {
+ synchronized (conn) {
+ if (conn.metrics != null) {
+ HttpClientRequestBase req = request;
+ Object reqMetric = req.metric();
+ if (req.exceptionOccurred != null) {
+ conn.metrics.requestReset(reqMetric);
+ } else {
+ conn.metrics.responseEnd(reqMetric, response);
+ }
+ }
+
+ Buffer last = conn.pausedChunk;
+ conn.pausedChunk = null;
+ if (response != null) {
+ response.handleEnd(last, new HeadersAdaptor(trailer.trailingHeaders()));
+ }
+
+ // We also keep the connection open for an HTTP CONNECT
+ responseEnded = true;
+ if (!conn.client.getOptions().isKeepAlive()) {
+ close = true;
+ }
+ checkLifecycle();
+ }
+ }
+
+ void checkLifecycle() {
+ if (requestEnded && responseEnded) {
+ if (upgraded) {
+ // Do nothing
+ } else if (close) {
+ conn.close();
+ } else {
+ conn.listener.onRecycle(1, true);
+ }
+ }
+ }
+ }
+
+ void handleResponse(HttpResponse resp) {
+ HttpClientResponseImpl response;
+ HttpClientRequestImpl request;
+ synchronized (this) {
+ StreamImpl requestForResponse;
+ if (resp.status().code() == 100) {
+ // If we get a 100 Continue it will be followed by the real response later, so we don't remove it yet
+ requestForResponse = inflight.peek();
+ } else {
+ requestForResponse = inflight.poll();
+ }
+ if (requestForResponse == null) {
+ throw new IllegalStateException("No response handler");
+ }
+ currentResponse = requestForResponse;
+ response = currentResponse.beginResponse(resp);
+ request = currentResponse.request;
+ }
+ if (response != null) {
+ request.handleResponse(response);
+ } else {
+ request.handleException(new IllegalStateException("Unsupported HTTP version: " + resp.protocolVersion()));
+ }
+ }
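handleResponse correlates each response with a pipelined request purely by arrival order: inflight is a FIFO queue, and an interim 100 Continue only peeks at the head, because the real response that follows must poll the same entry. A compact sketch of that correlation rule, with a hypothetical Exchange type in place of StreamImpl:

import java.util.ArrayDeque;
import java.util.Deque;

// Sketch of FIFO response correlation over a pipelined HTTP/1.x connection,
// including the 100 Continue peek-vs-poll rule used in handleResponse above.
final class ResponseCorrelator {

  static final class Exchange {
    final String requestLine;
    Exchange(String requestLine) { this.requestLine = requestLine; }
  }

  private final Deque<Exchange> inflight = new ArrayDeque<>();

  void requestWritten(Exchange exchange) {
    inflight.add(exchange); // responses arrive in the same order requests were written
  }

  Exchange responseReceived(int statusCode) {
    // An interim 100 Continue is followed by the real response for the same request,
    // so the head of the queue must stay in place until that final response arrives.
    Exchange exchange = statusCode == 100 ? inflight.peek() : inflight.poll();
    if (exchange == null) {
      throw new IllegalStateException("Response received with no in-flight request");
    }
    return exchange;
  }
}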
+
+ void handleResponseChunk(Buffer buff) {
+ HttpClientResponseImpl resp;
+ synchronized (this) {
+ if (paused) {
+ if (pausedChunk == null) {
+ pausedChunk = buff.copy();
+ } else {
+ pausedChunk.appendBuffer(buff);
+ }
+ return;
+ } else {
+ if (pausedChunk != null) {
+ buff = pausedChunk.appendBuffer(buff);
+ pausedChunk = null;
+ }
+ resp = currentResponse.response;
+ if (resp == null) {
+ return;
+ }
+ }
+ }
+ resp.handleChunk(buff);
+ }
+
+ void handleResponseEnd(LastHttpContent trailer) {
+ synchronized (this) {
+ StreamImpl resp = currentResponse;
+ currentResponse = null;
+ // We don't signal response end for a 100-continue response as a real response will follow
+ if (resp.response == null || resp.response.statusCode() != 100) {
+ resp.endResponse(trailer);
+ }
+ }
+ }
+
+ public HttpClientMetrics metrics() {
+ return metrics;
+ }
+
+ synchronized void toWebSocket(String requestURI, MultiMap headers, WebsocketVersion vers, String subProtocols,
+ int maxWebSocketFrameSize, Handler<WebSocket> wsConnect) {
+ if (ws != null) {
+ throw new IllegalStateException("Already websocket");
+ }
+
+ try {
+ URI wsuri = new URI(requestURI);
+ if (!wsuri.isAbsolute()) {
+ // Netty requires an absolute URL
+ wsuri = new URI((ssl ? "https:" : "http:") + "//" + host + ":" + port + requestURI);
+ }
+ WebSocketVersion version =
+ WebSocketVersion.valueOf((vers == null ?
+ WebSocketVersion.V13 : vers).toString());
+ HttpHeaders nettyHeaders;
+ if (headers != null) {
+ nettyHeaders = new DefaultHttpHeaders();
+ for (Map.Entry<String, String> entry: headers) {
+ nettyHeaders.add(entry.getKey(), entry.getValue());
+ }
+ } else {
+ nettyHeaders = null;
+ }
+ handshaker = WebSocketClientHandshakerFactory.newHandshaker(wsuri, version, subProtocols, false,
+ nettyHeaders, maxWebSocketFrameSize, !client.getOptions().isSendUnmaskedFrames(), false);
+ ChannelPipeline p = chctx.pipeline();
+ p.addBefore("handler", "handshakeCompleter", new HandshakeInboundHandler(wsConnect, version != WebSocketVersion.V00));
+ handshaker.handshake(chctx.channel()).addListener(future -> {
+ Handler<Throwable> handler = exceptionHandler();
+ if (!future.isSuccess() && handler != null) {
+ handler.handle(future.cause());
+ }
+ });
+ } catch (Exception e) {
+ handleException(e);
+ }
+ }
+
+ private final class HandshakeInboundHandler extends ChannelInboundHandlerAdapter {
+
+ private final boolean supportsContinuation;
+ private final Handler<WebSocket> wsConnect;
+ private final ContextImpl context;
+ private final Queue<Object> buffered = new ArrayDeque<>();
+ private FullHttpResponse response;
+ private boolean handshaking = true;
+
+ public HandshakeInboundHandler(Handler<WebSocket> wsConnect, boolean supportsContinuation) {
+ this.supportsContinuation = supportsContinuation;
+ this.wsConnect = wsConnect;
+ this.context = vertx.getContext();
+ }
+
+ @Override
+ public void channelInactive(ChannelHandlerContext ctx) throws Exception {
+ super.channelInactive(ctx);
+ // If we are still handshaking, it means we did not get any response back from the server, so we need to notify the
+ // client; otherwise the client would never be notified.
+ if (handshaking) {
+ handleException(new WebSocketHandshakeException("Connection closed while handshake in process"));
+ }
+ }
+
+ @Override
+ public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
+ if (handshaker != null && handshaking) {
+ if (msg instanceof HttpResponse) {
+ HttpResponse resp = (HttpResponse) msg;
+ HttpResponseStatus status = resp.status();
+ if (status.code() != 101) {
+ handshaker = null;
+ close();
+ handleException(new WebsocketRejectedException(status.code()));
+ return;
+ }
+ response = new DefaultFullHttpResponse(resp.protocolVersion(), status);
+ response.headers().add(resp.headers());
+ }
+
+ if (msg instanceof HttpContent) {
+ if (response != null) {
+ response.content().writeBytes(((HttpContent) msg).content());
+ if (msg instanceof LastHttpContent) {
+ response.trailingHeaders().add(((LastHttpContent) msg).trailingHeaders());
+ try {
+ handshakeComplete(ctx, response);
+ chctx.pipeline().remove(HandshakeInboundHandler.this);
+ for (; ; ) {
+ Object m = buffered.poll();
+ if (m == null) {
+ break;
+ }
+ ctx.fireChannelRead(m);
+ }
+ } catch (WebSocketHandshakeException e) {
+ close();
+ handleException(e);
+ }
+ }
+ }
+ }
+ } else {
+ buffered.add(msg);
+ }
+ }
+
+ private void handleException(Exception e) {
+ handshaking = false;
+ buffered.clear();
+ Handler<Throwable> handler = exceptionHandler();
+ if (handler != null) {
+ context.executeFromIO(() -> {
+ handler.handle(e);
+ });
+ } else {
+ log.error("Error in websocket handshake", e);
+ }
+ }
+
+ private void handshakeComplete(ChannelHandlerContext ctx, FullHttpResponse response) {
+ handshaking = false;
+ ChannelHandler handler = ctx.pipeline().get(HttpContentDecompressor.class);
+ if (handler != null) {
+ // Remove the decompressor as it's no longer needed once the connection is upgraded to websockets
+ ctx.pipeline().remove(handler);
+ }
+ // Need to set the context before the constructor is called, as write handler registration needs it
+ ContextImpl.setContext(context);
+ WebSocketImpl webSocket = new WebSocketImpl(vertx, Http1xClientConnection.this, supportsContinuation,
+ client.getOptions().getMaxWebsocketFrameSize(),
+ client.getOptions().getMaxWebsocketMessageSize());
+ ws = webSocket;
+ handshaker.finishHandshake(chctx.channel(), response);
+ ws.subProtocol(handshaker.actualSubprotocol());
+ context.executeFromIO(() -> {
+ log.debug("WebSocket handshake complete");
+ if (metrics != null) {
+ webSocket.setMetric(metrics.connected(endpointMetric, metric(), webSocket));
+ }
+ wsConnect.handle(webSocket);
+ });
+ }
+ }
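While the handshake is in flight, HandshakeInboundHandler queues any messages that arrive behind the HTTP upgrade response and replays them with fireChannelRead once it removes itself from the pipeline, so no frames are lost during the handler swap. A stripped-down sketch of that buffer-and-replay idea; the isHandshakeResponse placeholder stands in for the real accumulate-then-finish handshake logic:

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;

import java.util.ArrayDeque;
import java.util.Queue;

// Sketch of the buffer-and-replay pattern used by HandshakeInboundHandler: messages that
// arrive while a handshake is still completing are queued, then replayed downstream once
// this handler removes itself from the pipeline.
class BufferingHandler extends ChannelInboundHandlerAdapter {

  private final Queue<Object> buffered = new ArrayDeque<>();
  private boolean handshaking = true;

  @Override
  public void channelRead(ChannelHandlerContext ctx, Object msg) {
    if (handshaking) {
      if (isHandshakeResponse(msg)) {
        handshaking = false;
        ctx.pipeline().remove(this);
        // Replay everything that piled up behind the handshake response, in order.
        Object m;
        while ((m = buffered.poll()) != null) {
          ctx.fireChannelRead(m);
        }
      } else {
        buffered.add(msg); // hold back until the handshake completes
      }
    } else {
      ctx.fireChannelRead(msg);
    }
  }

  // Placeholder: the real handler accumulates the full HTTP response and
  // finishes the websocket handshake before flipping this flag.
  private boolean isHandshakeResponse(Object msg) {
    return true;
  }
}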
+
+ @Override
+ public synchronized void handleInterestedOpsChanged() {
+ if (!isNotWritable()) {
+ if (currentRequest != null) {
+ currentRequest.request.handleDrained();
+ } else if (ws != null) {
+ ws.writable();
+ }
+ }
+ }
+
+ public void doPause() {
+ super.doPause();
+ paused = true;
+ }
+
+ public void doResume() {
+ super.doResume();
+ paused = false;
+ if (pausedChunk != null) {
+ context.runOnContext(v -> {
+ if (pausedChunk != null) {
+ Buffer chunk = pausedChunk;
+ pausedChunk = null;
+ currentResponse.response.handleChunk(chunk);
+ }
+ });
+ }
+ }
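doPause, doResume and handleResponseChunk together implement a small buffering backpressure scheme: while paused, chunks are appended into pausedChunk; on resume the accumulated buffer is delivered before per-chunk delivery continues. A minimal single-threaded sketch, using StringBuilder in place of Vert.x's Buffer:

import java.util.function.Consumer;

// Minimal sketch of the pause/resume buffering used for response chunks above:
// while paused, incoming chunks accumulate; resume flushes the accumulated data
// before normal chunk-by-chunk delivery continues. Single-threaded for brevity.
final class PausableChunkStream {

  private final Consumer<String> chunkHandler;
  private boolean paused;
  private StringBuilder pausedChunk;

  PausableChunkStream(Consumer<String> chunkHandler) {
    this.chunkHandler = chunkHandler;
  }

  void handleChunk(String chunk) {
    if (paused) {
      // Buffer while paused instead of delivering.
      if (pausedChunk == null) {
        pausedChunk = new StringBuilder(chunk);
      } else {
        pausedChunk.append(chunk);
      }
      return;
    }
    if (pausedChunk != null) {
      // Prepend anything buffered earlier so ordering is preserved.
      chunk = pausedChunk.append(chunk).toString();
      pausedChunk = null;
    }
    chunkHandler.accept(chunk);
  }

  void pause() {
    paused = true;
  }

  void resume() {
    paused = false;
    if (pausedChunk != null) {
      String chunk = pausedChunk.toString();
      pausedChunk = null;
      chunkHandler.accept(chunk); // flush what accumulated while paused
    }
  }
}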
+
+ synchronized void handleWsFrame(WebSocketFrameInternal frame) {
+ if (ws != null) {
+ ws.handleFrame(frame);
+ }
+ }
+
+ private void retryPending() {
+ StreamImpl stream;
+ while ((stream = pending.poll()) != null) {
+ stream.request.retry();
+ }
+ }
+
+ protected synchronized void handleClosed() {
+ super.handleClosed();
+ if (ws != null) {
+ ws.handleClosed();
+ }
+ retryPending();
+
+ Exception e = new VertxException("Connection was closed");
+
+ // Signal requests failed
+ if (metrics != null) {
+ for (StreamImpl req: inflight) {
+ metrics.requestReset(req.request.metric());
+ }
+ if (currentResponse != null) {
+ metrics.requestReset(currentResponse.request.metric());
+ }
+ }
+
+ // Connection was closed - call exception handlers for any requests in the pipeline or one being currently written
+ for (StreamImpl req: inflight) {
+ if (req != currentRequest) {
+ req.request.handleException(e);
+ }
+ }
+ if (currentRequest != null) {
+ currentRequest.request.handleException(e);
+ } else if (currentResponse != null && currentResponse.response != null) {
+ currentResponse.response.handleException(e);
+ }
+ }
+
+ public ContextImpl getContext() {
+ return super.getContext();
+ }
+
+ @Override
+ protected synchronized void handleException(Throwable e) {
+ super.handleException(e);
+ retryPending();
+ if (currentRequest != null) {
+ currentRequest.request.handleException(e);
+ } else {
+ StreamImpl req = inflight.poll();
+ if (req != null) {
+ req.request.handleException(e);
+ } else if (currentResponse != null && currentResponse.response != null) {
+ currentResponse.response.handleException(e);
+ }
+ }
+ }
+
+ @Override
+ public synchronized void close() {
+ listener.onDiscard();
+ if (handshaker == null) {
+ super.close();
+ } else {
+ // make sure everything is flushed out on close
+ endReadAndFlush();
+ // close the websocket connection by sending a close frame.
+ handshaker.close(chctx.channel(), new CloseWebSocketFrame(1000, null));
+ }
+ }
+
+ @Override
+ public void createStream(HttpClientRequestImpl req, Handler<AsyncResult<HttpClientStream>> handler) {
+ StreamImpl stream = new StreamImpl(this, req, handler);
+ synchronized (this) {
+ if (currentRequest != null) {
+ pending.add(stream);
+ return;
+ }
+ this.currentRequest = stream;
+ }
+ handler.handle(Future.succeededFuture(currentRequest));
+ }
+}
diff --git a/src/main/java/io/vertx/core/http/impl/ClientHandler.java b/src/main/java/io/vertx/core/http/impl/Http1xClientHandler.java
similarity index 71%
rename from src/main/java/io/vertx/core/http/impl/ClientHandler.java
rename to src/main/java/io/vertx/core/http/impl/Http1xClientHandler.java
--- a/src/main/java/io/vertx/core/http/impl/ClientHandler.java
+++ b/src/main/java/io/vertx/core/http/impl/Http1xClientHandler.java
@@ -25,7 +25,8 @@
import io.netty.handler.codec.http.LastHttpContent;
import io.netty.handler.codec.http.websocketx.PongWebSocketFrame;
import io.vertx.core.buffer.Buffer;
-import io.vertx.core.http.impl.ws.WebSocketFrameImpl;
+import io.vertx.core.http.HttpVersion;
+import io.vertx.core.http.impl.pool.ConnectionListener;
import io.vertx.core.http.impl.ws.WebSocketFrameInternal;
import io.vertx.core.impl.ContextImpl;
import io.vertx.core.spi.metrics.HttpClientMetrics;
@@ -33,33 +34,43 @@
/**
* @author <a href="mailto:julien@julienviet.com">Julien Viet</a>
*/
-class ClientHandler extends VertxHttpHandler<ClientConnection> {
+class Http1xClientHandler extends VertxHttpHandler<Http1xClientConnection> {
private boolean closeFrameSent;
private ContextImpl context;
private ChannelHandlerContext chctx;
- private Http1xPool pool;
- private HttpClientImpl client;
- private Object endpointMetric;
- private HttpClientMetrics metrics;
+ private final HttpVersion version;
+ private final String host;
+ private final int port;
+ private final boolean ssl;
+ private final HttpClientImpl client;
+ private final HttpClientMetrics metrics;
+ private final ConnectionListener<HttpClientConnection> listener;
+ private final Object endpointMetric;
- public ClientHandler(ContextImpl context,
- Http1xPool pool,
- HttpClientImpl client,
- Object endpointMetric,
- HttpClientMetrics metrics) {
+ public Http1xClientHandler(ConnectionListener<HttpClientConnection> listener,
+ ContextImpl context,
+ HttpVersion version,
+ String host,
+ int port,
+ boolean ssl,
+ HttpClientImpl client,
+ Object endpointMetric,
+ HttpClientMetrics metrics) {
this.context = context;
- this.pool = pool;
+ this.version = version;
this.client = client;
+ this.host = host;
+ this.port = port;
+ this.ssl = ssl;
this.endpointMetric = endpointMetric;
this.metrics = metrics;
+ this.listener = listener;
}
@Override
public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
chctx = ctx;
- ClientConnection conn = new ClientConnection(pool.version(), client, endpointMetric, ctx,
- pool.ssl(), pool.host(), pool.port(), context, pool, metrics);
- setConnection(conn);
+ Http1xClientConnection conn = new Http1xClientConnection(listener, version, client, endpointMetric, ctx, ssl, host, port, context, metrics);
if (metrics != null) {
context.executeFromIO(() -> {
Object metric = metrics.connected(conn.remoteAddress(), conn.remoteName());
@@ -67,6 +78,7 @@ public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
metrics.endpointConnected(endpointMetric, metric);
});
}
+ setConnection(conn);
}
public ChannelHandlerContext context() {
@@ -74,7 +86,15 @@ public ChannelHandlerContext context() {
}
@Override
- protected void handleMessage(ClientConnection conn, ContextImpl context, ChannelHandlerContext chctx, Object msg) throws Exception {
+ public void channelInactive(ChannelHandlerContext chctx) throws Exception {
+ if (metrics != null) {
+ metrics.endpointDisconnected(endpointMetric, getConnection().metric());
+ }
+ super.channelInactive(chctx);
+ }
+
+ @Override
+ protected void handleMessage(Http1xClientConnection conn, ContextImpl context, ChannelHandlerContext chctx, Object msg) throws Exception {
if (msg instanceof HttpObject) {
HttpObject obj = (HttpObject) msg;
DecoderResult result = obj.decoderResult();
diff --git a/src/main/java/io/vertx/core/http/impl/Http1xPool.java b/src/main/java/io/vertx/core/http/impl/Http1xPool.java
deleted file mode 100644
--- a/src/main/java/io/vertx/core/http/impl/Http1xPool.java
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * Copyright (c) 2011-2013 The original author or authors
- * ------------------------------------------------------
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Apache License v2.0 which accompanies this distribution.
- *
- * The Eclipse Public License is available at
- * http://www.eclipse.org/legal/epl-v10.html
- *
- * The Apache License v2.0 is available at
- * http://www.opensource.org/licenses/apache2.0.php
- *
- * You may elect to redistribute this code under either of these licenses.
- */
-
-package io.vertx.core.http.impl;
-
-import io.netty.channel.Channel;
-import io.vertx.core.http.HttpClientOptions;
-import io.vertx.core.http.HttpVersion;
-import io.vertx.core.impl.ContextImpl;
-import io.vertx.core.spi.metrics.HttpClientMetrics;
-
-import java.util.ArrayDeque;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Queue;
-import java.util.Set;
-
-/**
- * @author <a href="mailto:julien@julienviet.com">Julien Viet</a>
- */
-public class Http1xPool implements ConnectionManager.Pool<ClientConnection> {
-
- // Pools must locks on the queue object to keep a single lock
- private final ConnectionManager.ConnQueue queue;
- private final HttpClientImpl client;
- private final HttpClientMetrics metrics;
- private final Map<Channel, HttpClientConnection> connectionMap;
- private final boolean pipelining;
- private final boolean keepAlive;
- private final int pipeliningLimit;
- private final boolean ssl;
- private final String host;
- private final int port;
- private final HttpVersion version;
- private final Set<ClientConnection> allConnections = new HashSet<>();
- private final Queue<ClientConnection> availableConnections = new ArrayDeque<>();
- private final int maxSockets;
-
- public Http1xPool(HttpClientImpl client,
- HttpClientMetrics metrics,
- HttpClientOptions options,
- ConnectionManager.ConnQueue queue,
- Map<Channel, HttpClientConnection> connectionMap,
- HttpVersion version,
- int maxSockets,
- String host,
- int port) {
- this.queue = queue;
- this.version = version;
- this.client = client;
- this.metrics = metrics;
- this.pipelining = options.isPipelining();
- this.keepAlive = options.isKeepAlive();
- this.pipeliningLimit = options.getPipeliningLimit();
- this.ssl = options.isSsl();
- this.connectionMap = connectionMap;
- this.maxSockets = maxSockets;
- this.host = host;
- this.port = port;
- }
-
- boolean ssl() {
- return ssl;
- }
-
- String host() {
- return host;
- }
-
- int port() {
- return port;
- }
-
- @Override
- public HttpVersion version() {
- // Correct this
- return version;
- }
-
- @Override
- public ClientConnection pollConnection() {
- return availableConnections.poll();
- }
-
- @Override
- public boolean canCreateConnection(int connCount) {
- return connCount < maxSockets;
- }
-
- @Override
- public HttpClientStream createStream(ClientConnection conn) {
- return conn;
- }
-
- public void recycle(ClientConnection conn) {
- synchronized (queue) {
- Waiter waiter = queue.getNextWaiter();
- if (waiter != null) {
- queue.deliverStream(conn, waiter);
- } else if (conn.getOutstandingRequestCount() == 0) {
-        // Return it to the set of available connections here so it is not returned several times
- availableConnections.add(conn);
- }
- }
- }
-
- void requestEnded(ClientConnection conn) {
- ContextImpl context = conn.getContext();
- context.runOnContext(v -> {
- if (pipelining && conn.getOutstandingRequestCount() < pipeliningLimit) {
- recycle(conn);
- }
- });
- }
-
- void responseEnded(ClientConnection conn, boolean close) {
- if (!keepAlive || close) {
- conn.close();
- } else {
- ContextImpl ctx = conn.getContext();
- ctx.runOnContext(v -> {
- if (conn.getCurrentRequest() == null) {
- recycle(conn);
- }
- });
- }
- }
-
- void createConn(ContextImpl context, Channel ch, Waiter waiter) {
- ClientHandler handler = new ClientHandler(
- context,
- this,
- client,
- queue.metric,
- metrics);
- handler.addHandler(conn -> {
- synchronized (queue) {
- allConnections.add(conn);
- }
- connectionMap.put(ch, conn);
- });
- handler.removeHandler(this::connectionClosed);
- ch.pipeline().addLast("handler", handler);
- ClientConnection conn = handler.getConnection();
- context.executeFromIO(() -> {
- waiter.handleConnection(conn);
- queue.deliverStream(conn, waiter);
- });
- }
-
-  // Called if the connection is actually closed, OR the connection attempt failed - in the latter case
-  // conn will be null.
-  // The connection has been closed - tell the pool about it; this allows the pool to create more
-  // connections. Note the pool doesn't actually remove the connection: when the next caller to get a
-  // connection gets the closed one, they will check if it's closed and if so get another one.
- private synchronized void connectionClosed(ClientConnection conn) {
- synchronized (queue) {
- connectionMap.remove(conn.channel());
- allConnections.remove(conn);
- availableConnections.remove(conn);
- queue.connectionClosed();
- }
- if (metrics != null) {
- metrics.endpointDisconnected(queue.metric, conn.metric());
- }
- }
-
- public void closeAllConnections() {
- Set<ClientConnection> copy;
- synchronized (this) {
- copy = new HashSet<>(allConnections);
- allConnections.clear();
- }
- // Close outside sync block to avoid deadlock
- for (ClientConnection conn : copy) {
- try {
- conn.close();
- } catch (Throwable t) {
- ConnectionManager.log.error("Failed to close connection", t);
- }
- }
- }
-
- void removeChannel(Channel channel) {
- connectionMap.remove(channel);
- }
-}
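
The deleted Http1xPool above encoded the HTTP/1.x reuse rules that move behind the new pool package: a connection returns to the available set only when keep-alive is enabled and nothing is outstanding, while pipelining re-offers it as soon as the in-flight count drops below the pipelining limit. A minimal, self-contained sketch of those two rules; ClientConn and the available queue are hypothetical stand-ins, not Vert.x API:

  import java.util.ArrayDeque;
  import java.util.Queue;

  class Http1xRecycleSketch {
    static final class ClientConn { int outstanding; }

    private final Queue<ClientConn> available = new ArrayDeque<>();
    private final boolean keepAlive, pipelining;
    private final int pipeliningLimit;

    Http1xRecycleSketch(boolean keepAlive, boolean pipelining, int pipeliningLimit) {
      this.keepAlive = keepAlive;
      this.pipelining = pipelining;
      this.pipeliningLimit = pipeliningLimit;
    }

    // Request fully written: a pipelined connection may accept another
    // request before its response arrives.
    void requestEnded(ClientConn conn) {
      if (pipelining && conn.outstanding < pipeliningLimit) {
        available.add(conn);
      }
    }

    // Response fully read: without keep-alive (or on "Connection: close")
    // the connection is done; otherwise it becomes reusable once idle.
    void responseEnded(ClientConn conn, boolean close) {
      if (!keepAlive || close) {
        return; // the real pool calls conn.close() here
      }
      if (conn.outstanding == 0) {
        available.add(conn);
      }
    }
  }
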
diff --git a/src/main/java/io/vertx/core/http/impl/ServerConnection.java b/src/main/java/io/vertx/core/http/impl/Http1xServerConnection.java
similarity index 95%
rename from src/main/java/io/vertx/core/http/impl/ServerConnection.java
rename to src/main/java/io/vertx/core/http/impl/Http1xServerConnection.java
--- a/src/main/java/io/vertx/core/http/impl/ServerConnection.java
+++ b/src/main/java/io/vertx/core/http/impl/Http1xServerConnection.java
@@ -81,9 +81,9 @@
*
* @author <a href="http://tfox.org">Tim Fox</a>
*/
-public class ServerConnection extends Http1xConnectionBase implements HttpConnection {
+public class Http1xServerConnection extends Http1xConnectionBase implements HttpConnection {
- private static final Logger log = LoggerFactory.getLogger(ServerConnection.class);
+ private static final Logger log = LoggerFactory.getLogger(Http1xServerConnection.class);
private static final Handler<HttpServerRequest> NULL_REQUEST_HANDLER = req -> {};
@@ -111,13 +111,13 @@ public class ServerConnection extends Http1xConnectionBase implements HttpConnec
  // queueing == true <=> (paused || (pendingResponse != null && msg instanceof HttpRequest) || !pending.isEmpty())
private boolean queueing;
- public ServerConnection(VertxInternal vertx,
- SSLHelper sslHelper,
- HttpServerOptions options,
- ChannelHandlerContext channel,
- ContextImpl context,
- String serverOrigin,
- HttpServerMetrics metrics) {
+ public Http1xServerConnection(VertxInternal vertx,
+ SSLHelper sslHelper,
+ HttpServerOptions options,
+ ChannelHandlerContext channel,
+ ContextImpl context,
+ String serverOrigin,
+ HttpServerMetrics metrics) {
super(vertx, channel, context);
this.serverOrigin = serverOrigin;
this.options = options;
@@ -221,7 +221,7 @@ ServerWebSocket upgrade(HttpServerRequest request, HttpRequest nettyReq) {
if (ws != null) {
return ws;
}
- ServerHandler serverHandler = (ServerHandler) chctx.pipeline().get("handler");
+ Http1xServerHandler serverHandler = (Http1xServerHandler) chctx.pipeline().get("handler");
handshaker = serverHandler.createHandshaker(this, chctx.channel(), nettyReq);
if (handshaker == null) {
throw new IllegalStateException("Can't upgrade this request");
@@ -507,7 +507,7 @@ private void checkNextTick() {
}
if (channelPaused && pending.isEmpty()) {
//Resume the actual channel
- ServerConnection.super.doResume();
+ Http1xServerConnection.super.doResume();
channelPaused = false;
}
}
diff --git a/src/main/java/io/vertx/core/http/impl/ServerHandler.java b/src/main/java/io/vertx/core/http/impl/Http1xServerHandler.java
similarity index 83%
rename from src/main/java/io/vertx/core/http/impl/ServerHandler.java
rename to src/main/java/io/vertx/core/http/impl/Http1xServerHandler.java
--- a/src/main/java/io/vertx/core/http/impl/ServerHandler.java
+++ b/src/main/java/io/vertx/core/http/impl/Http1xServerHandler.java
@@ -23,9 +23,9 @@
/**
* @author <a href="mailto:julien@julienviet.com">Julien Viet</a>
*/
-public class ServerHandler extends VertxHttpHandler<ServerConnection> {
+public class Http1xServerHandler extends VertxHttpHandler<Http1xServerConnection> {
- private static final Logger log = LoggerFactory.getLogger(ServerHandler.class);
+ private static final Logger log = LoggerFactory.getLogger(Http1xServerHandler.class);
private final SSLHelper sslHelper;
private final HttpServerOptions options;
@@ -33,7 +33,7 @@ public class ServerHandler extends VertxHttpHandler<ServerConnection> {
private final HttpServerMetrics metrics;
private final HandlerHolder<HttpHandlers> holder;
- public ServerHandler(SSLHelper sslHelper, HttpServerOptions options, String serverOrigin, HandlerHolder<HttpHandlers> holder, HttpServerMetrics metrics) {
+ public Http1xServerHandler(SSLHelper sslHelper, HttpServerOptions options, String serverOrigin, HandlerHolder<HttpHandlers> holder, HttpServerMetrics metrics) {
this.holder = holder;
this.metrics = metrics;
this.sslHelper = sslHelper;
@@ -44,7 +44,7 @@ public ServerHandler(SSLHelper sslHelper, HttpServerOptions options, String serv
@Override
public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
super.handlerAdded(ctx);
- ServerConnection conn = new ServerConnection(holder.context.owner(),
+ Http1xServerConnection conn = new Http1xServerConnection(holder.context.owner(),
sslHelper,
options,
ctx,
@@ -65,11 +65,11 @@ public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
}
@Override
- protected void handleMessage(ServerConnection conn, ContextImpl context, ChannelHandlerContext chctx, Object msg) throws Exception {
+ protected void handleMessage(Http1xServerConnection conn, ContextImpl context, ChannelHandlerContext chctx, Object msg) throws Exception {
conn.handleMessage(msg);
}
- WebSocketServerHandshaker createHandshaker(ServerConnection conn, Channel ch, HttpRequest request) {
+ WebSocketServerHandshaker createHandshaker(Http1xServerConnection conn, Channel ch, HttpRequest request) {
// As a fun part, Firefox 6.0.2 supports Websockets protocol '7'. But,
// it doesn't send a normal 'Connection: Upgrade' header. Instead it
// sends: 'Connection: keep-alive, Upgrade'. Brilliant.
diff --git a/src/main/java/io/vertx/core/http/impl/Http2ClientConnection.java b/src/main/java/io/vertx/core/http/impl/Http2ClientConnection.java
--- a/src/main/java/io/vertx/core/http/impl/Http2ClientConnection.java
+++ b/src/main/java/io/vertx/core/http/impl/Http2ClientConnection.java
@@ -18,6 +18,7 @@
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
+import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.handler.codec.http.HttpResponseStatus;
@@ -27,16 +28,10 @@
import io.netty.handler.codec.http2.Http2Exception;
import io.netty.handler.codec.http2.Http2Headers;
import io.netty.handler.codec.http2.Http2Stream;
-import io.vertx.core.Context;
-import io.vertx.core.Handler;
-import io.vertx.core.MultiMap;
-import io.vertx.core.VertxException;
+import io.vertx.core.*;
import io.vertx.core.buffer.Buffer;
-import io.vertx.core.http.CaseInsensitiveHeaders;
-import io.vertx.core.http.HttpClientRequest;
-import io.vertx.core.http.HttpMethod;
-import io.vertx.core.http.HttpVersion;
-import io.vertx.core.http.StreamResetException;
+import io.vertx.core.http.*;
+import io.vertx.core.http.impl.pool.ConnectionListener;
import io.vertx.core.impl.ContextImpl;
import io.vertx.core.net.NetSocket;
import io.vertx.core.spi.metrics.HttpClientMetrics;
@@ -50,63 +45,76 @@
*/
class Http2ClientConnection extends Http2ConnectionBase implements HttpClientConnection {
- final Http2Pool http2Pool;
+ private final ConnectionListener<HttpClientConnection> listener;
+ private final HttpClientImpl client;
final HttpClientMetrics metrics;
final Object queueMetric;
- int streamCount;
- public Http2ClientConnection(Http2Pool http2Pool,
+ public Http2ClientConnection(ConnectionListener<HttpClientConnection> listener,
Object queueMetric,
+ HttpClientImpl client,
ContextImpl context,
VertxHttp2ConnectionHandler connHandler,
HttpClientMetrics metrics) {
super(context, connHandler);
- this.http2Pool = http2Pool;
this.metrics = metrics;
this.queueMetric = queueMetric;
+ this.client = client;
+ this.listener = listener;
}
@Override
- public HttpClientMetrics metrics() {
- return metrics;
+ synchronized void onGoAwaySent(int lastStreamId, long errorCode, ByteBuf debugData) {
+ listener.onDiscard();
+ super.onGoAwaySent(lastStreamId, errorCode, debugData);
}
@Override
- void onGoAwaySent(int lastStreamId, long errorCode, ByteBuf debugData) {
- http2Pool.discard(Http2ClientConnection.this);
+ synchronized void onGoAwayReceived(int lastStreamId, long errorCode, ByteBuf debugData) {
+ listener.onDiscard();
+ super.onGoAwayReceived(lastStreamId, errorCode, debugData);
}
@Override
- void onGoAwayReceived(int lastStreamId, long errorCode, ByteBuf debugData) {
- super.onGoAwayReceived(lastStreamId, errorCode, debugData);
- http2Pool.discard(Http2ClientConnection.this);
+ public Channel channel() {
+ return chctx.channel();
}
@Override
- void onStreamClosed(Http2Stream nettyStream) {
- super.onStreamClosed(nettyStream);
- http2Pool.recycle(Http2ClientConnection.this);
+ protected void concurrencyChanged(long concurrency) {
+ int limit = client.getOptions().getHttp2MultiplexingLimit();
+ if (limit > 0) {
+ concurrency = Math.min(concurrency, limit);
+ }
+ listener.onConcurrencyChange(concurrency);
}
- synchronized HttpClientStream createStream() throws Http2Exception {
- Http2Connection conn = handler.connection();
- Http2Stream stream = conn.local().createStream(conn.local().incrementAndGetNextStreamId(), false);
- boolean writable = handler.encoder().flowController().isWritable(stream);
- Http2ClientStream clientStream = new Http2ClientStream(this, stream, writable);
- streams.put(clientStream.stream.id(), clientStream);
- return clientStream;
+ @Override
+ public HttpClientMetrics metrics() {
+ return metrics;
}
@Override
- public synchronized void handleClosed() {
- http2Pool.discard(this);
- super.handleClosed();
+ void onStreamClosed(Http2Stream nettyStream) {
+ super.onStreamClosed(nettyStream);
}
@Override
- public boolean isValid() {
- Http2Connection conn = handler.connection();
- return !isClosed() && !conn.goAwaySent() && !conn.goAwayReceived();
+ public void createStream(HttpClientRequestImpl req, Handler<AsyncResult<HttpClientStream>> completionHandler) {
+ Future<HttpClientStream> fut;
+ synchronized (this) {
+ try {
+ Http2Connection conn = handler.connection();
+ Http2Stream stream = conn.local().createStream(conn.local().incrementAndGetNextStreamId(), false);
+ boolean writable = handler.encoder().flowController().isWritable(stream);
+ Http2ClientStream clientStream = new Http2ClientStream(this, req, stream, writable);
+ streams.put(clientStream.stream.id(), clientStream);
+ fut = Future.succeededFuture(clientStream);
+ } catch (Http2Exception e) {
+ fut = Future.failedFuture(e);
+ }
+ }
+ completionHandler.handle(fut);
}
@Override
@@ -133,7 +141,7 @@ public synchronized void onPushPromiseRead(ChannelHandlerContext ctx, int stream
MultiMap headersMap = new Http2HeadersAdaptor(headers);
Http2Stream promisedStream = handler.connection().stream(promisedStreamId);
int port = remoteAddress().port();
- HttpClientRequestPushPromise pushReq = new HttpClientRequestPushPromise(this, promisedStream, http2Pool.client, isSsl(), method, rawMethod, uri, host, port, headersMap);
+ HttpClientRequestPushPromise pushReq = new HttpClientRequestPushPromise(this, promisedStream, client, isSsl(), method, rawMethod, uri, host, port, headersMap);
if (metrics != null) {
pushReq.metric(metrics.responsePushed(queueMetric, metric(), localAddress(), remoteAddress(), pushReq));
}
@@ -148,15 +156,11 @@ public synchronized void onPushPromiseRead(ChannelHandlerContext ctx, int stream
static class Http2ClientStream extends VertxHttp2Stream<Http2ClientConnection> implements HttpClientStream {
- private HttpClientRequestBase request;
+ private final HttpClientRequestBase request;
private HttpClientResponseImpl response;
private boolean requestEnded;
private boolean responseEnded;
- public Http2ClientStream(Http2ClientConnection conn, Http2Stream stream, boolean writable) throws Http2Exception {
- this(conn, null, stream, writable);
- }
-
public Http2ClientStream(Http2ClientConnection conn, HttpClientRequestBase request, Http2Stream stream, boolean writable) throws Http2Exception {
super(conn, stream, writable);
this.request = request;
@@ -208,6 +212,13 @@ void handleReset(long errorCode) {
@Override
void handleClose() {
+      // Commented out, to be used later when we properly define HTTP/2 connection expiration from the pool:
+      // boolean disposable = conn.streams.isEmpty();
+      if (request instanceof HttpClientRequestImpl) {
+        conn.listener.onRecycle(1, false);
+      } /* else {
+        conn.listener.onRecycle(0, disposable);
+      } */
if (!responseEnded) {
responseEnded = true;
if (conn.metrics != null) {
@@ -313,7 +324,7 @@ public void writeHeadWithContent(HttpMethod method, String rawMethod, String uri
h.add(Http2HeadersAdaptor.toLowerCase(header.getKey()), header.getValue());
}
}
- if (conn.http2Pool.client.getOptions().isTryUseCompression() && h.get(HttpHeaderNames.ACCEPT_ENCODING) == null) {
+ if (conn.client.getOptions().isTryUseCompression() && h.get(HttpHeaderNames.ACCEPT_ENCODING) == null) {
h.set(HttpHeaderNames.ACCEPT_ENCODING, DEFLATE_GZIP);
}
if (conn.metrics != null) {
@@ -360,8 +371,7 @@ public boolean isNotWritable() {
}
@Override
- public void beginRequest(HttpClientRequestImpl request) {
- this.request = request;
+ public void beginRequest() {
}
@Override
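
A note on the concurrency clamp above: Http2ClientConnection.concurrencyChanged caps the server-advertised max-concurrent-streams with the client's configured HTTP/2 multiplexing limit (a value <= 0 means no client-side cap) before notifying the pool listener. A minimal sketch of just that rule, with ConnectionListener.onConcurrencyChange reduced to a LongConsumer stand-in:

  import java.util.function.LongConsumer;

  final class ConcurrencyClampSketch {
    static void concurrencyChanged(long advertised, int multiplexingLimit, LongConsumer listener) {
      long concurrency = advertised;
      if (multiplexingLimit > 0) {           // <= 0 disables the client-side cap
        concurrency = Math.min(concurrency, multiplexingLimit);
      }
      listener.accept(concurrency);
    }

    public static void main(String[] args) {
      concurrencyChanged(100, 10, System.out::println);  // prints 10
      concurrencyChanged(100, -1, System.out::println);  // prints 100
    }
  }
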
diff --git a/src/main/java/io/vertx/core/http/impl/Http2ConnectionBase.java b/src/main/java/io/vertx/core/http/impl/Http2ConnectionBase.java
--- a/src/main/java/io/vertx/core/http/impl/Http2ConnectionBase.java
+++ b/src/main/java/io/vertx/core/http/impl/Http2ConnectionBase.java
@@ -20,7 +20,6 @@
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.Unpooled;
-import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.http2.Http2Connection;
import io.netty.handler.codec.http2.Http2Exception;
@@ -81,21 +80,25 @@ static ByteBuf safeBuffer(ByteBuf buf, ByteBufAllocator allocator) {
protected final ChannelHandlerContext handlerContext;
protected final VertxHttp2ConnectionHandler handler;
private boolean shutdown;
- private Handler<io.vertx.core.http.Http2Settings> clientSettingsHandler;
+ private Handler<io.vertx.core.http.Http2Settings> remoteSettingsHandler;
private final ArrayDeque<Runnable> updateSettingsHandlers = new ArrayDeque<>(4);
private final ArrayDeque<Handler<AsyncResult<Buffer>>> pongHandlers = new ArrayDeque<>();
- private Http2Settings serverSettings = new Http2Settings();
+ private Http2Settings localSettings = new Http2Settings();
+ private Http2Settings remoteSettings;
private Handler<GoAway> goAwayHandler;
private Handler<Void> shutdownHandler;
private Handler<Buffer> pingHandler;
private boolean closed;
+ private boolean goneAway;
private int windowSize;
+ private long maxConcurrentStreams;
public Http2ConnectionBase(ContextImpl context, VertxHttp2ConnectionHandler handler) {
super(context.owner(), handler.context(), context);
this.handler = handler;
this.handlerContext = chctx;
this.windowSize = handler.connection().local().flowController().windowSize(handler.connection().connectionStream());
+ this.maxConcurrentStreams = io.vertx.core.http.Http2Settings.DEFAULT_MAX_CONCURRENT_STREAMS;
}
VertxInternal vertx() {
@@ -165,18 +168,25 @@ synchronized void onStreamClosed(Http2Stream stream) {
}
}
- void onGoAwaySent(int lastStreamId, long errorCode, ByteBuf debugData) {
+ synchronized void onGoAwaySent(int lastStreamId, long errorCode, ByteBuf debugData) {
+ if (!goneAway) {
+ goneAway = true;
+ checkShutdownHandler();
+ }
}
synchronized void onGoAwayReceived(int lastStreamId, long errorCode, ByteBuf debugData) {
- Handler<GoAway> handler = goAwayHandler;
- if (handler != null) {
- Buffer buffer = Buffer.buffer(debugData);
- context.executeFromIO(() -> {
- handler.handle(new GoAway().setErrorCode(errorCode).setLastStreamId(lastStreamId).setDebugData(buffer));
- });
+ if (!goneAway) {
+ goneAway = true;
+ Handler<GoAway> handler = goAwayHandler;
+ if (handler != null) {
+ Buffer buffer = Buffer.buffer(debugData);
+ context.executeFromIO(() -> {
+ handler.handle(new GoAway().setErrorCode(errorCode).setLastStreamId(lastStreamId).setDebugData(buffer));
+ });
+ }
+ checkShutdownHandler();
}
- checkShutdownHandler();
}
// Http2FrameListener
@@ -200,13 +210,37 @@ public synchronized void onSettingsAckRead(ChannelHandlerContext ctx) {
}
}
+ protected void onConnect() {
+ }
+
+ protected void concurrencyChanged(long concurrency) {
+ }
+
@Override
- public synchronized void onSettingsRead(ChannelHandlerContext ctx, Http2Settings settings) {
- Handler<io.vertx.core.http.Http2Settings> handler = clientSettingsHandler;
- if (handler != null) {
- context.executeFromIO(() -> {
- handler.handle(HttpUtils.toVertxSettings(settings));
- });
+ public void onSettingsRead(ChannelHandlerContext ctx, Http2Settings settings) {
+ boolean changed;
+ Long val = settings.maxConcurrentStreams();
+ if (val != null) {
+ if (remoteSettings != null) {
+ changed = val != maxConcurrentStreams;
+ } else {
+ changed = false;
+ }
+ maxConcurrentStreams = val;
+ } else {
+ changed = false;
+ }
+ remoteSettings = settings;
+ synchronized (this) {
+ Handler<io.vertx.core.http.Http2Settings> handler = remoteSettingsHandler;
+ if (handler != null) {
+ context.executeFromIO(() -> {
+ handler.handle(HttpUtils.toVertxSettings(settings));
+ });
+ }
+ }
+ if (changed) {
+ concurrencyChanged(maxConcurrentStreams);
}
}
@@ -311,7 +345,7 @@ public synchronized HttpConnection goAway(long errorCode, int lastStreamId, Buff
throw new IllegalArgumentException();
}
if (lastStreamId < 0) {
- throw new IllegalArgumentException();
+ lastStreamId = handler.connection().remote().lastStreamCreated();
}
handler.writeGoAway(errorCode, lastStreamId, debugData != null ? debugData.getByteBuf() : Unpooled.EMPTY_BUFFER);
return this;
@@ -357,12 +391,13 @@ public void close() {
@Override
public synchronized HttpConnection remoteSettingsHandler(Handler<io.vertx.core.http.Http2Settings> handler) {
- clientSettingsHandler = handler;
+ remoteSettingsHandler = handler;
return this;
}
@Override
public synchronized io.vertx.core.http.Http2Settings remoteSettings() {
+ /*
io.vertx.core.http.Http2Settings a = new io.vertx.core.http.Http2Settings();
a.setPushEnabled(handler.connection().remote().allowPushTo());
a.setMaxConcurrentStreams((long) handler.connection().local().maxActiveStreams());
@@ -371,11 +406,13 @@ public synchronized io.vertx.core.http.Http2Settings remoteSettings() {
a.setMaxFrameSize(handler.encoder().configuration().frameSizePolicy().maxFrameSize());
a.setInitialWindowSize(handler.encoder().flowController().initialWindowSize());
return a;
+ */
+ return HttpUtils.toVertxSettings(remoteSettings);
}
@Override
public synchronized io.vertx.core.http.Http2Settings settings() {
- return HttpUtils.toVertxSettings(serverSettings);
+ return HttpUtils.toVertxSettings(localSettings);
}
@Override
@@ -403,7 +440,7 @@ protected synchronized void updateSettings(Http2Settings settingsUpdate, Handler
if (fut.isSuccess()) {
synchronized (Http2ConnectionBase.this) {
updateSettingsHandlers.add(() -> {
- serverSettings.putAll(settingsUpdate);
+ localSettings.putAll(settingsUpdate);
if (completionHandler != null) {
completionContext.runOnContext(v -> {
completionHandler.handle(Future.succeededFuture());
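
The reworked Http2ConnectionBase.onSettingsRead above only reports a concurrency change when a SETTINGS frame received after the first one actually alters maxConcurrentStreams; the initial frame merely records the value. A standalone sketch of that change detection (the default constant is an assumption mirroring the HTTP/2 "unbounded" default):

  final class SettingsTrackerSketch {
    private boolean settingsReceived;                 // any SETTINGS frame seen yet?
    private long maxConcurrentStreams = 0xFFFFFFFFL;  // assumed effectively-unbounded default

    // maxStreams is null when the frame omits SETTINGS_MAX_CONCURRENT_STREAMS.
    // Returns true only when a later frame changed the stream limit.
    boolean onSettingsRead(Long maxStreams) {
      boolean changed = false;
      if (maxStreams != null) {
        changed = settingsReceived && maxStreams != maxConcurrentStreams;
        maxConcurrentStreams = maxStreams;
      }
      settingsReceived = true;
      return changed;
    }
  }
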
diff --git a/src/main/java/io/vertx/core/http/impl/Http2Pool.java b/src/main/java/io/vertx/core/http/impl/Http2Pool.java
deleted file mode 100644
--- a/src/main/java/io/vertx/core/http/impl/Http2Pool.java
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Copyright (c) 2011-2013 The original author or authors
- * ------------------------------------------------------
- * All rights reserved. This program and the accompanying materials
- * are made available under the terms of the Eclipse Public License v1.0
- * and Apache License v2.0 which accompanies this distribution.
- *
- * The Eclipse Public License is available at
- * http://www.eclipse.org/legal/epl-v10.html
- *
- * The Apache License v2.0 is available at
- * http://www.opensource.org/licenses/apache2.0.php
- *
- * You may elect to redistribute this code under either of these licenses.
- */
-
-package io.vertx.core.http.impl;
-
-import io.netty.channel.Channel;
-import io.netty.handler.codec.http2.Http2Exception;
-import io.vertx.core.http.HttpVersion;
-import io.vertx.core.impl.ContextImpl;
-import io.vertx.core.spi.metrics.HttpClientMetrics;
-
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * @author <a href="mailto:julien@julienviet.com">Julien Viet</a>
- */
-class Http2Pool implements ConnectionManager.Pool<Http2ClientConnection> {
-
- // Pools must locks on the queue object to keep a single lock
- private final ConnectionManager.ConnQueue queue;
- private final Set<Http2ClientConnection> allConnections = new HashSet<>();
- private final Map<Channel, ? super Http2ClientConnection> connectionMap;
- final HttpClientImpl client;
- final HttpClientMetrics metrics;
- final int maxConcurrency;
- final boolean logEnabled;
- final int maxSockets;
- final int windowSize;
-
- public Http2Pool(ConnectionManager.ConnQueue queue, HttpClientImpl client, HttpClientMetrics metrics,
- Map<Channel, ? super Http2ClientConnection> connectionMap,
- int maxConcurrency, boolean logEnabled, int maxSize, int windowSize) {
- this.queue = queue;
- this.client = client;
- this.metrics = metrics;
- this.connectionMap = connectionMap;
- this.maxConcurrency = maxConcurrency;
- this.logEnabled = logEnabled;
- this.maxSockets = maxSize;
- this.windowSize = windowSize;
- }
-
- @Override
- public HttpVersion version() {
- return HttpVersion.HTTP_2;
- }
-
- @Override
- public boolean canCreateConnection(int connCount) {
-    // We create at most one connection concurrently, when all others are busy
- return connCount == allConnections.size() && connCount < maxSockets;
- }
-
- @Override
- public Http2ClientConnection pollConnection() {
- for (Http2ClientConnection conn : allConnections) {
- if (canReserveStream(conn)) {
- conn.streamCount++;
- return conn;
- }
- }
- return null;
- }
-
- void createConn(ContextImpl context, Channel ch, Waiter waiter, boolean upgrade) throws Http2Exception {
- synchronized (queue) {
- VertxHttp2ConnectionHandler<Http2ClientConnection> handler = new VertxHttp2ConnectionHandlerBuilder<Http2ClientConnection>(ch)
- .connectionMap(connectionMap)
- .server(false)
- .clientUpgrade(upgrade)
- .useCompression(client.getOptions().isTryUseCompression())
- .initialSettings(client.getOptions().getInitialSettings())
- .connectionFactory(connHandler -> {
- Http2ClientConnection conn = new Http2ClientConnection(Http2Pool.this, queue.metric, context, connHandler, metrics);
- if (metrics != null) {
- Object metric = metrics.connected(conn.remoteAddress(), conn.remoteName());
- conn.metric(metric);
- }
- return conn;
- })
- .logEnabled(logEnabled)
- .build();
- Http2ClientConnection conn = handler.connection;
- if (metrics != null) {
- metrics.endpointConnected(queue.metric, conn.metric());
- }
- allConnections.add(conn);
- if (windowSize > 0) {
- conn.setWindowSize(windowSize);
- }
- conn.streamCount++;
-      waiter.handleConnection(conn); // Should make the same checks as in deliverRequest
- queue.deliverStream(conn, waiter);
- checkPending(conn);
- }
- }
-
- private boolean canReserveStream(Http2ClientConnection handler) {
- int maxConcurrentStreams = Math.min(handler.handler.connection().local().maxActiveStreams(), maxConcurrency);
- return handler.streamCount < maxConcurrentStreams;
- }
-
- void checkPending(Http2ClientConnection conn) {
- synchronized (queue) {
- Waiter waiter;
- while (canReserveStream(conn) && (waiter = queue.getNextWaiter()) != null) {
- conn.streamCount++;
- queue.deliverStream(conn, waiter);
- }
- }
- }
-
- void discard(Http2ClientConnection conn) {
- synchronized (queue) {
- if (allConnections.remove(conn)) {
- queue.connectionClosed();
- }
- }
- if (metrics != null) {
- metrics.endpointDisconnected(queue.metric, conn.metric());
- }
- }
-
- @Override
- public void recycle(Http2ClientConnection conn) {
- synchronized (queue) {
- conn.streamCount--;
- checkPending(conn);
- }
- }
-
- @Override
- public HttpClientStream createStream(Http2ClientConnection conn) throws Exception {
- return conn.createStream();
- }
-
- @Override
- public void closeAllConnections() {
- List<Http2ClientConnection> toClose;
- synchronized (queue) {
- toClose = new ArrayList<>(allConnections);
- }
- // Close outside sync block to avoid deadlock
- toClose.forEach(Http2ConnectionBase::close);
- }
-}
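
For reference, the reservation rule the deleted Http2Pool enforced: a stream could only be reserved while the per-connection stream count stayed below min(server max active streams, configured maxConcurrency), and recycle() decremented the count before re-checking waiters. A reduced sketch of that rule, outside the real pool types:

  final class StreamReservationSketch {
    private int streamCount;              // streams currently reserved on one connection
    private final int serverMaxStreams;   // from the remote SETTINGS frame
    private final int maxConcurrency;     // configured client-side limit

    StreamReservationSketch(int serverMaxStreams, int maxConcurrency) {
      this.serverMaxStreams = serverMaxStreams;
      this.maxConcurrency = maxConcurrency;
    }

    boolean tryReserve() {
      int max = Math.min(serverMaxStreams, maxConcurrency);
      if (streamCount < max) {
        streamCount++;
        return true;
      }
      return false;
    }

    void recycle() {
      streamCount--;   // the real pool then re-offered the connection to waiters
    }
  }
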
diff --git a/src/main/java/io/vertx/core/http/impl/Http2ServerConnection.java b/src/main/java/io/vertx/core/http/impl/Http2ServerConnection.java
--- a/src/main/java/io/vertx/core/http/impl/Http2ServerConnection.java
+++ b/src/main/java/io/vertx/core/http/impl/Http2ServerConnection.java
@@ -30,11 +30,7 @@
import io.vertx.core.Handler;
import io.vertx.core.MultiMap;
import io.vertx.core.buffer.Buffer;
-import io.vertx.core.http.HttpMethod;
-import io.vertx.core.http.HttpServerOptions;
-import io.vertx.core.http.HttpServerRequest;
-import io.vertx.core.http.HttpServerResponse;
-import io.vertx.core.http.StreamResetException;
+import io.vertx.core.http.*;
import io.vertx.core.impl.ContextImpl;
import io.vertx.core.spi.metrics.HttpServerMetrics;
diff --git a/src/main/java/io/vertx/core/http/impl/HttpChannelConnector.java b/src/main/java/io/vertx/core/http/impl/HttpChannelConnector.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/io/vertx/core/http/impl/HttpChannelConnector.java
@@ -0,0 +1,346 @@
+/*
+ * Copyright (c) 2011-2013 The original author or authors
+ * ------------------------------------------------------
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * and Apache License v2.0 which accompanies this distribution.
+ *
+ * The Eclipse Public License is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * The Apache License v2.0 is available at
+ * http://www.opensource.org/licenses/apache2.0.php
+ *
+ * You may elect to redistribute this code under either of these licenses.
+ */
+package io.vertx.core.http.impl;
+
+import io.netty.bootstrap.Bootstrap;
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelInboundHandlerAdapter;
+import io.netty.channel.ChannelPipeline;
+import io.netty.handler.codec.http.*;
+import io.netty.handler.logging.LoggingHandler;
+import io.netty.handler.ssl.SslHandler;
+import io.netty.handler.timeout.IdleStateHandler;
+import io.vertx.core.AsyncResult;
+import io.vertx.core.Handler;
+import io.vertx.core.http.HttpClientOptions;
+import io.vertx.core.http.HttpVersion;
+import io.vertx.core.http.impl.pool.ConnectionListener;
+import io.vertx.core.http.impl.pool.ConnectionProvider;
+import io.vertx.core.impl.ContextImpl;
+import io.vertx.core.net.ProxyType;
+import io.vertx.core.net.SocketAddress;
+import io.vertx.core.net.impl.ChannelProvider;
+import io.vertx.core.net.impl.ProxyChannelProvider;
+import io.vertx.core.net.impl.SSLHelper;
+import io.vertx.core.spi.metrics.HttpClientMetrics;
+
+import javax.net.ssl.SSLHandshakeException;
+
+/**
+ * Performs the channel configuration and connection according to the client options and the protocol version.
+ *
+ * @author <a href="mailto:julien@julienviet.com">Julien Viet</a>
+ */
+class HttpChannelConnector implements ConnectionProvider<HttpClientConnection> {
+
+ private final HttpClientImpl client;
+ private final HttpClientOptions options;
+ private final HttpClientMetrics metrics;
+ private final SSLHelper sslHelper;
+ private final HttpVersion version;
+ private final long weight;
+ private final long http1Weight;
+ private final long http2Weight;
+ private final long http2MaxConcurrency;
+ private final long http1MaxConcurrency;
+ private final boolean ssl;
+ private final String peerHost;
+ private final String host;
+ private final int port;
+ private final Object metric;
+
+ HttpChannelConnector(HttpClientImpl client,
+ Object metric,
+ HttpVersion version,
+ boolean ssl,
+ String peerHost,
+ String host,
+ int port) {
+ this.client = client;
+ this.metric = metric;
+ this.options = client.getOptions();
+ this.metrics = client.metrics();
+ this.sslHelper = client.getSslHelper();
+ this.version = version;
+ this.http1Weight = client.getOptions().getHttp2MaxPoolSize();
+ this.http2Weight = client.getOptions().getMaxPoolSize();
+ this.weight = version == HttpVersion.HTTP_2 ? http2Weight : http1Weight;
+ this.http2MaxConcurrency = client.getOptions().getHttp2MultiplexingLimit() <= 0 ? Long.MAX_VALUE : client.getOptions().getHttp2MultiplexingLimit();
+ this.http1MaxConcurrency = client.getOptions().isPipelining() ? client.getOptions().getPipeliningLimit() : 1;
+ this.ssl = ssl;
+ this.peerHost = peerHost;
+ this.host = host;
+ this.port = port;
+ }
+
+ @Override
+ public void close(HttpClientConnection conn) {
+ conn.close();
+ }
+
+ public long connect(
+ ConnectionListener<HttpClientConnection> listener,
+ ContextImpl context) {
+
+ Bootstrap bootstrap = new Bootstrap();
+ bootstrap.group(context.nettyEventLoop());
+ bootstrap.channel(client.getVertx().transport().channelType(false));
+
+ applyConnectionOptions(bootstrap);
+
+ ChannelProvider channelProvider;
+ // http proxy requests are handled in HttpClientImpl, everything else can use netty proxy handler
+    if (options.getProxyOptions() == null || !ssl && options.getProxyOptions().getType() == ProxyType.HTTP) {
+ channelProvider = ChannelProvider.INSTANCE;
+ } else {
+ channelProvider = ProxyChannelProvider.INSTANCE;
+ }
+
+ boolean useAlpn = options.isUseAlpn();
+ Handler<Channel> channelInitializer = ch -> {
+
+ // Configure pipeline
+ ChannelPipeline pipeline = ch.pipeline();
+ if (ssl) {
+ SslHandler sslHandler = new SslHandler(sslHelper.createEngine(client.getVertx(), peerHost, port, options.isForceSni() ? peerHost : null));
+ ch.pipeline().addLast("ssl", sslHandler);
+ // TCP connected, so now we must do the SSL handshake
+ sslHandler.handshakeFuture().addListener(fut -> {
+ if (fut.isSuccess()) {
+ String protocol = sslHandler.applicationProtocol();
+ if (useAlpn) {
+ if ("h2".equals(protocol)) {
+ applyHttp2ConnectionOptions(ch.pipeline());
+ http2Connected(listener, context, ch);
+ } else {
+ applyHttp1xConnectionOptions(ch.pipeline());
+ HttpVersion fallbackProtocol = "http/1.0".equals(protocol) ?
+ HttpVersion.HTTP_1_0 : HttpVersion.HTTP_1_1;
+ http1xConnected(listener, fallbackProtocol, host, port, true, context, ch);
+ }
+ } else {
+ applyHttp1xConnectionOptions(ch.pipeline());
+ http1xConnected(listener, version, host, port, true, context, ch);
+ }
+ } else {
+ handshakeFailure(context, ch, fut.cause(), listener);
+ }
+ });
+ } else {
+ if (version == HttpVersion.HTTP_2) {
+ if (client.getOptions().isHttp2ClearTextUpgrade()) {
+ HttpClientCodec httpCodec = new HttpClientCodec();
+ class UpgradeRequestHandler extends ChannelInboundHandlerAdapter {
+ @Override
+ public void channelActive(ChannelHandlerContext ctx) throws Exception {
+ DefaultFullHttpRequest upgradeRequest =
+ new DefaultFullHttpRequest(io.netty.handler.codec.http.HttpVersion.HTTP_1_1, HttpMethod.GET, "/");
+ String hostHeader = peerHost;
+ if (port != 80) {
+ hostHeader += ":" + port;
+ }
+ upgradeRequest.headers().set(HttpHeaderNames.HOST, hostHeader);
+ ctx.writeAndFlush(upgradeRequest);
+ ctx.fireChannelActive();
+ }
+ @Override
+ public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
+ if (msg instanceof LastHttpContent) {
+ ChannelPipeline p = ctx.pipeline();
+ p.remove(httpCodec);
+ p.remove(this);
+ // Upgrade handler will remove itself
+ applyHttp1xConnectionOptions(ch.pipeline());
+ http1xConnected(listener, HttpVersion.HTTP_1_1, host, port, false, context, ch);
+ }
+ }
+ @Override
+ public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
+ super.userEventTriggered(ctx, evt);
+ if (evt == HttpClientUpgradeHandler.UpgradeEvent.UPGRADE_SUCCESSFUL) {
+ ctx.pipeline().remove(this);
+ // Upgrade handler will remove itself
+ }
+ }
+ }
+ VertxHttp2ClientUpgradeCodec upgradeCodec = new VertxHttp2ClientUpgradeCodec(client.getOptions().getInitialSettings()) {
+ @Override
+ public void upgradeTo(ChannelHandlerContext ctx, FullHttpResponse upgradeResponse) throws Exception {
+ applyHttp2ConnectionOptions(pipeline);
+ http2Connected(listener, context, ch);
+ }
+ };
+ HttpClientUpgradeHandler upgradeHandler = new HttpClientUpgradeHandler(httpCodec, upgradeCodec, 65536);
+ ch.pipeline().addLast(httpCodec, upgradeHandler, new UpgradeRequestHandler());
+ } else {
+ applyHttp2ConnectionOptions(pipeline);
+ }
+ } else {
+ applyHttp1xConnectionOptions(pipeline);
+ }
+ }
+ };
+
+ Handler<AsyncResult<Channel>> channelHandler = res -> {
+
+ if (res.succeeded()) {
+ Channel ch = res.result();
+ if (!ssl) {
+ if (ch.pipeline().get(HttpClientUpgradeHandler.class) != null) {
+          // An upgrade handler is in the pipeline: the upgrade flow will complete the connection
+ } else {
+ if (version == HttpVersion.HTTP_2 && !client.getOptions().isHttp2ClearTextUpgrade()) {
+ http2Connected(listener, context, ch);
+ } else {
+ http1xConnected(listener, version, host, port, false, context, ch);
+ }
+ }
+ }
+ } else {
+ connectFailed(context, null, listener, res.cause());
+ }
+ };
+
+ channelProvider.connect(client.getVertx(), bootstrap, client.getOptions().getProxyOptions(), SocketAddress.inetSocketAddress(port, host), channelInitializer, channelHandler);
+
+ return weight;
+ }
+
+ private void applyConnectionOptions(Bootstrap bootstrap) {
+ client.getVertx().transport().configure(options, bootstrap);
+ }
+
+ private void applyHttp2ConnectionOptions(ChannelPipeline pipeline) {
+ if (client.getOptions().getIdleTimeout() > 0) {
+ pipeline.addLast("idle", new IdleStateHandler(0, 0, options.getIdleTimeout()));
+ }
+ }
+
+ private void applyHttp1xConnectionOptions(ChannelPipeline pipeline) {
+ if (client.getOptions().getLogActivity()) {
+ pipeline.addLast("logging", new LoggingHandler());
+ }
+ pipeline.addLast("codec", new HttpClientCodec(
+ client.getOptions().getMaxInitialLineLength(),
+ client.getOptions().getMaxHeaderSize(),
+ client.getOptions().getMaxChunkSize(),
+ false,
+ false,
+ client.getOptions().getDecoderInitialBufferSize()));
+ if (client.getOptions().isTryUseCompression()) {
+ pipeline.addLast("inflater", new HttpContentDecompressor(true) {
+ @Override
+ public void channelInactive(ChannelHandlerContext ctx) throws Exception {
+ try {
+ super.channelInactive(ctx);
+ } catch (Exception e) {
+ // Workaround for https://github.com/eclipse/vert.x/issues/2212 until we can get
+ // the proper Netty fix https://github.com/netty/netty/pull/7415
+ // ctx.fireExceptionCaught(e);
+ ctx.fireChannelInactive();
+ }
+ }
+ });
+ }
+ if (client.getOptions().getIdleTimeout() > 0) {
+ pipeline.addLast("idle", new IdleStateHandler(0, 0, client.getOptions().getIdleTimeout()));
+ }
+ }
+
+ private void handshakeFailure(ContextImpl context, Channel ch, Throwable cause, ConnectionListener<HttpClientConnection> listener) {
+ SSLHandshakeException sslException = new SSLHandshakeException("Failed to create SSL connection");
+ if (cause != null) {
+ sslException.initCause(cause);
+ }
+ connectFailed(context, ch, listener, sslException);
+ }
+
+ private void http1xConnected(ConnectionListener<HttpClientConnection> listener,
+ HttpVersion version,
+ String host,
+ int port,
+ boolean ssl,
+ ContextImpl context,
+ Channel ch) {
+ Http1xClientHandler clientHandler = new Http1xClientHandler(
+ listener,
+ context,
+ version,
+ host,
+ port,
+ ssl,
+ client,
+ metric,
+ client.metrics());
+ clientHandler.addHandler(conn -> {
+ listener.onConnectSuccess(conn, http1MaxConcurrency, ch, context, weight, http1Weight);
+ });
+ clientHandler.removeHandler(conn -> {
+ listener.onDiscard();
+ });
+ ch.pipeline().addLast("handler", clientHandler);
+ }
+
+ private void http2Connected(ConnectionListener<HttpClientConnection> listener,
+ ContextImpl context,
+ Channel ch) {
+ try {
+ boolean upgrade;
+ upgrade = ch.pipeline().get(SslHandler.class) == null && options.isHttp2ClearTextUpgrade();
+ VertxHttp2ConnectionHandler<Http2ClientConnection> handler = new VertxHttp2ConnectionHandlerBuilder<Http2ClientConnection>(ch)
+ .server(false)
+ .clientUpgrade(upgrade)
+ .useCompression(client.getOptions().isTryUseCompression())
+ .initialSettings(client.getOptions().getInitialSettings())
+ .connectionFactory(connHandler -> new Http2ClientConnection(listener, metric, client, context, connHandler, metrics))
+ .logEnabled(options.getLogActivity())
+ .build();
+ handler.addHandler(conn -> {
+ if (options.getHttp2ConnectionWindowSize() > 0) {
+ conn.setWindowSize(options.getHttp2ConnectionWindowSize());
+ }
+ if (metrics != null) {
+ Object metric = metrics.connected(conn.remoteAddress(), conn.remoteName());
+ conn.metric(metric);
+ }
+ long concurrency = conn.remoteSettings().getMaxConcurrentStreams();
+ if (http2MaxConcurrency > 0) {
+ concurrency = Math.min(concurrency, http2MaxConcurrency);
+ }
+ listener.onConnectSuccess(conn, concurrency, ch, context, weight, http2Weight);
+ });
+ handler.removeHandler(conn -> {
+ if (metrics != null) {
+ metrics.endpointDisconnected(metric, conn.metric());
+ }
+ listener.onDiscard();
+ });
+ } catch (Exception e) {
+ connectFailed(context, ch, listener, e);
+ }
+ }
+
+ private void connectFailed(ContextImpl context, Channel ch, ConnectionListener<HttpClientConnection> listener, Throwable t) {
+ if (ch != null) {
+ try {
+ ch.close();
+ } catch (Exception ignore) {
+ }
+ }
+ listener.onConnectFailure(context, t, weight);
+ }
+}
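
The crossed weight assignments in this new file (http1Weight taken from getHttp2MaxPoolSize(), http2Weight from getMaxPoolSize()) read like a swap, but they line up with the maxWeight = maxPoolSize * http2MaxPoolSize budget created in HttpClientImpl below: each protocol then exhausts the shared budget at exactly its own pool size. That rationale is our reading of the diff, not spelled out in it; a worked check:

  final class WeightBudgetSketch {
    public static void main(String[] args) {
      int maxPoolSize = 5;       // HTTP/1.x pool size
      int http2MaxPoolSize = 2;  // HTTP/2 pool size

      long http1Weight = http2MaxPoolSize;  // weight of one HTTP/1.x connection
      long http2Weight = maxPoolSize;       // weight of one HTTP/2 connection
      long maxWeight = (long) maxPoolSize * http2MaxPoolSize;

      // maxPoolSize HTTP/1.x connections exactly fill the budget...
      System.out.println(maxPoolSize * http1Weight == maxWeight);       // true
      // ...and so do http2MaxPoolSize HTTP/2 connections.
      System.out.println(http2MaxPoolSize * http2Weight == maxWeight);  // true
    }
  }
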
diff --git a/src/main/java/io/vertx/core/http/impl/HttpClientConnection.java b/src/main/java/io/vertx/core/http/impl/HttpClientConnection.java
--- a/src/main/java/io/vertx/core/http/impl/HttpClientConnection.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpClientConnection.java
@@ -16,6 +16,9 @@
package io.vertx.core.http.impl;
+import io.netty.channel.Channel;
+import io.vertx.core.AsyncResult;
+import io.vertx.core.Handler;
import io.vertx.core.http.HttpConnection;
import io.vertx.core.impl.ContextImpl;
@@ -24,18 +27,16 @@
*/
interface HttpClientConnection extends HttpConnection {
- ContextImpl getContext();
+ Channel channel();
void reportBytesWritten(long numberOfBytes);
void reportBytesRead(long s);
- /**
- * Check if the connection is valid for creating streams. The connection might be closed or a {@literal GOAWAY}
- * frame could have been sent or received.
- */
- boolean isValid();
-
void close();
+ void createStream(HttpClientRequestImpl req, Handler<AsyncResult<HttpClientStream>> handler);
+
+ ContextImpl getContext();
+
}
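
HttpClientConnection now hands out streams through an asynchronous completion handler instead of the old synchronous, throwing createStream(). A hedged sketch of what a caller looks like against that shape; Handler and AsyncResult are replaced here by minimal hypothetical stand-ins, not the Vert.x types:

  import java.util.function.Consumer;

  final class CreateStreamSketch {
    interface Stream { void writeHead(); }

    static final class Result {
      final Stream stream; final Throwable cause;
      Result(Stream stream, Throwable cause) { this.stream = stream; this.cause = cause; }
      boolean succeeded() { return cause == null; }
    }

    interface Connection { void createStream(Consumer<Result> handler); }

    static void beginRequest(Connection conn) {
      conn.createStream(ar -> {
        if (ar.succeeded()) {
          ar.stream.writeHead();       // continue the request on the new stream
        } else {
          ar.cause.printStackTrace();  // e.g. stream creation failed with Http2Exception
        }
      });
    }
  }
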
diff --git a/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java b/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java
--- a/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java
@@ -16,32 +16,19 @@
package io.vertx.core.http.impl;
-import io.vertx.core.Closeable;
-import io.vertx.core.Context;
-import io.vertx.core.Future;
-import io.vertx.core.Handler;
-import io.vertx.core.MultiMap;
-import io.vertx.core.VertxException;
-import io.vertx.core.http.HttpClient;
-import io.vertx.core.http.HttpClientOptions;
-import io.vertx.core.http.HttpClientRequest;
-import io.vertx.core.http.HttpClientResponse;
-import io.vertx.core.http.HttpHeaders;
-import io.vertx.core.http.HttpMethod;
-import io.vertx.core.http.HttpVersion;
-import io.vertx.core.http.RequestOptions;
-import io.vertx.core.http.WebSocket;
-import io.vertx.core.http.WebsocketVersion;
+import io.vertx.core.*;
+import io.vertx.core.http.*;
import io.vertx.core.impl.ContextImpl;
+import io.vertx.core.impl.ContextInternal;
import io.vertx.core.impl.VertxInternal;
import io.vertx.core.logging.Logger;
import io.vertx.core.logging.LoggerFactory;
import io.vertx.core.net.ProxyOptions;
import io.vertx.core.net.ProxyType;
import io.vertx.core.net.impl.SSLHelper;
+import io.vertx.core.spi.metrics.HttpClientMetrics;
import io.vertx.core.spi.metrics.Metrics;
import io.vertx.core.spi.metrics.MetricsProvider;
-import io.vertx.core.spi.metrics.VertxMetrics;
import io.vertx.core.streams.ReadStream;
import java.net.MalformedURLException;
@@ -53,6 +40,8 @@
import java.util.Collections;
import java.util.List;
import java.util.Objects;
+import java.util.function.BiConsumer;
+import java.util.function.BiFunction;
import java.util.function.Function;
/**
@@ -107,15 +96,20 @@ public class HttpClientImpl implements HttpClient, MetricsProvider {
private final VertxInternal vertx;
private final HttpClientOptions options;
private final ContextImpl creatingContext;
- private final ConnectionManager connectionManager;
+ private final ConnectionManager websocketCM; // The queue manager for websockets
+ private final ConnectionManager httpCM; // The queue manager for requests
private final Closeable closeHook;
private final ProxyType proxyType;
private final SSLHelper sslHelper;
+ private final HttpClientMetrics metrics;
+ private final boolean keepAlive;
+ private final boolean pipelining;
private volatile boolean closed;
private volatile Function<HttpClientResponse, Future<HttpClientRequest>> redirectHandler = DEFAULT_HANDLER;
public HttpClientImpl(VertxInternal vertx, HttpClientOptions options) {
this.vertx = vertx;
+ this.metrics = vertx.metricsSPI() != null ? vertx.metricsSPI().createMetrics(this, options) : null;
this.options = new HttpClientOptions(options);
List<HttpVersion> alpnVersions = options.getAlpnVersions();
if (alpnVersions == null || alpnVersions.isEmpty()) {
@@ -128,8 +122,11 @@ public HttpClientImpl(VertxInternal vertx, HttpClientOptions options) {
break;
}
}
+ this.keepAlive = options.isKeepAlive();
+ this.pipelining = options.isPipelining();
this.sslHelper = new SSLHelper(options, options.getKeyCertOptions(), options.getTrustOptions()).
setApplicationProtocols(alpnVersions);
+ sslHelper.validate(vertx);
this.creatingContext = vertx.getContext();
closeHook = completionHandler -> {
HttpClientImpl.this.close();
@@ -144,11 +141,19 @@ public HttpClientImpl(VertxInternal vertx, HttpClientOptions options) {
}
creatingContext.addCloseHook(closeHook);
}
- VertxMetrics metrics = vertx.metricsSPI();
- connectionManager = new ConnectionManager(this, metrics != null ? metrics.createMetrics(this, options) : null);
+ if (!keepAlive && pipelining) {
+ throw new IllegalStateException("Cannot have pipelining with no keep alive");
+ }
+ long maxWeight = options.getMaxPoolSize() * options.getHttp2MaxPoolSize();
+ websocketCM = new ConnectionManager(this, metrics, HttpVersion.HTTP_1_1, maxWeight, options.getMaxWaitQueueSize());
+ httpCM = new ConnectionManager(this, metrics, options.getProtocolVersion(), maxWeight, options.getMaxWaitQueueSize());
proxyType = options.getProxyOptions() != null ? options.getProxyOptions().getType() : null;
}
+ HttpClientMetrics metrics() {
+ return metrics;
+ }
+
@Override
public HttpClient websocket(RequestOptions options, Handler<WebSocket> wsConnect) {
return websocket(options, null, wsConnect);
@@ -889,7 +894,11 @@ public synchronized void close() {
if (creatingContext != null) {
creatingContext.removeCloseHook(closeHook);
}
- connectionManager.close();
+ websocketCM.close();
+ httpCM.close();
+ if (metrics != null) {
+ metrics.close();
+ }
}
@Override
@@ -899,7 +908,7 @@ public boolean isMetricsEnabled() {
@Override
public Metrics getMetrics() {
- return connectionManager.metrics();
+ return metrics;
}
@Override
@@ -920,34 +929,29 @@ public HttpClientOptions getOptions() {
return options;
}
- void getConnectionForWebsocket(boolean ssl,
+ private void getConnectionForWebsocket(boolean ssl,
int port,
String host,
- Handler<ClientConnection> handler,
+ Handler<Http1xClientConnection> handler,
Handler<Throwable> connectionExceptionHandler,
ContextImpl context) {
- connectionManager.getConnectionForWebsocket(ssl, port, host, new Waiter(null, context) {
- @Override
- void handleConnection(HttpClientConnection conn) {
- }
- @Override
- void handleStream(HttpClientStream stream) {
- // Use some variance for this
- handler.handle((ClientConnection) stream);
- }
- @Override
- void handleFailure(Throwable failure) {
+ websocketCM.getConnection(host, ssl, port, host, null, (ctx, conn) -> {
+ ctx.executeFromIO(() -> {
+ handler.handle((Http1xClientConnection) conn);
+ });
+ return true;
+ }, (ctx, failure) -> {
+ ctx.executeFromIO(() -> {
connectionExceptionHandler.handle(failure);
- }
- @Override
- boolean isCancelled() {
- return false;
- }
+ });
});
}
- void getConnectionForRequest(String peerHost, boolean ssl, int port, String host, Waiter waiter) {
- connectionManager.getConnectionForRequest(options.getProtocolVersion(), peerHost, ssl, port, host, waiter);
+ void getConnectionForRequest(String peerHost, boolean ssl, int port, String host,
+ Handler<HttpConnection> connectionHandler,
+ BiFunction<ContextInternal, HttpClientConnection, Boolean> onSuccess,
+ BiConsumer<ContextInternal, Throwable> onFailure) {
+ httpCM.getConnection(peerHost, ssl, port, host, connectionHandler, onSuccess, onFailure);
}
/**
@@ -1075,11 +1079,7 @@ public synchronized ReadStream<WebSocket> handler(Handler<WebSocket> handler) {
}
getConnectionForWebsocket(ssl != null ? ssl : options.isSsl(), port, host, conn -> {
conn.exceptionHandler(connectionExceptionHandler);
- if (conn.isValid()) {
- conn.toWebSocket(requestURI, headers, version, subProtocols, options.getMaxWebsocketFrameSize(), wsConnect);
- } else {
- websocket(port, host, requestURI, headers, version, subProtocols, wsConnect);
- }
+ conn.toWebSocket(requestURI, headers, version, subProtocols, options.getMaxWebsocketFrameSize(), wsConnect);
}, connectionExceptionHandler, context);
}
return this;
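
getConnectionForRequest now threads two callbacks into the ConnectionManager: a BiFunction that receives the context and the leased connection and returns whether the lease was actually used (HttpClientRequestImpl returns false when the request was reset or failed while waiting), and a BiConsumer for failures. A sketch of that dispatch contract, with the pool reduced to a hypothetical stub:

  import java.util.function.BiConsumer;
  import java.util.function.BiFunction;

  final class LeaseDispatchSketch {
    interface Ctx {}
    interface Conn {}

    static void deliver(Ctx ctx, Conn conn,
                        BiFunction<Ctx, Conn, Boolean> onSuccess,
                        BiConsumer<Ctx, Throwable> onFailure) {
      boolean used;
      try {
        used = onSuccess.apply(ctx, conn);
      } catch (Throwable t) {
        onFailure.accept(ctx, t);
        return;
      }
      if (!used) {
        // Caller declined the lease (e.g. the request was reset while queued):
        // a real pool would recycle the connection to the next waiter here.
      }
    }
  }
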
diff --git a/src/main/java/io/vertx/core/http/impl/HttpClientRequestImpl.java b/src/main/java/io/vertx/core/http/impl/HttpClientRequestImpl.java
--- a/src/main/java/io/vertx/core/http/impl/HttpClientRequestImpl.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpClientRequestImpl.java
@@ -20,9 +20,7 @@
import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.Unpooled;
import io.vertx.codegen.annotations.Nullable;
-import io.vertx.core.Future;
-import io.vertx.core.Handler;
-import io.vertx.core.MultiMap;
+import io.vertx.core.*;
import io.vertx.core.buffer.Buffer;
import io.vertx.core.http.CaseInsensitiveHeaders;
import io.vertx.core.http.HttpClientRequest;
@@ -31,7 +29,10 @@
import io.vertx.core.http.HttpFrame;
import io.vertx.core.http.HttpMethod;
import io.vertx.core.http.HttpVersion;
+import io.vertx.core.impl.ContextImpl;
import io.vertx.core.impl.VertxInternal;
+import io.vertx.core.logging.Logger;
+import io.vertx.core.logging.LoggerFactory;
import io.vertx.core.net.NetSocket;
import java.util.List;
@@ -50,6 +51,8 @@
*/
public class HttpClientRequestImpl extends HttpClientRequestBase implements HttpClientRequest {
+ static final Logger log = LoggerFactory.getLogger(ConnectionManager.class);
+
private final VertxInternal vertx;
private Handler<HttpClientResponse> respHandler;
private Handler<Void> endHandler;
@@ -62,20 +65,20 @@ public class HttpClientRequestImpl extends HttpClientRequestBase implements Http
private Handler<Void> drainHandler;
private Handler<HttpClientRequest> pushHandler;
private Handler<HttpConnection> connectionHandler;
- private boolean headWritten;
+ private Handler<HttpVersion> headersCompletionHandler;
private boolean completed;
private Handler<Void> completionHandler;
private Long reset;
- private HttpClientResponseImpl response;
private ByteBuf pendingChunks;
private CompositeByteBuf cachedChunks;
private int pendingMaxSize = -1;
private int followRedirects;
private boolean connecting;
- private boolean writeHead;
private long written;
private CaseInsensitiveHeaders headers;
+ // completed => drainHandler = null
+
HttpClientRequestImpl(HttpClientImpl client, boolean ssl, HttpMethod method, String host, int port,
String relativeURI, VertxInternal vertx) {
super(client, ssl, method, host, port, relativeURI);
@@ -139,6 +142,10 @@ public HttpClientRequest endHandler(Handler<Void> endHandler) {
@Override
public HttpClientRequestImpl setChunked(boolean chunked) {
+
+    // If connecting -> buffer the write in the pending list
+    // If connected -> check whether the buffer needs to be written now
+
synchronized (getLock()) {
checkComplete();
if (written > 0) {
@@ -217,36 +224,6 @@ public HttpClientRequest putHeader(String name, Iterable<String> values) {
}
}
- @Override
- public HttpClientRequestImpl write(Buffer chunk) {
- synchronized (getLock()) {
- checkComplete();
- checkResponseHandler();
- ByteBuf buf = chunk.getByteBuf();
- write(buf, false);
- return this;
- }
- }
-
- @Override
- public HttpClientRequestImpl write(String chunk) {
- synchronized (getLock()) {
- checkComplete();
- checkResponseHandler();
- return write(Buffer.buffer(chunk));
- }
- }
-
- @Override
- public HttpClientRequestImpl write(String chunk, String enc) {
- synchronized (getLock()) {
- Objects.requireNonNull(enc, "no null encoding accepted");
- checkComplete();
- checkResponseHandler();
- return write(Buffer.buffer(chunk, enc));
- }
- }
-
@Override
public HttpClientRequest setWriteQueueMaxSize(int maxSize) {
synchronized (getLock()) {
@@ -312,53 +289,15 @@ public HttpClientRequest sendHead(Handler<HttpVersion> completionHandler) {
checkComplete();
checkResponseHandler();
if (stream != null) {
- if (!headWritten) {
- writeHead();
- if (completionHandler != null) {
- completionHandler.handle(stream.version());
- }
- }
+ throw new IllegalStateException("Head already written");
} else {
- connect(completionHandler);
- writeHead = true;
+ headersCompletionHandler = completionHandler;
+ connect();
}
return this;
}
}
- @Override
- public void end(String chunk) {
- synchronized (getLock()) {
- end(Buffer.buffer(chunk));
- }
- }
-
- @Override
- public void end(String chunk, String enc) {
- synchronized (getLock()) {
- Objects.requireNonNull(enc, "no null encoding accepted");
- end(Buffer.buffer(chunk, enc));
- }
- }
-
- @Override
- public void end(Buffer chunk) {
- synchronized (getLock()) {
- checkComplete();
- checkResponseHandler();
- write(chunk.getByteBuf(), true);
- }
- }
-
- @Override
- public void end() {
- synchronized (getLock()) {
- checkComplete();
- checkResponseHandler();
- write(null, true);
- }
- }
-
@Override
public HttpClientRequest putHeader(CharSequence name, CharSequence value) {
synchronized (getLock()) {
@@ -390,8 +329,7 @@ public boolean reset(long code) {
synchronized (getLock()) {
if (reset == null) {
reset = code;
- if (!completed) {
- completed = true;
+ if (tryComplete()) {
if (completionHandler != null) {
completionHandler.handle(null);
}
@@ -405,6 +343,16 @@ public boolean reset(long code) {
}
}
+ private boolean tryComplete() {
+ if (!completed) {
+ completed = true;
+ drainHandler = null;
+ return true;
+ } else {
+ return false;
+ }
+ }
+
@Override
public HttpConnection connection() {
synchronized (getLock()) {
@@ -433,7 +381,7 @@ public HttpClientRequest writeCustomFrame(int type, int flags, Buffer payload) {
void handleDrained() {
synchronized (getLock()) {
- if (!completed && drainHandler != null) {
+ if (drainHandler != null) {
try {
drainHandler.handle(null);
} catch (Throwable t) {
@@ -443,7 +391,7 @@ void handleDrained() {
}
}
- private void handleNextRequest(HttpClientResponse resp, HttpClientRequestImpl next, long timeoutMs) {
+ private void handleNextRequest(HttpClientRequestImpl next, long timeoutMs) {
next.handler(respHandler);
next.exceptionHandler(exceptionHandler());
exceptionHandler(null);
@@ -507,14 +455,13 @@ else if (completed) {
protected void doHandleResponse(HttpClientResponseImpl resp, long timeoutMs) {
if (reset == null) {
- response = resp;
int statusCode = resp.statusCode();
if (followRedirects > 0 && statusCode >= 300 && statusCode < 400) {
Future<HttpClientRequest> next = client.redirectHandler().apply(resp);
if (next != null) {
next.setHandler(ar -> {
if (ar.succeeded()) {
- handleNextRequest(resp, (HttpClientRequestImpl) ar.result(), timeoutMs);
+ handleNextRequest((HttpClientRequestImpl) ar.result(), timeoutMs);
} else {
handleException(ar.cause());
}
@@ -691,42 +638,13 @@ public HttpClientResponse exceptionHandler(Handler<Throwable> handler) {
};
}
- private synchronized void connect(Handler<HttpVersion> headersCompletionHandler) {
+ private synchronized void connect() {
if (!connecting) {
if (method == HttpMethod.OTHER && rawMethod == null) {
throw new IllegalStateException("You must provide a rawMethod when using an HttpMethod.OTHER method");
}
- Waiter waiter = new Waiter(this, vertx.getContext()) {
-
- @Override
- void handleFailure(Throwable failure) {
- handleException(failure);
- }
-
- @Override
- void handleConnection(HttpClientConnection conn) {
- synchronized (HttpClientRequestImpl.this) {
- if (connectionHandler != null) {
- connectionHandler.handle(conn);
- }
- }
- }
-
- @Override
- void handleStream(HttpClientStream stream) {
- connected(stream, headersCompletionHandler);
- }
-
- @Override
- boolean isCancelled() {
- // No need to synchronize as the thread is the same that set exceptionOccurred to true
- // exceptionOccurred=true getting the connection => it's a TimeoutException
- return exceptionOccurred != null || reset != null;
- }
- };
-
String peerHost;
if (hostHeader != null) {
int idx = hostHeader.lastIndexOf(':');
@@ -742,18 +660,45 @@ boolean isCancelled() {
// We defer actual connection until the first part of body is written or end is called
// This gives the user an opportunity to set an exception handler before connecting so
// they can capture any exceptions on connection
- client.getConnectionForRequest(peerHost, ssl, port, host, waiter);
+ client.getConnectionForRequest(peerHost, ssl, port, host, connectionHandler, (ctx, conn) -> {
+      // No need to synchronize: this runs on the same thread that sets exceptionOccurred,
+      // and exceptionOccurred != null while getting the connection means a TimeoutException
+ if (exceptionOccurred != null || reset != null) {
+ return false;
+ }
+ // checkContext(ctx);
+ conn.createStream(HttpClientRequestImpl.this, ar -> {
+ if (ar.succeeded()) {
+ HttpClientStream stream = ar.result();
+ ctx.executeFromIO(() -> {
+ connected(stream, HttpClientRequestImpl.this.headersCompletionHandler);
+ });
+ } else {
+ throw new RuntimeException(ar.cause());
+ }
+ });
+ return true;
+ }, (ctx, failure) -> {
+ ctx.executeFromIO(() -> {
+ handleException(failure);
+ });
+ });
connecting = true;
}
}
+ synchronized void retry() {
+ connecting = false;
+ connect();
+ }
+
private void connected(HttpClientStream stream, Handler<HttpVersion> headersCompletionHandler) {
HttpClientConnection conn = stream.connection();
synchronized (this) {
this.stream = stream;
- stream.beginRequest(this);
+ stream.beginRequest();
// If anything was written or the request ended before we got the connection, then
// we need to write it now
@@ -768,15 +713,13 @@ private void connected(HttpClientStream stream, Handler<HttpVersion> headersComp
if (completed) {
// we also need to write the head so optimize this and write all out in once
- writeHeadWithContent(pending, true);
-
+ stream.writeHeadWithContent(method, rawMethod, uri, headers, hostHeader(), chunked, pending, true);
conn.reportBytesWritten(written);
-
if (respHandler != null) {
this.stream.endRequest();
}
} else {
- writeHeadWithContent(pending, false);
+ stream.writeHeadWithContent(method, rawMethod, uri, headers, hostHeader(), chunked, pending, false);
if (headersCompletionHandler != null) {
headersCompletionHandler.handle(stream.version());
}
@@ -784,19 +727,15 @@ private void connected(HttpClientStream stream, Handler<HttpVersion> headersComp
} else {
if (completed) {
// we also need to write the head so optimize this and write all out in once
- writeHeadWithContent(null, true);
-
+ stream.writeHeadWithContent(method, rawMethod, uri, headers, hostHeader(), chunked, null, true);
conn.reportBytesWritten(written);
-
if (respHandler != null) {
this.stream.endRequest();
}
} else {
- if (writeHead) {
- writeHead();
- if (headersCompletionHandler != null) {
- headersCompletionHandler.handle(stream.version());
- }
+ stream.writeHead(method, rawMethod, uri, headers, hostHeader(), chunked);
+ if (headersCompletionHandler != null) {
+ headersCompletionHandler.handle(stream.version());
}
}
}
@@ -811,17 +750,54 @@ private boolean contentLengthSet() {
return headers != null && headers().contains(CONTENT_LENGTH);
}
- private void writeHead() {
- stream.writeHead(method, rawMethod, uri, headers, hostHeader(), chunked);
- headWritten = true;
+ @Override
+ public void end(String chunk) {
+ end(Buffer.buffer(chunk));
+ }
+
+ @Override
+ public void end(String chunk, String enc) {
+ Objects.requireNonNull(enc, "no null encoding accepted");
+ end(Buffer.buffer(chunk, enc));
+ }
+
+ @Override
+ public void end(Buffer chunk) {
+ write(chunk.getByteBuf(), true);
+ }
+
+ @Override
+ public void end() {
+ write(null, true);
+ }
+
+ @Override
+ public HttpClientRequestImpl write(Buffer chunk) {
+ ByteBuf buf = chunk.getByteBuf();
+ write(buf, false);
+ return this;
+ }
+
+ @Override
+ public HttpClientRequestImpl write(String chunk) {
+ return write(Buffer.buffer(chunk));
}
- private void writeHeadWithContent(ByteBuf buf, boolean end) {
- stream.writeHeadWithContent(method, rawMethod, uri, headers, hostHeader(), chunked, buf, end);
- headWritten = true;
+ @Override
+ public HttpClientRequestImpl write(String chunk, String enc) {
+ Objects.requireNonNull(enc, "no null encoding accepted");
+ return write(Buffer.buffer(chunk, enc));
}
private void write(ByteBuf buff, boolean end) {
+ synchronized (getLock()) {
+ checkComplete();
+ checkResponseHandler();
+ _write(buff, end);
+ }
+ }
+
+ private void _write(ByteBuf buff, boolean end) {
if (buff == null && !end) {
// nothing to write to the connection just return
return;
@@ -864,33 +840,26 @@ private void write(ByteBuf buff, boolean end) {
pending.addComponent(true, buff);
}
}
- connect(null);
- } else {
- if (!headWritten) {
- writeHeadWithContent(buff, end);
- } else {
- stream.writeBuffer(buff, end);
+ if (end) {
+ tryComplete();
+ if (completionHandler != null) {
+ completionHandler.handle(null);
+ }
}
+ connect();
+ } else {
+ stream.writeBuffer(buff, end);
if (end) {
stream.connection().reportBytesWritten(written);
if (respHandler != null) {
stream.endRequest();
}
+ tryComplete();
+ if (completionHandler != null) {
+ completionHandler.handle(null);
+ }
}
}
-
- if (end) {
- completed = true;
- if (completionHandler != null) {
- completionHandler.handle(null);
- }
- }
- }
-
- void handleResponseEnd() {
- synchronized (getLock()) {
- response = null;
- }
}
protected void checkComplete() {
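The end/write overloads above all funnel into the private write(ByteBuf, boolean), which now also owns the completion bookkeeping, so a caller can write before a connection exists and the chunks are buffered until connected() flushes them. A minimal usage sketch of that public surface, assuming a plain Vert.x 3.x client (the host, port, and path are illustrative):

import io.vertx.core.Vertx;
import io.vertx.core.buffer.Buffer;
import io.vertx.core.http.HttpClient;
import io.vertx.core.http.HttpClientRequest;

public class WriteEndSketch {
  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();
    HttpClient client = vertx.createHttpClient();
    HttpClientRequest req = client.post(8080, "localhost", "/upload", resp ->
        System.out.println("status: " + resp.statusCode()));
    req.setChunked(true);
    // Safe before the connection is acquired: chunks land in `pending`
    // and are flushed by connected() once a stream is available.
    req.write("part1");                  // String -> Buffer -> ByteBuf
    req.write(Buffer.buffer("part2"));   // Buffer -> ByteBuf
    req.end("done");                     // last chunk; triggers tryComplete()
  }
}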
diff --git a/src/main/java/io/vertx/core/http/impl/HttpClientStream.java b/src/main/java/io/vertx/core/http/impl/HttpClientStream.java
--- a/src/main/java/io/vertx/core/http/impl/HttpClientStream.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpClientStream.java
@@ -54,7 +54,7 @@ interface HttpClientStream {
void reset(long code);
- void beginRequest(HttpClientRequestImpl request);
+ void beginRequest();
void endRequest();
NetSocket createNetSocket();
diff --git a/src/main/java/io/vertx/core/http/impl/HttpServerImpl.java b/src/main/java/io/vertx/core/http/impl/HttpServerImpl.java
--- a/src/main/java/io/vertx/core/http/impl/HttpServerImpl.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpServerImpl.java
@@ -122,7 +122,7 @@ public class HttpServerImpl implements HttpServer, Closeable, MetricsProvider {
private final VertxInternal vertx;
private final SSLHelper sslHelper;
private final ContextImpl creatingContext;
- private final Map<Channel, ServerConnection> connectionMap = new ConcurrentHashMap<>();
+ private final Map<Channel, Http1xServerConnection> connectionMap = new ConcurrentHashMap<>();
private final Map<Channel, Http2ServerConnection> connectionMap2 = new ConcurrentHashMap<>();
private final VertxEventLoopGroup availableWorkers = new VertxEventLoopGroup();
private final HandlerManager<HttpHandlers> httpHandlerMgr = new HandlerManager<>(availableWorkers);
@@ -395,26 +395,37 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws E
}
private VertxHttp2ConnectionHandler<Http2ServerConnection> setHandler(HandlerHolder<HttpHandlers> holder, Http2Settings upgrade, Channel ch) {
- return new VertxHttp2ConnectionHandlerBuilder<Http2ServerConnection>(ch)
- .connectionMap(connectionMap2)
- .server(true)
- .serverUpgrade(upgrade)
- .useCompression(options.isCompressionSupported())
- .useDecompression(options.isDecompressionSupported())
- .compressionLevel(options.getCompressionLevel())
- .initialSettings(options.getInitialSettings())
- .connectionFactory(connHandler -> {
- Http2ServerConnection conn = new Http2ServerConnection(holder.context, serverOrigin, connHandler, options, holder.handler.requesthHandler, metrics);
- if (metrics != null) {
- conn.metric(metrics.connected(conn.remoteAddress(), conn.remoteName()));
- }
- if (options.getHttp2ConnectionWindowSize() > 0) {
- conn.setWindowSize(options.getHttp2ConnectionWindowSize());
- }
- return conn;
- })
- .logEnabled(logEnabled)
- .build();
+ VertxHttp2ConnectionHandler<Http2ServerConnection> handler = new VertxHttp2ConnectionHandlerBuilder<Http2ServerConnection>(ch)
+ .server(true)
+ .serverUpgrade(upgrade)
+ .useCompression(options.isCompressionSupported())
+ .useDecompression(options.isDecompressionSupported())
+ .compressionLevel(options.getCompressionLevel())
+ .initialSettings(options.getInitialSettings())
+ .connectionFactory(connHandler -> {
+ Http2ServerConnection conn = new Http2ServerConnection(holder.context, serverOrigin, connHandler, options, holder.handler.requesthHandler, metrics);
+ if (metrics != null) {
+ conn.metric(metrics.connected(conn.remoteAddress(), conn.remoteName()));
+ }
+ if (options.getHttp2ConnectionWindowSize() > 0) {
+ conn.setWindowSize(options.getHttp2ConnectionWindowSize());
+ }
+ return conn;
+ })
+ .logEnabled(logEnabled)
+ .build();
+ handler.addHandler(conn -> {
+ connectionMap2.put(conn.channel(), conn);
+ if (holder.handler.connectionHandler != null) {
+ holder.context.executeFromIO(() -> {
+ holder.handler.connectionHandler.handle(conn);
+ });
+ }
+ });
+ handler.removeHandler(conn -> {
+ connectionMap2.remove(conn.channel());
+ });
+ return handler;
}
private void configureHttp1(ChannelPipeline pipeline) {
@@ -444,11 +455,11 @@ private void configureHttp1(ChannelPipeline pipeline) {
pipeline.addLast("h2c", new Http2UpgradeHandler());
}
HandlerHolder<HttpHandlers> holder = httpHandlerMgr.chooseHandler(pipeline.channel().eventLoop());
- ServerHandler handler;
+ Http1xServerHandler handler;
if (DISABLE_WEBSOCKETS) {
// As a performance optimisation you can set a system property to disable websockets altogether which avoids
// some casting and a header check
- handler = new ServerHandler(sslHelper, options, serverOrigin, holder, metrics);
+ handler = new Http1xServerHandler(sslHelper, options, serverOrigin, holder, metrics);
} else {
handler = new ServerHandlerWithWebSockets(sslHelper, options, serverOrigin, holder, metrics);
}
@@ -464,12 +475,7 @@ private void configureHttp1(ChannelPipeline pipeline) {
public void handleHttp2(Channel ch) {
HandlerHolder<HttpHandlers> holder = httpHandlerMgr.chooseHandler(ch.eventLoop());
configureHttp2(ch.pipeline());
- VertxHttp2ConnectionHandler<Http2ServerConnection> handler = setHandler(holder, null, ch);
- if (holder.handler.connectionHandler != null) {
- holder.context.executeFromIO(() -> {
- holder.handler.connectionHandler.handle(handler.connection);
- });
- }
+ setHandler(holder, null, ch);
}
public void configureHttp2(ChannelPipeline pipeline) {
@@ -579,7 +585,7 @@ private void actualClose(final ContextImpl closeContext, final Handler<AsyncResu
ContextImpl currCon = vertx.getContext();
- for (ServerConnection conn : connectionMap.values()) {
+ for (Http1xServerConnection conn : connectionMap.values()) {
conn.close();
}
for (Http2ServerConnection conn : connectionMap2.values()) {
@@ -611,7 +617,7 @@ private void executeCloseDone(final ContextImpl closeContext, final Handler<Asyn
}
}
- public class ServerHandlerWithWebSockets extends ServerHandler {
+ public class ServerHandlerWithWebSockets extends Http1xServerHandler {
private boolean closeFrameSent;
private FullHttpRequest wsRequest;
@@ -623,7 +629,7 @@ public ServerHandlerWithWebSockets(SSLHelper sslHelper, HttpServerOptions option
}
@Override
- protected void handleMessage(ServerConnection conn, ContextImpl context, ChannelHandlerContext chctx, Object msg) throws Exception {
+ protected void handleMessage(Http1xServerConnection conn, ContextImpl context, ChannelHandlerContext chctx, Object msg) throws Exception {
Channel ch = chctx.channel();
if (msg instanceof HttpRequest) {
final HttpRequest request = (HttpRequest) msg;
@@ -708,7 +714,7 @@ protected void handleMessage(ServerConnection conn, ContextImpl context, Channel
}
}
- protected void handshake(ServerConnection conn, FullHttpRequest request, Channel ch, ChannelHandlerContext ctx) throws Exception {
+ protected void handshake(Http1xServerConnection conn, FullHttpRequest request, Channel ch, ChannelHandlerContext ctx) throws Exception {
WebSocketServerHandshaker shake = createHandshaker(conn, ch, request);
if (shake == null) {
diff --git a/src/main/java/io/vertx/core/http/impl/HttpServerRequestImpl.java b/src/main/java/io/vertx/core/http/impl/HttpServerRequestImpl.java
--- a/src/main/java/io/vertx/core/http/impl/HttpServerRequestImpl.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpServerRequestImpl.java
@@ -64,7 +64,7 @@ public class HttpServerRequestImpl implements HttpServerRequest {
private static final Logger log = LoggerFactory.getLogger(HttpServerRequestImpl.class);
- private final ServerConnection conn;
+ private final Http1xServerConnection conn;
private final HttpRequest request;
private final HttpServerResponse response;
@@ -91,7 +91,7 @@ public class HttpServerRequestImpl implements HttpServerRequest {
private boolean ended;
- HttpServerRequestImpl(ServerConnection conn,
+ HttpServerRequestImpl(Http1xServerConnection conn,
HttpRequest request,
HttpServerResponse response) {
this.conn = conn;
diff --git a/src/main/java/io/vertx/core/http/impl/HttpServerResponseImpl.java b/src/main/java/io/vertx/core/http/impl/HttpServerResponseImpl.java
--- a/src/main/java/io/vertx/core/http/impl/HttpServerResponseImpl.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpServerResponseImpl.java
@@ -59,7 +59,7 @@ public class HttpServerResponseImpl implements HttpServerResponse {
private static final Logger log = LoggerFactory.getLogger(HttpServerResponseImpl.class);
private final VertxInternal vertx;
- private final ServerConnection conn;
+ private final Http1xServerConnection conn;
private HttpResponseStatus status;
private final HttpVersion version;
private final boolean keepAlive;
@@ -81,7 +81,7 @@ public class HttpServerResponseImpl implements HttpServerResponse {
private String statusMessage;
private long bytesWritten;
- HttpServerResponseImpl(final VertxInternal vertx, ServerConnection conn, HttpRequest request) {
+ HttpServerResponseImpl(final VertxInternal vertx, Http1xServerConnection conn, HttpRequest request) {
this.vertx = vertx;
this.conn = conn;
this.version = request.getProtocolVersion();
diff --git a/src/main/java/io/vertx/core/http/impl/HttpUtils.java b/src/main/java/io/vertx/core/http/impl/HttpUtils.java
--- a/src/main/java/io/vertx/core/http/impl/HttpUtils.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpUtils.java
@@ -457,6 +457,16 @@ static HttpVersion toNettyHttpVersion(io.vertx.core.http.HttpVersion version) {
}
}
+ static io.vertx.core.http.HttpVersion toVertxHttpVersion(HttpVersion version) {
+ if (version == io.netty.handler.codec.http.HttpVersion.HTTP_1_0) {
+ return io.vertx.core.http.HttpVersion.HTTP_1_0;
+ } else if (version == io.netty.handler.codec.http.HttpVersion.HTTP_1_1) {
+ return io.vertx.core.http.HttpVersion.HTTP_1_1;
+ } else {
+ return null;
+ }
+ }
+
static io.vertx.core.http.HttpMethod toVertxMethod(String method) {
try {
return io.vertx.core.http.HttpMethod.valueOf(method);
diff --git a/src/main/java/io/vertx/core/http/impl/VertxHttp2ConnectionHandler.java b/src/main/java/io/vertx/core/http/impl/VertxHttp2ConnectionHandler.java
--- a/src/main/java/io/vertx/core/http/impl/VertxHttp2ConnectionHandler.java
+++ b/src/main/java/io/vertx/core/http/impl/VertxHttp2ConnectionHandler.java
@@ -17,20 +17,10 @@
package io.vertx.core.http.impl;
import io.netty.buffer.ByteBuf;
-import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;
-import io.netty.handler.codec.http2.Http2Connection;
-import io.netty.handler.codec.http2.Http2ConnectionDecoder;
-import io.netty.handler.codec.http2.Http2ConnectionEncoder;
-import io.netty.handler.codec.http2.Http2ConnectionHandler;
-import io.netty.handler.codec.http2.Http2Exception;
-import io.netty.handler.codec.http2.Http2Flags;
-import io.netty.handler.codec.http2.Http2Headers;
-import io.netty.handler.codec.http2.Http2RemoteFlowController;
-import io.netty.handler.codec.http2.Http2Settings;
-import io.netty.handler.codec.http2.Http2Stream;
+import io.netty.handler.codec.http2.*;
import io.netty.handler.timeout.IdleState;
import io.netty.handler.timeout.IdleStateEvent;
import io.netty.util.concurrent.EventExecutor;
@@ -38,24 +28,29 @@
import io.vertx.core.Future;
import io.vertx.core.Handler;
-import java.util.Map;
+import java.util.function.Function;
/**
* @author <a href="mailto:julien@julienviet.com">Julien Viet</a>
*/
-class VertxHttp2ConnectionHandler<C extends Http2ConnectionBase> extends Http2ConnectionHandler implements Http2Connection.Listener {
+class VertxHttp2ConnectionHandler<C extends Http2ConnectionBase> extends Http2ConnectionHandler implements Http2FrameListener, Http2Connection.Listener {
- private final Map<Channel, ? super C> connectionMap;
- C connection;
+ private final Function<VertxHttp2ConnectionHandler<C>, C> connectionFactory;
+ private C connection;
private ChannelHandlerContext chctx;
+ private Handler<C> addHandler;
+ private Handler<C> removeHandler;
+ private final boolean useDecompressor;
public VertxHttp2ConnectionHandler(
- Map<Channel, ? super C> connectionMap,
+ Function<VertxHttp2ConnectionHandler<C>, C> connectionFactory,
+ boolean useDecompressor,
Http2ConnectionDecoder decoder,
Http2ConnectionEncoder encoder,
Http2Settings initialSettings) {
super(decoder, encoder, initialSettings);
- this.connectionMap = connectionMap;
+ this.connectionFactory = connectionFactory;
+ this.useDecompressor = useDecompressor;
encoder().flowController().listener(s -> {
if (connection != null) {
connection.onStreamwritabilityChanged(s);
@@ -68,17 +63,34 @@ public ChannelHandlerContext context() {
return chctx;
}
+ /**
+ * Set an handler to be called when the connection is set on this handler.
+ *
+ * @param handler the handler to be notified
+ * @return this
+ */
+ public VertxHttp2ConnectionHandler<C> addHandler(Handler<C> handler) {
+ this.addHandler = handler;
+ return this;
+ }
+
+ /**
+ * Set an handler to be called when the connection is unset from this handler.
+ *
+ * @param handler the handler to be notified
+ * @return this
+ */
+ public VertxHttp2ConnectionHandler<C> removeHandler(Handler<C> handler) {
+ this.removeHandler = handler;
+ return this;
+ }
+
@Override
public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
super.handlerAdded(ctx);
chctx = ctx;
}
- void init(C conn) {
- connection = conn;
- connectionMap.put(chctx.channel(), connection);
- }
-
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
super.exceptionCaught(ctx, cause);
@@ -88,13 +100,20 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws E
@Override
public void channelActive(ChannelHandlerContext ctx) throws Exception {
super.channelActive(ctx);
+
+ // super call writes the connection preface
+ // we need to flush to send it
+ // this is called only on the client
+ ctx.flush();
}
@Override
public void channelInactive(ChannelHandlerContext ctx) throws Exception {
super.channelInactive(ctx);
- connectionMap.remove(ctx.channel());
connection.getContext().executeFromIO(connection::handleClosed);
+ if (removeHandler != null) {
+ removeHandler.handle(connection);
+ }
}
@Override
@@ -321,4 +340,81 @@ void writePushPromise(int streamId, Http2Headers headers, Handler<AsyncResult<In
private void _writePushPromise(int streamId, int promisedStreamId, Http2Headers headers, ChannelPromise promise) {
encoder().writePushPromise(chctx, streamId, promisedStreamId, headers, 0, promise);
}
+
+ // Http2FrameListener
+
+ @Override
+ public int onDataRead(ChannelHandlerContext ctx, int streamId, ByteBuf data, int padding, boolean endOfStream) throws Http2Exception {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void onHeadersRead(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int padding, boolean endOfStream) throws Http2Exception {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void onHeadersRead(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int streamDependency, short weight, boolean exclusive, int padding, boolean endOfStream) throws Http2Exception {
+ assert connection != null;
+ connection.onHeadersRead(ctx, streamId, headers, streamDependency, weight, exclusive, padding, endOfStream);
+ }
+
+ @Override
+ public void onPriorityRead(ChannelHandlerContext ctx, int streamId, int streamDependency, short weight, boolean exclusive) throws Http2Exception {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void onRstStreamRead(ChannelHandlerContext ctx, int streamId, long errorCode) throws Http2Exception {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void onSettingsAckRead(ChannelHandlerContext ctx) throws Http2Exception {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void onSettingsRead(ChannelHandlerContext ctx, Http2Settings settings) throws Http2Exception {
+ connection = connectionFactory.apply(this);
+ if (useDecompressor) {
+ decoder().frameListener(new DelegatingDecompressorFrameListener(decoder().connection(), connection));
+ } else {
+ decoder().frameListener(connection);
+ }
+ connection.onSettingsRead(ctx, settings);
+ if (addHandler != null) {
+ addHandler.handle(connection);
+ }
+ }
+
+ @Override
+ public void onPingRead(ChannelHandlerContext ctx, ByteBuf data) throws Http2Exception {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void onPingAckRead(ChannelHandlerContext ctx, ByteBuf data) throws Http2Exception {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void onPushPromiseRead(ChannelHandlerContext ctx, int streamId, int promisedStreamId, Http2Headers headers, int padding) throws Http2Exception {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void onGoAwayRead(ChannelHandlerContext ctx, int lastStreamId, long errorCode, ByteBuf debugData) throws Http2Exception {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void onWindowUpdateRead(ChannelHandlerContext ctx, int streamId, int windowSizeIncrement) throws Http2Exception {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void onUnknownFrame(ChannelHandlerContext ctx, byte frameType, int streamId, Http2Flags flags, ByteBuf payload) throws Http2Exception {
+ throw new UnsupportedOperationException();
+ }
}
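Since the connection is now created in onSettingsRead rather than at build time, the addHandler/removeHandler hooks are how callers learn about the connection lifecycle. A wiring sketch in the style of the server code earlier in this patch (`ch`, `createConnection`, and `connections` are illustrative stand-ins, not names from the source):

// Sketch only: `ch`, `createConnection`, and `connections` are placeholders.
VertxHttp2ConnectionHandler<Http2ServerConnection> handler =
    new VertxHttp2ConnectionHandlerBuilder<Http2ServerConnection>(ch)
        .server(true)
        .connectionFactory(connHandler -> createConnection(connHandler))
        .build();
handler.addHandler(conn -> connections.put(conn.channel(), conn));    // settings received, connection usable
handler.removeHandler(conn -> connections.remove(conn.channel()));    // channel went inactive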
diff --git a/src/main/java/io/vertx/core/http/impl/VertxHttp2ConnectionHandlerBuilder.java b/src/main/java/io/vertx/core/http/impl/VertxHttp2ConnectionHandlerBuilder.java
--- a/src/main/java/io/vertx/core/http/impl/VertxHttp2ConnectionHandlerBuilder.java
+++ b/src/main/java/io/vertx/core/http/impl/VertxHttp2ConnectionHandlerBuilder.java
@@ -21,7 +21,6 @@
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.http2.AbstractHttp2ConnectionHandlerBuilder;
import io.netty.handler.codec.http2.CompressorHttp2ConnectionEncoder;
-import io.netty.handler.codec.http2.DelegatingDecompressorFrameListener;
import io.netty.handler.codec.http2.Http2ConnectionDecoder;
import io.netty.handler.codec.http2.Http2ConnectionEncoder;
import io.netty.handler.codec.http2.Http2Exception;
@@ -33,7 +32,6 @@
import io.netty.handler.logging.LogLevel;
import io.vertx.core.http.HttpServerOptions;
-import java.util.Map;
import java.util.function.Function;
/**
@@ -44,7 +42,6 @@
class VertxHttp2ConnectionHandlerBuilder<C extends Http2ConnectionBase> extends AbstractHttp2ConnectionHandlerBuilder<VertxHttp2ConnectionHandler<C>, VertxHttp2ConnectionHandlerBuilder<C>> {
private final Channel channel;
- private Map<Channel, ? super C> connectionMap;
private boolean useCompression;
private boolean useDecompression;
private int compressionLevel = HttpServerOptions.DEFAULT_COMPRESSION_LEVEL;
@@ -62,11 +59,6 @@ protected VertxHttp2ConnectionHandlerBuilder<C> server(boolean isServer) {
return super.server(isServer);
}
- VertxHttp2ConnectionHandlerBuilder<C> connectionMap(Map<Channel, ? super C> connectionMap) {
- this.connectionMap = connectionMap;
- return this;
- }
-
VertxHttp2ConnectionHandlerBuilder<C> initialSettings(io.vertx.core.http.Http2Settings settings) {
this.initialSettings = settings;
return this;
@@ -88,22 +80,22 @@ public VertxHttp2ConnectionHandlerBuilder<C> serverUpgrade(Http2Settings upgrade
}
/**
- * This method allows to set the compression level to be used in the http/2 connection encoder
- * (for data sent to client) when compression support is turned on (@see useCompression) and
+ * This method allows to set the compression level to be used in the http/2 connection encoder
+ * (for data sent to client) when compression support is turned on (@see useCompression) and
 * the client advertises to support deflate/gzip compression in the Accept-Encoding header
- *
+ *
* default value is : 6 (Netty legacy)
- *
- * While one can think that best value is always the maximum compression ratio,
- * there's a trade-off to consider: the most compressed level requires the most computatinal work to compress/decompress,
- * E.g. you have it set fairly high on a high-volume website, you may experience performance degradation
- * and latency on resource serving due to CPU overload, and however - as the comptational work is required also client side
+ *
+ * While one can think that best value is always the maximum compression ratio,
+ * there's a trade-off to consider: the most compressed level requires the most computatinal work to compress/decompress,
+ * E.g. you have it set fairly high on a high-volume website, you may experience performance degradation
+ * and latency on resource serving due to CPU overload, and however - as the comptational work is required also client side
* while decompressing - setting an higher compression level can result in an overall higher page load time
* especially nowadays when many clients are handled mobile devices with a low CPU profile.
- *
+ *
* see also: http://www.gzip.org/algorithm.txt
- *
- * @param compressionLevel integer 1-9, 1 means use fastest algorithm, 9 slower algorithm but better compression ratio
+ *
+ * @param compressionLevel integer 1-9, 1 means use fastest algorithm, 9 slower algorithm but better compression ratio
* @return a reference to this instance for fulent API coding style
*/
VertxHttp2ConnectionHandlerBuilder<C> compressionLevel(int compressionLevel) {
@@ -115,7 +107,7 @@ VertxHttp2ConnectionHandlerBuilder<C> useDecompression(boolean useDecompression)
this.useDecompression = useDecompression;
return this;
}
-
+
VertxHttp2ConnectionHandlerBuilder<C> connectionFactory(Function<VertxHttp2ConnectionHandler<C>, C> connectionFactory) {
this.connectionFactory = connectionFactory;
return this;
@@ -198,30 +190,20 @@ protected VertxHttp2ConnectionHandler<C> build(Http2ConnectionDecoder decoder, H
if (useCompression) {
encoder = new CompressorHttp2ConnectionEncoder(encoder,compressionLevel,CompressorHttp2ConnectionEncoder.DEFAULT_WINDOW_BITS,CompressorHttp2ConnectionEncoder.DEFAULT_MEM_LEVEL);
}
- VertxHttp2ConnectionHandler<C> handler = new VertxHttp2ConnectionHandler<>(connectionMap, decoder, encoder, initialSettings);
+ VertxHttp2ConnectionHandler<C> handler = new VertxHttp2ConnectionHandler<>(connectionFactory, useDecompression, decoder, encoder, initialSettings);
if (serverUpgrade != null) {
handler.onHttpServerUpgrade(serverUpgrade);
}
channel.pipeline().addLast(handler);
- handler.init(connectionFactory.apply(handler));
- if (useDecompression) {
- decoder.frameListener(new DelegatingDecompressorFrameListener(decoder.connection(), handler.connection));
- } else {
- decoder.frameListener(handler.connection);
- }
+ decoder.frameListener(handler);
return handler;
} else {
- VertxHttp2ConnectionHandler<C> handler = new VertxHttp2ConnectionHandler<>(connectionMap, decoder, encoder, initialSettings);
+ VertxHttp2ConnectionHandler<C> handler = new VertxHttp2ConnectionHandler<>(connectionFactory, useCompression, decoder, encoder, initialSettings);
if (clientUpgrade) {
handler.onHttpClientUpgrade();
}
channel.pipeline().addLast(handler);
- handler.init(connectionFactory.apply(handler));
- if (useCompression) {
- decoder.frameListener(new DelegatingDecompressorFrameListener(decoder.connection(), handler.connection));
- } else {
- decoder.frameListener(handler.connection);
- }
+ decoder.frameListener(handler);
return handler;
}
}
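For reference, the compression level discussed in the javadoc above is normally supplied through the server options rather than this internal builder. A typical configuration might look like the following, assuming the standard Vert.x options API (level 4 is an arbitrary example trading some ratio for less CPU):

import io.vertx.core.Vertx;
import io.vertx.core.http.HttpServerOptions;

public class CompressionConfigSketch {
  public static void main(String[] args) {
    HttpServerOptions options = new HttpServerOptions()
        .setCompressionSupported(true)   // enable gzip/deflate encoding
        .setCompressionLevel(4);         // 1 = fastest, 9 = best ratio, default 6
    Vertx.vertx().createHttpServer(options)
        .requestHandler(req -> req.response().end("hello"))
        .listen(8080);
  }
}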
diff --git a/src/main/java/io/vertx/core/http/impl/WebSocketImpl.java b/src/main/java/io/vertx/core/http/impl/WebSocketImpl.java
--- a/src/main/java/io/vertx/core/http/impl/WebSocketImpl.java
+++ b/src/main/java/io/vertx/core/http/impl/WebSocketImpl.java
@@ -17,7 +17,6 @@
package io.vertx.core.http.impl;
import io.vertx.core.http.WebSocket;
-import io.vertx.core.http.WebSocketFrame;
import io.vertx.core.impl.VertxInternal;
import io.vertx.core.spi.metrics.HttpClientMetrics;
@@ -33,7 +32,7 @@
public class WebSocketImpl extends WebSocketImplBase<WebSocket> implements WebSocket {
public WebSocketImpl(VertxInternal vertx,
- ClientConnection conn, boolean supportsContinuation,
+ Http1xClientConnection conn, boolean supportsContinuation,
int maxWebSocketFrameSize, int maxWebSocketMessageSize) {
super(vertx, conn, supportsContinuation, maxWebSocketFrameSize, maxWebSocketMessageSize);
}
@@ -41,7 +40,7 @@ public WebSocketImpl(VertxInternal vertx,
@Override
void handleClosed() {
synchronized (conn) {
- HttpClientMetrics metrics = ((ClientConnection) conn).metrics();
+ HttpClientMetrics metrics = ((Http1xClientConnection) conn).metrics();
if (metrics != null) {
metrics.disconnected(getMetric());
}
diff --git a/src/main/java/io/vertx/core/http/impl/pool/ConnectionListener.java b/src/main/java/io/vertx/core/http/impl/pool/ConnectionListener.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/io/vertx/core/http/impl/pool/ConnectionListener.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2011-2013 The original author or authors
+ * ------------------------------------------------------
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * and Apache License v2.0 which accompanies this distribution.
+ *
+ * The Eclipse Public License is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * The Apache License v2.0 is available at
+ * http://www.opensource.org/licenses/apache2.0.php
+ *
+ * You may elect to redistribute this code under either of these licenses.
+ */
+package io.vertx.core.http.impl.pool;
+
+import io.netty.channel.Channel;
+import io.vertx.core.impl.ContextImpl;
+
+/**
+ * The listener defines the contract used by the {@link ConnectionProvider} to interact with the
+ * connection pool. It also makes it possible to use a connection implementation without a pool.
+ */
+public interface ConnectionListener<C> {
+
+ /**
+ * Callback to signal the connection succeeded and provide all the info required to manage the connection.
+ *
+ * @param conn the connection
+ * @param concurrency the connection concurrency
+ * @param channel the channel
+ * @param context the context
+ * @param initialWeight the initial weight
+ * @param actualWeight the actual weight
+ */
+ void onConnectSuccess(C conn,
+ long concurrency,
+ Channel channel,
+ ContextImpl context,
+ long initialWeight,
+ long actualWeight);
+
+ /**
+ * Callback to signal the connection failed.
+ *
+ * @param context the context
+ * @param err the error
+ * @param weight the weight
+ */
+ void onConnectFailure(ContextImpl context, Throwable err, long weight);
+
+ /**
+ * Signals the connection concurrency changed to the {@code concurrency} value.
+ *
+ * @param concurrency the concurrency
+ */
+ void onConcurrencyChange(long concurrency);
+
+ /**
+ * Signals the connection can be recycled; it must not give back more than it borrowed.
+ *
+ * @param capacity the capacity to recycle
+ * @param disposable whether the connection can be disposed
+ */
+ void onRecycle(int capacity, boolean disposable);
+
+ /**
+ * Signals the connection must not be used anymore by the pool.
+ */
+ void onDiscard();
+
+}
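To see the callback order the contract implies, here is a minimal tracing implementation; `Conn` is a made-up placeholder type and the println bodies stand in for real bookkeeping:

import io.netty.channel.Channel;
import io.vertx.core.impl.ContextImpl;

class TracingListener<Conn> implements ConnectionListener<Conn> {
  @Override
  public void onConnectSuccess(Conn conn, long concurrency, Channel channel,
                               ContextImpl context, long initialWeight, long actualWeight) {
    // The provisional initialWeight is replaced by actualWeight at this point.
    System.out.println("connected, concurrency=" + concurrency);
  }
  @Override
  public void onConnectFailure(ContextImpl context, Throwable err, long weight) {
    System.out.println("connect failed: " + err);
  }
  @Override
  public void onConcurrencyChange(long concurrency) {
    System.out.println("concurrency now " + concurrency);
  }
  @Override
  public void onRecycle(int capacity, boolean disposable) {
    System.out.println("recycled " + capacity + " slot(s), disposable=" + disposable);
  }
  @Override
  public void onDiscard() {
    System.out.println("discarded");
  }
}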
diff --git a/src/main/java/io/vertx/core/http/impl/pool/ConnectionProvider.java b/src/main/java/io/vertx/core/http/impl/pool/ConnectionProvider.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/io/vertx/core/http/impl/pool/ConnectionProvider.java
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2011-2013 The original author or authors
+ * ------------------------------------------------------
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * and Apache License v2.0 which accompanies this distribution.
+ *
+ * The Eclipse Public License is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * The Apache License v2.0 is available at
+ * http://www.opensource.org/licenses/apache2.0.php
+ *
+ * You may elect to redistribute this code under either of these licenses.
+ */
+package io.vertx.core.http.impl.pool;
+
+import io.vertx.core.impl.ContextImpl;
+
+/**
+ * Defines how the connection manager interacts with its connections.
+ *
+ * @author <a href="mailto:julien@julienviet.com">Julien Viet</a>
+ */
+public interface ConnectionProvider<C> {
+
+ /**
+ * Connects to the server and signals the {@code listener} the success with {@link ConnectionListener#onConnectSuccess}
+ * or the failure with {@link ConnectionListener#onConnectFailure}.
+ *
+ * @param listener the listener
+ * @param context the context to use for the connection
+ * @return the initial weight of the connection, which will eventually be corrected when calling the listener
+ */
+ long connect(ConnectionListener<C> listener, ContextImpl context);
+
+ /**
+ * Close a connection.
+ *
+ * @param conn the connection
+ */
+ void close(C conn);
+
+}
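A fake provider makes the weight contract concrete: connect returns a provisional weight immediately and later corrects it through the listener callbacks. This sketch assumes a hypothetical FakeConn type and an always-successful "connection":

import io.vertx.core.impl.ContextImpl;

class FakeConn { }

class FakeConnProvider implements ConnectionProvider<FakeConn> {
  @Override
  public long connect(ConnectionListener<FakeConn> listener, ContextImpl context) {
    long initialWeight = 1;
    context.nettyEventLoop().execute(() -> {
      // concurrency = 1 models an HTTP/1 connection; no real channel in this fake.
      listener.onConnectSuccess(new FakeConn(), 1, null, context, initialWeight, 1);
    });
    return initialWeight; // added to the pool weight until corrected
  }
  @Override
  public void close(FakeConn conn) {
    // release any resources held by the connection
  }
}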
diff --git a/src/main/java/io/vertx/core/http/impl/pool/Pool.java b/src/main/java/io/vertx/core/http/impl/pool/Pool.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/io/vertx/core/http/impl/pool/Pool.java
@@ -0,0 +1,366 @@
+/*
+ * Copyright (c) 2011-2014 The original author or authors
+ * ------------------------------------------------------
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * and Apache License v2.0 which accompanies this distribution.
+ *
+ * The Eclipse Public License is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * The Apache License v2.0 is available at
+ * http://www.opensource.org/licenses/apache2.0.php
+ *
+ * You may elect to redistribute this code under either of these licenses.
+ */
+package io.vertx.core.http.impl.pool;
+
+import io.netty.channel.Channel;
+import io.vertx.core.Handler;
+import io.vertx.core.http.ConnectionPoolTooBusyException;
+import io.vertx.core.impl.ContextImpl;
+import io.vertx.core.logging.Logger;
+import io.vertx.core.logging.LoggerFactory;
+
+import java.util.*;
+import java.util.function.BiConsumer;
+
+/**
+ * The pool is a queue of waiters and a list of connections.
+ *
+ * Pool invariants:
+ * - a connection in the {@link #available} list has its {@code Holder#capacity > 0}
+ * - the {@link #weight} is the sum of all inflight connections' {@link Holder#weight}
+ *
+ * A connection is delivered to a {@link Waiter} on the connection's event loop thread; the waiter must take care of
+ * calling {@link io.vertx.core.impl.ContextInternal#executeFromIO} if necessary.
+ *
+ * Calls to the pool are synchronized on the pool to avoid race conditions and maintain its invariants. This pool can
+ * be called from different threads safely (although it is not encouraged for performance reasons, we benefit from biased
+ * locking which makes the overhead of synchronized near zero).
+ *
+ * In order to avoid deadlocks, acquisition events (success or failure) are dispatched on the event loop thread of the
+ * connection without holding the pool lock.
+ *
+ * To constrain the number of connections the pool maintains a {@link #weight} value that must remain below the
+ * {@link #maxWeight} value to create a connection. Weight is used instead of counting connections because this pool
+ * can mix connections with different concurrency (HTTP/1 and HTTP/2) and this flexibility is necessary.
+ *
+ * When a connection is created an initial weight is returned by the {@link ConnectionProvider#connect} method and is
+ * added to the current weight. When the channel is connected the {@link ConnectionListener#onConnectSuccess} callback
+ * provides the initial weight returned by the connect method and the actual connection weight so it can be used to
+ * correct the current weight. When the channel fails to connect the {@link ConnectionListener#onConnectFailure} callback
+ * provides the initial weight so it can be used to correct the current weight.
+ *
+ * When a connection is recycled and reaches its full capacity (i.e. {@code Holder#concurrency == Holder#capacity}),
+ * the behavior depends on the {@link ConnectionListener#onRecycle(int, boolean)} call that releases this connection.
+ * When {@code disposable} is {@code true} the connection is closed, otherwise it is maintained in the pool, letting
+ * the borrower define the behavior. HTTP/1 will close the connection and HTTP/2 will maintain it.
+ *
+ * When a waiter asks for a connection, it is either added to the queue (when it's not empty), or served (from the
+ * pool or by creating a new connection), or failed. The {@link #waitersCount} is the total number of waiters (the
+ * waiters in {@link #waitersQueue} but also the inflight ones) so we know whether we can close the pool or not.
+ * The {@link #waitersCount} is incremented when a waiter successfully asks to acquire a connection (i.e. it is
+ * either added to the queue or served from the pool) and decremented when it gets a reply (either with
+ * a connection or with a failure).
+ *
+ * @author <a href="mailto:julien@julienviet.com">Julien Viet</a>
+ * @author <a href="http://tfox.org">Tim Fox</a>
+ */
+public class Pool<C> {
+
+ /**
+ * Pool state associated with a connection.
+ *
+ * @param <C> the connection type
+ */
+ public static class Holder<C> {
+
+ boolean removed; // True once the connection has been evicted from the pool
+ C connection; // The connection instance
+ long concurrency; // How many times we can borrow from the connection
+ long capacity; // How many more times the connection can be borrowed (0 <= capacity <= concurrency)
+ Channel channel; // Transport channel
+ ContextImpl context; // Context associated with the connection
+ long weight; // The weight that participates in the pool weight
+
+ }
+ private static final Logger log = LoggerFactory.getLogger(Pool.class);
+
+ private final ConnectionProvider<C> connector;
+ private final BiConsumer<Channel, C> connectionAdded;
+ private final BiConsumer<Channel, C> connectionRemoved;
+
+ private final int queueMaxSize; // the queue max size (does not include inflight waiters)
+ private final Queue<Waiter<C>> waitersQueue = new ArrayDeque<>(); // The waiters pending
+ private int waitersCount; // The number of waiters (including the inflight waiters not in the queue)
+
+ private final Deque<Holder<C>> available; // Available connections
+
+ private final long maxWeight; // The max weight (equivalent to max pool size)
+ private long weight; // The actual pool weight (equivalent to connection count)
+
+ private boolean closed;
+ private final Handler<Void> poolClosed;
+
+ public Pool(ConnectionProvider<C> connector,
+ int queueMaxSize,
+ long maxWeight,
+ Handler<Void> poolClosed,
+ BiConsumer<Channel, C> connectionAdded,
+ BiConsumer<Channel, C> connectionRemoved) {
+ this.maxWeight = maxWeight;
+ this.connector = connector;
+ this.queueMaxSize = queueMaxSize;
+ this.poolClosed = poolClosed;
+ this.available = new ArrayDeque<>();
+ this.connectionAdded = connectionAdded;
+ this.connectionRemoved = connectionRemoved;
+ }
+
+ public synchronized int waitersInQueue() {
+ return waitersQueue.size();
+ }
+
+ public synchronized int waitersCount() {
+ return waitersCount;
+ }
+
+ public synchronized long weight() {
+ return weight;
+ }
+
+ public synchronized long capacity() {
+ return available.stream().mapToLong(c -> c.capacity).sum();
+ }
+
+ /**
+ * Get a connection for a waiter asynchronously.
+ *
+ * @param waiter the waiter
+ * @return whether the pool can satisfy the request
+ */
+ public synchronized boolean getConnection(Waiter<C> waiter) {
+ if (closed) {
+ return false;
+ }
+ int size = waitersQueue.size();
+ if (size == 0 && acquireConnection(waiter)) {
+ waitersCount++;
+ } else if (queueMaxSize < 0 || size < queueMaxSize) {
+ waitersCount++;
+ waitersQueue.add(waiter);
+ } else {
+ waiter.context.nettyEventLoop().execute(() -> {
+ waiter.handleFailure(waiter.context, new ConnectionPoolTooBusyException("Connection pool reached max wait queue size of " + queueMaxSize));
+ });
+ }
+ return true;
+ }
+
+ /**
+ * Attempt to acquire a connection for the waiter, either borrowed from the pool or by creating a new connection.
+ *
+ * This method does not modify the waitersQueue list.
+ *
+ * @return whether the waiter is assigned a connection (or a future connection)
+ */
+ private boolean acquireConnection(Waiter<C> waiter) {
+ if (available.size() > 0) {
+ Holder<C> conn = available.peek();
+ if (--conn.capacity == 0) {
+ available.poll();
+ }
+ ContextImpl ctx = conn.context;
+ ctx.nettyEventLoop().execute(() -> {
+ boolean handled = deliverToWaiter(conn, waiter);
+ synchronized (Pool.this) {
+ waitersCount--;
+ if (!handled) {
+ recycleConnection(conn, 1, false);
+ checkPending();
+ }
+ }
+ });
+ return true;
+ } else if (weight < maxWeight) {
+ weight += createConnection(waiter);
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ private void checkPending() {
+ while (waitersQueue.size() > 0) {
+ Waiter<C> waiter = waitersQueue.peek();
+ if (acquireConnection(waiter)) {
+ waitersQueue.poll();
+ } else {
+ break;
+ }
+ }
+ }
+
+ private long createConnection(Waiter<C> waiter) {
+ Holder<C> holder = new Holder<>();
+ ConnectionListener<C> listener = new ConnectionListener<C>() {
+ @Override
+ public void onConnectSuccess(C conn, long concurrency, Channel channel, ContextImpl context, long initialWeight, long actualWeight) {
+ // Update state
+ synchronized (Pool.this) {
+ initConnection(holder, context, concurrency, conn, channel, initialWeight, actualWeight);
+ }
+ // Init connection - state might change (i.e init could close the connection)
+ waiter.initConnection(context, conn);
+ synchronized (Pool.this) {
+ if (holder.capacity == 0) {
+ waitersQueue.add(waiter);
+ checkPending();
+ return;
+ }
+ waitersCount--;
+ holder.capacity--;
+ if (holder.capacity > 0) {
+ available.add(holder);
+ }
+ }
+ boolean consumed = deliverToWaiter(holder, waiter);
+ synchronized (Pool.this) {
+ if (!consumed) {
+ recycleConnection(holder, 1, false);
+ }
+ checkPending();
+ }
+ }
+ @Override
+ public void onConnectFailure(ContextImpl context, Throwable err, long weight) {
+ waiter.handleFailure(context, err);
+ synchronized (Pool.this) {
+ waitersCount--;
+ Pool.this.weight -= weight;
+ holder.removed = true;
+ checkPending();
+ checkClose();
+ }
+ }
+ @Override
+ public void onConcurrencyChange(long concurrency) {
+ synchronized (Pool.this) {
+ if (holder.removed) {
+ return;
+ }
+ if (holder.concurrency < concurrency) {
+ long diff = concurrency - holder.concurrency;
+ if (holder.capacity == 0) {
+ available.add(holder);
+ }
+ holder.capacity += diff;
+ holder.concurrency = concurrency;
+ checkPending();
+ } else if (holder.concurrency > concurrency) {
+ throw new UnsupportedOperationException("Not yet implemented");
+ }
+ }
+ }
+ @Override
+ public void onRecycle(int capacity, boolean disposable) {
+ if (capacity < 0) {
+ throw new IllegalArgumentException("Illegal capacity");
+ }
+ synchronized (Pool.this) {
+ if (holder.removed) {
+ return;
+ }
+ recycle(holder, capacity, disposable);
+ }
+ }
+ @Override
+ public void onDiscard() {
+ synchronized (Pool.this) {
+ if (holder.removed) {
+ return;
+ }
+ closed(holder);
+ }
+ }
+ };
+ return connector.connect(listener, waiter.context);
+ }
+
+ private synchronized void recycle(Holder<C> holder, int capacity, boolean closeable) {
+ recycleConnection(holder, capacity, closeable);
+ checkPending();
+ checkClose();
+ }
+
+ private synchronized void closed(Holder<C> holder) {
+ closeConnection(holder);
+ checkPending();
+ checkClose();
+ }
+
+ private void closeConnection(Holder<C> holder) {
+ holder.removed = true;
+ connectionRemoved.accept(holder.channel, holder.connection);
+ if (holder.capacity > 0) {
+ available.remove(holder);
+ holder.capacity = 0;
+ }
+ weight -= holder.weight;
+ }
+
+ /**
+ * Should not be called under the pool lock.
+ */
+ private boolean deliverToWaiter(Holder<C> conn, Waiter<C> waiter) {
+ try {
+ return waiter.handleConnection(conn.context, conn.connection);
+ } catch (Exception e) {
+ // The waiter callback threw: report it and treat the connection as consumed
+ e.printStackTrace();
+ return true;
+ }
+ }
+
+ // These methods assume to be called under synchronization
+
+ private void recycleConnection(Holder<C> conn, int c, boolean closeable) {
+ long newCapacity = conn.capacity + c;
+ if (newCapacity > conn.concurrency) {
+ log.debug("Attempt to recycle a connection more than permitted");
+ return;
+ }
+ if (closeable && newCapacity == conn.concurrency && waitersQueue.isEmpty()) {
+ available.remove(conn);
+ conn.capacity = 0;
+ connector.close(conn.connection);
+ } else {
+ if (conn.capacity == 0) {
+ available.add(conn);
+ }
+ conn.capacity = newCapacity;
+ }
+ }
+
+ private void initConnection(Holder<C> holder, ContextImpl context, long concurrency, C conn, Channel channel, long oldWeight, long newWeight) {
+ weight += newWeight - oldWeight;
+ holder.context = context;
+ holder.concurrency = concurrency;
+ holder.connection = conn;
+ holder.channel = channel;
+ holder.weight = newWeight;
+ holder.capacity = concurrency;
+ connectionAdded.accept(holder.channel, holder.connection);
+ }
+
+ private void checkClose() {
+ if (weight == 0 && waitersCount == 0) {
+ // No waiters and no connections - the pool can be closed
+ closed = true;
+ poolClosed.handle(null);
+ }
+ }
+}
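Putting the pieces together, here is a minimal acquisition sketch reusing the FakeConn provider from the previous sketch; assume it lives in io.vertx.core.http.impl.pool so the protected Waiter constructor (shown in the next file) is directly reachable:

import io.vertx.core.impl.ContextImpl;
import io.vertx.core.impl.ContextInternal;

class PoolUsageSketch {
  static void demo(ContextImpl context) {
    Pool<FakeConn> pool = new Pool<>(
        new FakeConnProvider(),
        -1,                                   // negative queueMaxSize: unbounded wait queue
        1,                                    // maxWeight: at most one unit-weight connection
        v -> System.out.println("pool closed"),
        (channel, conn) -> { },               // connectionAdded
        (channel, conn) -> { });              // connectionRemoved
    pool.getConnection(new Waiter<FakeConn>(context) {
      @Override
      public void handleFailure(ContextInternal ctx, Throwable failure) {
        failure.printStackTrace();
      }
      @Override
      public void initConnection(ContextInternal ctx, FakeConn conn) {
        // called once per connection, before it is delivered to any waiter
      }
      @Override
      public boolean handleConnection(ContextInternal ctx, FakeConn conn) {
        System.out.println("got a connection");
        return true; // true = this waiter consumed the capacity slot
      }
    });
  }
}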
diff --git a/src/main/java/io/vertx/core/http/impl/Waiter.java b/src/main/java/io/vertx/core/http/impl/pool/Waiter.java
similarity index 50%
rename from src/main/java/io/vertx/core/http/impl/Waiter.java
rename to src/main/java/io/vertx/core/http/impl/pool/Waiter.java
--- a/src/main/java/io/vertx/core/http/impl/Waiter.java
+++ b/src/main/java/io/vertx/core/http/impl/pool/Waiter.java
@@ -14,48 +14,45 @@
* You may elect to redistribute this code under either of these licenses.
*/
-package io.vertx.core.http.impl;
+package io.vertx.core.http.impl.pool;
import io.vertx.core.impl.ContextImpl;
+import io.vertx.core.impl.ContextInternal;
/**
* @author <a href="mailto:julien@julienviet.com">Julien Viet</a>
*/
-abstract class Waiter {
+public abstract class Waiter<C> {
- final HttpClientRequestImpl req;
- final ContextImpl context;
- Object metric;
+ public final ContextImpl context;
- public Waiter(HttpClientRequestImpl req, ContextImpl context) {
- this.req = req;
+ protected Waiter(ContextImpl context) {
this.context = context;
}
/**
- * Handle connection failure.
+ * Handle connection failure; this callback runs on a Netty event loop.
*
+ * @param ctx the context used to create the connection
* @param failure the failure
*/
- abstract void handleFailure(Throwable failure);
+ public abstract void handleFailure(ContextInternal ctx, Throwable failure);
/**
- * Handle connection success.
+ * Init the connection; this callback runs on a Netty event loop.
*
+ * @param ctx the context used to create the connection
* @param conn the connection
*/
- abstract void handleConnection(HttpClientConnection conn);
+ public abstract void initConnection(ContextInternal ctx, C conn);
/**
- * Handle connection success.
+ * Handle connection success; this callback runs on a Netty event loop.
*
- * @param stream the stream
+ * @param ctx the context used to create the connection
+ * @param conn the connection
+ * @return whether the waiter uses the connection
*/
- abstract void handleStream(HttpClientStream stream);
+ public abstract boolean handleConnection(ContextInternal ctx, C conn) throws Exception;
- /**
- * @return true if the waiter has been cancelled
- */
- abstract boolean isCancelled();
-
}
| diff --git a/src/test/benchmarks/io/vertx/benchmarks/HttpServerHandlerBenchmark.java b/src/test/benchmarks/io/vertx/benchmarks/HttpServerHandlerBenchmark.java
--- a/src/test/benchmarks/io/vertx/benchmarks/HttpServerHandlerBenchmark.java
+++ b/src/test/benchmarks/io/vertx/benchmarks/HttpServerHandlerBenchmark.java
@@ -19,7 +19,6 @@
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.Unpooled;
-import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.channel.embedded.EmbeddedChannel;
@@ -42,7 +41,7 @@
import io.vertx.core.http.HttpServerRequest;
import io.vertx.core.http.HttpServerResponse;
import io.vertx.core.http.impl.HttpHandlers;
-import io.vertx.core.http.impl.ServerHandler;
+import io.vertx.core.http.impl.Http1xServerHandler;
import io.vertx.core.impl.ContextImpl;
import io.vertx.core.impl.EventLoopContext;
import io.vertx.core.impl.VertxInternal;
@@ -232,7 +231,7 @@ public void setup() {
response.end(HELLO_WORLD_BUFFER);
};
HandlerHolder<HttpHandlers> holder = new HandlerHolder<>(context, new HttpHandlers(app, null, null, null));
- ServerHandler handler = new ServerHandler(null, new HttpServerOptions(), "localhost", holder, null);
+ Http1xServerHandler handler = new Http1xServerHandler(null, new HttpServerOptions(), "localhost", holder, null);
vertxChannel.pipeline().addLast("handler", handler);
nettyChannel = new EmbeddedChannel(new HttpRequestDecoder(
diff --git a/src/test/java/io/vertx/test/ListTransferTest.java b/src/test/java/io/vertx/test/ListTransferTest.java
new file mode 100644
--- /dev/null
+++ b/src/test/java/io/vertx/test/ListTransferTest.java
@@ -0,0 +1,81 @@
+package io.vertx.test;
+
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.junit.Assert.*;
+
+public class ListTransferTest {
+
+ private static final int CONNECTING = 0;
+ private static final int WRITING = 1;
+ private static final int CONNECTED = 2;
+
+ @Test
+ public void doTheTest() throws Exception {
+
+ AtomicInteger status = new AtomicInteger();
+ ArrayList<Integer> array = new ArrayList<>();
+
+ AtomicInteger expectedCount = new AtomicInteger();
+
+ Thread t1 = new Thread(() -> {
+ int count = 0;
+ while (true) {
+ switch (status.get()) {
+ case CONNECTING:
+ if (status.compareAndSet(CONNECTING, WRITING)) {
+ array.add(count++);
+ status.set(CONNECTING);
+ }
+ break;
+ case CONNECTED: {
+ expectedCount.set(count);
+ return;
+ }
+ }
+ }
+ });
+
+ AtomicReference<ArrayList<Integer>> expectedArray = new AtomicReference<>();
+ AtomicInteger expectedSpin = new AtomicInteger();
+ Thread t2 = new Thread(() -> {
+ try {
+ Thread.sleep(50);
+ } catch (InterruptedException ignore) {
+ return;
+ }
+ int spin = 0;
+ while (true) {
+ if (status.compareAndSet(CONNECTING, CONNECTED)) {
+ expectedArray.set(new ArrayList<>(array));
+ expectedSpin.set(spin);
+ return;
+ } else {
+ spin ++;
+ Thread.yield();
+ }
+ }
+ });
+
+ t1.start();
+ t2.start();
+
+ t1.join();
+ t2.join();
+
+ assertEquals(expectedCount.get(), expectedArray.get().size());
+
+ System.out.println(expectedCount.get());
+ System.out.println(expectedSpin.get());
+ }
+
+}
diff --git a/src/test/java/io/vertx/test/core/Http1xTest.java b/src/test/java/io/vertx/test/core/Http1xTest.java
--- a/src/test/java/io/vertx/test/core/Http1xTest.java
+++ b/src/test/java/io/vertx/test/core/Http1xTest.java
@@ -25,20 +25,7 @@
import io.vertx.core.Vertx;
import io.vertx.core.VertxException;
import io.vertx.core.buffer.Buffer;
-import io.vertx.core.http.ConnectionPoolTooBusyException;
-import io.vertx.core.http.Http2Settings;
-import io.vertx.core.http.HttpClient;
-import io.vertx.core.http.HttpClientOptions;
-import io.vertx.core.http.HttpClientRequest;
-import io.vertx.core.http.HttpConnection;
-import io.vertx.core.http.HttpHeaders;
-import io.vertx.core.http.HttpMethod;
-import io.vertx.core.http.HttpServer;
-import io.vertx.core.http.HttpServerOptions;
-import io.vertx.core.http.HttpServerRequest;
-import io.vertx.core.http.HttpServerResponse;
-import io.vertx.core.http.HttpVersion;
-import io.vertx.core.http.RequestOptions;
+import io.vertx.core.http.*;
import io.vertx.core.http.impl.HttpClientRequestImpl;
import io.vertx.core.impl.ConcurrentHashSet;
import io.vertx.core.impl.ContextImpl;
@@ -1250,6 +1237,47 @@ public void testPipeliningLimit() throws Exception {
await();
}
+ @Test
+ public void testPipeliningFailure() throws Exception {
+ int n = 5;
+ client.close();
+ client = vertx.createHttpClient(new HttpClientOptions().setKeepAlive(true).setPipelining(true).setPipeliningLimit(n).setMaxPoolSize(1));
+ CompletableFuture<Void> closeFuture = new CompletableFuture<>();
+ AtomicBoolean first = new AtomicBoolean(true);
+ server.requestHandler(req -> {
+ if (first.compareAndSet(true, false)) {
+ closeFuture.thenAccept(v -> {
+ req.response().close();
+ });
+ } else {
+ req.response().end();
+ }
+ });
+ startServer();
+ AtomicInteger succeeded = new AtomicInteger();
+ List<HttpClientRequest> requests = new ArrayList<>();
+ for (int i = 0;i < n * 2;i++) {
+ HttpClientRequest req = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/" + i);
+ req.handler(resp -> {
+ succeeded.incrementAndGet();
+ requests.remove(req);
+ if (requests.isEmpty()) {
+ assertEquals(n * 2 - 1, succeeded.get());
+ testComplete();
+ }
+ });
+ req.exceptionHandler(err -> {
+ requests.remove(req);
+ for (HttpClientRequest r : requests) {
+ r.end();
+ }
+ }).sendHead();
+ requests.add(req);
+ }
+ closeFuture.complete(null);
+ await();
+ }
+
@Test
public void testKeepAlive() throws Exception {
testKeepAlive(true, 5, 10, 5);
@@ -1334,11 +1362,6 @@ public void testPoolingNoKeepAliveNoPipelining() {
testPooling(false, false);
}
- @Test
- public void testPoolingNoKeepAliveAndPipelining() {
- testPooling(false, true);
- }
-
private void testPooling(boolean keepAlive, boolean pipelining) {
String path = "foo.txt";
int numGets = 100;
@@ -1367,15 +1390,7 @@ private void testPooling(boolean keepAlive, boolean pipelining) {
}
});
req.exceptionHandler(t -> {
- if (pipelining && !keepAlive) {
- // Illegal combination - should get exception
- assertTrue(t instanceof IllegalStateException);
- if (completeAlready.compareAndSet(false, true)) {
- testComplete();
- }
- } else {
- fail("Should not throw exception: " + t.getMessage());
- }
+ fail("Should not throw exception: " + t.getMessage());
});
req.headers().set("count", String.valueOf(i));
req.end();
@@ -1385,6 +1400,15 @@ private void testPooling(boolean keepAlive, boolean pipelining) {
await();
}
+ @Test
+ public void testPoolingNoKeepAliveAndPipelining() {
+ try {
+ vertx.createHttpClient(new HttpClientOptions().setKeepAlive(false).setPipelining(true));
+ fail();
+ } catch (IllegalStateException ignore) {
+ }
+ }
+
@Test
public void testMaxWaitQueueSizeIsRespected() throws Exception {
client.close();
@@ -1392,38 +1416,35 @@ public void testMaxWaitQueueSizeIsRespected() throws Exception {
client = vertx.createHttpClient(new HttpClientOptions().setDefaultHost(DEFAULT_HTTP_HOST).setDefaultPort(DEFAULT_HTTP_PORT)
.setPipelining(false).setMaxWaitQueueSize(0).setMaxPoolSize(2));
+ waitFor(3);
+
+ Set<String> expected = new HashSet<>(Arrays.asList("/1", "/2"));
server.requestHandler(req -> {
- req.response().setStatusCode(200);
- req.response().end("OK");
+ assertTrue(expected.contains(req.path()));
+ complete();
});
- server.listen(onSuccess(s -> {
- HttpClientRequest req1 = client.get(DEFAULT_TEST_URI, resp -> {
- resp.bodyHandler(body -> {
- assertEquals("OK", body.toString());
- });
- });
- req1.exceptionHandler(t -> fail("Should not be called."));
+ startServer();
- HttpClientRequest req2 = client.get(DEFAULT_TEST_URI, resp -> {
- resp.bodyHandler(body -> {
- assertEquals("OK", body.toString());
- testComplete();
- });
- });
- req2.exceptionHandler(t -> fail("Should not be called."));
+ HttpClientRequest req1 = client.get("/1", resp -> {
+ fail("Should not be called.");
+ });
- HttpClientRequest req3 = client.get(DEFAULT_TEST_URI, resp -> {
- fail("Should not be called.");
- });
- req3.exceptionHandler(t -> {
- assertTrue("Incorrect exception time.", t instanceof ConnectionPoolTooBusyException);
- });
+ HttpClientRequest req2 = client.get("/2", resp -> {
+ fail("Should not be called.");
+ });
- req1.end();
- req2.end();
- req3.end();
- }));
+ HttpClientRequest req3 = client.get("/3", resp -> {
+ fail("Should not be called.");
+ });
+ req3.exceptionHandler(t -> {
+ assertTrue("Incorrect exception: " + t.getClass().getName(), t instanceof ConnectionPoolTooBusyException);
+ complete();
+ });
+
+ req1.end();
+ req2.end();
+ req3.end();
await();
}
@@ -1614,7 +1635,7 @@ public void testDefaultHttpVersion() {
public void testIncorrectHttpVersion() throws Exception {
server.requestHandler(req -> {
NetSocket so = req.netSocket();
- so.write(Buffer.buffer("HTTP/1.2 200 OK\r\n\r\n"));
+ so.write(Buffer.buffer("HTTP/1.2 200 OK\r\nContent-Length:5\r\n\r\nHELLO"));
so.close();
});
startServer();
@@ -1891,60 +1912,40 @@ public void testClientContextWithPipelining() throws Exception {
}
private void testClientContext() throws Exception {
- CountDownLatch serverLatch = new CountDownLatch(1);
- CountDownLatch req1Latch = new CountDownLatch(1);
server.requestHandler(req -> {
req.response().end();
- req1Latch.countDown();
- } ).listen(ar -> {
- assertTrue(ar.succeeded());
- serverLatch.countDown();
- });
- awaitLatch(serverLatch);
- AtomicReference<Context> c = new AtomicReference<>();
- HttpClientRequest req1 = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/1");
- AtomicReference<HttpConnection> conn = new AtomicReference<>();
- req1.handler(res -> {
- c.set(Vertx.currentContext());
- conn.set(req1.connection());
});
- req1.end();
- Consumer<HttpClientRequest> checker = req -> {
- assertSame(Vertx.currentContext(), c.get());
- assertSame(conn.get(), req.connection());
+ startServer();
+ Set<Context> contexts = Collections.synchronizedSet(new HashSet<>());
+ Set<HttpConnection> connections = Collections.synchronizedSet(new HashSet<>());
+ Handler<HttpClientResponse> checker = response -> {
+ contexts.add(Vertx.currentContext());
+ connections.add(response.request().connection());
};
- CountDownLatch req2Latch = new CountDownLatch(2);
- HttpClientRequest req2 = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/2");
- req2.handler(res -> {
- checker.accept(req2);
- req2Latch.countDown();
- }).exceptionHandler(err -> {
- fail(err);
- }).sendHead();
- awaitLatch(req1Latch);
- HttpClientRequest req3 = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/3");
- req3.handler(res -> {
- checker.accept(req3);
- req2Latch.countDown();
- }).exceptionHandler(err -> {
- fail(err);
- });
- req2.end();
- req3.end();
- awaitLatch(req2Latch);
- vertx.getOrCreateContext().runOnContext(v -> {
- HttpClientRequest req4 = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/4");
- req4.handler(res -> {
+ HttpClientRequest req1 = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/2");
+ req1.handler(checker).exceptionHandler(this::fail);
+ HttpClientRequest req2 = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/3");
+ req2.handler(checker).exceptionHandler(this::fail);
+ CompletableFuture<HttpClientRequest> fut = new CompletableFuture<>();
+ Context ctx = vertx.getOrCreateContext();
+ ctx.runOnContext(v -> {
+ HttpClientRequest req3 = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/4");
+ req3.handler(resp -> {
// This should warn in the log (console) as we are called back on the connection context
// and not on the context doing the request
- checker.accept(req4);
+ // checker.accept(req4);
+ assertEquals(1, contexts.size());
+ assertEquals(1, connections.size());
+ assertNotSame(Vertx.currentContext(), ctx);
testComplete();
});
- req4.exceptionHandler(err -> {
- fail(err);
- });
- req4.end();
+ req3.exceptionHandler(this::fail);
+ fut.complete(req3);
});
+ HttpClientRequest req3 = fut.get(10, TimeUnit.SECONDS);
+ req1.end();
+ req2.end();
+ req3.end();
await();
}
@@ -2530,11 +2531,11 @@ public void testConnectionCloseHttp_1_1_NoClose() throws Exception {
AtomicBoolean firstRequest = new AtomicBoolean(true);
socket.handler(RecordParser.newDelimited("\r\n\r\n", buffer -> {
if (firstRequest.getAndSet(false)) {
- socket.write("HTTP/1.1 200 OK\n" + "Content-Type: text/plain\n" + "Content-Length: 4\n"
- + "\n" + "xxx\n");
+ socket.write("HTTP/1.1 200 OK\r\n" + "Content-Type: text/plain\r\n" + "Content-Length: 4\r\n"
+ + "\r\n" + "xxx\n");
} else {
- socket.write("HTTP/1.1 200 OK\n" + "Content-Type: text/plain\n" + "Content-Length: 1\n"
- + "Connection: close\n" + "\n" + "\n");
+ socket.write("HTTP/1.1 200 OK\r\n" + "Content-Type: text/plain\r\n" + "Content-Length: 1\r\n"
+ + "Connection: close\r\n" + "\r\n" + "\r\n");
}
}));
});
@@ -2546,11 +2547,11 @@ public void testConnectionCloseHttp_1_1_Close() throws Exception {
AtomicBoolean firstRequest = new AtomicBoolean(true);
socket.handler(RecordParser.newDelimited("\r\n\r\n", buffer -> {
if (firstRequest.getAndSet(false)) {
- socket.write("HTTP/1.1 200 OK\n" + "Content-Type: text/plain\n" + "Content-Length: 4\n"
- + "\n" + "xxx\n");
+ socket.write("HTTP/1.1 200 OK\r\n" + "Content-Type: text/plain\r\n" + "Content-Length: 3\r\n"
+ + "\r\n" + "xxx");
} else {
- socket.write("HTTP/1.1 200 OK\n" + "Content-Type: text/plain\n" + "Content-Length: 1\n"
- + "Connection: close\n" + "\n" + "\n");
+ socket.write("HTTP/1.1 200 OK\r\n" + "Content-Type: text/plain\r\n" + "Content-Length: 0\r\n"
+ + "Connection: close\r\n" + "\r\n");
socket.close();
}
}));
@@ -3016,7 +3017,7 @@ public void testResetKeepAliveClientRequest() throws Exception {
@Test
public void testResetPipelinedClientRequest() throws Exception {
waitFor(2);
- CompletableFuture<Void> reset = new CompletableFuture<>();
+ CompletableFuture<Void> doReset = new CompletableFuture<>();
server.close();
NetServer server = vertx.createNetServer();
AtomicInteger count = new AtomicInteger();
@@ -3031,7 +3032,7 @@ public void testResetPipelinedClientRequest() throws Exception {
"POST /somepath HTTP/1.1\r\n" +
"Host: localhost:8080\r\n" +
"\r\n")) {
- reset.complete(null);
+ doReset.complete(null);
so.write(
"HTTP/1.1 200 OK\r\n" +
"Content-Type: text/plain\r\n" +
@@ -3052,13 +3053,11 @@ public void testResetPipelinedClientRequest() throws Exception {
awaitLatch(listenLatch);
client.close();
client = vertx.createHttpClient(new HttpClientOptions().setMaxPoolSize(1).setPipelining(true).setKeepAlive(true));
- AtomicInteger status = new AtomicInteger();
HttpClientRequest req1 = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {
- assertEquals(0, status.getAndIncrement());
+ fail();
});
req1.connectionHandler(conn -> {
conn.closeHandler(v -> {
- assertEquals(1, status.getAndIncrement());
complete();
});
});
@@ -3067,7 +3066,7 @@ public void testResetPipelinedClientRequest() throws Exception {
fail();
});
req2.sendHead();
- reset.thenAccept(v -> {
+ doReset.thenAccept(v -> {
assertTrue(req2.reset());
});
await();
diff --git a/src/test/java/io/vertx/test/core/Http2ClientTest.java b/src/test/java/io/vertx/test/core/Http2ClientTest.java
--- a/src/test/java/io/vertx/test/core/Http2ClientTest.java
+++ b/src/test/java/io/vertx/test/core/Http2ClientTest.java
@@ -71,6 +71,7 @@
import io.vertx.core.net.impl.SSLHelper;
import io.vertx.test.core.tls.Cert;
import io.vertx.test.core.tls.Trust;
+import org.junit.Ignore;
import org.junit.Test;
import java.io.ByteArrayOutputStream;
@@ -134,20 +135,19 @@ public void testClientSettings() throws Exception {
req.response().end();
});
}).connectionHandler(conn -> {
+ io.vertx.core.http.Http2Settings initialRemoteSettings = conn.remoteSettings();
+ assertEquals(initialSettings.isPushEnabled(), initialRemoteSettings.isPushEnabled());
+ assertEquals(initialSettings.getMaxHeaderListSize(), initialRemoteSettings.getMaxHeaderListSize());
+ assertEquals(initialSettings.getMaxFrameSize(), initialRemoteSettings.getMaxFrameSize());
+ assertEquals(initialSettings.getInitialWindowSize(), initialRemoteSettings.getInitialWindowSize());
+// assertEquals(Math.min(initialSettings.getMaxConcurrentStreams(), Integer.MAX_VALUE), settings.getMaxConcurrentStreams());
+ assertEquals(initialSettings.getHeaderTableSize(), initialRemoteSettings.getHeaderTableSize());
+ assertEquals(initialSettings.get('\u0007'), initialRemoteSettings.get(7));
Context ctx = Vertx.currentContext();
conn.remoteSettingsHandler(settings -> {
assertOnIOContext(ctx);
switch (count.getAndIncrement()) {
case 0:
- assertEquals(initialSettings.isPushEnabled(), settings.isPushEnabled());
- assertEquals(initialSettings.getMaxHeaderListSize(), settings.getMaxHeaderListSize());
- assertEquals(initialSettings.getMaxFrameSize(), settings.getMaxFrameSize());
- assertEquals(initialSettings.getInitialWindowSize(), settings.getInitialWindowSize());
-// assertEquals(Math.min(initialSettings.getMaxConcurrentStreams(), Integer.MAX_VALUE), settings.getMaxConcurrentStreams());
- assertEquals(initialSettings.getHeaderTableSize(), settings.getHeaderTableSize());
- assertEquals(initialSettings.get('\u0007'), settings.get(7));
- break;
- case 1:
// find out why it fails sometimes ...
// assertEquals(updatedSettings.pushEnabled(), settings.getEnablePush());
assertEquals(updatedSettings.getMaxHeaderListSize(), settings.getMaxHeaderListSize());
@@ -159,6 +159,9 @@ public void testClientSettings() throws Exception {
assertEquals(updatedSettings.get('\u0007'), settings.get(7));
complete();
break;
+ default:
+ fail();
+
}
});
});
@@ -198,7 +201,6 @@ public void testInvalidSettings() throws Exception {
@Test
public void testServerSettings() throws Exception {
- waitFor(2);
io.vertx.core.http.Http2Settings expectedSettings = TestUtils.randomHttp2Settings();
expectedSettings.setHeaderTableSize((int)io.vertx.core.http.Http2Settings.DEFAULT_HEADER_TABLE_SIZE);
server.close();
@@ -210,26 +212,20 @@ public void testServerSettings() throws Exception {
});
});
server.requestHandler(req -> {
- req.response().end();
});
startServer();
AtomicInteger count = new AtomicInteger();
- client.get(DEFAULT_HTTPS_PORT, DEFAULT_HTTPS_HOST, "/somepath", resp -> {
- complete();
- }).connectionHandler(conn -> {
+ client.get(DEFAULT_HTTPS_PORT, DEFAULT_HTTPS_HOST, "/somepath", resp -> fail()).connectionHandler(conn -> {
conn.remoteSettingsHandler(settings -> {
switch (count.getAndIncrement()) {
case 0:
- // Initial settings
- break;
- case 1:
assertEquals(expectedSettings.getMaxHeaderListSize(), settings.getMaxHeaderListSize());
assertEquals(expectedSettings.getMaxFrameSize(), settings.getMaxFrameSize());
assertEquals(expectedSettings.getInitialWindowSize(), settings.getInitialWindowSize());
assertEquals(expectedSettings.getMaxConcurrentStreams(), settings.getMaxConcurrentStreams());
assertEquals(expectedSettings.getHeaderTableSize(), settings.getHeaderTableSize());
assertEquals(expectedSettings.get('\u0007'), settings.get(7));
- complete();
+ testComplete();
break;
}
});
@@ -594,10 +590,8 @@ private void testQueueingRequests(int numReq, Long max) throws Exception {
CountDownLatch latch = new CountDownLatch(1);
client.get(DEFAULT_HTTPS_PORT, DEFAULT_HTTPS_HOST, "/somepath", resp -> {
}).connectionHandler(conn -> {
- conn.remoteSettingsHandler(settings -> {
- assertEquals(max == null ? 0xFFFFFFFFL : max, settings.getMaxConcurrentStreams());
- latch.countDown();
- });
+ assertEquals(max == null ? 0xFFFFFFFFL : max, conn.remoteSettings().getMaxConcurrentStreams());
+ latch.countDown();
}).exceptionHandler(err -> {
fail();
}).end();
@@ -946,6 +940,7 @@ public void testConnectionHandler() throws Exception {
@Test
public void testConnectionShutdownInConnectionHandler() throws Exception {
+ waitFor(2);
AtomicInteger serverStatus = new AtomicInteger();
server.connectionHandler(conn -> {
if (serverStatus.getAndIncrement() == 0) {
@@ -963,28 +958,27 @@ public void testConnectionShutdownInConnectionHandler() throws Exception {
});
server.requestHandler(req -> {
assertEquals(5, serverStatus.getAndIncrement());
- req.response().end();
+ req.response().end("" + serverStatus.get());
});
startServer();
AtomicInteger clientStatus = new AtomicInteger();
HttpClientRequest req1 = client.get(DEFAULT_HTTPS_PORT, DEFAULT_HTTPS_HOST, "/somepath");
req1.connectionHandler(conn -> {
Context ctx = Vertx.currentContext();
- conn.shutdownHandler(v -> {
- assertOnIOContext(ctx);
- clientStatus.compareAndSet(1, 2);
- });
if (clientStatus.getAndIncrement() == 0) {
+ conn.shutdownHandler(v -> {
+ assertOnIOContext(ctx);
+ clientStatus.compareAndSet(1, 2);
+ complete();
+ });
conn.shutdown();
}
});
- req1.exceptionHandler(err -> {
- fail();
- });
+ req1.exceptionHandler(err -> fail());
req1.handler(resp -> {
- assertEquals(2, clientStatus.getAndIncrement());
- resp.endHandler(v -> {
- testComplete();
+ resp.bodyHandler(body -> {
+ assertEquals("6", body.toString());
+ complete();
});
});
req1.end();
@@ -1021,7 +1015,9 @@ public void testServerShutdownConnection() throws Exception {
@Test
public void testReceivingGoAwayDiscardsTheConnection() throws Exception {
AtomicInteger reqCount = new AtomicInteger();
+ Set<HttpConnection> connections = Collections.synchronizedSet(new HashSet<>());
server.requestHandler(req -> {
+ connections.add(req.connection());
switch (reqCount.getAndIncrement()) {
case 0:
req.connection().goAway(0);
@@ -1040,6 +1036,7 @@ public void testReceivingGoAwayDiscardsTheConnection() throws Exception {
conn.goAwayHandler(ga -> {
if (gaCount.getAndIncrement() == 0) {
client.get(DEFAULT_HTTPS_PORT, DEFAULT_HTTPS_HOST, "/somepath", resp2 -> {
+ assertEquals(2, connections.size());
testComplete();
}).setTimeout(5000).exceptionHandler(this::fail).end();
}
@@ -1748,14 +1745,13 @@ public void testMaxConcurrencySingleConnection() throws Exception {
@Test
public void testMaxConcurrencyMultipleConnections() throws Exception {
- testMaxConcurrency(3, 5);
+ testMaxConcurrency(2, 1);
}
private void testMaxConcurrency(int poolSize, int maxConcurrency) throws Exception {
int rounds = 1 + poolSize;
int maxRequests = poolSize * maxConcurrency;
int totalRequests = maxRequests + maxConcurrency;
-
Set<HttpConnection> serverConns = new HashSet<>();
server.connectionHandler(conn -> {
serverConns.add(conn);
@@ -1781,7 +1777,6 @@ private void testMaxConcurrency(int poolSize, int maxConcurrency) throws Excepti
setHttp2MaxPoolSize(poolSize).
setHttp2MultiplexingLimit(maxConcurrency));
AtomicInteger respCount = new AtomicInteger();
-
Set<HttpConnection> clientConnections = Collections.synchronizedSet(new HashSet<>());
for (int j = 0;j < rounds;j++) {
for (int i = 0;i < maxConcurrency;i++) {
diff --git a/src/test/java/io/vertx/test/core/Http2ServerTest.java b/src/test/java/io/vertx/test/core/Http2ServerTest.java
--- a/src/test/java/io/vertx/test/core/Http2ServerTest.java
+++ b/src/test/java/io/vertx/test/core/Http2ServerTest.java
@@ -386,34 +386,24 @@ public void testClientSettings() throws Exception {
Context ctx = vertx.getOrCreateContext();
io.vertx.core.http.Http2Settings initialSettings = TestUtils.randomHttp2Settings();
io.vertx.core.http.Http2Settings updatedSettings = TestUtils.randomHttp2Settings();
- Future<Void> settingsRead = Future.future();
AtomicInteger count = new AtomicInteger();
server.connectionHandler(conn -> {
io.vertx.core.http.Http2Settings settings = conn.remoteSettings();
- assertEquals(true, settings.isPushEnabled());
+ assertEquals(initialSettings.isPushEnabled(), settings.isPushEnabled());
// Netty bug ?
// Nothing has been yet received so we should get Integer.MAX_VALUE
// assertEquals(Integer.MAX_VALUE, settings.getMaxHeaderListSize());
- assertEquals(io.vertx.core.http.Http2Settings.DEFAULT_MAX_FRAME_SIZE, settings.getMaxFrameSize());
- assertEquals(io.vertx.core.http.Http2Settings.DEFAULT_INITIAL_WINDOW_SIZE, settings.getInitialWindowSize());
- assertEquals((Long)(long)Integer.MAX_VALUE, (Long)(long)settings.getMaxConcurrentStreams());
- assertEquals(io.vertx.core.http.Http2Settings.DEFAULT_HEADER_TABLE_SIZE, settings.getHeaderTableSize());
+ assertEquals(initialSettings.getMaxFrameSize(), settings.getMaxFrameSize());
+ assertEquals(initialSettings.getInitialWindowSize(), settings.getInitialWindowSize());
+ assertEquals((Long)(long)initialSettings.getMaxConcurrentStreams(), (Long)(long)settings.getMaxConcurrentStreams());
+ assertEquals(initialSettings.getHeaderTableSize(), settings.getHeaderTableSize());
+
conn.remoteSettingsHandler(update -> {
assertOnIOContext(ctx);
switch (count.getAndIncrement()) {
case 0:
- assertEquals(initialSettings.isPushEnabled(), update.isPushEnabled());
- assertEquals(initialSettings.getMaxHeaderListSize(), update.getMaxHeaderListSize());
- assertEquals(initialSettings.getMaxFrameSize(), update.getMaxFrameSize());
- assertEquals(initialSettings.getInitialWindowSize(), update.getInitialWindowSize());
- assertEquals(initialSettings.getMaxConcurrentStreams(), update.getMaxConcurrentStreams());
- assertEquals(initialSettings.getHeaderTableSize(), update.getHeaderTableSize());
- assertEquals(initialSettings.get('\u0007'), update.get(7));
- settingsRead.complete();
- break;
- case 1:
assertEquals(updatedSettings.isPushEnabled(), update.isPushEnabled());
assertEquals(updatedSettings.getMaxHeaderListSize(), update.getMaxHeaderListSize());
assertEquals(updatedSettings.getMaxFrameSize(), update.getMaxFrameSize());
@@ -423,6 +413,8 @@ public void testClientSettings() throws Exception {
assertEquals(updatedSettings.get('\u0007'), update.get(7));
testComplete();
break;
+ default:
+ fail();
}
});
});
@@ -433,10 +425,8 @@ public void testClientSettings() throws Exception {
TestClient client = new TestClient();
client.settings.putAll(HttpUtils.fromVertxSettings(initialSettings));
ChannelFuture fut = client.connect(DEFAULT_HTTPS_PORT, DEFAULT_HTTPS_HOST, request -> {
- settingsRead.setHandler(ar -> {
- request.encoder.writeSettings(request.context, HttpUtils.fromVertxSettings(updatedSettings), request.context.newPromise());
- request.context.flush();
- });
+ request.encoder.writeSettings(request.context, HttpUtils.fromVertxSettings(updatedSettings), request.context.newPromise());
+ request.context.flush();
});
fut.sync();
await();
diff --git a/src/test/java/io/vertx/test/core/Http2Test.java b/src/test/java/io/vertx/test/core/Http2Test.java
--- a/src/test/java/io/vertx/test/core/Http2Test.java
+++ b/src/test/java/io/vertx/test/core/Http2Test.java
@@ -25,11 +25,13 @@
import io.vertx.test.core.tls.Cert;
import org.junit.Test;
+import java.util.*;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
/**
* @author <a href="mailto:julien@julienviet.com">Julien Viet</a>
@@ -356,4 +358,51 @@ public void testClientMakeRequestHttp2WithSSLWithoutAlpn() throws Exception {
// Expected
}
}
+
+ @Test
+ public void testServePendingRequests() throws Exception {
+ int n = 10;
+ waitFor(n);
+ LinkedList<HttpServerRequest> requests = new LinkedList<>();
+ Set<HttpConnection> connections = new HashSet<>();
+ server.requestHandler(req -> {
+ requests.add(req);
+ connections.add(req.connection());
+ assertEquals(1, connections.size());
+ if (requests.size() == n) {
+ while (requests.size() > 0) {
+ requests.removeFirst().response().end();
+ }
+ }
+ });
+ startServer();
+ for (int i = 0;i < n;i++) {
+ client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> complete()).end();
+ }
+ await();
+ }
+
+ @Test
+ public void testInitialMaxConcurrentStreamZero() throws Exception {
+ AtomicLong concurrency = new AtomicLong();
+ server.close();
+ server = vertx.createHttpServer(createBaseServerOptions().setInitialSettings(new Http2Settings().setMaxConcurrentStreams(0)));
+ server.requestHandler(req -> {
+ assertEquals(10, concurrency.get());
+ req.response().end();
+ });
+ server.connectionHandler(conn -> {
+ vertx.setTimer(500, id -> {
+ conn.updateSettings(new Http2Settings().setMaxConcurrentStreams(10));
+ });
+ });
+ startServer();
+ client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ testComplete();
+ }).connectionHandler(conn -> {
+ assertEquals(0, conn.remoteSettings().getMaxConcurrentStreams());
+ conn.remoteSettingsHandler(settings -> concurrency.set(settings.getMaxConcurrentStreams()));
+ }).setTimeout(10000).exceptionHandler(err -> fail(err)).end();
+ await();
+ }
}
diff --git a/src/test/java/io/vertx/test/core/HttpTLSTest.java b/src/test/java/io/vertx/test/core/HttpTLSTest.java
--- a/src/test/java/io/vertx/test/core/HttpTLSTest.java
+++ b/src/test/java/io/vertx/test/core/HttpTLSTest.java
@@ -1216,10 +1216,8 @@ public void testCrlInvalidPath() throws Exception {
clientOptions.setTrustOptions(Trust.SERVER_PEM_ROOT_CA.get());
clientOptions.setSsl(true);
clientOptions.addCrlPath("/invalid.pem");
- HttpClient client = vertx.createHttpClient(clientOptions);
- HttpClientRequest req = client.request(HttpMethod.CONNECT, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/", (handler) -> {});
try {
- req.end();
+ vertx.createHttpClient(clientOptions);
fail("Was expecting a failure");
} catch (VertxException e) {
assertNotNull(e.getCause());
diff --git a/src/test/java/io/vertx/test/core/HttpTest.java b/src/test/java/io/vertx/test/core/HttpTest.java
--- a/src/test/java/io/vertx/test/core/HttpTest.java
+++ b/src/test/java/io/vertx/test/core/HttpTest.java
@@ -16,8 +16,10 @@
package io.vertx.test.core;
+import io.netty.handler.codec.compression.DecompressionException;
import io.netty.handler.codec.http.DefaultHttpHeaders;
import io.netty.handler.codec.http.HttpResponseStatus;
+import io.netty.handler.codec.http2.Http2Exception;
import io.netty.util.internal.logging.InternalLoggerFactory;
import io.vertx.codegen.annotations.Nullable;
import io.vertx.core.AbstractVerticle;
@@ -47,30 +49,17 @@
import io.vertx.core.impl.WorkerContext;
import io.vertx.core.net.NetClient;
import io.vertx.core.net.NetSocket;
+import io.vertx.core.net.SocketAddress;
import io.vertx.test.netty.TestLoggerFactory;
import org.junit.Assume;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.OutputStreamWriter;
-import java.io.UnsupportedEncodingException;
-import java.net.InetAddress;
-import java.net.URLEncoder;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
+import java.io.*;
+import java.net.*;
+import java.util.*;
+import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
@@ -2400,7 +2389,29 @@ public void testPauseClientResponse() {
@Test
public void testDeliverPausedBufferWhenResume() throws Exception {
- Buffer data = TestUtils.randomBuffer(20);
+ testDeliverPausedBufferWhenResume(block -> vertx.setTimer(10, id -> block.run()));
+ }
+
+ @Test
+ public void testDeliverPausedBufferWhenResumeOnOtherThread() throws Exception {
+ ExecutorService exec = Executors.newSingleThreadExecutor();
+ try {
+ testDeliverPausedBufferWhenResume(block -> exec.execute(() -> {
+ try {
+ Thread.sleep(10);
+ } catch (InterruptedException e) {
+ fail(e);
+ Thread.currentThread().interrupt();
+ }
+ block.run();
+ }));
+ } finally {
+ exec.shutdown();
+ }
+ }
+
+ private void testDeliverPausedBufferWhenResume(Consumer<Runnable> scheduler) throws Exception {
+ Buffer data = TestUtils.randomBuffer(2048);
int num = 10;
waitFor(num);
List<CompletableFuture<Void>> resumes = Collections.synchronizedList(new ArrayList<>());
@@ -2422,18 +2433,18 @@ public void testDeliverPausedBufferWhenResume() throws Exception {
int idx = i;
client.request(HttpMethod.GET, DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/" + i, resp -> {
Buffer body = Buffer.buffer();
+ Thread t = Thread.currentThread();
resp.handler(buff -> {
+ assertSame(t, Thread.currentThread());
resumes.get(idx).complete(null);
body.appendBuffer(buff);
});
resp.endHandler(v -> {
- assertEquals(data, body);
+ // assertEquals(data, body);
complete();
});
resp.pause();
- vertx.setTimer(10, id -> {
- resp.resume();
- });
+ scheduler.accept(resp::resume);
}).end();
}
await();
@@ -3858,6 +3869,37 @@ private TestLoggerFactory testLogging() throws Exception {
return factory;
}
+ @Test
+ public void testClientDecompressionError() throws Exception {
+ waitFor(2);
+ server.requestHandler(req -> {
+ req.response()
+ .putHeader("Content-Encoding", "gzip")
+ .end("long response with mismatched encoding causes connection leaks");
+ });
+ startServer();
+ client.close();
+ client = vertx.createHttpClient(createBaseClientOptions().setTryUseCompression(true));
+ client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ resp.exceptionHandler(err -> {
+ if (err instanceof Http2Exception) {
+ complete();
+ // Connection is not closed for HTTP/2 only the streams so we need to force it
+ resp.request().connection().close();
+ } else if (err instanceof DecompressionException) {
+ complete();
+ }
+ });
+ }).connectionHandler(conn -> {
+ conn.closeHandler(v -> {
+ complete();
+ });
+ }).end();
+
+ await();
+
+ }
+
protected File setupFile(String fileName, String content) throws Exception {
File file = new File(testDir, fileName);
if (file.exists()) {
@@ -3901,4 +3943,26 @@ protected static MultiMap getHeaders(int num) {
}
return headers;
}
+/*
+ @Test
+ public void testReset() throws Exception {
+ CountDownLatch latch = new CountDownLatch(1);
+ server.requestHandler(req -> {
+ req.exceptionHandler(err -> {
+ System.out.println("GOT ERR");
+ });
+ req.endHandler(v -> {
+ System.out.println("GOT END");
+ latch.countDown();
+ });
+ });
+ startServer();
+ HttpClientRequest req = client.get(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, "/somepath", resp -> {});
+ req.end();
+ awaitLatch(latch);
+ req.reset();
+
+ await();
+ }
+*/
}
diff --git a/src/test/java/io/vertx/test/core/MetricsTest.java b/src/test/java/io/vertx/test/core/MetricsTest.java
--- a/src/test/java/io/vertx/test/core/MetricsTest.java
+++ b/src/test/java/io/vertx/test/core/MetricsTest.java
@@ -680,7 +680,7 @@ public void testHttpClientMetricsQueueLength() throws Exception {
assertWaitUntil(() -> requests.size() == 3);
assertEquals(Collections.singleton("localhost:8080"), metrics.endpoints());
assertEquals(0, (int)metrics.queueSize("localhost:8080"));
- assertEquals(5, (int)metrics.connectionCount("localhost:8080"));
+ assertEquals(3, (int)metrics.connectionCount("localhost:8080"));
}
@Test
diff --git a/src/test/java/io/vertx/test/core/net/ConnectionPoolTest.java b/src/test/java/io/vertx/test/core/net/ConnectionPoolTest.java
new file mode 100644
--- /dev/null
+++ b/src/test/java/io/vertx/test/core/net/ConnectionPoolTest.java
@@ -0,0 +1,775 @@
+/*
+ * Copyright (c) 2011-2014 The original author or authors
+ * ------------------------------------------------------
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * and Apache License v2.0 which accompanies this distribution.
+ *
+ * The Eclipse Public License is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * The Apache License v2.0 is available at
+ * http://www.opensource.org/licenses/apache2.0.php
+ *
+ * You may elect to redistribute this code under either of these licenses.
+ */
+package io.vertx.test.core.net;
+
+import io.netty.channel.Channel;
+import io.netty.channel.embedded.EmbeddedChannel;
+import io.vertx.core.Vertx;
+import io.vertx.core.http.impl.pool.*;
+import io.vertx.core.impl.ContextImpl;
+import io.vertx.core.impl.ContextInternal;
+import io.vertx.core.net.SocketAddress;
+import io.vertx.test.core.VertxTestBase;
+import org.junit.Test;
+
+import java.util.*;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ * @author <a href="mailto:julien@julienviet.com">Julien Viet</a>
+ */
+public class ConnectionPoolTest extends VertxTestBase {
+
+ class FakeConnectionManager {
+
+ private final FakeConnectionProvider connector;
+ private final int queueMaxSize;
+ private final int maxPoolSize;
+ private Pool<FakeConnection> pool;
+ private Set<FakeConnection> active = new HashSet<>();
+ private boolean closed = true;
+ private int seq;
+
+ FakeConnectionManager(int queueMaxSize, int maxPoolSize, FakeConnectionProvider connector) {
+ this.queueMaxSize = queueMaxSize;
+ this.maxPoolSize = maxPoolSize;
+ this.connector = connector;
+ }
+
+ synchronized int sequence() {
+ return seq;
+ }
+
+ synchronized boolean closed() {
+ return closed;
+ }
+
+ synchronized boolean contains(FakeConnection conn) {
+ return active.contains(conn);
+ }
+
+ synchronized int size() {
+ return active.size();
+ }
+
+ synchronized Pool<FakeConnection> pool() {
+ return pool;
+ }
+
+ void getConnection(Waiter<FakeConnection> waiter) {
+ synchronized (this) {
+ if (closed) {
+ seq++;
+ closed = false;
+ pool = new Pool<>(
+ connector,
+ queueMaxSize,
+ maxPoolSize,
+ v -> {
+ synchronized (FakeConnectionManager.this) {
+ closed = true;
+ }
+ }, (channel, conn) -> {
+ synchronized (FakeConnectionManager.this) {
+ active.add(conn);
+ }
+ }, (channel, conn) -> {
+ synchronized (FakeConnectionManager.this) {
+ active.remove(conn);
+ }
+ }
+ );
+ }
+ }
+ pool.getConnection(waiter);
+ }
+ }
+
+ @Test
+ public void testConnectSuccess() {
+ FakeConnectionProvider connector = new FakeConnectionProvider();
+ FakeConnectionManager mgr = new FakeConnectionManager(3, 4, connector);
+ AtomicReference<Boolean> initLock = new AtomicReference<>();
+ AtomicReference<Boolean> handleLock = new AtomicReference<>();
+ FakeWaiter waiter = new FakeWaiter() {
+ @Override
+ public synchronized void initConnection(ContextInternal ctx, FakeConnection conn) {
+ assertNull(Vertx.currentContext());
+ assertSame(ctx, context);
+ Pool<FakeConnection> pool = mgr.pool();
+ initLock.set(Thread.holdsLock(pool));
+ super.initConnection(ctx, conn);
+ }
+ @Override
+ public synchronized boolean handleConnection(ContextInternal ctx, FakeConnection conn) throws Exception {
+ assertNull(Vertx.currentContext());
+ assertSame(ctx, context);
+ Pool<FakeConnection> pool = mgr.pool();
+ handleLock.set(Thread.holdsLock(pool));
+ return super.handleConnection(ctx, conn);
+ }
+ };
+ mgr.getConnection(waiter);
+ FakeConnection conn = connector.assertRequest();
+ conn.connect();
+ assertWaitUntil(waiter::isComplete);
+ assertEquals(Boolean.FALSE, handleLock.get());
+ assertEquals(Boolean.FALSE, initLock.get());
+ waiter.assertInitialized(conn);
+ waiter.assertSuccess(conn);
+ waiter.recycle();
+ assertEquals(0, mgr.size());
+ assertTrue(mgr.closed());
+ }
+
+ @Test
+ public void testConnectFailure() {
+ FakeConnectionProvider connector = new FakeConnectionProvider();
+ FakeConnectionManager mgr = new FakeConnectionManager(3, 4, connector);
+ AtomicReference<Boolean> holdsLock = new AtomicReference<>();
+ FakeWaiter waiter = new FakeWaiter() {
+ @Override
+ public synchronized void handleFailure(ContextInternal ctx, Throwable failure) {
+ assertNull(Vertx.currentContext());
+ assertSame(ctx, context);
+ Pool<FakeConnection> pool = mgr.pool();
+ holdsLock.set(Thread.holdsLock(pool));
+ super.handleFailure(ctx, failure);
+ }
+ };
+ mgr.getConnection(waiter);
+ FakeConnection conn = connector.assertRequest();
+ Throwable failure = new Throwable();
+ conn.fail(failure);
+ assertWaitUntil(waiter::isComplete);
+ assertEquals(Boolean.FALSE, holdsLock.get());
+ waiter.assertNotInitialized();
+ waiter.assertFailure(failure);
+ }
+
+ @Test
+ public void testConnectPoolEmptyWaiterCancelledAfterConnectRequest() {
+ FakeConnectionProvider connector = new FakeConnectionProvider();
+ FakeConnectionManager mgr = new FakeConnectionManager(3, 3, connector);
+ FakeWaiter waiter = new FakeWaiter();
+ mgr.getConnection(waiter);
+ FakeConnection conn = connector.assertRequest();
+ waiter.cancel();
+ conn.connect();
+ waitUntil(() -> mgr.size() == 1);
+ waiter.assertInitialized(conn);
+ assertTrue(waiter.isComplete());
+ assertFalse(waiter.isSuccess());
+ assertFalse(waiter.isFailure());
+ assertTrue(mgr.contains(conn));
+ }
+
+ @Test
+ public void testConnectionFailure() {
+ FakeConnectionProvider connector = new FakeConnectionProvider();
+ FakeConnectionManager mgr = new FakeConnectionManager(3, 3, connector);
+ FakeWaiter waiter = new FakeWaiter();
+ mgr.getConnection(waiter);
+ FakeConnection conn = connector.assertRequest();
+ Exception expected = new Exception();
+ conn.fail(expected);
+ assertWaitUntil(waiter::isComplete);
+ waiter.assertFailure(expected);
+ assertTrue(waiter.isFailure());
+ assertTrue(mgr.closed());
+ }
+
+ @Test
+ public void testRecycleConnection() {
+ FakeConnectionProvider connector = new FakeConnectionProvider();
+ FakeConnectionManager mgr = new FakeConnectionManager(3, 1, connector);
+ FakeWaiter waiter1 = new FakeWaiter();
+ mgr.getConnection(waiter1);
+ FakeConnection conn = connector.assertRequest();
+ conn.connect();
+ assertWaitUntil(waiter1::isComplete);
+ FakeWaiter waiter2 = new FakeWaiter();
+ mgr.getConnection(waiter2);
+ connector.assertRequests(0);
+ waiter1.recycle();
+ assertWaitUntil(waiter2::isComplete);
+ waiter2.assertSuccess(conn);
+ }
+
+ @Test
+ public void testRecycleDiscardedConnection() {
+ FakeConnectionProvider connector = new FakeConnectionProvider();
+ FakeConnectionManager mgr = new FakeConnectionManager(3, 1, connector);
+ FakeWaiter waiter1 = new FakeWaiter();
+ mgr.getConnection(waiter1);
+ FakeConnection conn = connector.assertRequest();
+ conn.connect();
+ waitUntil(waiter1::isComplete);
+ FakeWaiter waiter2 = new FakeWaiter();
+ mgr.getConnection(waiter2);
+ conn.close();
+ waiter1.recycle();
+ waitUntil(() -> connector.requests() == 1);
+ assertFalse(mgr.closed());
+ FakeConnection conn2 = connector.assertRequest();
+ conn2.connect();
+ waitUntil(waiter2::isSuccess);
+ }
+
+ @Test
+ public void testWaiterThrowsException() {
+ FakeConnectionProvider connector = new FakeConnectionProvider();
+ FakeConnectionManager mgr = new FakeConnectionManager(3, 1, connector);
+ Exception failure = new Exception();
+ FakeWaiter waiter = new FakeWaiter() {
+ @Override
+ public synchronized boolean handleConnection(ContextInternal ctx, FakeConnection conn) throws Exception {
+ throw failure;
+ }
+ };
+ mgr.getConnection(waiter);
+ FakeConnection conn = connector.assertRequest();
+ conn.connect();
+ assertEquals(0, mgr.size());
+ }
+
+ @Test
+ public void testEndpointLifecycle() {
+ FakeConnectionProvider connector = new FakeConnectionProvider();
+ FakeConnectionManager mgr = new FakeConnectionManager(3, 1, connector);
+ FakeWaiter waiter1 = new FakeWaiter();
+ mgr.getConnection(waiter1);
+ FakeConnection conn = connector.assertRequest();
+ conn.connect();
+ waitUntil(waiter1::isSuccess);
+ conn.close();
+ waitUntil(mgr::closed);
+ FakeWaiter waiter2 = new FakeWaiter();
+ mgr.getConnection(waiter2);
+ assertEquals(2, mgr.sequence());
+ }
+
+ @Test
+ public void testDontCloseEndpointWithInflightRequest() {
+ FakeConnectionProvider connector = new FakeConnectionProvider();
+ FakeConnectionManager mgr = new FakeConnectionManager(3, 2, connector);
+ FakeWaiter waiter1 = new FakeWaiter();
+ mgr.getConnection(waiter1);
+ FakeConnection conn = connector.assertRequest();
+ conn.connect();
+ waitUntil(waiter1::isComplete);
+ FakeWaiter waiter2 = new FakeWaiter();
+ mgr.getConnection(waiter2);
+ conn.close();
+ waitUntil(() -> !mgr.contains(conn));
+ assertFalse(mgr.closed());
+ }
+
+ @Test
+ public void testInitialConcurrency() {
+ int n = 10;
+ FakeConnectionProvider connector = new FakeConnectionProvider();
+ FakeConnectionManager mgr = new FakeConnectionManager(-1, 1, connector);
+ List<FakeWaiter> waiters = new ArrayList<>();
+ for (int i = 0; i < n; i++) {
+ FakeWaiter waiter = new FakeWaiter();
+ mgr.getConnection(waiter);
+ waiters.add(waiter);
+ }
+ FakeConnection conn = connector.assertRequest();
+ conn.concurrency(n).connect();
+ waiters.forEach(waiter -> {
+ waitUntil(waiter::isSuccess);
+ });
+ waiters.forEach(FakeWaiter::recycle);
+ }
+
+ @Test
+ public void testInitialNoConcurrency() {
+ int n = 10;
+ FakeConnectionProvider connector = new FakeConnectionProvider();
+ FakeConnectionManager mgr = new FakeConnectionManager(-1, 1, connector);
+ List<FakeWaiter> waiters = new ArrayList<>();
+ for (int i = 0; i < n; i++) {
+ FakeWaiter waiter = new FakeWaiter();
+ mgr.getConnection(waiter);
+ waiters.add(waiter);
+ }
+ FakeConnection conn = connector.assertRequest();
+ conn.concurrency(0).connect().awaitConnected();
+ conn.concurrency(n - 1);
+ waitUntil(() -> waiters.stream().filter(FakeWaiter::isSuccess).count() == n - 1);
+ waiters.stream().filter(FakeWaiter::isSuccess).findFirst().get().recycle();
+ waiters.forEach(waiter -> {
+ waitUntil(waiter::isSuccess);
+ });
+ }
+
+ @Test
+ public void testRecycleWithoutDispose() {
+ FakeConnectionProvider connector = new FakeConnectionProvider();
+ FakeConnectionManager mgr = new FakeConnectionManager(-1, 1, connector);
+ FakeWaiter waiter1 = new FakeWaiter();
+ mgr.getConnection(waiter1);
+ FakeConnection conn = connector.assertRequest();
+ conn.connect();
+ waitUntil(waiter1::isSuccess);
+ conn.recycle(false);
+ FakeWaiter waiter2 = new FakeWaiter();
+ mgr.getConnection(waiter2);
+ waitUntil(waiter1::isSuccess);
+ waiter2.assertSuccess(conn);
+ conn.recycle(true);
+ assertEquals(0, mgr.size());
+ }
+
+ @Test
+ public void testDiscardWaiterWhenFull() {
+ FakeConnectionProvider connector = new FakeConnectionProvider();
+ FakeConnectionManager mgr = new FakeConnectionManager(2, 1, connector);
+ FakeWaiter waiter1 = new FakeWaiter();
+ mgr.getConnection(waiter1);
+ FakeConnection conn = connector.assertRequest();
+ FakeWaiter waiter2 = new FakeWaiter();
+ mgr.getConnection(waiter2);
+ FakeWaiter waiter3 = new FakeWaiter();
+ mgr.getConnection(waiter3);
+ FakeWaiter waiter4 = new FakeWaiter();
+ mgr.getConnection(waiter4);
+ assertWaitUntil(waiter4::isFailure); // Full
+ }
+
+ @Test
+ public void testDiscardConnectionDuringInit() {
+ FakeConnectionProvider connector = new FakeConnectionProvider();
+ FakeConnectionManager mgr = new FakeConnectionManager(2, 1, connector);
+ FakeWaiter waiter1 = new FakeWaiter() {
+ @Override
+ public synchronized void initConnection(ContextInternal ctx, FakeConnection conn) {
+ super.initConnection(ctx, conn);
+ conn.close(); // Close during init
+ }
+ };
+ mgr.getConnection(waiter1);
+ FakeConnection conn = connector.assertRequest();
+ conn.connect();
+ assertWaitUntil(() -> connector.requests() == 1); // Connection close during init - reattempt to connect
+ assertFalse(mgr.closed());
+ }
+
+ @Test
+ public void testStress() {
+ int numActors = 16;
+ int numConnections = 1000;
+
+ FakeConnectionProvider connector = new FakeConnectionProvider() {
+ @Override
+ public long connect(ConnectionListener<FakeConnection> listener, ContextImpl context) {
+ int i = ThreadLocalRandom.current().nextInt(100);
+ FakeConnection conn = new FakeConnection(context, listener);
+ if (i < 10) {
+ conn.fail(new Exception("Could not connect"));
+ } else {
+ conn.connect();
+ }
+ return 1;
+ }
+ };
+ FakeConnectionManager mgr = new FakeConnectionManager(-1, 16, connector);
+
+ Thread[] actors = new Thread[numActors];
+ for (int i = 0; i < numActors; i++) {
+ actors[i] = new Thread(() -> {
+ CountDownLatch latch = new CountDownLatch(numConnections);
+ for (int i1 = 0; i1 < numConnections; i1++) {
+ mgr.getConnection(new Waiter<FakeConnection>((ContextImpl) vertx.getOrCreateContext()) {
+ @Override
+ public void handleFailure(ContextInternal ctx, Throwable failure) {
+ latch.countDown();
+ }
+
+ @Override
+ public void initConnection(ContextInternal ctx, FakeConnection conn) {
+ }
+
+ @Override
+ public boolean handleConnection(ContextInternal ctx, FakeConnection conn) throws Exception {
+ int action = ThreadLocalRandom.current().nextInt(100);
+ if (action < -1) {
+ latch.countDown();
+ return false;
+ } /* else if (i < 30) {
+ latch.countDown();
+ throw new Exception();
+ } */ else {
+ vertx.setTimer(10, id -> {
+ if (action < 15) {
+ conn.close();
+ } else {
+ conn.recycle();
+ }
+ latch.countDown();
+ });
+ return true;
+ }
+ }
+ });
+ }
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ });
+ actors[i].start();
+ }
+
+ for (int i = 0; i < actors.length; i++) {
+ try {
+ actors[i].join();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+
+ assertWaitUntil(() -> mgr.closed());
+
+ // Check state at the end
+ assertEquals(0, mgr.size());
+ assertEquals(0, mgr.pool.waitersCount());
+ assertEquals(0, mgr.pool.waitersInQueue());
+ assertEquals(0, mgr.pool.weight());
+ assertEquals(0, mgr.pool.capacity());
+ }
+
+ class FakeWaiter extends Waiter<FakeConnection> {
+
+ private FakeConnection init;
+ private boolean cancelled;
+ private boolean completed;
+ private Object result;
+
+ FakeWaiter() {
+ super((ContextImpl) vertx.getOrCreateContext());
+ }
+
+ synchronized boolean cancel() {
+ if (completed) {
+ return false;
+ } else {
+ cancelled = true;
+ return true;
+ }
+ }
+
+ synchronized void assertInitialized(FakeConnection conn) {
+ assertSame(conn, init);
+ }
+
+ synchronized void assertNotInitialized() {
+ assertSame(null, init);
+ }
+ synchronized void assertSuccess(FakeConnection conn) {
+ assertSame(conn, result);
+ }
+
+ synchronized void assertFailure(Throwable failure) {
+ assertSame(failure, result);
+ }
+
+ synchronized boolean isComplete() {
+ return completed;
+ }
+
+ synchronized boolean isSuccess() {
+ return completed && result instanceof FakeConnection;
+ }
+
+ synchronized boolean isFailure() {
+ return completed && result instanceof Throwable;
+ }
+
+ @Override
+ public synchronized void handleFailure(ContextInternal ctx, Throwable failure) {
+ assertFalse(completed);
+ completed = true;
+ result = failure;
+ }
+
+ @Override
+ public synchronized void initConnection(ContextInternal ctx, FakeConnection conn) {
+ assertNull(init);
+ assertNotNull(conn);
+ init = conn;
+ }
+
+ @Override
+ public synchronized boolean handleConnection(ContextInternal ctx, FakeConnection conn) throws Exception {
+ assertFalse(completed);
+ completed = true;
+ if (cancelled) {
+ return false;
+ } else {
+ synchronized (conn) {
+ conn.inflight++;
+ }
+ result = conn;
+ return true;
+ }
+ }
+
+ long recycle() {
+ FakeConnection conn = (FakeConnection) result;
+ return conn.recycle();
+ }
+ }
+
+ /*
+ class FakeConnnectionPool implements ConnectionPool<FakeConnection>, Function<SocketAddress, ConnectionPool<FakeConnection>> {
+
+ private final SocketAddress address = SocketAddress.inetSocketAddress(8080, "localhost");
+ private final int maxSize;
+ private final ArrayDeque<FakeConnection> available = new ArrayDeque<>();
+ private final Set<FakeConnection> all = new HashSet<>();
+ private boolean closed = true;
+ private int sequence;
+
+ FakeConnnectionPool(int maxSize) {
+ this.maxSize = maxSize;
+ }
+
+ @Override
+ public Deque<FakeConnection> available() {
+ return available;
+ }
+
+ @Override
+ public Set<FakeConnection> all() {
+ return all;
+ }
+
+ synchronized int size() {
+ return available.size();
+ }
+
+ synchronized boolean contains(FakeConnection conn) {
+ Deque<ConnectionHolder<FakeConnection>> a = (Deque<ConnectionHolder<FakeConnection>>)(Deque) available;
+ for (ConnectionHolder<FakeConnection> b : a) {
+ if (b.connection() == conn) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ synchronized int sequence() {
+ return sequence;
+ }
+
+ @Override
+ public synchronized FakeConnnectionPool apply(SocketAddress socketAddress) {
+ if (!socketAddress.equals(address)) {
+ throw new AssertionError();
+ }
+ if (!closed) {
+ throw new AssertionError();
+ }
+ closed = false;
+ sequence++;
+ return this;
+ }
+
+ @Override
+ public synchronized int maxSize() {
+ if (closed) {
+ throw new AssertionError();
+ }
+ return maxSize;
+ }
+
+ @Override
+ public synchronized boolean canBorrow(int connCount) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public synchronized FakeConnection pollConnection() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public synchronized boolean canCreateConnection(int connCount) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public synchronized boolean initConnection(FakeConnection conn) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public synchronized void recycleConnection(FakeConnection conn) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public synchronized void evictConnection(FakeConnection conn) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public synchronized boolean isValid(FakeConnection conn) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public synchronized ContextImpl getContext(FakeConnection conn) {
+ throw new UnsupportedOperationException();
+ }
+
+ public synchronized void close() {
+ if (closed) {
+ throw new AssertionError();
+ }
+ closed = true;
+ available.clear();
+ all.clear();
+ }
+
+ synchronized boolean isClosed() {
+ return closed;
+ }
+ }
+ */
+ class FakeConnection {
+
+ private static final int DISCONNECTED = 0;
+ private static final int CONNECTING = 1;
+ private static final int CONNECTED = 2;
+ private static final int CLOSED = 3;
+
+ private final ContextImpl context;
+ private final ConnectionListener<FakeConnection> listener;
+ private final Channel channel = new EmbeddedChannel();
+
+ private long inflight;
+ private long concurrency = 1;
+ private int status = DISCONNECTED;
+
+ FakeConnection(ContextImpl context, ConnectionListener<FakeConnection> listener) {
+ this.context = context;
+ this.listener = listener;
+ }
+
+ synchronized void close() {
+ if (status != CONNECTED) {
+ throw new IllegalStateException();
+ }
+ status = CLOSED;
+ listener.onDiscard();
+ }
+
+ synchronized long recycle(boolean dispose) {
+ return recycle(1, dispose);
+ }
+
+ synchronized long recycle() {
+ return recycle(true);
+ }
+
+ synchronized long recycle(int capacity, boolean dispose) {
+ inflight -= capacity;
+ listener.onRecycle(capacity, dispose);
+ return inflight;
+ }
+
+ synchronized FakeConnection concurrency(long value) {
+ if (value < 0) {
+ throw new IllegalArgumentException("Invalid concurrency");
+ }
+ if (status == CONNECTED) {
+ if (concurrency != value) {
+ concurrency = value;
+ listener.onConcurrencyChange(value);
+ }
+ } else {
+ concurrency = value;
+ }
+ return this;
+ }
+
+ FakeConnection awaitConnected() {
+ waitUntil(() -> {
+ synchronized (FakeConnection.this) {
+ return status == CONNECTED;
+ }
+ });
+ return this;
+ }
+
+ synchronized FakeConnection connect() {
+ if (status != DISCONNECTED) {
+ throw new IllegalStateException();
+ }
+ status = CONNECTING;
+ context.nettyEventLoop().execute(() -> {
+ synchronized (FakeConnection.this) {
+ status = CONNECTED;
+ listener.onConnectSuccess(this, concurrency, channel, context, 1, 1);
+ }
+ });
+ return this;
+ }
+
+ void fail(Throwable err) {
+ context.nettyEventLoop().execute(() -> listener.onConnectFailure(context, err, 1));
+ }
+ }
+
+ class FakeConnectionProvider implements ConnectionProvider<FakeConnection> {
+
+ private final ArrayDeque<FakeConnection> pendingRequests = new ArrayDeque<>();
+
+ void assertRequests(int expectedSize) {
+ assertEquals(expectedSize, pendingRequests.size());
+ }
+
+ int requests() {
+ return pendingRequests.size();
+ }
+
+ FakeConnection assertRequest() {
+ assertNotNull(pendingRequests);
+ assertTrue(pendingRequests.size() > 0);
+ FakeConnection request = pendingRequests.poll();
+ assertNotNull(request);
+ return request;
+ }
+
+ @Override
+ public long connect(ConnectionListener<FakeConnection> listener, ContextImpl context) {
+ pendingRequests.add(new FakeConnection(context, listener));
+ return 1;
+ }
+
+ @Override
+ public void close(FakeConnection conn) {
+ conn.listener.onDiscard();
+ }
+ }
+}
 | HttpClientResponse executed on a different context thread when resumed outside of its context
I am trying to load the body of an HTTP request. However, in the data handler I need to do asynchronous work, so I pause the stream at the beginning of the handler and resume it once the asynchronous work is done.
Pseudocode:
```
each time I receive a buffer
pause the response
asynchronously
do something with the buffer
resume the response
```
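Expressed against the Vert.x Java API, the same pattern looks roughly like this (a minimal sketch; `processAsync` is an illustrative stand-in for the asynchronous work, not a Vert.x call):
```
import io.vertx.core.Handler;
import io.vertx.core.Vertx;
import io.vertx.core.buffer.Buffer;
import io.vertx.core.http.HttpClient;

public class PausedBodyExample {
  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();
    HttpClient client = vertx.createHttpClient();
    client.getNow(8080, "localhost", "/", resp -> {
      resp.handler(buffer -> {
        // Stop further data events until the asynchronous work is done.
        resp.pause();
        processAsync(vertx, buffer, v -> resp.resume());
      });
      resp.endHandler(v -> System.out.println("body fully read"));
    });
  }

  // Illustrative stand-in for the asynchronous processing; defers to a timer.
  static void processAsync(Vertx vertx, Buffer buffer, Handler<Void> done) {
    vertx.setTimer(10, id -> done.handle(null));
  }
}
```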
However, it's not working as I expect it to.
```
val channel = Channel<Byte>()
val lock = Object()
val INITIAL = 1
val PAUSED = 2
val READING = 3
val RESUMED = 4
var state = INITIAL
vertxResponse.handler { buffer ->
synchronized(lock) {
if (state != INITIAL && state != RESUMED) {
println("Fail A")
}
state = PAUSED
vertxResponse.pause()
}
async(Unconfined) {
synchronized(lock) {
if (state != PAUSED) {
println("Fail B")
}
state = READING
}
try {
buffer.bytes.forEach {
channel.send(it)
}
} catch (t: Throwable) {
channel.close(t)
}
synchronized(lock) {
if (state != READING) {
println("Fail C")
}
state = RESUMED
vertxResponse.resume()
}
}
}
vertxResponse.endHandler {
channel.close()
}
vertxResponse.exceptionHandler {
channel.close(it)
}
bodyIterable = object : CancelableAsyncIterable<Byte> {
suspend override fun iterator(): CancelableAsyncIterator<Byte> = ChannelAsyncIterator(channel)
}
```
Typical log:
```
Fail A
Fail C
```
Am I getting something wrong? Is it possible my handler gets called again before it has time to pause the reading?
| 2017-11-15T17:16:32Z | 3.5 |
|
eclipse-vertx/vert.x | 2,108 | eclipse-vertx__vert.x-2108 | [
"1741"
] | 1591487a2f941e354cb9bb8419fc8a946defa216 | diff --git a/src/main/java/io/vertx/core/eventbus/impl/clustered/ClusteredEventBus.java b/src/main/java/io/vertx/core/eventbus/impl/clustered/ClusteredEventBus.java
--- a/src/main/java/io/vertx/core/eventbus/impl/clustered/ClusteredEventBus.java
+++ b/src/main/java/io/vertx/core/eventbus/impl/clustered/ClusteredEventBus.java
@@ -314,7 +314,7 @@ private String getClusterPublicHost(EventBusOptions options) {
private Handler<NetSocket> getServerHandler() {
return socket -> {
- RecordParser parser = RecordParser.newFixed(4, null);
+ RecordParser parser = RecordParser.newFixed(4);
Handler<Buffer> handler = new Handler<Buffer>() {
int size = -1;
diff --git a/src/main/java/io/vertx/core/parsetools/RecordParser.java b/src/main/java/io/vertx/core/parsetools/RecordParser.java
--- a/src/main/java/io/vertx/core/parsetools/RecordParser.java
+++ b/src/main/java/io/vertx/core/parsetools/RecordParser.java
@@ -20,6 +20,7 @@
import io.vertx.core.Handler;
import io.vertx.core.buffer.Buffer;
import io.vertx.core.parsetools.impl.RecordParserImpl;
+import io.vertx.core.streams.ReadStream;
/**
* A helper class which allows you to easily parse protocols which are delimited by a sequence of bytes, or fixed
@@ -56,7 +57,7 @@
* @author <a href="mailto:larsdtimm@gmail.com">Lars Timm</a>
*/
@VertxGen
-public interface RecordParser extends Handler<Buffer> {
+public interface RecordParser extends Handler<Buffer>, ReadStream<Buffer> {
void setOutput(Handler<Buffer> output);
@@ -68,7 +69,20 @@ public interface RecordParser extends Handler<Buffer> {
* @param output handler that will receive the output
*/
static RecordParser newDelimited(String delim, Handler<Buffer> output) {
- return RecordParserImpl.newDelimited(delim, output);
+ return RecordParserImpl.newDelimited(delim, null, output);
+ }
+
+ /**
+ * Like {@link #newDelimited(String)} but wraps the {@code stream}; the stream handlers will be set/unset
+ * when the {@link #handler(Handler)} is set.
+ * <p/>
+ * The {@code pause()}/{@code resume()} operations are propagated to the {@code stream}.
+ *
+ * @param delim the initial delimiter string
+ * @param stream the wrapped stream
+ */
+ static RecordParser newDelimited(String delim, ReadStream<Buffer> stream) {
+ return RecordParserImpl.newDelimited(delim, stream, null);
}
/**
@@ -80,19 +94,18 @@ static RecordParser newDelimited(String delim, Handler<Buffer> output) {
* @param delim the initial delimiter string
*/
static RecordParser newDelimited(String delim) {
- return RecordParserImpl.newDelimited(delim, null);
+ return RecordParserImpl.newDelimited(delim, null, null);
}
/**
* Create a new {@code RecordParser} instance, initially in delimited mode, and where the delimiter can be represented
* by the {@code Buffer} delim.
* <p>
- * {@code output} Will receive whole records which have been parsed.
*
* @param delim the initial delimiter buffer
*/
static RecordParser newDelimited(Buffer delim) {
- return RecordParserImpl.newDelimited(delim, null);
+ return RecordParserImpl.newDelimited(delim, null, null);
}
/**
@@ -103,9 +116,22 @@ static RecordParser newDelimited(Buffer delim) {
* @param output handler that will receive the output
*/
static RecordParser newDelimited(Buffer delim, Handler<Buffer> output) {
- return RecordParserImpl.newDelimited(delim, output);
+ return RecordParserImpl.newDelimited(delim, null, output);
}
+ /**
+ * Like {@link #newDelimited(Buffer)} but wraps the {@code stream}; the stream handlers will be set/unset
+ * when the {@link #handler(Handler)} is set.
+ * <p/>
+ * The {@code pause()}/{@code resume()} operations are propagated to the {@code stream}.
+ *
+ * @param delim the initial delimiter buffer
+ * @param stream the wrapped stream
+ */
+ static RecordParser newDelimited(Buffer delim, ReadStream<Buffer> stream) {
+ return RecordParserImpl.newDelimited(delim, stream, null);
+ }
+
/**
* Create a new {@code RecordParser} instance, initially in fixed size mode, and where the record size is specified
* by the {@code size} parameter.
@@ -115,7 +141,7 @@ static RecordParser newDelimited(Buffer delim, Handler<Buffer> output) {
* @param size the initial record size
*/
static RecordParser newFixed(int size) {
- return RecordParserImpl.newFixed(size, null);
+ return RecordParserImpl.newFixed(size, null, null);
}
/**
@@ -126,7 +152,20 @@ static RecordParser newFixed(int size) {
* @param output handler that will receive the output
*/
static RecordParser newFixed(int size, Handler<Buffer> output) {
- return RecordParserImpl.newFixed(size, output);
+ return RecordParserImpl.newFixed(size, null, output);
+ }
+
+ /**
+ * Like {@link #newFixed(int)} but wraps the {@code stream}; the stream handlers will be set/unset
+ * when the {@link #handler(Handler)} is set.
+ * <p/>
+ * The {@code pause()}/{@code resume()} operations are propagated to the {@code stream}.
+ *
+ * @param size the initial record size
+ * @param stream the wrapped stream
+ */
+ static RecordParser newFixed(int size, ReadStream<Buffer> stream) {
+ return RecordParserImpl.newFixed(size, stream, null);
}
/**
@@ -164,4 +203,19 @@ static RecordParser newFixed(int size, Handler<Buffer> output) {
* @param buffer a chunk of data
*/
void handle(Buffer buffer);
+
+ @Override
+ RecordParser exceptionHandler(Handler<Throwable> handler);
+
+ @Override
+ RecordParser handler(Handler<Buffer> handler);
+
+ @Override
+ RecordParser pause();
+
+ @Override
+ RecordParser resume();
+
+ @Override
+ RecordParser endHandler(Handler<Void> endHandler);
}
diff --git a/src/main/java/io/vertx/core/parsetools/impl/RecordParserImpl.java b/src/main/java/io/vertx/core/parsetools/impl/RecordParserImpl.java
--- a/src/main/java/io/vertx/core/parsetools/impl/RecordParserImpl.java
+++ b/src/main/java/io/vertx/core/parsetools/impl/RecordParserImpl.java
@@ -20,6 +20,7 @@
import io.vertx.core.buffer.Buffer;
import io.vertx.core.impl.Arguments;
import io.vertx.core.parsetools.RecordParser;
+import io.vertx.core.streams.ReadStream;
import java.util.Objects;
@@ -39,8 +40,13 @@ public class RecordParserImpl implements RecordParser {
private byte[] delim;
private int recordSize;
private Handler<Buffer> output;
+ private Handler<Void> endHandler;
+ private Handler<Throwable> exceptionHandler;
- private RecordParserImpl(Handler<Buffer> output) {
+ private final ReadStream<Buffer> stream;
+
+ private RecordParserImpl(ReadStream<Buffer> stream, Handler<Buffer> output) {
+ this.stream = stream;
this.output = output;
}
@@ -74,8 +80,8 @@ public static Buffer latin1StringToBytes(String str) {
* @param delim the initial delimiter string
* @param output handler that will receive the output
*/
- public static RecordParser newDelimited(String delim, Handler<Buffer> output) {
- return newDelimited(latin1StringToBytes(delim), output);
+ public static RecordParser newDelimited(String delim, ReadStream<Buffer> stream, Handler<Buffer> output) {
+ return newDelimited(latin1StringToBytes(delim), stream, output);
}
/**
@@ -87,8 +93,8 @@ public static RecordParser newDelimited(String delim, Handler<Buffer> output) {
* @param delim the initial delimiter buffer
* @param output handler that will receive the output
*/
- public static RecordParser newDelimited(Buffer delim, Handler<Buffer> output) {
- RecordParserImpl ls = new RecordParserImpl(output);
+ public static RecordParser newDelimited(Buffer delim, ReadStream<Buffer> stream, Handler<Buffer> output) {
+ RecordParserImpl ls = new RecordParserImpl(stream, output);
ls.delimitedMode(delim);
return ls;
}
@@ -102,9 +108,9 @@ public static RecordParser newDelimited(Buffer delim, Handler<Buffer> output) {
* @param size the initial record size
* @param output handler that will receive the output
*/
- public static RecordParser newFixed(int size, Handler<Buffer> output) {
+ public static RecordParser newFixed(int size, ReadStream<Buffer> stream, Handler<Buffer> output) {
Arguments.require(size > 0, "Size must be > 0");
- RecordParserImpl ls = new RecordParserImpl(output);
+ RecordParserImpl ls = new RecordParserImpl(stream, output);
ls.fixedSizeMode(size);
return ls;
}
@@ -217,4 +223,60 @@ public void handle(Buffer buffer) {
}
handleParsing();
}
+
+ private void end() {
+ Handler<Void> handler = endHandler;
+ if (handler != null) {
+ handler.handle(null);
+ }
+ }
+
+ @Override
+ public RecordParser exceptionHandler(Handler<Throwable> handler) {
+ exceptionHandler = handler;
+ return this;
+ }
+
+ @Override
+ public RecordParser handler(Handler<Buffer> handler) {
+ output = handler;
+ if (stream != null) {
+ if (handler != null) {
+ stream.endHandler(v -> end());
+ stream.exceptionHandler(err -> {
+ if (exceptionHandler != null) {
+ exceptionHandler.handle(err);
+ }
+ });
+ stream.handler(this);
+ } else {
+ stream.handler(null);
+ stream.endHandler(null);
+ stream.exceptionHandler(null);
+ }
+ }
+ return this;
+ }
+
+ @Override
+ public RecordParser pause() {
+ if (stream != null) {
+ stream.pause();
+ }
+ return this;
+ }
+
+ @Override
+ public RecordParser resume() {
+ if (stream != null) {
+ stream.resume();
+ }
+ return this;
+ }
+
+ @Override
+ public RecordParser endHandler(Handler<Void> handler) {
+ endHandler = handler;
+ return this;
+ }
}
| diff --git a/src/test/java/io/vertx/test/core/RecordParserTest.java b/src/test/java/io/vertx/test/core/RecordParserTest.java
--- a/src/test/java/io/vertx/test/core/RecordParserTest.java
+++ b/src/test/java/io/vertx/test/core/RecordParserTest.java
@@ -19,14 +19,21 @@
import io.vertx.core.Handler;
import io.vertx.core.buffer.Buffer;
import io.vertx.core.parsetools.RecordParser;
+import io.vertx.core.streams.ReadStream;
import org.junit.Test;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Collections;
import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
import static io.vertx.test.core.TestUtils.assertNullPointerException;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
/**
* @author <a href="http://tfox.org">Tim Fox</a>
@@ -275,4 +282,61 @@ public void testSpreadDelimiter() {
doTestDelimited(Buffer.buffer("start-ab-c-dddabc"), Buffer.buffer("abc"),
new Integer[] { 18 }, Buffer.buffer("start-ab-c-ddd"));
}
+
+ @Test
+ public void testWrapReadStream() {
+ AtomicBoolean paused = new AtomicBoolean();
+ AtomicReference<Handler<Buffer>> eventHandler = new AtomicReference<>();
+ AtomicReference<Handler<Void>> endHandler = new AtomicReference<>();
+ AtomicReference<Handler<Throwable>> exceptionHandler = new AtomicReference<>();
+ ReadStream<Buffer> original = new ReadStream<Buffer>() {
+ @Override
+ public ReadStream<Buffer> exceptionHandler(Handler<Throwable> handler) {
+ exceptionHandler.set(handler);
+ return this;
+ }
+ @Override
+ public ReadStream<Buffer> handler(Handler<Buffer> handler) {
+ eventHandler.set(handler);
+ return this;
+ }
+ @Override
+ public ReadStream<Buffer> pause() {
+ paused.set(true);
+ return this;
+ }
+ @Override
+ public ReadStream<Buffer> resume() {
+ paused.set(false);
+ return this;
+ }
+ @Override
+ public ReadStream<Buffer> endHandler(Handler<Void> handler) {
+ endHandler.set(handler);
+ return this;
+ }
+ };
+ RecordParser parser = RecordParser.newDelimited("\r\n", original);
+ AtomicInteger ends = new AtomicInteger();
+ parser.endHandler(v -> ends.incrementAndGet());
+ List<String> records = new ArrayList<>();
+ parser.handler(record -> records.add(record.toString()));
+ assertFalse(paused.get());
+ parser.pause();
+ assertTrue(paused.get());
+ parser.resume();
+ assertFalse(paused.get());
+ eventHandler.get().handle(Buffer.buffer("first\r\nsecond\r\nthird"));
+ assertEquals(Arrays.asList("first", "second"), records);
+ assertEquals(0, ends.get());
+ Throwable cause = new Throwable();
+ exceptionHandler.get().handle(cause);
+ List<Throwable> failures = new ArrayList<>();
+ parser.exceptionHandler(failures::add);
+ exceptionHandler.get().handle(cause);
+ assertEquals(Collections.singletonList(cause), failures);
+ endHandler.get().handle(null);
+ assertEquals(Arrays.asList("first", "second"), records);
+ assertEquals(1, ends.get());
+ }
}
| RecordParser should implement ReadStream<Buffer>
That way we can trivially pump files or other ReadStream sources through it:
pump(asyncFile, recordParser)
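For illustration, a minimal sketch of how the wrapped-stream API added by the patch above could be used; the file name and handler bodies are hypothetical:
```java
// Sketch only: newDelimited(String, ReadStream<Buffer>) is the variant this
// patch introduces; pause()/resume() on the parser propagate to the file.
vertx.fileSystem().open("records.txt", new OpenOptions(), ar -> {
  if (ar.succeeded()) {
    AsyncFile file = ar.result();
    RecordParser parser = RecordParser.newDelimited("\r\n", file);
    parser.handler(record -> System.out.println("Record: " + record));
    parser.endHandler(v -> file.close());
  }
});
```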
| Good idea, would you mind contributing it? | 2017-08-31T10:06:21Z | 3.5 |
eclipse-vertx/vert.x | 2,083 | eclipse-vertx__vert.x-2083 | [
"2059"
] | 7869b43c83e033f2f9ebfb247100b283e65105d5 | diff --git a/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java b/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java
--- a/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java
+++ b/src/main/java/io/vertx/core/http/impl/HttpClientImpl.java
@@ -153,7 +153,7 @@ public HttpClientImpl(VertxInternal vertx, HttpClientOptions options) {
@Override
public HttpClient websocket(RequestOptions options, Handler<WebSocket> wsConnect) {
- return websocket(options.getPort(), options.getHost(), options.getURI(), wsConnect);
+ return websocket(options, null, wsConnect);
}
@Override
@@ -164,7 +164,7 @@ public HttpClient websocket(int port, String host, String requestURI, Handler<We
@Override
public HttpClient websocket(RequestOptions options, Handler<WebSocket> wsConnect, Handler<Throwable> failureHandler) {
- return websocket(options.getPort(), options.getHost(), options.getURI(), wsConnect, failureHandler);
+ return websocket(options, null, wsConnect, failureHandler);
}
public HttpClient websocket(int port, String host, String requestURI, Handler<WebSocket> wsConnect, Handler<Throwable> failureHandler){
@@ -184,7 +184,7 @@ public HttpClient websocket(String host, String requestURI, Handler<WebSocket> w
@Override
public HttpClient websocket(RequestOptions options, MultiMap headers, Handler<WebSocket> wsConnect) {
- return websocket(options.getPort(), options.getHost(), options.getURI(), headers, wsConnect);
+ return websocket(options, headers, null, wsConnect);
}
@Override
@@ -195,7 +195,7 @@ public HttpClient websocket(int port, String host, String requestURI, MultiMap h
@Override
public HttpClient websocket(RequestOptions options, MultiMap headers, Handler<WebSocket> wsConnect, Handler<Throwable> failureHandler) {
- return websocket(options.getPort(), options.getHost(), options.getURI(), headers, wsConnect, failureHandler);
+ return websocket(options, headers, null, wsConnect, failureHandler);
}
@Override
@@ -216,7 +216,7 @@ public HttpClient websocket(String host, String requestURI, MultiMap headers, Ha
@Override
public HttpClient websocket(RequestOptions options, MultiMap headers, WebsocketVersion version, Handler<WebSocket> wsConnect) {
- return websocket(options.getPort(), options.getHost(), options.getURI(), headers, version, wsConnect);
+ return websocket(options, headers, version, null, wsConnect);
}
@Override
@@ -227,7 +227,7 @@ public HttpClient websocket(int port, String host, String requestURI, MultiMap h
@Override
public HttpClient websocket(RequestOptions options, MultiMap headers, WebsocketVersion version, Handler<WebSocket> wsConnect, Handler<Throwable> failureHandler) {
- return websocket(options.getPort(), options.getHost(), options.getURI(), headers, version, wsConnect, failureHandler);
+ return websocket(options, headers, version, null, wsConnect, failureHandler);
}
@Override
@@ -250,7 +250,8 @@ public HttpClient websocket(String host, String requestURI, MultiMap headers, We
@Override
public HttpClient websocket(RequestOptions options, MultiMap headers, WebsocketVersion version, String subProtocols, Handler<WebSocket> wsConnect) {
- return websocket(options.getPort(), options.getHost(), options.getURI(), headers, version, subProtocols, wsConnect);
+ websocketStream(options, headers, version, subProtocols).handler(wsConnect);
+ return this;
}
@Override
@@ -262,7 +263,8 @@ public HttpClient websocket(int port, String host, String requestURI, MultiMap h
@Override
public HttpClient websocket(RequestOptions options, MultiMap headers, WebsocketVersion version, String subProtocols, Handler<WebSocket> wsConnect, Handler<Throwable> failureHandler) {
- return websocket(options.getPort(), options.getHost(), options.getURI(), headers, version, subProtocols, wsConnect, failureHandler);
+ websocketStream(options, headers, version, subProtocols).exceptionHandler(failureHandler).handler(wsConnect);
+ return this;
}
@Override
| diff --git a/src/test/java/io/vertx/test/core/WebsocketTest.java b/src/test/java/io/vertx/test/core/WebsocketTest.java
--- a/src/test/java/io/vertx/test/core/WebsocketTest.java
+++ b/src/test/java/io/vertx/test/core/WebsocketTest.java
@@ -27,11 +27,25 @@
import io.vertx.core.Vertx;
import io.vertx.core.VertxOptions;
import io.vertx.core.buffer.Buffer;
-import io.vertx.core.http.*;
+import io.vertx.core.http.ClientAuth;
+import io.vertx.core.http.HttpClient;
+import io.vertx.core.http.HttpClientOptions;
+import io.vertx.core.http.HttpMethod;
+import io.vertx.core.http.HttpServer;
+import io.vertx.core.http.HttpServerOptions;
+import io.vertx.core.http.HttpServerRequest;
+import io.vertx.core.http.HttpVersion;
+import io.vertx.core.http.RequestOptions;
+import io.vertx.core.http.ServerWebSocket;
+import io.vertx.core.http.WebSocket;
+import io.vertx.core.http.WebSocketBase;
+import io.vertx.core.http.WebSocketFrame;
import io.vertx.core.http.WebsocketRejectedException;
+import io.vertx.core.http.WebsocketVersion;
import io.vertx.core.impl.ConcurrentHashSet;
import io.vertx.core.net.NetServer;
import io.vertx.core.net.NetSocket;
+import io.vertx.core.net.SelfSignedCertificate;
import io.vertx.core.streams.ReadStream;
import io.vertx.test.core.tls.Cert;
import io.vertx.test.core.tls.Trust;
@@ -48,7 +62,12 @@
import java.util.List;
import java.util.Map;
import java.util.Set;
-import java.util.concurrent.*;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
@@ -1735,4 +1754,25 @@ private void doTestClientWebsocketConnectionCloseOnBadResponse(boolean keepAlive
}
}
}
+
+ @Test
+ public void testClearClientSslOptions() {
+ SelfSignedCertificate certificate = SelfSignedCertificate.create();
+ HttpServerOptions serverOptions = new HttpServerOptions().setPort(HttpTestBase.DEFAULT_HTTP_PORT)
+ .setSsl(true)
+ .setKeyCertOptions(certificate.keyCertOptions());
+ HttpClientOptions options = new HttpClientOptions()
+ .setTrustAll(true)
+ .setVerifyHost(false);
+ client = vertx.createHttpClient(options);
+ server = vertx.createHttpServer(serverOptions).websocketHandler(WebSocketBase::close).listen(onSuccess(server -> {
+ RequestOptions requestOptions = new RequestOptions().setPort(HttpTestBase.DEFAULT_HTTP_PORT).setSsl(true);
+ client.websocket(requestOptions, ws -> {
+ ws.closeHandler(v -> {
+ testComplete();
+ });
+ });
+ }));
+ await();
+ }
}
| WebSocket request options not honored
```java
HttpClient client = vertx.createHttpClient(new HttpClientOptions());
RequestOptions options = new RequestOptions()
.setHost("ws.cex.io")
.setPort(443)
.setSsl(true)
.setURI("/ws");
client.websocket(options, ws -> {
});
```
The connection fails because the HTTP upgrade request is sent as clear text.
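A simplified sketch of the pre-fix delegation, taken from the patch above, explains the failure: RequestOptions is flattened to port/host/URI, so the ssl flag is dropped.
```java
// Before the fix: options.isSsl() never reaches the connection setup.
public HttpClient websocket(RequestOptions options, Handler<WebSocket> wsConnect) {
  return websocket(options.getPort(), options.getHost(), options.getURI(), wsConnect);
}
```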
| This code works
```java
HttpClient client = vertx.createHttpClient(new HttpClientOptions()
.setDefaultHost("ws.cex.io")
.setDefaultPort(443)
.setSsl(true));
client.websocket("/ws", ws -> {
});
``` | 2017-07-31T17:33:57Z | 3.4 |
eclipse-vertx/vert.x | 2,074 | eclipse-vertx__vert.x-2074 | [
"1375"
] | 71bf69e90d81a23ee658043c7cae8829939b6cee | diff --git a/src/main/java/io/vertx/core/impl/VertxImpl.java b/src/main/java/io/vertx/core/impl/VertxImpl.java
--- a/src/main/java/io/vertx/core/impl/VertxImpl.java
+++ b/src/main/java/io/vertx/core/impl/VertxImpl.java
@@ -204,19 +204,17 @@ private void createAndStartEventBus(VertxOptions options, Handler<AsyncResult<Ve
} else {
eventBus = new EventBusImpl(this);
}
- eventBus.start(ar2 -> {
- if (ar2.succeeded()) {
+ eventBus.start(ar -> {
+ if (ar.succeeded()) {
if (metrics != null) {
// If the metric provider wants to use the event bus, it cannot use it in its constructor as the event bus
// may not be initialized yet. We invokes the eventBusInitialized so it can starts using the event bus.
metrics.eventBusInitialized(eventBus);
}
-
- if (resultHandler != null) {
- resultHandler.handle(Future.succeededFuture(this));
- }
+ if (resultHandler != null) resultHandler.handle(Future.succeededFuture(this));
} else {
- log.error("Failed to start event bus", ar2.cause());
+ log.error("Failed to start event bus", ar.cause());
+ if (resultHandler != null) resultHandler.handle(Future.failedFuture(ar.cause()));
}
});
}
| diff --git a/src/test/java/io/vertx/test/core/ClusteredEventBusStartFailureTest.java b/src/test/java/io/vertx/test/core/ClusteredEventBusStartFailureTest.java
new file mode 100644
--- /dev/null
+++ b/src/test/java/io/vertx/test/core/ClusteredEventBusStartFailureTest.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2011-2017 The original author or authors
+ * ------------------------------------------------------
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * and Apache License v2.0 which accompanies this distribution.
+ *
+ * The Eclipse Public License is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * The Apache License v2.0 is available at
+ * http://www.opensource.org/licenses/apache2.0.php
+ *
+ * You may elect to redistribute this code under either of these licenses.
+ */
+
+package io.vertx.test.core;
+
+import io.vertx.core.AsyncResult;
+import io.vertx.core.Vertx;
+import io.vertx.core.VertxOptions;
+import io.vertx.test.fakecluster.FakeClusterManager;
+import org.junit.Test;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.junit.Assert.*;
+
+/**
+ * @author Thomas Segismont
+ */
+public class ClusteredEventBusStartFailureTest {
+
+ @Test
+ public void testCallbackInvokedOnFailure() throws Exception {
+ VertxOptions options = new VertxOptions()
+ .setClusterManager(new FakeClusterManager())
+ .setClusterHost(getClass().getSimpleName());
+
+ AtomicReference<AsyncResult<Vertx>> resultRef = new AtomicReference<>();
+
+ CountDownLatch latch = new CountDownLatch(1);
+ Vertx.clusteredVertx(options, ar -> {
+ resultRef.set(ar);
+ latch.countDown();
+ });
+ latch.await(5, TimeUnit.SECONDS);
+
+ assertFalse(resultRef.get() == null);
+ assertTrue(resultRef.get().failed());
+ }
+}
| Vertx.clusteredVertx() handler not called if it cannot bind to the address
When starting a clustered Vert.x instance:
``` java
Vertx.clusteredVertx(
new VertxOptions()
.setClustered(true)
.setClusterHost("127.0.0.1")
.setClusterPort(9999),
result ->
{
System.out.println("Cluster started");
}
);
```
if it cannot bind to the address, it prints the exception:
```
io.vertx.core.impl.VertxImpl SEVERE: Failed to start event bus
java.net.BindException: Address already in use
at sun.nio.ch.Net.bind0(Native Method)
at sun.nio.ch.Net.bind(Net.java:433)
at sun.nio.ch.Net.bind(Net.java:425)
at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:223)
at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:74)
at io.netty.channel.socket.nio.NioServerSocketChannel.doBind(NioServerSocketChannel.java:125)
at io.netty.channel.AbstractChannel$AbstractUnsafe.bind(AbstractChannel.java:485)
at io.netty.channel.DefaultChannelPipeline$HeadContext.bind(DefaultChannelPipeline.java:1089)
at io.netty.channel.AbstractChannelHandlerContext.invokeBind(AbstractChannelHandlerContext.java:440)
at io.netty.channel.AbstractChannelHandlerContext.bind(AbstractChannelHandlerContext.java:425)
at io.netty.channel.DefaultChannelPipeline.bind(DefaultChannelPipeline.java:903)
at io.netty.channel.AbstractChannel.bind(AbstractChannel.java:198)
at io.netty.bootstrap.AbstractBootstrap$2.run(AbstractBootstrap.java:348)
at io.netty.util.concurrent.SingleThreadEventExecutor.runAllTasks(SingleThreadEventExecutor.java:358)
at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:357)
at io.netty.util.concurrent.SingleThreadEventExecutor$2.run(SingleThreadEventExecutor.java:112)
at java.lang.Thread.run(Thread.java:745)
```
and the result handler is never called.
The problem seems to be in `io.vertx.core.impl.VertxImpl.createAndStartEventBus()`, where only the error is logged and the `resultHandler` is not called:
``` java
} else {
log.error("Failed to start event bus", ar2.cause());
}
```
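For clarity, a minimal sketch of the expected behaviour, mirroring the fix in the patch above: fail the caller's handler instead of only logging. Variable names follow the snippet.
```java
} else {
  log.error("Failed to start event bus", ar2.cause());
  if (resultHandler != null) {
    // Report the bind failure to the caller of Vertx.clusteredVertx().
    resultHandler.handle(Future.failedFuture(ar2.cause()));
  }
}
```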
| Would you mind contributing a test for this and possibly a fix?
| 2017-07-26T17:14:41Z | 3.4 |
eclipse-vertx/vert.x | 2,073 | eclipse-vertx__vert.x-2073 | [
"1486"
] | 71bf69e90d81a23ee658043c7cae8829939b6cee | diff --git a/src/main/generated/io/vertx/core/file/OpenOptionsConverter.java b/src/main/generated/io/vertx/core/file/OpenOptionsConverter.java
--- a/src/main/generated/io/vertx/core/file/OpenOptionsConverter.java
+++ b/src/main/generated/io/vertx/core/file/OpenOptionsConverter.java
@@ -27,6 +27,9 @@
public class OpenOptionsConverter {
public static void fromJson(JsonObject json, OpenOptions obj) {
+ if (json.getValue("append") instanceof Boolean) {
+ obj.setAppend((Boolean)json.getValue("append"));
+ }
if (json.getValue("create") instanceof Boolean) {
obj.setCreate((Boolean)json.getValue("create"));
}
@@ -60,6 +63,7 @@ public static void fromJson(JsonObject json, OpenOptions obj) {
}
public static void toJson(OpenOptions obj, JsonObject json) {
+ json.put("append", obj.isAppend());
json.put("create", obj.isCreate());
json.put("createNew", obj.isCreateNew());
json.put("deleteOnClose", obj.isDeleteOnClose());
diff --git a/src/main/java/io/vertx/core/file/OpenOptions.java b/src/main/java/io/vertx/core/file/OpenOptions.java
--- a/src/main/java/io/vertx/core/file/OpenOptions.java
+++ b/src/main/java/io/vertx/core/file/OpenOptions.java
@@ -21,7 +21,7 @@
/**
* Describes how an {@link io.vertx.core.file.AsyncFile} should be opened.
- *
+ *
* @author <a href="http://tfox.org">Tim Fox</a>
*/
@DataObject(generateConverter = true)
@@ -38,6 +38,11 @@ public class OpenOptions {
public static final boolean DEFAULT_TRUNCATEEXISTING = false;
public static final boolean DEFAULT_SPARSE = false;
+ /**
+ * Whether the file should be opened in append mode by default = false.
+ */
+ public static final boolean DEFAULT_APPEND = false;
+
private String perms = DEFAULT_PERMS;
private boolean read = DEFAULT_READ;
private boolean write = DEFAULT_WRITE;
@@ -48,6 +53,7 @@ public class OpenOptions {
private boolean deleteOnClose = DEFAULT_DELETEONCLOSE;
private boolean truncateExisting = DEFAULT_TRUNCATEEXISTING;
private boolean sparse = DEFAULT_SPARSE;
+ private boolean append = DEFAULT_APPEND;
/**
* Default constructor
@@ -72,6 +78,7 @@ public OpenOptions(OpenOptions other) {
this.deleteOnClose = other.deleteOnClose;
this.truncateExisting = other.truncateExisting;
this.sparse = other.sparse;
+ this.append = other.append;
}
/**
@@ -280,4 +287,22 @@ public OpenOptions setDsync(boolean dsync) {
this.dsync = dsync;
return this;
}
+
+ /**
+ * @return true if the file should be opened in append mode, false otherwise
+ */
+ public boolean isAppend() {
+ return append;
+ }
+
+ /**
+ * Whether the file should be opened in append mode. Defaults to {@code false}.
+ *
+ * @param append true to open file in append mode, false otherwise
+ * @return a reference to this, so the API can be used fluently
+ */
+ public OpenOptions setAppend(boolean append) {
+ this.append = append;
+ return this;
+ }
}
diff --git a/src/main/java/io/vertx/core/file/impl/AsyncFileImpl.java b/src/main/java/io/vertx/core/file/impl/AsyncFileImpl.java
--- a/src/main/java/io/vertx/core/file/impl/AsyncFileImpl.java
+++ b/src/main/java/io/vertx/core/file/impl/AsyncFileImpl.java
@@ -101,6 +101,7 @@ public class AsyncFileImpl implements AsyncFile {
} else {
ch = AsynchronousFileChannel.open(file, opts, vertx.getWorkerPool());
}
+ if (options.isAppend()) writePos = ch.size();
} catch (IOException e) {
throw new FileSystemException(e);
}
| diff --git a/src/test/java/io/vertx/test/core/FileSystemTest.java b/src/test/java/io/vertx/test/core/FileSystemTest.java
--- a/src/test/java/io/vertx/test/core/FileSystemTest.java
+++ b/src/test/java/io/vertx/test/core/FileSystemTest.java
@@ -1162,11 +1162,9 @@ public void testWriteStreamAppend() throws Exception {
createFile(fileName, existing);
byte[] content = TestUtils.randomByteArray(chunkSize * chunks);
Buffer buff = Buffer.buffer(content);
- vertx.fileSystem().open(testDir + pathSep + fileName, new OpenOptions(), ar -> {
+ vertx.fileSystem().open(testDir + pathSep + fileName, new OpenOptions().setAppend(true), ar -> {
if (ar.succeeded()) {
AsyncFile ws = ar.result();
- long size = vertx.fileSystem().propsBlocking(testDir + pathSep + fileName).size();
- ws.setWritePos(size);
ws.exceptionHandler(t -> fail(t.getMessage()));
for (int i = 0; i < chunks; i++) {
Buffer chunk = buff.getBuffer(i * chunkSize, (i + 1) * chunkSize);
| Allow AsyncFiles to be opened in append mode
Appending to a file requires three steps:
1. Open the file with `FileSystem.open()` with an `OpenOptions` object on which `setTruncateExisting(false)` and `setWrite(true)` have been called.
2. Get the file properties with `FileSystem.props()`
3. Call `setWritePos()` on the file with the length according to the properties.
It seems like a simpler and less error-prone way would be to add `setAppend()` and `getAppend()` to `OpenOptions` and make `AsyncFileImpl`'s constructor translate it to `StandardOpenOption.APPEND`.
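A sketch of the proposed one-step usage; the file path is illustrative, and `setAppend(true)` is the option the patch above adds (on open it positions the write pointer at the end of the file):
```java
vertx.fileSystem().open("app.log", new OpenOptions().setAppend(true), ar -> {
  if (ar.succeeded()) {
    AsyncFile file = ar.result();
    // Writes now land after the existing content.
    file.write(Buffer.buffer("one more line\n"));
  }
});
```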
| 2017-07-26T16:39:07Z | 3.4 |
|
eclipse-vertx/vert.x | 2,064 | eclipse-vertx__vert.x-2064 | [
"2027"
] | 349ddf91172dafaf16ed6639469ba80aa75d204c | diff --git a/src/main/java/io/vertx/core/http/impl/ServerConnection.java b/src/main/java/io/vertx/core/http/impl/ServerConnection.java
--- a/src/main/java/io/vertx/core/http/impl/ServerConnection.java
+++ b/src/main/java/io/vertx/core/http/impl/ServerConnection.java
@@ -43,6 +43,7 @@
import io.vertx.core.AsyncResult;
import io.vertx.core.Handler;
import io.vertx.core.Vertx;
+import io.vertx.core.VertxException;
import io.vertx.core.buffer.Buffer;
import io.vertx.core.http.HttpConnection;
import io.vertx.core.http.HttpServerOptions;
@@ -345,6 +346,9 @@ synchronized protected void handleClosed() {
if (ws != null) {
ws.handleClosed();
}
+ if (currentRequest != null) {
+ currentRequest.handleException(new VertxException("Connection was closed"));
+ }
if (pendingResponse != null) {
if (METRICS_ENABLED && metrics != null) {
metrics.requestReset(requestMetric);
| diff --git a/src/test/java/io/vertx/test/core/Http1xTest.java b/src/test/java/io/vertx/test/core/Http1xTest.java
--- a/src/test/java/io/vertx/test/core/Http1xTest.java
+++ b/src/test/java/io/vertx/test/core/Http1xTest.java
@@ -3397,7 +3397,7 @@ public void testInvalidTrailerInHttpServerRequest() throws Exception {
so.write("01234567");
}
}, errors -> {
- assertEquals(1, errors.size());
+ assertEquals(2, errors.size());
assertEquals(TooLongFrameException.class, errors.get(0).getClass());
});
}
@@ -3407,7 +3407,7 @@ public void testInvalidChunkInHttpServerRequest() throws Exception {
testHttpServerRequestDecodeError(so -> {
so.write("invalid\r\n"); // Empty chunk
}, errors -> {
- assertEquals(1, errors.size());
+ assertEquals(2, errors.size());
assertEquals(NumberFormatException.class, errors.get(0).getClass());
});
}
diff --git a/src/test/java/io/vertx/test/core/HttpTest.java b/src/test/java/io/vertx/test/core/HttpTest.java
--- a/src/test/java/io/vertx/test/core/HttpTest.java
+++ b/src/test/java/io/vertx/test/core/HttpTest.java
@@ -1304,6 +1304,26 @@ public void testNoExceptionHandlerCalledWhenResponseReceivedOK() throws Exceptio
await();
}
+ @Test
+ public void testServerRequestExceptionHandlerCalledWhenConnectionClosed() throws Exception {
+ CountDownLatch closeLatch = new CountDownLatch(1);
+ server.requestHandler(request -> {
+ request.exceptionHandler(err -> {
+ testComplete();
+ });
+ request.handler(buff -> {
+ closeLatch.countDown();
+ });
+ });
+ startServer();
+ AtomicReference<HttpConnection> conn = new AtomicReference<>();
+ client.post(DEFAULT_HTTP_PORT, DEFAULT_HTTP_HOST, DEFAULT_TEST_URI, resp -> {
+ }).connectionHandler(conn::set).setChunked(true).write("some_chunk");
+ awaitLatch(closeLatch);
+ conn.get().close();
+ await();
+ }
+
@Test
public void testDefaultStatus() {
testStatusCode(-1, null);
| HttpServerRequest exception handler is not called when the connection is closed
`HttpServerRequest` extends `ReadStream<Buffer>`. When there is an ongoing file upload and the connection closes abruptly, the exception handler on the `HttpServerRequest` should be called to signal that an error occurred on the stream.
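A minimal sketch of the expected behaviour; handler bodies are illustrative:
```java
server.requestHandler(request -> {
  request.handler(chunk -> {
    // consume upload data
  });
  // Expected to fire when the client connection closes mid-upload.
  request.exceptionHandler(err -> System.out.println("Upload aborted: " + err));
});
```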
| Note: this works with HTTP/2 but not with HTTP/1.x. | 2017-07-20T13:34:27Z | 3.4 |
eclipse-vertx/vert.x | 2,017 | eclipse-vertx__vert.x-2017 | [
"2012"
] | bf199bc643c400e11018ffb79f87d8a351da97e0 | diff --git a/src/main/java/io/vertx/core/file/impl/AsyncFileImpl.java b/src/main/java/io/vertx/core/file/impl/AsyncFileImpl.java
--- a/src/main/java/io/vertx/core/file/impl/AsyncFileImpl.java
+++ b/src/main/java/io/vertx/core/file/impl/AsyncFileImpl.java
@@ -148,11 +148,15 @@ private synchronized AsyncFile doWrite(Buffer buffer, long position, Handler<Asy
Handler<AsyncResult<Void>> wrapped = ar -> {
if (ar.succeeded()) {
checkContext();
- if (writesOutstanding == 0 && closedDeferred != null) {
- closedDeferred.run();
- } else {
- checkDrained();
+ Runnable action;
+ synchronized (AsyncFileImpl.this) {
+ if (writesOutstanding == 0 && closedDeferred != null) {
+ action = closedDeferred;
+ } else {
+ action = this::checkDrained;
+ }
}
+ action.run();
if (handler != null) {
handler.handle(ar);
}
@@ -372,7 +376,9 @@ private void doWrite(ByteBuffer buff, long position, long toWrite, Handler<Async
if (toWrite == 0) {
throw new IllegalStateException("Cannot save zero bytes");
}
- writesOutstanding += toWrite;
+ synchronized (this) {
+ writesOutstanding += toWrite;
+ }
writeInternal(buff, position, handler);
}
@@ -392,7 +398,9 @@ public void completed(Integer bytesWritten, Object attachment) {
} else {
// It's been fully written
context.runOnContext((v) -> {
- writesOutstanding -= buff.limit();
+ synchronized (AsyncFileImpl.this) {
+ writesOutstanding -= buff.limit();
+ }
handler.handle(Future.succeededFuture());
});
}
| diff --git a/src/test/java/io/vertx/test/core/FileSystemTest.java b/src/test/java/io/vertx/test/core/FileSystemTest.java
--- a/src/test/java/io/vertx/test/core/FileSystemTest.java
+++ b/src/test/java/io/vertx/test/core/FileSystemTest.java
@@ -53,8 +53,10 @@
import java.util.HashSet;
import java.util.List;
import java.util.Set;
+import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
import static io.vertx.test.core.TestUtils.*;
@@ -1679,4 +1681,35 @@ private void deleteFile(String fileName) {
File file = new File(testDir + pathSep + fileName);
file.delete();
}
-}
+
+ // @Repeat(times=1000)
+ @Test
+ public void testAsyncFileConcurrency() throws Exception {
+ String fileName = "some-file.dat";
+
+ AtomicReference<AsyncFile> arFile = new AtomicReference<>();
+ CountDownLatch latch = new CountDownLatch(1);
+ vertx.fileSystem().open(testDir + pathSep + fileName, new OpenOptions(), ar -> {
+ if (ar.succeeded()) {
+ AsyncFile af = ar.result();
+ arFile.set(af);
+ } else {
+ fail(ar.cause().getMessage());
+ }
+ latch.countDown();
+ });
+ awaitLatch(latch);
+
+ AsyncFile af = arFile.get();
+
+ Buffer buff = Buffer.buffer(randomByteArray(4096));
+ for (int i = 0; i < 100000; i++) {
+ af.write(buff);
+ }
+
+ af.close(onSuccess(v -> {
+ testComplete();
+ }));
+
+ await();
+ }}
| Race condition in AsyncFileImpl writesOutstanding
If an AsyncFileImpl is written to from a thread other than the context that created it, the long counter writesOutstanding can get into an invalid state. This is because the counter is updated both from the caller's thread and on the context when a write completes.
The fix is to use an AtomicLong for writesOutstanding:
https://gist.github.com/purplefox/723fa69a1330fb4f4f74d4a5dd5e6658
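A minimal sketch of the AtomicLong idea referenced in the gist (field and call sites only; note the merged patch guards the counter with synchronized blocks instead):
```java
// Requires java.util.concurrent.atomic.AtomicLong.
private final AtomicLong writesOutstanding = new AtomicLong();

// On the caller's thread, before issuing the write:
writesOutstanding.addAndGet(toWrite);

// On the context thread, once the write completes:
writesOutstanding.addAndGet(-buff.limit());
```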
| A couple of questions:
- Can you point out where the unsynchronized write happens? Is it the handler passed to the doWrite method that seems unsynchronized when called back?
- Is the atomicity of the long necessary?
- Could it be fixed using synchronized instead, or by marking the field volatile?
```
context.runOnContext((v) -> {
writesOutstanding -= buff.limit();
handler.handle(Future.succeededFuture());
});
```
I think the issue is that writesOutstanding can be increased here:
https://github.com/eclipse/vert.x/blob/master/src/main/java/io/vertx/core/file/impl/AsyncFileImpl.java#L375
At the same time it is decreased here:
https://github.com/eclipse/vert.x/blob/master/src/main/java/io/vertx/core/file/impl/AsyncFileImpl.java#L395
This happens if write is called from a thread other than the context thread.
I initially tried to fix it using synchronized, but this resulted in a deadlock. Volatile can't be used, as volatile additions aren't atomic.
Can you provide a reproducer?
Sure, I'll try to put one together :)
Here's a reproducer that can run inside vertx-core FileSystemTest:
https://gist.github.com/purplefox/6de14775d331395ceeb01168367715f4
It needs to be run with many iterations to reproduce (hence the @Repeat).
If you run the test enough times you will find it eventually hangs; this is because the close() method never completes. It doesn't complete because it is waiting on an invalid count of outstanding writes.
Makes sense, thanks! | 2017-06-11T19:34:15Z | 3.4 |