code | docstring | source
---|---|---|
def _getScalesDiag(self,termx=0):
assert self.P>1, 'VarianceDecomposition:: diagonal init_method allowed only for multi trait models'
assert self.noisPos is not None, 'VarianceDecomposition:: noise term has to be set'
assert termx<self.n_randEffs-1, 'VarianceDecomposition:: termx>=n_randEffs-1'
assert self.trait_covar_type[self.noisPos] not in ['lowrank','block','fixed'], 'VarianceDecomposition:: diagonal initialization not possible for such a parametrization'
assert self.trait_covar_type[termx] not in ['lowrank','block','fixed'], 'VarianceDecomposition:: diagonal initialization not possible for such a parametrization'
scales = []
res = self._getH2singleTrait(self.vd.getTerm(termx).getK())
scaleg = sp.sqrt(res['varg'].mean())
scalen = sp.sqrt(res['varn'].mean())
for term_i in range(self.n_randEffs):
if term_i==termx:
_scales = scaleg*self.diag[term_i]
elif term_i==self.noisPos:
_scales = scalen*self.diag[term_i]
else:
_scales = 0.*self.diag[term_i]
if self.jitter[term_i]>0:
_scales = sp.concatenate((_scales,sp.array([sp.sqrt(self.jitter[term_i])])))
scales.append(_scales)
return sp.concatenate(scales)
|
Internal function for parameter initialization
Uses a two-term single-trait model to get covariance params for initialization
Args:
termx: non-noise term that is used for initialization
|
juraj-google-style
|
def set(self, key, value):
self._check_limit()
_expire = time.time() + self._timeout if self._timeout else None
self._store[key] = (value, _expire)
|
Add an item to the cache
Args:
key: item key
value: the value associated with this key
|
juraj-google-style
|
def Bernoulli(cls,
mean: 'TensorFluent',
batch_size: Optional[int] = None) -> Tuple[Distribution, 'TensorFluent']:
probs = mean.tensor
dist = tf.distributions.Bernoulli(probs=probs, dtype=tf.bool)
batch = mean.batch
if not batch and batch_size is not None:
t = dist.sample(batch_size)
batch = True
else:
t = dist.sample()
scope = mean.scope.as_list()
return (dist, TensorFluent(t, scope, batch=batch))
|
Returns a TensorFluent for the Bernoulli sampling op with given mean parameter.
Args:
mean: The mean parameter of the Bernoulli distribution.
batch_size: The size of the batch (optional).
Returns:
The Bernoulli distribution and a TensorFluent sample drawn from the distribution.
|
juraj-google-style
|
def get_plot(self, normalize_rxn_coordinate=True, label_barrier=True):
plt = pretty_plot(12, 8)
scale = (1 if (not normalize_rxn_coordinate) else (1 / self.r[(- 1)]))
x = np.arange(0, np.max(self.r), 0.01)
y = (self.spline(x) * 1000)
relative_energies = (self.energies - self.energies[0])
plt.plot((self.r * scale), (relative_energies * 1000), 'ro', (x * scale), y, 'k-', linewidth=2, markersize=10)
plt.xlabel('Reaction coordinate')
plt.ylabel('Energy (meV)')
plt.ylim(((np.min(y) - 10), ((np.max(y) * 1.02) + 20)))
if label_barrier:
data = zip((x * scale), y)
barrier = max(data, key=(lambda d: d[1]))
plt.plot([0, barrier[0]], [barrier[1], barrier[1]], 'k--')
plt.annotate(('%.0f meV' % (np.max(y) - np.min(y))), xy=((barrier[0] / 2), (barrier[1] * 1.02)), xytext=((barrier[0] / 2), (barrier[1] * 1.02)), horizontalalignment='center')
plt.tight_layout()
return plt
|
Returns the NEB plot. Uses Henkelman's approach of spline fitting
each section of the reaction path based on tangent force and energies.
Args:
normalize_rxn_coordinate (bool): Whether to normalize the
reaction coordinate to between 0 and 1. Defaults to True.
label_barrier (bool): Whether to label the maximum barrier.
Returns:
matplotlib.pyplot object.
|
codesearchnet
|
def AddEventSource(self, event_source):
self._RaiseIfNotWritable()
event_source = self._PrepareAttributeContainer(event_source)
self._event_sources.append(event_source)
self.number_of_event_sources += 1
|
Adds an event source.
Args:
event_source (EventSource): event source.
Raises:
IOError: when the storage writer is closed.
OSError: when the storage writer is closed.
|
juraj-google-style
|
def get_container_instance_logs(access_token, subscription_id, resource_group, container_group_name, container_name=None):
if (container_name is None):
container_name = container_group_name
endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', resource_group, '/providers/Microsoft.ContainerInstance/ContainerGroups/', container_group_name, '/containers/', container_name, '/logs?api-version=', CONTAINER_API])
return do_get(endpoint, access_token)
|
Get the container logs for containers in a container group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
container_group_name (str): Name of container instance group.
container_name (str): Optional name of a container in the group.
Returns:
HTTP response. Container logs.
|
codesearchnet
|
def _maybe_strip_extension(number):
match = _EXTN_PATTERN.search(number)
if match and _is_viable_phone_number(number[:match.start()]):
for group in match.groups():
if group is not None:
return (group, number[:match.start()])
return ("", number)
|
Strip extension from the end of a number string.
Strips any extension (as in, the part of the number dialled after the
call is connected, usually indicated with extn, ext, x or similar) from
the end of the number, and returns it.
Arguments:
number -- the non-normalized telephone number that we wish to strip the extension from.
Returns a 2-tuple of:
- the phone extension (or "" if not present)
- the number before the extension.
|
juraj-google-style
|
def addResource(self, pid):
self._check_initialized()
try:
self.getObjectByPid(pid)
return
except IndexError:
pass
oid = self._pid_to_id(pid)
obj = rdflib.URIRef(oid)
ag = self.getAggregation()
self.add((ag, ORE.aggregates, obj))
self.add((obj, ORE.isAggregatedBy, ag))
self.add((obj, DCTERMS.identifier, rdflib.term.Literal(pid)))
|
Add a resource to the Resource Map.
Args:
pid : str
|
juraj-google-style
|
def module_set_id(self) -> str:
fnames = sorted(['@'.join(m) for m in self.schema_data.modules])
return hashlib.sha1(''.join(fnames).encode('ascii')).hexdigest()
|
Compute unique id of YANG modules comprising the data model.
Returns:
String consisting of hexadecimal digits.
|
codesearchnet
|
def get_domain(self):
if hasattr(self, 'domain'):
return Domain(self.rest_client.make_request(self.domain), self.rest_client)
|
Get the Streams domain for the instance that owns this view.
Returns:
Domain: Streams domain for the instance owning this view.
|
codesearchnet
|
def RemoveUser(self, user):
self.logger.info('Removing user %s.', user)
if self.remove:
command = self.userdel_cmd.format(user=user)
try:
subprocess.check_call(command.split(' '))
except subprocess.CalledProcessError as e:
self.logger.warning('Could not remove user %s. %s.', user, str(e))
else:
self.logger.info('Removed user account %s.', user)
self._RemoveAuthorizedKeys(user)
self._UpdateSudoer(user, sudoer=False)
|
Remove a Linux user account.
Args:
user: string, the Linux user account to remove.
|
juraj-google-style
|
def lowpass_filter(data: FLOATS_TYPE,
sampling_freq_hz: float,
cutoff_freq_hz: float,
numtaps: int) -> FLOATS_TYPE:
coeffs = firwin(
numtaps=numtaps,
cutoff=normalized_frequency(cutoff_freq_hz, sampling_freq_hz),
pass_zero=True
)
filtered_data = lfilter(b=coeffs, a=1.0, x=data)
return filtered_data
|
Apply a low-pass filter to the data.
Args:
data: time series of the data
sampling_freq_hz: sampling frequency :math:`f_s`, in Hz
(or other consistent units)
cutoff_freq_hz: filter cutoff frequency in Hz
(or other consistent units)
numtaps: number of filter taps
Returns:
filtered data
Note: number of filter taps = filter order + 1
|
juraj-google-style
|
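A minimal usage sketch of the low-pass filter above, assuming SciPy's `firwin`/`lfilter` (as imported by the source) and hypothetical signal parameters; the cutoff is normalized by the Nyquist frequency, which is presumably what `normalized_frequency` does.
import numpy as np
from scipy.signal import firwin, lfilter

# Hypothetical data: a 5 Hz sine sampled at 1 kHz with added noise.
fs_hz, cutoff_hz, numtaps = 1000.0, 40.0, 101
t = np.arange(0, 1.0, 1.0 / fs_hz)
noisy = np.sin(2 * np.pi * 5 * t) + 0.5 * np.random.randn(t.size)

# Same recipe as lowpass_filter(): FIR taps from firwin, applied with lfilter.
coeffs = firwin(numtaps=numtaps, cutoff=cutoff_hz / (fs_hz / 2.0), pass_zero=True)
filtered = lfilter(b=coeffs, a=1.0, x=noisy)
|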
def estimate_motion(self, time, intensity_grid, max_u, max_v):
ti = np.where(time == self.times)[0][0]
mask_vals = np.where(self.masks[ti].ravel() == 1)
i_vals = self.i[ti].ravel()[mask_vals]
j_vals = self.j[ti].ravel()[mask_vals]
obj_vals = self.timesteps[ti].ravel()[mask_vals]
u_shifts = np.arange(-max_u, max_u + 1)
v_shifts = np.arange(-max_v, max_v + 1)
min_error = 99999999999.0
best_u = 0
best_v = 0
for u in u_shifts:
j_shift = j_vals - u
for v in v_shifts:
i_shift = i_vals - v
if np.all((0 <= i_shift) & (i_shift < intensity_grid.shape[0]) &
(0 <= j_shift) & (j_shift < intensity_grid.shape[1])):
shift_vals = intensity_grid[i_shift, j_shift]
else:
shift_vals = np.zeros(i_shift.shape)
error = np.abs(shift_vals - obj_vals).mean()
if error < min_error:
min_error = error
best_u = u * self.dx
best_v = v * self.dx
self.u[ti] = best_u
self.v[ti] = best_v
return best_u, best_v, min_error
|
Estimate the motion of the object with cross-correlation on the intensity values from the previous time step.
Args:
time: time being evaluated.
intensity_grid: 2D array of intensities used in cross correlation.
max_u: Maximum x-component of motion. Used to limit search area.
max_v: Maximum y-component of motion. Used to limit search area
Returns:
u, v, and the minimum error.
|
juraj-google-style
|
def getRowByIndex(self, index):
assert isinstance(index, int)
return Row(self._impl.getRowByIndex(index))
|
Get row by numeric index.
Args:
index: Zero-based index of the row to get.
Returns:
The corresponding row.
|
codesearchnet
|
def _checkInt(inputvalue, minvalue=None, maxvalue=None, description='inputvalue'):
if (not isinstance(description, str)):
raise TypeError('The description should be a string. Given: {0!r}'.format(description))
if (not isinstance(inputvalue, (int, long))):
raise TypeError('The {0} must be an integer. Given: {1!r}'.format(description, inputvalue))
if (not isinstance(minvalue, (int, long, type(None)))):
raise TypeError('The minvalue must be an integer or None. Given: {0!r}'.format(minvalue))
if (not isinstance(maxvalue, (int, long, type(None)))):
raise TypeError('The maxvalue must be an integer or None. Given: {0!r}'.format(maxvalue))
_checkNumerical(inputvalue, minvalue, maxvalue, description)
|
Check that the given integer is valid.
Args:
* inputvalue (int or long): The integer to be checked
* minvalue (int or long, or None): Minimum value of the integer
* maxvalue (int or long, or None): Maximum value of the integer
* description (string): Used in error messages for the checked inputvalue
Raises:
TypeError, ValueError
Note: Can not use the function :func:`_checkString`, as that function uses this function internally.
|
codesearchnet
|
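A short sketch of how the validator above behaves, assuming the module's `_checkNumerical` helper (not shown here) raises ValueError for out-of-range values:
_checkInt(5, minvalue=0, maxvalue=10, description='retry count')   # passes silently
_checkInt('5', description='retry count')
# TypeError: The retry count must be an integer. Given: '5'
_checkInt(42, minvalue=0, maxvalue=10, description='retry count')
# ValueError raised by _checkNumerical: 42 exceeds maxvalue
|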
def greedy_set_cover(universe, subsets, costs):
elements = set((e for s in subsets.keys() for e in subsets[s]))
if (elements != universe):
return None
covered = set()
cover_sets = []
while (covered != universe):
min_cost_elem_ratio = float('inf')
min_set = None
for (s, elements) in subsets.items():
new_elements = len((elements - covered))
if (new_elements != 0):
cost_elem_ratio = (costs[s] / new_elements)
if (cost_elem_ratio < min_cost_elem_ratio):
min_cost_elem_ratio = cost_elem_ratio
min_set = s
cover_sets.append(min_set)
covered |= subsets[min_set]
return cover_sets
|
Approximate greedy algorithm for set-covering. Can be used on large
inputs - though not an optimal solution.
Args:
universe (set): Universe of elements
subsets (dict): Subsets of the universe - {S1: elements, S2: elements, ...}
costs (dict): Costs of each subset - {S1: cost, S2: cost, ...}
Returns:
list: the selected subset keys, or None if the subsets do not cover the universe.
|
codesearchnet
|
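A worked example of the greedy cover above with hypothetical data; note the implementation compares `universe` against a set, so a set is passed here:
universe = {1, 2, 3, 4, 5}
subsets = {'S1': {1, 2, 3}, 'S2': {2, 4}, 'S3': {3, 4, 5}, 'S4': {4, 5}}
costs = {'S1': 5, 'S2': 10, 'S3': 3, 'S4': 1}
greedy_set_cover(universe, subsets, costs)
# ['S4', 'S1']: S4 has the best cost per new element (0.5), then S1 covers the remaining {1, 2, 3}.
|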
def is_namedtuple(x) -> bool:
return isinstance(x, tuple) and hasattr(type(x), '_fields')
|
Returns `True` if the value is an instance of a `NamedTuple`.
This uses a heuristic: checking for a `._fields` attribute on a tuple subclass.
Args:
x: Object to check
Returns:
`True` if the object is a `namedtuple`
|
github-repos
|
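A quick illustration of the heuristic:
import collections

Point = collections.namedtuple('Point', ['x', 'y'])
is_namedtuple(Point(1, 2))   # True  (tuple subclass whose type has a _fields attribute)
is_namedtuple((1, 2))        # False (plain tuples have no _fields)
is_namedtuple([1, 2])        # False (not a tuple at all)
|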
def set_string(self, option, value):
if not isinstance(value, str):
raise TypeError("%s must be a string" % option)
self.options[option] = value
|
Set a string option.
Args:
option (str): name of option.
value (str): value of the option.
Raises:
TypeError: Value must be a string.
|
juraj-google-style
|
def get_maybe_base_expanded_node_name(self, node_name, run_key, device_name):
device_name = tf.compat.as_str(device_name)
if (run_key not in self._run_key_to_original_graphs):
raise ValueError(('Unknown run_key: %s' % run_key))
if (device_name not in self._run_key_to_original_graphs[run_key]):
raise ValueError(('Unknown device for run key "%s": %s' % (run_key, device_name)))
return self._run_key_to_original_graphs[run_key][device_name].maybe_base_expanded_node_name(node_name)
|
Obtain possibly base-expanded node name.
Base-expansion is the transformation of a node name which happens to be the
name scope of other nodes in the same graph. For example, if a graph contains
two nodes called 'a/b' and 'a/b/read', the name of the first node will
be base-expanded to 'a/b/(b)'.
This method uses caching to avoid unnecessary recomputation.
Args:
node_name: Name of the node.
run_key: The run key to which the node belongs.
device_name: Name of the device to which the node belongs.
Raises:
ValueError: If `run_key` and/or `device_name` do not exist in the record.
|
codesearchnet
|
def validate(self, value):
if value is not None and not isinstance(value, self.data_type):
raise datastore_errors.BadValueError(
"Property %s must be convertible to a %s instance (%s)" %
(self.name, self.data_type, value))
return super(JsonProperty, self).validate(value)
|
Validate value.
Args:
value: model value.
Returns:
The validated value.
Raises:
BadValueError: when value is not of self.data_type type.
|
juraj-google-style
|
def assign_sub(self, delta, use_locking=False, name=None, read_value=True):
assign = state_ops.assign_sub(self._variable, delta, use_locking=use_locking, name=name)
if read_value:
return assign
return assign.op
|
Subtracts a value from this variable.
This is essentially a shortcut for `assign_sub(self, delta)`.
Args:
delta: A `Tensor`. The value to subtract from this variable.
use_locking: If `True`, use locking during the operation.
name: The name of the operation to be created
read_value: if True, will return something which evaluates to the new
value of the variable; if False will return the assign op.
Returns:
A `Tensor` that will hold the new value of this variable after
the subtraction has completed.
|
github-repos
|
def dbmax10years(self, value=None):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `dbmax10years`'.format(value))
self._dbmax10years = value
|
Corresponds to IDD Field `dbmax10years`
10-year return period values for maximum extreme dry-bulb temperature
Args:
value (float): value for IDD Field `dbmax10years`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|
def __setitem__(self, key, value):
setitem = self._class_to_mock.__dict__.get('__setitem__', None)
if setitem is None:
raise TypeError('object does not support item assignment')
if self._replay_mode:
return MockMethod('__setitem__', self._expected_calls_queue,
self._replay_mode)(key, value)
return self._CreateMockMethod('__setitem__')(key, value)
|
Provide custom logic for mocking classes that support item assignment.
Args:
key: Key to set the value for.
value: Value to set.
Returns:
Expected return value in replay mode. A MockMethod object for the
__setitem__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class does not support item assignment.
UnexpectedMethodCallError if the object does not expect the call to
__setitem__.
|
juraj-google-style
|
def dump_credibilities(self, output):
for p in self.products:
json.dump({'product_id': p.name, 'credibility': self.credibility(p)}, output)
output.write('\n')
|
Dump credibilities of all products.
Args:
output: a writable object.
|
codesearchnet
|
def revnet_step(name, x, hparams, reverse=True):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
if (hparams.coupling == 'additive'):
coupling_layer = functools.partial(additive_coupling, name='additive', reverse=reverse, mid_channels=hparams.coupling_width, activation=hparams.activation, dropout=hparams.coupling_dropout)
else:
coupling_layer = functools.partial(affine_coupling, name='affine', reverse=reverse, mid_channels=hparams.coupling_width, activation=hparams.activation, dropout=hparams.coupling_dropout)
ops = [functools.partial(actnorm, name='actnorm', reverse=reverse), functools.partial(invertible_1x1_conv, name='invertible', reverse=reverse), coupling_layer]
if reverse:
ops = ops[::(- 1)]
objective = 0.0
for op in ops:
(x, curr_obj) = op(x=x)
objective += curr_obj
return (x, objective)
|
One step of glow generative flow.
Actnorm + invertible 1X1 conv + affine_coupling.
Args:
name: used for variable scope.
x: input
hparams: coupling_width is the only hparam that is being used in
this function.
reverse: forward or reverse pass.
Returns:
z: Output of one step of reversible flow.
|
codesearchnet
|
def prod(self, vars_list: List[str]) -> 'TensorFluent':
operand = self
if operand.dtype == tf.bool:
operand = operand.cast(tf.float32)
return self._aggregation_op(tf.reduce_prod, operand, vars_list)
|
Returns the TensorFluent for the prod aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the prod aggregation function.
|
juraj-google-style
|
def collection(iterable=None, mutable=True, ordered=False, unique=False):
if iterable is None:
iterable = tuple()
if unique:
if ordered:
if mutable:
return setlist(iterable)
else:
return frozensetlist(iterable)
else:
if mutable:
return set(iterable)
else:
return frozenset(iterable)
else:
if ordered:
if mutable:
return list(iterable)
else:
return tuple(iterable)
else:
if mutable:
return bag(iterable)
else:
return frozenbag(iterable)
|
Return a :class:`Collection` with the specified properties.
Args:
iterable (Iterable): collection to instantiate new collection from.
mutable (bool): Whether or not the new collection is mutable.
ordered (bool): Whether or not the new collection is ordered.
unique (bool): Whether or not the new collection contains only unique values.
|
juraj-google-style
|
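A sketch of how the three flags map to container types, assuming `setlist`, `frozensetlist`, `bag`, and `frozenbag` come from the same package as this factory:
data = [1, 2, 2, 3]
collection(data)                                # bag([1, 2, 2, 3])   mutable, unordered, duplicates kept
collection(data, mutable=False, ordered=True)   # (1, 2, 2, 3)        plain tuple
collection(data, unique=True)                   # {1, 2, 3}           plain set
collection(data, unique=True, ordered=True)     # setlist([1, 2, 3])  ordered and unique
|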
def extractDates(self, inp):
def merge(param):
day, time = param
if not (day or time):
return None
if not day:
return time
if not time:
return day
return datetime.datetime(
day.year, day.month, day.day, time.hour, time.minute
)
days = self.extractDays(inp)
times = self.extractTimes(inp)
return map(merge, zip_longest(days, times, fillvalue=None))
|
Extract semantic date information from an input string.
In effect, runs both parseDay and parseTime on the input
string and merges the results to produce a comprehensive
datetime object.
Args:
inp (str): Input string to be parsed.
Returns:
A list of datetime objects containing the extracted dates from the
input snippet, or an empty list if not found.
|
juraj-google-style
|
def get_wallet_height(self, id=None, endpoint=None):
return self._call_endpoint(GET_WALLET_HEIGHT, id=id, endpoint=endpoint)
|
Get the current wallet index height.
Args:
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to use
Returns:
json object of the result or the error encountered in the RPC call
|
juraj-google-style
|
def Shell(self, command, timeout_ms=None):
return self.protocol_handler.Command(self._handle, service=b'shell', command=command, timeout_ms=timeout_ms)
|
Run command on the device, returning the output.
Args:
command: Shell command to run
timeout_ms: Maximum time to allow the command to run.
|
codesearchnet
|
def get_sharding_tile_shape(sharding):
if sharding is None:
return None
sharding_message = xla_data_pb2.OpSharding()
sharding_message.ParseFromString(sharding)
if sharding_message.tile_assignment_dimensions:
return sharding_message.tile_assignment_dimensions
else:
return None
|
Returns the tile assignment shape for a sharded Tensor.
Args:
sharding: a serialized OpSharding message describing the layout of a
sharded Tensor.
Returns:
A list, for each dimension of the sharded Tensor, of the number of shards
into which it has been split. Returns None if the input indicates no tile
assignments.
|
github-repos
|
def _build_list_of_Intervals(cls, data_dict, stop=None, points=False, include=None, exclude=None, ignore=None, lexicon=None):
include = (include or {})
exclude = (exclude or {})
ignore = (ignore or [])
all_data = []
for data in zip(*data_dict.values()):
all_data.append({k: v for (k, v) in zip(data_dict.keys(), data)})
all_data = sorted(all_data, key=(lambda x: x['top']))
wanted_data = []
for dictionary in all_data:
keep = True
delete = []
for (k, v) in dictionary.items():
incl = include.get(k, utils.null_default(True))
excl = exclude.get(k, utils.null_default(False))
if (k in ignore):
delete.append(k)
if (not incl(v)):
keep = False
if excl(v):
keep = False
if delete:
for key in delete:
_ = dictionary.pop(key, None)
if keep:
wanted_data.append(dictionary)
if (not points):
for (i, iv) in enumerate(wanted_data):
if (iv.get('base', None) is None):
try:
iv['base'] = wanted_data[(i + 1)]['top']
except (IndexError, KeyError):
if (stop is not None):
thick = (stop - iv['top'])
else:
thick = 1
iv['base'] = (iv['top'] + thick)
list_of_Intervals = []
for iv in wanted_data:
top = iv.pop('top')
base = iv.pop('base', None)
descr = iv.pop('description', '')
if iv:
(c, d) = ({}, {})
for (k, v) in iv.items():
if ((k[:5].lower() == 'comp ') or (k[:9].lower() == 'component')):
k = re.sub('comp(?:onent)? ', '', k, flags=re.I)
c[k] = v
elif (v is not None):
d[k] = v
comp = ([Component(c)] if c else None)
this = Interval(**{'top': top, 'base': base, 'description': descr, 'data': d, 'components': comp})
else:
this = Interval(**{'top': top, 'base': base, 'description': descr, 'lexicon': lexicon})
list_of_Intervals.append(this)
return list_of_Intervals
|
Private function. Takes a data dictionary and reconstructs a list
of Intervals from it.
Args:
data_dict (dict)
stop (float): Where to end the last interval.
points (bool)
include (dict)
exclude (dict)
ignore (list)
lexicon (Lexicon)
Returns:
list.
|
codesearchnet
|
def typical_or_extreme_period_type(self, value=None):
if (value is not None):
try:
value = str(value)
except ValueError:
raise ValueError('value {} need to be of type str for field `typical_or_extreme_period_type`'.format(value))
if (',' in value):
raise ValueError('value should not contain a comma for field `typical_or_extreme_period_type`')
self._typical_or_extreme_period_type = value
|
Corresponds to IDD Field `typical_or_extreme_period_type`
Args:
value (str): value for IDD Field `typical_or_extreme_period_type`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
def speed_difference(points):
data = [0]
for (before, after) in pairwise(points):
data.append((before.vel - after.vel))
return data
|
Computes the speed difference between each pair of adjacent points.
Args:
points (:obj:`list` of :obj:`Point`)
Returns:
:obj:`list` of float: Speed differences between adjacent points; the first element is 0.
|
codesearchnet
|
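A small worked example, using a stand-in `Point` that has only the `vel` attribute the function reads (the module's `pairwise` helper is assumed to be available):
from collections import namedtuple

Point = namedtuple('Point', ['vel'])
points = [Point(1.0), Point(3.0), Point(2.5)]
speed_difference(points)   # [0, -2.0, 0.5]
|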
def tfidf_corpus(docs=CORPUS):
vectorizer = TfidfVectorizer()
vectorizer = vectorizer.fit(docs)
return vectorizer, vectorizer.transform(docs)
|
Count the words in a corpus and return a TfidfVectorizer() as well as all the TFIDF vectors for the corpus
Args:
docs (iterable of strs): a sequence of documents (strings)
Returns:
(TfidfVectorizer, tfidf_vectors)
|
juraj-google-style
|
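A minimal usage sketch with a hypothetical two-document corpus, assuming scikit-learn's TfidfVectorizer as imported by the source:
docs = ['the cat sat on the mat', 'the dog sat on the log']
vectorizer, tfidf_vectors = tfidf_corpus(docs)
tfidf_vectors.shape              # (2, 7): one row per document, one column per vocabulary term
sorted(vectorizer.vocabulary_)   # ['cat', 'dog', 'log', 'mat', 'on', 'sat', 'the']
|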
def get_relavent_flags(self):
relavent_flags = {}
for (code, flags_list) in self.flags.items():
relavent_flags[code] = []
for flag in flags_list:
if self.flag_is_related(flag):
relavent_flags[code].append(flag)
if (not relavent_flags[code]):
del relavent_flags[code]
return relavent_flags
|
Retrieves the relevant flags for this data block.
Returns:
All flags related to this block.
|
codesearchnet
|
def first(series, order_by=None):
if (order_by is not None):
series = order_series_by(series, order_by)
first_s = series.iloc[0]
return first_s
|
Returns the first value of a series.
Args:
series (pandas.Series): column to summarize.
Kwargs:
order_by: a pandas.Series or list of series (can be symbolic) to order
the input series by before summarization.
|
codesearchnet
|
def generate_encoded_user_data(env='dev', region='us-east-1', generated=None, group_name='', pipeline_type='', canary=False):
if (env in ['prod', 'prodp', 'prods']):
(env_c, env_p, env_s) = ('prod', 'prodp', 'prods')
else:
(env_c, env_p, env_s) = (env, env, env)
user_data = get_template(template_file='infrastructure/user_data.sh.j2', env=env, env_c=env_c, env_p=env_p, env_s=env_s, region=region, app_name=generated.app_name(), group_name=group_name, pipeline_type=pipeline_type, canary=canary, formats=generated)
return base64.b64encode(user_data.encode()).decode()
|
r"""Generate base64 encoded User Data.
Args:
env (str): Deployment environment, e.g. dev, stage.
region (str): AWS Region, e.g. us-east-1.
generated (gogoutils.Generator): Generated naming formats.
group_name (str): Application group name, e.g. core.
pipeline_type (str): Type of Foremast Pipeline to configure.
Returns:
str: base64 encoded User Data script.
#!/bin/bash
export CLOUD_ENVIRONMENT=dev
export CLOUD_ENVIRONMENT_C=dev
export CLOUD_ENVIRONMENT_P=dev
export CLOUD_ENVIRONMENT_S=dev
export CLOUD_APP=coreforrest
export CLOUD_APP_GROUP=forrest
export CLOUD_STACK=forrest
export EC2_REGION=us-east-1
export CLOUD_DOMAIN=dev.example.com
printenv | grep 'CLOUD\|EC2' | awk '$0="export "$0'>> /etc/gogo/cloud_env
|
codesearchnet
|
def line(self, value):
if value == self._defaults['line'] and 'line' in self._values:
del self._values['line']
else:
self._values['line'] = value
|
The line property.
Args:
value (int): the property value.
|
juraj-google-style
|
def elastic(x, severity=1):
c = [(244 * 2, 244 * 0.7, 244 * 0.1), (244 * 2, 244 * 0.08, 244 * 0.2),
(244 * 0.05, 244 * 0.01, 244 * 0.02), (244 * 0.07, 244 * 0.01,
244 * 0.02),
(244 * 0.12, 244 * 0.01, 244 * 0.02)][severity - 1]
image = np.array(x, dtype=np.float32) / 255.
shape = image.shape
shape_size = shape[:2]
center_square = np.float32(shape_size)
square_size = min(shape_size)
pts1 = np.float32([
center_square + square_size,
[center_square[0] + square_size, center_square[1] - square_size],
center_square - square_size
])
pts2 = pts1 + np.random.uniform(
-c[2], c[2], size=pts1.shape).astype(np.float32)
affine_trans = tfds.core.lazy_imports.cv2.getAffineTransform(pts1, pts2)
image = tfds.core.lazy_imports.cv2.warpAffine(
image,
affine_trans,
shape_size[::-1],
borderMode=tfds.core.lazy_imports.cv2.BORDER_REFLECT_101)
dx = (tfds.core.lazy_imports.skimage.filters.gaussian(
np.random.uniform(-1, 1, size=shape[:2]),
c[1],
mode='reflect',
truncate=3) * c[0]).astype(np.float32)
dy = (tfds.core.lazy_imports.skimage.filters.gaussian(
np.random.uniform(-1, 1, size=shape[:2]),
c[1],
mode='reflect',
truncate=3) * c[0]).astype(np.float32)
dx, dy = dx[..., np.newaxis], dy[..., np.newaxis]
x, y, z = np.meshgrid(
np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))
indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx,
(-1, 1)), np.reshape(
z, (-1, 1))
x_clip = np.clip(
tfds.core.lazy_imports.scipy.ndimage.interpolation.map_coordinates(
image, indices, order=1, mode='reflect').reshape(shape), 0, 1) * 255
return around_and_astype(x_clip)
|
Conduct elastic transform to images.
Elastic transform is performed on small patches of the images.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied elastic transform.
|
juraj-google-style
|
def _try_to_compute_deterministic_class_id(cls, depth=5):
class_id = pickle.dumps(cls)
for _ in range(depth):
new_class_id = pickle.dumps(pickle.loads(class_id))
if (new_class_id == class_id):
return hashlib.sha1(new_class_id).digest()
class_id = new_class_id
logger.warning('WARNING: Could not produce a deterministic class ID for class {}'.format(cls))
return hashlib.sha1(new_class_id).digest()
|
Attempt to produce a deterministic class ID for a given class.
The goal here is for the class ID to be the same when this is run on
different worker processes. Pickling, loading, and pickling again seems to
produce more consistent results than simply pickling. This is a bit crazy
and could cause problems, in which case we should revert it and figure out
something better.
Args:
cls: The class to produce an ID for.
depth: The number of times to repeatedly try to load and dump the
string while trying to reach a fixed point.
Returns:
A class ID for this class. We attempt to make the class ID the same
when this function is run on different workers, but that is not
guaranteed.
Raises:
Exception: This could raise an exception if cloudpickle raises an
exception.
|
codesearchnet
|
def get(self, url, headers=None, parameters=None, get_json=True):
if self.debug:
print(('GET: %s, headers=%s' % (url, headers)))
self.headers = self._get_default_headers()
get_parameters = self.parameters
if (get_parameters is None):
get_parameters = {}
if (headers is not None):
self.headers.update(headers)
if (parameters is not None):
get_parameters.update(parameters)
response = requests.get(url, headers=self.headers, params=get_parameters, auth=self.auth, verify=self.verify_ssl)
json_response = self._process_json_response(response)
return (json_response if (get_json is True) else response)
|
Send a GET request with custom headers and parameters
Args:
url (str): URL to send the request to
headers (str, optional): custom headers
parameters (str, optional): optional parameters
Returns:
A JSON object of the returned response if `get_json` is True,
Requests' response object otherwise
|
codesearchnet
|
def remove_op_callback(self, callback):
if callback not in self._thread_local_data.op_callbacks:
raise KeyError('The specified op callback has not been registered, and hence cannot be removed.')
del self._thread_local_data.op_callbacks[self._thread_local_data.op_callbacks.index(callback)]
|
Remove an already-registered op callback.
Args:
callback: The op callback to be removed.
Raises:
KeyError: If `callback` is not already registered.
|
github-repos
|
def return_type(type_name, formatter=None):
def _returns(func):
annotated(func)
func.metadata.typed_returnvalue(type_name, formatter)
return func
return _returns
|
Specify that this function returns a typed value.
Args:
type_name (str): A type name known to the global typedargs type system
formatter (str): An optional name of a formatting function specified
for the type given in type_name.
|
codesearchnet
|
def resource_struct(self, resource: str) -> str:
resource = self.api_document['schemas'][resource]['properties']
return self.to_struct(from_api=resource)
|
Return BigQuery STRUCT for a Discovery API resource.
Args:
resource: the name of the Google API resource
Returns:
A string STRUCT of the resource ready to be used in a query.
|
github-repos
|
def create_config(self, name, data, labels=None):
if not isinstance(data, bytes):
data = data.encode('utf-8')
data = base64.b64encode(data)
if six.PY3:
data = data.decode('ascii')
body = {
'Data': data,
'Name': name,
'Labels': labels
}
url = self._url('/configs/create')
return self._result(
self._post_json(url, data=body), True
)
|
Create a config
Args:
name (string): Name of the config
data (bytes): Config data to be stored
labels (dict): A mapping of labels to assign to the config
Returns (dict): ID of the newly created config
|
juraj-google-style
|
def _legacy_weights(layer):
weights = layer.trainable_weights + layer.non_trainable_weights
if any((not isinstance(w, variables_module.Variable) for w in weights)):
raise NotImplementedError("Save or restore weights that is not an instance of `tf.Variable` is not supported in h5, use `save_format='tf'` instead. Got a model or layer {} with weights {}".format(layer.__class__.__name__, weights))
return weights
|
DO NOT USE.
For legacy reasons, layer.weights was in the order of
[self.trainable_weights + self.non_trainable_weights], and this order was
used for preserving the weights in h5 format. The new order of layer.weights
is the same as layer.get_weights(), which is more intuitive for users. To
keep supporting the existing saved h5 file, this method should be used to
save/load weights. In future version, we will delete this method and
introduce a breaking change for h5 and stay with the new order for weights.
Args:
layer: a `tf.keras.Model` or `tf.keras.layers.Layer` instance.
Returns:
A list of variables with the order of trainable_weights, followed by
non_trainable_weights.
|
github-repos
|
def SetCredentials(self, password=None, username=None):
if password:
self._password = password
if username:
self._user = username
|
Sets the database credentials.
Args:
password (Optional[str]): password to access the database.
username (Optional[str]): username to access the database.
|
codesearchnet
|
def _NormalizeKeyPath(self, key_path):
normalized_key_path = key_path.lower()
if ((len(normalized_key_path) < 39) or (not normalized_key_path.startswith(self._CONTROL_SET_PREFIX))):
return normalized_key_path
return ''.join([self._NORMALIZED_CONTROL_SET_PREFIX, normalized_key_path[39:]])
|
Normalizes a Windows Registry key path.
Args:
key_path (str): Windows Registry key path.
Returns:
str: normalized Windows Registry key path.
|
codesearchnet
|
def __init__(self, correction_limit=88., **kwargs):
self.correction_limit = correction_limit
super(EffectiveSolarPathLengthCorrector, self).__init__(**kwargs)
|
Collect custom configuration values.
Args:
correction_limit (float): Maximum solar zenith angle to apply the
correction in degrees. Pixels beyond this limit have a
constant correction applied. Default 88.
max_sza (float): Maximum solar zenith angle in degrees that is
considered valid and correctable. Default 95.0.
|
juraj-google-style
|
def eval_in_new(cls, expr, *args, **kwargs):
ctx = cls(*args, **kwargs)
ctx.env.rec_new(expr)
return ctx.eval(expr)
|
:meth:`eval` an expression in a new, temporary :class:`Context`.
This should be safe to use directly on user input.
Args:
expr (LispVal): The expression to evaluate.
*args: Args for the :class:`Context` constructor.
**kwargs: Kwargs for the :class:`Context` constructor.
|
juraj-google-style
|
def _UpdateCounters(self, event):
self._session.parsers_counter['total'] += 1
parser_name = getattr(event, 'parser', '')
(_, _, parser_name) = parser_name.rpartition('/')
if (not parser_name):
parser_name = 'N/A'
self._session.parsers_counter[parser_name] += 1
|
Updates the counters.
Args:
event (EventObject): event.
|
codesearchnet
|
def __init__(self, dataset, worker, devices, options=None):
self._dataset = dataset
self._worker = worker
self._devices = devices
self._element_spec = dataset.element_spec
self._options = options
self._make_iterator()
|
Create iterator for the `dataset` to fetch data to worker's `devices` .
A `MultiDeviceIterator` or `OwnedMultiDeviceIterator` is used to prefetch
input to the devices on the given worker.
Args:
dataset: A `tf.data.Dataset` instance.
worker: Worker on which ops should be created.
devices: Distribute data from `dataset` to these devices.
options: options.
|
github-repos
|
def l1_loss(tensor, weight=1.0, scope=None):
with tf.name_scope(scope, 'L1Loss', [tensor]):
weight = tf.convert_to_tensor(weight, dtype=tensor.dtype.base_dtype, name='loss_weight')
loss = tf.multiply(weight, tf.reduce_sum(tf.abs(tensor)), name='value')
tf.add_to_collection(LOSSES_COLLECTION, loss)
return loss
|
Define an L1 loss, useful for regularization, e.g. lasso.
Args:
tensor: tensor to regularize.
weight: scale the loss by this factor.
scope: Optional scope for name_scope.
Returns:
the L1 loss op.
|
codesearchnet
|
def load_yaml(task: Task, file: str) -> Result:
with open(file, "r") as f:
yml = ruamel.yaml.YAML(typ="safe")
data = yml.load(f)
return Result(host=task.host, result=data)
|
Loads a yaml file.
Arguments:
file: path to the file containing the yaml file to load
Examples:
Simple example with ``ordered_dict``::
> nr.run(task=load_yaml,
file="mydata.yaml")
Returns:
Result object with the following attributes set:
* result (``dict``): dictionary with the contents of the file
|
juraj-google-style
|
def get_first_model_with_resource_name(cls, resource_name):
models = cls.get_models_with_resource_name(resource_name)
if (len(models) > 0):
return models[0]
return None
|
Get the first model corresponding to a resource_name
Args:
resource_name: the resource name
|
codesearchnet
|
def build_logits(data_ops, embed_layer, rnn_core, output_linear, name_prefix):
embedded_input_seq = snt.BatchApply(
embed_layer, name="input_embed_seq")(data_ops.sparse_obs)
initial_rnn_state = nest.map_structure(
lambda t: tf.get_local_variable(
"{}/rnn_state/{}".format(name_prefix, t.op.name), initializer=t),
rnn_core.initial_state(FLAGS.batch_size))
assign_zero_rnn_state = nest.map_structure(
lambda x: x.assign(tf.zeros_like(x)), initial_rnn_state)
assign_zero_rnn_state = tf.group(*nest.flatten(assign_zero_rnn_state))
rnn_output_seq, rnn_final_state = tf.nn.dynamic_rnn(
cell=rnn_core,
inputs=embedded_input_seq,
initial_state=initial_rnn_state,
time_major=True)
update_rnn_state = nest.map_structure(
tf.assign, initial_rnn_state, rnn_final_state)
with tf.control_dependencies(nest.flatten(update_rnn_state)):
rnn_output_seq = tf.identity(rnn_output_seq, name="rnn_output_seq")
output_logits = snt.BatchApply(
output_linear, name="output_embed_seq")(rnn_output_seq)
return output_logits, assign_zero_rnn_state
|
This is the core model logic.
Unrolls a Bayesian RNN over the given sequence.
Args:
data_ops: A `sequence_data.SequenceDataOps` namedtuple.
embed_layer: A `snt.Embed` instance.
rnn_core: A `snt.RNNCore` instance.
output_linear: A `snt.Linear` instance.
name_prefix: A string to use to prefix local variable names.
Returns:
A 3D time-major tensor representing the model's logits for a sequence of
predictions. Shape `[time_steps, batch_size, vocab_size]`.
|
juraj-google-style
|
def NotIn(self, *values):
self._awql = self._CreateMultipleValuesCondition(values, 'NOT_IN')
return self._query_builder
|
Sets the type of the WHERE clause as "not in".
Args:
*values: The values to be used in the WHERE condition.
Returns:
The query builder that this WHERE builder links to.
|
juraj-google-style
|
def get_sources(self, prefix=''):
prefix = prefix.replace('-', '_')
prefixed = ('%s_sources' % prefix)
if (prefixed in self.__cli):
sources = self.__cli.get(prefixed)
from_conf = False
else:
sources = self.__config.get(prefixed)
from_conf = True
if (sources is None):
return OrderedSet()
sources = self.__resolve_patterns(sources, from_conf)
prefixed = ('%s_source_filters' % prefix)
if (prefixed in self.__cli):
filters = self.__cli.get(prefixed)
from_conf = False
else:
filters = self.__config.get(prefixed)
from_conf = True
if (filters is None):
return sources
sources -= self.__resolve_patterns(filters, from_conf)
return sources
|
Retrieve a set of absolute paths to sources, according to `prefix`.
`ConfigParser` will perform wildcard expansion and
filtering.
Args:
prefix: str, the desired prefix.
Returns:
utils.utils.OrderedSet: The set of sources for the given
`prefix`.
|
codesearchnet
|
def get(self, uid: int) -> FrozenSet[Flag]:
recent = _recent_set if uid in self._recent else frozenset()
flags = self._flags.get(uid)
return recent if flags is None else (flags | recent)
|
Return the session flags for the mailbox session.
Args:
uid: The message UID value.
|
juraj-google-style
|
def _format_parameter_error_message(name: str, sig: Signature,
num_params: int) -> str:
if num_params == 0:
plural = 's'
missing = 2
arguments = "'slack' and 'event'"
else:
plural = ''
missing = 1
arguments = "'event'"
return (f"{name}{sig} missing {missing} required positional "
f"argument{plural}: {arguments}")
|
Format an error message for missing positional arguments.
Args:
name: The function name.
sig: The function's signature.
num_params: The number of function parameters.
Returns:
str: A formatted error message.
|
juraj-google-style
|
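A quick illustration, using inspect.signature to build the `sig` argument (handler names are hypothetical):
import inspect

def handler():
    pass

_format_parameter_error_message('handler', inspect.signature(handler), 0)
# "handler() missing 2 required positional arguments: 'slack' and 'event'"

def handler2(slack):
    pass

_format_parameter_error_message('handler2', inspect.signature(handler2), 1)
# "handler2(slack) missing 1 required positional argument: 'event'"
|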
def inject_params(self, params):
for (arg, value) in params.items():
cli_arg = '--{}'.format(arg)
if (cli_arg in sys.argv):
self.tcex.log.debug('skipping existing arg: {}'.format(cli_arg))
continue
param_data = (self.tcex.install_json_params.get(arg) or {})
if (param_data.get('type', '').lower() == 'multichoice'):
value = value.split('|')
elif (param_data.get('type', '').lower() == 'boolean'):
value = self.tcex.utils.to_bool(value)
elif (arg in self.tc_bool_args):
value = self.tcex.utils.to_bool(value)
if isinstance(value, bool):
if (value is True):
sys.argv.append(cli_arg)
elif isinstance(value, list):
for mcv in value:
sys.argv.append('{}={}'.format(cli_arg, mcv))
else:
sys.argv.append('{}={}'.format(cli_arg, value))
(self._default_args, unknown) = self.parser.parse_known_args()
self.tcex._logger()
|
Inject params into sys.argv from the secureParams API, AOT, or user-provided input.
Args:
params (dict): A dictionary containing all parameters that need to be injected as args.
|
codesearchnet
|
def __init__(self, channel):
self._remote_execute = channel.unary_unary(
'/OnlineActionHandler/_remote_execute',
request_serializer=actions__pb2.OnlineActionRequest.SerializeToString,
response_deserializer=actions__pb2.OnlineActionResponse.FromString,
)
self._remote_reload = channel.unary_unary(
'/OnlineActionHandler/_remote_reload',
request_serializer=actions__pb2.ReloadRequest.SerializeToString,
response_deserializer=actions__pb2.ReloadResponse.FromString,
)
self._health_check = channel.unary_unary(
'/OnlineActionHandler/_health_check',
request_serializer=actions__pb2.HealthCheckRequest.SerializeToString,
response_deserializer=actions__pb2.HealthCheckResponse.FromString,
)
|
Constructor.
Args:
channel: A grpc.Channel.
|
juraj-google-style
|
def update_state(self, y_true, y_pred, sample_weight=None):
return metrics_utils.update_confusion_matrix_variables({metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives, metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives, metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives}, y_true, y_pred, thresholds=self.thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, class_id=self.class_id, sample_weight=sample_weight)
|
Accumulates confusion matrix statistics.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
|
github-repos
|
def dbmin10years(self, value=None):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `dbmin10years`'.format(value))
self._dbmin10years = value
|
Corresponds to IDD Field `dbmin10years`
10-year return period values for minimum extreme dry-bulb temperature
Args:
value (float): value for IDD Field `dbmin10years`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|
def append_from_list(self, content, fill_title=False):
row_index = 0
for row in content:
tr = TableRow()
column_index = 0
for item in row:
if ((row_index == 0) and fill_title):
ti = TableTitle(item)
else:
ti = TableItem(item)
tr.append(ti, str(column_index))
column_index = (column_index + 1)
self.append(tr, str(row_index))
row_index = (row_index + 1)
|
Appends rows created from the data contained in the provided
list of tuples of strings. The first tuple of the list can be
set as table title.
Args:
content (list): list of tuples of strings. Each tuple is a row.
fill_title (bool): if true, the first tuple in the list will
be set as title.
|
codesearchnet
|
def _count_righthand_zero_bits(number, bits):
if number == 0:
return bits
for i in range(bits):
if (number >> i) & 1:
return i
return bits
|
Count the number of zero bits on the right hand side.
Args:
number: an integer.
bits: maximum number of bits to count.
Returns:
The number of zero bits on the right hand side of the number.
|
juraj-google-style
|
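A few worked cases:
_count_righthand_zero_bits(0b10100, 32)   # 2   (two trailing zero bits)
_count_righthand_zero_bits(0, 32)         # 32  (zero is all zeros, so the cap `bits` is returned)
_count_righthand_zero_bits(0b1000, 2)     # 2   (only `bits` positions are checked)
|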
def set_shutdown(self, default=False, disable=True):
return self._configure_mlag('shutdown', True, default, disable)
|
Configures the mlag shutdown value
Default setting for set_shutdown is disable=True, meaning
'no shutdown'. Setting both default and disable to False will
effectively enable shutdown.
Args:
default (bool): Configures the shutdown using the
default keyword
disable (bool): Negates shutdown using the no keyword
Returns:
bool: Returns True if the commands complete successfully
|
codesearchnet
|
def __init__(self, channel):
self.SayHello = channel.unary_unary(
'/helloworld.Greeter/SayHello',
request_serializer=hello__world__pb2.HelloRequest.
SerializeToString,
response_deserializer=hello__world__pb2.HelloReply.FromString,
)
|
Constructor.
Args:
channel: A grpc.Channel.
|
juraj-google-style
|
async def items(self):
response = (await self._api.get('/v1/acl/list'))
results = [decode_token(r) for r in response.body]
return consul(results, meta=extract_meta(response.headers))
|
Lists all the active tokens
Returns:
ObjectMeta: where value is a list of tokens
It returns a body like this::
[
{
"CreateIndex": 3,
"ModifyIndex": 3,
"ID": "8f246b77-f3e1-ff88-5b48-8ec93abf3e05",
"Name": "Client Token",
"Type": "client",
"Rules": {
"key": {
"": { "policy": "read" },
"private/": { "policy": "deny" }
}
}
}
]
|
codesearchnet
|
def __init__(self, record_bytes, header_bytes=None, footer_bytes=None, hop_bytes=None, name=None, encoding=None):
rr = gen_io_ops.fixed_length_record_reader_v2(record_bytes=record_bytes, header_bytes=header_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, encoding=encoding, name=name)
super(FixedLengthRecordReader, self).__init__(rr)
|
Create a FixedLengthRecordReader.
Args:
record_bytes: An int.
header_bytes: An optional int. Defaults to 0.
footer_bytes: An optional int. Defaults to 0.
hop_bytes: An optional int. Defaults to 0.
name: A name for the operation (optional).
encoding: The type of encoding for the file. Defaults to none.
|
github-repos
|
def baredoc(obj):
doc = getdoc(obj)
if (not doc):
return ''
doc = doc.splitlines()[0]
return doc.rstrip(' .').lstrip()
|
Return the first line of the docstring of an object.
Trailing periods and spaces as well as leading spaces are removed from the
output.
Args:
obj: any Python object.
Returns:
str: the first line of the docstring of obj.
|
codesearchnet
|
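A short usage sketch, assuming `getdoc` is inspect.getdoc as the behaviour suggests:
def sample():
    """Return the answer.

    Longer explanation that baredoc ignores.
    """

def undocumented():
    pass

baredoc(sample)         # 'Return the answer'
baredoc(undocumented)   # ''  (no docstring)
|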
def validate_config_must_have(config, required_keys):
missing_keys = set(required_keys) - set(config)
if len(missing_keys) > 0:
raise Exception('Invalid config with missing keys "%s"' % ', '.join(missing_keys))
|
Validate a config dictionary to make sure it has all of the specified keys
Args:
config: the config to validate.
required_keys: the list of keys that config must include.
Raises:
Exception: if the config is missing any of them.
|
juraj-google-style
|
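A brief sketch with a hypothetical config:
config = {'project': 'demo', 'region': 'us-east-1'}
validate_config_must_have(config, ['project', 'region'])    # passes silently
validate_config_must_have(config, ['project', 'dataset', 'table'])
# Exception: Invalid config with missing keys "dataset, table" (set order may vary)
|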
def complement(self):
if self.complementary:
try:
return self._instances[self.complementary]
except KeyError:
raise ValueError('{} has a complementary but it was not defined!'.format(self.complementary))
else:
return None
|
Return the complementary relationship of self.
Raises:
ValueError: if the relationship has a complementary
which was not defined.
Returns:
complementary (Relationship): the complementary relationship.
Example:
>>> from pronto.relationship import Relationship
>>> print(Relationship('has_part').complement())
Relationship('part_of')
>>> print(Relationship('has_units').complement())
None
|
codesearchnet
|
def sort_by_modified(files_or_folders: list) -> list:
return sorted(files_or_folders, key=os.path.getmtime, reverse=True)
|
Sort files or folders by modified time
Args:
files_or_folders: list of files or folders
Returns:
list
|
codesearchnet
|
def movie_lists(self, **kwargs):
path = self._get_path('movie_lists')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
|
Gets the movie lists available from the API.
Returns:
A dict representation of the JSON returned from the API.
|
codesearchnet
|
def layer_prepostprocess(previous_value, x, sequence, dropout_rate, norm_type, depth, epsilon, default_name, name=None, dropout_broadcast_dims=None, layer_collection=None):
with tf.variable_scope(name, default_name=default_name):
if (sequence == 'none'):
return x
for c in sequence:
if (c == 'a'):
x += previous_value
elif (c == 'z'):
x = zero_add(previous_value, x)
elif (c == 'n'):
x = apply_norm(x, norm_type, depth, epsilon, layer_collection=layer_collection)
else:
assert (c == 'd'), ('Unknown sequence step %s' % c)
x = dropout_with_broadcast_dims(x, (1.0 - dropout_rate), broadcast_dims=dropout_broadcast_dims)
return x
|
Apply a sequence of functions to the input or output of a layer.
The sequence is specified as a string which may contain the following
characters:
a: add previous_value
n: apply normalization
d: apply dropout
z: zero add
For example, if sequence=="dna", then the output is
previous_value + normalize(dropout(x))
Args:
previous_value: A Tensor, to be added as a residual connection ('a')
x: A Tensor to be transformed.
sequence: a string.
dropout_rate: a float
norm_type: a string (see apply_norm())
depth: an integer (size of last dimension of x).
epsilon: a float (parameter for normalization)
default_name: a string
name: a string
dropout_broadcast_dims: an optional list of integers less than 3
specifying in which dimensions to broadcast the dropout decisions.
saves memory.
layer_collection: A tensorflow_kfac.LayerCollection. Only used by the
KFAC optimizer. Default is None.
Returns:
a Tensor
|
codesearchnet
|
def main(argv=None):
args = None
cmd = None
try:
args = parse_args(argv)
if args.quiet:
logger.setLevel(logging.CRITICAL)
elif args.verbose:
logger.setLevel(logging.DEBUG)
cmd = args.func(args)
ret = cmd.run_cmd()
except KeyboardInterrupt:
logger.exception('interrupted by the user')
ret = 252
except NotDvcRepoError:
logger.exception('')
ret = 253
except DvcParserError:
ret = 254
except Exception:
logger.exception('unexpected error')
ret = 255
Analytics().send_cmd(cmd, args, ret)
return ret
|
Run dvc CLI command.
Args:
argv: optional list of arguments to parse. sys.argv is used by default.
Returns:
int: command's return code.
|
codesearchnet
|
def Create(self, request, global_params=None):
config = self.GetMethodConfig('Create')
return self._RunMethod(config, request, global_params=global_params)
|
Creates a new `BuildTrigger`. This API is experimental.
Args:
request: (CloudbuildProjectsLocationsTriggersCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(BuildTrigger) The response message.
|
github-repos
|
def initialize(self, emt_id, emt_pass):
self._emt_id = emt_id
self._emt_pass = emt_pass
self.bus = BusApi(self)
self.geo = GeoApi(self)
self.parking = ParkingApi(self)
|
Manual initialization of the interface attributes.
This is useful when the interface must be declared but initialized later
on with parsed configuration values.
Args:
emt_id (str): ID given by the server upon registration
emt_pass (str): Token given by the server upon registration
|
juraj-google-style
|
def merge(self, options):
return options_lib.merge_options(self, options)
|
Merges itself with the given `tf.data.Options`.
If this object and the `options` to merge set an option differently, a
warning is generated and this object's value is updated with the `options`
object's value.
Args:
options: The `tf.data.Options` to merge with.
Returns:
New `tf.data.Options` object which is the result of merging self with
the input `tf.data.Options`.
|
github-repos
|
def list(self, cat, ctr=None, nb_results=None, offset=None):
path = (LIST_URL + '?c=3&cat={}'.format(requests.utils.quote(cat)))
if (ctr is not None):
path += '&ctr={}'.format(requests.utils.quote(ctr))
if (nb_results is not None):
path += '&n={}'.format(requests.utils.quote(str(nb_results)))
if (offset is not None):
path += '&o={}'.format(requests.utils.quote(str(offset)))
data = self.executeRequestApi2(path)
clusters = []
docs = []
if (ctr is None):
for pf in data.preFetch:
for cluster in pf.response.payload.listResponse.doc:
clusters.extend(cluster.child)
return [c.docid for c in clusters]
else:
apps = []
for d in data.payload.listResponse.doc:
for c in d.child:
for a in c.child:
apps.append(utils.parseProtobufObj(a))
return apps
|
List all possible subcategories for a specific category. If
also a subcategory is provided, list apps from this category.
Args:
cat (str): category id
ctr (str): subcategory id
nb_results (int): if a subcategory is specified, limit number
of results to this number
offset (int): if a subcategory is specified, start counting from this
result
Returns:
A list of categories. If subcategory is specified, a list of apps in this
category.
|
codesearchnet
|
def empty(cls, labels=None):
warnings.warn("Table.empty(labels) is deprecated. Use Table(labels)", FutureWarning)
if labels is None:
return cls()
values = [[] for label in labels]
return cls(values, labels)
|
Creates an empty table. Column labels are optional. [Deprecated]
Args:
``labels`` (None or list): If ``None``, a table with 0
columns is created.
If a list, each element is a column label in a table with
0 rows.
Returns:
A new instance of ``Table``.
|
juraj-google-style
|
def Serialize(self, writer):
super(Header, self).Serialize(writer)
writer.WriteByte(0)
|
Serialize full object.
Args:
writer (neo.IO.BinaryWriter):
|
juraj-google-style
|
def _render_batch(self,
non_fluents: NonFluents,
states: Fluents, actions: Fluents, interms: Fluents,
rewards: np.array,
horizon: Optional[int] = None) -> None:
if horizon is None:
horizon = len(states[0][1])
self._render_round_init(horizon, non_fluents)
for t in range(horizon):
s = [(s[0], s[1][t]) for s in states]
f = [(f[0], f[1][t]) for f in interms]
a = [(a[0], a[1][t]) for a in actions]
r = rewards[t]
self._render_timestep(t, s, a, f, r)
self._render_round_end(rewards)
|
Prints `non_fluents`, `states`, `actions`, `interms` and `rewards`
for given `horizon`.
Args:
states (Sequence[Tuple[str, np.array]]): A state trajectory.
actions (Sequence[Tuple[str, np.array]]): An action trajectory.
interms (Sequence[Tuple[str, np.array]]): An interm state trajectory.
rewards (np.array): Sequence of rewards (1-dimensional array).
horizon (Optional[int]): Number of timesteps.
|
juraj-google-style
|
def __init__(self, num_steps=None, last_step=None):
if num_steps is None and last_step is None:
raise ValueError('One of num_steps or last_step must be specified.')
if num_steps is not None and last_step is not None:
raise ValueError('Only one of num_steps or last_step can be specified.')
self._num_steps = num_steps
self._last_step = last_step
|
Initializes a `StopAtStepHook`.
This hook requests stop after either a number of steps have been
executed or a last step has been reached. Only one of the two options can be
specified.
If `num_steps` is specified, it indicates the number of steps to execute
after `begin()` is called. If instead `last_step` is specified, it
indicates the last step we want to execute, as passed to the `after_run()`
call.
Args:
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
|
github-repos
|
def chdir(directory):
directory = os.path.abspath(directory)
logger.info(('chdir -> %s' % directory))
try:
if (not os.path.isdir(directory)):
logger.error('chdir -> %s failed! Directory does not exist!', directory)
return False
os.chdir(directory)
return True
except Exception as e:
logger.error(('chdir -> %s failed! %s' % (directory, e)))
return False
|
Change the current working directory.
Args:
directory (str): Directory to go to.
|
codesearchnet
|
def __init__(self, checkpoint, proto_id):
self._checkpoint = checkpoint
self._proto_id = proto_id
self.skip_restore = False
self.callback = checkpoint_adapter.ReshardCallback()
|
Specify an object within a checkpoint.
Args:
checkpoint: A _CheckpointRestoreCoordinator object.
proto_id: The index of this object in TrackableObjectGraph.nodes.
|
github-repos
|
def NotEqualTo(self, value):
self._awql = self._CreateSingleValueCondition(value, '!=')
return self._query_builder
|
Sets the type of the WHERE clause as "not equal to".
Args:
value: The value to be used in the WHERE condition.
Returns:
The query builder that this WHERE builder links to.
|
codesearchnet
|
def _parse_path(self):
if (self.engine == ENGINE_DROPBOX):
path = get_dropbox_folder_location()
elif (self.engine == ENGINE_GDRIVE):
path = get_google_drive_folder_location()
elif (self.engine == ENGINE_COPY):
path = get_copy_folder_location()
elif (self.engine == ENGINE_ICLOUD):
path = get_icloud_folder_location()
elif (self.engine == ENGINE_BOX):
path = get_box_folder_location()
elif (self.engine == ENGINE_FS):
if self._parser.has_option('storage', 'path'):
cfg_path = self._parser.get('storage', 'path')
path = os.path.join(os.environ['HOME'], cfg_path)
else:
raise ConfigError("The required 'path' can't be found while the 'file_system' engine is used.")
return str(path)
|
Parse the storage path in the config.
Returns:
str
|
codesearchnet
|
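A sketch of the 'file_system' branch only: with an illustrative [storage] path value of 'backups', the parsed location is $HOME/backups.
import os
cfg_path = 'backups'                               # illustrative config value
path = os.path.join(os.environ['HOME'], cfg_path)  # e.g. /home/user/backups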
def read_string(self, registeraddress, numberOfRegisters=16, functioncode=3):
_checkFunctioncode(functioncode, [3, 4])
_checkInt(numberOfRegisters, minvalue=1, description='number of registers for read string')
return self._genericCommand(functioncode, registeraddress, numberOfRegisters=numberOfRegisters, payloadformat='string')
|
Read a string from the slave.
Each 16-bit register in the slave is interpreted as two characters (1 byte = 8 bits).
For example 16 consecutive registers can hold 32 characters (32 bytes).
Args:
* registeraddress (int): The slave register start address (use decimal numbers, not hex).
* numberOfRegisters (int): The number of registers allocated for the string.
* functioncode (int): Modbus function code. Can be 3 or 4.
Returns:
The string (str).
Raises:
ValueError, TypeError, IOError
|
codesearchnet
|
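A minimal usage sketch assuming a minimalmodbus-style Instrument; the port, slave address, and register layout are illustrative only:
instrument = Instrument('/dev/ttyUSB0', slaveaddress=1)                  # hypothetical setup
name = instrument.read_string(registeraddress=16, numberOfRegisters=8)  # 8 registers -> up to 16 chars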
def weighted_moments(x, axes, frequency_weights, name=None, keep_dims=None, keepdims=None):
keep_dims = deprecated_argument_lookup('keepdims', keepdims, 'keep_dims', keep_dims)
if keep_dims is None:
keep_dims = False
with ops.name_scope(name, 'weighted_moments', [x, frequency_weights, axes]):
x = ops.convert_to_tensor(x, name='x')
frequency_weights = ops.convert_to_tensor(frequency_weights, name='frequency_weights')
needs_cast = x.dtype == dtypes.float16
if needs_cast:
x = math_ops.cast(x, dtypes.float32)
if frequency_weights.dtype != x.dtype:
frequency_weights = math_ops.cast(frequency_weights, x.dtype)
weighted_input_sum = math_ops.reduce_sum(frequency_weights * x, axes, name='weighted_input_sum', keepdims=True)
broadcasted_weights = frequency_weights + array_ops.zeros_like(x)
sum_of_weights = math_ops.reduce_sum(broadcasted_weights, axes, name='sum_of_weights', keepdims=True)
weighted_mean = math_ops.div_no_nan(weighted_input_sum, sum_of_weights)
weighted_distsq = math_ops.reduce_sum(frequency_weights * math_ops.squared_difference(x, weighted_mean), axes, name='weighted_distsq', keepdims=True)
weighted_variance = math_ops.div_no_nan(weighted_distsq, sum_of_weights)
if not keep_dims:
weighted_mean = array_ops.squeeze(weighted_mean, axis=axes)
weighted_variance = array_ops.squeeze(weighted_variance, axis=axes)
if needs_cast:
weighted_mean = math_ops.cast(weighted_mean, dtypes.float16)
weighted_variance = math_ops.cast(weighted_variance, dtypes.float16)
return (weighted_mean, weighted_variance)
|
Returns the frequency-weighted mean and variance of `x`.
Args:
x: A tensor.
axes: 1-d tensor of int32 values; these are the axes along which
to compute mean and variance.
frequency_weights: A tensor of positive weights which can be
broadcast with x.
name: Name used to scope the operation.
keep_dims: Produce moments with the same dimensionality as the input.
keepdims: Alias of keep_dims.
Returns:
Two tensors: `weighted_mean` and `weighted_variance`.
|
github-repos
|
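A worked sketch with concrete numbers, using the public tf.nn.weighted_moments wrapper that this implementation backs; each row of x is counted according to its weight:
import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
w = tf.constant([[1.0], [3.0]])                    # broadcasts against x
mean, var = tf.nn.weighted_moments(x, axes=[0], frequency_weights=w)
# mean == [2.5, 3.5]: e.g. (1*1.0 + 3*3.0) / (1 + 3) = 2.5 for the first column
# var  == [0.75, 0.75]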
def _add_tests(self, testcases):
def _add_test(test_runner, test_dict):
def test(self):
try:
test_runner.run_test(test_dict)
except exceptions.MyBaseFailure as ex:
self.fail(str(ex))
finally:
self.meta_datas = test_runner.meta_datas
if "config" in test_dict:
test.__doc__ = test_dict["config"].get("name")
variables = test_dict["config"].get("variables", {})
else:
test.__doc__ = test_dict.get("name")
variables = test_dict.get("variables", {})
if isinstance(test.__doc__, parser.LazyString):
parsed_variables = parser.parse_variables_mapping(variables, ignore=True)
test.__doc__ = parser.parse_lazy_data(
test.__doc__, parsed_variables)
return test
test_suite = unittest.TestSuite()
for testcase in testcases:
config = testcase.get("config", {})
test_runner = runner.Runner(config)
TestSequense = type('TestSequense', (unittest.TestCase,), {})
tests = testcase.get("teststeps", [])
for index, test_dict in enumerate(tests):
for times_index in range(int(test_dict.get("times", 1))):
test_method_name = 'test_{:04}_{:03}'.format(index, times_index)
test_method = _add_test(test_runner, test_dict)
setattr(TestSequense, test_method_name, test_method)
loaded_testcase = self.test_loader.loadTestsFromTestCase(TestSequense)
setattr(loaded_testcase, "config", config)
setattr(loaded_testcase, "teststeps", tests)
setattr(loaded_testcase, "runner", test_runner)
test_suite.addTest(loaded_testcase)
return test_suite
|
Initialize each testcase with Runner() and add it to the test suite.
Args:
testcases (list): testcases list.
Returns:
unittest.TestSuite()
|
juraj-google-style
|
def index_of(self, value_str):
if value_str is None:
value_str = ''
if value_str in self._string_to_index:
return self._string_to_index[value_str]
index = len(self._string_table)
self._string_table.append(value_str)
self._string_to_index[value_str] = index
return index
|
Get index of value_str in the string table.
If value_str is not in the string table, we will add it at the end
and then return the new index.
Args:
value_str: (string) Value to lookup/add in/to the string table.
Returns:
Index of value_str in the string table.
|
github-repos
|
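A behavioural sketch; `StringTable` stands in for whatever object owns `_string_table` and `_string_to_index` and is hypothetical:
table = StringTable()
first = table.index_of('foo')                       # appended, new index returned
assert table.index_of('foo') == first               # repeated lookups reuse the index
assert table.index_of(None) == table.index_of('')   # None is folded into ''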
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
local_buffer = utils.BytearrayStream()
if (kmip_version < enums.KMIPVersion.KMIP_2_0):
if (self._common_template_attribute is not None):
self._common_template_attribute.write(local_buffer, kmip_version=kmip_version)
elif (self._common_template_attribute is not None):
attributes = objects.convert_template_attribute_to_attributes(self._common_template_attribute)
attributes.write(local_buffer, kmip_version=kmip_version)
if (kmip_version < enums.KMIPVersion.KMIP_2_0):
if (self._private_key_template_attribute is not None):
self._private_key_template_attribute.write(local_buffer, kmip_version=kmip_version)
elif (self._private_key_template_attribute is not None):
attributes = objects.convert_template_attribute_to_attributes(self._private_key_template_attribute)
attributes.write(local_buffer, kmip_version=kmip_version)
if (kmip_version < enums.KMIPVersion.KMIP_2_0):
if (self._public_key_template_attribute is not None):
self._public_key_template_attribute.write(local_buffer, kmip_version=kmip_version)
elif (self._public_key_template_attribute is not None):
attributes = objects.convert_template_attribute_to_attributes(self._public_key_template_attribute)
attributes.write(local_buffer, kmip_version=kmip_version)
self.length = local_buffer.length()
super(CreateKeyPairRequestPayload, self).write(output_buffer, kmip_version=kmip_version)
output_buffer.write(local_buffer.buffer)
|
Write the data encoding the CreateKeyPair request payload to a buffer.
Args:
output_buffer (stream): A data buffer in which to encode object
data, supporting a write method.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
|
codesearchnet
|
def get_tag_html(tag_id):
tag_data = get_lazy_tag_data(tag_id)
tag = tag_data['tag']
args = tag_data['args']
kwargs = tag_data['kwargs']
lib, tag_name = get_lib_and_tag_name(tag)
args_str = ''
if args:
for arg in args:
if isinstance(arg, six.string_types):
args_str += "'{0}' ".format(arg)
else:
args_str += "{0} ".format(arg)
kwargs_str = ''
if kwargs:
for name, value in kwargs.items():
if isinstance(value, six.string_types):
kwargs_str += "{0}='{1}' ".format(name, value)
else:
kwargs_str += "{0}={1} ".format(name, value)
html = '{{% load {lib} %}}{{% {tag_name} {args}{kwargs}%}}'.format(
lib=lib, tag_name=tag_name, args=args_str, kwargs=kwargs_str)
return html
|
Returns the Django HTML to load the tag library and render the tag.
Args:
tag_id (str): The tag id to return the HTML for.
|
juraj-google-style
|
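Illustrative only: for a lazy tag registered as 'blog.latest_posts' with args=(5,) and kwargs={'category': 'news'} (hypothetical names), the rendered string would look roughly like:
html = get_tag_html(tag_id)   # tag_id comes from the lazy-tag registry read by get_lazy_tag_data
# "{% load blog %}{% latest_posts 5 category='news' %}"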
def encrypt(self, plainText):
encryptedResult = ''
for index in range(0, len(plainText), BLOCK_SIZE):
block = plainText[index:(index + BLOCK_SIZE)]
if (len(block) < BLOCK_SIZE):
block = zero_pad(block, BLOCK_SIZE)
encryptedResult += self.encrypt_block(block)
return encryptedResult
|
Encrypt an arbitrary-length block of data.
NOTE: This function formerly worked only on 16-byte blocks of `plainText`.
Code that assumed this should still work fine, but can optionally be
modified to call `encrypt_block` instead.
Args:
plainText (str): data to encrypt. If the data is not a multiple of 16
bytes long, it will be padded with null (0x00) bytes until it is.
Returns:
encrypted data. Note that this will always be a multiple of 16 bytes
long.
|
codesearchnet
|
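A sketch of the padding behaviour; the cipher class and key are hypothetical, and only the zero-padding to BLOCK_SIZE comes from the code above:
cipher = SomeBlockCipher(key)            # hypothetical cipher exposing encrypt/encrypt_block
ciphertext = cipher.encrypt('A' * 20)    # one full 16-byte block + one zero-padded block
assert len(ciphertext) % BLOCK_SIZE == 0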
def setup(self, dna_spec: geno.DNASpec) -> None:
self._dna_spec = dna_spec
|
Setup states of an early stopping policy based on dna_spec.
Args:
dna_spec: DNASpec for DNA to propose.
Raises:
RuntimeError: if dna_spec is not supported.
|
github-repos
|
def match(self, url):
try:
urlSchemes = self._urlSchemes.itervalues()
except AttributeError:
urlSchemes = self._urlSchemes.values()
for urlScheme in urlSchemes:
if urlScheme.match(url):
return True
return False
|
Check whether the url matches any of the schemes within this
endpoint.
Args:
url: The url to match against each scheme
Returns:
True if a matching scheme was found for the url, False otherwise
|
juraj-google-style
|
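A behavioural sketch; the endpoint class and the way schemes are registered are assumptions, and only the match() semantics come from the code above:
endpoint = Endpoint()                                                  # hypothetical container
endpoint._urlSchemes['example'] = UrlScheme('http://example.com/*')   # assumed registration
endpoint.match('http://example.com/page')   # True as soon as any scheme matches
endpoint.match('http://other.org/page')     # False when none match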
def first_paragraph_indent(indent_texts):
opening_indent = determine_opening_indent(indent_texts)
result = []
input = iter(indent_texts)
for (indent, text) in input:
if (indent == 0):
result.append((opening_indent, text))
else:
result.append((indent, text))
break
for (indent, text) in input:
result.append((indent, text))
return result
|
Fix the indentation on the first paragraph.
This occurs because the first line of a multi-line docstring following the
opening quote usually has no indent.
Args:
indent_texts: The lines of the docstring as an iterable over 2-tuples
each containing an integer indent level as the first element and
the text as the second element.
Returns:
A list of 2-tuples, each containing an integer indent level as the
first element and the text as the second element.
|
codesearchnet
|
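A worked example, assuming determine_opening_indent() reports the body indent (4 here); only the unindented first line is adjusted:
lines = [(0, 'Summary line.'), (4, 'Details line one.'), (8, 'Nested detail.')]
first_paragraph_indent(lines)
# -> [(4, 'Summary line.'), (4, 'Details line one.'), (8, 'Nested detail.')]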