code (string, 20–4.93k chars) | docstring (string, 33–1.27k chars) | source (string, 3 classes) |
---|---|---|
def creationlog(base, package, stackdepth=_def_stackdepth):
@staticmethod
def wrapnew(cls, *argl, **argd):
global _atdepth_new, _cstack_new, streamlining
origstream = None
if (not (decorating or streamlining)):
(entry, _atdepth_new) = _pre_create(cls, _atdepth_new, stackdepth, *argl, **argd)
_cstack_new.append(cls)
fqdn = cls.__fqdn__
if ((fqdn in _streamlines) and _streamlines[fqdn]):
msg.std('Streamlining {}.'.format(fqdn), 2)
origstream = streamlining
streamlining = True
try:
if six.PY2:
result = base.__old__(cls, *argl, **argd)
elif (base.__old__ is object.__new__):
result = base.__old__(cls)
else:
result = base.__old__(cls, *argl, **argd)
except TypeError:
import sys
(xcls, xerr) = sys.exc_info()[0:2]
referral = xerr.args[0].split()[(- 1)]
if ('.__new__()' in referral):
t = eval(referral.split('.')[0])
result = t.__new__(cls, *argl, **argd)
else:
raise
result = None
if ((result is not None) and hasattr(cls, '__init__')):
try:
cls.__init__(result, *argl, **argd)
except:
print(cls, argl, argd)
raise
else:
msg.err('Object initialize failed for {}.'.format(base.__name__))
if (origstream is not None):
streamlining = origstream
if (not (decorating or streamlining)):
_cstack_new.pop()
if (len(_cstack_new) == 0):
_atdepth_new = False
_post_create(_atdepth_new, entry, result)
return result
return wrapnew
|
Decorator for wrapping the creation of class instances that are being logged
by acorn.
Args:
base: base class used to call __new__ for the construction.
package (str): name of (global) package the class belongs to.
stackdepth (int): if the calling stack is less than this depth, then
include the entry in the log; otherwise ignore it.
|
codesearchnet
|
def points_random_3d(count, range_x=((- 10.0), 10.0), range_y=((- 10.0), 10.0), range_z=((- 10.0), 10.0), seed=None) -> VAO:
random.seed(seed)
def gen():
for _ in range(count):
(yield random.uniform(*range_x))
(yield random.uniform(*range_y))
(yield random.uniform(*range_z))
data = numpy.fromiter(gen(), count=(count * 3), dtype=numpy.float32)
vao = VAO('geometry:points_random_3d', mode=moderngl.POINTS)
vao.buffer(data, '3f', ['in_position'])
return vao
|
Generates random positions inside a confined box.
Args:
count (int): Number of points to generate
Keyword Args:
range_x (tuple): min-max range for x axis: Example (-10.0, 10.0)
range_y (tuple): min-max range for y axis: Example (-10.0, 10.0)
range_z (tuple): min-max range for z axis: Example (-10.0, 10.0)
seed (int): The random seed
Returns:
A :py:class:`demosys.opengl.vao.VAO` instance
|
codesearchnet
|
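A minimal sketch of just the data-generation step in `points_random_3d`, leaving out the VAO/moderngl upload; it assumes nothing beyond `random` and `numpy` and mirrors the interleaved generator pattern above.

```python
import random
import numpy

def random_points(count, range_x=(-10.0, 10.0), range_y=(-10.0, 10.0),
                  range_z=(-10.0, 10.0), seed=None):
    # Same interleaved x, y, z generator as points_random_3d, reshaped to (count, 3).
    random.seed(seed)

    def gen():
        for _ in range(count):
            yield random.uniform(*range_x)
            yield random.uniform(*range_y)
            yield random.uniform(*range_z)

    return numpy.fromiter(gen(), dtype=numpy.float32, count=count * 3).reshape(count, 3)

pts = random_points(4, seed=42)
assert pts.shape == (4, 3)
assert (pts >= -10.0).all() and (pts <= 10.0).all()
```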
def load_tf_sharded_weights(model, shard_files, ignore_mismatched_sizes=False, strict=False, _prefix=None):
unexpected_keys = set()
saved_keys = set()
mismatched_keys = set()
model_keys = set()
model_layer_map = {}
for i, k in enumerate(model.weights):
layer_name = k.name
if _prefix is not None and layer_name.startswith(_prefix):
layer_name = layer_name[len(_prefix):]
layer_name = layer_name.lstrip('/')
if not ('model.' in layer_name or len(layer_name.split('/')) == 1):
layer_name = '/'.join(layer_name.split('/')[1:])
model_keys.add(layer_name)
model_layer_map[layer_name] = i
for shard_file in shard_files:
saved_weight_names_set, unexpected_keys_set, mismatched_keys_set = load_tf_shard(model, model_layer_map, shard_file, ignore_mismatched_sizes=ignore_mismatched_sizes, _prefix=_prefix)
saved_keys.update(saved_weight_names_set)
unexpected_keys.update(unexpected_keys_set)
mismatched_keys.update(mismatched_keys_set)
gc.collect()
missing_keys = model_keys - saved_keys
if strict and (len(missing_keys) > 0 or len(unexpected_keys) > 0):
error_message = f'Error(s) in loading state_dict for {model.__class__.__name__}'
if len(missing_keys) > 0:
str_missing_keys = ','.join([f'"{k}"' for k in missing_keys])
error_message += f'\nMissing key(s): {str_missing_keys}.'
if len(unexpected_keys) > 0:
str_unexpected_keys = ','.join([f'"{k}"' for k in unexpected_keys])
error_message += f'\nUnexpected key(s): {str_unexpected_keys}.'
raise RuntimeError(error_message)
return (missing_keys, unexpected_keys, mismatched_keys)
|
This is the same as `load_tf_weights` but for a sharded checkpoint. Detect missing and unexpected layers and load
the TF weights from the shard file according to their names and shapes.
This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being
loaded in the model.
Args:
model (`keras.models.Model`): The model in which to load the checkpoint.
shard_files (`str` or `os.PathLike`): A list containing the sharded checkpoint names.
ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`):
Whether or not to ignore mismatches between the sizes of the checkpoint weights and the model weights.
strict (`bool`, *optional*, defaults to `False`):
Whether to strictly enforce that the keys in the model state dict match the keys in the sharded checkpoint.
Returns:
Three sets: one for the missing layers, another for the unexpected layers, and a last one for the
mismatched layers.
|
github-repos
|
def conversation(self, name=None, **kwargs):
convo = Conversation(self, **kwargs)
super().conversation(name, convo)
return convo
|
Make a new conversation.
Arguments:
name: The key for the dictionary the conversation will be stored as
in conversations. If None the conversation will be stored as a
list instead. Mixing both types results in an error.
**kwargs: Keyword arguments to pass into the new conversation.
These accept the same arguments as Cleverbot.
Returns:
The new conversation.
|
codesearchnet
|
def method(*args, **kwargs):
assert (len(args) == 0)
assert (len(kwargs) == 1)
assert ('num_return_vals' in kwargs)
num_return_vals = kwargs['num_return_vals']
def annotate_method(method):
method.__ray_num_return_vals__ = num_return_vals
return method
return annotate_method
|
Annotate an actor method.
.. code-block:: python
@ray.remote
class Foo(object):
@ray.method(num_return_vals=2)
def bar(self):
return 1, 2
f = Foo.remote()
_, _ = f.bar.remote()
Args:
num_return_vals: The number of object IDs that should be returned by
invocations of this actor method.
|
codesearchnet
|
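A quick check of the `method` decorator factory above (assuming it is in scope); Ray itself is not needed to see that it only attaches metadata and returns the function unchanged.

```python
def bar(self):
    return 1, 2

decorated = method(num_return_vals=2)(bar)
assert decorated is bar                      # the function object is returned as-is
assert bar.__ray_num_return_vals__ == 2      # only an attribute is attached
```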
def GetContract(self, script_hash):
if script_hash.ToBytes() in self._contracts.keys():
return self._contracts[script_hash.ToBytes()]
return None
|
Get contract for specified script_hash.
Args:
script_hash (UInt160): a bytearray (len 20).
Returns:
Contract: if a contract was found matching the provided script hash, otherwise None
|
juraj-google-style
|
def __init__(self, resolution, **kwargs):
super(CelebaHQConfig, self).__init__(
name="%d" % resolution,
description=("CelebaHQ images in %d x %d resolution" %
(resolution, resolution)),
**kwargs)
self.resolution = resolution
self.file_name = "data%dx%d.tar" % (resolution, resolution)
|
BuilderConfig for CelebaHQ.
Args:
resolution: Resolution of the image. Values supported: powers of 2 up to
1024.
**kwargs: keyword arguments forwarded to super.
|
juraj-google-style
|
def range(*args, prefix: str):
return [NamedQubit((prefix + str(i))) for i in range(*args)]
|
Returns a range of NamedQubits.
The range returned starts with the prefix, and followed by a qubit for
each number in the range, e.g.:
NamedQubit.range(3, prefix="a") -> ["a0", "a1", "a2"]
NamedQubit.range(2, 4, prefix="a") -> ["a2", "a3"]
Args:
*args: Args to be passed to Python's standard range function.
prefix: A prefix for constructed NamedQubits.
Returns:
A list of NamedQubits.
|
codesearchnet
|
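Usage sketch for `NamedQubit.range`, assuming Cirq is installed; the names follow the `prefix + str(i)` pattern of the implementation above.

```python
import cirq

qubits = cirq.NamedQubit.range(3, prefix='a')
assert [q.name for q in qubits] == ['a0', 'a1', 'a2']

qubits = cirq.NamedQubit.range(2, 4, prefix='a')
assert [q.name for q in qubits] == ['a2', 'a3']
```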
def args_to_kwargs(base_type, removed_method=False, removed_args=None):
def wrap(func):
if removed_method:
return func
removed_arg_names = removed_args if removed_args is not None else []
base_arg_spec = getfullargspec(unwrap(getattr(base_type, func.__name__)))
base_arg_names = base_arg_spec.args
all_possible_base_arg_names = base_arg_names + base_arg_spec.kwonlyargs
beam_arg_names = getfullargspec(func).args
if (not_found := (set(beam_arg_names) - set(all_possible_base_arg_names) - set(removed_arg_names))):
raise TypeError(f'Beam definition of {func.__name__} has arguments that are not found in the base version of the function: {not_found}')
@functools.wraps(func)
def wrapper(*args, **kwargs):
if len(args) > len(base_arg_names):
raise TypeError(f'{func.__name__} got too many positioned arguments.')
for name, value in zip(base_arg_names, args):
if name in kwargs:
raise TypeError("%s() got multiple values for argument '%s'" % (func.__name__, name))
kwargs[name] = value
if removed_args:
for name in removed_args:
if name not in kwargs:
kwargs[name] = None
return func(**kwargs)
return wrapper
return wrap
|
Convert all args to kwargs before calling the decorated function.
When applied to a function, this decorator creates a new function
that always calls the wrapped function with *only* keyword arguments. It
inspects the argspec for the identically-named method on `base_type` to
determine the name to use for arguments that are converted to keyword
arguments.
For internal use only. No backwards compatibility guarantees.
Args:
base_type: The pandas type of the method that this is trying to replicate.
removed_method: Whether this method has been removed in the running
Pandas version.
removed_args: If not empty, which arguments have been dropped in the
running Pandas version.
|
github-repos
|
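A self-contained exercise of `args_to_kwargs`, assuming the decorator above is in scope with its `getfullargspec`/`unwrap`/`functools` imports; `Base` here is a hypothetical stand-in for the pandas type whose signature is mirrored.

```python
class Base:
    # Hypothetical stand-in for a pandas method with the reference signature.
    def combine(self, other, func, fill_value=None):
        raise NotImplementedError

@args_to_kwargs(Base)
def combine(self, other, func, fill_value=None):
    # The wrapper forwards everything as keyword arguments.
    return (self, other, func, fill_value)

assert combine('frame', 1, max) == ('frame', 1, max, None)
```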
def tap_hold(self, x, y, duration=1.0):
data = {'x': x, 'y': y, 'duration': duration}
return self.http.post('/wda/touchAndHold', data=data)
|
Tap and hold for a moment
Args:
- x, y(int): position
- duration(float): seconds of hold time
[[FBRoute POST:@"/wda/touchAndHold"] respondWithTarget:self action:@selector(handleTouchAndHoldCoordinate:)],
|
juraj-google-style
|
def _get_object_by_name(self, object_endpoint, object_name, timeout=None):
timeout = timeout or self._timeout
resp = self._get(self._u(object_endpoint, object_name),
session=self._session, timeout=timeout)
resp.raise_for_status()
return resp.json()
|
generic function to get an object (metadata, tag, ...) by name from SignalFx.
Args:
object_endpoint (string): API endpoint suffix (e.g. 'v2/tag')
object_name (string): name of the object (e.g. 'jvm.cpu.load')
Returns:
dictionary of response
|
juraj-google-style
|
def count_divisors(n):
if not isinstance(n, int):
raise TypeError("Expecting a strictly positive integer")
if n <= 0:
raise ValueError("Expecting a strictly positive integer")
number_of_divisors = 1
remain = n
for p in prime_generator():
if p > n:
return number_of_divisors
exponent = 1
while remain % p == 0:
remain = remain // p
exponent += 1
number_of_divisors *= exponent
if remain == 1:
return number_of_divisors
|
Count the number of divisors of an integer n
Args:
n (int): strictly positive integer
Returns:
The number of distinct divisors of n
Raises:
TypeError: if n is not an integer
ValueError: if n is not strictly positive
|
juraj-google-style
|
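A dependency-free sketch of the same idea, with trial division standing in for the `prime_generator` helper: the divisor count is the product of (exponent + 1) over the prime factorization.

```python
def count_divisors_simple(n: int) -> int:
    # d(n) = prod(e_i + 1) for n = prod(p_i ** e_i).
    if not isinstance(n, int):
        raise TypeError("Expecting a strictly positive integer")
    if n <= 0:
        raise ValueError("Expecting a strictly positive integer")
    count, remain, p = 1, n, 2
    while p * p <= remain:
        exponent = 0
        while remain % p == 0:
            remain //= p
            exponent += 1
        count *= exponent + 1
        p += 1
    if remain > 1:            # one prime factor left over
        count *= 2
    return count

assert count_divisors_simple(12) == 6    # 1, 2, 3, 4, 6, 12
assert count_divisors_simple(1) == 1
```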
def _block_orth(self, p1, p2):
if p1.shape.as_list() != p2.shape.as_list():
raise ValueError(f'The dimension of the matrices must be the same. Received p1.shape={p1.shape} and p2.shape={p2.shape}.')
n = p1.shape.as_list()[0]
kernel2x2 = {}
eye = linalg_ops_impl.eye(n, dtype=self.dtype)
kernel2x2[0, 0] = math_ops.matmul(p1, p2)
kernel2x2[0, 1] = math_ops.matmul(p1, eye - p2)
kernel2x2[1, 0] = math_ops.matmul(eye - p1, p2)
kernel2x2[1, 1] = math_ops.matmul(eye - p1, eye - p2)
return kernel2x2
|
Construct a 2 x 2 kernel.
Used to construct orthogonal kernel.
Args:
p1: A symmetric projection matrix.
p2: A symmetric projection matrix.
Returns:
A 2 x 2 kernel [[p1p2, p1(1-p2)],
[(1-p1)p2, (1-p1)(1-p2)]].
Raises:
ValueError: If the dimensions of p1 and p2 are different.
|
github-repos
|
def __str__(self):
name = self.__class__.__name__
return '%s(Type %d, Address %d)' % (name, self.Type, self.Addr)
|
Returns a string representation of the data event.
Args:
self (JLinkDataEvent): the ``JLinkDataEvent`` instance
Returns:
A string representation of the data event.
|
juraj-google-style
|
def add_workflow_definitions(sbi_config: dict):
registered_workflows = []
for i in range(len(sbi_config['processing_blocks'])):
workflow_config = sbi_config['processing_blocks'][i]['workflow']
workflow_name = '{}:{}'.format(workflow_config['id'], workflow_config['version'])
if (workflow_name in registered_workflows):
continue
workflow_definition = dict(id=workflow_config['id'], version=workflow_config['version'], stages=[])
key = 'workflow_definitions:{}:{}'.format(workflow_config['id'], workflow_config['version'])
DB.save_dict(key, workflow_definition, hierarchical=False)
registered_workflows.append(workflow_name)
|
Add any missing SBI workflow definitions as placeholders.
This is a utility function used in testing and adds mock / test workflow
definitions to the database for workflows defined in the specified
SBI config.
Args:
sbi_config (dict): SBI configuration dictionary.
|
codesearchnet
|
def _read_ipv4_options(self, size=None):
counter = 0
optkind = list()
options = dict()
while (counter < size):
kind = self._read_unpack(1)
opts = IPv4_OPT.get(kind)
if (opts is None):
len_ = (size - counter)
counter = size
options['Unknown'] = self._read_fileng(len_)
break
dscp = OPT_TYPE.get(kind)
desc = dscp.name
if opts[0]:
byte = self._read_unpack(1)
if byte:
data = process_opt[opts[2]](self, byte, kind)
else:
data = dict(kind=kind, type=self._read_opt_type(kind), length=2, flag=True)
else:
byte = 1
data = dict(kind=kind, type=self._read_opt_type(kind), length=1)
counter += byte
if (dscp in optkind):
if isinstance(options[desc], tuple):
options[desc] += (Info(data),)
else:
options[desc] = (Info(options[desc]), Info(data))
else:
optkind.append(dscp)
options[desc] = data
if (not kind):
break
if (counter < size):
len_ = (size - counter)
self._read_binary(len_)
return (tuple(optkind), options)
|
Read IPv4 option list.
Positional arguments:
* size -- int, buffer size
Returns:
* tuple -- IPv4 option list
* dict -- extracted IPv4 option
|
codesearchnet
|
def find_duplicates_in_array(array):
duplicates = []
non_duplicates = []
if (len(array) != len(set(array))):
for item in array:
if (item not in non_duplicates):
non_duplicates.append(item)
elif ((item in non_duplicates) and (item not in duplicates)):
duplicates.append(item)
return duplicates
|
Runs through the array and returns the elements that appear
more than once.
Args:
array: The array to check for duplicates.
Returns:
Array of the elements that are duplicates. Returns empty list if
there are no duplicates.
|
codesearchnet
|
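Quick usage check for `find_duplicates_in_array`, assuming the pure-Python function above is in scope.

```python
assert find_duplicates_in_array([1, 2, 2, 3, 3, 3]) == [2, 3]
assert find_duplicates_in_array(['a', 'b', 'c']) == []
```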
def get_figure(new_fig=True, subplot='111', params=None):
_get_plt()
if new_fig:
fig = plt.figure()
else:
fig = plt.gcf()
params = dict_if_none(params)
if isinstance(subplot, (tuple, list)):
ax = fig.add_subplot(*subplot, **params)
else:
ax = fig.add_subplot(subplot, **params)
return fig, ax
|
Function to be used for viewing - plotting,
to initialize the matplotlib figure - axes.
Args:
new_fig(bool): Defines if a new figure will be created; if False, the current figure is used
subplot (tuple or matplotlib subplot specifier string): Create axes with these parameters
params (dict): extra options passed to add_subplot()
Returns:
Matplotlib Figure and Axes
|
juraj-google-style
|
def summary(self, line_length=160, detailed=True, print_fn=None):
if not self._converted:
raise RuntimeError(f'Impossible to call `{self.__class__.__name__}.summary()` before calling `{self.__class__.__name__}.convert()`.')
if line_length < 160:
raise ValueError(f'Invalid `line_length` value has been received: {line_length}. Minimum: 160.')
if print_fn is None:
print_fn = print
columns = [('TRTEngineOP Name', 0.2), ('Device', 0.09), ('
positions = [int(line_length * p) for _, p in columns]
positions = np.cumsum(positions).tolist()
headers = [h for h, _ in columns]
_print_row(headers, positions, print_fn=print_fn)
print_fn('=' * line_length)
n_engines = 0
n_ops_converted = 0
n_ops_not_converted = 0
graphdef = self._converted_func.graph.as_graph_def(add_shapes=True)
trtengineops_dict = dict()
for node in graphdef.node:
if node.op != 'TRTEngineOp':
n_ops_not_converted += 1
continue
else:
trtengineops_dict[node.name] = node
n_engines += 1
for name, node in sorted(trtengineops_dict.items()):
node_device = node.device.split('/')[-1]
in_shapes = trt_utils.get_node_io_shapes(node, 'input_shapes')
out_shapes = trt_utils.get_node_io_shapes(node, '_output_shapes')
in_dtypes = trt_utils.get_trtengineop_io_dtypes(node, 'InT')
out_dtypes = trt_utils.get_trtengineop_io_dtypes(node, 'OutT')
in_nodes_count = trt_utils.get_trtengineop_io_nodes_count(node, 'InT')
out_nodes_count = trt_utils.get_trtengineop_io_nodes_count(node, 'OutT')
node_count, converted_ops_dict = trt_utils.get_trtengineop_node_op_count(graphdef, name)
n_ops_converted += node_count
if n_engines != 1:
print_fn(f"\n{'-' * 40}\n")
_print_row(fields=[name, node_device, node_count, in_nodes_count, out_nodes_count, in_dtypes, out_dtypes, in_shapes, out_shapes], positions=positions, print_fn=print_fn)
if detailed:
print_fn()
for key, value in sorted(dict(converted_ops_dict).items()):
print_fn(f'\t- {key}: {value}x')
print_fn(f"\n{'=' * line_length}")
print_fn(f'[*] Total number of TensorRT engines: {n_engines}')
total_ops = n_ops_not_converted + n_ops_converted
conversion_ratio = n_ops_converted / total_ops * 100
print_fn(f'[*] % of OPs Converted: {conversion_ratio:.2f}% [{n_ops_converted}/{total_ops}]\n')
|
This method describes the results of the conversion by TF-TRT.
It includes information such as the name of the engine, the number of nodes
per engine, the input and output dtype, along with the input shape of each
TRTEngineOp.
Args:
line_length: Default line length when printing on the console. Minimum 160
characters long.
detailed: Whether or not to show the nodes inside each TRTEngineOp.
print_fn: Print function to use. Defaults to `print`. It will be called on
each line of the summary. You can set it to a custom function in order
to capture the string summary.
Raises:
RuntimeError: if the graph is not converted.
|
github-repos
|
def MsgUser(msg):
msg_tested_versions = ['xp', 'vista', '2008', '2003']
msg_args = ['/c', '%SystemRoot%\\System32\\msg.exe', '*', '/TIME:0']
host_version = platform.platform().lower()
if (not msg):
return ('Command not ran.', 'Empty message.', (- 1))
else:
msg_args.extend([msg])
for version in msg_tested_versions:
if (host_version.find(version) != (- 1)):
res = client_utils_common.Execute('cmd', msg_args, time_limit=(- 1), bypass_whitelist=True)
return res
return ('', 'Command not available for this version.', (- 1))
|
Sends a message to a user.
Args:
msg: Message to be displayed to user.
Returns:
res which is a tuple of (stdout, stderr, exit_status, time_taken).
|
codesearchnet
|
def top_kth_iterative(x, k):
def next_x(cur_x, _):
top_x = tf.reduce_max(cur_x, axis=(- 1), keep_dims=True)
return (cur_x * to_float((cur_x < top_x)))
fin_x = tf.foldl(next_x, tf.range((k - 1)), initializer=tf.stop_gradient(x), parallel_iterations=2, back_prop=False)
return tf.stop_gradient(tf.reduce_max(fin_x, axis=(- 1), keep_dims=True))
|
Compute the k-th top element of x on the last axis iteratively.
This assumes values in x are non-negative, rescale if needed.
It is often faster than tf.nn.top_k for small k, especially if k < 30.
Note: this does not support back-propagation, it stops gradients!
Args:
x: a Tensor of non-negative numbers of type float.
k: a python integer.
Returns:
a float tensor of the same shape as x but with 1 on the last axis
that contains the k-th largest number in x.
|
codesearchnet
|
def vectorial_decomp(self, symbols):
try:
symbols = [s.vec for s in symbols]
N = sum(map((lambda s: len(s)), symbols))
symbols_ = Vector(N)
i = 0
for v in symbols:
for s in v:
symbols_[i] = s
i += 1
symbols = symbols_
except TypeError:
pass
return self.mba.vectorial_decomp(symbols, self.vec)
|
Compute the vectorial decomposition of the expression according to the given symbols.
symbols is a list that represents the input of the resulting
application. They are considered as a flattened vector of bits.
Args:
symbols: TODO
Returns:
An :class:`pytanque.App` object
Example:
>>> mba = MBA(4)
>>> x = mba.var('x')
>>> y = mba.var('y')
>>> e = x^y^6
>>> e.vectorial_decomp([x,y])
App NL = Vec([
0,
0,
0,
0
])
AffApp matrix = Mat([
[1, 0, 0, 0, 1, 0, 0, 0]
[0, 1, 0, 0, 0, 1, 0, 0]
[0, 0, 1, 0, 0, 0, 1, 0]
[0, 0, 0, 1, 0, 0, 0, 1]
])
AffApp cst = Vec([
0,
1,
1,
0
])
|
codesearchnet
|
def request(self, request_method, api_method, *args, **kwargs):
url = self._build_url(api_method)
resp = requests.request(request_method, url, *args, **kwargs)
try:
rv = resp.json()
except ValueError:
raise RequestFailedError(resp, 'not a json body')
if (not resp.ok):
raise RequestFailedError(resp, rv.get('error'))
return rv
|
Perform a request.
Args:
request_method: HTTP method for this request.
api_method: API method name for this request.
*args: Extra arguments to pass to the request.
**kwargs: Extra keyword arguments to pass to the request.
Returns:
A dict contains the request response data.
Raises:
RequestFailedError: Raises when BearyChat's OpenAPI responses
with status code != 2xx
|
codesearchnet
|
def must_exist(*components):
_path = path(*components)
if (not exists(_path)):
raise File404(_path)
return _path
|
Ensure path exists.
Arguments:
*components (str[]): Path components.
Returns:
str: File path.
Raises:
File404: If path does not exist.
|
codesearchnet
|
def unwrap(tensor):
while isinstance(tensor, (PrettyTensor, Loss)):
tensor = tensor.tensor
return tensor
|
Returns the underlying tensor if tensor is wrapped or tensor.
Args:
tensor: The tensor to unwrap.
Returns:
Tensor or if it is a pretty tensor, the unwrapped version.
Raises:
ValueError: if tensor holds a sequence.
|
juraj-google-style
|
def submit_job(self, job_config=None):
job_id = self._delegator._submit_bundle(self, job_config)
return self._instance.get_job(job_id)
|
Submit this Streams Application Bundle (sab file) to
its associated instance.
Args:
job_config(JobConfig): a job configuration overlay
Returns:
Job: Resulting job instance.
|
juraj-google-style
|
def log_cdf_laplace(x, name='log_cdf_laplace'):
with tf.name_scope(name):
x = tf.convert_to_tensor(value=x, name='x')
lower_solution = ((- np.log(2.0)) + x)
safe_exp_neg_x = tf.exp((- tf.abs(x)))
upper_solution = tf.math.log1p(((- 0.5) * safe_exp_neg_x))
return tf.where((x < 0.0), lower_solution, upper_solution)
|
Log Laplace distribution function.
This function calculates `Log[L(x)]`, where `L(x)` is the cumulative
distribution function of the Laplace distribution, i.e.
```L(x) := 0.5 * int_{-infty}^x e^{-|t|} dt```
For numerical accuracy, `L(x)` is computed in different ways depending on `x`,
```
x <= 0:
Log[L(x)] = Log[0.5] + x, which is exact
0 < x:
Log[L(x)] = Log[1 - 0.5 * e^{-x}], which is exact
```
Args:
x: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="log_cdf_laplace").
Returns:
`Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x.dtype` is not handled.
|
codesearchnet
|
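A NumPy check of the piecewise formula from the docstring; this re-derives the math only and is not the TensorFlow implementation above.

```python
import numpy as np

def log_cdf_laplace_np(x):
    x = np.asarray(x, dtype=np.float64)
    lower = -np.log(2.0) + x                      # x <= 0: Log[0.5] + x
    upper = np.log1p(-0.5 * np.exp(-np.abs(x)))   # x > 0:  Log[1 - 0.5 * e^{-x}]
    return np.where(x < 0.0, lower, upper)

x = np.array([-3.0, 0.0, 2.5])
cdf = np.where(x < 0, 0.5 * np.exp(x), 1 - 0.5 * np.exp(-x))   # closed-form Laplace CDF
assert np.allclose(log_cdf_laplace_np(x), np.log(cdf))
```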
def extract_annotation(self, node, var, name, stack, allowed_type_params: set[str] | None=None):
try:
typ = abstract_utils.get_atomic_value(var)
except abstract_utils.ConversionError:
self.ctx.errorlog.ambiguous_annotation(self.ctx.vm.frames, None, name)
return self.ctx.convert.unsolvable
typ = self._process_one_annotation(node, typ, name, stack)
if not typ:
return self.ctx.convert.unsolvable
if typ.formal and allowed_type_params is not None:
allowed_type_params = allowed_type_params | self.get_callable_type_parameter_names(typ)
if self.ctx.vm.frame.func and (isinstance(self.ctx.vm.frame.func.data, abstract.BoundFunction) or self.ctx.vm.frame.func.data.is_class_builder):
allowed_type_params.add('typing.Self')
illegal_params = []
for x in self.get_type_parameters(typ):
if not allowed_type_params.intersection([x.name, x.full_name]):
illegal_params.append(x.name)
if illegal_params:
self._log_illegal_params(illegal_params, stack, typ, name)
return self.ctx.convert.unsolvable
return typ
|
Returns an annotation extracted from 'var'.
Args:
node: The current node.
var: The variable to extract from.
name: The annotated name.
stack: The frame stack.
allowed_type_params: Type parameters that are allowed to appear in the
annotation. 'None' means all are allowed. If non-None, the result of
calling get_callable_type_parameter_names on the extracted annotation is
also added to the allowed set.
|
github-repos
|
def merge(left, right, how='inner', on=None, left_on=None, right_on=None, left_index=False, right_index=False, sort=False, suffixes=('_x', '_y'), copy=True, indicator=False, validate=None):
if (not isinstance(left, DataFrame)):
raise ValueError('can not merge DataFrame with instance of type {}'.format(type(right)))
return left.merge(right, how=how, on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index, sort=sort, suffixes=suffixes, copy=copy, indicator=indicator, validate=validate)
|
Database style join, where common columns in "on" are merged.
Args:
left: DataFrame.
right: DataFrame.
how: What type of join to use.
on: The common column name(s) to join on. If None, and left_on and
right_on are also None, will default to all commonly named
columns.
left_on: The column(s) on the left to use for the join.
right_on: The column(s) on the right to use for the join.
left_index: Use the index from the left as the join keys.
right_index: Use the index from the right as the join keys.
sort: Sort the join keys lexicographically in the result.
suffixes: Add this suffix to the common names not in the "on".
copy: Does nothing in our implementation
indicator: Adds a column named _merge to the DataFrame with
metadata from the merge about each row.
validate: Checks if merge is a specific type.
Returns:
A merged Dataframe
|
codesearchnet
|
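The wrapper above defers to `DataFrame.merge`, so its semantics match `pandas.merge`; a small pandas-only illustration (assuming pandas is installed):

```python
import pandas as pd

left = pd.DataFrame({'key': ['a', 'b'], 'x': [1, 2]})
right = pd.DataFrame({'key': ['b', 'c'], 'y': [3, 4]})

inner = pd.merge(left, right, how='inner', on='key')            # only the shared key 'b' survives
outer = pd.merge(left, right, how='outer', on='key', indicator=True)
assert list(inner['key']) == ['b']
assert set(outer['_merge']) == {'left_only', 'both', 'right_only'}
```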
def slice_hidden(x, hidden_size, num_blocks):
(batch_size, latent_dim, _) = common_layers.shape_list(x)
block_dim = hidden_size // num_blocks
x_sliced = tf.reshape(x, shape=[batch_size, latent_dim, num_blocks, block_dim])
return x_sliced
|
Slice encoder hidden state under num_blocks.
Args:
x: Encoder hidden state of shape [batch_size, latent_dim, hidden_size].
hidden_size: Dimension of the latent space.
num_blocks: Number of blocks in DVQ.
Returns:
Sliced states of shape [batch_size, latent_dim, num_blocks, block_dim].
|
codesearchnet
|
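A shape-only sketch of the same reshape using NumPy instead of TensorFlow, with `block_dim = hidden_size // num_blocks` as in the code above.

```python
import numpy as np

batch_size, latent_dim, hidden_size, num_blocks = 2, 5, 8, 4
x = np.zeros((batch_size, latent_dim, hidden_size), dtype=np.float32)

block_dim = hidden_size // num_blocks
x_sliced = x.reshape(batch_size, latent_dim, num_blocks, block_dim)
assert x_sliced.shape == (2, 5, 4, 2)
```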
def readuntil(self, token, size=0):
self.__append()
i = self.buf.find(token, self.pos)
if (i < 0):
index = max((len(token) - 1), size)
newpos = max((len(self.buf) - index), self.pos)
data = self.buf[self.pos:newpos]
self.pos = newpos
self.__discard()
return (False, data)
newpos = (i + len(token))
data = self.buf[self.pos:newpos]
self.pos = newpos
self.__discard()
return (True, data)
|
Reads data from the FIFO until a token is encountered.
If no token is encountered as much data is read from the FIFO as
possible keeping in mind that the FIFO must retain enough data to
perform matches for the token across writes.
Args:
token: The token to read until.
size: The minimum amount of data that should be left in the FIFO.
This is only used if it is greater than the length of the
token. When omitted, this value will default to the length of
the token.
Returns: A tuple of (found, data) where found is a boolean indicating
whether the token was found, and data is all the data that could be
read from the FIFO.
Note: When a token is found the token is also read from the buffer and
returned in the data.
|
codesearchnet
|
def decorator(wrapped_decorator):
def helper(_func=None, **options):
def outer_wrapper(func):
@wrapping(func)
def inner_wrapper(*args, **kwds):
return wrapped_decorator(func, args, kwds, **options)
return inner_wrapper
if (_func is None):
return outer_wrapper
if options:
raise TypeError('positional arguments not supported')
return outer_wrapper(_func)
helper.wrapped_decorator = wrapped_decorator
return helper
|
Converts a function into a decorator that optionally accepts keyword
arguments in its declaration.
Example usage:
@utils.decorator
def decorator(func, args, kwds, op1=None):
... apply op1 ...
return func(*args, **kwds)
# Form (1), vanilla
@decorator
foo(...)
...
# Form (2), with options
@decorator(op1=5)
foo(...)
...
Args:
wrapped_decorator: A function that accepts positional args (func, args,
kwds) and any additional supported keyword arguments.
Returns:
A decorator with an additional 'wrapped_decorator' property that is set to
the original function.
|
codesearchnet
|
def run_inference(examples, serving_bundle):
batch_size = 64
if (serving_bundle.estimator and serving_bundle.feature_spec):
preds = serving_bundle.estimator.predict((lambda : tf.data.Dataset.from_tensor_slices(tf.parse_example([ex.SerializeToString() for ex in examples], serving_bundle.feature_spec)).batch(batch_size)))
if serving_bundle.use_predict:
preds_key = serving_bundle.predict_output_tensor
elif (serving_bundle.model_type == 'regression'):
preds_key = 'predictions'
else:
preds_key = 'probabilities'
values = []
for pred in preds:
values.append(pred[preds_key])
return common_utils.convert_prediction_values(values, serving_bundle)
elif serving_bundle.custom_predict_fn:
values = serving_bundle.custom_predict_fn(examples)
return common_utils.convert_prediction_values(values, serving_bundle)
else:
return platform_utils.call_servo(examples, serving_bundle)
|
Run inference on examples given model information
Args:
examples: A list of examples that matches the model spec.
serving_bundle: A `ServingBundle` object that contains the information to
make the inference request.
Returns:
A ClassificationResponse or RegressionResponse proto.
|
codesearchnet
|
def matmul(x1, x2):
if any_symbolic_tensors((x1, x2)):
return Matmul().symbolic_call(x1, x2)
return backend.numpy.matmul(x1, x2)
|
Matrix product of two tensors.
- If both tensors are 1-dimensional, the dot product (scalar) is returned.
- If either tensor is N-D, N > 2, it is treated as a stack of matrices
residing in the last two indexes and broadcast accordingly.
- If the first tensor is 1-D, it is promoted to a matrix by prepending
a 1 to its dimensions. After matrix multiplication the prepended
1 is removed.
- If the second tensor is 1-D, it is promoted to a matrix by appending a 1
to its dimensions. After matrix multiplication the appended 1 is removed.
Args:
x1: First tensor.
x2: Second tensor.
Returns:
Output tensor, matrix product of the inputs.
|
github-repos
|
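The broadcasting rules listed above follow the NumPy convention; a NumPy-only sketch of the resulting shapes:

```python
import numpy as np

a = np.ones((2, 3, 4))                               # stack of two 3x4 matrices
b = np.ones((4, 5))                                  # broadcast against the whole stack
assert np.matmul(a, b).shape == (2, 3, 5)

v = np.ones(3)
assert np.matmul(v, np.ones((3, 2))).shape == (2,)   # 1-D first arg: prepended 1 is removed
assert np.matmul(v, v).shape == ()                   # 1-D x 1-D: scalar dot product
```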
def add_time_step(self, **create_time_step_kwargs):
ts = time_step.TimeStep.create_time_step(**create_time_step_kwargs)
assert isinstance(ts, time_step.TimeStep)
self._time_steps.append(ts)
|
Creates a time-step and appends it to the list.
Args:
**create_time_step_kwargs: Forwarded to
time_step.TimeStep.create_time_step.
|
juraj-google-style
|
def local_set_state(self, device, state, id_override=None, type_override=None):
if ALLOW_LOCAL_CONTROL:
if (device.local_id() is not None):
hub = HUBS.get(device.hub_id())
if ((hub is None) or (hub['token'] is None)):
return self.set_device_state(device, state, id_override, type_override)
else:
return self.set_device_state(device, state, id_override, type_override)
_LOGGER.info('Setting local state')
local_id = (id_override or device.local_id().split('.')[0])
object_type = (type_override or device.object_type())
LOCAL_API_HEADERS['Authorization'] = ('Bearer ' + hub['token'])
url_string = 'https:
try:
arequest = requests.put(url_string, data=json.dumps(state), headers=LOCAL_API_HEADERS, verify=False, timeout=3)
except requests.exceptions.RequestException:
_LOGGER.error('Error sending local control request. Sending request online')
return self.set_device_state(device, state, id_override, type_override)
response_json = arequest.json()
_LOGGER.debug('%s', response_json)
temp_state = device.json_state
for (key, value) in response_json['data']['last_reading'].items():
temp_state['last_reading'][key] = value
return temp_state
else:
return self.set_device_state(device, state, id_override, type_override)
|
Set device state via local API, and fall back to online API.
Args:
device (WinkDevice): The device the change is being requested for.
state (Dict): The state being requested.
id_override (String, optional): A device ID used to override the
passed in device's ID. Used to make changes on sub-devices.
i.e. Outlet in a Powerstrip. The Parent device's ID.
type_override (String, optional): Used to override the device type
when a device inherits from a device other than WinkDevice.
Returns:
response_json (Dict): The API's response in dictionary format
|
codesearchnet
|
def get_elements_between_bands(self, band_i, band_j):
if ((band_i < 1) or (band_i > self.nb_bands) or (band_j < 1) or (band_j > self.nb_bands)):
raise ValueError('Band index out of bounds')
return self.data[:, band_i - 1, band_j - 1, :]
|
Method returning a numpy array with elements
[cdum_x_real, cdum_x_imag, cdum_y_real, cdum_y_imag, cdum_z_real, cdum_z_imag]
between bands band_i and band_j (vasp 1-based indexing) for all kpoints.
Args:
band_i (Integer): Index of band i
band_j (Integer): Index of band j
Returns:
a numpy list of elements for each kpoint
|
codesearchnet
|
def _get_anchor(module_to_name, fullname):
if not _anchor_re.match(fullname):
raise ValueError("'%s' is not a valid anchor" % fullname)
anchor = fullname
for module_name in module_to_name.values():
if fullname.startswith(module_name + "."):
rest = fullname[len(module_name)+1:]
if len(anchor) > len(rest):
anchor = rest
return anchor
|
Turn a full member name into an anchor.
Args:
module_to_name: Dictionary mapping modules to short names.
fullname: Fully qualified name of symbol.
Returns:
HTML anchor string. The longest module name prefix of fullname is
removed to make the anchor.
Raises:
ValueError: If fullname uses characters invalid in an anchor.
|
juraj-google-style
|
def decode_image_tokens(self, image_tokens: torch.Tensor):
decoded_image = self.model.vqmodel.decode(image_tokens)
decoded_image = decoded_image.permute(0, 2, 3, 1)
return decoded_image
|
Decodes generated image tokens from language model to continuous pixel values
with VQGAN module via upsampling.
Args:
image_tokens (`torch.LongTensor` of shape `(batch_size, num_of_tokens)`):
The tensors corresponding to the input images.
|
github-repos
|
def _bits_in_condition(self, cond):
all_bits = []
if cond is not None:
all_bits.extend([(cond[0], j) for j in range(self.cregs[cond[0].name].size)])
return all_bits
|
Return a list of bits in the given condition.
Args:
cond (tuple or None): optional condition (ClassicalRegister, int)
Returns:
list[(ClassicalRegister, idx)]: list of bits
|
juraj-google-style
|
def gaussian_pdf(std=10.0, mean=0.0):
norm_const = 1.0
def pdf(x):
return ((norm_const * np.exp(((- 0.5) * (((x - mean) / std) ** 2)))) * np.sin(((np.pi / 180.0) * x)))
norm_dev = quad(pdf, 0.0, 180.0)[0]
norm_const /= norm_dev
return pdf
|
Gaussian PDF for orientation averaging.
Args:
std: The standard deviation in degrees of the Gaussian PDF
mean: The mean in degrees of the Gaussian PDF. This should be a number
in the interval [0, 180)
Returns:
pdf(x), a function that returns the value of the spherical Jacobian-
normalized Gaussian PDF with the given STD at x (degrees). It is
normalized for the interval [0, 180].
|
codesearchnet
|
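Usage check for `gaussian_pdf`, assuming the function above is importable together with its `numpy`/`scipy.integrate.quad` dependencies: the returned PDF integrates to 1 over [0, 180] degrees.

```python
from scipy.integrate import quad

pdf = gaussian_pdf(std=10.0, mean=0.0)
total, _ = quad(pdf, 0.0, 180.0)
assert abs(total - 1.0) < 1e-6
```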
def _add_tag(self, tag):
tags = self.data.get('tags', None)
if tags:
if (tag in [x['name'] for x in tags]):
return False
else:
tags = list()
tags.append({'name': tag})
self.data['tags'] = tags
return True
|
Add a tag
Args:
tag (str): Tag to add
Returns:
bool: True if tag added or False if tag already present
|
codesearchnet
|
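A hypothetical harness for `_add_tag` above: any object carrying a `data` dict is enough to exercise it, no ThreatConnect client required.

```python
class _Thing:
    # Minimal stand-in exposing only the `data` attribute that _add_tag touches.
    def __init__(self):
        self.data = {}

_Thing._add_tag = _add_tag    # attach the function above as a method

t = _Thing()
assert t._add_tag('alpha') is True
assert t._add_tag('alpha') is False            # second add is a no-op
assert t.data['tags'] == [{'name': 'alpha'}]
```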
def fuzzUsufy(fDomains=None, fFuzzStruct=None):
if (fFuzzStruct == None):
fuzzingStructures = ['http:
else:
try:
fuzzingStructures = fFuzzStruct.read().splitlines()
except:
print(('Usufy could NOT open the following file: ' + fFuzzStruct))
res = {}
lines = fDomains.read().splitlines()
for l in lines:
domain = l.split()[0]
print((('Performing tests for' + domain) + '...'))
nick = l.split()[1]
possibleURL = []
for struct in fuzzingStructures:
urlToTry = struct.replace('<DOMAIN>', domain)
test = urlToTry.replace('<USERNAME>', nick.lower())
print((('Processing ' + test) + '...'))
i3Browser = browser.Browser()
try:
html = i3Browser.recoverURL(test)
if (nick in html):
possibleURL.append(test)
print(general.success('\tPossible usufy found!!!\n'))
except:
print('The resource could not be downloaded.')
res[domain] = possibleURL
print(json.dumps(res, indent=2))
return res
|
Method to guess the usufy path against a list of domains or subdomains.
Args:
-----
fDomains: A list of strings containing the domains and (optionally) a
nick.
fFuzzStruct: A list of strings containing the transforms to be
performed.
Returns:
--------
dict: A dictionary of the form of `{"domain": "url"}`.
|
codesearchnet
|
def update(self, force=False):
if self.is_404 and not force:
return 0
if self._last_modified:
headers = {'If-Modified-Since': self._last_modified}
else:
headers = None
try:
res = self._board._requests_session.get(self._api_url, headers=headers)
except:
return 0
if res.status_code == 304:
return 0
elif res.status_code == 404:
self.is_404 = True
self._board._thread_cache.pop(self.id, None)
return 0
elif res.status_code == 200:
if self.is_404:
self.is_404 = False
self._board._thread_cache[self.id] = self
self.want_update = False
self.omitted_images = 0
self.omitted_posts = 0
self._last_modified = res.headers['Last-Modified']
posts = res.json()['posts']
original_post_count = len(self.replies)
self.topic = Post(self, posts[0])
if self.last_reply_id and not force:
self.replies.extend(Post(self, p) for p in posts if p['no'] > self.last_reply_id)
else:
self.replies[:] = [Post(self, p) for p in posts[1:]]
new_post_count = len(self.replies)
post_count_delta = new_post_count - original_post_count
if not post_count_delta:
return 0
self.last_reply_id = self.replies[-1].post_number
return post_count_delta
else:
res.raise_for_status()
|
Fetch new posts from the server.
Arguments:
force (bool): Force a thread update, even if thread has 404'd.
Returns:
int: How many new posts have been fetched.
|
juraj-google-style
|
def construct_graph(sakefile, settings):
verbose = settings['verbose']
sprint = settings['sprint']
G = nx.DiGraph()
sprint('Going to construct Graph', level='verbose')
for target in sakefile:
if (target == 'all'):
continue
if ('formula' not in sakefile[target]):
for atomtarget in sakefile[target]:
if (atomtarget == 'help'):
continue
sprint("Adding '{}'".format(atomtarget), level='verbose')
data_dict = sakefile[target][atomtarget]
data_dict['parent'] = target
G.add_node(atomtarget, **data_dict)
else:
sprint("Adding '{}'".format(target), level='verbose')
G.add_node(target, **sakefile[target])
sprint('Nodes are built\nBuilding connections', level='verbose')
for node in G.nodes(data=True):
sprint('checking node {} for dependencies'.format(node[0]), level='verbose')
for (k, v) in node[1].items():
if (v is None):
node[1][k] = []
if ('output' in node[1]):
for (index, out) in enumerate(node[1]['output']):
node[1]['output'][index] = clean_path(node[1]['output'][index])
if ('dependencies' not in node[1]):
continue
sprint('it has dependencies', level='verbose')
connects = []
for (index, dep) in enumerate(node[1]['dependencies']):
dep = os.path.normpath(dep)
shrt = 'dependencies'
node[1]['dependencies'][index] = clean_path(node[1][shrt][index])
for node in G.nodes(data=True):
connects = []
if ('dependencies' not in node[1]):
continue
for dep in node[1]['dependencies']:
matches = check_for_dep_in_outputs(dep, verbose, G)
if (not matches):
continue
for match in matches:
sprint('Appending {} to matches'.format(match), level='verbose')
connects.append(match)
if connects:
for connect in connects:
G.add_edge(connect, node[0])
return G
|
Takes the sakefile dictionary and builds a NetworkX graph
Args:
A dictionary that is the parsed Sakefile (from sake.py)
The settings dictionary
Returns:
A NetworkX graph
|
codesearchnet
|
def should_drop(self):
if self._drop_if_none and self.value is None:
return True
if self._drop_if_default and self.value == self._default:
return True
return False
|
Return True if the item should be dropped, or False if it should not
be dropped. This depends on the drop_if_none, and drop_if_default calls.
Returns:
True or False; depending on whether the item should be dropped or kept.
|
github-repos
|
def auto_convert_cell_no_flags(cell, units=None, parens_as_neg=True):
units = (units if (units != None) else {})
return auto_convert_cell(flagable=Flagable(), cell=cell, position=None, worksheet=0, flags={}, units=units, parens_as_neg=parens_as_neg)
|
Performs a first step conversion of the cell to check
its type or try to convert if a valid conversion exists.
This version of conversion doesn't flag changes nor store
cell units.
Args:
units: The dictionary holder for cell units.
parens_as_neg: Converts numerics surrounded by parens to
negative values
|
codesearchnet
|
def enrich_json_objects_by_object_type(request, value):
time_start_globally = time()
if isinstance(value, list):
json = [x.to_json() if hasattr(x, "to_json") else x for x in value]
else:
if isinstance(value, dict):
json = value
else:
json = value.to_json()
objects, nested = _collect_json_objects(json, by='object_type')
for enricher_info in _get_OBJECT_TYPE_ENRICHER_ORDER():
if len(enricher_info['object_types']) > 0:
enricher_objects = flatten([objects.get(object_type, []) for object_type in enricher_info['object_types']])
enricher_nested = any([nested.get(object_type, False) for object_type in enricher_info['object_types']])
else:
enricher_objects = flatten(objects.values())
enricher_nested = any(nested.values())
if len(enricher_objects) > 0:
time_start = time()
enricher_info['enricher'](request, enricher_objects, enricher_nested)
LOGGER.debug('enrichment "{}" took {} seconds'.format(enricher_info['enricher_name'], time() - time_start))
if not enricher_info['pure']:
objects, nested = _collect_json_objects(json, by='object_type')
LOGGER.debug('The whole enrichment of json objects by their object_type took {} seconds.'.format(time() - time_start_globally))
return json
|
Take the given value and start enrichment by object_type.
Args:
request (django.http.request.HttpRequest): request which is currently processed
value (dict|list|django.db.models.Model):
in case of django.db.models.Model object (or list of these
objects), to_json method is invoked
Returns:
dict|list
|
juraj-google-style
|
def handle_malformed_config(error: MalformedConfigError) -> ResponseReturnValue:
return (DQMResponse(name='MalformedConfigError', description=str(error), code=400), 400)
|
DQM Malformed Config Response.
Args:
* error: Config error
Returns:
* DQMResponse for the error with a 400 status code
|
github-repos
|
def start_range(self, line, membership):
last = self._transitions[-1] if self._transitions else -1
if line < last:
raise ValueError('Line number less than previous start_range() call.')
previous = len(self._transitions) % 2 == 1
if membership == previous:
return
elif line == last:
self._transitions.pop()
else:
self._transitions.append(line)
|
Start a range of lines that are either included/excluded from the set.
Args:
line: A line number.
membership: If True, lines >= line are included in the set (starting a
range), otherwise they are excluded (ending a range).
Raises:
ValueError: if line is less than that of a previous call to start_range().
|
github-repos
|
def history(self, condition: Optional[Callable[['Origin'], bool]]=None) -> List['Origin']:
condition = condition or (lambda o: True)
current = self
history = []
while current is not None:
if condition(current):
history.append(current)
current = getattr(current.source, 'sym_origin', None)
history.reverse()
return history
|
Returns a history of origins with an optional filter.
Args:
condition: An optional callable object with signature
(origin) -> should_list. If None, all origins will be listed.
Returns:
A list of filtered origin from the earliest (root) to the most recent.
|
github-repos
|
def connect(self, db_uri, debug=False):
kwargs = {'echo': debug, 'convert_unicode': True}
if ('mysql' in db_uri):
kwargs['pool_recycle'] = 3600
elif ('://' not in db_uri):
logger.debug('detected sqlite path URI: {}'.format(db_uri))
db_path = os.path.abspath(os.path.expanduser(db_uri))
db_uri = 'sqlite:///{}'.format(db_path)
self.engine = create_engine(db_uri, **kwargs)
logger.debug('connection established successfully')
BASE.metadata.bind = self.engine
self.session = scoped_session(sessionmaker(bind=self.engine))
self.query = self.session.query
return self
|
Configure connection to a SQL database.
Args:
db_uri (str): path/URI to the database to connect to
debug (Optional[bool]): whether to output logging information
|
codesearchnet
|
def write_config(config, config_path=CONFIG_PATH):
if not os.path.exists(config_path):
os.makedirs(os.path.dirname(config_path))
with open(config_path, 'w', encoding='utf-8') as f:
config.write(f)
|
Write the config to the output path.
Creates the necessary directories if they aren't there.
Args:
config (configparser.ConfigParser): A ConfigParser.
|
juraj-google-style
|
def cylindrical_vert(script, radius=1.0, inside=True):
if inside:
function = 'sqrt(x^2+y^2)<={}'.format(radius)
else:
function = 'sqrt(x^2+y^2)>={}'.format(radius)
vert_function(script, function=function)
return None
|
Select all vertices within a cylindrical radius
Args:
radius (float): radius of the cylinder
inside (bool): if True, select vertices inside the radius; otherwise select vertices outside it
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA
|
codesearchnet
|
def GetFileEntryByPathSpec(self, path_spec):
return encoded_stream_file_entry.EncodedStreamFileEntry(
self._resolver_context, self, path_spec, is_root=True, is_virtual=True)
|
Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
EncodedStreamFileEntry: a file entry or None if not available.
|
juraj-google-style
|
def __call__(self, *x_batch, **kwargs) -> Union[List, np.ndarray]:
with self.graph.as_default():
K.set_session(self.sess)
return self._net.predict_on_batch(x_batch, **kwargs)
|
Predicts answers on batch elements.
Args:
instance: a batch to predict answers on
|
juraj-google-style
|
def json_to_entity(tc_data, value_fields, resource_type, resource_type_parent):
if (not isinstance(tc_data, list)):
tc_data = [tc_data]
entity_array = []
for d in tc_data:
entity = {'id': d.get('id'), 'webLink': d.get('webLink')}
values = []
if ('summary' in d):
values.append(d.get('summary'))
else:
for field in value_fields:
if (d.get(field) is not None):
values.append(d.get(field))
entity['value'] = ' : '.join(values)
if (d.get('type') is not None):
entity['type'] = d.get('type')
else:
entity['type'] = resource_type
if (resource_type_parent in ['Indicator']):
entity['confidence'] = d.get('confidence')
entity['rating'] = d.get('rating')
entity['threatAssessConfidence'] = d.get('threatAssessConfidence')
entity['threatAssessRating'] = d.get('threatAssessRating')
entity['dateLastModified'] = d.get('lastModified')
if (resource_type_parent in ['Indicator', 'Group']):
if ('owner' in d):
entity['ownerName'] = d['owner']['name']
else:
entity['ownerName'] = d.get('ownerName')
entity['dateAdded'] = d.get('dateAdded')
if (resource_type_parent in ['Victim']):
entity['ownerName'] = d.get('org')
entity_array.append(entity)
return entity_array
|
Convert ThreatConnect JSON response to a TCEntityArray.
.. Attention:: This method is subject to frequent changes.
Args:
tc_data (dictionary): Array of data returned from TC API call.
value_fields (list): Field names that contain the "value" data.
resource_type (string): The resource type of the tc_data provided.
resource_type_parent (string): The resource parent type of the tc_data provided.
Returns:
(list): A list representing a TCEntityArray.
|
codesearchnet
|
def model_call_event(self) -> asyncio.Event:
return self._model_call_event
|
Returns an event that is set when the wrapped processor has all parts.
The event is set when the wrapped processor has all the input parts and
is about to start generating the output.
The event starts in a cleared state when the first part of the input
stream is yielded. It is also cleared at the end of the wrapped processor,
when all the output parts have been yielded.
Its default value is unset and this event is set only for a short time
during the call.
Returns:
An event that is set when the model call is started, that is when all the
input parts have been sent to the wrapped processor.
|
github-repos
|
def determine_git_ref(self, config):
ref_config_keys = 0
for i in ['commit', 'tag', 'branch']:
if config.get(i):
ref_config_keys += 1
if (ref_config_keys > 1):
raise ImportError("Fetching remote git sources failed: conflicting revisions (e.g. 'commit', 'tag', 'branch') specified for a package source")
if config.get('commit'):
ref = config['commit']
elif config.get('tag'):
ref = config['tag']
else:
ref = self.git_ls_remote(config['uri'], self.determine_git_ls_remote_ref(config))
if ((sys.version_info[0] > 2) and isinstance(ref, bytes)):
return ref.decode()
return ref
|
Determine the ref to be used for 'git checkout'.
Args:
config (dict): git config dictionary
Returns:
str: A commit id or tag name
|
codesearchnet
|
def get_ip_reports(self, ips):
api_name = 'virustotal-ip-address-reports'
(all_responses, ips) = self._bulk_cache_lookup(api_name, ips)
responses = self._request_reports('ip', ips, 'ip-address/report')
for (ip, response) in zip(ips, responses):
if self._cache:
self._cache.cache_value(api_name, ip, response)
all_responses[ip] = response
return all_responses
|
Retrieves the most recent VT info for a set of ips.
Args:
ips: list of IPs.
Returns:
A dict with the IP as key and the VT report as value.
|
codesearchnet
|
def get_alignment_df(a_aln_seq, b_aln_seq, a_seq_id=None, b_seq_id=None):
if (len(a_aln_seq) != len(b_aln_seq)):
raise ValueError('Sequence lengths not equal - was an alignment run?')
if (not a_seq_id):
a_seq_id = 'a_seq'
if (not b_seq_id):
b_seq_id = 'b_seq'
a_aln_seq = ssbio.protein.sequence.utils.cast_to_str(a_aln_seq)
b_aln_seq = ssbio.protein.sequence.utils.cast_to_str(b_aln_seq)
a_idx = 1
b_idx = 1
appender = []
for (i, (a, b)) in enumerate(zip(a_aln_seq, b_aln_seq)):
to_append = {}
if ((a == b) and (a != '-') and (b != '-')):
aa_flag = 'match'
elif ((a != b) and (a == '-') and (b != '-')):
aa_flag = 'insertion'
elif ((a != b) and (a != '-') and (b == '-')):
aa_flag = 'deletion'
elif ((a != b) and (a != '-') and (b == 'X')):
aa_flag = 'unresolved'
elif ((a != b) and (b != '-') and (a == 'X')):
aa_flag = 'unresolved'
elif ((a != b) and (a != '-') and (b != '-')):
aa_flag = 'mutation'
to_append['id_a'] = a_seq_id
to_append['id_b'] = b_seq_id
to_append['type'] = aa_flag
if ((aa_flag == 'match') or (aa_flag == 'unresolved') or (aa_flag == 'mutation')):
to_append['id_a_aa'] = a
to_append['id_a_pos'] = int(a_idx)
to_append['id_b_aa'] = b
to_append['id_b_pos'] = int(b_idx)
a_idx += 1
b_idx += 1
if (aa_flag == 'deletion'):
to_append['id_a_aa'] = a
to_append['id_a_pos'] = int(a_idx)
a_idx += 1
if (aa_flag == 'insertion'):
to_append['id_b_aa'] = b
to_append['id_b_pos'] = int(b_idx)
b_idx += 1
appender.append(to_append)
cols = ['id_a', 'id_b', 'type', 'id_a_aa', 'id_a_pos', 'id_b_aa', 'id_b_pos']
alignment_df = pd.DataFrame.from_records(appender, columns=cols)
alignment_df = alignment_df.fillna(value=np.nan)
return alignment_df
|
Summarize two alignment strings in a dataframe.
Args:
a_aln_seq (str): Aligned sequence string
b_aln_seq (str): Aligned sequence string
a_seq_id (str): Optional ID of a_seq
b_seq_id (str): Optional ID of b_aln_seq
Returns:
DataFrame: a per-residue level annotation of the alignment
|
codesearchnet
|
def find_gaps(self, index=False):
return self.__find_incongruities(op=operator.lt, index=index)
|
Finds gaps in a striplog.
Args:
index (bool): If True, returns indices of intervals with
gaps after them.
Returns:
Striplog: A striplog of all the gaps. A sort of anti-striplog.
|
juraj-google-style
|
def write(data, file_name, worksheet_names=None):
if re.search(XML_EXT_REGEX, file_name):
return write_xml(data, file_name, worksheet_names=worksheet_names)
elif re.search(XLSX_EXT_REGEX, file_name):
return write_xlsx(data, file_name, worksheet_names=worksheet_names)
elif re.search(XLS_EXT_REGEX, file_name):
return write_xls(data, file_name, worksheet_names=worksheet_names)
elif re.search(CSV_EXT_REGEX, file_name):
return write_csv(data, file_name)
else:
return write_csv(data, file_name)
|
Writes 2D tables to file.
Args:
data: 2D list of tables/worksheets.
file_name: Name of the output file (determines type).
worksheet_names: A list of worksheet names (optional).
|
codesearchnet
|
def _terminate_all(self, sig=None):
sig = sig or getattr(signal, 'SIGKILL', signal.SIGTERM)
for (task_type, task_id), p in self._processes.items():
if p.exitcode is not None:
logging.info('%s-%d has already exited. Not terminating.', task_type, task_id)
continue
try:
os.kill(p.pid, sig)
self._terminated.add((task_type, task_id))
logging.info('%s-%d terminated with signal %r.', task_type, task_id, sig)
except ProcessLookupError:
logging.info('Attempting to kill %s-%d but it does not exist.', task_type, task_id)
|
Terminates all subprocesses.
The caller is required to hold self._process_lock.
Args:
sig: the signal used to terminate the process. The default is SIGKILL.
|
github-repos
|
def poll_stack(self):
logging.info('polling stack status, POLL_INTERVAL={}'.format(POLL_INTERVAL))
time.sleep(POLL_INTERVAL)
completed_states = [
'CREATE_COMPLETE',
'UPDATE_COMPLETE',
'DELETE_COMPLETE'
]
stack_name = self._config.get('environment', {}).get('stack_name', None)
while True:
try:
response = self._cloudFormation.describe_stacks(StackName=stack_name)
stack = response['Stacks'][0]
current_status = stack['StackStatus']
logging.info('current status of {}: {}'.format(stack_name, current_status))
if current_status.endswith('COMPLETE') or current_status.endswith('FAILED'):
if current_status in completed_states:
return True
else:
return False
time.sleep(POLL_INTERVAL)
except ClientError as wtf:
if str(wtf).find('does not exist') == -1:
logging.error('Exception caught in wait_for_stack(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
else:
logging.info('{} is gone'.format(stack_name))
return True
except Exception as wtf:
logging.error('Exception caught in wait_for_stack(): {}'.format(wtf))
traceback.print_exc(file=sys.stdout)
return False
|
Spin in a loop until the CloudFormation operation either succeeds or fails
Args:
None
Returns:
Good or bad; True or False
|
juraj-google-style
|
def _GetClientIdFromQueue(q):
split = q.Split()
if not split or len(split) < 2:
return None
split = [s.lower() for s in split]
str_client_id, tasks_marker = split
if not str_client_id.startswith("c.") or tasks_marker != "tasks":
return None
str_client_id = "C" + str_client_id[1:]
return str_client_id
|
Returns q's client id, if q is a client task queue, otherwise None.
Args:
q: rdfvalue.RDFURN
Returns:
string or None
|
juraj-google-style
|
def _ReadAttributeValueString(self, attribute_values_data, record_offset, attribute_values_data_offset, attribute_value_offset):
if (attribute_value_offset == 0):
return None
data_type_map = self._GetDataTypeMap('keychain_string')
file_offset = ((record_offset + attribute_values_data_offset) + attribute_value_offset)
attribute_value_offset -= (attribute_values_data_offset + 1)
attribute_value_data = attribute_values_data[attribute_value_offset:]
try:
string_attribute_value = self._ReadStructureFromByteStream(attribute_value_data, file_offset, data_type_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError('Unable to map string attribute value data at offset: 0x{0:08x} with error: {1!s}'.format(file_offset, exception))
return string_attribute_value.string
|
Reads a string attribute value.
Args:
attribute_values_data (bytes): attribute values data.
record_offset (int): offset of the record relative to the start of
the file.
attribute_values_data_offset (int): offset of the attribute values data
relative to the start of the record.
attribute_value_offset (int): offset of the attribute relative to
the start of the record.
Returns:
str: string value or None if attribute value offset is not set.
Raises:
ParseError: if the attribute value cannot be read.
|
codesearchnet
|
def get_occupation(self, atom_index, orbital):
orbital_index = self.orbitals.index(orbital)
return {spin: np.sum(d[:, :, atom_index, orbital_index] * self.weights[:, None]) for spin, d in self.data.items()}
|
Returns the occupation for a particular orbital of a particular atom.
Args:
atom_index (int): Index of atom in the PROCAR. It should be noted
that VASP uses 1-based indexing for atoms, but this is
converted to 0-based indexing in this parser to be
consistent with representation of structures in pymatgen.
orbital (str): An orbital. If it is a single character, e.g., s,
p, d or f, the sum of all s-type, p-type, d-type or f-type
orbitals occupations are returned respectively. If it is a
specific orbital, e.g., px, dxy, etc., only the occupation
of that orbital is returned.
Returns:
Sum occupation of orbital of atom.
|
codesearchnet
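A possible usage sketch with pymatgen's Procar parser, assuming a PROCAR file from a spin-polarised VASP run is present in the working directory; the printed values are illustrative only.
from pymatgen.io.vasp.outputs import Procar

procar = Procar('PROCAR')
# Total d occupation of the first atom (0-based index), summed over k-points and bands.
print(procar.get_occupation(0, 'd'))    # e.g. {Spin.up: 4.87, Spin.down: 4.12}
# Occupation of a single orbital of the same atom.
print(procar.get_occupation(0, 'dxy'))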
|
def getUserCaPath(self, name):
cert = self.getUserCert(name)
if (cert is None):
return None
return self._getCaPath(cert)
|
Gets the path to the CA certificate that issued a given user keypair.
Args:
name (str): The name of the user keypair.
Examples:
Get the path to the CA cert which issued the cert for "myuser":
mypath = cdir.getUserCaPath('myuser')
Returns:
str: The path to the CA certificate if the keypair exists, otherwise None.
|
codesearchnet
|
def _configure_tls_parameters(parameters):
cert = config.conf["tls"]["certfile"]
key = config.conf["tls"]["keyfile"]
if cert and key:
_log.info(
"Authenticating with server using x509 (certfile: %s, keyfile: %s)",
cert,
key,
)
parameters.credentials = pika.credentials.ExternalCredentials()
else:
cert, key = None, None
if SSLOptions is None:
parameters.ssl = True
parameters.ssl_options = {
"keyfile": key,
"certfile": cert,
"ca_certs": config.conf["tls"]["ca_cert"],
"cert_reqs": ssl.CERT_REQUIRED,
"ssl_version": ssl.PROTOCOL_TLSv1_2,
}
else:
ssl_context = ssl.create_default_context()
if config.conf["tls"]["ca_cert"]:
try:
ssl_context.load_verify_locations(cafile=config.conf["tls"]["ca_cert"])
except ssl.SSLError as e:
raise ConfigurationException(
'The "ca_cert" setting in the "tls" section is invalid ({})'.format(
e
)
)
ssl_context.options |= ssl.OP_NO_SSLv2
ssl_context.options |= ssl.OP_NO_SSLv3
ssl_context.options |= ssl.OP_NO_TLSv1
ssl_context.options |= ssl.OP_NO_TLSv1_1
ssl_context.verify_mode = ssl.CERT_REQUIRED
ssl_context.check_hostname = True
if cert and key:
try:
ssl_context.load_cert_chain(cert, key)
except ssl.SSLError as e:
raise ConfigurationException(
'The "keyfile" setting in the "tls" section is invalid ({})'.format(
e
)
)
parameters.ssl_options = SSLOptions(
ssl_context, server_hostname=parameters.host
)
|
Configure the pika connection parameters for TLS based on the configuration.
This modifies the object provided to it. This accounts for whether or not
the new API based on the standard library's SSLContext is available for
pika.
Args:
parameters (pika.ConnectionParameters): The connection parameters to apply
TLS connection settings to.
|
juraj-google-style
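A minimal sketch of the SSLContext branch above for pika 1.x, assuming local certificate paths; the helper name, the port, and the fallback credentials are illustrative assumptions, not the library's configuration API.
import ssl

import pika

def tls_parameters(host, ca_cert, certfile=None, keyfile=None):
    # Build an SSLContext roughly mirroring the modern branch above.
    context = ssl.create_default_context(cafile=ca_cert)
    context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
    context.verify_mode = ssl.CERT_REQUIRED
    context.check_hostname = True
    credentials = pika.PlainCredentials('guest', 'guest')  # assumption: default broker account
    if certfile and keyfile:
        context.load_cert_chain(certfile, keyfile)
        credentials = pika.credentials.ExternalCredentials()
    return pika.ConnectionParameters(
        host=host,
        port=5671,  # assumption: the conventional AMQPS port
        ssl_options=pika.SSLOptions(context, server_hostname=host),
        credentials=credentials,
    )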
|
def new(self, injection_site_fn):
return _InjectionContext(injection_site_fn, binding_stack=[], scope_id=scoping.UNSCOPED, is_scope_usable_from_scope_fn=self._is_scope_usable_from_scope_fn)
|
Creates a _InjectionContext.
Args:
injection_site_fn: the initial function being injected into
Returns:
a new empty _InjectionContext in the default scope
|
codesearchnet
|
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
values_dict = {}
if registry_key.number_of_values > 0:
for registry_value in registry_key.GetValues():
value_name = registry_value.name or '(default)'
if registry_value.DataIsString():
value_string = '[{0:s}] {1:s}'.format(
registry_value.data_type_string, registry_value.GetDataAsObject())
elif registry_value.DataIsInteger():
value_string = '[{0:s}] {1:d}'.format(
registry_value.data_type_string, registry_value.GetDataAsObject())
elif registry_value.DataIsMultiString():
value_string = '[{0:s}] {1:s}'.format(
registry_value.data_type_string, ''.join(
registry_value.GetDataAsObject()))
else:
value_string = '[{0:s}]'.format(registry_value.data_type_string)
values_dict[value_name] = value_string
event_data = windows_events.WindowsRegistryEventData()
event_data.key_path = registry_key.path
event_data.offset = registry_key.offset
event_data.regvalue = values_dict
event_data.urls = self.URLS
event = time_events.DateTimeValuesEvent(
registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
if registry_key.number_of_subkeys == 0:
error_string = 'Key: {0:s} missing subkeys.'.format(registry_key.path)
parser_mediator.ProduceExtractionWarning(error_string)
return
for zone_key in registry_key.GetSubkeys():
path = '{0:s}\\{1:s}'.format(
registry_key.path, self._ZONE_NAMES[zone_key.name])
values_dict = {}
for value in zone_key.GetValues():
if not value.name:
continue
if value.DataIsString():
value_string = value.GetDataAsObject()
elif value.DataIsInteger():
value_integer = value.GetDataAsObject()
if value.name in self._KNOWN_PERMISSIONS_VALUE_NAMES:
value_string = self._CONTROL_VALUES_PERMISSIONS.get(
value_integer, 'UNKNOWN')
elif value.name == '1A00':
value_string = self._CONTROL_VALUES_1A00.get(
value_integer, 'UNKNOWN')
elif value.name == '1C00':
value_string = self._CONTROL_VALUES_1C00.get(
value_integer, 'UNKNOWN')
elif value.name == '1E05':
value_string = self._CONTROL_VALUES_SAFETY.get(
value_integer, 'UNKNOWN')
else:
value_string = '{0:d}'.format(value_integer)
else:
value_string = '[{0:s}]'.format(value.data_type_string)
if len(value.name) == 4 and value.name != 'Icon':
value_description = self._FEATURE_CONTROLS.get(value.name, 'UNKNOWN')
else:
value_description = self._FEATURE_CONTROLS.get(value.name, '')
if value_description:
feature_control = '[{0:s}] {1:s}'.format(
value.name, value_description)
else:
feature_control = '[{0:s}]'.format(value.name)
values_dict[feature_control] = value_string
event_data = windows_events.WindowsRegistryEventData()
event_data.key_path = path
event_data.offset = zone_key.offset
event_data.regvalue = values_dict
event_data.urls = self.URLS
event = time_events.DateTimeValuesEvent(
zone_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
|
juraj-google-style
|
def generate_support_dump(self, information, timeout=(- 1)):
uri = '{}/support-dumps'.format(self.data['uri'])
return self._helper.create(information, uri=uri, timeout=timeout)
|
Generates a support dump for the logical enclosure with the specified ID. A logical enclosure support dump
includes content for logical interconnects associated with that logical enclosure. By default, it also contains
appliance support dump content.
Args:
information (dict): Information to generate support dump.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Support dump.
|
codesearchnet
|
def bind(self, extension: Extension) -> 'DictMentor':
if not Extension.is_valid_extension(extension):
raise ValueError("Cannot bind extension due to missing interface requirements")
self._extensions.append(extension)
return self
|
Add any predefined or custom extension.
Args:
extension: Extension to add to the processor.
Returns:
The DictMentor itself for chaining.
|
juraj-google-style
|
def describe(self, version_name):
version_yaml = yaml.safe_dump(self.get_version_details(version_name),
default_flow_style=False)
print(version_yaml)
|
Print information about a specified model version.
Args:
version_name: the name of the version in short form, such as "v1".
|
juraj-google-style
|
def split(s, posix=True):
if isinstance(s, six.binary_type):
s = s.decode('utf-8')
return shlex.split(s, posix=posix)
|
Split the string s using shell-like syntax.
Args:
s (str): String to split
posix (bool): Use posix split
Returns:
list of str: List of string parts
|
codesearchnet
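Expected behaviour of the wrapper above, assuming it is importable from its module:
print(split('grep -r "hello world" ./src'))
# -> ['grep', '-r', 'hello world', './src']
print(split(b'echo done'))  # bytes input is decoded as UTF-8 first
# -> ['echo', 'done']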
|
def oxide_type(structure, relative_cutoff=1.1, return_nbonds=False):
ox_obj = OxideType(structure, relative_cutoff)
if return_nbonds:
return ox_obj.oxide_type, ox_obj.nbonds
else:
return ox_obj.oxide_type
|
Determines if an oxide is a peroxide/superoxide/ozonide/normal oxide
Args:
structure (Structure): Input structure.
relative_cutoff (float): Relative_cutoff * act. cutoff stipulates the
max distance two O atoms must be from each other.
return_nbonds (bool): Should number of bonds be requested?
|
juraj-google-style
|
def while_loop(self, context, step_method):
logger.debug('starting')
context['whileCounter'] = 0
if ((self.stop is None) and (self.max is None)):
logger.error(f'while decorator missing both max and stop.')
raise PipelineDefinitionError('the while decorator must have either max or stop, or both. But not neither.')
error_on_max = context.get_formatted_as_type(self.error_on_max, out_type=bool)
sleep = context.get_formatted_as_type(self.sleep, out_type=float)
if (self.max is None):
max = None
logger.info(f'while decorator will loop until {self.stop} evaluates to True at {sleep}s intervals.')
else:
max = context.get_formatted_as_type(self.max, out_type=int)
if (max < 1):
logger.info(f'max {self.max} is {max}. while only runs when max > 0.')
logger.debug('done')
return
if (self.stop is None):
logger.info(f'while decorator will loop {max} times at {sleep}s intervals.')
else:
logger.info(f'while decorator will loop {max} times, or until {self.stop} evaluates to True at {sleep}s intervals.')
if (not poll.while_until_true(interval=sleep, max_attempts=max)(self.exec_iteration)(context=context, step_method=step_method)):
if error_on_max:
logger.error(f'exhausted {max} iterations of while loop, and errorOnMax is True.')
if (self.stop and max):
raise LoopMaxExhaustedError(f'while loop reached {max} and {self.stop} never evaluated to True.')
else:
raise LoopMaxExhaustedError(f'while loop reached {max}.')
elif (self.stop and max):
logger.info(f'while decorator looped {max} times, and {self.stop} never evaluated to True.')
logger.debug('while loop done')
else:
logger.info(f'while loop done, stop condition {self.stop} evaluated True.')
logger.debug('done')
|
Run step inside a while loop.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate - after method execution will contain the new
updated context.
step_method: (method/function) This is the method/function that
will execute on every loop iteration. Signature is:
function(context)
|
codesearchnet
|
def generate_output(line='0', short=None, name=None, value=None, is_parent=False, colorize=True):
output = '{0}{1}{2}{3}{4}{5}{6}{7}\n'.format(
    LINES['{0}{1}'.format(line, 'C' if colorize else '')] if line in LINES.keys() else '',
    COLOR_DEPTH[line] if colorize and line in COLOR_DEPTH else '',
    ANSI['b'],
    short if short is not None else (name if name is not None else ''),
    '' if name is None or short is None else ' ({0})'.format(name),
    '' if name is None and short is None else ': ',
    ANSI['end'] if colorize else '',
    '' if is_parent else value)
return output
|
The function for formatting CLI output results.
Args:
line (:obj:`str`): The line number (0-4). Determines indentation.
Defaults to '0'.
short (:obj:`str`): The optional abbreviated name for a field.
See hr.py for values.
name (:obj:`str`): The optional name for a field. See hr.py for values.
value (:obj:`str`): The field data (required).
is_parent (:obj:`bool`): Set to True if the field value has sub-items
(dicts/lists). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI colors.
Defaults to True.
Returns:
str: The generated output.
|
codesearchnet
|
def run_using_threadpool(fn_to_execute, inputs, pool_size):
if not hasattr(threading.current_thread(), '_children'):
threading.current_thread()._children = weakref.WeakKeyDictionary()
pool = ThreadPool(min(pool_size, len(inputs)))
try:
old_level = logging.getLogger().level
return pool.map(fn_to_execute, inputs)
finally:
pool.terminate()
logging.getLogger().setLevel(old_level)
|
For internal use only; no backwards-compatibility guarantees.
Runs the given function on given inputs using a thread pool.
Args:
fn_to_execute: Function to execute
inputs: Inputs on which given function will be executed in parallel.
pool_size: Size of thread pool.
Returns:
Results retrieved after executing the given function on given inputs.
|
github-repos
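A hypothetical usage of the helper above, fetching several URLs in parallel; the URLs, pool size, and fetch function are arbitrary.
import urllib.request

def fetch_status(url):
    with urllib.request.urlopen(url, timeout=10) as response:
        return url, response.status

urls = ['https://example.com', 'https://example.org']
results = run_using_threadpool(fetch_status, urls, pool_size=4)
print(results)  # e.g. [('https://example.com', 200), ('https://example.org', 200)]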
|
def _GetPlistRootKey(self, file_entry):
file_object = file_entry.GetFileObject()
try:
plist_file = plist.PlistFile()
plist_file.Read(file_object)
except IOError as exception:
location = getattr(file_entry.path_spec, 'location', '')
raise errors.PreProcessFail('Unable to read plist file: {0:s} with error: {1!s}'.format(location, exception))
finally:
file_object.close()
return plist_file.root_key
|
Retrieves the root key of a plist file.
Args:
file_entry (dfvfs.FileEntry): file entry of the plist.
Returns:
dict[str, object]: plist root key.
Raises:
errors.PreProcessFail: if the preprocessing fails.
|
codesearchnet
|
def cli_print(msg, color='', end=None, file=sys.stdout, logger=_LOG):
if logger:
logger.debug('-> {}'.format(msg))
if CLI_QUIET:
return
if (end is None):
end = _linesep_for_file(file)
file.write('{color}{msg}{reset}{end}'.format(color=color, msg=msg, reset=colorama.Style.RESET_ALL, end=end))
|
Print the message to file and also log it.
This function is intended as a 'tee' mechanism to enable the CLI interface as
a first-class citizen, while ensuring that everything the operator sees also
has an analogous logging entry in the test record for later inspection.
Args:
msg: The message to print/log.
color: Optional colorama color string to be applied to the message. You can
concatenate colorama color strings together in order to get any set of
effects you want.
end: A custom line-ending string to print instead of newline.
file: A file object to which the bracketed text will be written. Intended
for use with CLI output file objects like sys.stdout.
logger: A logger to use, or None to disable logging.
|
codesearchnet
|
def get_uid(prefix=''):
object_name_uids = global_state.get_global_attribute('object_name_uids', default=collections.defaultdict(int), set_to_default=True)
object_name_uids[prefix] += 1
return object_name_uids[prefix]
|
Associates a string prefix with an integer counter.
Args:
prefix: String prefix to index.
Returns:
Unique integer ID.
Example:
>>> get_uid('dense')
1
>>> get_uid('dense')
2
|
github-repos
|
def generate_hdate(date: str, subtract_year: str) -> str:
try:
input_date = datetime.datetime.strptime(date, '%Y-%m-%d')
if input_date.month == 2 and input_date.day == 29:
input_date = input_date - datetime.timedelta(days=1)
subtract_year = int(subtract_year)
except (ValueError, TypeError):
logger.error('Invalid input.')
raise
hdate = input_date - relativedelta(years=subtract_year)
return hdate.strftime('%Y-%m-%d')
|
Generate a historical date by subtracting a specified number of years from the given date.
If input date is leap day (Feb 29), return Feb 28 even if target hdate is also a leap year.
This is expected in ECMWF API.
Args:
date (str): The input date in the format 'YYYY-MM-DD'.
subtract_year (str): The number of years to subtract.
Returns:
str: The historical date in the format 'YYYY-MM-DD'.
|
github-repos
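Expected behaviour of generate_hdate above, assuming python-dateutil is installed:
print(generate_hdate('2023-06-15', '5'))   # -> '2018-06-15'
print(generate_hdate('2024-02-29', '4'))   # leap day clamps to Feb 28 -> '2020-02-28'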
|
def add(self, value, date=None, return_value=False, key=None):
data = {}
if self._metric_id is None:
self.tcex.handle_error(715, [self._metric_name])
body = {'value': value}
if date is not None:
body['date'] = self.tcex.utils.format_datetime(date, date_format='%Y-%m-%dT%H:%M:%SZ')
if key is not None:
body['name'] = key
self.tcex.log.debug('metric data: {}'.format(body))
params = {}
if return_value:
params = {'returnValue': 'true'}
url = '/v2/customMetrics/{}/data'.format(self._metric_id)
r = self.tcex.session.post(url, json=body, params=params)
if r.status_code == 200 and 'application/json' in r.headers.get('content-type', ''):
data = r.json()
elif r.status_code == 204:
pass
else:
self.tcex.handle_error(710, [r.status_code, r.text])
return data
|
Add metrics data to collection.
Args:
value (str): The value of the metric.
date (str, optional): The optional date of the metric.
return_value (bool, default:False): Tell the API to return the updated metric value.
key (str, optional): The key value for keyed metrics.
Return:
dict: If return_value is True a dict with the current value for the time period
is returned.
|
juraj-google-style
|
def html_to_xhtml(html_unicode_string):
try:
assert isinstance(html_unicode_string, basestring)
except AssertionError:
raise TypeError
root = BeautifulSoup(html_unicode_string, 'html.parser')
try:
assert root.html is not None
except AssertionError:
raise ValueError(''.join(['html_unicode_string cannot be a fragment.',
'string is the following: %s', unicode(root)]))
root.html['xmlns'] = 'http://www.w3.org/1999/xhtml'
unicode_string = unicode(root.prettify(encoding='utf-8', formatter='html'), encoding='utf-8')
for tag in constants.SINGLETON_TAG_LIST:
unicode_string = unicode_string.replace(
'<' + tag + '/>',
'<' + tag + ' />')
return unicode_string
|
Converts html to xhtml
Args:
html_unicode_string: A (possible unicode) string representing HTML.
Returns:
A (possibly unicode) string representing XHTML.
Raises:
TypeError: Raised if input_string isn't a unicode string or string.
|
juraj-google-style
|
def cumprod(x, axis=0):
return math_ops.cumprod(x, axis=axis)
|
Cumulative product of the values in a tensor, alongside the specified axis.
Args:
x: A tensor or variable.
axis: An integer, the axis to compute the product.
Returns:
A tensor of the cumulative product of values of `x` along `axis`.
|
github-repos
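A small usage sketch, assuming TensorFlow 2.x where this wrapper is exposed as tf.keras.backend.cumprod; the commented values are the expected cumulative products along axis 1.
import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.constant([[1.0, 2.0, 3.0],
                 [4.0, 5.0, 6.0]])
print(K.cumprod(x, axis=1))
# -> [[  1.   2.   6.]
#     [  4.  20. 120.]]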
|
def revert_to(self):
response = self.resource.repo.api.http_request('PATCH', self.uri)
if response.status_code == 204:
logger.debug('reverting to previous version of resource, %s' % self.uri)
self._current_resource.refresh()
else:
raise Exception('HTTP %s, could not revert to resource version, %s' % (response.status_code, self.uri))
|
method to revert resource to this version by issuing PATCH
Args:
None
Returns:
None: sends PATCH request, and refreshes parent resource
|
juraj-google-style
|
def _zeros_slot(self, var, slot_name, op_name):
named_slots = self._slot_dict(slot_name)
if _var_key(var) not in named_slots:
new_slot_variable = slot_creator.create_zeros_slot(var, op_name, copy_xla_sharding=True)
self._restore_slot_variable(slot_name=slot_name, variable=var, slot_variable=new_slot_variable)
named_slots[_var_key(var)] = new_slot_variable
return named_slots[_var_key(var)]
|
Find or create a slot initialized with 0.0.
Args:
var: A `Variable` object.
slot_name: Name for the slot.
op_name: Name to use when scoping the Variable that
needs to be created for the slot.
Returns:
A `Variable` object.
|
github-repos
|
def set(self, *args, **kwargs):
if args:
for arg in args:
if arg is not None:
for name in self.__slots__:
self._set(name, getattr(arg, name, UNSET))
for name in kwargs:
self._set(name, kwargs.get(name, UNSET))
|
Conveniently set one or more fields at a time.
Args:
*args: Optionally set from other objects, available fields from the passed object are used in order
**kwargs: Set from given key/value pairs (only names defined in __slots__ are used)
|
juraj-google-style
|
def asdim(dimension):
if isinstance(dimension, Dimension):
return dimension
elif isinstance(dimension, (tuple, dict, basestring)):
return Dimension(dimension)
else:
raise ValueError('%s type could not be interpreted as Dimension. Dimensions must be declared as a string, tuple, dictionary or Dimension type.' % type(dimension).__name__)
|
Convert the input to a Dimension.
Args:
dimension: tuple, dict or string type to convert to Dimension
Returns:
A Dimension object constructed from the dimension spec. No
copy is performed if the input is already a Dimension.
|
codesearchnet
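A possible usage sketch, assuming HoloViews is installed and asdim is importable from holoviews.core.dimension:
import holoviews as hv
from holoviews.core.dimension import asdim

print(asdim('time'))              # Dimension('time')
print(asdim(('t', 'Time (s)')))   # name 't', label 'Time (s)'
dim = hv.Dimension('amplitude')
print(asdim(dim) is dim)          # True - an existing Dimension is returned uncopied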
|
def ajax(cls, url, param={}, method='get'):
param = urllib.parse.urlencode(param)
if (method.lower() == 'get'):
req = urllib.request.Request(((url + '?') + param))
elif (method.lower() == 'post'):
param = param.encode('utf-8')
req = urllib.request.Request(url, data=param)
else:
raise Exception("invalid method '{}' (GET/POST)".format(method))
rsp = urllib.request.urlopen(req)
if rsp:
rsp_json = rsp.read().decode('utf-8')
rsp_dict = json.loads(rsp_json)
return rsp_dict
return None
|
Get info via an AJAX-style request and decode the JSON response.
Args:
url (str): The request URL.
param (dict): Query parameters (GET) or form data (POST).
method (str): 'get' or 'post'. Defaults to 'get'.
Returns:
dict: JSON response decoded into a dict, or None if there is no response body.
|
codesearchnet
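A hypothetical usage of the classmethod above; SomeClient stands in for whatever class defines it, and httpbin.org is used only as a convenient JSON echo service.
data = SomeClient.ajax('https://httpbin.org/get', {'q': 'test'}, method='get')
print(data['args'])      # -> {'q': 'test'}

created = SomeClient.ajax('https://httpbin.org/post', {'name': 'demo'}, method='post')
print(created['form'])   # -> {'name': 'demo'}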
|
def WMITimeStrToRDFDatetime(self, timestr):
offset_minutes = timestr[21:]
year = timestr[:4]
month = timestr[4:6]
day = timestr[6:8]
hours = timestr[8:10]
minutes = timestr[10:12]
seconds = timestr[12:14]
microseconds = timestr[15:21]
unix_seconds = calendar.timegm(tuple(map(int, [year, month, day, hours, minutes, seconds])))
unix_seconds -= (int(offset_minutes) * 60)
return rdfvalue.RDFDatetime(((unix_seconds * 1000000.0) + int(microseconds)))
|
Return RDFDatetime from string like 20140825162259.000000-420.
Args:
timestr: WMI time string
Returns:
rdfvalue.RDFDatetime
We have some timezone manipulation work to do here because the UTC offset is
in minutes rather than +-HHMM
|
codesearchnet
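The same conversion can be sketched without GRR's rdfvalue types, returning epoch microseconds directly; the function name is hypothetical.
import calendar

def wmi_time_to_epoch_us(timestr):
    """Convert e.g. '20140825162259.000000-420' to microseconds since the epoch."""
    offset_minutes = int(timestr[21:])
    fields = (timestr[:4], timestr[4:6], timestr[6:8],
              timestr[8:10], timestr[10:12], timestr[12:14])
    unix_seconds = calendar.timegm(tuple(map(int, fields))) - offset_minutes * 60
    return unix_seconds * 1000000 + int(timestr[15:21])

print(wmi_time_to_epoch_us('20140825162259.000000-420'))  # -> 1409008979000000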
|
def copy_course_videos(source_course_id, destination_course_id):
if (source_course_id == destination_course_id):
return
course_videos = CourseVideo.objects.select_related('video', 'video_image').filter(course_id=six.text_type(source_course_id))
for course_video in course_videos:
(destination_course_video, __) = CourseVideo.objects.get_or_create(video=course_video.video, course_id=destination_course_id)
if hasattr(course_video, 'video_image'):
VideoImage.create_or_update(course_video=destination_course_video, file_name=course_video.video_image.image.name)
|
Adds the destination_course_id to the videos taken from the source_course_id
Args:
source_course_id: The original course_id
destination_course_id: The new course_id where the videos will be copied
|
codesearchnet
|
def set_property(property_map, name, value, exclude_from_indexes=None):
set_value(property_map[name], value, exclude_from_indexes)
|
Set property value in the given datastore.Property proto message.
Args:
property_map: a string->datastore.Value protobuf map.
name: name of the property.
value: python object or datastore.Value.
exclude_from_indexes: if the value should be exclude from indexes. None
leaves indexing as is (defaults to False if value is not a Value message).
Usage:
>>> set_property(property_proto, 'foo', u'a')
Raises:
TypeError: if the given value type is not supported.
|
codesearchnet
|
def interpolate(features, hparams, decode_hp):
inputs, targets = features["inputs"], features["targets"]
inputs = tf.unstack(inputs, axis=1)
targets = tf.unstack(targets, axis=1)
coeffs = np.linspace(0.0, 1.0, decode_hp.num_interp)
first_frame, last_frame = inputs[0], targets[-1]
first_top_z, first_level_eps = frame_to_latents(first_frame, hparams)
last_top_z, last_level_eps = frame_to_latents(last_frame, hparams)
first_lats = first_level_eps + [first_top_z]
last_lats = last_level_eps + [last_top_z]
interp_lats = []
lat_iterator = enumerate(zip(first_lats, last_lats))
for level_ind, (first_lat, last_lat) in lat_iterator:
if level_ind in decode_hp.level_interp:
if decode_hp.channel_interp == "all":
interp_lat = glow_ops.linear_interpolate(first_lat, last_lat, coeffs)
else:
interp_lat = glow_ops.linear_interpolate_rank(
first_lat, last_lat, coeffs, decode_hp.rank_interp)
else:
interp_lat = tf.tile(first_lat, [decode_hp.num_interp, 1, 1, 1])
interp_lats.append(interp_lat)
level_eps_interp = interp_lats[:hparams.n_levels-1]
z_top_interp = interp_lats[-1]
images = latents_to_frames(z_top_interp, level_eps_interp, hparams)
return images, first_frame, last_frame
|
Interpolate between the first input frame and last target frame.
Args:
features: dict of tensors
hparams: HParams, training hparams.
decode_hp: HParams, decode hparams.
Returns:
images: interpolated images, 4-D Tensor, shape=(num_interp, H, W, C)
first_frame: image, 3-D Tensor, shape=(1, H, W, C)
last_frame: image, 3-D Tensor, shape=(1, H, W, C)
|
juraj-google-style
|
def dbmax50years(self, value=None):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `dbmax50years`'.format(value))
self._dbmax50years = value
|
Corresponds to IDD Field `dbmax50years`
50-year return period values for maximum extreme dry-bulb temperature
Args:
value (float): value for IDD Field `dbmax50years`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|
def __init__(self, rr, table='services'):
self.rr = rr
self.table = table
self._ensure_table()
|
Initialize the service registry.
Creates the database table if it does not exist.
Args:
rr (doublethink.Rethinker): a doublethink.Rethinker, which must
have `dbname` set
|
juraj-google-style
|
def PrepareMergeTaskStorage(self, task):
if task.identifier not in self._task_storage_writers:
raise IOError('Storage writer for task: {0:s} does not exist.'.format(
task.identifier))
|
Prepares a task storage for merging.
Args:
task (Task): task.
Raises:
IOError: if the task storage does not exist.
OSError: if the task storage does not exist.
|
juraj-google-style
|
def dump_artifact(obj, path, filename=None):
p_sha1 = None
if (not os.path.exists(path)):
os.makedirs(path, mode=448)
else:
p_sha1 = hashlib.sha1()
p_sha1.update(obj.encode(encoding='UTF-8'))
if (filename is None):
(fd, fn) = tempfile.mkstemp(dir=path)
else:
fn = os.path.join(path, filename)
if os.path.exists(fn):
c_sha1 = hashlib.sha1()
with open(fn) as f:
contents = f.read()
c_sha1.update(contents.encode(encoding='UTF-8'))
if ((not os.path.exists(fn)) or (p_sha1.hexdigest() != c_sha1.hexdigest())):
lock_fp = os.path.join(path, '.artifact_write_lock')
lock_fd = os.open(lock_fp, (os.O_RDWR | os.O_CREAT), (stat.S_IRUSR | stat.S_IWUSR))
fcntl.lockf(lock_fd, fcntl.LOCK_EX)
try:
with open(fn, 'w') as f:
os.chmod(fn, stat.S_IRUSR)
f.write(str(obj))
finally:
fcntl.lockf(lock_fd, fcntl.LOCK_UN)
os.close(lock_fd)
os.remove(lock_fp)
return fn
|
Write the artifact to disk at the specified path
Args:
obj (string): The string object to be dumped to disk in the specified
path. The artifact filename will be automatically created
path (string): The full path to the artifacts data directory.
filename (string, optional): The name of file to write the artifact to.
If the filename is not provided, then one will be generated.
Returns:
string: The full path filename for the artifact that was generated
|
codesearchnet
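A hypothetical usage of dump_artifact above (POSIX only, since it relies on fcntl); the directory and payload are arbitrary.
import os
import tempfile

artifacts_dir = os.path.join(tempfile.gettempdir(), 'demo_artifacts')
path = dump_artifact('{"status": "ok"}', artifacts_dir, filename='result.json')
print(path)                # .../demo_artifacts/result.json
print(open(path).read())   # {"status": "ok"}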
|