code (stringlengths 20–4.93k) | docstring (stringlengths 33–1.27k) | source (stringclasses, 3 values)
---|---|---|
def add_delegate(self, callback):
if callback in self._delegate_methods:
return
self._delegate_methods.append(callback)
|
Registers a new delegate callback
The prototype should be function(data), where data will be the decoded json push
Args:
callback (function): method to trigger when push center receives events
|
juraj-google-style
|
def readMonthTariffs(self, months_type):
self.setContext('readMonthTariffs')
try:
req_type = binascii.hexlify(str(months_type).zfill(1))
req_str = (('01523102303031' + req_type) + '282903')
work_table = self.m_mons
if (months_type == ReadMonths.kWhReverse):
work_table = self.m_rev_mons
self.request(False)
req_crc = self.calc_crc16(req_str[2:].decode('hex'))
req_str += req_crc
self.m_serial_port.write(req_str.decode('hex'))
raw_ret = self.m_serial_port.getResponse(self.getContext())
self.serialPostEnd()
unpacked_read = self.unpackStruct(raw_ret, work_table)
self.convertData(unpacked_read, work_table, self.m_kwh_precision)
return_crc = self.calc_crc16(raw_ret[1:(- 2)])
if (str(return_crc) == str(work_table['crc16'][MeterData.StringValue])):
ekm_log(('Months CRC success, type = ' + str(req_type)))
self.setContext('')
return True
except:
ekm_log(traceback.format_exc(sys.exc_info()))
self.setContext('')
return False
|
Serial call to read month tariffs block into meter object buffer.
Args:
months_type (int): A :class:`~ekmmeters.ReadMonths` value.
Returns:
bool: True on completion.
|
codesearchnet
|
def generate_token():
length = 50
stringset = (string.ascii_letters + string.digits)
token = ''.join([stringset[(i % len(stringset))] for i in [ord(x) for x in os.urandom(length)]])
return token
|
Generate a new random security token.
>>> len(generate_token()) == 50
True
Returns:
string
|
codesearchnet
|
def update(self, resource, timeout=-1):
return self._client.update(resource, timeout=timeout)
|
Updates the specified data center resource.
Args:
resource (dict): Object to update.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Updated data center.
|
juraj-google-style
|
def _create_output_from_match(self, match_result):
if isinstance(match_result, dict):
return LinterOutput(self.name, **match_result)
return LinterOutput(self.name, *match_result)
|
Create Result instance from pattern match results.
Args:
match_result: Pattern match result (dict or sequence).
|
juraj-google-style
|
def get_classes_in_module(module, superclass=object):
ret = []
for classname in dir(module):
attr = module.__getattribute__(classname)
try:
if issubclass(attr, superclass) and (attr != superclass):
ret.append(attr)
except TypeError:
pass
except RuntimeError:
pass
return ret
|
Returns a list with all classes in module that descend from superclass
Args:
module: builtins.module
superclass: a class
Returns: list
|
juraj-google-style
|
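As a hedged usage sketch (not part of the dataset entry above), the same dir()/issubclass filtering pattern can be exercised against a standard-library module; numbers.Number is chosen here purely for illustration:
import numbers

def classes_in_module(module, superclass=object):
    # Collect every attribute of `module` that is a strict subclass of `superclass`.
    found = []
    for name in dir(module):
        attr = getattr(module, name)
        try:
            if issubclass(attr, superclass) and attr is not superclass:
                found.append(attr)
        except TypeError:
            # dir() also yields non-class attributes; issubclass rejects them.
            pass
    return found

print(classes_in_module(numbers, numbers.Number))
# [<class 'numbers.Complex'>, <class 'numbers.Integral'>, <class 'numbers.Rational'>, <class 'numbers.Real'>]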
def assemble(self, ops):
return pwnypack.asm.asm(self.compile(ops), target=self.target)
|
Assemble a list of operations into executable code.
Arguments:
ops(list): A list of shellcode operations.
Returns:
bytes: The executable code that implements the shellcode.
|
codesearchnet
|
def cumall(series):
alls = series.expanding().apply(np.all).astype(bool)
return alls
|
Calculates cumulative all of values. Equivalent to
`series.expanding().apply(np.all).astype(bool)`.
Args:
series: column to compute cumulative all for.
|
juraj-google-style
|
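A quick, hedged illustration of the expanding-all semantics described above (requires pandas and numpy):
import numpy as np
import pandas as pd

s = pd.Series([True, True, False, True])
# Once a False appears, every later cumulative value stays False.
print(s.expanding().apply(np.all).astype(bool).tolist())  # [True, True, False, False]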
def GetMessages(self, formatter_mediator, event):
if self.DATA_TYPE != event.data_type:
raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
event.data_type))
event_values = event.CopyToDict()
message_type = event_values.get('message_type', None)
if message_type is not None:
event_values['message_type'] = (
self._MESSAGE_TYPE.get(message_type, 'UNKNOWN'))
message_status = event_values.get('message_status', None)
if message_status is not None:
event_values['message_status'] = (
self._MESSAGE_STATUS.get(message_status, 'UNKNOWN'))
return self._ConditionalFormatMessages(event_values)
|
Determines the formatted message strings for an event object.
Args:
formatter_mediator (FormatterMediator): mediates the interactions
between formatters and other components, such as storage and Windows
EventLog resources.
event (EventObject): event.
Returns:
tuple(str, str): formatted message string and short message string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter.
|
juraj-google-style
|
def as_dense_types(types, classes):
ret = nest.pack_sequence_as(types, [dtypes.variant if c is sparse_tensor.SparseTensor else ty for ty, c in zip(nest.flatten(types), nest.flatten(classes))])
return ret
|
Converts sparse tensor types to `dtypes.variant`.
Args:
types: a structure of types to convert.
classes: a structure of objects that identify the dataset item classes
Returns:
a structure matching the nested structure of `types`, containing
`dtypes.variant` at positions where `classes` contains
`tf.sparse.SparseTensor` and matching contents of `types` otherwise
|
github-repos
|
def escape(inp, quote='"'):
output = ''
for c in inp:
if (c == quote):
output += '\\'
output += c
return output
|
Escape `quote` in string `inp`.
Example usage::
>>> escape('hello "')
'hello \\"'
>>> escape('hello \\"')
'hello \\\\"'
Args:
inp (str): String in which `quote` will be escaped.
quote (char, default "): Specify which character will be escaped.
Returns:
str: Escaped string.
|
codesearchnet
|
def AddArg(self, arg):
self.args.append(arg)
if (len(self.args) > self.number_of_args):
raise ParseError('Too many args for this expression.')
elif (len(self.args) == self.number_of_args):
return True
return False
|
Adds a new arg to this expression.
Args:
arg: The argument to add (string).
Returns:
True if this arg is the last arg, False otherwise.
Raises:
ParseError: If there are too many args.
|
codesearchnet
|
def _evaluate_barycentric(nodes, degree, lambda1, lambda2, lambda3):
(dimension, num_nodes) = nodes.shape
binom_val = 1.0
result = np.zeros((dimension, 1), order='F')
index = (num_nodes - 1)
result[:, 0] += nodes[:, index]
lambda1 = np.asfortranarray([lambda1])
lambda2 = np.asfortranarray([lambda2])
for k in six.moves.xrange((degree - 1), (- 1), (- 1)):
binom_val = ((binom_val * (k + 1)) / (degree - k))
index -= 1
new_index = ((index - degree) + k)
col_nodes = nodes[:, new_index:(index + 1)]
col_nodes = np.asfortranarray(col_nodes)
col_result = _curve_helpers.evaluate_multi_barycentric(col_nodes, lambda1, lambda2)
result *= lambda3
result += (binom_val * col_result)
index = new_index
return result
|
r"""Compute a point on a surface.
Evaluates :math:`B\left(\lambda_1, \lambda_2, \lambda_3\right)` for a
Bézier surface / triangle defined by ``nodes``.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Args:
nodes (numpy.ndarray): Control point nodes that define the surface.
degree (int): The degree of the surface define by ``nodes``.
lambda1 (float): Parameter along the reference triangle.
lambda2 (float): Parameter along the reference triangle.
lambda3 (float): Parameter along the reference triangle.
Returns:
numpy.ndarray: The evaluated point as a ``D x 1`` array (where ``D``
is the ambient dimension where ``nodes`` reside).
|
codesearchnet
|
def one_of_keyword_only(*valid_keywords):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
sentinel = object()
values = {}
for key in valid_keywords:
kwarg_value = kwargs.pop(key, sentinel)
if (kwarg_value is not sentinel):
values[key] = kwarg_value
if kwargs:
raise TypeError('Unexpected arguments: {}'.format(kwargs))
if (not values):
raise TypeError('Must provide one of {} as keyword argument'.format(', '.join(valid_keywords)))
if (len(values) > 1):
raise TypeError('Must provide only one of {} as keyword argument. Received {}'.format(', '.join(valid_keywords), values))
return func(*(args + values.popitem()))
return wrapper
return decorator
|
Decorator to help make functions that take one-and-only-one keyword-only argument more reusable
Notes:
Decorated function should take 2 arguments, the first for the key, the second the value
Examples:
::
@one_of_keyword_only('a', 'b', 'c')
def func(key, value):
if key == 'a':
...
elif key == 'b':
...
else:
# key = 'c'
...
...
func(a=1)
func(b=2)
func(c=3)
try:
func(d=4)
except TypeError:
...
try:
func(a=1, b=2)
except TypeError:
...
Args:
*valid_keywords (str): All allowed keyword argument names
Raises:
TypeError: On decorated call, if 0 or 2+ arguments are provided or kwargs contains a key not in valid_keywords
|
codesearchnet
|
def add_number_parameters(self, number):
if isinstance(number, list):
for x in number:
self.add_number_parameters(x)
return
self._parameters.append((('{ "value": ' + str(number)) + ' }'))
|
Add given number parameters to the internal list.
Args:
number (int, float, or list of int/float): A number or list of numbers to add to the parameters.
|
codesearchnet
|
def submit_bsub_job(command, job_id=None, dependent_id=None, memory=None, requeue_code=None, logfile=None):
if job_id is None:
job_id = get_random_string()
job = "-J \"{0}\"".format(job_id)
mem = ""
if memory is not None:
mem = "-R 'select[mem>{0}] rusage[mem={0}]' -M {0}".format(memory)
requeue = ""
if requeue_code is not None:
requeue = "-Q 'EXCLUDE({0})'".format(requeue_code)
dependent = ""
if dependent_id is not None:
if type(dependent_id) == list:
dependent_id = " && ".join(dependent_id)
dependent = "-w '{0}'".format(dependent_id)
log = "bjob_output.txt"
if logfile is not None:
log = logfile
preamble = ["bsub", job, dependent, requeue, "-q", "normal", "-o", log, mem]
command = ["bash", "-c", "\""] + command + ["\""]
command = " ".join(preamble + command)
subprocess.call(command, shell=True)
|
construct a bsub job submission command
Args:
command: list of strings that form a unix command
job_id: string for job ID for submission
dependent_id: job ID, or list of job IDs which the current command needs
to have finished before the current command will start. Note that
the list can be empty, in which case there are no dependencies.
memory: minimum memory requirements (in megabytes)
Returns:
nothing
|
juraj-google-style
|
def intersection(self, other, recursive=True):
if (not isinstance(other, composite)):
raise AssertionError('Cannot intersect composite and {} types'.format(type(other)))
if (self.meta_type != other.meta_type):
return composite({})
if (self.meta_type == 'list'):
keep = []
for item in self._list:
if (item in other._list):
if (recursive and isinstance(item, composite)):
keep.extend(item.intersection(other.index(item), recursive=True))
else:
keep.append(item)
return composite(keep)
elif (self.meta_type == 'dict'):
keep = {}
for key in self._dict:
item = self._dict[key]
if (key in other._dict):
if (recursive and isinstance(item, composite) and isinstance(other.get(key), composite)):
keep[key] = item.intersection(other.get(key), recursive=True)
elif (item == other[key]):
keep[key] = item
return composite(keep)
return
|
Recursively compute intersection of data. For dictionaries, items
for specific keys will be reduced to unique items. For lists, items
will be reduced to unique items. This method is meant to be analogous
to set.intersection for composite objects.
Args:
other (composite): Other composite object to intersect with.
recursive (bool): Whether or not to perform the operation recursively,
for all nested composite objects.
|
codesearchnet
|
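To make the intersection semantics above concrete, here is a small plain-Python sketch of the non-recursive dict and list cases (no composite class involved):
a = {'x': 1, 'y': 2}
b = {'x': 1, 'y': 3}
# Dict case: keep keys present in both mappings whose values match exactly.
print({k: v for k, v in a.items() if k in b and b[k] == v})  # {'x': 1}

# List case: keep items that also appear in the other list.
print([item for item in [1, 2] if item in [2, 3]])  # [2]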
def _object_url(self, objtype, objid):
return '{base_url}/api/{api_version}/{controller}/{obj_id}'.format(base_url=self._base_url(), api_version=self.api_version, controller=self._controller_name(objtype), obj_id=objid)
|
Generate the URL for the specified object
Args:
objtype (str): The object's type
objid (int): The object's ID
Returns:
A string containing the URL of the object
|
codesearchnet
|
def getnamespace(f):
namespace = dict(f.__globals__)
closure = f.__closure__
freevars = f.__code__.co_freevars
if freevars and closure:
for name, cell in zip(freevars, closure):
try:
namespace[name] = cell.cell_contents
except ValueError:
pass
return namespace
|
Returns the complete namespace of a function.
Namespace is defined here as the mapping of all non-local variables to values.
This includes the globals and the closure variables. Note that this captures
the entire globals collection of the function, and may contain extra symbols
that it does not actually use.
Args:
f: User defined function.
Returns:
A dict mapping symbol names to values.
|
github-repos
|
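A short, hedged demonstration of the closure-variable half of getnamespace, using only standard CPython attributes:
def make_adder(n):
    def add(x):
        return x + n
    return add

f = make_adder(3)
# co_freevars names the closure variables; each cell in __closure__ holds the captured value.
closure_vars = {name: cell.cell_contents
                for name, cell in zip(f.__code__.co_freevars, f.__closure__)}
print(closure_vars)  # {'n': 3}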
def __init__(self, index: Optional[int]=None):
super().__init__()
self._index = index
|
Constructor.
Args:
index: index of the tuple field that this key spec applies to.
If None, this tuple value spec applies to all elements of a
variable-length tuple.
|
github-repos
|
def copy_handle_data(source_t, target_t):
if target_t.dtype == dtypes.resource or target_t.dtype == dtypes.variant:
handle_data = get_handle_data(source_t)
set_handle_data(target_t, handle_data)
|
Copies HandleData for variant and resource type tensors if available.
The CppShapeInferenceResult::HandleData proto contains information about the
shapes and types of the element tensors of resource/variant type tensors.
We need to copy this across function boundaries, i.e., when capturing a
placeholder or when returning a function tensor as output. If we don't do this
the element tensors will have unknown shapes, e.g., if a TensorList variant
tensor is captured as a placeholder, elements popped from that list would have
unknown shape.
Args:
source_t: The tensor to copy HandleData from.
target_t: The tensor to copy HandleData to.
|
github-repos
|
def get_default_settings(sub_scripts, script_order, script_execution_freq, iterator_type):
def populate_sweep_param(scripts, parameter_list, trace=''):
def get_parameter_from_dict(trace, dic, parameter_list, valid_values=None):
if valid_values is None and isinstance(dic, Parameter):
valid_values = dic.valid_values
for key, value in dic.items():
if isinstance(value, dict):
parameter_list = get_parameter_from_dict(trace + '.' + key, value, parameter_list,
dic.valid_values[key])
elif (valid_values[key] in (float, int)) or \
(isinstance(valid_values[key], list) and valid_values[key][0] in (float, int)):
parameter_list.append(trace + '.' + key)
else:
print(('ignoring sweep parameter', key))
return parameter_list
for script_name in list(scripts.keys()):
from pylabcontrol.core import ScriptIterator
script_trace = trace
if script_trace == '':
script_trace = script_name
else:
script_trace = script_trace + '->' + script_name
if issubclass(scripts[script_name], ScriptIterator):
populate_sweep_param(vars(scripts[script_name])['_SCRIPTS'], parameter_list=parameter_list,
trace=script_trace)
else:
for setting in \
[elem[1] for elem in inspect.getmembers(scripts[script_name]) if elem[0] == '_DEFAULT_SETTINGS'][0]:
parameter_list = get_parameter_from_dict(script_trace, setting, parameter_list)
return parameter_list
if iterator_type == 'loop':
script_default_settings = [
Parameter('script_order', script_order),
Parameter('script_execution_freq', script_execution_freq),
Parameter('num_loops', 0, int, 'times the subscripts will be executed'),
Parameter('run_all_first', True, bool, 'Run all scripts with nonzero frequency in first pass')
]
elif iterator_type == 'sweep':
sweep_params = populate_sweep_param(sub_scripts, [])
script_default_settings = [
Parameter('script_order', script_order),
Parameter('script_execution_freq', script_execution_freq),
Parameter('sweep_param', sweep_params[0], sweep_params, 'variable over which to sweep'),
Parameter('sweep_range',
[Parameter('min_value', 0, float, 'min parameter value'),
Parameter('max_value', 0, float, 'max parameter value'),
Parameter('N/value_step', 0, float,
'either number of steps or parameter value step, depending on mode')]),
Parameter('stepping_mode', 'N', ['N', 'value_step'],
'Switch between number of steps and step amount'),
Parameter('run_all_first', True, bool, 'Run all scripts with nonzero frequency in first pass')
]
else:
print(('unknown iterator type ' + iterator_type))
raise TypeError('unknown iterator type ' + iterator_type)
return script_default_settings
|
Assigns the actual script settings depending on the iterator type.
This might be overwritten by classes that inherit from ScriptIterator.
Args:
sub_scripts: dictionary with the subscripts
script_order: execution order of subscripts
script_execution_freq: execution frequency of subscripts
Returns:
the default setting for the iterator
|
juraj-google-style
|
def validate_queues(queues):
if (not isinstance(queues, dict)):
raise exceptions.ConfigurationException("'queues' must be a dictionary mapping queue names to settings.")
for (queue, settings) in queues.items():
if (not isinstance(settings, dict)):
raise exceptions.ConfigurationException("the {} queue in the 'queues' setting has a value of type {}, but it should be a dictionary of settings.".format(queue, type(settings)))
missing_keys = []
for key in ('durable', 'auto_delete', 'exclusive', 'arguments'):
if (key not in settings):
missing_keys.append(key)
if missing_keys:
raise exceptions.ConfigurationException('the {} queue is missing the following keys from its settings value: {}'.format(queue, missing_keys))
|
Validate the queues configuration.
Raises:
exceptions.ConfigurationException: If the configuration provided is of an
invalid format.
|
codesearchnet
|
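Per the checks in validate_queues above, every queue's settings mapping must carry the keys 'durable', 'auto_delete', 'exclusive' and 'arguments'; a configuration shaped like the sketch below (names are illustrative) would pass without raising:
queues = {
    'my_queue': {
        'durable': True,
        'auto_delete': False,
        'exclusive': False,
        'arguments': {},
    },
}
# validate_queues(queues) returns None when every required key is present.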
def _is_univariate_marginal(self, index_points):
num_index_points = tf.compat.dimension_value(
index_points.shape[-(self.kernel.feature_ndims + 1)])
if num_index_points is None:
warnings.warn(
'Unable to detect statically whether the number of index_points is '
'1. As a result, defaulting to treating the marginal GP at '
'`index_points` as a multivariate Gaussian. This makes some methods, '
'like `cdf` unavailable.')
return num_index_points == 1
|
True if the given index_points would yield a univariate marginal.
Args:
index_points: the set of index set locations at which to compute the
marginal Gaussian distribution. If this set is of size 1, the marginal is
univariate.
Returns:
is_univariate: Boolean indicating whether the marginal is univariate or
multivariate. In the case of dynamic shape in the number of index points,
defaults to "multivariate" since that's the best we can do.
|
juraj-google-style
|
def matches(self, node, value):
if self.skip(value):
return True
if (not self._valid_value(value)):
msg = 'Invalid value {value} passed to filter {name} - '.format(value=repr(value), name=self.name)
if (self.default is not None):
warn((msg + 'defaulting to {}'.format(self.default)))
value = self.default
else:
warn((msg + 'skipping'))
return True
return self.func(node, value)
|
Returns whether the given node matches the filter rule with the given value.
Args:
node (Element): The node to filter.
value (object): The desired value with which the node should be evaluated.
Returns:
bool: Whether the given node matches.
|
codesearchnet
|
def optimize(self, sensor_graph, model):
passes = self._order_pases(self._known_passes.keys())
for opt_name in passes:
rerun = True
pass_instance = self._known_passes[opt_name][0]()
while rerun:
rerun = pass_instance.run(sensor_graph, model=model)
|
Optimize a sensor graph by running optimization passes.
The passes are run one at a time and modify the sensor graph
for future passes.
Args:
sensor_graph (SensorGraph): The graph to be optimized
model (DeviceModel): The device that we are optimizing
for, that OptimizationPass objects are free to use
to guide their optimizations.
|
codesearchnet
|
def get_doc_id(document_pb, expected_prefix):
prefix, document_id = document_pb.name.rsplit(DOCUMENT_PATH_DELIMITER, 1)
if prefix != expected_prefix:
raise ValueError(
"Unexpected document name",
document_pb.name,
"Expected to begin with",
expected_prefix,
)
return document_id
|
Parse a document ID from a document protobuf.
Args:
document_pb (google.cloud.proto.firestore.v1beta1.\
document_pb2.Document): A protobuf for a document that
was created in a ``CreateDocument`` RPC.
expected_prefix (str): The expected collection prefix for the
fully-qualified document name.
Returns:
str: The document ID from the protobuf.
Raises:
ValueError: If the name does not begin with the prefix.
|
juraj-google-style
|
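A hedged sketch of just the rsplit step, with a made-up document name (the real path layout comes from Firestore):
DOCUMENT_PATH_DELIMITER = '/'  # assumed value, for illustration only

name = 'projects/my-proj/databases/(default)/documents/users/alovelace'
prefix, document_id = name.rsplit(DOCUMENT_PATH_DELIMITER, 1)
print(prefix)       # projects/my-proj/databases/(default)/documents/users
print(document_id)  # alovelace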
def list_objects(self, path='', relative=False, first_level=False,
max_request_entries=None):
entries = 0
next_values = []
max_request_entries_arg = None
if not relative:
path = self.relpath(path)
if not path:
objects = self._list_locators()
else:
objects = self._list_objects(
self.get_client_kwargs(path), max_request_entries)
for obj in objects:
try:
name, header, is_directory = obj
except ValueError:
name, header = obj
is_directory = True
if is_directory and not first_level:
name = next_path = name.rstrip('/') + '/'
if path:
next_path = '/'.join((path.rstrip('/'), name))
if max_request_entries is not None:
max_request_entries_arg = max_request_entries - entries
next_values.append((
name, self._generate_async(self.list_objects(
next_path, relative=True,
max_request_entries=max_request_entries_arg))))
entries += 1
yield name, header
if entries == max_request_entries:
return
for next_name, generator in next_values:
for name, header in generator:
entries += 1
yield '/'.join((next_name.rstrip('/'), name)), header
if entries == max_request_entries:
return
|
List objects.
Args:
path (str): Path or URL.
relative (bool): Path is relative to current root.
first_level (bool): If True, returns only first level objects.
Else, returns full tree.
max_request_entries (int): If specified, maximum entries returned
by request.
Returns:
generator of tuple: object name str, object header dict
|
juraj-google-style
|
def post_process_single(self, generation: str, fix_markdown: bool=True) -> str:
generation = re.sub('(?:\\n|^)
generation = generation.strip()
generation = generation.replace('\n* [leftmargin=*]\n', '\n')
generation = re.sub('^
lines = generation.split('\n')
if lines[-1].startswith('
logger.info('Likely hallucinated title at the end of the page: ' + lines[-1])
generation = '\n'.join(lines[:-1])
generation = truncate_repetitions(generation)
generation = self.remove_hallucinated_references(generation)
generation = re.sub('^\\* \\[\\d+\\](\\s?[A-W]\\.+\\s?){10,}.*$', '', generation, flags=re.M)
generation = re.sub('^(\\* \\[\\d+\\])\\[\\](.*)$', '\\1\\2', generation, flags=re.M)
generation = re.sub('(^\\w\\n\\n|\\n\\n\\w$)', '', generation)
generation = re.sub('([\\s.,()])_([a-zA-Z0-9])__([a-zA-Z0-9]){1,3}_([\\s.,:()])', '\\1\\(\\2_{\\3}\\)\\4', generation)
generation = re.sub('([\\s.,\\d])_([a-zA-Z0-9])_([\\s.,\\d;])', '\\1\\(\\2\\)\\3', generation)
generation = re.sub('(\\nFootnote .*?:) (?:footnotetext|thanks):\\W*(.*(?:\\n\\n|$))', '\\1 \\2', generation)
generation = re.sub('\\[FOOTNOTE:.+?\\](.*?)\\[ENDFOOTNOTE\\]', '', generation)
generation = normalize_list_like_lines(generation)
if generation.endswith(('.', '}')):
generation += '\n\n'
if re.match('[A-Z0-9,;:]$', generation):
generation += ' '
elif generation.startswith(('
generation = '\n\n' + generation
elif generation.split('\n')[-1].startswith(('
generation = generation + '\n\n'
else:
try:
last_word = generation.split(' ')[-1]
if last_word in nltk.corpus.words.words():
generation += ' '
except LookupError:
generation += ' '
generation = self.correct_tables(generation)
generation = generation.replace('\\begin{array}[]{', '\\begin{array}{')
generation = re.sub('\\\\begin{tabular}{([clr ]){2,}}\\s*[& ]*\\s*(\\\\\\\\)? \\\\end{tabular}', '', generation)
generation = re.sub('(\\*\\*S\\. A\\. B\\.\\*\\*\\n+){2,}', '', generation)
generation = re.sub('^
generation = re.sub('^\\.\\s*$', '', generation, flags=re.M)
generation = re.sub('\\n{3,}', '\n\n', generation)
if fix_markdown:
return markdown_compatible(generation)
else:
return generation
|
Postprocess a single generated text. Regular expressions used here are taken directly from the Nougat article
authors. These expressions are commented for clarity and tested end-to-end in most cases.
Args:
generation (str): The generated text to be postprocessed.
fix_markdown (bool, optional): Whether to perform Markdown formatting fixes. Default is True.
Returns:
str: The postprocessed text.
|
github-repos
|
def set(self, name, value, autodeclare=False):
if ((not autodeclare) and (name not in self._data)):
raise KeyError('Key {} has not been declared and autodeclare=False'.format(name))
self._ensure_declared(name)
self._data[name].set_result(value)
|
Set the value of a key.
This method will cause anyone waiting on a key (and any future
waiters) to unblock and be returned the value you pass here.
If the key has not been declared previously, a KeyError() is
raised unless you pass ``autodeclare=True`` which will cause
the key to be declared. Normally you don't want to autodeclare.
This method is not a coroutine and does not block.
Args:
name (str): The key to set
value (object): The value to set
autodeclare (bool): Whether to automatically declare the
key if is has not already been declared. Defaults to
False.
|
codesearchnet
|
def convert_args_to_laid_out_tensors(xs):
ret = []
for x in xs:
if hasattr(x, "to_laid_out_tensor"):
ret.append(x.to_laid_out_tensor())
else:
ret.append(x)
return ret
|
Convert list elements to laid-out-tensors when possible.
Args:
xs: a list
Returns:
a list
|
juraj-google-style
|
def _bbox_intersect(nodes1, nodes2):
(left1, right1, bottom1, top1) = _helpers.bbox(nodes1)
(left2, right2, bottom2, top2) = _helpers.bbox(nodes2)
if ((right2 < left1) or (right1 < left2) or (top2 < bottom1) or (top1 < bottom2)):
return BoxIntersectionType.DISJOINT
if ((right2 == left1) or (right1 == left2) or (top2 == bottom1) or (top1 == bottom2)):
return BoxIntersectionType.TANGENT
else:
return BoxIntersectionType.INTERSECTION
|
r"""Bounding box intersection predicate.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Determines if the bounding box of two sets of control points
intersects in :math:`\mathbf{R}^2` with non-trivial
intersection (i.e. tangent bounding boxes are insufficient).
.. note::
Though we assume (and the code relies on this fact) that
the nodes are two-dimensional, we don't check it.
Args:
nodes1 (numpy.ndarray): Set of control points for a
Bézier shape.
nodes2 (numpy.ndarray): Set of control points for a
Bézier shape.
Returns:
int: Enum from ``BoxIntersectionType`` indicating the type of
bounding box intersection.
|
codesearchnet
|
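A minimal numpy sketch of the disjoint / tangent / overlap tests used above, with an inline stand-in for the internal _helpers.bbox helper:
import numpy as np

def bbox(nodes):
    # nodes is a 2 x N array of control points; return (left, right, bottom, top).
    return nodes[0].min(), nodes[0].max(), nodes[1].min(), nodes[1].max()

unit_square = np.asfortranarray([[0.0, 1.0], [0.0, 1.0]])
shares_edge = np.asfortranarray([[1.0, 2.0], [0.0, 1.0]])

left1, right1, bottom1, top1 = bbox(unit_square)
left2, right2, bottom2, top2 = bbox(shares_edge)
disjoint = right2 < left1 or right1 < left2 or top2 < bottom1 or top1 < bottom2
tangent = right2 == left1 or right1 == left2 or top2 == bottom1 or top1 == bottom2
print(disjoint, tangent)  # False True -> tangent boxes, not a full intersection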
def remove_tag(self, tag):
return self._remove_hdxobject(self.data.get('tags'), tag, matchon='name')
|
Remove a tag
Args:
tag (str): Tag to remove
Returns:
bool: True if tag removed or False if not
|
codesearchnet
|
def _batch_prepare_for_model(self, batch_text_or_text_pairs, is_pair: Optional[bool]=None, boxes: Optional[List[List[int]]]=None, word_labels: Optional[List[List[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:
batch_outputs = {}
for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)):
batch_text_or_text_pair, boxes_example = example
outputs = self.prepare_for_model(batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair, batch_text_or_text_pair[1] if is_pair else None, boxes_example, word_labels=word_labels[idx] if word_labels is not None else None, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, padding_side=None, return_attention_mask=False, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, prepend_batch_axis=False, verbose=verbose)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
batch_outputs = self.pad(batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)
batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
return batch_outputs
|
Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
manages a moving window (with user defined stride) for overflowing tokens.
Args:
batch_ids_pairs: list of tokenized input ids or input ids pairs
|
github-repos
|
def _initialize_mesh_dimension_name_to_size(self, mesh_shape):
mesh_dimension_name_to_size = {}
for mesh_dimension in mesh_shape.dims:
mesh_dimension_name_to_size[mesh_dimension.name] = mesh_dimension.size
return mesh_dimension_name_to_size
|
Initializer for self._mesh_dimension_name_to_size.
Args:
mesh_shape: an mtf.Shape.
Returns:
A {string: int} mapping mesh dimension names to their sizes.
|
codesearchnet
|
def to_json(self):
return {'resourceType': self.resource.resource_type_id, 'resourceId': self.id, 'accountId': self.resource.account_id, 'account': self.account, 'location': self.resource.location, 'properties': {to_camelcase(prop.name): prop.value for prop in self.resource.properties}, 'tags': [{'key': t.key, 'value': t.value} for t in self.resource.tags]}
|
Return a `dict` representation of the resource, including all properties and tags
Returns:
`dict`
|
codesearchnet
|
def _validate_children_inputs_mappings(self, children_inputs_mappings):
assert isinstance(children_inputs_mappings, dict)
assert 'parent_first_child_input' in children_inputs_mappings
assert 'parent_last_child_output' in children_inputs_mappings
assert 'internal_children_input_output' in children_inputs_mappings
def assert_dictlist_has_keys(dictlist, keys):
for dikt in dictlist:
assert isinstance(dikt, dict)
for key in keys:
assert key in dikt
assert_dictlist_has_keys(children_inputs_mappings['parent_first_child_input'], ['parent_ophint_input_index', 'first_child_ophint_input_index'])
assert_dictlist_has_keys(children_inputs_mappings['parent_last_child_output'], ['parent_output_index', 'child_output_index'])
assert_dictlist_has_keys(children_inputs_mappings['internal_children_input_output'], ['child_input_index', 'child_output_index'])
|
Validate children inputs mappings is in the right format.
Args:
children_inputs_mappings: the Children ophint inputs/outputs mapping.
|
github-repos
|
def get_synchronous_execution():
return context.context().execution_mode == context.SYNC
|
Gets whether operations are executed synchronously or asynchronously.
TensorFlow can execute operations synchronously or asynchronously. If
asynchronous execution is enabled, operations may return "non-ready" handles.
Returns:
Current thread execution mode
|
github-repos
|
def shape(self) -> torch.Size:
return self._trans.shape[:-1]
|
Returns the shape of the shared dimensions of the rotation and the translation.
Returns:
The shape of the transformation
|
github-repos
|
def _abstractify_value(val: '_instances.ConcreteValue', ctx: 'context.Context', seen: 'set[_base.BaseValue] | None'=None) -> '_instances.ConcreteValue':
if seen is None:
seen = set()
if not val.is_concrete or val in seen:
return val
seen = seen | {val}
if not isinstance(val.pyval, (list, tuple)):
return ctx.convert.get_maybe_abstract_instance(val)
new_content = []
for elem in val.pyval:
new_elem_data = [_abstractify_value(v, ctx, seen) for v in elem.data]
if any((v != new_v for v, new_v in zip(elem.data, new_elem_data))):
new_elem = ctx.program.NewVariable()
for b, new_data in zip(elem.bindings, new_elem_data):
new_elem.PasteBindingWithNewData(b, new_data)
new_content.append(new_elem)
else:
new_content.append(elem)
if any((elem != new_elem for elem, new_elem in zip(val.pyval, new_content))):
return type(val)(type(val.pyval)(new_content), ctx)
else:
return val
|
Converts a maybe-abstract value to a concrete one.
Args:
val: A value.
ctx: The context.
seen: Optionally, a seen values set.
Unlike ctx.convert.get_maybe_abstract_instance, this method recursively
descends into lists and tuples.
Returns:
A concrete value.
|
github-repos
|
def organize_models(self, outdir, force_rerun=False):
uniprot_to_swissmodel = defaultdict(list)
for (u, models) in self.all_models.items():
for m in models:
original_filename = '{}_{}_{}_{}'.format(m['from'], m['to'], m['template'], m['coordinate_id'])
file_path = op.join(self.metadata_dir, u[:2], u[2:4], u[4:], 'swissmodel', '{}.pdb'.format(original_filename))
if op.exists(file_path):
new_filename = '{}_{}_{}_{}.pdb'.format(u, m['from'], m['to'], m['template'][:4])
shutil.copy(file_path, op.join(outdir, new_filename))
uniprot_to_swissmodel[u].append(new_filename)
else:
log.warning('{}: no file {} found for model'.format(u, file_path))
return uniprot_to_swissmodel
|
Organize and rename SWISS-MODEL models to a single folder with a name containing template information.
Args:
outdir (str): New directory to copy renamed models to
force_rerun (bool): If models should be copied again even if they already exist
Returns:
dict: Dictionary of lists, UniProt IDs as the keys and new file paths as the values
|
codesearchnet
|
def alltoall(self, x, mesh_axis, split_axis, concat_axis):
return self._collective_with_groups(
x, [mesh_axis],
functools.partial(
alltoall_ring, split_axis=split_axis, concat_axis=concat_axis))
|
Grouped alltoall.
Args:
x: a LaidOutTensor
mesh_axis: an integer the mesh axis along which to group
split_axis: an integer (the Tensor axis along which to split)
concat_axis: an integer (the Tensor axis along which to concatenate)
Returns:
a LaidOutTensor
|
juraj-google-style
|
def model_fn_sharded(self, sharded_features):
dp = self._data_parallelism
datashard_to_features = self._to_features_per_datashard(sharded_features)
if self.use_body_sharded():
if self.hparams.scheduled_sampling_prob > 0.0:
raise NotImplementedError(
"Scheduled sampling for non-sharded body only.")
transformed_features = dp(self.bottom, datashard_to_features)
body_out = self.body_sharded(
self._to_single_features_dict(transformed_features))
body_out, losses = self._normalize_body_output(body_out)
if "training" in losses:
log_info("Skipping T2TModel top and loss because training loss "
"returned from body")
sharded_logits = body_out
else:
if isinstance(body_out, dict):
sharded_logits = collections.OrderedDict()
sharded_losses = collections.OrderedDict()
for k, v in sorted(six.iteritems(body_out)):
sharded_logits[k] = dp(self.top, v, datashard_to_features)
sharded_losses[k] = dp(self.loss, sharded_logits[k],
datashard_to_features)
training_loss_dict = average_sharded_losses([({
"training": l
} for l in loss) for loss in sharded_losses.values()])
losses.update(training_loss_dict)
else:
sharded_logits = dp(self.top, body_out, datashard_to_features)
sharded_losses = dp(self.loss, sharded_logits, datashard_to_features)
if isinstance(sharded_losses, tuple):
nums, dens = sharded_losses
sharded_losses = zip(nums, dens)
training_loss_dict = average_sharded_losses([{
"training": loss
} for loss in sharded_losses])
losses.update(training_loss_dict)
else:
sharded_logits, sharded_losses = dp(self.model_fn, datashard_to_features)
sharded_logits, sharded_losses = dp(
self.maybe_scheduled_sampling,
datashard_to_features, sharded_logits, sharded_losses)
if isinstance(sharded_logits[0], dict):
temp_dict = {k: [] for k, _ in six.iteritems(sharded_logits[0])}
for k, _ in six.iteritems(sharded_logits[0]):
for l in sharded_logits:
temp_dict[k].append(l[k])
sharded_logits = temp_dict
losses = average_sharded_losses(sharded_losses)
return sharded_logits, losses
|
Estimator model_fn sharded along batch dimension.
Args:
sharded_features: {str: [Tensor]}. Features sharded along batch dimension.
Each list is the same length (== number of shards).
Returns:
sharded_logits: [Tensor]. Logits for each shard of examples.
losses: {str: 0-D Tensor}. Loss averaged across shards.
|
juraj-google-style
|
def take_bug_report(self, test_name, begin_time, timeout=300, destination=None):
new_br = True
try:
stdout = self.adb.shell('bugreportz -v').decode('utf-8')
if ('not found' in stdout):
new_br = False
except adb.AdbError:
new_br = False
if destination:
br_path = utils.abs_path(destination)
else:
br_path = os.path.join(self.log_path, 'BugReports')
utils.create_dir(br_path)
base_name = (',%s,%s.txt' % (begin_time, self._normalized_serial))
if new_br:
base_name = base_name.replace('.txt', '.zip')
test_name_len = (utils.MAX_FILENAME_LEN - len(base_name))
out_name = (test_name[:test_name_len] + base_name)
full_out_path = os.path.join(br_path, out_name.replace(' ', '\\ '))
self.wait_for_boot_completion()
self.log.info('Taking bugreport for %s.', test_name)
if new_br:
out = self.adb.shell('bugreportz', timeout=timeout).decode('utf-8')
if (not out.startswith('OK')):
raise DeviceError(self, ('Failed to take bugreport: %s' % out))
br_out_path = out.split(':')[1].strip()
self.adb.pull([br_out_path, full_out_path])
else:
self.adb.bugreport((' > "%s"' % full_out_path), shell=True, timeout=timeout)
self.log.info('Bugreport for %s taken at %s.', test_name, full_out_path)
|
Takes a bug report on the device and stores it in a file.
Args:
test_name: Name of the test method that triggered this bug report.
begin_time: Timestamp of when the test started.
timeout: float, the number of seconds to wait for bugreport to
complete, default is 5min.
destination: string, path to the directory where the bugreport
should be saved.
|
codesearchnet
|
def get_version():
if PackageHelper.__version:
return PackageHelper.__version
PackageHelper.__version = 'Unknown'
file = os.path.realpath(__file__)
folder = os.path.dirname(file)
try:
semver = open((folder + '/../../.semver'), 'r')
PackageHelper.__version = semver.read().rstrip()
semver.close()
return PackageHelper.__version
except:
pass
try:
distribution = pkg_resources.get_distribution(PackageHelper.get_alias())
if distribution.version:
PackageHelper.__version = distribution.version
return PackageHelper.__version
except:
pass
return PackageHelper.__version
|
Get the version number of this package.
Returns:
str: The version number (major.minor.patch).
Note:
When this package is installed, the version number will be available through the
package resource details. Otherwise this method will look for a ``.semver`` file.
Note:
In rare cases corrupt installs can cause the version number to be unknown. In this case
the version number will be set to the string "Unknown".
|
codesearchnet
|
def add_or_update(data, item, value):
data = data.splitlines()
data = map(lambda x: bytearray(x), data)
conf = filter(lambda x: x.strip() and x.strip().split()[0] == item, data)
if conf:
conf[0][:] = conf[0].strip().split()[0] + " " + value
else:
comments = filter(
lambda x: x.strip().startswith("
and len(x.split("
and x.split("
and x.split("
data
)
if comments:
comments[0][:] = comments[0].split("
else:
data.append(item + " " + value + "\n")
return "\n".join(map(lambda x: str(x), data))
|
Add or update value in configuration file format used by proftpd.
Args:
data (str): Configuration file as string.
item (str): What option will be added/updated.
value (str): Value of option.
Returns:
str: updated configuration
|
juraj-google-style
|
def download(self):
self.downloaded_paths = list()
for path in self.paths_for_download:
downloaded_path = list()
utils.mkdir_p(os.path.abspath(self.directory))
sra_run = path.split('/')[(- 1)]
logger.info(('Analysing %s' % sra_run))
url = type(self).FTP_ADDRESS_TPL.format(range_subdir=sra_run[:6], file_dir=sra_run)
logger.debug('URL: %s', url)
filepath = os.path.abspath(os.path.join(self.directory, ('%s.sra' % sra_run)))
utils.download_from_url(url, filepath, aspera=self.aspera, silent=self.silent, force=self.force)
if (self.filetype in ('fasta', 'fastq')):
if (utils.which('fastq-dump') is None):
logger.error('fastq-dump command not found')
ftype = ''
if (self.filetype == 'fasta'):
ftype = ' --fasta '
cmd = 'fastq-dump'
if (utils.which('parallel-fastq-dump') is None):
cmd += ' %s --outdir %s %s'
else:
logger.debug('Using parallel fastq-dump')
cmd = ' parallel-fastq-dump --threads %s'
cmd = (cmd % self.threads)
cmd += ' %s --outdir %s -s %s'
cmd = (cmd % (ftype, self.directory, filepath))
for (fqoption, fqvalue) in iteritems(self.fastq_dump_options):
if fqvalue:
cmd += (' --%s %s' % (fqoption, fqvalue))
elif (fqvalue is None):
cmd += (' --%s' % fqoption)
logger.debug(cmd)
process = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
logger.info(('Converting to %s/%s*.%s.gz\n' % (self.directory, sra_run, self.filetype)))
(pout, perr) = process.communicate()
downloaded_path = glob.glob(os.path.join(self.directory, ('%s*.%s.gz' % (sra_run, self.filetype))))
elif (self.filetype == 'sra'):
downloaded_path = glob.glob(os.path.join(self.directory, ('%s*.%s' % (sra_run, self.filetype))))
else:
downloaded_path = glob.glob(os.path.join(self.directory, ('%s*' % sra_run)))
logger.error(('Filetype %s not supported.' % self.filetype))
if ((not self.keep_sra) and (self.filetype != 'sra')):
os.unlink(filepath)
self.downloaded_paths += downloaded_path
return self.downloaded_paths
|
Download SRA files.
Returns:
:obj:`list` of :obj:`str`: List of downloaded files.
|
codesearchnet
|
def run(self, **kwargs):
super().run(**kwargs)
scheduler = self.scheduler_plugins[self.active_scheduler]()
if (not kwargs['no_daemon']):
self.log.info('Starting {} worker with {} threads checking for new messages every {} seconds'.format(scheduler.name, kwargs['threads'], kwargs['delay']))
for i in range(kwargs['threads']):
thd = threading.Thread(target=self.execute_worker_thread, args=(scheduler.execute_worker, kwargs['delay']))
thd.start()
else:
self.log.info('Starting {} worker for a single non-daemon execution'.format(scheduler.name))
scheduler.execute_worker()
|
Execute the worker thread.
Returns:
`None`
|
codesearchnet
|
def load_state_dict(module, state_dict, strict=False, logger=None):
unexpected_keys = []
own_state = module.state_dict()
for (name, param) in state_dict.items():
if (name not in own_state):
unexpected_keys.append(name)
continue
if isinstance(param, torch.nn.Parameter):
param = param.data
try:
own_state[name].copy_(param)
except Exception:
raise RuntimeError('While copying the parameter named {}, whose dimensions in the model are {} and whose dimensions in the checkpoint are {}.'.format(name, own_state[name].size(), param.size()))
missing_keys = (set(own_state.keys()) - set(state_dict.keys()))
err_msg = []
if unexpected_keys:
err_msg.append('unexpected key in source state_dict: {}\n'.format(', '.join(unexpected_keys)))
if missing_keys:
err_msg.append('missing keys in source state_dict: {}\n'.format(', '.join(missing_keys)))
err_msg = '\n'.join(err_msg)
if err_msg:
if strict:
raise RuntimeError(err_msg)
elif (logger is not None):
logger.warn(err_msg)
else:
print(err_msg)
|
Load state_dict to a module.
This method is modified from :meth:`torch.nn.Module.load_state_dict`.
Default value for ``strict`` is set to ``False`` and the message for
param mismatch will be shown even if strict is False.
Args:
module (Module): Module that receives the state_dict.
state_dict (OrderedDict): Weights.
strict (bool): whether to strictly enforce that the keys
in :attr:`state_dict` match the keys returned by this module's
:meth:`~torch.nn.Module.state_dict` function. Default: ``False``.
logger (:obj:`logging.Logger`, optional): Logger to log the error
message. If not specified, print function will be used.
|
codesearchnet
|
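A hedged usage sketch (requires PyTorch and assumes the load_state_dict helper above is in scope); with the default strict=False a key mismatch is only reported, not raised:
import torch

src = torch.nn.Linear(2, 2)
dst = torch.nn.Linear(2, 2)

state = src.state_dict()
state['unexpected.weight'] = torch.zeros(1)  # key that dst does not have

load_state_dict(dst, state)  # copies weight/bias, prints the "unexpected key" message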
def convert_variables_to_tensors(values):
def _convert_resource_variable_to_tensor(x):
if _pywrap_utils.IsResourceVariable(x):
return ops.convert_to_tensor(x)
elif isinstance(x, composite_tensor.CompositeTensor):
return composite_tensor.convert_variables_to_tensors(x)
else:
return x
return nest.map_structure(_convert_resource_variable_to_tensor, values)
|
Converts `ResourceVariable`s in `values` to `Tensor`s.
If an object is a `CompositeTensor` and overrides its
`_convert_variables_to_tensors` method, its `ResourceVariable` components
will also be converted to `Tensor`s. Objects other than `ResourceVariable`s
in `values` will be returned unchanged.
Args:
values: A nested structure of `ResourceVariable`s, or any other objects.
Returns:
A new structure with `ResourceVariable`s in `values` converted to `Tensor`s.
|
github-repos
|
def exists(self, path):
try:
return self._blobstorageIO().exists(path)
except Exception as e:
raise BeamIOError('Exists operation failed', {path: e})
|
Check if the provided path exists on the FileSystem.
Args:
path: string path that needs to be checked.
Returns: boolean flag indicating if path exists
|
github-repos
|
def verify_password(self, password, password_hash):
if isinstance(password_hash, self.user_manager.db_manager.UserClass):
print('Deprecation warning: verify_password(password, user) has been changed to: verify_password(password, password_hash). The user param will be deprecated. Please change your call with verify_password(password, user) into a call with verify_password(password, user.password) as soon as possible.')
password_hash = password_hash.password
return self.password_crypt_context.verify(password, password_hash)
|
Verify plaintext ``password`` against ``hashed password``.
Args:
password(str): Plaintext password that the user types in.
password_hash(str): Password hash generated by a previous call to ``hash_password()``.
Returns:
| True when ``password`` matches ``password_hash``.
| False otherwise.
Example:
::
if verify_password('mypassword', user.password):
login_user(user)
|
codesearchnet
|
def stepEnabled(self):
if ((self.value() > self.minimum()) and (self.value() < self.maximum())):
return (self.StepUpEnabled | self.StepDownEnabled)
elif (self.value() <= self.minimum()):
return self.StepUpEnabled
elif (self.value() >= self.maximum()):
return self.StepDownEnabled
|
Virtual function that determines whether stepping up and down is legal at any given time.
Returns:
OR'ed combination of StepUpEnabled | StepDownEnabled
|
codesearchnet
|
def _add_new_ide_controller_helper(ide_controller_label,
controller_key,
bus_number):
if controller_key is None:
controller_key = randint(-200, 250)
ide_spec = vim.vm.device.VirtualDeviceSpec()
ide_spec.device = vim.vm.device.VirtualIDEController()
ide_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
ide_spec.device.key = controller_key
ide_spec.device.busNumber = bus_number
ide_spec.device.deviceInfo = vim.Description()
ide_spec.device.deviceInfo.label = ide_controller_label
ide_spec.device.deviceInfo.summary = ide_controller_label
return ide_spec
|
Helper function for adding new IDE controllers
.. versionadded:: 2016.3.0
Args:
ide_controller_label: label of the IDE controller
controller_key: if not None, the controller key to use; otherwise it is randomly generated
bus_number: bus number
Returns: created device spec for an IDE controller
|
juraj-google-style
|
def get_data(__pkg: str, __name: str) -> str:
for dname in get_data_dirs(__pkg):
test_path = path.join(dname, __name)
if path.exists(test_path):
return test_path
raise FileNotFoundError('No data file {!r} for {!r}'.format(__name, __pkg))
|
Return top-most data file for given package.
Args:
__pkg: Package name
__name: Data file name
|
codesearchnet
|
def encoder_vgg(x, enc_final_size, reuse=False, scope_prefix='', hparams=None,
is_training=True):
with tf.variable_scope(scope_prefix + 'encoder', reuse=reuse):
x *= 256
x = x - COLOR_NORMALIZATION_VECTOR
with arg_scope(vgg.vgg_arg_scope()):
x = tf.pad(x, [[0, 0], [0, VGG_IMAGE_SIZE - IMG_WIDTH],
[0, VGG_IMAGE_SIZE - IMG_HEIGHT], [0, 0]])
_, end_points = vgg.vgg_16(
x,
num_classes=enc_final_size,
is_training=is_training)
pool5_key = [key for key in end_points.keys() if 'pool5' in key]
assert len(pool5_key) == 1
enc = end_points[pool5_key[0]]
enc = tf.slice(enc, [0, 0, 0, 0], [-1, 2, 2, -1])
enc_shape = enc.get_shape().as_list()
enc_shape[0] = -1
enc_size = enc_shape[1] * enc_shape[2] * enc_shape[3]
enc_flat = tf.reshape(enc, (-1, enc_size))
enc_flat = tf.nn.dropout(enc_flat, hparams.enc_keep_prob)
enc_flat = tf.layers.dense(
enc_flat,
enc_final_size,
kernel_initializer=tf.truncated_normal_initializer(stddev=1e-4,))
if hparams.enc_pred_use_l2norm:
enc_flat = tf.nn.l2_normalize(enc_flat, 1)
return enc_flat
|
VGG network to use as encoder without the top few layers.
Can be pretrained.
Args:
x: The image to encode. In the range 0 to 1.
enc_final_size: The desired size of the encoding.
reuse: To reuse in variable scope or not.
scope_prefix: The prefix before the scope name.
hparams: The python hparams.
is_training: boolean value indicating if training is happening.
Returns:
The encoded representation of the image (size enc_final_size).
|
juraj-google-style
|
def get_timestamped_export_dir(export_dir_base):
attempts = 0
while attempts < MAX_DIRECTORY_CREATION_ATTEMPTS:
timestamp = int(time.time())
result_dir = file_io.join(compat.as_bytes(export_dir_base), compat.as_bytes(str(timestamp)))
if not gfile.Exists(result_dir):
return result_dir
time.sleep(1)
attempts += 1
logging.warn('Directory {} already exists; retrying (attempt {}/{})'.format(compat.as_str(result_dir), attempts, MAX_DIRECTORY_CREATION_ATTEMPTS))
raise RuntimeError(f'Failed to obtain a unique export directory name after {MAX_DIRECTORY_CREATION_ATTEMPTS} attempts.')
|
Builds a path to a new subdirectory within the base directory.
Each export is written into a new subdirectory named using the
current time. This guarantees monotonically increasing version
numbers even across multiple runs of the pipeline.
The timestamp used is the number of seconds since epoch UTC.
Args:
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
Returns:
The full path of the new subdirectory (which is not actually created yet).
Raises:
RuntimeError: if repeated attempts fail to obtain a unique timestamped
directory name.
|
github-repos
|
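The directory name itself is just the base path joined with epoch seconds; a hedged os.path stand-in for the file_io.join call above:
import os
import time

export_dir_base = '/tmp/exports'  # illustrative base directory
timestamp = int(time.time())
print(os.path.join(export_dir_base, str(timestamp)))  # e.g. /tmp/exports/1700000000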
def get_all_supported_aspect_ratios(min_image_tiles: int, max_image_tiles: int) -> List[Tuple[int, int]]:
aspect_ratios = []
for width in range(1, max_image_tiles + 1):
for height in range(1, max_image_tiles + 1):
if width * height <= max_image_tiles and width * height >= min_image_tiles:
aspect_ratios.append((width, height))
aspect_ratios = sorted(aspect_ratios, key=lambda x: x[0] * x[1])
return aspect_ratios
|
Computes all allowed aspect ratios for a given minimum and maximum number of input tiles.
This function calculates all possible arrangements of tiles that can be formed
within the constraint of the minimum and maximum number of tiles. Each arrangement is
represented by its aspect ratio (width/height) and the corresponding tile configuration.
Args:
min_image_tiles (`int`):
The minimum number of tiles allowed.
max_image_tiles (`int`):
The maximum number of tiles allowed.
Returns:
`List[Tuple[int, int]]`: A list of tuples, each tuple representing a valid (width, height)
configuration in terms of number of tiles.
Example:
>>> get_all_supported_aspect_ratios(1, 4)
[(1, 1), (1, 2), (2, 1), (1, 3), (3, 1), (1, 4), (2, 2), (4, 1)]
|
github-repos
|
def from_string(cls, key, password='notasecret'):
key = _helpers._from_bytes(key)
(marker_id, key_bytes) = pem.readPemBlocksFromFile(six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER)
if (marker_id == 0):
pkey = rsa.key.PrivateKey.load_pkcs1(key_bytes, format='DER')
elif (marker_id == 1):
(key_info, remaining) = decoder.decode(key_bytes, asn1Spec=_PKCS8_SPEC)
if (remaining != b''):
raise ValueError('Unused bytes', remaining)
pkey_info = key_info.getComponentByName('privateKey')
pkey = rsa.key.PrivateKey.load_pkcs1(pkey_info.asOctets(), format='DER')
else:
raise ValueError('No key could be detected.')
return cls(pkey)
|
Construct an RsaSigner instance from a string.
Args:
key: string, private key in PEM format.
password: string, password for private key file. Unused for PEM
files.
Returns:
RsaSigner instance.
Raises:
ValueError if the key cannot be parsed as PKCS#1 or PKCS#8 in
PEM format.
|
codesearchnet
|
def serialize_skycoord(o):
representation = o.representation.get_name()
frame = o.frame.name
r = o.represent_as('spherical')
d = dict(
_type='astropy.coordinates.SkyCoord',
frame=frame,
representation=representation,
lon=r.lon,
lat=r.lat)
if len(o.distance.unit.to_string()):
d['distance'] = r.distance
return d
|
Serializes an :obj:`astropy.coordinates.SkyCoord`, for JSONification.
Args:
o (:obj:`astropy.coordinates.SkyCoord`): :obj:`SkyCoord` to be serialized.
Returns:
A dictionary that can be passed to :obj:`json.dumps`.
|
juraj-google-style
|
def get_user_information(self):
url = 'https:
headers = self.__gen_headers()
headers['Content-Type'] = 'application/json'
r = requests.get(url, headers=headers)
return r.json()
|
Gets the current user information, including sensor ID
Args:
None
Returns:
dictionary object containing information about the current user
|
codesearchnet
|
def __init__(self, topic_path, add_uuids=None, expansion_service=None):
if add_uuids is None:
add_uuids = False
if expansion_service is None:
expansion_service = _default_io_expansion_service()
super().__init__('beam:transform:org.apache.beam:pubsublite_write:v1', NamedTupleBasedPayloadBuilder(_WriteSchema(topic_path=topic_path, add_uuids=add_uuids)), expansion_service)
|
Initializes a write operation to Pub/Sub Lite, writing the serialized bytes
of PubSubMessage protos.
Args:
topic_path: A Pub/Sub Lite Topic path.
add_uuids: Whether to add uuids to the 'x-goog-pubsublite-dataflow-uuid'
uuid attribute.
|
github-repos
|
def load_op_from_signature_def(signature_def, key, import_scope=None):
tensor_info = signature_def.outputs[key]
try:
return utils.get_element_from_tensor_info(tensor_info, import_scope=import_scope)
except KeyError:
raise errors.NotFoundError(None, None, f'The key "{key}" could not be found in the graph. Please make sure the SavedModel was created by the internal _SavedModelBuilder. If you are using the public API, please make sure the SignatureDef in the SavedModel does not contain the key "{key}".')
|
Load an Op from a SignatureDef created by op_signature_def().
Args:
signature_def: a SignatureDef proto
key: string key to op in the SignatureDef outputs.
import_scope: Scope used to import the op
Returns:
Op (or possibly Tensor) in the graph with the same name as saved in the
SignatureDef.
Raises:
NotFoundError: If the op could not be found in the graph.
|
github-repos
|
def matchall(text, patterns):
ret = []
for pattern in patterns:
match = re.findall(pattern, text)
ret += match
return ret
|
Scans through a string for substrings matched by some patterns.
Args:
text: A string to be scanned.
patterns: a list of regex patterns.
Returns:
a list of matches; empty if none matched.
|
juraj-google-style
|
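A quick self-contained check of the concatenation behaviour described above, inlining the same findall loop:
import re

text = 'a1 b22 c3'
patterns = [r'[a-z]\d', r'\d{2}']
result = []
for pattern in patterns:
    result += re.findall(pattern, text)
print(result)  # ['a1', 'b2', 'c3', '22']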
def _WsdlHasMethod(self, method_name):
try:
self._method_bindings.get(method_name)
return True
except ValueError:
return False
|
Determine if a method is in the wsdl.
Args:
method_name: The name of the method.
Returns:
True if the method is in the wsdl, otherwise False.
|
codesearchnet
|
def learn_one(self, x: beam.Row) -> None:
raise NotImplementedError
|
Trains the detector on a single data instance.
Args:
x: A `beam.Row` representing the data instance.
|
github-repos
|
def Where(self, field):
where_builder = _WhereBuilder(self, field)
self.where_builders.append(where_builder)
return where_builder
|
Creates a WHERE builder using a provided field.
Args:
field: the field to be added as an argument in the WHERE clause.
Returns:
The created WHERE builder.
|
juraj-google-style
|
def _Operations(self, rule, line):
if (rule.record_op == 'Record'):
self._AppendRecord()
elif (rule.record_op == 'Clear'):
self._ClearRecord()
elif (rule.record_op == 'Clearall'):
self._ClearAllRecord()
if (rule.line_op == 'Error'):
if rule.new_state:
raise TextFSMError(('Error: %s. Rule Line: %s. Input Line: %s.' % (rule.new_state, rule.line_num, line)))
raise TextFSMError(('State Error raised. Rule Line: %s. Input Line: %s' % (rule.line_num, line)))
elif (rule.line_op == 'Continue'):
return False
return True
|
Operators on the data record.
Operators come in two parts and are a '.' separated pair:
Operators that affect the input line or the current state (line_op).
'Next' Get next input line and restart parsing (default).
'Continue' Keep the current input line and resume parsing.
'Error' Unrecoverable input discard result and raise Error.
Operators that affect the record being built for output (record_op).
'NoRecord' Does nothing (default)
'Record' Adds the current record to the result.
'Clear' Clears non-Filldown data from the record.
'Clearall' Clears all data from the record.
Args:
rule: FSMRule object.
line: A string, the current input line.
Returns:
True if state machine should restart state with new line.
Raises:
TextFSMError: If Error state is encountered.
|
codesearchnet
|
def convertData(self, contents, def_buf, kwh_scale=ScaleKWH.EmptyScale):
log_str = ''
count = 0
if (kwh_scale == ScaleKWH.EmptyScale):
scale_offset = int(def_buf.keys().index(Field.kWh_Scale))
self.m_kwh_precision = kwh_scale = int(contents[scale_offset])
for fld in def_buf:
if def_buf[fld][MeterData.CalculatedFlag]:
count += 1
continue
if (len(contents) == 0):
count += 1
continue
try:
raw_data = contents[count]
fld_type = def_buf[fld][MeterData.TypeValue]
fld_scale = def_buf[fld][MeterData.ScaleValue]
if (fld_type == FieldType.Float):
float_data = float(str(raw_data))
divisor = 1
if (fld_scale == ScaleType.KWH):
divisor = 1
if (kwh_scale == ScaleKWH.Scale10):
divisor = 10
elif (kwh_scale == ScaleKWH.Scale100):
divisor = 100
elif ((kwh_scale != ScaleKWH.NoScale) and (kwh_scale != ScaleKWH.EmptyScale)):
ekm_log('Unrecognized kwh scale.')
elif (fld_scale == ScaleType.Div10):
divisor = 10
elif (fld_scale == ScaleType.Div100):
divisor = 100
elif (fld_scale != ScaleType.No):
ekm_log('Unrecognized float scale.')
float_data /= divisor
float_data_str = str(float_data)
def_buf[fld][MeterData.StringValue] = float_data_str
def_buf[fld][MeterData.NativeValue] = float_data
elif (fld_type == FieldType.Hex):
hex_data = raw_data.encode('hex')
def_buf[fld][MeterData.StringValue] = hex_data
def_buf[fld][MeterData.NativeValue] = hex_data
elif (fld_type == FieldType.Int):
integer_data = int(raw_data)
integer_data_str = str(integer_data)
if (len(integer_data_str) == 0):
integer_data_str = str(0)
def_buf[fld][MeterData.StringValue] = integer_data_str
def_buf[fld][MeterData.NativeValue] = integer_data
elif (fld_type == FieldType.String):
string_data = str(raw_data)
def_buf[fld][MeterData.StringValue] = string_data
def_buf[fld][MeterData.NativeValue] = string_data
elif (fld_type == FieldType.PowerFactor):
def_buf[fld][MeterData.StringValue] = str(raw_data)
def_buf[fld][MeterData.NativeValue] = str(raw_data)
else:
ekm_log('Unrecognized field type')
log_str = (((((log_str + '"') + fld) + '": "') + def_buf[fld][MeterData.StringValue]) + '"\n')
except:
ekm_log(('Exception on Field:' + str(fld)))
ekm_log(traceback.format_exc(sys.exc_info()))
self.writeCmdMsg(('Exception on Field:' + str(fld)))
count += 1
return True
|
Move data from the raw tuple into scaled and converted values.
Args:
contents (tuple): Breakout of passed block from unpackStruct().
def_buf (): Read buffer destination.
kwh_scale (int): :class:`~ekmmeters.ScaleKWH` as int, from Field.kWh_Scale.
Returns:
bool: True on completion.
|
codesearchnet
|
def _unflatten_beam_dim(tensor, batch_size, beam_size):
shape = _shape_list(tensor)
new_shape = [batch_size, beam_size] + shape[1:]
return tf.reshape(tensor, new_shape)
|
Reshapes first dimension back to [batch_size, beam_size].
Args:
tensor: Tensor to reshape of shape [batch_size*beam_size, ...]
batch_size: Tensor, original batch size.
beam_size: int, original beam size.
Returns:
Reshaped tensor of shape [batch_size, beam_size, ...]
|
juraj-google-style
|
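A minimal standalone sketch of the reshape that _unflatten_beam_dim performs, assuming TensorFlow 2 eager execution; the sizes are illustrative:
import tensorflow as tf

batch_size, beam_size, length = 2, 3, 5
# A tensor whose beam dimension has been folded into the batch dimension.
flat = tf.zeros([batch_size * beam_size, length])
# Restore the leading [batch_size, beam_size] dimensions, keeping the rest of the shape.
unflat = tf.reshape(flat, [batch_size, beam_size] + flat.shape.as_list()[1:])
print(unflat.shape)  # (2, 3, 5)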
def _open_rpc_interface(self, connection_id, callback):
try:
context = self.connections.get_context(connection_id)
except ArgumentError:
callback(connection_id, self.id, False, "Could not find connection information")
return
self.connections.begin_operation(connection_id, 'open_interface', callback, self.get_config('default_timeout'))
try:
service = context['services'][TileBusService]
header_characteristic = service[ReceiveHeaderChar]
payload_characteristic = service[ReceivePayloadChar]
except KeyError:
self.connections.finish_operation(connection_id, False, "Can't find characteristics to open rpc interface")
return
self.bable.set_notification(
enabled=True,
connection_handle=context['connection_handle'],
characteristic=header_characteristic,
on_notification_set=[self._on_interface_opened, context, payload_characteristic],
on_notification_received=self._on_notification_received,
sync=False
)
|
Enable RPC interface for this IOTile device
Args:
connection_id (int): The unique identifier for the connection
callback (callback): Callback to be called when this command finishes
callback(conn_id, adapter_id, success, failure_reason)
|
juraj-google-style
|
def get_shannon_radius(self, cn: str, spin: str='', radius_type: str='ionic'):
radii = self._el.data['Shannon radii']
if (len(radii[str(int(self._oxi_state))][cn]) == 1):
(k, data) = list(radii[str(int(self._oxi_state))][cn].items())[0]
if (k != spin):
warnings.warn(('Specified spin state of %s not consistent with database spin of %s. Only one spin data available, and that value is returned.' % (spin, k)))
else:
data = radii[str(int(self._oxi_state))][cn][spin]
return data[('%s_radius' % radius_type)]
|
Get the local environment specific ionic radius for species.
Args:
cn (str): Coordination using roman letters. Supported values are
I-IX, as well as IIIPY, IVPY and IVSQ.
spin (str): Some species have different radii for different
spins. You can get specific values using "High Spin" or
"Low Spin". Leave it as "" if not available. If only one spin
data is available, it is returned and this spin parameter is
ignored.
radius_type (str): Either "crystal" or "ionic" (default).
Returns:
Shannon radius for specie in the specified environment.
|
codesearchnet
|
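A hedged usage sketch for get_shannon_radius, assuming the pymatgen Species class (named Specie in older releases); the values returned depend on the bundled Shannon radii table:
from pymatgen.core.periodic_table import Species

fe2 = Species("Fe", 2)  # Fe2+ species
# Octahedral (VI) coordination, high-spin, default "ionic" radius type.
r_ionic = fe2.get_shannon_radius("VI", spin="High Spin")
# Crystal radius for the same local environment.
r_crystal = fe2.get_shannon_radius("VI", spin="High Spin", radius_type="crystal")
print(r_ionic, r_crystal)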
def from_raw(self, file_names=None, **kwargs):
if file_names:
self.file_names = file_names
if not isinstance(file_names, (list, tuple)):
self.file_names = [file_names, ]
raw_file_loader = self.loader
set_number = 0
test = None
counter = 0
self.logger.debug("start iterating through file(s)")
for f in self.file_names:
self.logger.debug("loading raw file:")
self.logger.debug(f"{f}")
new_tests = raw_file_loader(f, **kwargs)
if new_tests:
if test is not None:
self.logger.debug("continuing reading files...")
_test = self._append(test[set_number], new_tests[set_number])
if not _test:
self.logger.warning(f"EMPTY TEST: {f}")
continue
test[set_number] = _test
self.logger.debug("added this test - started merging")
for j in range(len(new_tests[set_number].raw_data_files)):
raw_data_file = new_tests[set_number].raw_data_files[j]
file_size = new_tests[set_number].raw_data_files_length[j]
test[set_number].raw_data_files.append(raw_data_file)
test[set_number].raw_data_files_length.append(file_size)
counter += 1
if counter > 10:
self.logger.debug("ERROR? Too many files to merge")
raise ValueError("Too many files to merge - "
"could be a p2-p3 zip thing")
else:
self.logger.debug("getting data from first file")
if new_tests[set_number].no_data:
self.logger.debug("NO DATA")
else:
test = new_tests
else:
self.logger.debug("NOTHING LOADED")
self.logger.debug("finished loading the raw-files")
test_exists = False
if test:
if test[0].no_data:
self.logger.debug("the first dataset (or only dataset) loaded from the raw data file is empty")
else:
test_exists = True
if test_exists:
if not prms.Reader.sorted_data:
self.logger.debug("sorting data")
test[set_number] = self._sort_data(test[set_number])
self.datasets.append(test[set_number])
else:
self.logger.warning("No new datasets added!")
self.number_of_datasets = len(self.datasets)
self.status_datasets = self._validate_datasets()
self._invent_a_name()
return self
|
Load a raw data-file.
Args:
file_names (list of raw-file names): uses CellpyData.file_names if
None. If the list contains more than one file name, then the
runs will be merged together.
|
juraj-google-style
|
def DotProductAttention(query, key, value, mask, dropout, mode, rng):
depth = np.shape(query)[-1]
dots = np.matmul(query, np.swapaxes(key, -1, -2)) / np.sqrt(depth)
if mask is not None:
dots = np.where(mask, dots, -1e9)
dots = np.exp(dots - backend.logsumexp(dots, axis=-1, keepdims=True))
if dropout >= 1.0:
raise ValueError('Dropout rates must be lower than 1.')
if dropout is not None and dropout > 0.0 and mode == 'train':
keep = backend.random.bernoulli(rng, 1.0 - dropout, dots.shape)
dots = np.where(keep, dots / (1.0 - dropout), 0)
out = np.matmul(dots, value)
return out
|
Core dot product self-attention.
Args:
query: array of representations
key: array of representations
value: array of representations
mask: attention-mask, gates attention
dropout: float: dropout rate
mode: 'eval' or 'train': whether to use dropout
rng: JAX PRNGKey: subkey for disposable use
Returns:
Self attention for q, k, v arrays.
|
juraj-google-style
|
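A plain-NumPy re-expression of the eval path of DotProductAttention above (no dropout, no trax backend); it is a sketch of the same scaled dot-product attention, not the library function:
import numpy as np

def dot_product_attention_eval(query, key, value, mask=None):
    depth = query.shape[-1]
    dots = np.matmul(query, np.swapaxes(key, -1, -2)) / np.sqrt(depth)
    if mask is not None:
        dots = np.where(mask, dots, -1e9)
    # Numerically stable softmax over the last axis.
    dots = dots - dots.max(axis=-1, keepdims=True)
    weights = np.exp(dots) / np.exp(dots).sum(axis=-1, keepdims=True)
    return np.matmul(weights, value)

rng = np.random.default_rng(0)
q = rng.normal(size=(1, 4, 8))  # (batch, positions, depth)
k = rng.normal(size=(1, 4, 8))
v = rng.normal(size=(1, 4, 8))
print(dot_product_attention_eval(q, k, v).shape)  # (1, 4, 8)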
def autodiscover(self, autoregister=True):
logger.debug(('<%s> Sending autodiscover message to broadcast address' % str(self.cuuid)))
if (not self.listener.listening):
logger.warning('Neteria client is not listening. The client will not be able to process responses from the server')
message = serialize_data({'method': 'OHAI', 'version': self.version, 'cuuid': str(self.cuuid)}, self.compression, encryption=False)
if autoregister:
self.autoregistering = True
self.listener.send_datagram(message, ('<broadcast>', self.server_port), message_type='broadcast')
|
This function will send out an autodiscover broadcast to find a
Neteria server. Any servers that respond with an "OHAI CLIENT"
packet are servers that we can connect to. Servers that respond are
stored in the "discovered_servers" list.
Args:
autoregister (boolean): Whether or not to automatically register
with any responding servers. Defaults to True.
Returns:
None
Examples:
>>> myclient = neteria.client.NeteriaClient()
>>> myclient.listen()
>>> myclient.autodiscover()
>>> myclient.discovered_servers
{('192.168.0.20', 40080): u'1.0', ('192.168.0.82', 40080): '2.0'}
|
codesearchnet
|
def encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[List[List[int]]]=None, word_labels: Optional[List[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)
return self._encode_plus(text=text, boxes=boxes, text_pair=text_pair, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
|
Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated,
`__call__` should be used instead.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
text_pair (`List[str]` or `List[int]`, *optional*):
Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
list of list of strings (words of a batch of examples).
|
github-repos
|
def map_or(self, callback: Callable[[T], U], default: A) -> Union[U, A]:
return (callback(self._val) if self._is_some else default)
|
Applies the ``callback`` to the contained value or returns ``default``.
Args:
callback: The callback to apply to the contained value.
default: The default value.
Returns:
The ``callback`` result if the contained value is ``Some``,
otherwise ``default``.
Notes:
If you wish to use the result of a function call as ``default``,
it is recommended to use :py:meth:`map_or_else` instead.
Examples:
>>> Some(0).map_or(lambda x: x + 1, 1000)
1
>>> NONE.map_or(lambda x: x * x, 1)
1
|
codesearchnet
|
def get_description(self, description_type=DescriptionTypeEnum.FULL):
try:
if self._parsed is False:
parser = ExpressionParser(self._expression, self._options)
self._expression_parts = parser.parse()
self._parsed = True
choices = {
DescriptionTypeEnum.FULL: self.get_full_description,
DescriptionTypeEnum.TIMEOFDAY: self.get_time_of_day_description,
DescriptionTypeEnum.HOURS: self.get_hours_description,
DescriptionTypeEnum.MINUTES: self.get_minutes_description,
DescriptionTypeEnum.SECONDS: self.get_seconds_description,
DescriptionTypeEnum.DAYOFMONTH: self.get_day_of_month_description,
DescriptionTypeEnum.MONTH: self.get_month_description,
DescriptionTypeEnum.DAYOFWEEK: self.get_day_of_week_description,
DescriptionTypeEnum.YEAR: self.get_year_description,
}
description = choices.get(description_type, self.get_seconds_description)()
except Exception as ex:
if self._options.throw_exception_on_parse_error:
raise
else:
description = str(ex)
return description
|
Generates a human readable string for the Cron Expression
Args:
description_type: Which part(s) of the expression to describe
Returns:
The cron expression description
Raises:
Exception: if throw_exception_on_parse_error is True
|
juraj-google-style
|
def get_dev_examples(self, data_dir, filename=None):
if data_dir is None:
data_dir = ''
if self.dev_file is None:
raise ValueError('SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor')
with open(os.path.join(data_dir, self.dev_file if filename is None else filename), 'r', encoding='utf-8') as reader:
input_data = json.load(reader)['data']
return self._create_examples(input_data, 'dev')
|
Returns the evaluation examples from the data directory.
Args:
data_dir: Directory containing the data files used for training and evaluating.
filename: None by default, specify this if the evaluation file has a different name than the original one
which is `dev-v1.1.json` and `dev-v2.0.json` for squad versions 1.1 and 2.0 respectively.
|
github-repos
|
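A hedged usage sketch for get_dev_examples; the ./squad_data directory is an assumption, and dev-v2.0.json must already be present there:
from transformers.data.processors.squad import SquadV2Processor

processor = SquadV2Processor()
# Loads ./squad_data/dev-v2.0.json by default for the v2 processor.
examples = processor.get_dev_examples("./squad_data")
# A differently named evaluation file can be passed explicitly:
# examples = processor.get_dev_examples("./squad_data", filename="my_dev.json")
print(len(examples), examples[0].question_text)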
def payment(self, origin, destination, amount):
if (type(amount) != Decimal):
amount = Decimal(amount)
if (amount <= 0):
raise Exception('Amount must be a positive number')
all_addresses = []
accounts = self.listaccounts()
if (origin in accounts):
if (destination in accounts):
with self.openwallet():
result = self.move(origin, destination, amount)
return self.record_tx(origin, None, amount, result, destination)
for account in accounts:
addresses = self.getaddressesbyaccount(account)
if (destination in addresses):
with self.openwallet():
result = self.move(origin, account, amount)
return self.record_tx(origin, destination, amount, result, account)
else:
with self.openwallet():
txhash = self.sendfrom(origin, destination, amount)
return self.record_tx(origin, destination, amount, txhash)
|
Convenience method for sending Bitcoins.
Send coins from origin to destination. Calls record_tx to log the
transaction to database. Uses free, instant "move" transfers
if addresses are both local (in the same wallet), and standard
"sendfrom" transactions otherwise.
The sender is required to be specified by user_id (account label);
however, the recipient can be specified either by Bitcoin address
(anyone) or user_id (if the user is local).
Payment tries sending Bitcoins in this order:
1. "move" from account to account (local)
2. "move" from account to address (local)
3. "sendfrom" account to address (broadcast)
Args:
origin (str): user_id of the sender
destination (str): coin address or user_id of the recipient
amount (str, Decimal, number): amount to send
Returns:
bool: True if successful, False otherwise
|
codesearchnet
|
def get_log_id(cls, id):
conn = Qubole.agent()
r = conn.get_raw((cls.element_path(id) + '/logs'))
return r.text
|
Fetches log for the command represented by this id
Args:
`id`: command id
|
codesearchnet
|
def mark_complex(self, name, serializer, deserializer):
self._complex_properties[name] = (serializer, deserializer)
|
Mark a property as complex with serializer and deserializer functions.
Args:
name (str): The name of the complex property.
serializer (callable): The function to call to serialize the property's
value to something that can be saved as JSON.
deserializer (callable): The function to call to deserialize the property
from the JSON-loaded value back to the original value.
|
codesearchnet
|
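An illustrative sketch of the mark_complex contract; JsonStateMixin and the 'created' property are hypothetical stand-ins, not the original class:
from datetime import datetime

class JsonStateMixin:
    # Minimal stand-in mirroring the mark_complex contract above.
    def __init__(self):
        self._complex_properties = {}

    def mark_complex(self, name, serializer, deserializer):
        self._complex_properties[name] = (serializer, deserializer)

state = JsonStateMixin()
state.mark_complex(
    'created',
    lambda value: value.isoformat(),          # datetime -> JSON-safe string
    lambda raw: datetime.fromisoformat(raw),  # JSON string -> datetime
)
ser, deser = state._complex_properties['created']
print(deser(ser(datetime(2020, 1, 1))))  # 2020-01-01 00:00:00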
def get(self, url, params=None, **kwargs):
check_type(url, basestring, may_be_none=False)
check_type(params, dict)
erc = kwargs.pop('erc', EXPECTED_RESPONSE_CODE['GET'])
response = self.request('GET', url, erc, params=params, **kwargs)
return extract_and_parse_json(response)
|
Sends a GET request.
Args:
url(basestring): The URL of the API endpoint.
params(dict): The parameters for the HTTP GET request.
**kwargs:
erc(int): The expected (success) response code for the request.
others: Passed on to the requests package.
Raises:
ApiError: If anything other than the expected response code is
returned by the Webex Teams API endpoint.
|
codesearchnet
|
def __convertChannelMask(self, channelsArray):
maskSet = 0
for eachChannel in channelsArray:
mask = 1 << eachChannel
maskSet = (maskSet | mask)
return maskSet
|
convert channelsArray to bitmask format
Args:
channelsArray: channel array (i.e. [21, 22])
Returns:
bitmask format corresponding to a given channel array
|
juraj-google-style
|
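A standalone sketch of the bitmask conversion performed by __convertChannelMask; the channel numbers are the ones from the docstring example:
def convert_channel_mask(channels):
    # OR together one bit per channel number.
    mask_set = 0
    for channel in channels:
        mask_set |= 1 << channel
    return mask_set

print(hex(convert_channel_mask([21, 22])))  # 0x600000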
def insert(self, meter_db):
if meter_db:
meter_db.dbInsert(self.m_req, self.m_raw_read_a, self.m_raw_read_b)
else:
ekm_log("Attempt to insert when no MeterDB assigned.")
pass
|
Insert into a :class:`~ekmmeters.MeterDB` subclass.
Please note MeterDB subclassing is only for the simplest case.
Args:
meter_db (MeterDB): Instance of subclass of MeterDB.
|
juraj-google-style
|
def run(xml_report_dir, xml_report_filter='TEST-', html_report_path='.', generate_exec_time_graphs=True, html_report_dir='report.th', initial_java_heap_size=None, maximum_java_heap_size=None):
cmd = []
cmd.append('java')
if initial_java_heap_size:
cmd.append('-Xms{}'.format(initial_java_heap_size))
if maximum_java_heap_size:
cmd.append('-Xmx{}'.format(maximum_java_heap_size))
cmd.append('-Dunitth.xml.report.filter={}'.format(xml_report_filter))
cmd.append('-Dunitth.html.report.path={}'.format(html_report_path))
cmd.append('-Dunitth.generate.exectimegraphs={}'.format('{}'.format(generate_exec_time_graphs).lower()))
cmd.append('-Dunitth.report.dir={}'.format(html_report_dir))
cmd.append('-jar')
cmd.append('"{}"'.format(resource_filename('unitth', 'lib/unitth/unitth.jar')))
cmd.append(xml_report_dir)
subprocess.check_call(' '.join(cmd), shell=True)
|
Use UnitTH to generate a test history report
Args:
xml_report_dir (:obj:`str`): Parent directory of XML reports of individual builds to generate a history report of
xml_report_filter (:obj:`str`, optional): Starts-with filter for individual reports with `xml_report_dir` that should
be included in the history report. Set `xml_report_filter` to '' to include all files/subdirectories in the history
report.
html_report_path (:obj:`str`, optional): Directory of HTML reports of individual builds (relative to XML directories of
individual builds)
generate_exec_time_graphs (:obj:`bool`, optional): Whether execution time graphs shall be generated
html_report_dir (:obj:`str`, optional): directory to store generated HTML history report
initial_java_heap_size (:obj:`str`, optional): initial Java heap size
maximum_java_heap_size (:obj:`str`, optional): maximum Java heap size
|
codesearchnet
|
def FlashFromFile(self, partition, source_file, source_len=0,
info_cb=DEFAULT_MESSAGE_CALLBACK, progress_callback=None):
if source_len == 0:
source_len = os.stat(source_file).st_size
download_response = self.Download(
source_file, source_len=source_len, info_cb=info_cb,
progress_callback=progress_callback)
flash_response = self.Flash(partition, info_cb=info_cb)
return download_response + flash_response
|
Flashes a partition from the file on disk.
Args:
partition: Partition name to flash to.
source_file: Filename to download to the device.
source_len: Optional length of source_file, uses os.stat if not provided.
info_cb: See Download.
progress_callback: See Download.
Returns:
Download and flash responses, normally nothing.
|
juraj-google-style
|
def update_clinvar_submission_status(self, user_id, submission_id, status):
LOG.info('closing clinvar submission "%s"', submission_id)
if status == 'open':
self.clinvar_submission_collection.update_many(
{'user_id' : user_id},
{'$set' :
{'status' : 'closed', 'updated_at' : datetime.now()}
}
)
updated_submission = self.clinvar_submission_collection.find_one_and_update(
{'_id' : ObjectId(submission_id)},
{'$set' :
{'status' : status, 'updated_at' : datetime.now()}
},
return_document=pymongo.ReturnDocument.AFTER
)
return updated_submission
|
Update the status of a clinvar submission
Args:
user_id(str): the ID of the user owning the submission
submission_id(str): the ID of the clinvar submission to update
status(str): the new status ('open' or 'closed'); setting one submission to 'open' closes any other open submission for the same user
Returns:
updated_submission(obj): the submission object with the updated status
|
juraj-google-style
|
def sigmoid_cross_entropy_with_logits(logits, targets):
if (logits.shape != targets.shape):
raise ValueError(('logits shape must equal targets shape. logits=%s targets=%s' % (logits.to_string, targets.to_string)))
x = logits
z = targets
return ((mtf.relu(x) - (x * z)) + mtf.log((1 + mtf.exp((- mtf.abs(x))))))
|
Sigmoid cross-entropy loss.
Args:
logits: a mtf.Tensor
targets: a mtf.Tensor with the same shape as logits
Returns:
a mtf.Tensor whose shape is equal to logits.shape
Raises:
ValueError: if the shapes do not match.
|
codesearchnet
|
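A plain-NumPy check of the identity used above, relu(x) - x*z + log(1 + exp(-|x|)), against the naive sigmoid cross-entropy; this is a verification sketch, not mtf code:
import numpy as np

def stable_form(x, z):
    return np.maximum(x, 0) - x * z + np.log1p(np.exp(-np.abs(x)))

def naive_form(x, z):
    # -z*log(sigmoid(x)) - (1-z)*log(1-sigmoid(x)); overflows for large |x|.
    p = 1.0 / (1.0 + np.exp(-x))
    return -z * np.log(p) - (1 - z) * np.log(1 - p)

x = np.array([-3.0, -0.5, 0.0, 2.0])
z = np.array([0.0, 1.0, 1.0, 0.0])
print(np.allclose(stable_form(x, z), naive_form(x, z)))  # True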
def shape_rb_data(raw_rb):
rb_data = []
rb_data.append(np.mean(raw_rb, 0))
rb_data.append(np.std(raw_rb, 0))
return rb_data
|
Take the raw rb data and convert it into averages and std dev
Args:
raw_rb (numpy.array): m x n x l list where m is the number of seeds, n
is the number of Clifford sequences and l is the number of qubits
Return:
numpy_array: 2 x n x l list where index 0 is the mean over seeds, 1 is
the std dev over seeds
|
juraj-google-style
|
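A small usage sketch for shape_rb_data with simulated randomized-benchmarking data; the array sizes are illustrative:
import numpy as np

# 5 seeds x 10 Clifford sequence lengths x 2 qubits of simulated survival probabilities.
raw_rb = np.random.default_rng(1).random((5, 10, 2))
rb_data = [np.mean(raw_rb, 0), np.std(raw_rb, 0)]  # what shape_rb_data returns
print(rb_data[0].shape, rb_data[1].shape)  # (10, 2) (10, 2)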
def hide_stevedore_logs():
stevedore_logger = logging.getLogger('stevedore.extension')
stevedore_logger.propagate = False
stevedore_logger.setLevel(logging.ERROR)
stevedore_logger.addHandler(logging.NullHandler())
|
Hides the logs of stevedore. This function was
added in order to support older versions of stevedore.
We use the NullHandler in order to get rid of the
'No handlers could be found for logger...' message.
Returns:
None
|
codesearchnet
|
def get_state_event(self, room_id, event_type):
return self._send("GET", "/rooms/{}/state/{}".format(quote(room_id), event_type))
|
Perform GET /rooms/$room_id/state/$event_type
Args:
room_id(str): The room ID.
event_type (str): The type of the event.
Raises:
MatrixRequestError(code=404) if the state event is not found.
|
juraj-google-style
|
def bind(self, **bindings):
new_context = dict(self._partial_context)
unknown_keys = []
for (k, v) in six.iteritems(bindings):
if (k not in self._unbound_vars):
unknown_keys.append(k)
new_context[self._unbound_vars[k]] = v
if unknown_keys:
raise ValueError(('The following keys are not associated with any unbound vars: %s, legal values are %s' % (unknown_keys, list(self._unbound_vars.keys()))))
return _DeferredLayer(self.bookkeeper, None, (), {}, scope=self._scope, defaults=self._defaults, pass_through=self, partial_context=new_context)
|
Creates a new template with the given unbound variables bound.
Args:
**bindings: Arguments for every deferred parameter.
Returns:
A new template with the given bindings.
Raises:
ValueError: If any of the bindings do not correspond to unbound variables.
|
codesearchnet
|
def _calc_digest(self, origin):
if hasattr(origin, 'read') and hasattr(origin, 'seek'):
pos = origin.tell()
digest = hashtools.calc_digest(origin, algorithm=self._conf['hash_alg'])
origin.seek(pos)
else:
digest = hashtools.calc_file_digest(origin, algorithm=self._conf['hash_alg'])
return digest
|
Calculate the digest for the given file or readable/seekable object.
Args:
origin -- could be the path of a file or a readable/seekable object (file object, stream, StringIO...)
Returns:
String representing the digest for the given origin
|
juraj-google-style
|
def add_vcenter(self, **kwargs):
config = ET.Element("config")
vcenter = ET.SubElement(config, "vcenter",
xmlns="urn:brocade.com:mgmt:brocade-vswitch")
id = ET.SubElement(vcenter, "id")
id.text = kwargs.pop('id')
credentials = ET.SubElement(vcenter, "credentials")
url = ET.SubElement(credentials, "url")
url.text = kwargs.pop('url')
username = ET.SubElement(credentials, "username")
username.text = kwargs.pop('username')
password = ET.SubElement(credentials, "password")
password.text = kwargs.pop('password')
try:
self._callback(config)
return True
except Exception as error:
logging.error(error)
return False
|
Add vCenter on the switch
Args:
id (str): Name of an established vCenter
url (str): vCenter URL
username (str): Username of the vCenter
password (str): Password of the vCenter
callback (function): A function executed upon completion of the
method.
Returns:
Return value of `callback`.
Raises:
None
|
juraj-google-style
|
def __init__(self, unresponsive_kill_period):
super(NannyThread, self).__init__(name="Nanny")
self.last_heart_beat_time = time.time()
self.unresponsive_kill_period = unresponsive_kill_period
self.running = True
self.daemon = True
self.proc = psutil.Process()
self.memory_quota = config.CONFIG["Client.rss_max_hard"] * 1024 * 1024
|
Constructor.
Args:
unresponsive_kill_period: The time in seconds which we wait for a
heartbeat.
|
juraj-google-style
|
def remove_forwarding_rules(self, forwarding_rules):
rules_dict = [rule.__dict__ for rule in forwarding_rules]
return self.get_data(('load_balancers/%s/forwarding_rules/' % self.id), type=DELETE, params={'forwarding_rules': rules_dict})
|
Removes existing forwarding rules from a LoadBalancer.
Args:
forwarding_rules (obj:`list`): A list of `ForwardingRule` objects
|
codesearchnet
|
def alloc_data(self, value):
if isinstance(value, six.binary_type):
return self._alloc_data(value)
elif isinstance(value, six.text_type):
return self._alloc_data((value.encode('utf-8') + b'\x00'))
else:
raise TypeError(('No idea how to encode %s' % repr(value)))
|
Allocate a piece of data that will be included in the shellcode body.
Arguments:
value(...): The value to add to the shellcode. Can be bytes or
string type.
Returns:
~pwnypack.types.Offset: The offset used to address the data.
|
codesearchnet
|
def get_capabilities(image=None):
if (salt.utils.versions.version_cmp(__grains__['osversion'], '10') == (- 1)):
raise NotImplementedError('`get_capabilities` is not available on this version of Windows: {0}'.format(__grains__['osversion']))
cmd = ['DISM', '/English', ('/Image:{0}'.format(image) if image else '/Online'), '/Get-Capabilities']
out = __salt__['cmd.run'](cmd)
pattern = 'Capability Identity : (.*)\\r\\n'
capabilities = re.findall(pattern, out, re.MULTILINE)
capabilities.sort()
return capabilities
|
List all capabilities on the system
Args:
image (Optional[str]): The path to the root directory of an offline
Windows image. If `None` is passed, the running operating system is
targeted. Default is None.
Raises:
NotImplementedError: For all versions of Windows that are not Windows 10
and later. Server editions of Windows use ServerManager instead.
Returns:
list: A list of capabilities
CLI Example:
.. code-block:: bash
salt '*' dism.get_capabilities
|
codesearchnet
|
def console(discord_token, discord_client_id):
(state, response) = datatools.get_compare_version()
logger.info('Starting Modis in console')
logger.info(response)
import threading
import asyncio
logger.debug('Loading packages')
from modis.discord_modis import main as discord_modis_console
from modis.reddit_modis import main as reddit_modis_console
from modis.facebook_modis import main as facebook_modis_console
logger.debug('Initiating threads')
loop = asyncio.get_event_loop()
discord_thread = threading.Thread(target=discord_modis_console.start, args=[discord_token, discord_client_id, loop])
reddit_thread = threading.Thread(target=reddit_modis_console.start, args=[])
facebook_thread = threading.Thread(target=facebook_modis_console.start, args=[])
logger.debug('Starting threads')
discord_thread.start()
reddit_thread.start()
facebook_thread.start()
logger.debug('Root startup completed')
|
Start Modis in console format.
Args:
discord_token (str): The bot token for your Discord application
discord_client_id: The bot's client ID
|
codesearchnet
|