code | docstring | source |
---|---|---|
def period_from_list(period: Tuple[int, List[int]]) -> dateslib.PeriodTensor:
amount = period[1]
period_type = period_pb2.PeriodType.Name(period[0])
return dateslib.PeriodTensor(amount, dateslib.PeriodType[period_type]) | Utility to convert a list of periods to a PeriodTensor.
Args:
period: A tuple of an integer (which corresponds to the proto type of the
period (see `period_pb2.Period`)) and a list of period values.
Returns:
An instance of the `PeriodTensor`. | github-repos |
def window_unpartition(self, windows: torch.Tensor, window_size: int, padding_shape: Tuple[int, int], original_shape: Tuple[int, int]) -> torch.Tensor:
pad_height, pad_width = padding_shape
height, width = original_shape
batch_size = windows.shape[0] // (pad_height * pad_width // window_size // window_size)
hidden_states = windows.reshape(batch_size, pad_height // window_size, pad_width // window_size, window_size, window_size, -1)
hidden_states = hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous().reshape(batch_size, pad_height, pad_width, -1)
hidden_states = hidden_states[:, :height, :width, :].contiguous()
return hidden_states | Window unpartition into original sequences and removing padding.
Args:
hidden_states (tensor):
input tokens with [batch_size * num_windows, window_size, window_size, channel].
window_size (int):
window size.
padding_shape (Tuple):
padded height and width (pad_height, pad_width).
original_shape (Tuple): original height and width (height, width) before padding.
Returns:
hidden_states: unpartitioned sequences with [batch_size, height, width, channel]. | github-repos |
def rsolve(A, b, epsilon=_epsilon):
A = asarray(A, float)
b = asarray(b, float)
if (A.shape[0] == 0):
return zeros((A.shape[1],))
if (A.shape[1] == 0):
return zeros((0,))
try:
x = lstsq(A, b, rcond=epsilon)
r = sum((x[3] > epsilon))
if (r == 0):
return zeros(A.shape[1])
return x[0]
except (ValueError, LinAlgError) as e:
warnings.warn(str(e), RuntimeWarning)
return solve(A, b) | Robust solve for the linear equations.
Args:
A (array_like): Coefficient matrix.
b (array_like): Ordinate values.
Returns:
:class:`numpy.ndarray`: Solution ``x``. | codesearchnet |
def broadcast_tensors(*args: Sequence[tf.Tensor], name: Optional[str]=None) -> Tuple[tf.Tensor]:
name = 'broadcast_tensors' if name is None else name
with tf.name_scope(name):
output_shape = common_shape(*args)
return tuple((tf.broadcast_to(arg, output_shape) for arg in args)) | Broadcasts arguments to the common shape.
#### Example
```python
import tensorflow as tf
import tf_quant_finance as tff
args = [tf.ones([1, 2], dtype=tf.float64), tf.constant([[True], [False]])]
tff.utils.broadcast_tensors(*args)
# Expected: (array([[1., 1.], [1., 1.]]),
# array([[True, True], [False, False]])
```
Args:
*args: A sequence of `Tensor`s of compatible shapes and any `dtype`s.
name: Python string. The name to give to the ops created by this function.
Default value: `None` which maps to the default name
`broadcast_tensors`.
Returns:
A tuple of broadcasted `Tensor`s. Each `Tensor` has the same `dtype` as the
corresponding input `Tensor`.
Raises:
ValueError: If inputs are of incompatible shapes. | github-repos |
def _infer_binary_broadcast_shape(shape1, shape2, given_output_shape=None):
shape1 = convert_to_shape(shape1)
shape2 = convert_to_shape(shape2)
given_output_shape = convert_to_shape(given_output_shape)
if (given_output_shape is not None):
return given_output_shape
if is_subsequence(shape1.dims, shape2.dims):
return shape2
if is_subsequence(shape2.dims, shape1.dims):
return shape1
return Shape((shape1.dims + [d for d in shape2.dims if (d not in shape1.dims)])) | Infer shape of the output of a binary op with broadcasting.
If the output shape is not given with given_output_shape, then we check
to see if one of the shapes is a subsequence of the other one, and we
return the one that is the supersequence. Otherwise, we list the dimensions
of shape1, followed by all new dimensions in shape2.
Args:
shape1: a Shape
shape2: a Shape
given_output_shape: an optional Shape
Returns:
a Shape | codesearchnet |
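A dependency-free sketch of the same inference rule may help; it uses plain lists of dimension names in place of the Mesh TensorFlow `Shape`/`Dimension` objects (an assumption made purely for illustration):

```python
def is_subsequence(short, long):
    # True if `short` appears in `long` in order (not necessarily contiguously).
    it = iter(long)
    return all(item in it for item in short)

def infer_binary_broadcast_shape(shape1, shape2):
    # Mirrors the rule above: prefer the supersequence, otherwise append the
    # new dimensions of shape2 after those of shape1.
    if is_subsequence(shape1, shape2):
        return shape2
    if is_subsequence(shape2, shape1):
        return shape1
    return shape1 + [d for d in shape2 if d not in shape1]

print(infer_binary_broadcast_shape(["batch"], ["batch", "hidden"]))
# ['batch', 'hidden']
print(infer_binary_broadcast_shape(["batch", "heads"], ["batch", "hidden"]))
# ['batch', 'heads', 'hidden']
```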
def collapse(self, dimensions=None, function=None, spreadfn=None, **kwargs):
from .data import concat
if (not dimensions):
dimensions = self.kdims
if (not isinstance(dimensions, list)):
dimensions = [dimensions]
if ((self.ndims > 1) and (len(dimensions) != self.ndims)):
groups = self.groupby([dim for dim in self.kdims if (dim not in dimensions)])
elif all(((d in self.kdims) for d in dimensions)):
groups = HoloMap([(0, self)])
else:
raise KeyError('Supplied dimensions not found.')
collapsed = groups.clone(shared_data=False)
for (key, group) in groups.items():
if hasattr(group.last, 'interface'):
group_data = concat(group)
if function:
agg = group_data.aggregate(group.last.kdims, function, spreadfn, **kwargs)
group_data = group.type(agg)
else:
group_data = [el.data for el in group]
args = (group_data, function, group.last.kdims)
data = group.type.collapse_data(*args, **kwargs)
group_data = group.last.clone(data)
collapsed[key] = group_data
return (collapsed if (self.ndims - len(dimensions)) else collapsed.last) | Concatenates and aggregates along supplied dimensions
Useful to collapse stacks of objects into a single object,
e.g. to average a stack of Images or Curves.
Args:
dimensions: Dimension(s) to collapse
Defaults to all key dimensions
function: Aggregation function to apply, e.g. numpy.mean
spreadfn: Secondary reduction to compute value spread
Useful for computing a confidence interval, spread, or
standard deviation.
**kwargs: Keyword arguments passed to the aggregation function
Returns:
Returns the collapsed element or HoloMap of collapsed
elements | codesearchnet |
def url_to_text(self, url):
(path, headers) = urllib.request.urlretrieve(url)
return self.path_to_text(path) | Download PDF file and transform its document to string.
Args:
url: PDF url.
Returns:
string. | codesearchnet |
def _to_snake_case(string):
sub_string = '\\1_\\2'
string = REGEX_CAMEL_FIRST.sub(sub_string, string)
return REGEX_CAMEL_SECOND.sub(sub_string, string).lower() | Return a snake cased version of the input string.
Args:
string (str): A camel cased string.
Returns:
str: A snake cased string. | codesearchnet |
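The two module-level regexes are not part of the snippet; the sketch below assumes the conventional camel-case-splitting patterns, which reproduce the documented behaviour:

```python
import re

# Assumed definitions of the module-level patterns used by _to_snake_case.
REGEX_CAMEL_FIRST = re.compile(r"(.)([A-Z][a-z]+)")
REGEX_CAMEL_SECOND = re.compile(r"([a-z0-9])([A-Z])")

def to_snake_case(string):
    sub_string = r"\1_\2"
    string = REGEX_CAMEL_FIRST.sub(sub_string, string)
    return REGEX_CAMEL_SECOND.sub(sub_string, string).lower()

print(to_snake_case("HTTPResponseCode"))  # http_response_code
print(to_snake_case("camelCasedName"))    # camel_cased_name
```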
def GetHelp(self, prefix='', include_special_flags=True):
helplist = []
flags_by_module = self.FlagsByModuleDict()
if flags_by_module:
modules = sorted(flags_by_module)
main_module = sys.argv[0]
if main_module in modules:
modules.remove(main_module)
modules = [main_module] + modules
for module in modules:
self.__RenderOurModuleFlags(module, helplist)
if include_special_flags:
self.__RenderModuleFlags('gflags',
_helpers.SPECIAL_FLAGS.FlagDict().values(),
helplist)
else:
values = self.FlagDict().values()
if include_special_flags:
values.append(_helpers.SPECIAL_FLAGS.FlagDict().values())
self.__RenderFlagList(values, helplist, prefix)
return '\n'.join(helplist) | Generates a help string for all known flags.
Args:
prefix: str, per-line output prefix.
include_special_flags: bool, whether to include description of
_SPECIAL_FLAGS, i.e. --flagfile and --undefok.
Returns:
str, formatted help message. | juraj-google-style |
def close(self):
if (self._fd is None):
return
try:
os.close(self._fd)
except OSError as e:
raise LEDError(e.errno, ('Closing LED: ' + e.strerror))
self._fd = None | Close the sysfs LED.
Raises:
LEDError: if an I/O or OS error occurs. | codesearchnet |
def clear(self, url=None, xpath=None):
if url is not None:
query = self._query(url, xpath)
if query.count() > 0:
query.delete()
self.session.commit()
else:
raise KeyError("Cannot clear URL, not in cache: " + str(url) + " xpath:" + str(xpath))
else:
self.close()
if path.exists(self.db_path):
remove(self.db_path) | Clear cache
Args:
url (str): If given, clear specific item only. Otherwise remove the DB file.
xpath (str): xpath to search (may be ``None``) | juraj-google-style |
def set_atten(self, idx, value):
if not self.is_open:
raise attenuator.Error('Connection to attenuator at %s is not open!' % self._telnet_client.host)
if idx + 1 > self.path_count:
raise IndexError('Attenuator index out of range!', self.path_count, idx)
if value > self.max_atten:
raise ValueError('Attenuator value out of range!', self.max_atten, value)
self._telnet_client.cmd('CHAN:%s:SETATT:%s' % (idx + 1, value)) | Sets the attenuation value for a particular signal path.
Args:
idx: Zero-based index int which is the identifier for a particular
signal path in an instrument. For instruments that only has one
channel, this is ignored by the device.
value: A float that is the attenuation value to set.
Raises:
Error: The underlying telnet connection to the instrument is not
open.
IndexError: The index of the attenuator is greater than the maximum
index of the underlying instrument.
ValueError: The requested set value is greater than the maximum
attenuation value. | github-repos |
def expand(self, pcoll):
return pcoll | core.CombinePerKey(TopCombineFn(self._n, self._key, self._reverse)) | Expands the transform.
Raises TypeCheckError: If the output type of the input PCollection is not
compatible with tuple[A, B].
Args:
pcoll: PCollection to process
Returns:
the PCollection containing the result. | github-repos |
def put(value):
worker = global_worker
worker.check_connected()
with profiling.profile('ray.put'):
if (worker.mode == LOCAL_MODE):
return value
object_id = ray._raylet.compute_put_id(worker.current_task_id, worker.task_context.put_index)
worker.put_object(object_id, value)
worker.task_context.put_index += 1
return object_id | Store an object in the object store.
Args:
value: The Python object to be stored.
Returns:
The object ID assigned to this value. | codesearchnet |
def is_separating(direction, polygon1, polygon2):
norm_squared = ((direction[0] * direction[0]) + (direction[1] * direction[1]))
params = []
vertex = np.empty((2,), order='F')
for polygon in (polygon1, polygon2):
(_, polygon_size) = polygon.shape
min_param = np.inf
max_param = (- np.inf)
for index in six.moves.xrange(polygon_size):
vertex[:] = polygon[:, index]
param = (cross_product(direction, vertex) / norm_squared)
min_param = min(min_param, param)
max_param = max(max_param, param)
params.append((min_param, max_param))
return ((params[0][0] > params[1][1]) or (params[0][1] < params[1][0])) | Checks if a given ``direction`` is a separating line for two polygons.
.. note::
This is a helper for :func:`_polygon_collide`.
Args:
direction (numpy.ndarray): A 1D ``2``-array (``float64``) of a
potential separating line for the two polygons.
polygon1 (numpy.ndarray): A ``2 x N`` array (``float64``) of ordered
points in a polygon.
polygon2 (numpy.ndarray): A ``2 x N`` array (``float64``) of ordered
points in a polygon.
Returns:
bool: Flag indicating if ``direction`` is a separating line. | codesearchnet |
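A self-contained demo of the separating-axis test follows; `cross_product` is assumed to be the scalar 2D cross product, and the polygons are `2 x N` vertex arrays as the docstring describes:

```python
import numpy as np

def cross_product(a, b):
    # Scalar 2D cross product (assumed to match the helper used above).
    return a[0] * b[1] - a[1] * b[0]

def is_separating(direction, polygon1, polygon2):
    norm_squared = direction[0] ** 2 + direction[1] ** 2
    params = []
    for polygon in (polygon1, polygon2):
        projections = [
            cross_product(direction, polygon[:, i]) / norm_squared
            for i in range(polygon.shape[1])
        ]
        params.append((min(projections), max(projections)))
    # Separating if the two projection intervals do not overlap.
    return params[0][0] > params[1][1] or params[0][1] < params[1][0]

triangle1 = np.asfortranarray([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
triangle2 = np.asfortranarray([[2.0, 3.0, 2.0], [0.0, 0.0, 1.0]])
# A vertical direction separates the two x-offset triangles.
print(is_separating(np.array([0.0, 1.0]), triangle1, triangle2))  # True
```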
def __init__(self, lookup_list, do_not_log_prefix=None):
super().__init__()
self._lookup_list = lookup_list
self._do_not_log_prefix = do_not_log_prefix | Create this visitor.
Args:
lookup_list: An iterable of symbol tables (i.e., objects that have a
"lookup" function)
do_not_log_prefix: If given, don't log error messages for classes with
this prefix. | github-repos |
def average_name(self, var):
if var.ref() in self._averages:
return self._averages[var.ref()].name[:-len(':0')]
return ops.get_default_graph().unique_name(var.name[:-len(':0')] + '/' + self.name, mark_as_used=False) | [Meant for TF1] Returns name of `Variable` holding the average for `var`.
(Designed to work with legacy `tf.compat.v1.train.Saver`, it is sensitive to
specific variable names and not recommended for TF2)
The typical scenario for `ExponentialMovingAverage` is to compute moving
averages of variables during training, and restore the variables from the
computed moving averages during evaluations.
To restore variables, you have to know the name of the shadow variables.
That name and the original variable can then be passed to a `Saver()` object
to restore the variable from the moving average value with:
`saver = tf.compat.v1.train.Saver({ema.average_name(var): var})`
`average_name()` can be called whether or not `apply()` has been called.
Args:
var: A `Variable` object.
Returns:
A string: The name of the variable that will be used or was used
by the `ExponentialMovingAverage class` to hold the moving average of
`var`. | github-repos |
def create_dummy_files(backend_specific_objects: Optional[Dict[str, List[str]]]=None) -> Dict[str, str]:
if backend_specific_objects is None:
backend_specific_objects = read_init()
dummy_files = {}
for backend, objects in backend_specific_objects.items():
backend_name = '[' + ', '.join((f'"{b}"' for b in backend.split('_and_'))) + ']'
dummy_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
dummy_file += 'from ..utils import DummyObject, requires_backends\n\n'
dummy_file += '\n'.join([create_dummy_object(o, backend_name) for o in objects])
dummy_files[backend] = dummy_file
return dummy_files | Create the content of the dummy files.
Args:
backend_specific_objects (`Dict[str, List[str]]`, *optional*):
The mapping backend name to list of backend-specific objects. If not passed, will be obtained by calling
`read_init()`.
Returns:
`Dict[str, str]`: A dictionary mapping backend name to code of the corresponding backend file. | github-repos |
def _data_to_tensor(data_list, batch_size, name=None):
const_list = [tf.constant(data) for data in data_list]
queue_list = tf.train.slice_input_producer(const_list, capacity=batch_size*128, name=name)
return tf.train.shuffle_batch(queue_list, batch_size, capacity=batch_size*128,
min_after_dequeue=batch_size*32, name=name) | r"""Returns batch queues from the whole data.
Args:
data_list: A list of ndarrays. Every array must have the same size in the first dimension.
batch_size: An integer.
name: A name for the operations (optional).
Returns:
A list of tensors of `batch_size`. | juraj-google-style |
def pairwise(lst):
if not lst:
return
length = len(lst)
for i in range(length - 1):
yield lst[i], lst[i + 1]
yield lst[-1], None | yield item i and item i+1 in lst. e.g.
(lst[0], lst[1]), (lst[1], lst[2]), ..., (lst[-1], None)
Args:
lst (list): List to process
Yields:
tuple: (lst[i], lst[i+1]) pairs, ending with (lst[-1], None) | juraj-google-style |
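A short usage sketch of the generator above:

```python
for current, nxt in pairwise([1, 2, 3]):
    print(current, nxt)
# 1 2
# 2 3
# 3 None
```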
def _recur_flatten(key, x, out, sep='.'):
if x is None or isinstance(x, (str, int, float, bool)):
out[key] = x
return out
if isinstance(x, list):
for i, v in enumerate(x):
new_key = '{}{}{}'.format(key, sep, i)
out = _recur_flatten(new_key, v, out, sep)
if isinstance(x, dict):
for k, v in x.items():
new_key = '{}{}{}'.format(key, sep, k)
out = _recur_flatten(new_key, v, out, sep)
return out | Helper function to flatten_dict
Recursively flatten all nested values within a dict
Args:
key (str): parent key
x (object): object to flatten or add to out dict
out (dict): 1D output dict
sep (str): flattened key separator string
Returns:
dict: flattened 1D dict | juraj-google-style |
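A usage sketch for the helper above; the public `flatten_dict` wrapper mentioned in the docstring is assumed to iterate over the top-level items like this:

```python
nested = {"a": 1, "b": {"c": [10, 20], "d": {"e": True}}}

out = {}
for key, value in nested.items():
    out = _recur_flatten(key, value, out)
print(out)
# {'a': 1, 'b.c.0': 10, 'b.c.1': 20, 'b.d.e': True}
```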
def disambiguate_pdf(self, file, language=None, entities=None):
body = {
"customisation": "generic"
}
if language:
body['language'] = {"lang": language}
if entities:
body['entities'] = entities
files = {
'query': str(body),
'file': (
file,
open(file, 'rb'),
'application/pdf',
{'Expires': '0'}
)
}
res, status = self.post(
self.disambiguate_service,
files=files,
headers={'Accept': 'application/json'},
)
if status != 200:
logger.debug('Disambiguation failed with error ' + str(status))
return self.decode(res), status | Call the disambiguation service in order to process a PDF file.
Args:
file (str): path of the PDF file to be disambiguated.
language (str): language of text (if known)
Returns:
dict, int: API response and API status. | juraj-google-style |
def UploadSignedBinary(source_path,
binary_type,
platform,
upload_subdirectory=""):
file_size = os.path.getsize(source_path)
if file_size > _MAX_SIGNED_BINARY_BYTES:
raise BinaryTooLargeError(
"File [%s] is of size %d (bytes), which exceeds the allowed maximum "
"of %d bytes." % (source_path, file_size, _MAX_SIGNED_BINARY_BYTES))
context = ["Platform:%s" % platform.title(), "Client Context"]
signing_key = grr_config.CONFIG.Get(
"PrivateKeys.executable_signing_private_key", context=context)
root_api = maintenance_utils.InitGRRRootAPI()
binary_path = "/".join([
platform.lower(),
upload_subdirectory,
os.path.basename(source_path),
])
binary = root_api.GrrBinary(binary_type, binary_path)
with open(source_path, "rb") as fd:
binary.Upload(
fd,
sign_fn=binary.DefaultUploadSigner(
private_key=signing_key.GetRawPrivateKey()))
print("Uploaded %s to %s" % (binary_type, binary_path)) | Signs a binary and uploads it to the datastore.
Args:
source_path: Path to the binary to upload.
binary_type: Type of the binary, e.g python-hack or executable.
platform: Client platform where the binary is intended to be run.
upload_subdirectory: Path of a subdirectory to upload the binary to,
relative to the canonical path for binaries of the given type and
platform.
Raises:
BinaryTooLargeError: If the binary to upload is too large. | juraj-google-style |
def GetParserObjectByName(cls, parser_name):
parser_class = cls._parser_classes.get(parser_name, None)
if parser_class:
return parser_class()
return None | Retrieves a specific parser object by its name.
Args:
parser_name (str): name of the parser.
Returns:
BaseParser: parser object or None. | codesearchnet |
def update_device_info(self, device_id, display_name):
content = {'display_name': display_name}
return self._send('PUT', ('/devices/%s' % device_id), content=content) | Update the display name of a device.
Args:
device_id (str): The device ID of the device to update.
display_name (str): New display name for the device. | codesearchnet |
def _format_device(var):
if var.dtype.name.endswith("_ref"):
resource_var_annotation = "(legacy)"
else:
resource_var_annotation = "(resource)"
if var.device:
return "{} {}".format(var.device, resource_var_annotation)
else:
return resource_var_annotation | Returns the device with an annotation specifying `ResourceVariable`.
"legacy" means a normal tf.Variable while "resource" means a ResourceVariable.
For example:
`(legacy)`
`(resource)`
`/job:learner/task:0/device:CPU:* (legacy)`
`/job:learner/task:0/device:CPU:* (resource)`
Args:
var: The Tensorflow Variable to print. | juraj-google-style |
def getInfo(self, query=None, process=False, mode='phonefy', qURI=None):
results = []
data = ''
if (self._modeIsValid(mode=mode) and self._isValidQuery(query, mode=mode)):
if (mode in ['mailfy', 'phonefy', 'searchfy', 'usufy']):
try:
results = getattr(self, 'do_{}'.format(mode))(query)
except AttributeError as e:
raise NotImplementedModeError(str(self), mode)
return json.dumps(results) | Method that checks the presence of a given query and recovers the first list of complaints.
Args:
-----
query: Query to verify.
process: Calling the processing function.
mode: Mode to be executed.
qURI: A query to be checked.
Return:
-------
Python structure for the html processed.
Raises:
-------
NoCredentialsException.
NotImplementedModeError.
BadImplementationError. | codesearchnet |
def join(self, other, **kwargs):
if not isinstance(other, list):
other = [other]
return self._join_list_of_managers(other, **kwargs) | Joins a list or two objects together.
Args:
other: The other object(s) to join on.
Returns:
Joined objects. | juraj-google-style |
def to_proto(self, export_scope=None):
if context.executing_eagerly():
raise RuntimeError('This operation is not supported when eager execution is enabled.')
if export_scope is None or self.handle.name.startswith(export_scope):
var_def = variable_pb2.VariableDef()
var_def.variable_name = ops.strip_name_scope(self.handle.name, export_scope)
if self._initial_value is not None:
var_def.initial_value_name = ops.strip_name_scope(self._initial_value.name, export_scope)
var_def.initializer_name = ops.strip_name_scope(self.initializer.name, export_scope)
if self._cached_value is not None:
var_def.snapshot_name = ops.strip_name_scope(self._cached_value.name, export_scope)
else:
var_def.snapshot_name = ops.strip_name_scope(self._graph_element.name, export_scope)
var_def.is_resource = True
var_def.trainable = self.trainable
var_def.synchronization = self.synchronization.value
var_def.aggregation = self.aggregation.value
if self._save_slice_info:
var_def.save_slice_info_def.MergeFrom(self._save_slice_info.to_proto(export_scope=export_scope))
return var_def
else:
return None | Converts a `ResourceVariable` to a `VariableDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Raises:
RuntimeError: If run in EAGER mode.
Returns:
A `VariableDef` protocol buffer, or `None` if the `Variable` is not
in the specified name scope. | github-repos |
def validate_artifact_url(valid_artifact_rules, valid_artifact_task_ids, url):
def callback(match):
path_info = match.groupdict()
if (('taskId' in path_info) and (path_info['taskId'] not in valid_artifact_task_ids)):
return
if ('filepath' not in path_info):
return
return path_info['filepath']
filepath = match_url_regex(valid_artifact_rules, url, callback)
if (filepath is None):
raise ScriptWorkerTaskException("Can't validate url {}".format(url), exit_code=STATUSES['malformed-payload'])
return unquote(filepath).lstrip('/') | Ensure a URL fits in given scheme, netloc, and path restrictions.
If we fail any checks, raise a ScriptWorkerTaskException with
``malformed-payload``.
Args:
valid_artifact_rules (tuple): the tests to run, with ``schemas``, ``netlocs``,
and ``path_regexes``.
valid_artifact_task_ids (list): the list of valid task IDs to download from.
url (str): the url of the artifact.
Returns:
str: the ``filepath`` of the path regex.
Raises:
ScriptWorkerTaskException: on failure to validate. | codesearchnet |
def use_value_spec(self, value_spec: Optional[pg_typing.List], allow_partial: bool=False) -> 'List':
if value_spec is None:
self._value_spec = None
self._accessor_writable = True
return self
if not isinstance(value_spec, pg_typing.List):
raise ValueError(self._error_message(f'Value spec for list must be a `pg.typing.List` object. Encountered: {value_spec!r}'))
if self._value_spec and self._value_spec != value_spec:
raise RuntimeError(self._error_message(f'List is already bound with a different value spec: {self._value_spec}. New value spec: {value_spec}.'))
self._allow_partial = allow_partial
if flags.is_type_check_enabled():
value_spec.apply(self, allow_partial=base.accepts_partial(self), child_transform=base.symbolic_transform_fn(self._allow_partial), root_path=self.sym_path)
else:
self._value_spec = value_spec
return self | Applies a ``pg.List`` as the value spec for current list.
Args:
value_spec: A List ValueSpec to apply to this List.
If current List is schema-less (whose immediate members are not
validated against schema), and `value_spec` is not None, the value spec
will be applied to the List.
Or else if current List is already symbolic (whose immediate members
are under the constraint of a List value spec), and `value_spec` is
None, current List will become schema-less. However, the schema
constraints for non-immediate members will remain.
allow_partial: Whether allow partial dict based on the schema. This flag
will override allow_partial flag in __init__ for spec-less List.
Returns:
Self.
Raises:
ValueError: schema validation failed due to value error.
RuntimeError: List is already bound with another value_spec.
TypeError: type errors during validation.
KeyError: key errors during validation. | github-repos |
def delete_association(self, target, api_type=None, api_sub_type=None, unique_id=None):
api_type = api_type or target.api_type
api_sub_type = api_sub_type or target.api_sub_type
unique_id = unique_id or target.unique_id
if not self.can_update():
self._tcex.handle_error(910, [self.type])
if not target.can_update():
self._tcex.handle_error(910, [target.type])
return self.tc_requests.delete_association(
self.api_type,
self.api_sub_type,
self.unique_id,
api_type,
api_sub_type,
unique_id,
owner=self.owner,
) | Deletes a association from a Indicator/Group/Victim
Args:
target:
api_type:
api_sub_type:
unique_id:
Returns: | juraj-google-style |
def add_comments(self, comments):
for comment in comments:
if comment not in self.comments and len(comment) > 0:
self.comments.append(comment)
if len(self.comments[0]) == 0:
self.comments.pop(0) | Add comments to the localization entry
Args:
comments (list of str): The comments to be added to the localization entry. | juraj-google-style |
def get_atoms(structure, **kwargs):
if (not structure.is_ordered):
raise ValueError('ASE Atoms only supports ordered structures')
symbols = [str(site.specie.symbol) for site in structure]
positions = [site.coords for site in structure]
cell = structure.lattice.matrix
return Atoms(symbols=symbols, positions=positions, pbc=True, cell=cell, **kwargs) | Returns ASE Atoms object from pymatgen structure.
Args:
structure: pymatgen.core.structure.Structure
**kwargs: other keyword args to pass into the ASE Atoms constructor
Returns:
ASE Atoms object | codesearchnet |
def to_file_async(self, destination, format='csv', csv_delimiter=',', csv_header=True):
self.to_file(destination, format=format, csv_delimiter=csv_delimiter, csv_header=csv_header) | Start saving the results to a local file in CSV format and return a Job for completion.
Args:
destination: path on the local filesystem for the saved results.
format: the format to use for the exported data; currently only 'csv' is supported.
csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
csv_header: for CSV exports, whether to include an initial header line. Default true.
Returns:
A Job for the async save operation.
Raises:
An Exception if the operation failed. | codesearchnet |
def outer_multiply(x, y):
x_shape = tf.shape(x)
padded_shape = tf.concat([x_shape, tf.ones(tf.rank(y), dtype=x_shape.dtype)], axis=0)
return tf.reshape(x, padded_shape) * y | Performs an outer multiplication of two tensors.
Given two `Tensor`s, `S` and `T` of shape `s` and `t` respectively, the outer
product `P` is a `Tensor` of shape `s + t` whose components are given by:
```none
P_{i1,...ik, j1, ... , jm} = S_{i1...ik} T_{j1, ... jm}
```
Args:
x: A `Tensor` of any shape and numeric dtype.
y: A `Tensor` of any shape and the same dtype as `x`.
Returns:
outer_product: A `Tensor` of shape Shape[x] + Shape[y] and the same dtype
as `x`. | github-repos |
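A NumPy analogue of the same reshape-and-broadcast trick (an illustration, not the library implementation): padding the shape of `x` with trailing 1s lets ordinary broadcasting form the outer product.

```python
import numpy as np

def outer_multiply_np(x, y):
    x, y = np.asarray(x), np.asarray(y)
    padded_shape = x.shape + (1,) * y.ndim  # one singleton axis per axis of y
    return x.reshape(padded_shape) * y

s = np.array([1.0, 2.0])            # shape (2,)
t = np.array([[1.0, 10.0, 100.0]])  # shape (1, 3)
print(outer_multiply_np(s, t).shape)  # (2, 1, 3)
```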
def model_code_key_prefix(code_location_key_prefix, model_name, image):
training_job_name = sagemaker.utils.name_from_image(image)
return '/'.join(filter(None, [code_location_key_prefix, (model_name or training_job_name)])) | Returns the s3 key prefix for uploading code during model deployment
The location returned is a potential concatenation of 2 parts
1. code_location_key_prefix if it exists
2. model_name or a name derived from the image
Args:
code_location_key_prefix (str): the s3 key prefix from code_location
model_name (str): the name of the model
image (str): the image from which a default name can be extracted
Returns:
str: the key prefix to be used in uploading code | codesearchnet |
def stop(name, file=sys.stderr):
if is_enabled():
elapsed = (time() - __TIMERS[name])
if elapsed > 60:
elapsed_str = '{:.1f} m'.format(elapsed / 60)
elif elapsed > 1:
elapsed_str = '{:.1f} s'.format(elapsed)
else:
elapsed_str = '{:.1f} ms'.format(elapsed * 1000)
del __TIMERS[name]
print("[prof]", name, elapsed_str, file=file)
return is_enabled() | Stop a profiling timer.
Arguments:
name (str): The name of the timer to stop. If no name is given, stop
the global anonymous timer.
Returns:
bool: Whether or not profiling is enabled.
Raises:
KeyError: If the named timer does not exist. | juraj-google-style |
def _fill_in_missing(x):
default_value = '' if x.dtype == tf.string else 0
return tf.squeeze(tf.sparse.to_dense(tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]), default_value), axis=1) | Replace missing values in a SparseTensor.
Fills in missing values of `x` with '' or 0, and converts to a dense tensor.
Args:
x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1
in the second dimension.
Returns:
A rank 1 tensor where missing values of `x` have been filled in. | github-repos |
def unreduce_tensor(tensor, shape, axis, keepdims):
if not keepdims:
if axis is None:
axis = range(len(shape))
elif isinstance(axis, int):
axis = axis,
for ax in sorted(axis):
tensor = tf.expand_dims(tensor, ax)
tile_shape = np.array(shape) / np.array(shape_as_list(tensor))
return tf.tile(tensor, tile_shape) | Reverse summing over a dimension.
See utils.py.
Args:
tensor: The tensor that was reduced.
shape: A list, the original shape of the tensor before reduction.
axis: The axis or axes that were summed.
keepdims: Whether these axes were kept as singleton axes.
Returns:
A tensor with axes broadcast to match the shape of the original tensor. | juraj-google-style |
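A NumPy illustration of the same idea (an analogue, not the original TensorFlow code): re-insert the summed axis as size 1, then tile back to the original shape.

```python
import numpy as np

original = np.arange(6.0).reshape(2, 3)
reduced = original.sum(axis=1)         # shape (2,), i.e. keepdims=False

expanded = np.expand_dims(reduced, 1)  # shape (2, 1): restore the summed axis
tiled = np.tile(expanded, (1, 3))      # shape (2, 3): broadcast back
print(tiled)
# [[ 3.  3.  3.]
#  [12. 12. 12.]]
```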
def output(self, _filename):
txt = ""
for c in self.contracts:
(name, _inheritance, _var, func_summaries, _modif_summaries) = c.get_summary()
txt += blue("\n+ Contract %s\n"%name)
public = [(elem[0], (elem[1], elem[2]) ) for elem in func_summaries]
collect = collections.defaultdict(list)
for a,b in public:
collect[a].append(b)
public = list(collect.items())
for contract, functions in public:
txt += blue(" - From {}\n".format(contract))
functions = sorted(functions)
for (function, visi) in functions:
if visi in ['external', 'public']:
txt += green(" - {} ({})\n".format(function, visi))
for (function, visi) in functions:
if visi in ['internal', 'private']:
txt += magenta(" - {} ({})\n".format(function, visi))
for (function, visi) in functions:
if visi not in ['external', 'public', 'internal', 'private']:
txt += " - {} ({})\n".format(function, visi)
self.info(txt) | _filename is not used
Args:
_filename(string) | juraj-google-style |
def asdict(self):
return {'stream': self.stream, 'device_timestamp': self.raw_time, 'streamer_local_id': self.reading_id, 'timestamp': self.reading_time, 'extra_data': self.summary_data, 'data': self.raw_data} | Encode the data in this event into a dictionary.
The dictionary returned from this method is a reference to the data
stored in the IOTileEvent, not a copy. It should be treated as read
only.
Returns:
dict: A dictionary containing the information from this event. | codesearchnet |
def get_tensor_shard(param, empty_param, device_mesh, rank, dim):
param_dim = empty_param.dim()
if dim < 0:
dim = param_dim + dim
if dim >= param_dim:
raise ValueError(f'dim {dim} is out of bounds for tensor of dimension {param_dim}')
mesh_shape = device_mesh.shape
world_size = reduce(operator.mul, mesh_shape)
if rank >= world_size:
raise ValueError(f'Rank {rank} is out of bounds for mesh size {world_size}')
shard_size = empty_param.shape[dim]
start = rank * shard_size
end = start + shard_size
slice_indices = [slice(None)] * param_dim
slice_indices[dim] = slice(start, end)
return param[tuple(slice_indices)] | Generalized tensor sharding across a multi-dimensional device mesh.
Args:
param (torch.Tensor): The tensor to shard.
empty_param (torch.Tensor): A tensor used for shape reference.
device_mesh (torch.Tensor): Shape [d_0, ..., d_n] representing the mesh.
rank (int): Global rank of the current process/device.
dim (int): Dimension along which to shard the tensor. | github-repos |
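A dependency-free sketch of the slicing logic (NumPy in place of torch, a single mesh dimension assumed): each rank takes one contiguous block along `dim`.

```python
import numpy as np

def shard_slice(param, rank, world_size, dim):
    # Assumes the sharded dimension divides evenly across ranks.
    shard_size = param.shape[dim] // world_size
    start, end = rank * shard_size, (rank + 1) * shard_size
    indices = [slice(None)] * param.ndim
    indices[dim] = slice(start, end)
    return param[tuple(indices)]

weights = np.arange(24).reshape(4, 6)
print(shard_slice(weights, rank=1, world_size=2, dim=1).shape)  # (4, 3)
```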
def price(self, valuation_date, market, model=None, name=None):
del model, valuation_date
name = name or self._name + '_price'
with tf.name_scope(name):
reference_curve = market.reference_curve
df1 = reference_curve.get_discount_factor(self._accrual_start_dates)
df2 = reference_curve.get_discount_factor(self._accrual_end_dates)
fwd_rates = (df1 / df2 - 1.0) / self._accrual_daycount
total_accrual = tf.math.segment_sum(self._daycount_fractions, self._contract_idx)
if self._averaging_type == rc.AverageType.ARITHMETIC_AVERAGE:
settlement_rate = tf.math.segment_sum(fwd_rates * self._daycount_fractions, self._contract_idx) / total_accrual
else:
settlement_rate = (tf.math.segment_prod(1.0 + fwd_rates * self._daycount_fractions, self._contract_idx) - 1.0) / total_accrual
return 100.0 * (1.0 - settlement_rate) | Returns the price of the contract on the valuation date.
Args:
valuation_date: A scalar `DateTensor` specifying the date on which
valuation is being desired.
market: An object of type `InterestRateMarket` which contains the
necessary information for pricing the FRA instrument.
model: Reserved for future use.
name: Python string. The name to give this op.
Default value: `None` which maps to `price`.
Returns:
A Rank 1 `Tensor` of real type containing the modeled price of each
futures contract based on the input market data. | github-repos |
def convert_lstm_weights(weights, from_cudnn=True):
kernels = transform_kernels(weights[0], transpose_input(from_cudnn), n_gates)
recurrent_kernels = transform_kernels(weights[1], lambda k: k.T, n_gates)
if from_cudnn:
biases = np.sum(np.split(weights[2], 2, axis=0), axis=0)
else:
biases = np.tile(0.5 * weights[2], 2)
return [kernels, recurrent_kernels, biases] | Converts the weights between CuDNNLSTM and LSTM.
Args:
weights: Original weights.
from_cudnn: Indicates whether original weights are from CuDNN layer.
Returns:
Updated weights compatible with LSTM. | github-repos |
def _get_backend(p0: _GPath, p1: _GPath) -> backend_lib.Backend:
if p0._backend in _GCS_BACKENDS:
return p0._backend
elif p1._backend in _GCS_BACKENDS:
return p1._backend
else:
return p0._backend | When composing with another backend, GCS win.
To allow `Path('.').replace('gs://')`
Args:
p0: Path to compare
p1: Path to compare
Returns:
GCS backend if one of the 2 path is GCS, else p0 backend. | github-repos |
def set_timing(self, timing: bool, reset: bool = False) -> None:
self._timing = timing
if reset:
self.reset() | Manually set the ``timing`` parameter, and optionally reset the timers.
Args:
timing: should we be timing?
reset: reset the timers? | juraj-google-style |
def _convert_path(path, name):
table = os.path.splitext(path)[0]
table = table.replace(os.path.sep, '__')
if (name is not None):
table = '___'.join([table, name])
table = re.sub('[^0-9a-zA-Z_]+', '_', table)
table = table.lower()
return table | Convert resource's path and name to storage's table name.
Args:
path (str): resource path
name (str): resource name
Returns:
str: table name | codesearchnet |
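A quick usage sketch (the helper is module-private and is called directly here only for illustration; the output assumes POSIX path separators):

```python
print(_convert_path('data/my-file.csv', None))       # data__my_file
print(_convert_path('data/my-file.csv', 'sheet 1'))  # data__my_file___sheet_1
```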
def delete(filething):
t = OggOpus(filething)
filething.fileobj.seek(0)
t.delete(filething) | delete(filething)
Arguments:
filething (filething)
Raises:
mutagen.MutagenError
Remove tags from a file. | juraj-google-style |
def direct_normal_radiation(self, value=9999.0):
if (value is not None):
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float for field `direct_normal_radiation`'.format(value))
if (value < 0.0):
raise ValueError('value need to be greater or equal 0.0 for field `direct_normal_radiation`')
self._direct_normal_radiation = value | Corresponds to IDD Field `direct_normal_radiation`
Args:
value (float): value for IDD Field `direct_normal_radiation`
Unit: Wh/m2
value >= 0.0
Missing value: 9999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value | codesearchnet |
def _updateEncoding(self, index):
encoding = self._encodingComboBox.itemText(index)
encoding = encoding.lower()
self._encodingKey = _calculateEncodingKey(encoding)
self._previewFile() | Changes the value of the encoding combo box to the value of given index.
This method is also a `SLOT`.
After the encoding is changed, the file will be reloaded and previewed.
Args:
index (int): An valid index of the combo box. | codesearchnet |
def vdot(x1, x2):
if any_symbolic_tensors((x1, x2)):
return Vdot().symbolic_call(x1, x2)
return backend.numpy.vdot(x1, x2) | Return the dot product of two vectors.
If the first argument is complex, the complex conjugate of the first
argument is used for the calculation of the dot product.
Multidimensional tensors are flattened before the dot product is taken.
Args:
x1: First input tensor. If complex, its complex conjugate is taken
before calculation of the dot product.
x2: Second input tensor.
Returns:
Output tensor. | github-repos |
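NumPy exhibits the same semantics the op documents: conjugate the first argument, flatten both inputs, then take the dot product.

```python
import numpy as np

a = np.array([1 + 2j, 3 + 4j])
b = np.array([5 + 6j, 7 + 8j])
print(np.vdot(a, b))  # (70-8j): sum of conj(a) * b

# Multidimensional inputs are flattened before the dot product.
print(np.vdot(np.arange(4.0).reshape(2, 2), np.ones((2, 2))))  # 6.0
```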
def flux_minimization(model, fixed, solver, weights={}):
fba = FluxBalanceProblem(model, solver)
for reaction_id, value in iteritems(fixed):
flux = fba.get_flux_var(reaction_id)
fba.prob.add_linear_constraints(flux >= value)
fba.minimize_l1()
return ((reaction_id, fba.get_flux(reaction_id))
for reaction_id in model.reactions) | Minimize flux of all reactions while keeping certain fluxes fixed.
The fixed reactions are given in a dictionary as reaction id
to value mapping. The weighted L1-norm of the fluxes is minimized.
Args:
model: MetabolicModel to solve.
fixed: dict of additional lower bounds on reaction fluxes.
solver: LP solver instance to use.
weights: dict of weights on the L1-norm terms.
Returns:
An iterator of reaction ID and reaction flux pairs. | juraj-google-style |
def start_proc_mask_signal(proc):
if not isinstance(proc, list):
proc = [proc]
with mask_sigint():
for p in proc:
if isinstance(p, mp.Process):
if sys.version_info < (3, 4) or mp.get_start_method() == 'fork':
log_once(
"Starting a process with 'fork' method is not safe and may consume unnecessary extra memory."
" Use 'forkserver' method (available after Py3.4) instead if you run into any issues. "
"See https:
'warn')
p.start() | Start process(es) with SIGINT ignored.
Args:
proc: (mp.Process or list)
Note:
The signal mask is only applied when called from main thread. | juraj-google-style |
def _process_new(self, feed_item):
creative = {'advertiserId': feed_item.get(FieldMap.ADVERTISER_ID, None), 'name': feed_item.get(FieldMap.CREATIVE_NAME, None), 'active': True}
self._associate_third_party_urls(feed_item, creative)
self._associate_click_tags(feed_item, creative)
if feed_item.get(FieldMap.CREATIVE_TYPE, None) == 'VIDEO':
creative['type'] = 'INSTREAM_VIDEO'
for association in feed_item.get('associations', []):
identifier = self.creative_asset_dao.get_identifier(association, self._creative_asset_feed)
creative['creativeAssets'] = [{'assetIdentifier': identifier, 'role': 'PARENT_VIDEO'}]
del creative['active']
elif feed_item.get(FieldMap.CREATIVE_TYPE, None) == 'DISPLAY':
creative['type'] = 'DISPLAY'
if feed_item.get(FieldMap.CREATIVE_WIDTH, None) and feed_item.get(FieldMap.CREATIVE_HEIGHT, None):
creative['size'] = {'kind': 'dfareporting#size', 'width': feed_item.get(FieldMap.CREATIVE_WIDTH, None), 'height': feed_item.get(FieldMap.CREATIVE_HEIGHT, None)}
for association in feed_item.get('associations', []):
identifier = self.creative_asset_dao.get_identifier(association, self._creative_asset_feed)
creative['creativeAssets'] = [{'assetIdentifier': identifier, 'role': 'PRIMARY'}]
if feed_item.get(FieldMap.CREATIVE_BACKUP_ASSET_ID, None) and feed_item.get(FieldMap.CREATIVE_BACKUP_ASSET_ID, None) != '':
backup_identifier = self.creative_asset_dao.get_backup_identifier(association, self._creative_asset_feed)
creative['backupImageReportingLabel'] = feed_item.get(FieldMap.CREATIVE_BACKUP_NAME, None)
backup_features = feed_item.get(FieldMap.BACKUP_IMAGE_FEATURES, None)
if backup_features is not None and backup_features != '':
features = backup_features.split(',')
creative['backupImageFeatures'] = features
creative['backupImageTargetWindow'] = {'targetWindowOption': feed_item.get(FieldMap.BACKUP_IMAGE_TARGET_WINDOW_OPTION, None), 'customHtml': feed_item.get(FieldMap.BACKUP_IMAGE_CUSTOM_HTML, None)}
lp = self.landing_page_dao.get(feed_item, column_name=FieldMap.BACKUP_IMAGE_CLICK_THROUGH_LANDING_PAGE_ID)
creative['backupImageClickThroughUrl'] = {'landingPageId': feed_item.get(FieldMap.BACKUP_IMAGE_CLICK_THROUGH_LANDING_PAGE_ID) if not lp else lp['id']}
creative['creativeAssets'].append({'assetIdentifier': backup_identifier, 'role': 'BACKUP_IMAGE'})
del creative['active']
else:
raise Exception('Only video and display are supported at the moment!')
return creative | Creates a new creative DCM object from a feed item representing an creative from the Bulkdozer feed.
This function simply creates the object to be inserted later by the BaseDAO
object.
Args:
feed_item: Feed item representing the creative from the Bulkdozer feed.
Returns:
A creative object ready to be inserted in DCM through the API. | github-repos |
def EquilibrateEigenVectorPhases(x, y):
phases = np.sum(np.conj(x) * y, -2, keepdims=True)
phases /= np.abs(phases)
return phases * x | Equilibrate the phase of the Eigenvectors in the columns of `x` and `y`.
Eigenvectors are only unique up to an arbitrary phase. This function rotates x
such that it matches y. Precondition: The columns of x and y differ by a
multiplicative complex phase factor only.
Args:
x: `np.ndarray` with Eigenvectors
y: `np.ndarray` with Eigenvectors
Returns:
`np.ndarray` containing an equilibrated version of x. | github-repos |
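A small check of the helper above: rotate each column of a unitary matrix by an arbitrary phase, then verify that equilibration recovers the rotated copy.

```python
import numpy as np

rng = np.random.default_rng(0)
x = np.linalg.qr(rng.normal(size=(3, 3)) + 1j * rng.normal(size=(3, 3)))[0]
column_phases = np.exp(1j * rng.uniform(0, 2 * np.pi, size=(1, 3)))
y = x * column_phases

x_eq = EquilibrateEigenVectorPhases(x, y)
print(np.allclose(x_eq, y))  # True
```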
def distance(self, physical_qubit1, physical_qubit2):
if (physical_qubit1 not in self.physical_qubits):
raise CouplingError(('%s not in coupling graph' % (physical_qubit1,)))
if (physical_qubit2 not in self.physical_qubits):
raise CouplingError(('%s not in coupling graph' % (physical_qubit2,)))
if (self._dist_matrix is None):
self._compute_distance_matrix()
return self._dist_matrix[(physical_qubit1, physical_qubit2)] | Returns the undirected distance between physical_qubit1 and physical_qubit2.
Args:
physical_qubit1 (int): A physical qubit
physical_qubit2 (int): Another physical qubit
Returns:
int: The undirected distance
Raises:
CouplingError: if the qubits do not exist in the CouplingMap | codesearchnet |
def _get_fullname(obj):
if (not hasattr(obj, '__name__')):
obj = obj.__class__
if (obj.__module__ in ('builtins', '__builtin__')):
return obj.__name__
return '{}.{}'.format(obj.__module__, obj.__name__) | Get the full name of an object including the module.
Args:
obj: An object.
Returns:
The full class name of the object. | codesearchnet |
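Usage sketch for the helper above; builtins keep their bare name while other objects get a module-qualified one.

```python
import collections

print(_get_fullname(42))                         # int
print(_get_fullname(collections.OrderedDict()))  # collections.OrderedDict
print(_get_fullname(collections.OrderedDict))    # collections.OrderedDict
```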
def trace(self, data, callback=None):
conn_id = self._find_connection(self.conn_string)
if (conn_id is not None):
self.adapter.notify_event_nowait(self.conn_string, 'trace', data)
if (callback is not None):
callback((conn_id is not None)) | Queue data for tracing
Args:
data (bytearray, string): Unstructured data to trace to any
connected client.
callback (callable): An optional callback that will be called with
a bool value of True when this data actually gets traced.
If the client disconnects and the data is dropped instead,
callback will be called with False. | codesearchnet |
def extract_output(self, accumulator, *args, **kwargs):
raise NotImplementedError(str(self)) | Return result of converting accumulator into the output value.
Args:
accumulator: the final accumulator value computed by this CombineFn
for the entire input key or PCollection. Can be modified for
efficiency.
*args: Additional arguments and side inputs.
**kwargs: Additional arguments and side inputs. | github-repos |
def dims(x):
if isinstance(x, tf.TensorShape):
return x.dims
r = tf.TensorShape(x).dims
return None if r is None else list(map(tf.compat.dimension_value, r)) | Returns a list of dimension sizes, or `None` if `rank` is unknown.
For more details, see `help(tf.TensorShape.dims)`.
Args:
x: object representing a shape; convertible to `tf.TensorShape`.
Returns:
shape_as_list: list of sizes or `None` values representing each
dimensions size if known. A size is `tf.Dimension` if input is a
`tf.TensorShape` and an `int` otherwise. | juraj-google-style |
def set_tuple_types(self, tuple_types):
if len(tuple_types) != self.number_of_tuple_elements:
raise ValueError(f'tuple_types is {str(tuple_types)}, but must be a list of length {self.number_of_tuple_elements}')
if self._frozen:
for frozen, updated in zip(self._tuple_types, tuple_types):
if frozen != updated:
raise ValueError(f'Trying to update InfeedQueue with frozen configuration with an incompatible type. Frozen types are {str(self._tuple_types)}, updated types are {str(tuple_types)}')
else:
try:
self._tuple_types = [dtypes.as_dtype(t) for t in tuple_types]
except TypeError as e:
raise TypeError(f'tuple_types is {str(tuple_types)}, but must be a list of elements each convertible to dtype: got error {str(e)}') from e | Sets the type of each element of the queue.
tuple_types must be a list of length
self.number_of_tuple_elements, and each element must be
convertible to a dtype.
Args:
tuple_types: the types of each queue element.
Raises:
ValueError: if tuple_types is not of length
self.number_of_tuple_elements.
TypeError: if an element of tuple_types cannot be converted to a
dtype. | github-repos |
def translate_opcodes(code_obj, target):
target = get_py_internals(target)
src_ops = code_obj.disassemble()
dst_opmap = target['opmap']
dst_ops = []
op_iter = enumerate(src_ops)
for i, op in op_iter:
if isinstance(op, pwnypack.bytecode.Label):
dst_ops.append(op)
continue
if op.name not in dst_opmap:
if op.name == 'POP_JUMP_IF_FALSE' and 'JUMP_IF_TRUE' in dst_opmap:
lbl = pwnypack.bytecode.Label()
dst_ops.extend([
pwnypack.bytecode.Op('JUMP_IF_TRUE', lbl),
pwnypack.bytecode.Op('POP_TOP', None),
pwnypack.bytecode.Op('JUMP_ABSOLUTE', op.arg),
lbl,
pwnypack.bytecode.Op('POP_TOP', None),
])
elif op.name == 'POP_JUMP_IF_TRUE' and 'JUMP_IF_FALSE' in dst_opmap:
lbl = pwnypack.bytecode.Label()
dst_ops.extend([
pwnypack.bytecode.Op('JUMP_IF_FALSE', lbl),
pwnypack.bytecode.Op('POP_TOP', None),
pwnypack.bytecode.Op('JUMP_ABSOLUTE', op.arg),
lbl,
pwnypack.bytecode.Op('POP_TOP', None),
])
elif op.name == 'JUMP_IF_FALSE' and 'JUMP_IF_FALSE_OR_POP' in dst_opmap and \
src_ops[i + 1].name == 'POP_TOP':
next(op_iter)
dst_ops.append(pwnypack.bytecode.Op('JUMP_IF_FALSE_OR_POP', op.arg))
elif op.name == 'JUMP_IF_TRUE' and 'JUMP_IF_TRUE_OR_POP' in dst_opmap and \
src_ops[i + 1].name == 'POP_TOP':
next(op_iter)
dst_ops.append(pwnypack.bytecode.Op('JUMP_IF_TRUE_OR_POP', op.arg))
else:
raise SyntaxError('Opcode %s not supported on target.' % op.name)
else:
dst_ops.append(op)
code_obj.assemble(dst_ops, target) | Very crude inter-python version opcode translator. Raises SyntaxError when
the opcode doesn't exist in the destination opmap. Used to transcribe
python code objects between python versions.
Arguments:
code_obj(pwnypack.bytecode.CodeObject): The code object representation
to translate.
target(dict): The py_internals structure for the target
python version. | juraj-google-style |
def verify(self, flag_values):
param = self._get_input_to_checker_function(flag_values)
if not self.checker(param):
raise _exceptions.ValidationError(self.message) | Verifies that constraint is satisfied.
flags library calls this method to verify Validator's constraint.
Args:
flag_values: flags.FlagValues, the FlagValues instance to get flags from.
Raises:
Error: Raised if constraint is not satisfied. | juraj-google-style |
def is_original_format(tweet):
if "created_at" in tweet:
original_format = True
elif "postedTime" in tweet:
original_format = False
else:
raise NotATweetError("This dict has neither 'created_at' or 'postedTime' as keys")
return original_format | Simple checker to flag the format of a tweet.
Args:
tweet (Tweet): tweet in qustion
Returns:
Bool
Example:
>>> import tweet_parser.tweet_checking as tc
>>> tweet = {"created_at": 124125125125,
... "text": "just setting up my twttr",
... "nested_field": {"nested_1": "field", "nested_2": "field2"}}
>>> tc.is_original_format(tweet)
True | juraj-google-style |
def validates(version):
def _validates(cls):
validators[version] = cls
meta_schema_id = cls.ID_OF(cls.META_SCHEMA)
if meta_schema_id:
meta_schemas[meta_schema_id] = cls
return cls
return _validates | Register the decorated validator for a ``version`` of the specification.
Registered validators and their meta schemas will be considered when
parsing ``$schema`` properties' URIs.
Arguments:
version (str):
An identifier to use as the version's name
Returns:
callable: a class decorator to decorate the validator with the version | juraj-google-style |
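A minimal self-contained sketch of the registration pattern; the registries and the `ID_OF`/`META_SCHEMA` attributes mirror the snippet above, but the validator class here is a stand-in, not a real jsonschema validator.

```python
validators = {}
meta_schemas = {}

def validates(version):
    def _validates(cls):
        validators[version] = cls
        meta_schema_id = cls.ID_OF(cls.META_SCHEMA)
        if meta_schema_id:
            meta_schemas[meta_schema_id] = cls
        return cls
    return _validates

@validates("draft-x")
class DummyValidator:
    META_SCHEMA = {"$id": "https://example.com/meta-schema"}
    ID_OF = staticmethod(lambda schema: schema.get("$id", ""))

print(list(validators))    # ['draft-x']
print(list(meta_schemas))  # ['https://example.com/meta-schema']
```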
def to_pandas(self, is_transposed=False):
if is_transposed:
return self.transpose().to_pandas(False).T
else:
retrieved_objects = [
[obj.to_pandas() for obj in part] for part in self.partitions
]
if all(
isinstance(part, pandas.Series)
for row in retrieved_objects
for part in row
):
axis = 0
elif all(
isinstance(part, pandas.DataFrame)
for row in retrieved_objects
for part in row
):
axis = 1
else:
ErrorMessage.catch_bugs_and_request_email(True)
df_rows = [
pandas.concat([part for part in row], axis=axis)
for row in retrieved_objects
if not all(part.empty for part in row)
]
if len(df_rows) == 0:
return pandas.DataFrame()
else:
return pandas.concat(df_rows) | Convert this object into a Pandas DataFrame from the partitions.
Args:
is_transposed: A flag for telling this object that the external
representation is transposed, but not the internal.
Returns:
A Pandas DataFrame | juraj-google-style |
def load_flax_sharded_weights(cls, shard_files):
state_sharded_dict = {}
for shard_file in shard_files:
try:
with open(shard_file, 'rb') as state_f:
state = from_bytes(cls, state_f.read())
except (UnpicklingError, msgpack.exceptions.ExtraData) as e:
with open(shard_file) as f:
if f.read().startswith('version'):
raise OSError('You seem to have cloned a repository without having git-lfs installed. Please install git-lfs and run `git lfs install` followed by `git lfs pull` in the folder you cloned.')
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise OSError(f'Unable to convert {shard_file} to Flax deserializable object. ')
state = flatten_dict(state, sep='/')
state_sharded_dict.update(state)
del state
gc.collect()
return unflatten_dict(state_sharded_dict, sep='/') | This is the same as [`flax.serialization.from_bytes`]
(https://flax.readthedocs.io/en/latest/_modules/flax/serialization.html#from_bytes) but for a sharded checkpoint.
This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being
loaded in the model.
Args:
shard_files (`List[str]`:
The list of shard files to load.
Returns:
`Dict`: A nested dictionary of the model parameters, in the expected format for flax models : `{'model':
{'params': {'...'}}}`. | github-repos |
def get_structure_seqrecords(model):
structure_seq_records = []
for chain in model:
tracker = 0
chain_seq = ''
chain_resnums = []
for res in chain.get_residues():
res_id = res.id
res_num = res_id[1]
res_icode = res_id[2]
if Polypeptide.is_aa(res, standard=True):
end_tracker = res_num
res_aa_one = Polypeptide.three_to_one(res.get_resname())
if end_tracker != (tracker + 1):
if res_icode != ' ':
chain_seq += res_aa_one
chain_resnums.append(res_num)
tracker = end_tracker + 1
continue
else:
multiplier = (end_tracker - tracker - 1)
chain_seq += 'X' * multiplier
chain_resnums.extend([float("Inf")] * multiplier)
chain_seq += res_aa_one
chain_resnums.append(res_num)
tracker = end_tracker
else:
continue
chain_seq_record = SeqRecord(Seq(chain_seq, IUPAC.protein), id=chain.get_id())
chain_seq_record.letter_annotations['structure_resnums'] = chain_resnums
structure_seq_records.append(chain_seq_record)
return structure_seq_records | Get a dictionary of a PDB file's sequences.
Special cases include:
- Insertion codes. In the case of residue numbers like "15A", "15B", both residues are written out. Example: 9LPR
- HETATMs. Currently written as an "X", or unknown amino acid.
Args:
model: Biopython Model object of a Structure
Returns:
list: List of SeqRecords | juraj-google-style |
def lin_moma(self, wt_fluxes):
reactions = set(self._adjustment_reactions())
z_diff = self._z_diff
v = self._v
with self.constraints() as constr:
for (f_reaction, f_value) in iteritems(wt_fluxes):
if (f_reaction in reactions):
constr.add((z_diff[f_reaction] >= (f_value - v[f_reaction])), ((f_value - v[f_reaction]) >= (- z_diff[f_reaction])))
self._prob.set_objective(z_diff.sum(reactions))
self._solve(lp.ObjectiveSense.Minimize) | Minimize the redistribution of fluxes using a linear objective.
The change in flux distribution is mimimized by minimizing the sum
of the absolute values of the differences of wild type FBA solution
and the knockout strain flux solution.
This formulation bases the solution on the wild type fluxes that
are specified by the user. If these wild type fluxes were calculated
using FBA, then an arbitrary flux vector that optimizes the objective
function is used. See [Segre`_02] for more information.
Args:
wt_fluxes: Dictionary of all the wild type fluxes. Use
:meth:`.get_fba_flux(objective)` to return a dictionary of
fluxes found by FBA. | codesearchnet |
def _get_bond_data(line):
line = line.split()
length = float(line[2])
sites = line[0].replace("/", "-").split("-")
site_indices = tuple(int(ind) - 1 for ind in sites[1:4:2])
species = tuple(re.split(r"\d+", spec)[0] for spec in sites[0:3:2])
label = "%s%d-%s%d" % (species[0], site_indices[0] + 1,
species[1], site_indices[1] + 1)
return label, length, site_indices | Subroutine to extract bond label, site indices, and length from
a COPL header line. The site indices are zero-based, so they
can be easily used with a Structure object.
Example header line: Fe-1/Fe-1-tr(-1,-1,-1) : 2.482 Ang.
Args:
line: line in the COHPCAR header describing the bond.
Returns:
The bond label, the bond length and a tuple of the site
indices. | juraj-google-style |
class TableTransformerEncoder(TableTransformerPreTrainedModel):
def __init__(self, config: TableTransformerConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
self.layers = nn.ModuleList([TableTransformerEncoderLayer(config) for _ in range(config.encoder_layers)])
self.layernorm = nn.LayerNorm(config.d_model)
self.post_init()
def forward(self, inputs_embeds=None, attention_mask=None, object_queries=None, output_attentions=None, output_hidden_states=None, return_dict=None):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
hidden_states = inputs_embeds
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
if attention_mask is not None:
attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for encoder_layer in self.layers:
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
to_drop = False
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
to_drop = True
if to_drop:
layer_outputs = (None, None)
else:
layer_outputs = encoder_layer(hidden_states, attention_mask, object_queries=object_queries, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
hidden_states = self.layernorm(hidden_states)
if not return_dict:
return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions) | Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`TableTransformerEncoderLayer`].
The encoder updates the flattened feature map through multiple self-attention layers.
Small tweak for Table Transformer:
- object_queries are added to the forward pass.
Args:
config: TableTransformerConfig | github-repos |
def create_sp(operations, operation):
operations.execute(
"CREATE FUNCTION %s %s" % (
operation.target.name, operation.target.sqltext
)
) | Implements ``CREATE FUNCTION``.
Args:
operations: instance of ``alembic.operations.base.Operations``
operation: instance of :class:`.ReversibleOp`
Returns:
``None`` | juraj-google-style |
def _GenerateStatsTable(self, feed_merger):
rows = []
rows.append('<tr><th class="header"/><th class="header">Merged</th><th class="header">Copied from old feed</th><th class="header">Copied from new feed</th></tr>')
for merger in feed_merger.GetMergerList():
stats = merger.GetMergeStats()
if (stats is None):
continue
(merged, not_merged_a, not_merged_b) = stats
rows.append(('<tr><th class="header">%s</th><td class="header">%d</td><td class="header">%d</td><td class="header">%d</td></tr>' % (merger.DATASET_NAME, merged, not_merged_a, not_merged_b)))
return ('<table>%s</table>' % '\n'.join(rows)) | Generate an HTML table of merge statistics.
Args:
feed_merger: The FeedMerger instance.
Returns:
The generated HTML as a string. | codesearchnet |
def validate(self):
    self._attribute_errors = dict()
    for (local_name, attribute) in self._attributes.items():
        value = getattr(self, local_name, None)
        if (attribute.is_required and ((value is None) or (value == ''))):
            self._attribute_errors[local_name] = {'title': 'Invalid input', 'description': 'This value is mandatory.', 'remote_name': attribute.remote_name}
            continue
        if (value is None):
            continue
        if (not self._validate_type(local_name, attribute.remote_name, value, attribute.attribute_type)):
            continue
        if ((attribute.min_length is not None) and (len(value) < attribute.min_length)):
            self._attribute_errors[local_name] = {'title': 'Invalid length', 'description': ('Attribute %s minimum length should be %s but is %s' % (attribute.remote_name, attribute.min_length, len(value))), 'remote_name': attribute.remote_name}
            continue
        if ((attribute.max_length is not None) and (len(value) > attribute.max_length)):
            self._attribute_errors[local_name] = {'title': 'Invalid length', 'description': ('Attribute %s maximum length should be %s but is %s' % (attribute.remote_name, attribute.max_length, len(value))), 'remote_name': attribute.remote_name}
            continue
        if (attribute.attribute_type == list):
            valid = True
            for item in value:
                if (valid is True):
                    valid = self._validate_value(local_name, attribute, item)
        else:
            self._validate_value(local_name, attribute, value)
    return self.is_valid() | Validate the current object attributes.
Check all attributes and store errors
Returns:
Returns True if all attributes of the object
respect constraints. Returns False otherwise and
stores errors in the errors dict. | codesearchnet
def relpath(self, path):
    for root in self.roots:
        if isinstance(root, Pattern):
            match = root.match(path)
            if not match:
                continue
            root = match.group(0)
        try:
            relative = path.split(root, 1)[1]
            return relative.lstrip('/')
        except IndexError:
            continue
    return path | Get path relative to storage.
Args:
path (str): Absolute path or URL.
Returns:
str: relative path. | juraj-google-style |
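
A stand-alone sketch of the same root-matching logic, for illustration only (the `relpath` helper and the example roots below are assumptions, not part of the original class):

import re

def relpath(path, roots):
    for root in roots:
        if isinstance(root, re.Pattern):   # regex root: use the matched prefix
            match = root.match(path)
            if not match:
                continue
            root = match.group(0)
        try:
            return path.split(root, 1)[1].lstrip('/')
        except IndexError:
            continue
    return path

roots = ['s3://bucket', re.compile(r'https?://[^/]+')]
print(relpath('s3://bucket/key/file.txt', roots))      # key/file.txt
print(relpath('https://host.example/a/b.txt', roots))  # a/b.txt
print(relpath('/local/other.txt', roots))              # unchanged: no root matched
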
def random_new(algo: int = RNG_CMWC) -> tcod.random.Random:
    return tcod.random.Random(algo) | Return a new Random instance using ``algo``.
Args:
algo (int): The random number algorithm to use.
Returns:
Random: A new Random instance using the given algorithm. | juraj-google-style |
def _WriteFileEntry(self, file_entry, data_stream_name, destination_file):
    source_file_object = file_entry.GetFileObject(
        data_stream_name=data_stream_name)
    if not source_file_object:
        return
    try:
        with open(destination_file, 'wb') as destination_file_object:
            source_file_object.seek(0, os.SEEK_SET)
            data = source_file_object.read(self._COPY_BUFFER_SIZE)
            while data:
                destination_file_object.write(data)
                data = source_file_object.read(self._COPY_BUFFER_SIZE)
    finally:
        source_file_object.close() | Writes the contents of the source file entry to a destination file.
Note that this function will overwrite an existing file.
Args:
file_entry (dfvfs.FileEntry): file entry whose content is to be written.
data_stream_name (str): name of the data stream whose content is to be
written.
destination_file (str): path of the destination file. | juraj-google-style |
def connect(self, component):
    if (not isinstance(component, ThreadPool)):
        raise TypeError('"component" must be a ThreadPool object')
    component.in_queue = self.out_queue
    return component | Connect two ThreadPools.
The ``in_queue`` of the second pool will be set as the ``out_queue`` of
the current pool, thus all the output will be input to the second pool.
Args:
component (ThreadPool): the ThreadPool to be connected.
Returns:
ThreadPool: the modified second ThreadPool. | codesearchnet |
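
A minimal sketch of the queue chaining this enables (the `ThreadPool` stand-in below only models the two queue attributes and is not the real class):

import queue

class ThreadPool:
    def __init__(self):
        self.in_queue = queue.Queue()
        self.out_queue = queue.Queue()

    def connect(self, component):
        if not isinstance(component, ThreadPool):
            raise TypeError('"component" must be a ThreadPool object')
        component.in_queue = self.out_queue
        return component

first, second = ThreadPool(), ThreadPool()
first.connect(second)
first.out_queue.put('item')   # anything the first pool emits...
print(second.in_queue.get())  # ...arrives as the second pool's input
# connect() returns the second pool, so stages can be chained:
# first.connect(second).connect(third)
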
def parse(self, words):
    def exact(words):
        try:
            return float(words)
        except:
            return None
    guess = exact(words)
    if guess is not None:
        return guess
    split = words.split(' ')
    if split[-1] in self.__fractions__:
        split[-1] = self.__fractions__[split[-1]]
    elif split[-1] in self.__ordinals__:
        split[-1] = self.__ordinals__[split[-1]]
    parsed_ordinals = ' '.join(split)
    return self.parseFloat(parsed_ordinals) | A general method for parsing word-representations of numbers.
Supports floats and integers.
Args:
words (str): Description of an arbitrary number.
Returns:
A double representation of the words. | juraj-google-style |
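
A sketch of the last-word substitution step described above (the lookup tables are illustrative assumptions; the real class defines its own __fractions__/__ordinals__ and finishes the job in parseFloat):

FRACTIONS = {'half': '2', 'quarter': '4', 'quarters': '4'}   # word -> denominator
ORDINALS = {'second': '2', 'third': '3', 'fourth': '4'}      # word -> digit

def normalize(words):
    try:
        return float(words)          # exact numeric strings short-circuit
    except ValueError:
        pass
    split = words.split(' ')
    if split[-1] in FRACTIONS:
        split[-1] = FRACTIONS[split[-1]]
    elif split[-1] in ORDINALS:
        split[-1] = ORDINALS[split[-1]]
    return ' '.join(split)           # handed to parseFloat() in the real class

print(normalize('7.5'))              # 7.5
print(normalize('three quarters'))   # 'three 4', i.e. three over 4 -> 0.75 downstream
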
def update(self, data):
    updated = False
    if 'state' in data:
        updated = self.set_property('state', data['state'])
    if 'end' in data:
        updated |= self.set_property('end', data['end'])
    if 'last_alert' in data:
        updated |= self.set_property('last_alert', data['last_alert'])
    return updated | Updates the object information based on live data, if there were any changes made. Any changes will be
automatically applied to the object, but will not be automatically persisted. You must manually call
`db.session.add(instance)` on the object.
Args:
data (:obj:): AWS API Resource object fetched from AWS API
Returns:
`bool` | juraj-google-style |
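
A minimal sketch of the set_property pattern this relies on (the base class below is hypothetical):

class Resource:
    def __init__(self, **props):
        self._props = dict(props)

    def set_property(self, name, value):
        changed = self._props.get(name) != value
        self._props[name] = value
        return changed

    def update(self, data):
        updated = False
        if 'state' in data:
            updated = self.set_property('state', data['state'])
        if 'end' in data:
            updated |= self.set_property('end', data['end'])
        if 'last_alert' in data:
            updated |= self.set_property('last_alert', data['last_alert'])
        return updated

issue = Resource(state='open', end=None, last_alert=None)
print(issue.update({'state': 'open'}))                         # False: nothing changed
print(issue.update({'state': 'closed', 'end': '2019-01-01'}))  # True: two properties changed
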
def system_repertoire_distance(r1, r2):
    if config.MEASURE in measures.asymmetric():
        raise ValueError(
            '{} is asymmetric and cannot be used as a system-level '
            'irreducibility measure.'.format(config.MEASURE))
    return measures[config.MEASURE](r1, r2) | Compute the distance between two repertoires of a system.
Args:
r1 (np.ndarray): The first repertoire.
r2 (np.ndarray): The second repertoire.
Returns:
float: The distance between ``r1`` and ``r2``. | juraj-google-style |
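
For intuition, a symmetric measure over two repertoires could look like the NumPy sketch below; the real measures registry and config.MEASURE selection in PyPhi are not reproduced here:

import numpy as np

def l1_distance(r1, r2):
    # Illustrative symmetric measure: sum of absolute differences.
    return float(np.sum(np.abs(r1 - r2)))

r1 = np.array([0.5, 0.5, 0.0, 0.0])
r2 = np.array([0.25, 0.25, 0.25, 0.25])
print(l1_distance(r1, r2))  # 1.0
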
def repr(self, changed_widgets=None):
    if (changed_widgets is None):
        changed_widgets = {}
    return super(Widget, self).repr(changed_widgets) | Represents the widget in HTML format, packs all the attributes, children and so on.
Args:
client (App): Client instance.
changed_widgets (dict): A dictionary containing a collection of widgets that have to be updated.
The Widget that has to be updated is the key, and the value is its textual repr. | codesearchnet
def _OpenParentFile(self, file_system, path_spec, vhdi_file):
    location = getattr(path_spec, 'location', None)
    if not location:
        raise errors.PathSpecError(
            'Unsupported path specification without location.')
    location_path_segments = file_system.SplitPath(location)
    parent_filename = vhdi_file.parent_filename
    _, _, parent_filename = parent_filename.rpartition('\\')
    location_path_segments.pop()
    location_path_segments.append(parent_filename)
    parent_file_location = file_system.JoinPath(location_path_segments)
    kwargs = path_spec_factory.Factory.GetProperties(path_spec)
    kwargs['location'] = parent_file_location
    if path_spec.parent is not None:
        kwargs['parent'] = path_spec.parent
    parent_file_path_spec = path_spec_factory.Factory.NewPathSpec(
        path_spec.type_indicator, **kwargs)
    if not file_system.FileEntryExistsByPathSpec(parent_file_path_spec):
        return
    file_object = resolver.Resolver.OpenFileObject(
        parent_file_path_spec, resolver_context=self._resolver_context)
    vhdi_parent_file = pyvhdi.file()
    vhdi_parent_file.open_file_object(file_object)
    if vhdi_parent_file.parent_identifier:
        self._OpenParentFile(
            file_system, parent_file_path_spec, vhdi_parent_file)
    vhdi_file.set_parent(vhdi_parent_file)
    self._parent_vhdi_files.append(vhdi_parent_file)
    self._sub_file_objects.append(file_object) | Opens the parent file.
Args:
file_system (FileSystem): file system of the VHDI file.
path_spec (PathSpec): path specification of the VHDI file.
vhdi_file (pyvhdi.file): VHDI file.
Raises:
PathSpecError: if the path specification is incorrect. | juraj-google-style |
def _queue_dag(self, name, *, data=None):
    if self._stop_workflow:
        return None
    if name not in self._dags_blueprint:
        raise DagNameUnknown()
    new_dag = copy.deepcopy(self._dags_blueprint[name])
    new_dag.workflow_name = self.name
    self._dags_running[new_dag.name] = self._celery_app.send_task(
        JobExecPath.Dag, args=(new_dag, self._workflow_id, data),
        queue=new_dag.queue, routing_key=new_dag.queue)
    return new_dag.name | Add a new dag to the queue.
If the stop workflow flag is set, no new dag can be queued.
Args:
name (str): The name of the dag that should be queued.
data (MultiTaskData): The data that should be passed on to the new dag.
Raises:
DagNameUnknown: If the specified dag name does not exist
Returns:
str: The name of the queued dag. | juraj-google-style |
def _build_orthogonal_rings(core_locations: List[_CoreLocation], ring_size: int, rotate_ring_across_rings: bool) -> List[_CoreLocation]:
    num_cores = len(core_locations)
    permutation = _build_all_reduce_ring(core_locations[:ring_size])
    for r in range(0, num_cores, ring_size):
        core_locations[r:r + ring_size] = [core_locations[r + permutation[i]] for i in range(ring_size)]
    logging.vlog(1, 'Permutated core locations: %s', core_locations)
    transposed = []
    for i in range(ring_size):
        transposed += [core_locations[g + i] for g in range(0, num_cores, ring_size)]
    num_rings = int(num_cores / ring_size)
    permutation = _build_all_reduce_ring(transposed[:num_rings], rotate=rotate_ring_across_rings)
    for r in range(0, num_cores, num_rings):
        transposed[r:r + num_rings] = [transposed[r + permutation[i]] for i in range(num_rings)]
    untransposed = []
    for i in range(num_rings):
        untransposed += [transposed[g + i] for g in range(0, num_cores, num_rings)]
    logging.vlog(1, 'Stride-permutated core locations: %s', untransposed)
    return untransposed | Build two all-reduce rings orthogonal to each other.
One ring includes every `ring_size` consecutive core locations. It is usually
applied to the model-parallel dimension of a mesh to achieve best 1D
all-reduce performance. The other ring includes core locations separated by
a stride of `ring_size`. It is usually applied to the data-parallel dimension
of a mesh to get predictable strided all-reduce performance.
Args:
core_locations: A list of core locations expressed as [x, y, z, core].
ring_size: The number of core locations in the consecutive ring.
rotate_ring_across_rings: Build column-major secondary rings.
Returns:
A permutation of the input list forming the described rings. | github-repos |
def log_and_count(self, event_str, msg_str=None, inc_int=None):
    logger.info(' - '.join(map(str, [v for v in (event_str, msg_str, inc_int) if v])))
    self.count(event_str, (inc_int or 1)) | Count an event and write a message to a logger.
Args:
event_str: str
The name of an event to count. Used as a key in the event dict. The same
name will be used in the summary. This also becomes a part of the message
logged by this function.
msg_str: str
Optional message with details about the events. The message is only written
to the log. While the ``event_str`` functions as a key and must remain the
same for the same type of event, ``msg_str`` may change between calls.
inc_int: int
Optional argument to increase the count for the event by more than 1. | codesearchnet |
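
A usage sketch, assuming a simple counter mixin that keeps the event dict mentioned above (names are illustrative):

import logging

logger = logging.getLogger(__name__)

class EventCounter:
    def __init__(self):
        self._events = {}

    def count(self, event_str, inc_int):
        self._events[event_str] = self._events.get(event_str, 0) + inc_int

    def log_and_count(self, event_str, msg_str=None, inc_int=None):
        logger.info(' - '.join(map(str, [v for v in (event_str, msg_str, inc_int) if v])))
        self.count(event_str, (inc_int or 1))

counter = EventCounter()
counter.log_and_count('object_retrieved', msg_str='pid=abc/123')
counter.log_and_count('object_retrieved', inc_int=4)
print(counter._events)  # {'object_retrieved': 5}
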
def __get_default_value_from_element(self, element):
    if element.name == "select":
        options = element.find_all("option")
        is_multiple = element.has_attr("multiple")
        selected_options = [
            option for option in options
            if option.has_attr("selected")
        ]
        if not selected_options and options:
            selected_options = [options[0]]
        selected_values = []
        if is_multiple:
            for option in selected_options:
                value = option["value"] if option.has_attr("value") else option.string
                selected_values.append(value)
            return selected_values
        elif len(selected_options) >= 1:
            if selected_options[0].has_attr("value"):
                return selected_options[0]["value"]
            else:
                return selected_options[0].string
        return ""
    if element.name == "textarea":
        return element.string if element.string is not None else ""
    if element.name == "input" and element.has_attr("type"):
        if element["type"] in ("checkbox", "radio"):
            if not element.has_attr("checked"):
                return False
            if element.has_attr("value"):
                return element["value"]
            else:
                return "on"
    if element.has_attr("value"):
        return element["value"]
    return "" | Get the default value of a form element
Args:
element (obj): The soup element.
Returns:
str: The default value | juraj-google-style |
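
A usage sketch with BeautifulSoup exercising the same attribute checks (assumes the beautifulsoup4 package; the form markup is made up):

from bs4 import BeautifulSoup

html = """
<form>
  <input type="text" name="user" value="alice">
  <input type="checkbox" name="tos" checked>
  <select name="color" multiple>
    <option value="r" selected>red</option>
    <option value="g">green</option>
  </select>
  <textarea name="bio">hello</textarea>
</form>
"""
soup = BeautifulSoup(html, "html.parser")

# The method above would resolve these defaults roughly as:
#   input[name=user]   -> "alice"   (value attribute)
#   input[name=tos]    -> "on"      (checked box without a value)
#   select[name=color] -> ["r"]     (multiple select: list of selected values)
#   textarea[name=bio] -> "hello"   (element string)
checkbox = soup.find("input", {"name": "tos"})
print(checkbox.has_attr("checked"), checkbox.has_attr("value"))  # True False -> default "on"
select = soup.find("select")
print([o["value"] for o in select.find_all("option") if o.has_attr("selected")])  # ['r']
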
def get_region(b):
    remap = {None: 'us-east-1', 'EU': 'eu-west-1'}
    region = b.get('Location', {}).get('LocationConstraint')
    return remap.get(region, region) | Tries to get the bucket region from Location.LocationConstraint
Special cases:
LocationConstraint EU defaults to eu-west-1
LocationConstraint null defaults to us-east-1
Args:
b (object): A bucket object
Returns:
string: an aws region string | juraj-google-style |
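
A usage sketch with hand-written bucket objects shaped like GetBucketLocation responses (nothing here calls AWS):

def get_region(b):
    remap = {None: 'us-east-1', 'EU': 'eu-west-1'}
    region = b.get('Location', {}).get('LocationConstraint')
    return remap.get(region, region)

buckets = [
    {'Name': 'us-bucket', 'Location': {'LocationConstraint': None}},
    {'Name': 'eu-bucket', 'Location': {'LocationConstraint': 'EU'}},
    {'Name': 'ap-bucket', 'Location': {'LocationConstraint': 'ap-southeast-2'}},
    {'Name': 'no-location'},
]
for b in buckets:
    print(b['Name'], '->', get_region(b))
# us-bucket -> us-east-1
# eu-bucket -> eu-west-1
# ap-bucket -> ap-southeast-2
# no-location -> us-east-1
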
def get_page_artid(self, separator='-'):
    publication_info = get_value(self.record, 'publication_info[0]', default={})
    return LiteratureReader.get_page_artid_for_publication_info(publication_info, separator) | Return the page range or the article id of a record.
Args:
separator(basestring): optional page range symbol, defaults to a single dash
Returns:
string: the page range or the article id of the record.
Examples:
>>> record = {
... 'publication_info': [
... {'artid': '054021'},
... ],
... }
>>> LiteratureReader(record).get_page_artid()
'054021' | codesearchnet |
def _prepare_doc(func, args, delimiter_chars):
    _LOG.debug("Preparing doc for '%s'", func.__name__)
    if (not func.__doc__):
        return _get_default_help_message(func, args)
    description = []
    args_help = {}
    fill_description = True
    arg_name = None
    arg_doc_regex = re.compile(('\x08*(?P<arg_name>\\w+)\\s*%s\\s*(?P<help_msg>.+)' % delimiter_chars))
    for line in func.__doc__.splitlines():
        line = line.strip()
        if (line and fill_description):
            description.append(line)
        elif line:
            arg_match = arg_doc_regex.match(line)
            try:
                arg_name = arg_match.groupdict()['arg_name'].strip()
                args_help[arg_name] = arg_match.groupdict()['help_msg'].strip()
            except AttributeError:
                if (arg_name is not None):
                    args_help[arg_name] = ' '.join([args_help[arg_name], line])
        else:
            # A blank line ends the description; a blank line after the args ends parsing.
            if ((not fill_description) and args_help):
                break
            fill_description = False
    return _get_default_help_message(func, args, ' '.join(description), args_help) | From the function docstring get the arg parse description and arguments
help message. If there is no docstring, a simple description and help
message are created.
Args:
func: the function that needs argument parsing
args: name of the function arguments
delimiter_chars: characters used to separate the parameters from their
help message in the docstring
Returns:
A tuple containing the description to be used in the argument parser and
a dict indexed on the callable argument name and their associated help
message | codesearchnet |
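
A small demonstration of the argument-line regex on its own (illustrative; the leading '\x08*' from the code above, a backspace repeat that is effectively a no-op on normal text, is dropped here for clarity):

import re

delimiter_chars = ':'
arg_doc_regex = re.compile(r'(?P<arg_name>\w+)\s*%s\s*(?P<help_msg>.+)' % delimiter_chars)

print(arg_doc_regex.match('timeout: seconds to wait before giving up').groupdict())
# {'arg_name': 'timeout', 'help_msg': 'seconds to wait before giving up'}

# A continuation line has no delimiter, so match() returns None and the function
# above appends it to the previous argument's help message instead.
print(arg_doc_regex.match('continues onto a second line'))  # None
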
def _load_attributes(self, mft_config, attrs_view):
    offset = 0
    load_attrs = mft_config.attribute_load_list
    while (attrs_view[offset:(offset + 4)] != b'\xff\xff\xff\xff'):
        (attr_type, attr_len, non_resident) = _get_attr_info(attrs_view[offset:])
        if (attr_type in load_attrs):
            attr = Attribute.create_from_binary(non_resident, mft_config.load_dataruns, attrs_view[offset:])
            if (not (attr.header.attr_type_id is AttrTypes.DATA)):
                self.attrs[attr.header.attr_type_id].append(attr)
            else:
                self._add_data_attribute(attr)
        offset += attr_len | Loads all the attributes of an entry.
Once executed, all the attributes should have been loaded into the
*attrs* instance attribute.
Args:
mft_config (:obj:`MFTConfig`): An instance of MFTConfig, as this tells
how the library will interpret data.
attrs_view (memoryview(bytearray)): A binary stream that starts at
the first attribute and runs to the end of the entry. | codesearchnet
def start(self, attempts=5, timeout=2):
    if (not self.alive()):
        with LogTask(('Create network %s' % self.name())):
            net = self.libvirt_con.networkCreateXML(self._libvirt_xml())
            if (net is None):
                raise RuntimeError(('failed to create network, XML: %s' % self._libvirt_xml()))
            for _ in range(attempts):
                if net.isActive():
                    return
                LOGGER.debug('waiting for network %s to become active', net.name())
                time.sleep(timeout)
            raise RuntimeError(('failed to verify network %s is active' % net.name())) | Start the network, will check if the network is active ``attempts``
times, waiting ``timeout`` between each attempt.
Args:
attempts (int): number of attempts to check the network is active
timeout (int): timeout for each attempt
Returns:
None
Raises:
RuntimeError: if network creation failed, or failed to verify it is
active. | codesearchnet |
def _client_send(self, message):
    try:
        self._client.write(f'{message}\n'.encode('utf8'))
        self._client.flush()
    except socket.error as e:
        raise errors.Error(self._device, f'Encountered socket error "{e}" sending RPC message "{message}"') from e | Sends an RPC message through the connection.
Args:
message: str, the message to send.
Raises:
errors.Error: if a socket error occurred during the send. | github-repos |
def run(self, stim, merge=True, **merge_kwargs):
    results = list(chain(*[self.run_node(n, stim) for n in self.roots]))
    results = list(flatten(results))
    self._results = results
    return (merge_results(results, **merge_kwargs) if merge else results) | Executes the graph by calling all Transformers in sequence.
Args:
stim (str, Stim, list): One or more valid inputs to any
Transformer's 'transform' call.
merge (bool): If True, all results are merged into a single pandas
DataFrame before being returned. If False, a list of
ExtractorResult objects is returned (one per Extractor/Stim
combination).
merge_kwargs: Optional keyword arguments to pass onto the
merge_results() call. | codesearchnet |
def popular(self, **kwargs):
    path = self._get_path('popular')
    response = self._GET(path, kwargs)
    self._set_attrs_to_values(response)
    return response | Get the list of popular movies on The Movie Database. This list
refreshes every day.
Args:
page: (optional) Minimum value of 1. Expected value is an integer.
language: (optional) ISO 639-1 code.
Returns:
A dict representation of the JSON returned from the API. | codesearchnet |
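
For reference, the underlying TMDB call that a wrapper like this issues can be sketched with requests directly (illustrative only; the real class handles paths, keys and attribute mapping itself):

import requests

API_KEY = 'YOUR_TMDB_API_KEY'  # placeholder

def popular(page=1, language='en-US'):
    url = 'https://api.themoviedb.org/3/movie/popular'
    params = {'api_key': API_KEY, 'page': page, 'language': language}
    return requests.get(url, params=params).json()

# response = popular(page=1)
# print(response['results'][0]['title'])
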
def register_layouts(layouts, app, url='/api/props/', brand='Pyxley'):
    def props(name):
        if (name not in layouts):
            name = list(layouts.keys())[0]
        return jsonify({'layouts': layouts[name]['layout']})
    def apps():
        paths = []
        for (i, k) in enumerate(layouts.keys()):
            if (i == 0):
                paths.append({'path': '/', 'label': layouts[k].get('title', k)})
            paths.append({'path': ('/' + k), 'label': layouts[k].get('title', k)})
        return jsonify({'brand': brand, 'navlinks': paths})
    app.add_url_rule((url + '<string:name>/'), view_func=props)
    app.add_url_rule(url, view_func=apps) | register UILayout with the flask app
create a function that will send props for each UILayout
Args:
layouts (dict): dict of UILayout objects by name
app (object): flask app
url (string): address of props; default is /api/props/ | codesearchnet |
def deconstruct_single_qubit_matrix_into_angles(
        mat: np.ndarray) -> Tuple[float, float, float]:
    right_phase = cmath.phase(mat[0, 1] * np.conj(mat[0, 0])) + math.pi
    mat = np.dot(mat, _phase_matrix(-right_phase))
    bottom_phase = cmath.phase(mat[1, 0] * np.conj(mat[0, 0]))
    mat = np.dot(_phase_matrix(-bottom_phase), mat)
    rotation = math.atan2(abs(mat[1, 0]), abs(mat[0, 0]))
    mat = np.dot(_rotation_matrix(-rotation), mat)
    diagonal_phase = cmath.phase(mat[1, 1] * np.conj(mat[0, 0]))
    return right_phase + diagonal_phase, rotation * 2, bottom_phase | Breaks down a 2x2 unitary into more useful ZYZ angle parameters.
Args:
mat: The 2x2 unitary matrix to break down.
Returns:
A tuple containing the amount to phase around Z, then rotate around Y,
then phase around Z (all in radians). | juraj-google-style |
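
A quick numeric check, assuming the function above is in scope together with helpers equivalent to the _phase_matrix/_rotation_matrix it calls (reconstructed below for the sketch):

import cmath
import math
from typing import Tuple

import numpy as np

def _phase_matrix(angle):
    return np.diag([1, cmath.exp(1j * angle)])

def _rotation_matrix(angle):
    c, s = math.cos(angle), math.sin(angle)
    return np.array([[c, -s], [s, c]])

hadamard = np.array([[1, 1], [1, -1]]) / math.sqrt(2)
z_before, y_rad, z_after = deconstruct_single_qubit_matrix_into_angles(hadamard)

# Rebuild the unitary from the three angles (global phase is ignored in general;
# for the Hadamard it happens to come out exactly).
rebuilt = _phase_matrix(z_after) @ _rotation_matrix(y_rad / 2) @ _phase_matrix(z_before)
print(np.allclose(rebuilt, hadamard))  # True
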
def _parse_input_data(self, node):
    data = DotDict()
    try:
        for nod in self._get_input_nodes(node):
            data.update(self._parse_input_node(nod))
    except Exception as e:
        log.exception("Error while processing node: %s" % node)
    return data | Parses the inputOutput part of Camunda modeler extensions.
Args:
node: SpiffWorkflow Node object.
Returns:
Data dict. | juraj-google-style |
def load(self, source, pause=False):
    self._source = source
    self._load_source(source)
    if pause:
        time.sleep(0.5)
        self.pause() | Loads a new source (as a file) from ``source`` (a file path or URL)
by killing the current ``omxplayer`` process and forking a new one.
Args:
source (string): Path to the file to play or URL | juraj-google-style |
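
Typical usage in the omxplayer-wrapper style this comes from (assumes a Raspberry Pi with omxplayer and the omxplayer-wrapper package; paths are placeholders):

from omxplayer.player import OMXPlayer

player = OMXPlayer('/home/pi/videos/intro.mp4')          # start with an initial source
player.load('/home/pi/videos/feature.mp4', pause=True)   # swap sources, come up paused
player.play()                                            # resume once ready
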