code (string, 20-4.93k chars) | docstring (string, 33-1.27k chars) | source (3 classes)
---|---|---|
def _process_celeba_config_file(self, file_path):
with tf.io.gfile.GFile(file_path) as f:
data_raw = f.read()
lines = data_raw.split('\n')
keys = lines[1].strip().split()
values = {}
for line in lines[2:(- 1)]:
row_values = line.strip().split()
values[row_values[0]] = [int(v) for v in row_values[1:]]
return (keys, values)
|
Unpack the celeba config file.
The file starts with the number of lines, and a header.
Afterwards, there is a configuration for each file: one per line.
Args:
file_path: Path to the file with the configuration.
Returns:
keys: names of the attributes
values: map from the file name to the list of attribute values for
this file.
|
codesearchnet
|
def validate_level_indexes(num_levels, v_level_indexes, h_level_indexes):
if (num_levels < 1):
raise ValueError('num_levels {} is less than one'.format(num_levels))
all_levels = SortedFrozenSet(range(num_levels))
if ((h_level_indexes is None) and (v_level_indexes is None)):
v_level_indexes = range(0, num_levels, 2)
h_level_indexes = range(1, num_levels, 2)
h_level_set = SortedFrozenSet(h_level_indexes)
v_level_set = SortedFrozenSet(v_level_indexes)
if (h_level_indexes is None):
h_level_indexes = (all_levels - v_level_set)
if (v_level_indexes is None):
v_level_indexes = (all_levels - h_level_set)
if (len(h_level_indexes) != len(h_level_set)):
raise ValueError('h_level_indexes contains duplicate values')
if (h_level_set and ((h_level_set[0] < 0) or (h_level_set[(- 1)] >= num_levels))):
raise ValueError('h_level_indexes contains out of range values')
if (len(v_level_indexes) != len(v_level_set)):
raise ValueError('v_level_indexes contains duplicate values')
if (v_level_set and ((v_level_set[0] < 0) or (v_level_set[(- 1)] >= num_levels))):
raise ValueError('v_level_indexes contains out of range values')
unmentioned_levels = ((all_levels - v_level_set) - h_level_set)
if (len(unmentioned_levels) > 0):
raise ValueError('v_level_indexes and h_level_indexes do not together include levels {}'.format(', '.join(map(str, unmentioned_levels))))
if (not h_level_set.isdisjoint(v_level_set)):
raise ValueError('h_level_indexes and v_level_indexes are not disjoint')
v_level_indexes = list(v_level_indexes)
h_level_indexes = list(h_level_indexes)
return (v_level_indexes, h_level_indexes)
|
Ensure that v_level_indexes and h_level_indexes are consistent.
Args:
num_levels: The number of levels of keys in the data structure being tabulated.
v_level_indexes: A sequence of level indexes between zero and num_levels for
the vertical axis, or None.
h_level_indexes: A sequence of level indexes between zero and num_levels for
the horizontal axis, or None.
Returns:
A 2-tuple containing v_level_indexes and h_level_indexes sequences.
Raises:
ValueError: If v_level_indexes contains duplicate values.
ValueError: If h_level_indexes contains duplicate values.
ValueError: If v_level_indexes contains out of range values.
ValueError: If h_level_indexes contains out of range values.
ValueError: If, taken together, v_level_indexes and h_level_indexes
do not include all levels from zero up to, but not including,
num_levels.
ValueError: If v_level_indexes and h_level_indexes have items in
common.
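For illustration, a minimal sketch of the default split when neither sequence is given (assuming the function is importable):
```python
# With both index sequences omitted, levels alternate between the two axes.
v_levels, h_levels = validate_level_indexes(4, None, None)
# v_levels == [0, 2], h_levels == [1, 3]
```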
|
codesearchnet
|
def gradient_summaries(grad_vars, groups=None, scope='gradients'):
groups = groups or {r'all': r'.*'}
grouped = collections.defaultdict(list)
for grad, var in grad_vars:
if grad is None:
continue
for name, pattern in groups.items():
if re.match(pattern, var.name):
name = re.sub(pattern, name, var.name)
grouped[name].append(grad)
for name in groups:
if name not in grouped:
tf.logging.warn("No variables matching '{}' group.".format(name))
summaries = []
for name, grads in grouped.items():
grads = [tf.reshape(grad, [-1]) for grad in grads]
grads = tf.concat(grads, 0)
summaries.append(tf.summary.histogram(scope + '/' + name, grads))
return tf.summary.merge(summaries)
|
Create histogram summaries of the gradient.
Summaries can be grouped via regexes matching variables names.
Args:
grad_vars: List of (gradient, variable) tuples as returned by optimizers.
groups: Mapping of name to regex for grouping summaries.
scope: Name scope for this operation.
Returns:
Summary tensor.
|
juraj-google-style
|
def parse_hgnc_line(line, header):
hgnc_gene = {}
line = line.rstrip().split('\t')
raw_info = dict(zip(header, line))
if 'Withdrawn' in raw_info['status']:
return hgnc_gene
hgnc_symbol = raw_info['symbol']
hgnc_gene['hgnc_symbol'] = hgnc_symbol
hgnc_gene['hgnc_id'] = int(raw_info['hgnc_id'].split(':')[-1])
hgnc_gene['description'] = raw_info['name']
aliases = set([hgnc_symbol, hgnc_symbol.upper()])
previous_names = raw_info['prev_symbol']
if previous_names:
for alias in previous_names.strip('"').split('|'):
aliases.add(alias)
alias_symbols = raw_info['alias_symbol']
if alias_symbols:
for alias in alias_symbols.strip('"').split('|'):
aliases.add(alias)
hgnc_gene['previous_symbols'] = list(aliases)
hgnc_gene['ensembl_gene_id'] = raw_info.get('ensembl_gene_id')
omim_id = raw_info.get('omim_id')
if omim_id:
hgnc_gene['omim_id'] = int(omim_id.strip('"').split('|')[0])
else:
hgnc_gene['omim_id'] = None
entrez_id = hgnc_gene['entrez_id'] = raw_info.get('entrez_id')
if entrez_id:
hgnc_gene['entrez_id'] = int(entrez_id)
else:
hgnc_gene['entrez_id'] = None
ref_seq = raw_info.get('refseq_accession')
if ref_seq:
hgnc_gene['ref_seq'] = ref_seq.strip('"').split('|')
else:
hgnc_gene['ref_seq'] = []
uniprot_ids = raw_info.get('uniprot_ids')
if uniprot_ids:
hgnc_gene['uniprot_ids'] = uniprot_ids.strip('""').split('|')
else:
hgnc_gene['uniprot_ids'] = []
ucsc_id = raw_info.get('ucsc_id')
if ucsc_id:
hgnc_gene['ucsc_id'] = ucsc_id
else:
hgnc_gene['ucsc_id'] = None
vega_id = raw_info.get('vega_id')
if vega_id:
hgnc_gene['vega_id'] = vega_id
else:
hgnc_gene['vega_id'] = None
return hgnc_gene
|
Parse an HGNC-formatted line
Args:
line(str): A tab-separated line with HGNC gene info
header(list): A list with the header info
Returns:
hgnc_info(dict): A dictionary with the relevant info
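A minimal usage sketch with illustrative values (only the columns the parser reads unconditionally are included):
```python
header = ['hgnc_id', 'symbol', 'name', 'status', 'prev_symbol', 'alias_symbol']
line = 'HGNC:5\tA1BG\talpha-1-B glycoprotein\tApproved\t\t"A1B|ABG"\n'
gene = parse_hgnc_line(line, header)
# gene['hgnc_id'] == 5, gene['hgnc_symbol'] == 'A1BG'
```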
|
juraj-google-style
|
def recompute_grad(fn):
@functools.wraps(fn)
def wrapped(*args):
return _recompute_grad(fn, args)
return wrapped
|
Decorator that recomputes the function on the backwards pass.
Args:
fn: a function that takes Tensors (all as positional arguments) and returns
a tuple of Tensors.
Returns:
A wrapped fn that is identical to fn when called, but its activations will
be discarded and recomputed on the backwards pass (i.e. on a call to
tf.gradients).
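A minimal decorator sketch (assumes a TF1-style graph and a wrapped function that returns a tuple of Tensors):
```python
import tensorflow as tf  # TF1-style API assumed

@recompute_grad
def block(x):
    # Activations computed here are discarded and recomputed when tf.gradients runs.
    return (tf.nn.relu(x),)
```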
|
codesearchnet
|
def make_qs(n, m=None):
try:
import sympy
except ImportError:
raise ImportError("This function requires sympy. Please install it.")
if m is None:
syms = sympy.symbols(" ".join(f"q{i}" for i in range(n)))
if isinstance(syms, tuple):
return syms
else:
return (syms,)
syms = sympy.symbols(" ".join(f"q{i}" for i in range(n, m)))
if isinstance(syms, tuple):
return syms
else:
return (syms,)
|
Make sympy symbols q0, q1, ...
Args:
n(int), m(int, optional):
If both n and m are specified, returns (qn, q(n+1), ..., q(m-1));
if only n is specified, returns (q0, q1, ..., q(n-1)).
Return:
tuple(Symbol): Tuple of sympy symbols.
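Doctest-style sketch (assuming sympy is installed):
>>> make_qs(3)
(q0, q1, q2)
>>> make_qs(2, 5)
(q2, q3, q4)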
|
juraj-google-style
|
def CheckFile(self, filename):
result = True
artifact_reader = reader.YamlArtifactsReader()
try:
for artifact_definition in artifact_reader.ReadFile(filename):
try:
self._artifact_registry.RegisterDefinition(artifact_definition)
except KeyError:
logging.warning(
'Duplicate artifact definition: {0:s} in file: {1:s}'.format(
artifact_definition.name, filename))
result = False
artifact_definition_supports_macos = (
definitions.SUPPORTED_OS_DARWIN in (
artifact_definition.supported_os))
artifact_definition_supports_windows = (
definitions.SUPPORTED_OS_WINDOWS in (
artifact_definition.supported_os))
for source in artifact_definition.sources:
if source.type_indicator in (
definitions.TYPE_INDICATOR_FILE, definitions.TYPE_INDICATOR_PATH):
if (definitions.SUPPORTED_OS_DARWIN in source.supported_os or (
artifact_definition_supports_macos and
not source.supported_os)):
if not self._CheckMacOSPaths(
filename, artifact_definition, source, source.paths):
result = False
elif (artifact_definition_supports_windows or
definitions.SUPPORTED_OS_WINDOWS in source.supported_os):
for path in source.paths:
if not self._CheckWindowsPath(
filename, artifact_definition, source, path):
result = False
elif source.type_indicator == (
definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY):
if (filename != self.LEGACY_PATH and
self._HasDuplicateRegistryKeyPaths(
filename, artifact_definition, source)):
result = False
for key_path in source.keys:
if not self._CheckWindowsRegistryKeyPath(
filename, artifact_definition, key_path):
result = False
elif source.type_indicator == (
definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE):
for key_value_pair in source.key_value_pairs:
if not self._CheckWindowsRegistryKeyPath(
filename, artifact_definition, key_value_pair['key']):
result = False
except errors.FormatError as exception:
logging.warning(
'Unable to validate file: {0:s} with error: {1!s}'.format(
filename, exception))
result = False
return result
|
Validates the artifacts definition in a specific file.
Args:
filename (str): name of the artifacts definition file.
Returns:
bool: True if the file contains valid artifacts definitions.
|
juraj-google-style
|
def _AddAttribute(self, attribute):
if (attribute.identifier in self._attributes):
raise KeyError('Volume attribute object already set for volume attribute identifier: {0:s}.'.format(attribute.identifier))
self._attributes[attribute.identifier] = attribute
|
Adds an attribute.
Args:
attribute (VolumeAttribute): a volume attribute.
Raises:
KeyError: if volume attribute is already set for the corresponding volume
attribute identifier.
|
codesearchnet
|
def set_colourtemp(self, colourtemp):
if (not (0 <= colourtemp <= 255)):
raise ValueError('The colour temperature needs to be between 0 and 255.')
payload = self.generate_payload(SET, {self.DPS_INDEX_COLOURTEMP: colourtemp})
data = self._send_receive(payload)
return data
|
Set the colour temperature of an rgb bulb.
Args:
colourtemp(int): Value for the colour temperature (0-255).
|
codesearchnet
|
def _remove_one_redundant_stack_unstack(in_graph_def):
name_to_input_name, name_to_node, name_to_seq_num = _extract_graph_summary(in_graph_def)
del name_to_seq_num
do_generic_pack_unpack = True
out = _graph_pb2.GraphDef()
out.library.CopyFrom(in_graph_def.library)
out.versions.CopyFrom(in_graph_def.versions)
for n in in_graph_def.node:
node_name = _tensor_name_base(n.name)
if not node_name.startswith('OpHintStack') and (not n.op.startswith('Pack')):
continue
next_to_visit = [node_name]
visited = set()
unpack_nodes = set()
pack_node = node_name
matches_pattern = True
is_hint_created_stack = False
while next_to_visit:
current_node_name = next_to_visit[0]
visited.add(current_node_name)
del next_to_visit[0]
node = name_to_node[current_node_name]
is_op_hint_stack = node.name.startswith('OpHintStack')
is_op_hint_unstack = node.name.startswith('OpHintUnstack')
if node.op == 'Identity' or is_op_hint_stack or (do_generic_pack_unpack and node.op == 'Pack'):
is_hint_created_stack |= is_op_hint_stack
next_to_visit += [input_node for input_node in name_to_input_name[current_node_name] if input_node not in visited]
elif is_op_hint_unstack or (do_generic_pack_unpack and node.op == 'Unpack'):
unpack_nodes.add(node.name)
is_hint_created_stack &= is_op_hint_unstack
else:
matches_pattern = False
break
visited.add(node.name)
if matches_pattern and len(unpack_nodes) == 1:
pack_node = node_name
no_external_dependency = True
for other_n in in_graph_def.node:
if other_n.name in visited:
continue
for input_tensor in name_to_input_name[other_n.name]:
input_op = _tensor_name_base(input_tensor)
if input_op in visited and input_op != pack_node:
no_external_dependency = False
if is_hint_created_stack or no_external_dependency:
end = unpack_nodes.pop()
end_input = name_to_node[end].input[0]
for other_n in in_graph_def.node:
node_name = _tensor_name_base(other_n.name)
if node_name not in visited:
new_node = _copy.deepcopy(other_n)
new_node.input[:] = [end_input if stripped == pack_node else non_stripped for stripped, non_stripped in zip(name_to_input_name[node_name], new_node.input[:])]
out.node.extend([new_node])
return (out, True)
return (in_graph_def, False)
|
Removes a stack->unstack pattern from in_graph_def in a returned graph.
Args:
in_graph_def: Graph def to use as input.
Returns:
Simplified tuple (graph_def, changed_something) where changed_something
is true if anything was done.
|
github-repos
|
def __init__(self, value_type, value):
self.value_type = value_type
self.value = value_type(value)
|
Args:
value_type: Type of the static value
value: Static value
|
github-repos
|
def scatter(indices, values, shape):
if any_symbolic_tensors((indices, values)):
return Scatter(shape=shape).symbolic_call(indices, values)
return backend.core.scatter(indices, values, shape)
|
Returns a tensor of shape `shape` where `indices` are set to `values`.
At a high level, this operation does `zeros[indices] = updates` and
returns the output. It is equivalent to:
```python
zeros = keras.ops.zeros(shape)
output = keras.ops.scatter_update(zeros, indices, values)
```
Args:
indices: A tensor or list/tuple specifying
indices for the values in `values`.
values: A tensor, the values to be set at `indices`.
shape: Shape of the output tensor.
Example:
>>> indices = [[0, 1], [1, 1]]
>>> values = np.array([1., 1.])
>>> keras.ops.scatter(indices, values, shape=(2, 2))
array([[0., 1.],
[0., 1.]])
|
github-repos
|
def __init__(
self, processing_configuration, enable_sigsegv_handler=False, **kwargs):
super(MultiProcessBaseProcess, self).__init__(**kwargs)
self._debug_output = False
self._enable_sigsegv_handler = enable_sigsegv_handler
self._guppy_memory_profiler = None
self._log_filename = None
self._memory_profiler = None
self._original_sigsegv_handler = None
self._pid = None
self._processing_configuration = processing_configuration
self._process_information = None
self._processing_profiler = None
self._quiet_mode = False
self._rpc_server = None
self._serializers_profiler = None
self._status_is_running = False
self._storage_profiler = None
self._tasks_profiler = None
if self._processing_configuration:
self._debug_output = self._processing_configuration.debug_output
if processing_configuration.log_filename:
log_path = os.path.dirname(self._processing_configuration.log_filename)
log_filename = os.path.basename(
self._processing_configuration.log_filename)
log_filename = '{0:s}_{1:s}'.format(self._name, log_filename)
self._log_filename = os.path.join(log_path, log_filename)
self.rpc_port = multiprocessing.Value('I', 0)
|
Initializes a process.
Args:
processing_configuration (ProcessingConfiguration): processing
configuration.
enable_sigsegv_handler (Optional[bool]): True if the SIGSEGV handler
should be enabled.
kwargs (dict[str,object]): keyword arguments to pass to
multiprocessing.Process.
|
juraj-google-style
|
def learn(self, state_arr, limit=1000):
while self.t <= limit:
next_action_arr = self.extract_possible_actions(state_arr)
predicted_q_arr = self.__function_approximator.inference_q(next_action_arr)
reward_value_arr = np.empty((next_action_arr.shape[0], 1))
next_max_q_arr = np.empty((next_action_arr.shape[0], 1))
for i in range(reward_value_arr.shape[0]):
reward_value_arr[i] = self.observe_reward_value(state_arr, next_action_arr[i])
next_next_action_arr = self.extract_possible_actions(next_action_arr[i])
next_max_q_arr[i] = self.__function_approximator.inference_q(next_next_action_arr).max()
action_arr, predicted_q = self.select_action(next_action_arr, predicted_q_arr)
real_q_arr = self.update_q(
predicted_q_arr,
reward_value_arr,
next_max_q_arr
)
real_q = real_q_arr[np.where(predicted_q_arr == predicted_q)[0][0]]
if self.__q_logs_arr.shape[0] > 0:
self.__q_logs_arr = np.r_[
self.__q_logs_arr,
np.array([predicted_q, real_q]).reshape(1, 2)
]
else:
self.__q_logs_arr = np.array([predicted_q, real_q]).reshape(1, 2)
self.learn_q(predicted_q_arr, real_q_arr)
state_arr = self.update_state(state_arr, action_arr)
self.t += 1
end_flag = self.check_the_end_flag(state_arr)
if end_flag is True:
break
|
Learning and searching the optimal solution.
Args:
state_arr: `np.ndarray` of initial state.
limit: The maximum number of iterative updates based on value iteration algorithms.
|
juraj-google-style
|
def __init__(self, group_key_start=1):
self._group_key = group_key_start
self._instance_key_table = {}
self._lock = threading.Lock()
self._known_groups = {}
|
Initializes the object.
Args:
group_key_start: the starting integer of group key.
|
github-repos
|
def _serialize_tensor_like_io(value, debug_path: Optional[str]=None, use_repr: bool=True, path_to_value: Optional[str]=None):
torch.set_printoptions(sci_mode=True)
if use_repr:
value_out = _repr_to_list(value)
elif path_to_value:
if not path_to_value.endswith('.safetensors'):
path_to_value += '.safetensors'
filepath = os.path.join(debug_path, path_to_value) if debug_path else path_to_value
save_file({'data': value.contiguous().detach().cpu()}, filepath)
value_out = f'./{path_to_value}'
else:
raise ValueError(f'use_repr={use_repr!r} and path_to_value={path_to_value!r} cannot both be falsy.')
out = {'shape': repr(value.shape), 'dtype': repr(value.dtype), 'value': value_out}
if value.dtype in {torch.float16, torch.float32, torch.bfloat16}:
out.update({'mean': _sanitize_repr_for_diff(repr(value.mean())), 'std': _sanitize_repr_for_diff(repr(value.std())), 'min': _sanitize_repr_for_diff(repr(value.min())), 'max': _sanitize_repr_for_diff(repr(value.max()))})
return out
|
Converts Tensors and DTensors to a JSON-serializable dictionary representation.
Args:
value: Any Python object, often including torch Tensors, lists, dicts, etc.
debug_path (`str`, *optional*, defaults to `None`): Directory to dump debug JSON and SafeTensors files.
use_repr (bool, *optional*, defaults to `True`): Whether to save a `repr()`-ized version of the tensor as the
`value` property in the associated FULL_TENSORS.json file, or to store the full tensors in a separate
SafeTensors file and store the relative path to that file in the `value` property of the dictionary.
path_to_value (`str`, *optional*, defaults to `None`): The file name for the SafeTensors file holding the full
tensor value if `use_repr=False`.
Returns:
A nested Python structure (list, dict, or sanitized string) that is safe to json.dump.
|
github-repos
|
def _QueryHash(self, nsrl_socket, digest):
try:
query = 'QUERY {0:s}\n'.format(digest).encode('ascii')
except UnicodeDecodeError:
logger.error('Unable to encode digest: {0!s} to ASCII.'.format(digest))
return False
response = None
try:
nsrl_socket.sendall(query)
response = nsrl_socket.recv(self._RECEIVE_BUFFER_SIZE)
except socket.error as exception:
logger.error('Unable to query nsrlsvr with error: {0!s}.'.format(
exception))
if not response:
return False
response = response.strip()
return response == b'OK 1'
|
Queries nsrlsvr for a specific hash.
Args:
nsrl_socket (socket._socketobject): socket of connection to nsrlsvr.
digest (str): hash to look up.
Returns:
bool: True if the hash was found, False if not or None on error.
|
juraj-google-style
|
def __init__(self, *args, **kwargs):
super(JLinkDeviceInfo, self).__init__(*args, **kwargs)
self.SizeofStruct = ctypes.sizeof(self)
|
Initializes the instance.
Populates the ``.SizeofStruct`` parameter to the size of the instance.
Args:
self (JLinkDeviceInfo): the ``JLinkDeviceInfo`` instance
args: list of arguments
kwargs: key-word arguments dictionary
Returns:
``None``
|
juraj-google-style
|
def open_channel_url(channel, staging=False):
return OPEN_CHANNEL_URL.format(domain=DOMAIN, channel_id=channel, access='staging' if staging or STAGE else 'edit')
|
open_channel_url: returns the URL of an uploaded channel
Args:
channel (str): channel id of uploaded channel
staging (bool): if True, return the staging URL rather than the edit URL
Returns: string url to open channel
|
juraj-google-style
|
def condition_details_has_owner(condition_details, owner):
if 'subconditions' in condition_details:
result = condition_details_has_owner(condition_details['subconditions'], owner)
if result:
return True
elif isinstance(condition_details, list):
for subcondition in condition_details:
result = condition_details_has_owner(subcondition, owner)
if result:
return True
else:
if 'public_key' in condition_details \
and owner == condition_details['public_key']:
return True
return False
|
Check if the public_key of owner is in the condition details
as an Ed25519Fulfillment.public_key
Args:
condition_details (dict): dict with condition details
owner (str): base58 public key of owner
Returns:
bool: True if the public key is found in the condition details, False otherwise
|
juraj-google-style
|
def random( self ):
j = np.searchsorted( self.cumulative_probabilities(), random.random() )
return self.jumps[ j ]
|
Select a jump at random with appropriate relative probabilities.
Args:
None
Returns:
(Jump): The randomly selected Jump.
|
juraj-google-style
|
def get_functions_overridden_by(self, function):
candidates = [c.functions_not_inherited for c in self.inheritance]
candidates = [candidate for sublist in candidates for candidate in sublist]
return [f for f in candidates if f.full_name == function.full_name]
|
Return the list of functions overridden by the given function
Args:
function (core.Function): the function to check
Returns:
list(core.Function)
|
juraj-google-style
|
def pipe(engine, format, data, renderer=None, formatter=None, quiet=False):
(cmd, _) = command(engine, format, None, renderer, formatter)
(out, _) = run(cmd, input=data, capture_output=True, check=True, quiet=quiet)
return out
|
Return ``data`` piped through Graphviz ``engine`` into ``format``.
Args:
engine: The layout command used for rendering (``'dot'``, ``'neato'``, ...).
format: The output format used for rendering (``'pdf'``, ``'png'``, ...).
data: The binary (encoded) DOT source string to render.
renderer: The output renderer used for rendering (``'cairo'``, ``'gd'``, ...).
formatter: The output formatter used for rendering (``'cairo'``, ``'gd'``, ...).
quiet (bool): Suppress ``stderr`` output.
Returns:
Binary (encoded) stdout of the layout command.
Raises:
ValueError: If ``engine``, ``format``, ``renderer``, or ``formatter`` are not known.
graphviz.RequiredArgumentError: If ``formatter`` is given but ``renderer`` is None.
graphviz.ExecutableNotFound: If the Graphviz executable is not found.
subprocess.CalledProcessError: If the exit status is non-zero.
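A minimal usage sketch (assumes the Graphviz executables are installed and on PATH):
```python
png_bytes = pipe('dot', 'png', b'graph { a -- b }')
```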
|
codesearchnet
|
def create_position_ids_from_input_ids(input_ids, padding_idx):
mask = input_ids.ne(padding_idx).int()
incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
return incremental_indices.long() + padding_idx
|
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
input_ids (torch.Tensor): tensor of input token ids.
padding_idx (int): id of the padding symbol.
Returns: torch.Tensor of position ids.
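Doctest-style sketch (assuming torch is available):
>>> import torch
>>> input_ids = torch.tensor([[0, 5, 7, 1, 1]])  # 1 is the padding index
>>> create_position_ids_from_input_ids(input_ids, padding_idx=1)
tensor([[2, 3, 4, 1, 1]])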
|
github-repos
|
def get_layer_vis_square(data,
allow_heatmap=True,
normalize=True,
min_img_dim=100,
max_width=1200,
channel_order='RGB',
colormap='jet',
):
if channel_order not in ['RGB', 'BGR']:
raise ValueError('Unsupported channel_order %s' % channel_order)
if data.ndim == 1:
data = data[:, np.newaxis, np.newaxis]
elif data.ndim == 2:
data = data.reshape((data.shape[0] * data.shape[1], 1, 1))
elif data.ndim == 3:
if data.shape[0] == 3:
if channel_order == 'BGR':
data = data[[2, 1, 0], ...]
data = data.transpose(1, 2, 0)
data = data[np.newaxis, ...]
else:
pass
elif data.ndim == 4:
if data.shape[0] == 3:
data = data.transpose(1, 2, 3, 0)
if channel_order == 'BGR':
data = data[:, :, :, [2, 1, 0]]
elif data.shape[1] == 3:
data = data.transpose(0, 2, 3, 1)
if channel_order == 'BGR':
data = data[:, :, :, [2, 1, 0]]
else:
data = data.reshape((data.shape[0] * data.shape[1], data.shape[2], data.shape[3]))
else:
raise RuntimeError('unrecognized data shape: %s' % (data.shape,))
return get_layer_vis_square_raw(data,
allow_heatmap,
normalize,
min_img_dim,
max_width,
colormap,
)
|
Returns a vis_square for the given layer data
Arguments:
data -- a np.ndarray
Keyword arguments:
allow_heatmap -- if True, convert single channel images to heatmaps
normalize -- whether to normalize the data when visualizing
min_img_dim -- minimum dimension of the visualization image
max_width -- maximum width for the vis_square
channel_order -- 'RGB' or 'BGR', the channel order of the input data
colormap -- colormap used when rendering heatmaps
|
juraj-google-style
|
def MakeHistFromList(t, name=''):
hist = Hist(name=name)
[hist.Incr(x) for x in t]
return hist
|
Makes a histogram from an unsorted sequence of values.
Args:
t: sequence of numbers
name: string name for this histogram
Returns:
Hist object
|
codesearchnet
|
async def get_ticket(self, request):
session = await get_session(request)
return session.get(self.cookie_name)
|
Called to return the ticket for a request.
Args:
request: aiohttp Request object.
Returns:
A ticket (string-like) object, or None if no ticket is available
for the passed request.
|
juraj-google-style
|
def system_repertoire_distance(r1, r2):
if (config.MEASURE in measures.asymmetric()):
raise ValueError('{} is asymmetric and cannot be used as a system-level irreducibility measure.'.format(config.MEASURE))
return measures[config.MEASURE](r1, r2)
|
Compute the distance between two repertoires of a system.
Args:
r1 (np.ndarray): The first repertoire.
r2 (np.ndarray): The second repertoire.
Returns:
float: The distance between ``r1`` and ``r2``.
|
codesearchnet
|
def VerifyStructure(self, parser_mediator, line):
return max([parser.matches(line) for (_, parser) in self.LINE_STRUCTURES])
|
Verifies that this is an apache access log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
line (str): line from the text file.
Returns:
bool: True if this is the correct parser, False otherwise.
|
codesearchnet
|
def __init__(self, napp_path, tpl_path):
self._napp_path = napp_path
self._template = tpl_path / 'openapi.yml.template'
self._api_file = napp_path / 'openapi.yml'
metadata = napp_path / 'kytos.json'
self._napp = NApp.create_from_json(metadata)
self._summary = None
self._description = None
self._paths = {}
|
Instantiate an OpenAPI object.
Args:
napp_path (string): Napp directory
tpl_path (string): Path to the templates directory
|
juraj-google-style
|
def build(self, text, matrix, skim_depth=10, d_weights=False):
for anchor in bar(matrix.keys):
n1 = text.unstem(anchor)
pairs = matrix.anchored_pairs(anchor).items()
for term, weight in list(pairs)[:skim_depth]:
if d_weights: weight = 1-weight
n2 = text.unstem(term)
self.graph.add_edge(n1, n2, weight=float(weight))
|
1. For each term in the passed matrix, score its KDE similarity with
all other indexed terms.
2. With the ordered stack of similarities in hand, skim off the top X
pairs and add them as edges.
Args:
text (Text): The source text instance.
matrix (Matrix): An indexed term matrix.
skim_depth (int): The number of siblings for each term.
d_weights (bool): If true, give "close" words low edge weights.
|
juraj-google-style
|
def __response_message_descriptor(self, message_type, method_id):
descriptor = {'200': {'description': 'A successful response'}}
if message_type != message_types.VoidMessage():
self.__parser.add_message(message_type.__class__)
self.__response_schema[method_id] = self.__parser.ref_for_message_type(
message_type.__class__)
descriptor['200']['schema'] = {'$ref': '#/definitions/{0}'.format(
self.__response_schema[method_id])}
return dict(descriptor)
|
Describes the response.
Args:
message_type: messages.Message class, The message to describe.
method_id: string, Unique method identifier (e.g. 'myapi.items.method')
Returns:
Dictionary describing the response.
|
juraj-google-style
|
def MakeSuiteFromList(t, name=''):
hist = MakeHistFromList(t)
d = hist.GetDict()
return MakeSuiteFromDict(d)
|
Makes a suite from an unsorted sequence of values.
Args:
t: sequence of numbers
name: string name for this suite
Returns:
Suite object
|
juraj-google-style
|
def get_course_duration(self, obj):
duration = obj.end - obj.start if obj.start and obj.end else None
if duration:
return strfdelta(duration, '{W} weeks {D} days.')
return ''
|
Get the course's duration as a formatted string.
Arguments:
obj (CourseOverview): CourseOverview object
Returns:
(str): Formatted duration of the course (e.g. '4 weeks 2 days.'), or an empty string if start or end is missing.
|
juraj-google-style
|
def next_trials(self):
trials = []
for trial in self._trial_generator:
if (trial is None):
return trials
trials += [trial]
self._finished = True
return trials
|
Provides a batch of Trial objects to be queued into the TrialRunner.
A batch ends when self._trial_generator returns None.
Returns:
trials (list): Returns a list of trials.
|
codesearchnet
|
def remove(self, *l):
for a in flatten(l):
self._remove([self.Inner(a)], self.l)
|
Remove inner elements from the outer list
Args:
*l: elements that are passed into the Inner init
|
juraj-google-style
|
def random_data(line_count=1, chars_per_line=80):
divide_lines = chars_per_line * line_count
return '\n'.join(random_line_data(chars_per_line) for x in range(int(divide_lines / chars_per_line)))
|
Creates lines of random string data.
Args:
line_count: An integer that says how many lines to return
chars_per_line: An integer that says how many characters per line to return
Returns:
A String
|
juraj-google-style
|
def get_response(response: Dict[str, Any]) -> JSONRPCResponse:
if "error" in response:
return ErrorResponse(**response)
return SuccessResponse(**response)
|
Converts a deserialized response into a JSONRPCResponse object.
The dictionary is either an error or a success response, never a notification.
Args:
response: Deserialized response dictionary. We can assume the response is valid
JSON-RPC here, since it passed the jsonschema validation.
|
juraj-google-style
|
def check_list_type(objects, allowed_type, name, allow_none=True):
if objects is None:
if not allow_none:
raise TypeError('%s is None, which is not allowed.' % name)
return objects
if not isinstance(objects, (tuple, list)):
raise TypeError('%s is not a list.' % name)
if not all(isinstance(i, allowed_type) for i in objects):
type_list = sorted(list(set(type(obj) for obj in objects)))
raise TypeError('%s contains types that don\'t match %s: %s' %
(name, allowed_type.__name__, type_list))
return objects
|
Verify that objects in list are of the allowed type or raise TypeError.
Args:
objects: The list of objects to check.
allowed_type: The allowed type of items in 'settings'.
name: Name of the list of objects, added to the exception.
allow_none: If set, None is also allowed.
Raises:
TypeError: if object is not of the allowed type.
Returns:
The list of objects, for convenient use in assignment.
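Doctest-style sketch:
>>> check_list_type([1, 2, 3], int, 'counts')
[1, 2, 3]
Passing a non-list, or a list with items of the wrong type, raises TypeError.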
|
juraj-google-style
|
def __ne__(self, other):
if isinstance(other, DocumentReference):
return self._client != other._client or self._path != other._path
else:
return NotImplemented
|
Inequality check against another instance.
Args:
other (Any): A value to compare against.
Returns:
Union[bool, NotImplementedType]: Indicating if the values are
not equal.
|
juraj-google-style
|
def _g(self, z):
return (np.exp(np.multiply((- self.theta), z)) - 1)
|
Helper function to solve Frank copula.
This functions encapsulates :math:`g_z = e^{-\\theta z} - 1` used on Frank copulas.
Argument:
z: np.ndarray
Returns:
np.ndarray
|
codesearchnet
|
def plots_html_page(query_module):
template = jenv.get_template('analysis.html')
context = dict(extended=config.EXTENDED)
cl = client.get_client()
session = cl.create_session()
seaborn.set_style('whitegrid')
decade_df = query_module.decade_query()
pix_size = pixels_to_inches((600, 400))
ax = seaborn.lmplot(x='decade', y='area', data=decade_df, size=pix_size[1], aspect=(pix_size[0] / pix_size[1]), scatter_kws={'s': 30, 'alpha': 0.3})
ax.set(xlabel='Decade', ylabel='Area, m^2')
context['area_by_decade_svg'] = fig_to_svg(plt.gcf())
plt.close('all')
if config.EXTENDED:
gender_df = query_module.gender_query()
pix_size = pixels_to_inches((600, 400))
g = seaborn.FacetGrid(gender_df, hue='gender', margin_titles=True, size=pix_size[1], aspect=(pix_size[0] / pix_size[1]))
bins = np.linspace(0, 5, 30)
g.map(plt.hist, 'area', bins=bins, lw=0, alpha=0.5, normed=True)
g.axes[(0, 0)].set_xlabel('Area, m^2')
g.axes[(0, 0)].set_ylabel('Percentage of paintings')
context['area_by_gender_svg'] = fig_to_svg(plt.gcf())
plt.close('all')
out_file = path.join(out_dir, 'analysis.html')
html_content = template.render(**context)
with open(out_file, 'w') as f:
f.write(html_content)
plt.close('all')
session.close()
|
Generate analysis output as html page
Args:
query_module (module): module to use for querying data for the
desired model/pipeline variant, e.g. leonardo.standard.queries
|
codesearchnet
|
def as_objective(obj):
if isinstance(obj, Objective):
return obj
elif callable(obj):
return obj
elif isinstance(obj, str):
layer, n = obj.split(":")
layer, n = layer.strip(), int(n)
return channel(layer, n)
|
Convert obj into Objective class.
Strings of the form "layer:n" become the Objective channel(layer, n).
Objectives are returned unchanged.
Args:
obj: string or Objective.
Returns:
Objective
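A minimal sketch with a hypothetical layer name:
```python
obj = as_objective('mixed4a:42')  # equivalent to channel('mixed4a', 42)
```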
|
juraj-google-style
|
def add_token(self, token):
token = self.process_token(token)
self._token_count.update([token])
|
Add token to vocabulary.
Args:
token (str): token to add.
|
juraj-google-style
|
def _parse_services(self, service_config: dict, service_name: str, service_list: dict) -> dict:
for (key, value) in service_list['services'][service_name].items():
service_config[key] = value
if ('command' in key):
key = 'args'
service_config['args'] = value
service_config.pop('command')
if ('ports' in key):
endpoint_spec = self._parse_ports(value)
service_config['endpoint_spec'] = endpoint_spec
service_config.pop('ports')
if ('volumes' in key):
volume_spec = self._parse_volumes(value)
service_config['mounts'] = volume_spec
service_config.pop('volumes')
if ('deploy' in key):
self._parse_deploy(value, service_config)
service_config.pop('deploy')
if ('networks' in key):
network_spec = self._parse_networks(service_list)
service_config['networks'] = network_spec
if ('logging' in key):
self._parse_logging(value, service_config)
service_config.pop('logging')
if ('environment' in key):
service_config['env'] = value
service_config.pop('environment')
return service_config
|
Parse the docker compose file.
Args:
service_config (dict): Service configurations from the compose file
service_name (string): Name of the services
service_list (dict): Service configuration list
Returns:
dict, service specifications extracted from the compose file
|
codesearchnet
|
def track_storms(storm_objects, times, distance_components, distance_maxima, distance_weights, tracked_objects=None):
obj_matcher = ObjectMatcher(distance_components, distance_weights, distance_maxima)
if (tracked_objects is None):
tracked_objects = []
for (t, time) in enumerate(times):
past_time_objects = []
for obj in tracked_objects:
if (obj.end_time == (time - obj.step)):
past_time_objects.append(obj)
if (len(past_time_objects) == 0):
tracked_objects.extend(storm_objects[t])
elif ((len(past_time_objects) > 0) and (len(storm_objects[t]) > 0)):
assignments = obj_matcher.match_objects(past_time_objects, storm_objects[t], times[(t - 1)], times[t])
unpaired = list(range(len(storm_objects[t])))
for pair in assignments:
past_time_objects[pair[0]].extend(storm_objects[t][pair[1]])
unpaired.remove(pair[1])
if (len(unpaired) > 0):
for up in unpaired:
tracked_objects.append(storm_objects[t][up])
return tracked_objects
|
Given the output of extract_storm_objects, this method tracks storms through time and merges individual
STObjects into a set of tracks.
Args:
storm_objects: list of list of STObjects that have not been tracked.
times: List of times associated with each set of STObjects
distance_components: list of function objects that make up components of distance function
distance_maxima: array of maximum values for each distance for normalization purposes
distance_weights: weight given to each component of the distance function. Should add to 1.
tracked_objects: List of STObjects that have already been tracked.
Returns:
tracked_objects: list of STObjects, with the new storms merged into existing tracks.
|
codesearchnet
|
def select_action(self, next_action_arr, next_q_arr):
key_arr = self.select_action_key(next_action_arr, next_q_arr)
return next_action_arr[key_arr], next_q_arr[key_arr]
|
Select action by Q(state, action).
Args:
next_action_arr: `np.ndarray` of actions.
next_q_arr: `np.ndarray` of Q-Values.
Returns:
Tuple(`np.ndarray` of action., Q-Value)
|
juraj-google-style
|
def template_string(task: Task, template: str, jinja_filters: FiltersDict=None, **kwargs: Any) -> Result:
jinja_filters = (jinja_filters or {} or task.nornir.config.jinja2.filters)
text = jinja_helper.render_from_string(template=template, host=task.host, jinja_filters=jinja_filters, **kwargs)
return Result(host=task.host, result=text)
|
Renders a string with jinja2. All the host data is available in the template
Arguments:
template (string): template string
jinja_filters (dict): jinja filters to enable. Defaults to nornir.config.jinja2.filters
**kwargs: additional data to pass to the template
Returns:
Result object with the following attributes set:
* result (``string``): rendered string
|
codesearchnet
|
def locate_module(module_id: str, module_type: str = None):
entry_point = None
if module_type:
entry_point = 'ehforwarderbot.%s' % module_type
module_id = module_id.split('#', 1)[0]  # drop any instance ID suffix
if entry_point:
for i in pkg_resources.iter_entry_points(entry_point):
if i.name == module_id:
return i.load()
return pydoc.locate(module_id)
|
Locate module by module ID
Args:
module_id: Module ID
module_type: Type of module, one of ``'master'``, ``'slave'`` and ``'middleware'``
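A minimal sketch with a hypothetical module ID:
```python
middleware_cls = locate_module('my_efb_middleware.MyMiddleware', 'middleware')
```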
|
juraj-google-style
|
def verify_account(self, email_address):
request = self._get_request()
resp = request.post(self.ACCOUNT_VERIFY_URL, {
'email_address': email_address
})
return ('account' in resp)
|
Verify whether a HelloSign Account exists
Args:
email_address (str): Email address for the account to verify
Returns:
True or False
|
juraj-google-style
|
def install_package(self, name, index=None, force=False, update=False):
cmd = 'install'
if force:
cmd = '{0} {1}'.format(cmd, '--force-reinstall')
if update:
cmd = '{0} {1}'.format(cmd, '--update')
if index:
cmd = '{0} {1}'.format(cmd, '--index-url {0}'.format(index))
self.pip('{0} {1}'.format(cmd, name))
|
Install a given package.
Args:
name (str): The package name to install. This can be any valid
pip package specification.
index (str): The URL for a pypi index to use.
force (bool): Force the reinstall of packages during updates.
update (bool): Update the package if it is out of date.
|
juraj-google-style
|
def receive(self, event_type, signature, data_str):
if (not self.validate_signature(signature, data_str)):
raise HelpScoutSecurityException('The signature provided by this request was invalid.')
return HelpScoutWebHookEvent(event_type=event_type, record=json.loads(data_str))
|
Receive a web hook for the event and signature.
Args:
event_type (str): Name of the event that was received (from the
request ``X-HelpScout-Event`` header).
signature (str): The signature that was received, which serves as
authentication (from the request ``X-HelpScout-Signature``
header).
data_str (str): The raw data that was posted by HelpScout
to the web hook. This must be the raw string, because if it
is parsed with JSON it will lose its ordering and not pass
signature validation.
Raises:
helpscout.exceptions.HelpScoutSecurityException: If an invalid
signature is provided, and ``raise_if_invalid`` is ``True``.
Returns:
helpscout.web_hook.WebHookEvent: The authenticated web hook
request.
|
codesearchnet
|
def get_energy_relax_structure_buckingham(structure, gulp_cmd='gulp', keywords=('optimise', 'conp'), valence_dict=None):
gio = GulpIO()
gc = GulpCaller(gulp_cmd)
gin = gio.buckingham_input(structure, keywords, valence_dict=valence_dict)
gout = gc.run(gin)
energy = gio.get_energy(gout)
relax_structure = gio.get_relaxed_structure(gout)
return (energy, relax_structure)
|
Relax a structure and compute the energy using Buckingham potential.
Args:
structure: pymatgen.core.structure.Structure
gulp_cmd: GULP command if not in standard place
keywords: GULP first line keywords
valence_dict: {El: valence}. Needed if the structure is not charge
neutral.
|
codesearchnet
|
def read(self, length, timeout=None):
data = b''
while True:
if (timeout is not None):
(rlist, _, _) = select.select([self._fd], [], [], timeout)
if (self._fd not in rlist):
break
try:
data += os.read(self._fd, (length - len(data)))
except OSError as e:
raise SerialError(e.errno, ('Reading serial port: ' + e.strerror))
if (len(data) == length):
break
return data
|
Read up to `length` number of bytes from the serial port with an
optional timeout.
`timeout` can be positive for a timeout in seconds, 0 for a
non-blocking read, or negative or None for a blocking read that will
block until `length` number of bytes are read. Default is a blocking
read.
For a non-blocking or timeout-bound read, read() may return data whose
length is less than or equal to the requested length.
Args:
length (int): length in bytes.
timeout (int, float, None): timeout duration in seconds.
Returns:
bytes: data read.
Raises:
SerialError: if an I/O or OS error occurs.
|
codesearchnet
|
def CreateTaskStorage(self, task):
if self._storage_type != definitions.STORAGE_TYPE_SESSION:
raise IOError('Unsupported storage type.')
storage_file_path = self._GetTaskStorageFilePath(task)
return self._CreateTaskStorageWriter(storage_file_path, task)
|
Creates a task storage.
The task storage is used to store attributes created by the task.
Args:
task(Task): task.
Returns:
StorageWriter: storage writer.
Raises:
IOError: if the storage type is not supported.
OSError: if the storage type is not supported.
|
juraj-google-style
|
def __init__(self, host: str, port: int, time_to_live: Union[int, timedelta], *, request_coder: Optional[coders.Coder], response_coder: Optional[coders.Coder], kwargs: Optional[Dict[str, Any]]=None, source_caller: Optional[Caller]=None, mode: _RedisMode):
self.host, self.port = (host, port)
self.time_to_live = time_to_live
self.request_coder = request_coder
self.response_coder = response_coder
self.kwargs = kwargs
self.source_caller = source_caller
self.mode = mode
|
Args:
host (str): The hostname or IP address of the Redis server.
port (int): The port number of the Redis server.
time_to_live: `(Union[int, timedelta])` The time-to-live (TTL) for
records stored in Redis. Provide an integer (in seconds) or a
`datetime.timedelta` object.
request_coder: (Optional[`coders.Coder`]) coder for requests stored
in Redis.
response_coder: (Optional[`coders.Coder`]) coder for decoding responses
received from Redis.
kwargs: Optional(Dict[str, Any]) additional keyword arguments that
are required to connect to your redis server. Same as `redis.Redis()`.
source_caller: (Optional[`Caller`]): The source caller using this Redis
cache in case of fetching the cache request to store in Redis.
mode: `_RedisMode` An enum type specifying the operational mode of
the `_RedisCaller`.
|
github-repos
|
def get_section_by_name(self, section_name):
sections = self.unravel_sections(self.get_sections())
for section in sections:
if (section['name'] == section_name):
return (section['groupId'], section)
return (None, None)
|
Get a section by its name.
Get a list of sections for a given gradebook,
specified by a gradebookid.
Args:
section_name (str): The section's name.
Raises:
requests.RequestException: Exception connection error
ValueError: Unable to decode response content
Returns:
tuple: tuple of group id, and section dictionary
An example return value is:
.. code-block:: python
(
1327565,
{
u'editable': True,
u'groupId': 1327565,
u'groupingScheme': u'Recitation',
u'members': None,
u'name': u'r01',
u'shortName': u'r01',
u'staffs': None
}
)
|
codesearchnet
|
def generate_message_doc(message_descriptor, locations, path, name_prefix=''):
prefixed_name = name_prefix + message_descriptor.name
print(make_subsection(prefixed_name))
location = locations[path]
if location.HasField('leading_comments'):
print(textwrap.dedent(location.leading_comments))
row_tuples = []
for field_index, field in enumerate(message_descriptor.field):
field_location = locations[path + (2, field_index)]
if field.type not in [11, 14]:
type_str = TYPE_TO_STR[field.type]
else:
type_str = make_link(field.type_name.lstrip('.'))
row_tuples.append((
make_code(field.name),
field.number,
type_str,
LABEL_TO_STR[field.label],
textwrap.fill(get_comment_from_location(field_location), INFINITY),
))
print_table(('Field', 'Number', 'Type', 'Label', 'Description'),
row_tuples)
nested_types = enumerate(message_descriptor.nested_type)
for index, nested_message_desc in nested_types:
generate_message_doc(nested_message_desc, locations,
path + (3, index),
name_prefix=prefixed_name + '.')
for index, nested_enum_desc in enumerate(message_descriptor.enum_type):
generate_enum_doc(nested_enum_desc, locations, path + (4, index),
name_prefix=prefixed_name + '.')
|
Generate docs for message and nested messages and enums.
Args:
message_descriptor: descriptor_pb2.DescriptorProto instance for message
to generate docs for.
locations: Dictionary of location paths tuples to
descriptor_pb2.SourceCodeInfo.Location instances.
path: Path tuple to the message definition.
name_prefix: Optional prefix for this message's name.
|
juraj-google-style
|
def cancel(self, job_ids):
statuses = []
for job_id in job_ids:
try:
self.delete_instance(job_id)
statuses.append(True)
self.provisioned_blocks -= 1
except Exception:
statuses.append(False)
return statuses
|
Cancels the resources identified by the job_ids provided by the user.
Args:
- job_ids (list): A list of job identifiers
Returns:
- A list of status from cancelling the job which can be True, False
Raises:
- ExecutionProviderException or its subclasses
|
codesearchnet
|
def get_losses_for(self, inputs):
warnings.warn('`layer.get_losses_for` is deprecated and will be removed in a future version. Please use `layer.losses` instead.')
return self.losses
|
Deprecated, do NOT use!
Retrieves losses relevant to a specific set of inputs.
Args:
inputs: Input tensor or list/tuple of input tensors.
Returns:
List of loss tensors of the layer that depend on `inputs`.
|
github-repos
|
def _verify_output(self, submission_type):
result = True
if (submission_type == 'defense'):
try:
image_classification = load_defense_output(os.path.join(self._sample_output_dir, 'result.csv'))
expected_keys = [IMAGE_NAME_PATTERN.format(i) for i in range(BATCH_SIZE)]
if (set(image_classification.keys()) != set(expected_keys)):
logging.error('Classification results are not saved for all images')
result = False
except IOError as e:
logging.error('Failed to read defense output file: %s', e)
result = False
else:
for i in range(BATCH_SIZE):
image_filename = os.path.join(self._sample_output_dir, IMAGE_NAME_PATTERN.format(i))
try:
img = np.array(Image.open(image_filename).convert('RGB'))
if (list(img.shape) != [299, 299, 3]):
logging.error('Invalid image size %s for image %s', str(img.shape), image_filename)
result = False
except IOError as e:
result = False
return result
|
Verifies correctness of the submission output.
Args:
submission_type: type of the submission
Returns:
True if output looks valid
|
codesearchnet
|
def dumpfile(item, path):
with io.open(path, 'wb') as fd:
fd.write(en(item))
|
Dump an object to a file by path.
Args:
item (object): The object to serialize.
path (str): The file path to save.
Returns:
None
|
juraj-google-style
|
def squared_hinge(y_true, y_pred):
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.cast(y_true, y_pred.dtype)
y_true = convert_binary_labels_to_hinge(y_true)
return ops.mean(ops.square(ops.maximum(1.0 - y_true * y_pred, 0.0)), axis=-1)
|
Computes the squared hinge loss between `y_true` & `y_pred`.
Formula:
```python
loss = mean(square(maximum(1 - y_true * y_pred, 0)), axis=-1)
```
Args:
y_true: The ground truth values. `y_true` values are expected to be -1
or 1. If binary (0 or 1) labels are provided we will convert them
to -1 or 1 with shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.
Returns:
Squared hinge loss values with shape = `[batch_size, d0, .. dN-1]`.
Example:
>>> y_true = np.random.choice([-1, 1], size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = keras.losses.squared_hinge(y_true, y_pred)
|
github-repos
|
def install(name, dst, capture_error=False):
if dst not in sys.path:
sys.path.insert(0, dst)
entrypoint_type = _entry_point_type.get(dst, name)
if entrypoint_type is _entry_point_type.PYTHON_PACKAGE:
_modules.install(dst, capture_error)
if entrypoint_type is _entry_point_type.COMMAND:
os.chmod(os.path.join(dst, name), 511)
|
Install the user provided entry point to be executed as follow:
- add the path to sys path
- if the user entry point is a command, gives exec permissions to the script
Args:
name (str): name of the script or module.
dst (str): path to directory with the script or module.
capture_error (bool): Default false. If True, the running process captures the
stderr, and appends it to the returned Exception message in case of errors.
|
juraj-google-style
|
def _confirm_overwrite(filename):
message = '{}Would you like to overwrite the contents of {} (y/[n])? '.format(c.Fore.MAGENTA, filename)
response = raw_input(message)
response = response.lower()
if (response in ['y', 'yes']):
return True
return False
|
Confirm overwrite of template files.
Make sure the user would like to continue downloading a file which will overwrite a file
in the current directory.
Args:
filename (str): The name of the file to overwrite.
Returns:
bool: True if the user specifies a "yes" response.
|
codesearchnet
|
def log_error(self, msg):
if self.__logger:
self.__logger.error(msg)
raise RuntimeError(msg)
|
Log an error and raise an exception.
Args:
msg: Error message to log.
Raises:
RuntimeError: With the message.
|
codesearchnet
|
def Process(self, parser_mediator, root_item=None, **kwargs):
super(DefaultOLECFPlugin, self).Process(parser_mediator, **kwargs)
if not root_item:
raise ValueError('Root item not set.')
if not self._ParseItem(parser_mediator, root_item):
event_data = OLECFItemEventData()
event_data.name = root_item.name
event_data.offset = 0
event_data.size = root_item.size
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses an OLECF file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
root_item (Optional[pyolecf.item]): root item of the OLECF file.
Raises:
ValueError: If the root item is not set.
|
juraj-google-style
|
def sample_from_discretized_mix_logistic(pred, seed=None):
logits, locs, log_scales, coeffs = split_to_discretized_mix_logistic_params(
pred)
num_mixtures = shape_list(logits)[-1]
gumbel_noise = -tf.log(-tf.log(
tf.random_uniform(
tf.shape(logits), minval=1e-5, maxval=1. - 1e-5, seed=seed)))
sel = tf.one_hot(
tf.argmax(logits + gumbel_noise, -1),
depth=num_mixtures,
dtype=tf.float32)
sel = tf.expand_dims(sel, -1)
locs = tf.reduce_sum(locs * sel, 3)
log_scales = tf.reduce_sum(log_scales * sel, 3)
coeffs = tf.reduce_sum(coeffs * sel, 3)
uniform_noise = tf.random_uniform(
tf.shape(locs), minval=1e-5, maxval=1. - 1e-5, seed=seed)
logistic_noise = tf.log(uniform_noise) - tf.log1p(-uniform_noise)
x = locs + tf.exp(log_scales) * logistic_noise
x0 = x[..., 0]
x1 = x[..., 1] + coeffs[..., 0] * x0
x2 = x[..., 2] + coeffs[..., 1] * x0 + coeffs[..., 2] * x1
x = tf.stack([x0, x1, x2], axis=-1)
x = tf.clip_by_value(x, -1., 1.)
return x
|
Sampling from a discretized mixture of logistics.
Args:
pred: A [batch, height, width, num_mixtures*10] tensor of floats
comprising one unconstrained mixture probability, three means
(one per channel), three standard deviations (one per channel),
and three coefficients which linearly parameterize dependence across
channels.
seed: Random seed.
Returns:
A tensor of shape [batch, height, width, 3] with real intensities scaled
between -1 and 1.
|
juraj-google-style
|
def _read_protocol_line(self):
while True:
line = self._proc.stdout.readline().decode('utf-8')
if (not line):
raise jsonrpc_client_base.AppStartError(self._ad, 'Unexpected EOF waiting for app to start')
line = line.strip()
if (line.startswith('INSTRUMENTATION_RESULT:') or line.startswith('SNIPPET ')):
self.log.debug('Accepted line from instrumentation output: "%s"', line)
return line
self.log.debug('Discarded line from instrumentation output: "%s"', line)
|
Reads the next line of instrumentation output relevant to snippets.
This method will skip over lines that don't start with 'SNIPPET' or
'INSTRUMENTATION_RESULT'.
Returns:
(str) Next line of snippet-related instrumentation output, stripped.
Raises:
jsonrpc_client_base.AppStartError: If EOF is reached without any
protocol lines being read.
|
codesearchnet
|
def profile(self, profile):
self._staging_data = None
lang = profile.get('install_json', {}).get('programLanguage', 'PYTHON')
profile_args = ArgBuilder(lang, self.profile_args(profile.get('args')))
self._profile = profile
self._profile['profile_args'] = profile_args
self.load_tcex()
self.reports.profile(profile.get('profile_name'))
self._create_tc_dirs()
|
Set the current profile.
Args:
profile (dict): The profile data.
|
juraj-google-style
|
def sg_int(tensor, opt):
return tf.cast(tensor, tf.sg_intx, name=opt.name)
|
r"""Casts a tensor to intx.
See `tf.cast()` in tensorflow.
Args:
tensor: A `Tensor` or `SparseTensor` (automatically given by chain).
opt:
name: If provided, it replaces current tensor's name.
Returns:
A `Tensor` or `SparseTensor` with same shape as `tensor`.
|
juraj-google-style
|
def __init__(self, _max_size, _random=None, always_keep_last=True):
if _max_size < 0 or _max_size != round(_max_size):
raise ValueError('_max_size must be nonnegative int, was %s' % _max_size)
self.items = []
self._mutex = threading.Lock()
self._max_size = _max_size
self._num_items_seen = 0
if _random is not None:
self._random = _random
else:
self._random = random.Random(0)
self.always_keep_last = always_keep_last
|
Create the _ReservoirBucket.
Args:
_max_size: The maximum size the reservoir bucket may grow to. If size is
zero, the bucket has unbounded size.
_random: The random number generator to use. If not specified, defaults to
random.Random(0).
always_keep_last: Whether the latest seen item should always be included
in the end of the bucket.
Raises:
ValueError: if the size is not a nonnegative integer.
|
juraj-google-style
|
def set_all_tiers(key, value, django_cache_timeout=DEFAULT_TIMEOUT):
DEFAULT_REQUEST_CACHE.set(key, value)
django_cache.set(key, value, django_cache_timeout)
|
Caches the value for the provided key in both the request cache and the
django cache.
Args:
key (string)
value (object)
django_cache_timeout (int): (Optional) Timeout used to determine
if and for how long to cache in the django cache. A timeout of
0 will skip the django cache. If timeout is provided, use that
timeout for the key; otherwise use the default cache timeout.
|
juraj-google-style
|
def random_uniform(mesh, shape, **kwargs):
shape = convert_to_shape(shape)
return RandomOperation(mesh, shape, tf.random.uniform, **kwargs).outputs[0]
|
Random uniform.
Args:
mesh: a Mesh
shape: a Shape
**kwargs: keyword args for tf.random.uniform, except seed
Returns:
a Tensor
|
juraj-google-style
|
def unicode(self, b, encoding=None):
if (encoding is None):
encoding = self.string_encoding
return unicode(b, encoding, self.decode_errors)
|
Convert a byte string to unicode, using string_encoding and decode_errors.
Arguments:
b: a byte string.
encoding: the name of an encoding. Defaults to the string_encoding
attribute for this instance.
Raises:
TypeError: Because this method calls Python's built-in unicode()
function, this method raises the following exception if the
given string is already unicode:
TypeError: decoding Unicode is not supported
|
codesearchnet
|
def run(self, *args, **kwargs):
accounts = list(AWSAccount.get_all(include_disabled=False).values())
self.manage_policies(accounts)
|
Iterate through all AWS accounts and apply roles and policies from Github
Args:
*args: Optional list of arguments
**kwargs: Optional list of keyword arguments
Returns:
`None`
|
codesearchnet
|
def convert_elementwise_div(
params, w_name, scope_name, inputs, layers, weights, names
):
print('Converting elementwise_div ...')
if names == 'short':
tf_name = 'D' + random_string(7)
elif names == 'keep':
tf_name = w_name
else:
tf_name = w_name + str(random.random())
def target_layer(x):
layer = tf.div(
x[0],
x[1]
)
return layer
lambda_layer = keras.layers.Lambda(target_layer, name=tf_name)
layers[scope_name] = lambda_layer([layers[inputs[0]], layers[inputs[1]]])
|
 Convert elementwise division.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
|
juraj-google-style
|
def box_predictor(self, image_feats: torch.FloatTensor, feature_map: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.FloatTensor:
pred_boxes = self.box_head(image_feats)
if interpolate_pos_encoding:
_, num_patches_height, num_patches_width, _ = feature_map.shape
box_bias = self.compute_box_bias(num_patches_height, num_patches_width)
else:
box_bias = self.box_bias
box_bias = box_bias.to(feature_map.device)
pred_boxes += box_bias
pred_boxes = self.sigmoid(pred_boxes)
return pred_boxes
|
Args:
image_feats:
Features extracted from the image, returned by the `image_text_embedder` method.
feature_map:
A spatial re-arrangement of image_features, also returned by the `image_text_embedder` method.
interpolate_pos_encoding:
Whether to interpolate the pre-trained position encodings.
Returns:
pred_boxes:
List of predicted boxes (cxcywh normalized to 0, 1) nested within a dictionary.
|
github-repos
|
def make_class(node, props, ctx):
name = abstract_utils.get_atomic_python_constant(props.name_var)
log.info('Declaring class %s', name)
try:
class_dict = abstract_utils.get_atomic_value(props.class_dict_var)
except abstract_utils.ConversionError:
log.error('Error initializing class %r', name)
return ctx.convert.create_new_unknown(node)
metacls, bases = _filter_out_metaclasses(props.bases, ctx)
cls_var = metacls if metacls else props.metaclass_var
bases = [_process_base_class(node, base, ctx) for base in bases]
bases = _expand_generic_protocols(node, bases, ctx)
if not bases:
base = ctx.convert.object_type
bases = [base.to_variable(ctx.root_node)]
if isinstance(class_dict, abstract.Unsolvable) or not isinstance(class_dict, abstract.PythonConstant):
var = ctx.new_unsolvable(node)
else:
if cls_var is None:
cls_var = class_dict.members.get('__metaclass__')
if cls_var:
ctx.errorlog.ignored_metaclass(ctx.vm.frames, name, cls_var.data[0].full_name if cls_var.bindings else 'Any')
if cls_var and all((v.data.full_name == 'builtins.type' for v in cls_var.bindings)):
cls_var = None
cls = abstract_utils.get_atomic_value(cls_var, default=ctx.convert.unsolvable) if cls_var else None
if '__annotations__' not in class_dict.members and name in ctx.vm.annotated_locals:
annotations_dict = ctx.vm.annotated_locals[name]
if any((local.typ for local in annotations_dict.values())):
annotations_member = abstract.AnnotationsDict(annotations_dict, ctx).to_variable(node)
class_dict.members['__annotations__'] = annotations_member
class_dict.pyval['__annotations__'] = annotations_member
if '__init_subclass__' in class_dict.members:
underlying = class_dict.pyval['__init_subclass__']
_, method = ctx.vm.load_special_builtin('classmethod').call(node, func=None, args=function.Args(posargs=(underlying,)))
class_dict.pyval['__init_subclass__'] = method
try:
class_type = props.class_type or abstract.InterpreterClass
assert issubclass(class_type, abstract.InterpreterClass)
val = class_type(name, bases, class_dict.pyval, cls, ctx.vm.current_opcode, props.undecorated_methods, ctx)
_check_final_members(val, class_dict.pyval, ctx)
overriding_checks.check_overriding_members(val, bases, class_dict.pyval, ctx.matcher(node), ctx)
val.decorators = props.decorators or []
except mro.MROError as e:
ctx.errorlog.mro_error(ctx.vm.frames, name, e.mro_seqs)
var = ctx.new_unsolvable(node)
except abstract_utils.GenericTypeError as e:
ctx.errorlog.invalid_annotation(ctx.vm.frames, e.annot, e.error)
var = ctx.new_unsolvable(node)
else:
var = props.new_class_var or ctx.program.NewVariable()
var.AddBinding(val, props.class_dict_var.bindings, node)
node = val.call_metaclass_init(node)
node = val.call_init_subclass(node)
ctx.vm.trace_opcode(None, name, var)
return (node, var)
|
Create a class with the name, bases and methods given.
Args:
node: The current CFG node.
props: class_mixin.ClassBuilderProperties required to build the class
ctx: The current context.
Returns:
A node and an instance of class_type.
|
github-repos
|
def _AbortJoin(self, timeout=None):
for pid, process in iter(self._processes_per_pid.items()):
logger.debug('Waiting for process: {0:s} (PID: {1:d}).'.format(
process.name, pid))
process.join(timeout=timeout)
if not process.is_alive():
logger.debug('Process {0:s} (PID: {1:d}) stopped.'.format(
process.name, pid))
|
Aborts all registered processes by joining with the parent process.
Args:
timeout (int): number of seconds to wait for processes to join, where
None represents no timeout.
|
juraj-google-style
|
def handle_error(err, halt=True):
print('{}{}{}'.format(c.Style.BRIGHT, c.Fore.RED, err))
if halt:
sys.exit(1)
|
Print errors message and optionally exit.
Args:
err (str): The error message to print.
halt (bool, optional): Defaults to True. If True the script will exit.
|
juraj-google-style
|
def _decode_helper(obj, deserialize=False, module_objects=None, custom_objects=None):
if isinstance(obj, dict) and 'class_name' in obj:
if tf.available:
if obj['class_name'] == 'TensorShape':
return tf.TensorShape(obj['items'])
elif obj['class_name'] == 'TypeSpec':
from tensorflow.python.framework import type_spec_registry
return type_spec_registry.lookup(obj['type_spec'])._deserialize(_decode_helper(obj['serialized']))
elif obj['class_name'] == 'CompositeTensor':
spec = obj['spec']
tensors = []
for dtype, tensor in obj['tensors']:
tensors.append(tf.constant(tensor, dtype=tf.dtypes.as_dtype(dtype)))
return tf.nest.pack_sequence_as(_decode_helper(spec), tensors, expand_composites=True)
if obj['class_name'] == '__tuple__':
return tuple((_decode_helper(i) for i in obj['items']))
elif obj['class_name'] == '__ellipsis__':
return Ellipsis
elif deserialize and '__passive_serialization__' in obj:
try:
if 'module' not in obj:
return serialization.deserialize_keras_object(obj, module_objects=module_objects, custom_objects=custom_objects)
else:
return serialization_lib.deserialize_keras_object(obj, module_objects=module_objects, custom_objects=custom_objects)
except ValueError:
pass
elif obj['class_name'] == '__bytes__':
return obj['value'].encode('utf-8')
return obj
|
A decoding helper that is TF-object aware.
Args:
obj: A decoded dictionary that may represent an object.
deserialize: Boolean. When True, deserializes any Keras
objects found in `obj`. Defaults to `False`.
module_objects: A dictionary of built-in objects to look the name up in.
Generally, `module_objects` is provided by midlevel library
implementers.
custom_objects: A dictionary of custom objects to look the name up in.
Generally, `custom_objects` is provided by the end user.
Returns:
The decoded object.
|
github-repos
|
def get_vnet(access_token, subscription_id, resource_group, vnet_name):
endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Network/virtualNetworks/', vnet_name, '?api-version=', NETWORK_API])
return do_get(endpoint, access_token)
|
Get details about the named virtual network.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vnet_name (str): Name of the VNet.
Returns:
HTTP response. VNet JSON body.
|
codesearchnet
|
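A hedged usage sketch for get_vnet above: only the signature is taken from the snippet; the token and resource names are placeholders, and a real Azure AD token plus network access are required for the call to succeed.

access_token = '<token from your Azure AD auth flow>'   # placeholder, not a real token
subscription_id = '<subscription-id>'                   # placeholder

response = get_vnet(access_token, subscription_id, 'my-resource-group', 'my-vnet')
print(response)   # HTTP response carrying the VNet JSON body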
def register_rpc(self, address, rpc_id, func):
if ((rpc_id < 0) or (rpc_id > 65535)):
raise RPCInvalidIDError('Invalid RPC ID: {}'.format(rpc_id))
if (address not in self._rpc_overlays):
self._rpc_overlays[address] = RPCDispatcher()
self._rpc_overlays[address].add_rpc(rpc_id, func)
|
Register a single RPC handler with the given info.
This function can be used to directly register individual RPCs,
rather than delegating all RPCs at a given address to a virtual
Tile.
If calls to this function are mixed with calls to add_tile for
the same address, these RPCs will take precedence over what is
defined in the tiles.
Args:
address (int): The address of the mock tile this RPC is for
rpc_id (int): The number of the RPC
func (callable): The function that should be called to handle the
RPC. func is called as func(payload) and must return a single
string object of up to 20 bytes with its response
|
codesearchnet
|
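A stand-alone sketch of the register/dispatch pattern used by register_rpc above; MockDevice, RPCDispatcher and the error class are simplified stand-ins rather than the real iotile API.

class RPCInvalidIDError(Exception):
    pass


class RPCDispatcher:
    """Maps rpc_id -> handler for a single tile address."""

    def __init__(self):
        self._handlers = {}

    def add_rpc(self, rpc_id, func):
        self._handlers[rpc_id] = func

    def dispatch(self, rpc_id, payload):
        return self._handlers[rpc_id](payload)


class MockDevice:
    def __init__(self):
        self._rpc_overlays = {}

    def register_rpc(self, address, rpc_id, func):
        # Same validation as the snippet above: rpc_id must fit in 16 bits.
        if rpc_id < 0 or rpc_id > 65535:
            raise RPCInvalidIDError('Invalid RPC ID: {}'.format(rpc_id))
        if address not in self._rpc_overlays:
            self._rpc_overlays[address] = RPCDispatcher()
        self._rpc_overlays[address].add_rpc(rpc_id, func)


device = MockDevice()
device.register_rpc(8, 0x0004, lambda payload: b'pong')
print(device._rpc_overlays[8].dispatch(0x0004, b'ping'))   # b'pong'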
def image_section(image, title):
img = yield marv.pull(image)
if img is None:
return
widget = {'title': image.title, 'image': {'src': img.relpath}}
section = {'title': title, 'widgets': [widget]}
yield marv.push(section)
|
Create detail section with one image.
Args:
title (str): Title to be displayed for detail section.
image: marv image file.
    Returns:
One detail section.
|
juraj-google-style
|
def _minimize_peak_memory_list(graph):
schedule = []
bytes_freed = {}
users_of = collections.defaultdict(set)
in_degree = collections.defaultdict(int)
operation_id = {}
priority_queue = []
for i, operation_name in enumerate(graph.get_all_operation_names()):
operation_id[operation_name] = i
for input_name in graph.get_operation_input_names(operation_name):
if operation_name in users_of[input_name]:
continue
users_of[input_name].add(operation_name)
in_degree[operation_name] += 1
for operation_name in graph.get_all_operation_names():
bytes_freed[operation_name] = 0
for input_name in graph.get_operation_input_names(operation_name):
if len(users_of[input_name]) == 1 and not graph.is_tensor_final(
input_name):
bytes_freed[operation_name] += graph.get_tensor_size(input_name)
for output_name in graph.get_operation_output_names(operation_name):
if users_of[output_name] or graph.is_tensor_final(output_name):
bytes_freed[operation_name] -= graph.get_tensor_size(output_name)
for operation_name in graph.get_all_operation_names():
if in_degree[operation_name] == 0:
heapq.heappush(priority_queue,
(-bytes_freed[operation_name], operation_name))
while priority_queue:
neg_bytes_freed, operation_name = heapq.heappop(priority_queue)
if bytes_freed[operation_name] != -neg_bytes_freed:
continue
schedule.append(operation_id[operation_name])
bytes_freed[operation_name] = None
for output_name in graph.get_operation_output_names(operation_name):
for other_operation_name in users_of[output_name]:
in_degree[other_operation_name] -= 1
if in_degree[other_operation_name] == 0:
heapq.heappush(priority_queue,
(-bytes_freed[other_operation_name],
other_operation_name))
for input_name in graph.get_operation_input_names(operation_name):
if operation_name not in users_of[input_name]:
continue
users_of[input_name].remove(operation_name)
      if len(users_of[input_name]) != 1 or graph.is_tensor_final(input_name):
continue
(other_operation_name,) = users_of[input_name]
bytes_freed[other_operation_name] += graph.get_tensor_size(
input_name)
if in_degree[other_operation_name] > 0:
continue
heapq.heappush(priority_queue, (-bytes_freed[other_operation_name],
other_operation_name))
return schedule
|
Computes schedule according to the greedy list heuristic.
Greedy list heuristic: schedule the operation which results in the most bytes
of memory being (immediately) freed.
TODO(joshuawang): Experiment with tiebreaking by preferring more successors.
Args:
graph: an mtf.auto_mtf.graph_interface.GraphInterface.
Returns:
an iterable of integers representing the schedule.
|
juraj-google-style
|
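A toy illustration of the greedy list heuristic above: ToyGraph implements only the methods the scheduler actually calls, and the commented import path is an assumption rather than a verified module location.

class ToyGraph:
    """Four-operation diamond graph: read -> (branch_a, branch_b) -> merge."""

    _ops = {  # op name -> (input tensors, output tensors)
        'read': ([], ['t_in']),
        'branch_a': (['t_in'], ['t_a']),
        'branch_b': (['t_in'], ['t_b']),
        'merge': (['t_a', 't_b'], ['t_out']),
    }
    _sizes = {'t_in': 100, 't_a': 10, 't_b': 50, 't_out': 1}

    def get_all_operation_names(self):
        return list(self._ops)

    def get_operation_input_names(self, name):
        return self._ops[name][0]

    def get_operation_output_names(self, name):
        return self._ops[name][1]

    def get_tensor_size(self, name):
        return self._sizes[name]

    def is_tensor_final(self, name):
        return name == 't_out'


# Greedy choice: after 'read', branch_a (-10 bytes) beats branch_b (-50 bytes),
# so the expected schedule is read, branch_a, branch_b, merge, i.e. [0, 1, 2, 3].
# The import path below is a guess, so the call is left commented out:
# from mesh_tensorflow.auto_mtf.scheduler import _minimize_peak_memory_list
# print(_minimize_peak_memory_list(ToyGraph()))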
def deepcopy(original_obj):
if isinstance(original_obj, list):
return list(deepcopy(item) for item in original_obj)
elif isinstance(original_obj, dict):
return dict((key, deepcopy(val)) for key, val in original_obj.items())
else:
return original_obj
|
 Creates a deep copy of an object with no cross-referenced lists or dicts,
 useful when loading from YAML, as anchors generate those cross-referenced
 dicts and lists.
Args:
original_obj(object): Object to deep copy
Return:
object: deep copy of the object
|
juraj-google-style
|
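A quick illustration of the aliasing problem the deepcopy helper above addresses (assuming the helper is in scope): YAML anchors yield dicts that share one inner object, so a shallow copy leaks mutations while the helper breaks the link.

defaults = {'memory': 2048}
config = {'vm1': defaults, 'vm2': defaults}   # the same aliasing a YAML anchor creates

shallow = dict(config)                        # a plain dict() copy keeps the sharing
shallow['vm1']['memory'] = 4096
print(config['vm2']['memory'])                # 4096 -- the change leaked across aliases

independent = deepcopy(config)                # helper defined above
independent['vm1']['memory'] = 512
print(config['vm2']['memory'])                # still 4096 -- the copy has no shared dicts
print(independent['vm2']['memory'])           # 4096, unaffected by the edit to vm1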
def db990(self, value=None):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `db990`'.format(value))
self._db990 = value
|
Corresponds to IDD Field `db990`
Dry-bulb temperature corresponding to 90.0% annual cumulative
frequency of occurrence (cold conditions)
Args:
value (float): value for IDD Field `db990`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|
def add_mutually_exclusive_groups(self, groups):
all_params = set.union(*groups)
for group in groups:
mutually_exclusive = all_params - group
for name in group:
self._mutually_exclusive[name].update(mutually_exclusive)
|
Adds groups of mutually exclusive type parameters.
For example, [{"T1", "T2"}, {"T3", "T4"}] would mean that the following
pairs are mutually exclusive: (T1, T3), (T1, T4), (T2, T3), (T2, T4).
Args:
groups: The mutually exclusive groups.
|
github-repos
|
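A stand-alone sketch of the set arithmetic in add_mutually_exclusive_groups above: every parameter becomes mutually exclusive with all parameters outside its own group. mutually_exclusive_map is a hypothetical free function mirroring the method body.

import collections


def mutually_exclusive_map(groups):
    exclusive = collections.defaultdict(set)
    all_params = set.union(*groups)
    for group in groups:
        outside = all_params - group
        for name in group:
            exclusive[name].update(outside)
    return exclusive


pairs = mutually_exclusive_map([{'T1', 'T2'}, {'T3', 'T4'}])
print(sorted(pairs['T1']))   # ['T3', 'T4']
print(sorted(pairs['T3']))   # ['T1', 'T2']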
def listdir(self, target_directory):
target_directory = self.resolve_path(target_directory, allow_fd=True)
directory = self.confirmdir(target_directory)
directory_contents = directory.contents
return list(directory_contents.keys())
|
Return a list of file names in target_directory.
Args:
target_directory: Path to the target directory within the
fake filesystem.
Returns:
A list of file names within the target directory in arbitrary
order.
Raises:
OSError: if the target is not a directory.
|
codesearchnet
|
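A hedged usage sketch, assuming the listdir above is pyfakefs' FakeFilesystem.listdir; create_dir and create_file are the usual pyfakefs helpers for populating the fake filesystem.

from pyfakefs import fake_filesystem

fs = fake_filesystem.FakeFilesystem()
fs.create_dir('/data')
fs.create_file('/data/a.txt', contents='hello')
fs.create_file('/data/b.txt', contents='world')

print(sorted(fs.listdir('/data')))   # ['a.txt', 'b.txt']
# fs.listdir('/data/a.txt') would raise OSError because the target is not a directory.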
def add_history(self, filename, color_scheme, font, wrap):
filename = encoding.to_unicode_from_fs(filename)
if (filename in self.filenames):
return
editor = codeeditor.CodeEditor(self)
if (osp.splitext(filename)[1] == '.py'):
language = 'py'
else:
language = 'bat'
editor.setup_editor(linenumbers=False, language=language, scrollflagarea=False, show_class_func_dropdown=False)
editor.focus_changed.connect((lambda : self.focus_changed.emit()))
editor.setReadOnly(True)
editor.set_font(font, color_scheme)
editor.toggle_wrap_mode(wrap)
(text, _) = encoding.read(filename)
editor.set_text(text)
editor.set_cursor_position('eof')
self.editors.append(editor)
self.filenames.append(filename)
index = self.tabwidget.addTab(editor, osp.basename(filename))
self.find_widget.set_editor(editor)
self.tabwidget.setTabToolTip(index, filename)
self.tabwidget.setCurrentIndex(index)
|
 Add new history tab.
    Args:
        filename (str): file to be loaded in a new tab.
        color_scheme (str): name of the color scheme to apply to the editor.
        font (QFont): font used by the editor.
        wrap (bool): whether to enable line wrapping.
|
codesearchnet
|
def resolve(self, context, provider):
resolve_variables(self.variables, context, provider)
self.blueprint.resolve_variables(self.variables)
|
Resolve the Stack variables.
This resolves the Stack variables and then prepares the Blueprint for
rendering by passing the resolved variables to the Blueprint.
Args:
context (:class:`stacker.context.Context`): stacker context
provider (:class:`stacker.provider.base.BaseProvider`): subclass of
the base provider
|
codesearchnet
|
def layer_norm(x, dim, epsilon=1e-6, name="layer_prepostprocess"):
with tf.variable_scope(name + "/layer_norm"):
scale = mtf.get_variable(
x.mesh,
"layer_norm_scale",
mtf.Shape([dim]),
initializer=tf.ones_initializer(),
activation_dtype=x.dtype)
bias = mtf.get_variable(
x.mesh,
"layer_norm_bias",
mtf.Shape([dim]),
initializer=tf.zeros_initializer(),
activation_dtype=x.dtype)
reduced_shape = x.shape - dim
mean = mtf.reduce_mean(x, output_shape=reduced_shape)
variance = mtf.reduce_mean(mtf.square(x - mean), output_shape=reduced_shape)
norm_x = (x - mean) * mtf.rsqrt(variance + epsilon)
return norm_x * scale + bias
|
Layer normalization over dimension dim.
Args:
x: a mtf.Tensor whose shape contains dim.
dim: a mtf.Dimension
epsilon: a floating point number
name: a string. variable scope.
Returns:
a mtf.Tensor with same shape as x.
|
juraj-google-style
|
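A NumPy sketch of the normalization math in layer_norm above, with the learned scale and bias variables replaced by constants; it mirrors only the mean/variance reduction over the normalized dimension, not the mtf variable handling.

import numpy as np


def layer_norm_np(x, axis=-1, epsilon=1e-6, scale=1.0, bias=0.0):
    mean = x.mean(axis=axis, keepdims=True)
    variance = ((x - mean) ** 2).mean(axis=axis, keepdims=True)
    norm_x = (x - mean) / np.sqrt(variance + epsilon)
    return norm_x * scale + bias


x = np.random.randn(2, 5).astype(np.float32)
y = layer_norm_np(x)
print(np.allclose(y.mean(axis=-1), 0.0, atol=1e-5))   # True: zero mean per row
print(np.allclose(y.std(axis=-1), 1.0, atol=1e-3))    # True: roughly unit variance per row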
def __init__(self, key: Key, exclude_from_indexes: Iterable[str]=()):
self.key = key
self.exclude_from_indexes = set(exclude_from_indexes)
self.properties = {}
|
Represents a Datastore entity.
Does not support the property value "meaning" field.
Args:
key: (Key) A complete Key representing this Entity.
exclude_from_indexes: (iterable of str) List of property keys whose values
should not be indexed for this entity.
|
github-repos
|
def stop_threadsafe(self):
if self.stopped:
return
try:
self._loop.run_coroutine(self.stop())
except asyncio.TimeoutError:
raise TimeoutExpiredError('Timeout stopping task {} with {} subtasks'.format(self.name, len(self.subtasks)))
|
Stop this task from another thread and wait for it to finish.
This method must not be called from within the BackgroundEventLoop but
will inject self.stop() into the event loop and block until it
returns.
Raises:
TimeoutExpiredError: If the task does not stop in the given
timeout specified in __init__()
|
codesearchnet
|
def lu_solve(LU, b):
from scipy.linalg import lu_solve as sp_lu_solve
LU = (asarray(LU[0], float), asarray(LU[1], float))
b = asarray(b, float)
return sp_lu_solve(LU, b, check_finite=False)
|
r"""Solve for LU decomposition.
Solve the linear equations :math:`\mathrm A \mathbf x = \mathbf b`,
given the LU factorization of :math:`\mathrm A`.
Args:
LU (array_like): LU decomposition.
b (array_like): Right-hand side.
Returns:
:class:`numpy.ndarray`: The solution to the system
:math:`\mathrm A \mathbf x = \mathbf b`.
See Also
--------
scipy.linalg.lu_factor : LU decomposition.
scipy.linalg.lu_solve : Solve linear equations given LU factorization.
|
codesearchnet
|
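A minimal SciPy sketch of the factor-once, solve-many pattern the lu_solve wrapper above delegates to; scipy.linalg.lu_factor produces the (lu, piv) pair that the wrapper consumes.

import numpy as np
from scipy.linalg import lu_factor, lu_solve

A = np.array([[3.0, 1.0], [1.0, 2.0]])
b = np.array([9.0, 8.0])

LU = lu_factor(A)              # (lu, piv), reusable for many right-hand sides
x = lu_solve(LU, b)
print(x)                       # [2. 3.]
print(np.allclose(A @ x, b))   # True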
def set_site_energies(self, energies):
self.site_energies = energies
for site_label in energies:
for site in self.sites:
if (site.label == site_label):
site.energy = energies[site_label]
|
Set the energies for every site in the lattice according to the site labels.
Args:
        energies (Dict(Str:Float)): Dictionary of energies for each site label, e.g.::
            { 'A': 1.0, 'B': 0.0 }
Returns:
None
|
codesearchnet
|
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_2_0):
if (kmip_version < enums.KMIPVersion.KMIP_2_0):
raise exceptions.VersionNotSupported('KMIP {} does not support the ObjectDefaults object.'.format(kmip_version.value))
local_buffer = BytearrayStream()
if self._object_type:
self._object_type.write(local_buffer, kmip_version=kmip_version)
else:
raise exceptions.InvalidField('The ObjectDefaults structure is missing the object type field.')
if self._attributes:
self._attributes.write(local_buffer, kmip_version=kmip_version)
else:
raise exceptions.InvalidField('The ObjectDefaults structure is missing the attributes field.')
self.length = local_buffer.length()
super(ObjectDefaults, self).write(output_buffer, kmip_version=kmip_version)
output_buffer.write(local_buffer.buffer)
|
Write the ObjectDefaults structure encoding to the data stream.
Args:
output_buffer (stream): A data stream in which to encode
Attributes structure data, supporting a write method.
kmip_version (enum): A KMIPVersion enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 2.0.
Raises:
InvalidField: Raised if the object type or attributes fields are
not defined.
VersionNotSupported: Raised when a KMIP version is provided that
does not support the ObjectDefaults structure.
|
codesearchnet
|
def subCell2DSlices(arr, shape, d01=None, p01=None):
if p01 is not None:
yinit, xinit = p01
else:
xinit, yinit = 0, 0
x, y = xinit, yinit
g0, g1 = shape
s0, s1 = arr.shape[:2]
if d01 is not None:
d0, d1 = d01
else:
d0, d1 = s0 / g0, s1 / g1
y1 = d0 + yinit
for i in range(g0):
for j in range(g1):
x1 = x + d1
yield (i, j, slice(max(0, _rint(y)),
max(0, _rint(y1))),
slice(max(0, _rint(x)),
max(0, _rint(x1))))
x = x1
y = y1
y1 = y + d0
x = xinit
|
 Generator to access evenly sized sub-cells in a 2d array.
    Args:
        arr (np.ndarray): 2d array to be divided into sub-cells
        shape (tuple): number of sub-cells in y,x e.g. (10,15)
        d01 (tuple, optional): cell size in y and x
        p01 (tuple, optional): position of top left edge
    Returns:
        int: 1st index
        int: 2nd index
        slice: 1st dimension
        slice: 2nd dimension
|
juraj-google-style
|
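A stand-alone sketch of the grid-slicing idea behind subCell2DSlices above: split a 2d array into an evenly sized grid and visit each sub-cell with its indices. This is a simplified re-implementation for illustration, not the original function.

import numpy as np


def grid_slices(arr, shape):
    g0, g1 = shape
    s0, s1 = arr.shape[:2]
    d0, d1 = s0 / g0, s1 / g1
    for i in range(g0):
        for j in range(g1):
            yield (i, j,
                   slice(int(round(i * d0)), int(round((i + 1) * d0))),
                   slice(int(round(j * d1)), int(round((j + 1) * d1))))


img = np.arange(6 * 9).reshape(6, 9)
for i, j, sy, sx in grid_slices(img, (2, 3)):
    print(i, j, img[sy, sx].shape)   # every sub-cell is 3x3 for this grid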
def create_secret(self, name, data, labels=None, driver=None):
if (not isinstance(data, bytes)):
data = data.encode('utf-8')
data = base64.b64encode(data)
if six.PY3:
data = data.decode('ascii')
body = {'Data': data, 'Name': name, 'Labels': labels}
if (driver is not None):
if utils.version_lt(self._version, '1.31'):
raise errors.InvalidVersion('Secret driver is only available for API version > 1.31')
body['Driver'] = driver
url = self._url('/secrets/create')
return self._result(self._post_json(url, data=body), True)
|
Create a secret
Args:
name (string): Name of the secret
data (bytes): Secret data to be stored
labels (dict): A mapping of labels to assign to the secret
driver (DriverConfig): A custom driver configuration. If
unspecified, the default ``internal`` driver will be used
Returns (dict): ID of the newly created secret
|
codesearchnet
|
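A hedged usage sketch, assuming the snippet is docker-py's low-level APIClient.create_secret; running it requires a Docker engine in swarm mode, and the secret name, data and labels are placeholders.

import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock')
created = client.create_secret(name='db_password', data=b's3cr3t',
                               labels={'env': 'staging'})
print(created['ID'])   # ID of the newly created secret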