code (string, 20–4.93k chars) | docstring (string, 33–1.27k chars) | source (3 classes)
---|---|---|
def remove_handler(self, handler: Handler, group: int=0):
if isinstance(handler, DisconnectHandler):
self.disconnect_handler = None
else:
self.dispatcher.remove_handler(handler, group)
|
Removes a previously-added update handler.
Make sure to provide the right group that the handler was added in. You can use
the return value of the :meth:`add_handler` method, a tuple of (handler, group), and
pass it directly.
Args:
handler (``Handler``):
The handler to be removed.
group (``int``, *optional*):
The group identifier, defaults to 0.
|
codesearchnet
|
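The docstring above notes that `add_handler` returns a `(handler, group)` tuple that can be passed straight back to `remove_handler`. A minimal self-contained sketch of that round trip, using a hypothetical `Dispatcher` that only mirrors the pattern (it is not the library's actual class):

```python
# Hypothetical dispatcher illustrating the (handler, group) round trip.
class Dispatcher:
    def __init__(self):
        self.groups = {}  # group id -> list of handlers

    def add_handler(self, handler, group=0):
        self.groups.setdefault(group, []).append(handler)
        return handler, group  # tuple usable directly by remove_handler

    def remove_handler(self, handler, group=0):
        self.groups[group].remove(handler)


dispatcher = Dispatcher()
registration = dispatcher.add_handler(lambda update: print(update), group=1)
dispatcher.remove_handler(*registration)  # unpack the stored (handler, group)
```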
def getWindow(title, exact=False):
titles = getWindows()
hwnd = titles.get(title, None)
if not hwnd and not exact:
for k, v in titles.items():
if title in k:
hwnd = v
break
if hwnd:
return Window(hwnd)
else:
return None
|
Return a Window object if 'title' (or a substring of it) is found among the titles of visible windows, else return None.
Only the first matching window is returned.
Args:
title: unicode string
exact (bool): if True, only exact title matches are accepted
|
juraj-google-style
|
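The exact-versus-substring lookup above can be exercised without any GUI library. A small standalone sketch of the same matching logic, with made-up window titles and handles:

```python
def find_handle(titles, title, exact=False):
    # Exact lookup first, then fall back to a substring scan unless exact=True.
    hwnd = titles.get(title)
    if not hwnd and not exact:
        for name, handle in titles.items():
            if title in name:
                return handle
    return hwnd


titles = {"Untitled - Notepad": 1001, "Downloads - File Explorer": 1002}
print(find_handle(titles, "Notepad"))        # 1001 via substring match
print(find_handle(titles, "Notepad", True))  # None, exact match required
```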
def concat(self, second_iterable):
if self.closed():
raise ValueError("Attempt to call concat() on a closed Queryable.")
if not is_iterable(second_iterable):
raise TypeError("Cannot compute concat() with second_iterable of "
"non-iterable {0}".format(str(type(second_iterable))[7: -1]))
return self._create(itertools.chain(self, second_iterable))
|
Concatenates two sequences.
Note: This method uses deferred execution.
Args:
second_iterable: The sequence to concatenate onto this sequence.
Returns:
A Queryable over the concatenated sequences.
Raises:
ValueError: If the Queryable is closed().
TypeError: If second_iterable is not in fact iterable.
|
juraj-google-style
|
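The deferred execution mentioned in the docstring comes from `itertools.chain`, which does not touch either sequence until the result is iterated. A minimal sketch without the `Queryable` wrapper:

```python
import itertools

first = iter([1, 2, 3])
second = iter([4, 5])
chained = itertools.chain(first, second)  # nothing is consumed yet
print(list(chained))                      # [1, 2, 3, 4, 5]
```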
def ReadFile(self, definitions_registry, path):
with open(path, 'r') as file_object:
self.ReadFileObject(definitions_registry, file_object)
|
Reads data type definitions from a file into the registry.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
path (str): path of the file to read from.
|
codesearchnet
|
def _DrawHours(self):
tmpstrs = []
for i in range(0, self._gwidth, self._min_grid):
if ((i % self._hour_grid) == 0):
tmpstrs.append(('<polyline class="FullHour" points="%d,%d, %d,%d" />' % (((i + 0.5) + 20), 20, ((i + 0.5) + 20), self._gheight)))
tmpstrs.append(('<text class="Label" x="%d" y="%d">%d</text>' % ((i + 20), 20, (((i / self._hour_grid) + self._offset) % 24))))
else:
tmpstrs.append(('<polyline class="SubHour" points="%d,%d,%d,%d" />' % (((i + 0.5) + 20), 20, ((i + 0.5) + 20), self._gheight)))
return ''.join(tmpstrs)
|
Generates SVG to show a vertical hour and sub-hour grid.
Returns:
A string containing a polyline tag for each grid line, e.g.
" <polyline class="FullHour" points="20,0 ..."
|
codesearchnet
|
def uninstalled(name):
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
old = __salt__['flatpak.is_installed'](name)
if not old:
ret['comment'] = 'Package {0} is not installed'.format(name)
ret['result'] = True
return ret
else:
if __opts__['test']:
ret['comment'] = 'Package {0} would have been uninstalled'.format(name)
ret['changes']['old'] = old[0]['version']
ret['changes']['new'] = None
ret['result'] = None
return ret
__salt__['flatpak.uninstall'](name)
if not __salt__['flatpak.is_installed'](name):
ret['comment'] = 'Package {0} uninstalled'.format(name)
ret['changes']['old'] = old[0]['version']
ret['changes']['new'] = None
ret['result'] = True
return ret
|
Ensure that the named package is not installed.
Args:
name (str): The flatpak package.
Returns:
dict: The ``result`` and ``output``.
Example:
.. code-block:: yaml
uninstall_package:
flatpak.uninstalled:
- name: gimp
|
juraj-google-style
|
def deprecated(msg):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
logging.getLogger(__name__).warning(msg)
return func(*args, **kwargs)
return wrapper
return decorator
|
Marks a function / method as deprecated.
Takes one argument, a message to be logged with information on future usage of the function or alternative methods
to call.
Args:
msg (str): Deprecation message to be logged
Returns:
`callable`
|
juraj-google-style
|
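A short usage sketch of the decorator above; the decorator body is repeated only so the example runs on its own:

```python
import logging
from functools import wraps

logging.basicConfig(level=logging.WARNING)


def deprecated(msg):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            logging.getLogger(__name__).warning(msg)
            return func(*args, **kwargs)
        return wrapper
    return decorator


@deprecated("old_sum() is deprecated; use the built-in sum() instead.")
def old_sum(values):
    return sum(values)


print(old_sum([1, 2, 3]))  # logs the deprecation warning, then prints 6
```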
def witness_tx(tx_ins, tx_outs, tx_witnesses, **kwargs):
deser = [script_ser.deserialize(tx_in.redeem_script) for tx_in in tx_ins
if tx_in is not None]
for w in tx_witnesses:
try:
deser.append(script_ser.deserialize(w.stack[-1].item))
except (NotImplementedError, ValueError):
pass
version = max([guess_version(d) for d in deser])
if 'lock_time' in kwargs:
lock_time = kwargs['lock_time']
else:
lock_time = max([guess_locktime(d) for d in deser])
return tb.make_tx(
version=version,
tx_ins=tx_ins,
tx_outs=tx_outs,
lock_time=lock_time,
tx_witnesses=tx_witnesses)
|
Construct a fully-signed segwit transaction
Args:
tx_ins list(TxIn instances): list of transaction inputs
tx_outs list(TxOut instances): list of transaction outputs
tx_witnesses list(TxWitness instances): list of transaction witnesses
**kwargs:
version (int): transaction version number
lock_time (int): transaction lock time
Returns:
(Tx instance): signed transaction with witnesses
|
juraj-google-style
|
def defer(self, func: typing.Callable[[], typing.Any], until: typing.Union[int, float] = -1) -> typing.Any:
raise NotImplementedError()
|
Defer the execution of a function until some clock value.
Args:
func (typing.Callable[[], typing.Any]): A callable that accepts no
arguments. All return values are ignored.
until (typing.Union[int, float]): A numeric value that represents
the clock time when the callback becomes available for
execution. Values that are less than the current time result in
the function being called at the next opportunity.
Returns:
typing.Any: An opaque identifier that represents the callback
uniquely within the processor. This identifier is used to
modify the callback scheduling.
Note:
The time given should not be considered absolute. It represents
the time when the callback becomes available to execute. It may
be much later than the given time value when the function actually
executes depending on the implementation.
|
codesearchnet
|
def process_fixed_issues(self, volumes, existing_issues):
fixed_issues = []
for issue_id, issue in list(existing_issues.items()):
if issue_id not in volumes:
fixed_issues.append(issue)
return fixed_issues
|
Given a dict of volumes and a dict of existing issues, return a list of fixed issues to be deleted
Args:
volumes (`dict`): A dictionary keyed on the issue id, with the :obj:`Volume` object as the value
existing_issues (`dict`): A dictionary keyed on the issue id, with the :obj:`EBSVolumeAuditIssue` object as
the value
Returns:
:obj:`list` of :obj:`EBSVolumeAuditIssue`
|
juraj-google-style
|
def inspect_last(self, stream, only_allocated=False):
if only_allocated:
found = False
for walker in self._virtual_walkers:
if walker.matches(stream):
found = True
break
if (not found):
raise UnresolvedIdentifierError('inspect_last could not find an allocated virtual streamer for the desired stream', stream=stream)
if (stream in self._last_values):
return self._last_values[stream]
raise StreamEmptyError(u'inspect_last called on stream that has never been written to', stream=stream)
|
Return the last value pushed into a stream.
This function works even if the stream is virtual and no
virtual walker has been created for it. It is primarily
useful to aid in debugging sensor graphs.
Args:
stream (DataStream): The stream to inspect.
only_allocated (bool): Optional parameter to only allow inspection
of allocated virtual streams. This is useful for mimicking the
behavior of an embedded device that does not have a _last_values
array.
Returns:
IOTileReading: The data in the stream
Raises:
StreamEmptyError: if there has never been data written to
the stream.
UnresolvedIdentifierError: if only_allocated is True and there has not
been a virtual stream walker allocated to listen to this stream.
|
codesearchnet
|
def with_division(self, division):
if (division is None):
division = ''
division = slugify(division)
self._validate_division(division)
self.division = division
return self
|
Add a division segment
Args:
division (str): Official name of an electoral division.
Returns:
IdBuilder
Raises:
ValueError
|
codesearchnet
|
def limit_epochs(tensor, num_epochs=None, name=None):
if num_epochs is None:
return tensor
if num_epochs <= 0:
raise ValueError('num_epochs must be > 0 not %d.' % num_epochs)
with ops.name_scope(name, 'limit_epochs', [tensor]) as name:
zero64 = constant_op.constant(0, dtype=dtypes.int64)
epochs = variable_v1.VariableV1(zero64, name='epochs', trainable=False, collections=[ops.GraphKeys.LOCAL_VARIABLES])
counter = epochs.count_up_to(num_epochs)
with ops.control_dependencies([counter]):
return array_ops.identity(tensor, name=name)
|
Returns tensor `num_epochs` times and then raises an `OutOfRange` error.
Note: creates local counter `epochs`. Use `local_variables_initializer()` to
initialize local variables.
Args:
tensor: Any `Tensor`.
num_epochs: A positive integer (optional). If specified, limits the number
of steps the output tensor may be evaluated.
name: A name for the operations (optional).
Returns:
tensor or `OutOfRange`.
Raises:
ValueError: if `num_epochs` is invalid.
|
github-repos
|
def iter_predict(self, X, include_init=False):
utils.validation.check_is_fitted(self, 'init_estimator_')
X = utils.check_array(X, accept_sparse=['csr', 'csc'], dtype=None, force_all_finite=False)
y_pred = self.init_estimator_.predict(X)
if include_init:
(yield y_pred)
for (estimators, line_searchers, cols) in itertools.zip_longest(self.estimators_, self.line_searchers_, self.columns_):
for (i, (estimator, line_searcher)) in enumerate(itertools.zip_longest(estimators, (line_searchers or []))):
if (cols is None):
direction = estimator.predict(X)
else:
direction = estimator.predict(X[:, cols])
if line_searcher:
direction = line_searcher.update(direction)
y_pred[:, i] += (self.learning_rate * direction)
(yield y_pred)
|
Returns the predictions for ``X`` at every stage of the boosting procedure.
Args:
X (array-like or sparse matrix of shape (n_samples, n_features)): The input samples.
Sparse matrices are accepted only if they are supported by the weak model.
include_init (bool, default=False): If ``True`` then the prediction from
``init_estimator`` will also be returned.
Returns:
iterator of arrays of shape (n_samples,) containing the predicted values at each stage
|
codesearchnet
|
def groups_invite(self, *, channel: str, user: str, **kwargs) -> SlackResponse:
self._validate_xoxp_token()
kwargs.update({'channel': channel, 'user': user})
return self.api_call('groups.invite', json=kwargs)
|
Invites a user to a private channel.
Args:
channel (str): The group id. e.g. 'G1234567890'
user (str): The user id. e.g. 'U1234567890'
|
codesearchnet
|
def add_timing_signal_1d_given_position(x,
position,
min_timescale=1.0,
max_timescale=1.0e4):
channels = common_layers.shape_list(x)[2]
num_timescales = channels // 2
log_timescale_increment = (
math.log(float(max_timescale) / float(min_timescale)) /
(tf.to_float(num_timescales) - 1))
inv_timescales = min_timescale * tf.exp(
tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
scaled_time = (
tf.expand_dims(tf.to_float(position), 2) * tf.expand_dims(
tf.expand_dims(inv_timescales, 0), 0))
signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=2)
signal = tf.pad(signal, [[0, 0], [0, 0], [0, tf.mod(channels, 2)]])
signal = common_layers.cast_like(signal, x)
return x + signal
|
Adds sinusoids of different frequencies to a Tensor, with timing position given.
Args:
x: a Tensor with shape [batch, length, channels]
position: a Tensor with shape [batch, length]
min_timescale: a float
max_timescale: a float
Returns:
a Tensor the same shape as x.
|
juraj-google-style
|
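The sinusoidal timing signal can be reproduced with NumPy alone. A sketch of the same computation under the usual transformer convention of `channels // 2` timescales (shapes and values here are illustrative only):

```python
import numpy as np

batch, length, channels = 1, 4, 8
min_timescale, max_timescale = 1.0, 1.0e4

position = np.arange(length)[None, :].astype(float)      # [batch, length]
num_timescales = channels // 2
log_inc = np.log(max_timescale / min_timescale) / max(num_timescales - 1, 1)
inv_timescales = min_timescale * np.exp(-log_inc * np.arange(num_timescales))

# [batch, length, num_timescales] -> concat sin/cos along channels
scaled_time = position[:, :, None] * inv_timescales[None, None, :]
signal = np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=2)
print(signal.shape)  # (1, 4, 8), same trailing dimension as channels
```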
def _get_saver_def_or_none(exported_model: exported_model_pb2.ExportedModel) -> Optional[saver_pb2.SaverDef]:
if exported_model.HasField('saver_def'):
return exported_model.saver_def
return None
|
Returns the SaverDef from ExportedModel, None otherwise.
Args:
exported_model: ExportedModel to take the SaverDef from.
Returns:
SaverDef instance if the field `saver_def` is set. None otherwise.
|
github-repos
|
def range(self, x_data=None):
if x_data is None:
try:
x_data = evaluation.evaluate_inverse(
self, numpy.array([[0.5]]*len(self)))
except StochasticallyDependentError:
x_data = approximation.find_interior_point(self)
shape = (len(self),)
if hasattr(self, "_range"):
return self._range(x_data, {})
else:
x_data = numpy.asfarray(x_data)
shape = x_data.shape
x_data = x_data.reshape(len(self), -1)
q_data = evaluation.evaluate_bound(self, x_data)
q_data = q_data.reshape((2,)+shape)
return q_data
|
Generate the upper and lower bounds of a distribution.
Args:
x_data (numpy.ndarray) :
The bounds might vary over the sample space. By providing
x_data you can specify where in the space the bound should be
taken. If omitted, a (pseudo-)random sample is used.
Returns:
(numpy.ndarray):
The lower (out[0]) and upper (out[1]) bound where
out.shape=(2,)+x_data.shape
|
juraj-google-style
|
def _ip_int_from_string(self, ip_str):
if not ip_str:
raise AddressValueError('Address cannot be empty')
octets = ip_str.split('.')
if len(octets) != 4:
raise AddressValueError("Expected 4 octets in %r" % ip_str)
try:
return _int_from_bytes(map(self._parse_octet, octets), 'big')
except ValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
|
Turn the given IP string into an integer for comparison.
Args:
ip_str: A string, the IP ip_str.
Returns:
The IP ip_str as an integer.
Raises:
AddressValueError: if ip_str isn't a valid IPv4 Address.
|
juraj-google-style
|
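The dotted-quad-to-integer conversion above can be written with `int.from_bytes` directly. A standalone sketch (octet range validation omitted):

```python
def ipv4_to_int(ip_str):
    octets = ip_str.split('.')
    if len(octets) != 4:
        raise ValueError("Expected 4 octets in %r" % ip_str)
    return int.from_bytes(bytes(int(o) for o in octets), 'big')


print(ipv4_to_int('192.168.0.1'))  # 3232235521
```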
def add_ipdu(self, information, timeout=(- 1)):
uri = (self.URI + '/discover')
return self._client.create(information, uri=uri, timeout=timeout)
|
Add an HP iPDU and bring all components under management by discovery of its management module. Bring the
management module under exclusive management by the appliance, configure any management or data collection
settings, and create a private set of administrative credentials to enable ongoing communication and management
of the iPDU. Use "force" to claim the device, even if claimed by another management appliance
Args:
information (dict): power device information
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: added power device.
|
codesearchnet
|
def SetExtractionConfiguration(self, configuration):
self._hasher_file_size_limit = configuration.hasher_file_size_limit
self._SetHashers(configuration.hasher_names_string)
self._process_archives = configuration.process_archives
self._process_compressed_streams = configuration.process_compressed_streams
self._SetYaraRules(configuration.yara_rules_string)
|
Sets the extraction configuration settings.
Args:
configuration (ExtractionConfiguration): extraction configuration.
|
juraj-google-style
|
def __init__(self, xid=None, data=None):
super().__init__(xid)
self.data = data
|
Create an EchoReply with the optional parameters below.
Args:
xid (int): xid to be used on the message header.
data (bytes): arbitrary-length data field.
|
juraj-google-style
|
def get_checklist(self, id, name=None):
return self.create_checklist(dict(id=id, name=name))
|
Get a checklist
Returns:
Checklist: The checklist with the given `id`
|
codesearchnet
|
def warn_logging(logger):
def showwarning(message, category, filename, lineno, file=None, line=None):
logger.warning(message)
return showwarning
|
Create a `showwarning` function that uses the given logger.
Arguments:
logger (~logging.Logger): the logger to use.
Returns:
function: a function that can be used as the `warnings.showwarning`
callback.
|
juraj-google-style
|
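A minimal usage sketch showing how the returned function plugs into `warnings.showwarning`; the factory is repeated so the example is self-contained:

```python
import logging
import warnings


def warn_logging(logger):
    def showwarning(message, category, filename, lineno, file=None, line=None):
        logger.warning(message)
    return showwarning


logging.basicConfig(level=logging.WARNING)
warnings.showwarning = warn_logging(logging.getLogger("demo"))
warnings.warn("this warning is now routed through the 'demo' logger")
```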
def staged_rewards(self):
cubeA_pos = self.sim.data.body_xpos[self.cubeA_body_id]
cubeB_pos = self.sim.data.body_xpos[self.cubeB_body_id]
gripper_site_pos = self.sim.data.site_xpos[self.eef_site_id]
dist = np.linalg.norm((gripper_site_pos - cubeA_pos))
r_reach = ((1 - np.tanh((10.0 * dist))) * 0.25)
touch_left_finger = False
touch_right_finger = False
touch_cubeA_cubeB = False
for i in range(self.sim.data.ncon):
c = self.sim.data.contact[i]
if ((c.geom1 in self.l_finger_geom_ids) and (c.geom2 == self.cubeA_geom_id)):
touch_left_finger = True
if ((c.geom1 == self.cubeA_geom_id) and (c.geom2 in self.l_finger_geom_ids)):
touch_left_finger = True
if ((c.geom1 in self.r_finger_geom_ids) and (c.geom2 == self.cubeA_geom_id)):
touch_right_finger = True
if ((c.geom1 == self.cubeA_geom_id) and (c.geom2 in self.r_finger_geom_ids)):
touch_right_finger = True
if ((c.geom1 == self.cubeA_geom_id) and (c.geom2 == self.cubeB_geom_id)):
touch_cubeA_cubeB = True
if ((c.geom1 == self.cubeB_geom_id) and (c.geom2 == self.cubeA_geom_id)):
touch_cubeA_cubeB = True
if (touch_left_finger and touch_right_finger):
r_reach += 0.25
cubeA_height = cubeA_pos[2]
table_height = self.table_full_size[2]
cubeA_lifted = (cubeA_height > (table_height + 0.04))
r_lift = (1.0 if cubeA_lifted else 0.0)
if cubeA_lifted:
horiz_dist = np.linalg.norm((np.array(cubeA_pos[:2]) - np.array(cubeB_pos[:2])))
r_lift += (0.5 * (1 - np.tanh(horiz_dist)))
r_stack = 0
not_touching = ((not touch_left_finger) and (not touch_right_finger))
if (not_touching and (r_lift > 0) and touch_cubeA_cubeB):
r_stack = 2.0
return (r_reach, r_lift, r_stack)
|
Helper function to return staged rewards based on current physical states.
Returns:
r_reach (float): reward for reaching and grasping
r_lift (float): reward for lifting and aligning
r_stack (float): reward for stacking
|
codesearchnet
|
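The reaching term above is a simple distance shaping: `r_reach = (1 - tanh(10 * dist)) * 0.25`, which approaches 0.25 as the gripper closes in on the cube. A standalone numeric sketch with made-up positions:

```python
import numpy as np

gripper_pos = np.array([0.10, 0.00, 0.90])
cube_pos = np.array([0.12, 0.01, 0.88])

dist = np.linalg.norm(gripper_pos - cube_pos)
r_reach = (1 - np.tanh(10.0 * dist)) * 0.25
print(round(float(r_reach), 4))  # close to 0.25 when dist is small
```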
def do_REMOTE(self, target: str, remote_command: str, source: list, *args, **kwargs) -> None:
if (target == self.messaging._service_name):
info = 'target for remote command is the bot itself! Returning the function'
self.logger.info(info)
return self._handle_command(remote_command, source, *args, **kwargs)
try:
target = self.messaging._address_map[target]
except KeyError:
warn = ' Target %s, not found in addresses. Are you sure that %s sent an IDENT message?'
self.logger.warn(warn, target, target)
return
self.logger.info(' REMOTE %s, target: %s | %s, %s', remote_command, target, args, kwargs)
source = (target + source)
self.messaging.send_command_response(source, remote_command, *args, **kwargs)
|
Send a remote command to a service.
Args:
target: The service that the command gets sent to
remote_command: The command to do remotely.
source: the binary source of the zmq_socket. Packed to send to the
|
codesearchnet
|
def maybe_set_static_shape(tensor, shape):
if _ENABLE_MAYBE_SET_STATIC_SHAPE and (not context.executing_eagerly()) and ops.get_default_graph().building_function and (not tensor.shape.is_fully_defined()) and tensor_util.is_tensor(shape):
shape = shape_tensor(shape)
const_shape = tensor_util.constant_value_as_shape(shape)
tensor.set_shape(const_shape)
|
Sets the shape of `tensor` to the `shape`'s constant value, if inferrable.
This is a temporary workaround to fix shape inference across functional op
boundaries. E.g.
```python
shape = tf.constant([3])
@tf.function
def f():
u = tf.random_uniform(shape)
return u
```
If we were to rely solely on C++ shape inference, the shape of `u` inside
`f` would be unknown because C++ shape inference is not aware of the outer
graph and all it sees is a Placeholder node when backtracing the captured
tensor for `shape`. `maybe_set_static_shape` computes the static shape value
of `shape` by traversing the `FuncGraph` boundaries and sets the correct
shape.
A longer term solution would be to fix C++ shape inference.
Args:
tensor: A tensor.
shape: A shape tensor.
|
github-repos
|
def get_page_artid(self, separator='-'):
publication_info = get_value(
self.record,
'publication_info[0]',
default={}
)
return LiteratureReader.get_page_artid_for_publication_info(
publication_info,
separator
)
|
Return the page range or the article id of a record.
Args:
separator(basestring): optional page range symbol, defaults to a single dash
Returns:
string: the page range or the article id of the record.
Examples:
>>> record = {
... 'publication_info': [
... {'artid': '054021'},
... ],
... }
>>> LiteratureReader(record).get_page_artid()
'054021'
|
juraj-google-style
|
def _get_metrics_result_or_logs(self, logs):
metric_logs = self.get_metrics_result()
if isinstance(logs, dict) and set(logs.keys()) == set(metric_logs.keys()):
return metric_logs
return logs
|
Returns model metrics as a dict if the keys match with input logs.
When the training / evaluation is performed with an asynchronous steps,
the last scheduled `train / test_step` may not give the latest metrics
because it is not guaranteed to be executed the last. This method gets
metrics from the model directly instead of relying on the return from
last step function.
When the user has custom train / test step functions, the metrics
returned may be different from `Model.metrics`. In those instances,
this function will be no-op and return the logs passed in.
Args:
logs: A `dict` of metrics returned by train / test step function.
Returns:
A `dict` containing values of the metrics listed in `self.metrics`
when logs and model metrics keys match. Otherwise it returns input
`logs`.
|
github-repos
|
def record_kv_cache_memory_metrics(self, cache) -> None:
if not _has_opentelemetry:
return
try:
num_used_blocks = cache.num_blocks - len(cache._free_blocks)
num_layers = len(cache.key_cache)
bytes_per_parameter = 2 if cache.dtype in [torch.float16, torch.bfloat16] else 4
memory_bytes = num_layers * num_used_blocks * cache.block_size * cache.num_key_value_heads * cache.head_dim * 2 * bytes_per_parameter
free_memory_bytes = num_layers * len(cache._free_blocks) * cache.block_size * cache.num_key_value_heads * cache.head_dim * 2 * bytes_per_parameter
self.kv_cache_memory_gauge.set(memory_bytes)
self.kv_cache_free_memory_gauge.set(free_memory_bytes)
logger.debug(f'KV Cache memory: {memory_bytes / (1024 * 1024):.2f}MB, Used blocks: {num_used_blocks}/{cache.num_blocks} ({num_used_blocks / cache.num_blocks * 100:.1f}%)')
except Exception as e:
logger.warning(f'Failed to record KV cache memory metrics: {e}')
|
Record memory usage of the PagedAttentionCache without GPU synchronization.
This calculates the theoretical memory usage based on cache configuration
and the number of blocks currently in use.
Args:
cache: The PagedAttentionCache object to measure
|
github-repos
|
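The memory figure above is pure arithmetic over the cache configuration: layers x used blocks x block size x KV heads x head dim, doubled for keys plus values, times the bytes per parameter. A standalone sketch with made-up configuration numbers:

```python
num_layers = 32
num_used_blocks = 128
block_size = 16
num_key_value_heads = 8
head_dim = 128
bytes_per_parameter = 2  # 2 for float16/bfloat16, 4 otherwise

memory_bytes = (num_layers * num_used_blocks * block_size
                * num_key_value_heads * head_dim
                * 2                      # keys + values
                * bytes_per_parameter)
print(f"KV cache in use: {memory_bytes / (1024 * 1024):.2f} MB")  # 256.00 MB
```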
def _checkFunctioncode(functioncode, listOfAllowedValues=[]):
FUNCTIONCODE_MIN = 1
FUNCTIONCODE_MAX = 127
_checkInt(functioncode, FUNCTIONCODE_MIN, FUNCTIONCODE_MAX, description='functioncode')
if (listOfAllowedValues is None):
return
if (not isinstance(listOfAllowedValues, list)):
raise TypeError('The listOfAllowedValues should be a list. Given: {0!r}'.format(listOfAllowedValues))
for value in listOfAllowedValues:
_checkInt(value, FUNCTIONCODE_MIN, FUNCTIONCODE_MAX, description='functioncode inside listOfAllowedValues')
if (functioncode not in listOfAllowedValues):
raise ValueError('Wrong function code: {0}, allowed values are {1!r}'.format(functioncode, listOfAllowedValues))
|
Check that the given functioncode is in the listOfAllowedValues.
Also verifies that 1 <= function code <= 127.
Args:
* functioncode (int): The function code
* listOfAllowedValues (list of int): Allowed values. Use *None* to bypass this part of the checking.
Raises:
TypeError, ValueError
|
codesearchnet
|
def iterator_cycle(variables: VarType, parent: str) -> Iterable[VarMatrix]:
if isinstance(variables, dict):
if variables.get("times"):
times = int(variables["times"])
del variables["times"]
yield list(variable_matrix(variables, parent, "product")) * times
else:
raise ValueError(f"times is a required keyword for the repeat iterator.")
else:
raise ValueError(
f"The repeat operator only takes a dict as arguments, got {variables} of type {type(variables)}"
)
|
Cycle through a list of values a specified number of times
Args:
variables: The input variables for the creation of the range
parent: The variable for which the values are being generated.
Returns: A list of dictionaries mapping the parent to each value.
|
juraj-google-style
|
def by_name(name):
devices = discover(all_households=True)
for device in (devices or []):
if device.player_name == name:
return device
return None
|
Return a device by name.
Args:
name (str): The name of the device to return.
Returns:
:class:`~.SoCo`: The first device encountered among all zones with the
given player name. If none are found `None` is returned.
|
juraj-google-style
|
def rotate_view(self, axis_ind=0, angle=0):
camera = self.ren.GetActiveCamera()
if axis_ind == 0:
camera.Roll(angle)
elif axis_ind == 1:
camera.Azimuth(angle)
else:
camera.Pitch(angle)
self.ren_win.Render()
|
Rotate the camera view.
Args:
axis_ind: Index of axis to rotate. Defaults to 0, i.e., a-axis.
angle: Angle to rotate by. Defaults to 0.
|
juraj-google-style
|
def _get_shards_by_task(self, sharding_callback: sharding_util.ShardingCallback) -> Sequence[tuple[str, Sequence[sharding_util.Shard]]]:
def wrap_tensor(shardable_tensor):
tensor_val = shardable_tensor.tensor
tensor_shape = shardable_tensor.shape
save_spec = shardable_tensor._tensor_save_spec
with ops.device(shardable_tensor.device):
save_spec_tensor = save_spec.tensor
if tensor_val is None and save_spec_tensor is None:
return None
elif save_spec_tensor is not None:
tensor_val = save_spec_tensor
tensor_shape = save_spec_tensor.shape
if isinstance(save_spec.name, tensor_lib.Tensor):
tensor_val._wrapped_name = save_spec.name
if isinstance(shardable_tensor.slice_spec, tensor_lib.Tensor):
tensor_val._wrapped_slice_spec = save_spec.slice_spec
return dataclasses.replace(shardable_tensor, tensor=tensor_val, shape=tensor_shape)
shardable_tensors_by_task = {task: [shardable_tensor for shardable_tensor in map(wrap_tensor, shardable_tensors) if shardable_tensor is not None] for task, shardable_tensors in self._shardable_tensors_by_task.items()}
sharding_callback = sharding_callback or sharding_policies.ShardByTaskPolicy()
metrics.SetShardingCallbackDescription(description=sharding_callback.description)
callback_start_time = time.time() * 1000000.0
shards_by_task = []
for task, shardable_tensors in shardable_tensors_by_task.items():
shards_by_task.append((task, sharding_callback(shardable_tensors)))
callback_end_time = time.time() * 1000000.0
callback_duration = math.ceil(callback_end_time - callback_start_time)
metrics.AddShardingCallbackDuration(callback_duration=max(1, callback_duration))
logging.info('Sharding callback duration: %s microseconds', callback_duration)
return shards_by_task
|
Calls the sharding callback with shardable_tensors.
Args:
sharding_callback: ShardingCallback. The callback function wrapper that
splits shardable_tensors into shards.
Returns:
A list of (task, shards) tuples.
|
github-repos
|
def find_stacks(node, strict=False):
fso = FindStackOps()
fso.visit(node)
AnnotateStacks(fso.push_pop_pairs, strict).visit(node)
return node
|
Find pushes and pops to the stack and annotate them as such.
Args:
node: An AST node that might contain stack pushes and pops.
strict: A boolean indicating whether to stringently test whether each
push and pop are matched. This is not always possible when taking
higher-order derivatives of code generated in split-motion.
Returns:
node: The node passed in, but with pushes and pops annotated in AST nodes.
|
codesearchnet
|
def sg_to_sparse(tensor, opt):
indices = tf.where(tf.not_equal(tensor.sg_float(), 0.))
return tf.SparseTensor(indices=indices,
values=tf.gather_nd(tensor, indices) - 1,
dense_shape=tf.shape(tensor).sg_cast(dtype=tf.int64))
|
r"""Converts a dense tensor into a sparse tensor.
See `tf.SparseTensor()` in tensorflow.
Args:
tensor: A `Tensor` with zero-padding (automatically given by chain).
opt:
name: If provided, replace current tensor's name.
Returns:
A `SparseTensor`.
|
juraj-google-style
|
def infer_inputs_from_restored_call_function(fn):
def common_spec(x, y):
common_shape = get_common_shape(x.shape, y.shape)
if isinstance(x, sparse_tensor.SparseTensorSpec):
return sparse_tensor.SparseTensorSpec(common_shape, x.dtype)
elif isinstance(x, ragged_tensor.RaggedTensorSpec):
return ragged_tensor.RaggedTensorSpec(common_shape, x.dtype)
return tensor_spec.TensorSpec(common_shape, x.dtype, x.name)
spec = fn.concrete_functions[0].structured_input_signature[0][0]
for concrete in fn.concrete_functions[1:]:
spec2 = concrete.structured_input_signature[0][0]
spec = nest.map_structure(common_spec, spec, spec2)
return spec
|
Returns TensorSpec of inputs from a restored call function.
Args:
fn: Restored layer call function. It is assumed that `fn` has at least
one concrete function and that the inputs are in the first argument.
Returns:
TensorSpec of call function inputs.
|
github-repos
|
def _Lock(self, path=None, force=False):
if self.lock is None:
self.lock = lock.PidFile(filename=path)
return self.lock.Lock(force=force)
|
Grab a system-wide lock for this command.
Commands wishing to prevent concurrent operation can invoke this
method to acquire a system-wide lock. The lock will be
automatically released on object destruction, however an optional
Unlock() method is provided for commands wishing a smaller scope
of locking.
Args:
path: optional path to lock file.
force: optional boolean to override existing locks.
Returns:
True if the lock was acquired.
False if the lock was not.
|
github-repos
|
def ProcessLine(filename, file_extension, clean_lines, line, include_state, function_state, nesting_state, error, extra_check_functions=[]):
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[line], line, error)
nesting_state.Update(filename, clean_lines, line, error)
CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line, error)
if nesting_state.InAsmBlock():
return
CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
CheckLanguage(filename, clean_lines, line, file_extension, include_state, nesting_state, error)
CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
CheckForNonStandardConstructs(filename, clean_lines, line, nesting_state, error)
CheckVlogArguments(filename, clean_lines, line, error)
CheckPosixThreading(filename, clean_lines, line, error)
CheckInvalidIncrement(filename, clean_lines, line, error)
CheckMakePairUsesDeduction(filename, clean_lines, line, error)
CheckDefaultLambdaCaptures(filename, clean_lines, line, error)
CheckRedundantVirtual(filename, clean_lines, line, error)
CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)
for check_fn in extra_check_functions:
check_fn(filename, clean_lines, line, error)
|
Processes a single line in the file.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
clean_lines: An array of strings, each representing a line of the file,
with comments stripped.
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
|
codesearchnet
|
def _ParsePlistKeyValue(self, knowledge_base, name, value):
if not knowledge_base.GetValue('operating_system_version'):
if name in self._PLIST_KEYS:
knowledge_base.SetValue('operating_system_version', value)
|
Parses a plist key value.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
name (str): name of the plist key.
value (str): value of the plist key.
|
juraj-google-style
|
def AddKeywordsForName(self, name, keywords):
data_store.DB.IndexAddKeywordsForName(self.urn, name, keywords)
|
Associates keywords with name.
Records that keywords are associated with name.
Args:
name: A name which should be associated with some keywords.
keywords: A collection of keywords to associate with name.
|
codesearchnet
|
def get_class(schema_name):
global _registry_loaded
if (not _registry_loaded):
load_message_classes()
try:
return _schema_name_to_class[schema_name]
except KeyError:
_log.warning('The schema "%s" is not in the schema registry! Either install the package with its schema definition or define a schema. Falling back to the default schema...', schema_name)
return Message
|
Retrieve the message class associated with the schema name.
If no match is found, the default schema is returned and a warning is logged.
Args:
schema_name (six.text_type): The name of the :class:`Message` sub-class;
this is typically the Python path.
Returns:
Message: A sub-class of :class:`Message` to create the message from.
|
codesearchnet
|
def _ProcessEvent(self, mediator, event):
try:
self._analysis_plugin.ExamineEvent(mediator, event)
except Exception as exception:
self.SignalAbort()
if self._debug_output:
logger.warning('Unhandled exception while processing event object.')
logger.exception(exception)
|
Processes an event.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
event (EventObject): event.
|
juraj-google-style
|
def convert_to_string(self, productions):
symbols = []
for production in tf.unstack(productions, axis=1):
lhs, rhs = self.production_rules[tf.argmax(input=production, axis=-1)]
if not symbols:
if lhs != self.start_symbol:
raise ValueError("`productions` must begin with `self.start_symbol`.")
symbols = rhs
else:
index = symbols.index(lhs)
symbols = symbols[:index] + rhs + symbols[index + 1:]
string = "".join(symbols)
return string
|
Converts a sequence of productions into a string of terminal symbols.
Args:
productions: Tensor of shape [1, num_productions, num_production_rules].
Slices along the `num_productions` dimension represent one-hot vectors.
Returns:
str that concatenates all terminal symbols from `productions`.
Raises:
ValueError: If the first production rule does not begin with
`self.start_symbol`.
|
juraj-google-style
|
def Page(self, text=None, show_percent=None):
if (text is not None):
self._text += text
if (show_percent is None):
show_percent = (text is None)
self._show_percent = show_percent
text = LineWrap(self._text).splitlines()
while True:
self._newlines = text[self._displayed:(self._displayed + self._lines_to_show)]
for line in self._newlines:
sys.stdout.write((line + '\n'))
if (self._delay and (self._lastscroll > 0)):
time.sleep(0.005)
self._displayed += len(self._newlines)
self._currentpagelines += len(self._newlines)
if (self._currentpagelines >= self._lines_to_show):
self._currentpagelines = 0
wish = self._AskUser()
if (wish == 'q'):
return False
elif (wish == 'g'):
self._Scroll(((len(text) - self._displayed) + 1))
elif (wish == '\r'):
self._Scroll(1)
elif (wish == '\x1b[B'):
self._Scroll(1)
elif (wish == '\x1b[A'):
self._Scroll((- 1))
elif (wish == 'b'):
self._Scroll((0 - self._cli_lines))
else:
self._Scroll()
if (self._displayed >= len(text)):
break
return True
|
Page text.
Continues to page through any text supplied in the constructor. Also, any
text supplied to this method will be appended to the total text to be
displayed. The method returns when all available text has been displayed to
the user, or the user quits the pager.
Args:
text: A string, extra text to be paged.
show_percent: A boolean, if True, indicate how much is displayed so far.
If None, this behaviour is 'text is None'.
Returns:
A boolean. If True, more data can be displayed to the user. False
implies that the user has quit the pager.
|
codesearchnet
|
def _CreateRoutesFolder(self, schedule, doc, route_type=None):
def GetRouteName(route):
'Return a placemark name for the route.\n\n Args:\n route: The transitfeed.Route instance.\n\n Returns:\n The name as a string.\n '
name_parts = []
if route.route_short_name:
name_parts.append(('<b>%s</b>' % route.route_short_name))
if route.route_long_name:
name_parts.append(route.route_long_name)
return (' - '.join(name_parts) or route.route_id)
def GetRouteDescription(route):
'Return a placemark description for the route.\n\n Args:\n route: The transitfeed.Route instance.\n\n Returns:\n The description as a string.\n '
desc_items = []
if route.route_desc:
desc_items.append(route.route_desc)
if route.route_url:
desc_items.append(('Route info page: <a href="%s">%s</a>' % (route.route_url, route.route_url)))
description = '<br/>'.join(desc_items)
return (description or None)
routes = [route for route in schedule.GetRouteList() if ((route_type is None) or (route.route_type == route_type))]
if (not routes):
return None
routes.sort(key=(lambda x: GetRouteName(x)))
if (route_type is not None):
route_type_names = {0: 'Tram, Streetcar or Light rail', 1: 'Subway or Metro', 2: 'Rail', 3: 'Bus', 4: 'Ferry', 5: 'Cable car', 6: 'Gondola or suspended cable car', 7: 'Funicular'}
type_name = route_type_names.get(route_type, str(route_type))
folder_name = ('Routes - %s' % type_name)
else:
folder_name = 'Routes'
routes_folder = self._CreateFolder(doc, folder_name, visible=False)
for route in routes:
style_id = self._CreateStyleForRoute(doc, route)
route_folder = self._CreateFolder(routes_folder, GetRouteName(route), description=GetRouteDescription(route))
self._CreateRouteShapesFolder(schedule, route_folder, route, style_id, False)
self._CreateRoutePatternsFolder(route_folder, route, style_id, False)
if self.show_trips:
self._CreateRouteTripsFolder(route_folder, route, style_id, schedule)
return routes_folder
|
Create a KML Folder containing routes in a schedule.
The folder contains a subfolder for each route in the schedule of type
route_type. If route_type is None, then all routes are selected. Each
subfolder contains a flattened graph placemark, a route shapes placemark
and, if show_trips is True, a subfolder containing placemarks for each of
the trips in the route.
If there are no routes in the schedule then no folder is created and None
is returned.
Args:
schedule: The transitfeed.Schedule instance.
doc: The KML Document ElementTree.Element instance.
route_type: The route type integer or None.
Returns:
The Folder ElementTree.Element instance or None.
|
codesearchnet
|
def sequence_path(self, fasta_path):
if not fasta_path:
self.sequence_dir = None
self.sequence_file = None
else:
if not op.exists(fasta_path):
raise OSError('{}: file does not exist'.format(fasta_path))
if not op.dirname(fasta_path):
self.sequence_dir = '.'
else:
self.sequence_dir = op.dirname(fasta_path)
self.sequence_file = op.basename(fasta_path)
tmp_sr = SeqIO.read(fasta_path, 'fasta')
if self.name == '<unknown name>':
self.name = tmp_sr.name
if self.description == '<unknown description>':
self.description = tmp_sr.description
if not self.dbxrefs:
self.dbxrefs = tmp_sr.dbxrefs
if not self.features:
self.features = tmp_sr.features
if not self.annotations:
self.annotations = tmp_sr.annotations
if not self.letter_annotations:
self.letter_annotations = tmp_sr.letter_annotations
|
Provide pointers to the paths of the FASTA file
Args:
fasta_path: Path to FASTA file
|
juraj-google-style
|
def explicit_method_override(method):
setattr(method, '__explicit_override__', True)
return method
|
Decorator that marks a member method as explicitly overridden.
In PyGlove, many methods are managed by the framework - for example -
``pg.Object.__init__``. It's easy for users to override these methods
unconsciously. Therefore, we introduce this decorator to catch error at
the first place when such overrides incidentally take place, while allowing
advanced users to override them.
Usage::
class Foo(pg.Object):
@pg.explicit_method_override
def __init__(self, *args, **kwargs):
...
Args:
method: method to explicitly overriden.
Returns:
The original method with an explicit overriden stamp.
|
github-repos
|
def document(self, name, file_name, **kwargs):
group_obj = Document(name, file_name, **kwargs)
return self._group(group_obj)
|
Add Document data to Batch object.
Args:
name (str): The name for this Group.
file_name (str): The name for the attached file for this Group.
date_added (str, kwargs): The date timestamp the Indicator was created.
file_content (str;method, kwargs): The file contents or callback method to retrieve
file content.
malware (bool, kwargs): If true the file is considered malware.
password (bool, kwargs): If malware is true a password for the zip archive is
xid (str, kwargs): The external id for this Group.
Returns:
obj: An instance of Document.
|
codesearchnet
|
def rhombohedral(a: float, alpha: float):
return Lattice.from_parameters(a, a, a, alpha, alpha, alpha)
|
Convenience constructor for a rhombohedral lattice.
Args:
a (float): *a* lattice parameter of the rhombohedral cell.
alpha (float): Angle for the rhombohedral lattice in degrees.
Returns:
Rhombohedral lattice of dimensions a x a x a.
|
juraj-google-style
|
def setupSerialPort(loopback, port):
if loopback:
testSerial = SerialTestClass()
serialPort = testSerial.serialPort
else:
serialPort = serial.Serial(port, 115200, timeout=0)
return serialPort
|
Sets up serial port by connecting to physical or software port.
Depending on command line options, this function will either connect to a
SerialTestClass() port for loopback testing or to the specified port from
the command line option. If loopback is True it overrides the physical port
specification.
Args:
loopback: argparse option
port: argparse option
Returns:
serialPort: Pyserial serial port instance
|
juraj-google-style
|
def fit(self, volumes, energies):
eos_fit = self.model(np.array(volumes), np.array(energies))
eos_fit.fit()
return eos_fit
|
Fit energies as function of volumes.
Args:
volumes (list/np.array)
energies (list/np.array)
Returns:
EOSBase: EOSBase object
|
codesearchnet
|
def _ModifyInterface(
self, interface_config, config_key, config_value, replace=False):
config_entry = '%s=%s' % (config_key, config_value)
if not open(interface_config).read().count(config_key):
with open(interface_config, 'a') as config:
config.write('%s\n' % config_entry)
elif replace:
for line in fileinput.input(interface_config, inplace=True):
print(re.sub(r'%s=.*' % config_key, config_entry, line.rstrip()))
|
Write a value to a config file if not already present.
Args:
interface_config: string, the path to a config file.
config_key: string, the configuration key to set.
config_value: string, the value to set for the configuration key.
replace: bool, replace the configuration option if already present.
|
juraj-google-style
|
def square(duration: int, amp: complex, period: float = None,
phase: float = 0, name: str = None) -> SamplePulse:
if period is None:
period = duration
return _sampled_square_pulse(duration, amp, period, phase=phase, name=name)
|
Generates square wave `SamplePulse`.
Applies `left` sampling strategy to generate discrete pulse from continuous function.
Args:
duration: Duration of pulse. Must be greater than zero.
amp: Pulse amplitude. Wave range is [-amp, amp].
period: Pulse period, units of dt. If `None` defaults to single cycle.
phase: Pulse phase.
name: Name of pulse.
|
juraj-google-style
|
def scalar_projection(v1, v2):
return (np.dot(v1, v2) / np.linalg.norm(v2))
|
Compute the scalar projection of v1 onto v2.
Args:
v1, v2: iterable
indices 0, 1, 2 corresponding to cartesian coordinates
Returns:
float: the scalar projection of v1 onto the direction of v2
|
codesearchnet
|
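A quick numeric check of the formula `dot(v1, v2) / |v2|`: for v1 = (3, 4, 0) and v2 along the x-axis the scalar projection is 3.0.

```python
import numpy as np

v1 = np.array([3.0, 4.0, 0.0])
v2 = np.array([1.0, 0.0, 0.0])
print(np.dot(v1, v2) / np.linalg.norm(v2))  # 3.0
```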
class CustomHFIndex(HFIndexBase):
def __init__(self, vector_size: int, dataset, index_path=None):
requires_backends(self, ['faiss'])
super().__init__(vector_size, dataset, index_initialized=index_path is None)
self.index_path = index_path
@classmethod
def load_from_disk(cls, vector_size, dataset_path, index_path):
logger.info(f'Loading passages from {dataset_path}')
if dataset_path is None or index_path is None:
raise ValueError("Please provide `dataset_path` and `index_path` after calling `dataset.save_to_disk(dataset_path)` and `dataset.get_index('embeddings').save(index_path)`.")
dataset = load_from_disk(dataset_path)
return cls(vector_size=vector_size, dataset=dataset, index_path=index_path)
def init_index(self):
if not self.is_initialized():
logger.info(f'Loading index from {self.index_path}')
self.dataset.load_faiss_index('embeddings', file=self.index_path)
self._index_initialized = True
|
A wrapper around an instance of [`~datasets.Datasets`]. The dataset and the index are both loaded from the
indicated paths on disk.
Args:
vector_size (`int`): the dimension of the passages embeddings used by the index
dataset_path (`str`):
The path to the serialized dataset on disk. The dataset should have 3 columns: title (str), text (str) and
embeddings (arrays of dimension vector_size)
index_path (`str`):
The path to the serialized faiss index on disk.
|
github-repos
|
def score(self, data, metric='accuracy', break_ties='random', verbose=True, print_confusion_matrix=True, **kwargs):
(Y_p, Y, Y_s) = self._get_predictions(data, break_ties=break_ties, return_probs=True, **kwargs)
return_list = isinstance(metric, list)
metric_list = (metric if isinstance(metric, list) else [metric])
scores = []
for metric in metric_list:
score = metric_score(Y, Y_p, metric, probs=Y_s, ignore_in_gold=[0])
scores.append(score)
if verbose:
print(f'{metric.capitalize()}: {score:.3f}')
if (print_confusion_matrix and verbose):
confusion_matrix(Y, Y_p, pretty_print=True)
if ((len(scores) == 1) and (not return_list)):
return scores[0]
else:
return scores
|
Scores the predictive performance of the Classifier on all tasks
Args:
data: a Pytorch DataLoader, Dataset, or tuple with Tensors (X,Y):
X: The input for the predict method
Y: An [n] or [n, 1] torch.Tensor or np.ndarray of target labels
in {1,...,k}
metric: A metric (string) with which to score performance or a
list of such metrics
break_ties: A tie-breaking policy (see Classifier._break_ties())
verbose: The verbosity for just this score method; it will not
update the class config.
print_confusion_matrix: Print confusion matrix (overwritten to False if
verbose=False)
Returns:
scores: A (float) score or a list of such scores if kwarg metric
is a list
|
codesearchnet
|
def add_comment(self, comment):
if (not comment):
return
self.__comments[comment.name] = comment
self.comment_added_signal(self, comment)
|
Add a comment to the database.
Args:
comment (hotdoc.core.Comment): comment to add
|
codesearchnet
|
def from_callable(cls, fn: Callable) -> Optional['IOTypeHints']:
if _disable_from_callable or getattr(fn, '_beam_no_annotations', False):
return None
signature = get_signature(fn)
if all((param.annotation == param.empty for param in signature.parameters.values())) and signature.return_annotation == signature.empty:
return None
input_args = []
input_kwargs = {}
for param in signature.parameters.values():
if param.annotation == param.empty:
if param.kind == param.VAR_POSITIONAL:
input_args.append(_ANY_VAR_POSITIONAL)
elif param.kind == param.VAR_KEYWORD:
input_kwargs[param.name] = _ANY_VAR_KEYWORD
elif param.kind == param.KEYWORD_ONLY:
input_kwargs[param.name] = typehints.Any
else:
input_args.append(typehints.Any)
elif param.kind in [param.KEYWORD_ONLY, param.VAR_KEYWORD]:
input_kwargs[param.name] = convert_to_beam_type(param.annotation)
else:
assert param.kind in [param.POSITIONAL_ONLY, param.POSITIONAL_OR_KEYWORD, param.VAR_POSITIONAL], 'Unsupported Parameter kind: %s' % param.kind
input_args.append(convert_to_beam_type(param.annotation))
output_args = []
if signature.return_annotation != signature.empty:
output_args.append(convert_to_beam_type(signature.return_annotation))
else:
output_args.append(typehints.Any)
name = getattr(fn, '__name__', '<unknown>')
msg = ['from_callable(%s)' % name, ' signature: %s' % signature]
if hasattr(fn, '__code__'):
msg.append(' File "%s", line %d' % (fn.__code__.co_filename, fn.__code__.co_firstlineno))
return IOTypeHints(input_types=(tuple(input_args), input_kwargs), output_types=(tuple(output_args), {}), origin=cls._make_origin([], tb=False, msg=msg))
|
Construct an IOTypeHints object from a callable's signature.
Supports Python 3 annotations. For partial annotations, sets unknown types
to Any, _ANY_VAR_POSITIONAL, or _ANY_VAR_KEYWORD.
Returns:
A new IOTypeHints or None if no annotations found.
|
github-repos
|
def del_hparam(self, name):
if hasattr(self, name):
delattr(self, name)
del self._hparam_types[name]
|
Removes the hyperparameter with key 'name'.
Does nothing if it isn't present.
Args:
name: Name of the hyperparameter.
|
juraj-google-style
|
def create_magic_packet(macaddress):
if len(macaddress) == 12:
pass
elif len(macaddress) == 17:
sep = macaddress[2]
macaddress = macaddress.replace(sep, '')
else:
raise ValueError('Incorrect MAC address format')
data = b'FFFFFFFFFFFF' + (macaddress * 16).encode()
send_data = b''
for i in range(0, len(data), 2):
send_data += struct.pack(b'B', int(data[i: i + 2], 16))
return send_data
|
Create a magic packet.
A magic packet is a packet that can be used with the Wake-on-LAN
protocol to wake up a computer. The packet is constructed from the
mac address given as a parameter.
Args:
macaddress (str): the mac address that should be parsed into a
magic packet.
|
juraj-google-style
|
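A self-contained sketch of the packet layout described above: 6 bytes of 0xFF followed by the target MAC repeated 16 times, giving a 102-byte payload. The MAC address and the commented-out broadcast settings are placeholders:

```python
import struct


def make_magic_packet(mac):
    mac = mac.replace(':', '').replace('-', '')
    payload = 'FFFFFFFFFFFF' + mac * 16
    return b''.join(struct.pack('B', int(payload[i:i + 2], 16))
                    for i in range(0, len(payload), 2))


packet = make_magic_packet('AA:BB:CC:DD:EE:FF')
print(len(packet))  # 102 bytes: 6 + 16 * 6

# To actually send it, broadcast over UDP (kept commented to stay side-effect free):
# import socket
# sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
# sock.sendto(packet, ('255.255.255.255', 9))
```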
def trim_whitespace(self, text):
lines = text.split('\n')
new_lines = [x.lstrip() for x in lines]
return '\n'.join(new_lines)
|
Remove leading whitespace from each line of a multiline string
Args:
text (string): The text to be unindented
Returns:
string: The unindented block of text
|
juraj-google-style
|
def MakeJoint(pmf1, pmf2):
joint = Joint()
for (v1, p1) in pmf1.Items():
for (v2, p2) in pmf2.Items():
joint.Set((v1, v2), (p1 * p2))
return joint
|
Joint distribution of values from pmf1 and pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
Joint pmf of value pairs
|
codesearchnet
|
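The joint distribution above is just the outer product of two probability mass functions. A sketch using plain dicts in place of the library's Pmf/Joint classes:

```python
def make_joint(pmf1, pmf2):
    return {(v1, v2): p1 * p2
            for v1, p1 in pmf1.items()
            for v2, p2 in pmf2.items()}


coin = {'H': 0.5, 'T': 0.5}
parity = {'even': 0.5, 'odd': 0.5}
print(make_joint(coin, parity))
# {('H', 'even'): 0.25, ('H', 'odd'): 0.25, ('T', 'even'): 0.25, ('T', 'odd'): 0.25}
```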
def get_snpeff_info(snpeff_string, snpeff_header):
snpeff_annotations = [
dict(zip(snpeff_header, snpeff_annotation.split('|')))
for snpeff_annotation in snpeff_string.split(',')
]
return snpeff_annotations
|
Make the snpeff annotations into dictionaries.
A snpeff dictionary will have the snpeff column names as keys and
the snpeff annotations as values.
The dictionaries are stored in a list.
One dictionary for each transcript.
Args:
snpeff_string (string): A string with the ANN annotation
snpeff_header (list): A list with the snpeff header
Return:
snpeff_annotations (list): A list of snpeff annotation dicts
|
juraj-google-style
|
def get_default_settings(sub_scripts, script_order, script_execution_freq, iterator_type):
def populate_sweep_param(scripts, parameter_list, trace=''):
"\n\n Args:\n scripts: a dict of {'class name': <class object>} pairs\n\n Returns: A list of all parameters of the input scripts\n\n "
def get_parameter_from_dict(trace, dic, parameter_list, valid_values=None):
'\n appends keys in the dict to a list in the form trace.key.subkey.subsubkey...\n Args:\n trace: initial prefix (path through scripts and parameters to current location)\n dic: dictionary\n parameter_list: list to which append the parameters\n\n valid_values: valid values of dictionary values if None dic should be a dictionary\n\n Returns:\n\n '
if ((valid_values is None) and isinstance(dic, Parameter)):
valid_values = dic.valid_values
for (key, value) in dic.items():
if isinstance(value, dict):
parameter_list = get_parameter_from_dict(((trace + '.') + key), value, parameter_list, dic.valid_values[key])
elif ((valid_values[key] in (float, int)) or (isinstance(valid_values[key], list) and (valid_values[key][0] in (float, int)))):
parameter_list.append(((trace + '.') + key))
else:
print(('ignoring sweep parameter', key))
return parameter_list
for script_name in list(scripts.keys()):
from pylabcontrol.core import ScriptIterator
script_trace = trace
if (script_trace == ''):
script_trace = script_name
else:
script_trace = ((script_trace + '->') + script_name)
if issubclass(scripts[script_name], ScriptIterator):
populate_sweep_param(vars(scripts[script_name])['_SCRIPTS'], parameter_list=parameter_list, trace=script_trace)
else:
for setting in [elem[1] for elem in inspect.getmembers(scripts[script_name]) if (elem[0] == '_DEFAULT_SETTINGS')][0]:
parameter_list = get_parameter_from_dict(script_trace, setting, parameter_list)
return parameter_list
if (iterator_type == 'loop'):
script_default_settings = [Parameter('script_order', script_order), Parameter('script_execution_freq', script_execution_freq), Parameter('num_loops', 0, int, 'times the subscripts will be executed'), Parameter('run_all_first', True, bool, 'Run all scripts with nonzero frequency in first pass')]
elif (iterator_type == 'sweep'):
sweep_params = populate_sweep_param(sub_scripts, [])
script_default_settings = [Parameter('script_order', script_order), Parameter('script_execution_freq', script_execution_freq), Parameter('sweep_param', sweep_params[0], sweep_params, 'variable over which to sweep'), Parameter('sweep_range', [Parameter('min_value', 0, float, 'min parameter value'), Parameter('max_value', 0, float, 'max parameter value'), Parameter('N/value_step', 0, float, 'either number of steps or parameter value step, depending on mode')]), Parameter('stepping_mode', 'N', ['N', 'value_step'], 'Switch between number of steps and step amount'), Parameter('run_all_first', True, bool, 'Run all scripts with nonzero frequency in first pass')]
else:
print(('unknown iterator type ' + iterator_type))
raise TypeError(('unknown iterator type ' + iterator_type))
return script_default_settings
|
Assigns the actual script settings depending on the iterator type.
This might be overwritten by classes that inherit from ScriptIterator.
Args:
sub_scripts: dictionary with the subscripts
script_order: execution order of subscripts
script_execution_freq: execution frequency of subscripts
Returns:
the default setting for the iterator
|
codesearchnet
|
def get_ituz(self, callsign, timestamp=timestamp_now):
return self.get_all(callsign, timestamp)[const.ITUZ]
|
Returns ITU Zone of a callsign
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
int: containing the callsign's ITU Zone
Raises:
KeyError: No ITU Zone found for callsign
Note:
Currently, only Country-files.com lookup database contains ITU Zones
|
juraj-google-style
|
def plot_hall_carriers(self, temp=300):
import matplotlib.pyplot as plt
hall_carriers = [abs(i) for i in self._bz.get_hall_carrier_concentration()[temp]]
plt.semilogy(self._bz.mu_steps, hall_carriers, linewidth=3.0, color='r')
self._plot_bg_limits()
self._plot_doping(temp)
plt.xlim((- 0.5), (self._bz.gap + 0.5))
plt.ylim(100000000000000.0, 1e+22)
plt.ylabel('Hall carrier concentration (cm-3)', fontsize=30.0)
plt.xlabel('E-E$_f$ (eV)', fontsize=30)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
return plt
|
Plot the Hall carrier concentration as a function of the Fermi level
Args:
temp: the temperature
Returns:
a matplotlib object
|
codesearchnet
|
def get_sources(self, prefix=''):
prefix = prefix.replace('-', '_')
prefixed = '%s_sources' % prefix
if prefixed in self.__cli:
sources = self.__cli.get(prefixed)
from_conf = False
else:
sources = self.__config.get(prefixed)
from_conf = True
if sources is None:
return OrderedSet()
sources = self.__resolve_patterns(sources, from_conf)
prefixed = '%s_source_filters' % prefix
if prefixed in self.__cli:
filters = self.__cli.get(prefixed)
from_conf = False
else:
filters = self.__config.get(prefixed)
from_conf = True
if filters is None:
return sources
sources -= self.__resolve_patterns(filters, from_conf)
return sources
|
Retrieve a set of absolute paths to sources, according to `prefix`
`ConfigParser` will perform wildcard expansion and
filtering.
Args:
prefix: str, the desired prefix.
Returns:
utils.utils.OrderedSet: The set of sources for the given
`prefix`.
|
juraj-google-style
|
def get_memory_growth(device):
return context.context().get_memory_growth(device)
|
Get if memory growth is enabled for a `PhysicalDevice`.
If memory growth is enabled for a `PhysicalDevice`, the runtime initialization
will not allocate all memory on the device.
For example:
>>> physical_devices = tf.config.list_physical_devices('GPU')
>>> try:
... tf.config.experimental.set_memory_growth(physical_devices[0], True)
... assert tf.config.experimental.get_memory_growth(physical_devices[0])
... except:
... # Invalid device or cannot modify virtual devices once initialized.
... pass
Args:
device: `PhysicalDevice` to query
Returns:
A boolean indicating the memory growth setting for the `PhysicalDevice`.
Raises:
ValueError: Invalid `PhysicalDevice` specified.
|
github-repos
|
def seek_to_end(self, *partitions):
if (not all([isinstance(p, TopicPartition) for p in partitions])):
raise TypeError('partitions must be TopicPartition namedtuples')
if (not partitions):
partitions = self._subscription.assigned_partitions()
assert partitions, 'No partitions are currently assigned'
else:
for p in partitions:
assert (p in self._subscription.assigned_partitions()), 'Unassigned partition'
for tp in partitions:
log.debug('Seeking to end of partition %s', tp)
self._subscription.need_offset_reset(tp, OffsetResetStrategy.LATEST)
|
Seek to the most recent available offset for partitions.
Arguments:
*partitions: Optionally provide specific TopicPartitions, otherwise
default to all assigned partitions.
Raises:
AssertionError: If any partition is not currently assigned, or if
no partitions are assigned.
|
codesearchnet
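A usage sketch with kafka-python, which this method appears to come from; the broker address and topic name are placeholders.
from kafka import KafkaConsumer, TopicPartition

consumer = KafkaConsumer(bootstrap_servers="localhost:9092")
tp = TopicPartition("my-topic", 0)
consumer.assign([tp])          # explicit assignment so the partition counts as assigned
consumer.seek_to_end(tp)       # jump to the latest offset of this partition
consumer.seek_to_end()         # or: seek all currently assigned partitions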
|
def _get_shoulds(options):
if options.version == '2.0':
return shoulds20.list_shoulds(options)
else:
return shoulds21.list_shoulds(options)
|
Return the list of 'SHOULD' validators for the correct version of STIX.
Args:
options: ValidationOptions instance with validation options for this
validation run, including the STIX spec version.
|
juraj-google-style
|
def __init__(self, input_columns: t.List[Column], output_columns: t.List[Column], column_transform) -> None:
self.input_columns = input_columns
self.output_columns = output_columns
self.column_transform = column_transform
|
Construct a new ``CompoundColumn`` object.
Args:
input_columns (list, Column): A list of ``Column`` objects representing column(s) from the SOURCE table.
output_columns (list, Column): A list of ``Column`` objects representing column(s) from the FINAL table.
column_transform (Callable): Function accepting the table object, performing transformations to it and returning a DataFrame containing the NEW columns only.
|
juraj-google-style
|
def imfrombytes(content, flag='color'):
img_np = np.frombuffer(content, np.uint8)
flag = (imread_flags[flag] if is_str(flag) else flag)
img = cv2.imdecode(img_np, flag)
return img
|
Read an image from bytes.
Args:
content (bytes): Image bytes got from files or other streams.
flag (str): Same as :func:`imread`.
Returns:
ndarray: Loaded image array.
|
codesearchnet
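A short usage sketch: read raw bytes from a file and decode them with the helper above (an OpenCV-backed, mmcv-style API is assumed; the file name is a placeholder).
with open("example.jpg", "rb") as f:
    content = f.read()
img = imfrombytes(content, flag="color")
print(img.shape)   # e.g. (H, W, 3) for a color image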
|
def orth_chol(order, dist, normed=True, sort='GR', cross_truncation=1.0, **kws):
dim = len(dist)
basis = chaospy.poly.basis(start=1, stop=order, dim=dim, sort=sort, cross_truncation=cross_truncation)
length = len(basis)
cholmat = chaospy.chol.gill_king(chaospy.descriptives.Cov(basis, dist))
cholmat_inv = numpy.linalg.inv(cholmat.T).T
if (not normed):
diag_mesh = numpy.repeat(numpy.diag(cholmat_inv), len(cholmat_inv))
cholmat_inv /= diag_mesh.reshape(cholmat_inv.shape)
coefs = numpy.empty(((length + 1), (length + 1)))
coefs[1:, 1:] = cholmat_inv
coefs[0, 0] = 1
coefs[0, 1:] = 0
expected = (- numpy.sum((cholmat_inv * chaospy.descriptives.E(basis, dist, **kws)), (- 1)))
coefs[1:, 0] = expected
coefs = coefs.T
out = {}
out[((0,) * dim)] = coefs[0]
for idx in range(length):
index = basis[idx].keys[0]
out[index] = coefs[(idx + 1)]
polynomials = chaospy.poly.Poly(out, dim, coefs.shape[1:], float)
return polynomials
|
Create orthogonal polynomial expansion from Cholesky decomposition.
Args:
order (int):
Order of polynomial expansion
dist (Dist):
Distribution space where polynomials are orthogonal
normed (bool):
If True orthonormal polynomials will be used instead of monic.
sort (str):
Ordering argument passed to poly.basis. If custom basis is used,
argument is ignored.
cross_truncation (float):
Use hyperbolic cross truncation scheme to reduce the number of
terms in expansion.
Examples:
>>> Z = chaospy.Normal()
>>> print(chaospy.around(chaospy.orth_chol(3, Z), 4))
[1.0, q0, 0.7071q0^2-0.7071, 0.4082q0^3-1.2247q0]
|
codesearchnet
|
def as_dict(self):
return {'@module': self.__class__.__module__, '@class': self.__class__.__name__, 'r': jsanitize(self.r), 'energies': jsanitize(self.energies), 'forces': jsanitize(self.forces), 'structures': [s.as_dict() for s in self.structures]}
|
Dict representation of NEBAnalysis.
Returns:
JSON serializable dict representation.
|
codesearchnet
|
def ConvertStringToFilename(name):
return re.sub('\\W', (lambda x: ('%%%02X' % ord(x.group(0)))), name, flags=re.UNICODE).rstrip('/')
|
Converts a unicode string to a filesystem-safe filename.
For maximum compatibility we escape all chars which are not alphanumeric (in
the unicode sense).
Args:
name: a unicode string that is part of a subject.
Returns:
A safe filename with escaped special chars.
|
codesearchnet
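A quick illustration of the escaping behaviour: every character that is not alphanumeric (or underscore) is replaced by %XX, its ordinal in hex, and trailing slashes are stripped. The input string is an arbitrary example.
name = u"aff4:/C.0000000000000001/fs os"
print(ConvertStringToFilename(name))
# expected output: aff4%3A%2FC%2E0000000000000001%2Ffs%20os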
|
def __init__(self, config_files, use_tc=None, **kwargs):
super(VIIRSSDRReader, self).__init__(config_files, **kwargs)
self.use_tc = use_tc
|
Initialize file reader and adjust geolocation preferences.
Args:
config_files (iterable): yaml config files passed to base class
use_tc (boolean): If `True` use the terrain corrected
files. If `False`, switch to non-TC files. If
`None` (default), use TC if available, non-TC otherwise.
|
juraj-google-style
|
def delete(filething):
f = FLAC(filething)
filething.fileobj.seek(0)
f.delete(filething)
|
Remove tags from a file.
Args:
filething (filething)
Raises:
mutagen.MutagenError
|
juraj-google-style
|
def __init__(self, config: Dict[str, str], default_level: str):
self._should_log: Dict[Tuple[str, str], bool] = {}
self._default_level = config.get('', default_level)
self._log_rules = [
(logger.split('.') if logger else list(), level)
for logger, level in config.items()
]
|
Initializes a new `LogFilter`
Args:
config: Dictionary mapping module names to logging level
default_level: The default logging level
|
juraj-google-style
|
def make_json_formatted_for_single_chart(mutant_features, inference_result_proto, index_to_mutate):
x_label = 'step'
y_label = 'scalar'
if isinstance(inference_result_proto, classification_pb2.ClassificationResponse):
series = {}
for (idx, classification) in enumerate(inference_result_proto.result.classifications):
mutant_feature = mutant_features[(idx % len(mutant_features))]
for (class_index, classification_class) in enumerate(classification.classes):
if (classification_class.label == ''):
classification_class.label = str(class_index)
if ((len(classification.classes) == 2) and (classification_class.label == '0')):
continue
key = classification_class.label
if index_to_mutate:
key += (' (index %d)' % index_to_mutate)
if (not (key in series)):
series[key] = {}
if (not (mutant_feature.mutant_value in series[key])):
series[key][mutant_feature.mutant_value] = []
series[key][mutant_feature.mutant_value].append(classification_class.score)
return_series = collections.defaultdict(list)
for (key, mutant_values) in iteritems(series):
for (value, y_list) in iteritems(mutant_values):
return_series[key].append({x_label: value, y_label: (sum(y_list) / float(len(y_list)))})
return_series[key].sort(key=(lambda p: p[x_label]))
return return_series
elif isinstance(inference_result_proto, regression_pb2.RegressionResponse):
points = {}
for (idx, regression) in enumerate(inference_result_proto.result.regressions):
mutant_feature = mutant_features[(idx % len(mutant_features))]
if (not (mutant_feature.mutant_value in points)):
points[mutant_feature.mutant_value] = []
points[mutant_feature.mutant_value].append(regression.value)
key = 'value'
if (index_to_mutate != 0):
key += (' (index %d)' % index_to_mutate)
list_of_points = []
for (value, y_list) in iteritems(points):
list_of_points.append({x_label: value, y_label: (sum(y_list) / float(len(y_list)))})
list_of_points.sort(key=(lambda p: p[x_label]))
return {key: list_of_points}
else:
raise NotImplementedError('Only classification and regression implemented.')
|
Returns JSON formatted for a single mutant chart.
Args:
mutant_features: An iterable of `MutantFeatureValue`s representing the
X-axis.
inference_result_proto: A ClassificationResponse or RegressionResponse
returned by Servo, representing the Y-axis.
It contains one 'classification' or 'regression' for every Example that
was sent for inference. The length of that field should be the same length
of mutant_features.
index_to_mutate: The index of the feature being mutated for this chart.
Returns:
A JSON-able dict for rendering a single mutant chart, parseable by
`vz-line-chart` or `vz-bar-chart`.
|
codesearchnet
|
def decode(obj, content_type):
try:
decoder = _decoders_map[content_type]
return decoder(obj)
except KeyError:
raise _errors.UnsupportedFormatError(content_type)
|
Decode an object encoded in one of the default content types into a numpy array.
Args:
obj (object): to be decoded.
content_type (str): content type to be used.
Returns:
np.array: decoded object.
|
juraj-google-style
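A hedged usage sketch; "application/json" as a registered content type in `_decoders_map` is an assumption about the surrounding module, not something this entry confirms.
try:
    arr = decode("[1.0, 2.0, 3.0]", "application/json")   # content type assumed supported
    print(arr)
except Exception as err:   # UnsupportedFormatError when the content type is unknown
    print("unsupported content type:", err)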
|
def _process_assignments(self, feed_item, creative_assignments, placement_assignments, event_tag_assignments, campaign):
assigned_creatives = []
assigned_placements = []
assigned_event_tags = []
for assignment in feed_item['creative_assignment']:
creative = self._creative_dao.get(assignment, required=True)
assignment[FieldMap.CREATIVE_ID] = creative['id']
if not creative['id'] in assigned_creatives:
assigned_creatives.append(creative['id'])
sequence = assignment.get(FieldMap.CREATIVE_ROTATION_SEQUENCE, None)
weight = assignment.get(FieldMap.CREATIVE_ROTATION_WEIGHT, None)
sequence = sequence if type(sequence) is int else None
weight = weight if type(weight) is int else None
if assignment.get(FieldMap.AD_CREATIVE_ROTATION_START_TIME, ''):
startTime = assignment.get(FieldMap.AD_CREATIVE_ROTATION_START_TIME, '') if 'T' in assignment.get(FieldMap.AD_CREATIVE_ROTATION_START_TIME, '') else StringExtensions.convertDateStrToDateTimeStr(feed_item.get(FieldMap.AD_CREATIVE_ROTATION_START_TIME, None))
assignment[FieldMap.AD_CREATIVE_ROTATION_START_TIME] = startTime
else:
startTime = None
if assignment.get(FieldMap.AD_CREATIVE_ROTATION_END_TIME, ''):
endTime = assignment.get(FieldMap.AD_CREATIVE_ROTATION_END_TIME, '') if 'T' in assignment.get(FieldMap.AD_CREATIVE_ROTATION_END_TIME, '') else StringExtensions.convertDateStrToDateTimeStr(feed_item.get(FieldMap.AD_CREATIVE_ROTATION_END_TIME, None), '23:59:59')
assignment[FieldMap.AD_CREATIVE_ROTATION_END_TIME] = endTime
else:
endTime = None
lp = None
if assignment.get(FieldMap.AD_LANDING_PAGE_ID, '') != 'CAMPAIGN_DEFAULT':
lp = self._landing_page_dao.get(assignment, required=True)
else:
lp = self._landing_page_dao.get({FieldMap.AD_LANDING_PAGE_ID: campaign['defaultLandingPageId']}, required=True)
creative_assignment = {'active': True, 'sequence': sequence, 'weight': weight, 'creativeId': assignment.get(FieldMap.CREATIVE_ID, None), 'startTime': startTime, 'endTime': endTime, 'clickThroughUrl': {'defaultLandingPage': False if (assignment.get(FieldMap.AD_LANDING_PAGE_ID, '') or assignment.get(FieldMap.CUSTOM_CLICK_THROUGH_URL, '')) and assignment.get(FieldMap.AD_LANDING_PAGE_ID, '') != 'CAMPAIGN_DEFAULT' else True, 'landingPageId': lp.get('id', None) if lp else None, 'customClickThroughUrl': assignment.get(FieldMap.CUSTOM_CLICK_THROUGH_URL, '')}}
if creative.get('exitCustomEvents'):
creative_assignment['richMediaExitOverrides'] = []
if assignment.get(FieldMap.AD_LANDING_PAGE_ID, '') or assignment.get(FieldMap.CUSTOM_CLICK_THROUGH_URL, ''):
for exit_custom_event in creative.get('exitCustomEvents', []):
creative_assignment['richMediaExitOverrides'].append({'exitId': exit_custom_event['id'], 'enabled': True, 'clickThroughUrl': {'defaultLandingPage': False if (assignment.get(FieldMap.AD_LANDING_PAGE_ID, '') or assignment.get(FieldMap.CUSTOM_CLICK_THROUGH_URL, '')) and assignment.get(FieldMap.AD_LANDING_PAGE_ID, '') != 'CAMPAIGN_DEFAULT' else True, 'landingPageId': lp.get('id', None) if lp else None, 'customClickThroughUrl': assignment.get(FieldMap.CUSTOM_CLICK_THROUGH_URL, '')}})
creative_assignments.append(creative_assignment)
for assignment in feed_item['placement_assignment']:
placement = self._placement_dao.get(assignment, required=True)
if placement:
assignment[FieldMap.PLACEMENT_ID] = placement['id']
if not placement['id'] in assigned_placements:
assigned_placements.append(placement['id'])
placement_assignments.append({'active': True, 'placementId': assignment.get(FieldMap.PLACEMENT_ID, None)})
event_tags = [{'assignment': item, 'event_tag': self._event_tag_dao.get(item, required=True)} for item in feed_item['event_tag_assignment']]
event_tags += [{'assignment': item, 'event_tag': self._event_tag_dao.get(item, required=True)} for item in feed_item['placement_event_tag_profile']]
for item in event_tags:
assignment = item['assignment']
event_tag = item['event_tag']
if event_tag:
assignment[FieldMap.EVENT_TAG_ID] = event_tag['id']
if not event_tag['id'] in assigned_event_tags:
assigned_event_tags.append(event_tag['id'])
event_tag_assignments.append({'id': event_tag['id'], 'enabled': assignment.get(FieldMap.EVENT_TAG_ENABLED, True)})
|
Updates the ad by setting the values of child objects based on secondary feeds.
Args:
feed_item: Feed item representing the ad from the Bulkdozer feed.
creative_assignments: Feed items representing creative assignments related
with the current ad.
placement_assignments: Feed items representing placement assignments
related with the current ad.
event_tag_assignments: Feed items representing event tag assignments
related with the current ad.
|
github-repos
|
def initialize(self, table):
check_table_dtypes(table, self._keys.dtype, self._values.dtype)
with ops.name_scope(self._name, values=(table.resource_handle, self._keys, self._values)):
init_op = gen_lookup_ops.lookup_table_import_v2(table.resource_handle, self._keys, self._values)
ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
return init_op
|
Initializes the given `table` with `keys` and `values` tensors.
Args:
table: The table to initialize.
Returns:
The operation that initializes the table.
Raises:
TypeError: when the keys and values data types do not match the table
key and value data types.
|
github-repos
|
def trigger(self, attr, old, new, hint=None, setter=None):
def invoke():
callbacks = self._callbacks.get(attr)
if callbacks:
for callback in callbacks:
callback(attr, old, new)
if (hasattr(self, '_document') and (self._document is not None)):
self._document._notify_change(self, attr, old, new, hint, setter, invoke)
else:
invoke()
|
Trigger callbacks for ``attr`` on this object.
Args:
attr (str) :
old (object) :
new (object) :
Returns:
None
|
codesearchnet
|
def num_fmt(num, max_digits=None):
if (num is None):
return 'None'
def num_in_mag(num, mag):
return ((mag > num) and (num > ((- 1) * mag)))
if (max_digits is None):
if num_in_mag(num, 1):
if num_in_mag(num, 0.1):
max_digits = 4
else:
max_digits = 3
else:
max_digits = 1
if util_type.is_float(num):
num_str = ((('%.' + str(max_digits)) + 'f') % num)
num_str = num_str.rstrip('0').lstrip('0')
if num_str.startswith('.'):
num_str = ('0' + num_str)
if num_str.endswith('.'):
num_str = (num_str + '0')
return num_str
elif util_type.is_int(num):
return int_comma_str(num)
else:
return '%r'
|
r"""
Weird function. Not very well written. Very special case-y
Args:
num (int or float):
max_digits (int):
Returns:
str:
CommandLine:
python -m utool.util_num --test-num_fmt
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_num import * # NOQA
>>> # build test data
>>> num_list = [0, 0.0, 1.2, 1003232, 41431232., .0000000343, -.443243]
>>> max_digits = None
>>> # execute function
>>> result = [num_fmt(num, max_digits) for num in num_list]
>>> # verify results
>>> print(result)
['0', '0.0', '1.2', '1,003,232', '41431232.0', '0.0', '-0.443']
|
codesearchnet
|
def are_equal(self, mol1, mol2):
b1 = set(self._get_bonds(mol1))
b2 = set(self._get_bonds(mol2))
return (b1 == b2)
|
Compare the bond table of the two molecules.
Args:
mol1: first molecule. pymatgen Molecule object.
mol2: second molecule. pymatgen Molecule object.
|
codesearchnet
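A minimal sketch, assuming `matcher` is an instance of the comparator class this method belongs to; the molecules are built with the standard pymatgen `Molecule` constructor.
from pymatgen.core.structure import Molecule

h2_a = Molecule(["H", "H"], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.74]])
h2_b = Molecule(["H", "H"], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.74]])
print(matcher.are_equal(h2_a, h2_b))   # True when both bond tables match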
|
def __init__(self, stream):
super(BinaryWriter, self).__init__()
self.stream = stream
|
Create an instance.
Args:
stream (BytesIO): a stream to operate on, e.g. a neo.IO.MemoryStream or raw BytesIO.
|
juraj-google-style
|
def __init__(self, key, b64secret, passphrase,
api_url="https:
super(AuthenticatedClient, self).__init__(api_url)
self.auth = CBProAuth(key, b64secret, passphrase)
self.session = requests.Session()
|
Create an instance of the AuthenticatedClient class.
Args:
key (str): Your API key.
b64secret (str): The secret key matching your API key.
passphrase (str): Passphrase chosen when setting up key.
api_url (Optional[str]): API URL. Defaults to cbpro API.
|
juraj-google-style
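An instantiation sketch; the credential strings are placeholders, and the default `api_url` is whatever the class defines (the value is truncated in this entry).
client = AuthenticatedClient(
    key="YOUR_API_KEY",
    b64secret="YOUR_API_SECRET",
    passphrase="YOUR_PASSPHRASE",
)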
|
def _to_proto_sparse_tensor(sparse_tensor, nested_proto, process_leafs, already_processed):
already_processed.add(id(sparse_tensor))
nested_proto.named_tuple.name = _SPARSE_TENSOR_NAME
for str_key in _SPARSE_TENSOR_FIELD:
tensor = getattr(sparse_tensor, str_key)
nested_proto.named_tuple.map[str_key].value = process_leafs(tensor)
|
Serializes a `tf.SparseTensor` into `nested_proto`.
Args:
sparse_tensor: An instance of `tf.SparseTensor`.
nested_proto: A `module_pb2.NestedData` instance to be filled from
`sparse_tensor`.
process_leafs: A function to be applied to the leaf values of the nested
structure.
already_processed: Set of already processed objects (used to avoid
infinite recursion).
|
codesearchnet
|
def create_runner(ns_path, script, runner_type='Auto', optimized=True):
if ((runner_type == 'Auto') and DRMAA_AVAILABLE):
runner_type = 'GridRunner'
elif (runner_type == 'Auto'):
runner_type = 'ParallelRunner'
return locals().get(runner_type, globals().get(runner_type))(ns_path, script, optimized=optimized)
|
Create a SimulationRunner from a string containing the desired
class implementation, and return it.
Args:
ns_path (str): path to the ns-3 installation to employ in this
SimulationRunner.
script (str): ns-3 script that will be executed to run simulations.
runner_type (str): implementation of the SimulationRunner to use.
Value can be: SimulationRunner (for running sequential
simulations locally), ParallelRunner (for running parallel
simulations locally), GridRunner (for running simulations using
a DRMAA-compatible parallel task scheduler). If Auto,
automatically pick the best available runner (GridRunner if
DRMAA is available, ParallelRunner otherwise).
optimized (bool): whether to configure the runner to employ an
optimized ns-3 build.
|
codesearchnet
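A usage sketch for the factory above; the ns-3 path and script name are placeholders.
runner = create_runner(
    ns_path="/opt/ns-3-dev",        # placeholder ns-3 installation path
    script="wifi-simple-adhoc",     # placeholder ns-3 script name
    runner_type="Auto",             # GridRunner if DRMAA is available, else ParallelRunner
    optimized=True,
)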
|
def copy_default_config_to_user_directory(
basename,
clobber=False,
dst_dir='~/.config/scriptabit'):
dst_dir = os.path.expanduser(dst_dir)
dst = os.path.join(dst_dir, basename)
src = resource_filename(
Requirement.parse("scriptabit"),
os.path.join('scriptabit', basename))
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
if clobber or not os.path.isfile(dst):
shutil.copy(src, dst)
|
Copies the default configuration file into the user config directory.
Args:
basename (str): The base filename.
clobber (bool): If True, the default will be written even if a user
config already exists.
dst_dir (str): The destination directory.
|
juraj-google-style
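A usage sketch; the config file name is a placeholder, not a name confirmed by this entry.
# Install the packaged default config without overwriting an existing user copy.
copy_default_config_to_user_directory("scriptabit.cfg", clobber=False)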
|
def decode(model_path_prefix: Union[(str, Path)], input_paths: Sequence[Path], label_set: Set[str], *, feature_type: str='fbank', batch_size: int=64, feat_dir: Optional[Path]=None, batch_x_name: str='batch_x:0', batch_x_lens_name: str='batch_x_lens:0', output_name: str='hyp_dense_decoded:0') -> List[List[str]]:
if (not input_paths):
raise PersephoneException('No untranscribed WAVs to transcribe.')
model_path_prefix = str(model_path_prefix)
for p in input_paths:
if (not p.exists()):
raise PersephoneException('The WAV file path {} does not exist'.format(p))
preprocessed_file_paths = []
for p in input_paths:
prefix = p.stem
feature_file_ext = '.{}.npy'.format(feature_type)
conventional_npy_location = ((p.parent.parent / 'feat') / Path((prefix + feature_file_ext)))
if conventional_npy_location.exists():
preprocessed_file_paths.append(conventional_npy_location)
else:
if (not feat_dir):
feat_dir = (p.parent.parent / 'feat')
if (not feat_dir.is_dir()):
os.makedirs(str(feat_dir))
mono16k_wav_path = (feat_dir / '{}.wav'.format(prefix))
feat_path = (feat_dir / '{}.{}.npy'.format(prefix, feature_type))
feat_extract.convert_wav(p, mono16k_wav_path)
preprocessed_file_paths.append(feat_path)
if feat_dir:
feat_extract.from_dir(feat_dir, feature_type)
fn_batches = utils.make_batches(preprocessed_file_paths, batch_size)
metagraph = load_metagraph(model_path_prefix)
with tf.Session() as sess:
metagraph.restore(sess, model_path_prefix)
for fn_batch in fn_batches:
(batch_x, batch_x_lens) = utils.load_batch_x(fn_batch)
feed_dict = {batch_x_name: batch_x, batch_x_lens_name: batch_x_lens}
dense_decoded = sess.run(output_name, feed_dict=feed_dict)
indices_to_labels = labels.make_indices_to_labels(label_set)
human_readable = dense_to_human_readable(dense_decoded, indices_to_labels)
return human_readable
|
Use an existing tensorflow model saved on disk to decode
WAV files.
Args:
model_path_prefix: The path to the saved tensorflow model.
This is the full prefix to the ".ckpt" file.
input_paths: A sequence of `pathlib.Path`s to WAV files to put through
the model provided.
label_set: The set of all the labels this model uses.
feature_type: The type of features this model uses.
Note that this MUST match the type of features that the
model was trained on initially.
feat_dir: Any files that require preprocessing will be
saved to the path specified by this.
batch_x_name: The name of the tensorflow input for batch_x
batch_x_lens_name: The name of the tensorflow input for batch_x_lens
output_name: The name of the tensorflow output
|
codesearchnet
|
def GetArtifactsInProperOrder(self):
artifact_list = []
while self.reachable_nodes:
node_name = self.reachable_nodes.pop()
node = self.graph[node_name]
if node.is_artifact:
artifact_list.append(node_name)
for next_node_name in node.outgoing:
if (next_node_name not in self.graph):
continue
next_node = self.graph[next_node_name]
if next_node.is_provided:
continue
next_node.incoming.remove(node_name)
if (not (next_node.is_artifact and next_node.incoming)):
next_node.is_provided = True
self.reachable_nodes.add(next_node_name)
return artifact_list
|
Bring the artifacts into a linear order that resolves dependencies.
This method obtains a linear ordering of the nodes and then returns the list
of artifact names.
Returns:
A list of `ArtifactName` instances such that if they are collected in the
given order their dependencies are resolved.
|
codesearchnet
|
def getMonthsBuffer(self, direction):
if (direction == ReadMonths.kWhReverse):
return self.m_rev_mons
return self.m_mons
|
Get the months tariff SerialBlock for meter.
Args:
direction (int): A :class:`~ekmmeters.ReadMonths` value.
Returns:
SerialBlock: Requested months tariffs buffer.
|
codesearchnet
|
def get_tokens(max_value):
vocab = [str(i) for i in range(max_value)]
vocab = set(vocab)
vocab.update(CodeOp.LITERALS)
vocab.update(CodeOp.KEYWORDS)
vocab |= set(''.join(vocab))
return sorted(vocab)
|
Defines tokens.
Args:
max_value: the maximum numeric range for the token.
Returns:
list of string tokens in vocabulary.
|
codesearchnet
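A small illustration; the exact vocabulary depends on CodeOp.LITERALS and CodeOp.KEYWORDS, which are not shown in this entry.
tokens = get_tokens(10)
print(len(tokens))    # vocabulary size for numeric range 0..9 plus literals/keywords
print(tokens[:5])     # first few tokens in sorted order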
|
def __init__(self, url, username, password, enterprise, apiversion, sdk_identifier, monolithe_config):
self.url = url
self.username = username
self.password = password
self.enterprise = enterprise
self.apiversion = apiversion
self.monolithe_config = monolithe_config
self.sdk_identifier = sdk_identifier
|
Initializes Courgette
Args:
url (string): the url of the server with its port
username (string): the username to launch tests
password (string): the password to connect to the server
enterprise (string): the name of the enterprise to connect to the server
apiversion (float): the version of the API to connect
sdk_identifier (string): the full name of the SDK to use
|
juraj-google-style
|
def _expand_and_tile(tensor, multiple, dim=0, name=None):
if multiple < 1:
raise ValueError(f'Invalid argument multiple={multiple} for expand_and_tile call. `multiple` must be an integer > 0')
with ops.name_scope(name, 'expand_and_tile', (tensor, multiple, dim)) as scope:
tensor = sparse_tensor.convert_to_tensor_or_sparse_tensor(tensor)
if isinstance(tensor, sparse_tensor.SparseTensor):
if dim < 0:
expand_dims = array_ops.reshape(array_ops.size(tensor.dense_shape) + dim, [1])
else:
expand_dims = [dim]
expanded_shape = array_ops.concat((array_ops.slice(tensor.dense_shape, [0], expand_dims), [1], array_ops.slice(tensor.dense_shape, expand_dims, [-1])), 0, name='expanded_shape')
expanded = sparse_ops.sparse_reshape(tensor, shape=expanded_shape, name='expand')
if multiple == 1:
return expanded
return sparse_ops.sparse_concat(dim - 1 if dim < 0 else dim, [expanded] * multiple, name=scope)
expanded = array_ops.expand_dims(tensor, dim if dim >= 0 else dim - 1, name='expand')
if multiple == 1:
return expanded
ones = array_ops.ones_like(array_ops.shape(tensor))
tile_multiples = array_ops.concat((ones[:dim], (multiple,), ones[dim:]), 0, name='multiples')
return array_ops.tile(expanded, tile_multiples, name=scope)
|
Slice `tensor` shape in 2, then tile along the sliced dimension.
A new dimension is inserted in shape of `tensor` before `dim`, then values are
tiled `multiple` times along the new dimension.
Args:
tensor: Input `Tensor` or `SparseTensor`.
multiple: Integer, number of times to tile.
dim: Integer, dimension along which to tile.
name: Name of operation.
Returns:
`Tensor` result of expanding and tiling `tensor`.
Raises:
ValueError: if `multiple` is less than 1, or `dim` is not in
`[-rank(tensor), rank(tensor)]`.
|
github-repos
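A small dense-tensor illustration of the same expand-then-tile idea using public TensorFlow ops (the helper above is private API, so this sketch does not call it directly).
import tensorflow as tf

t = tf.constant([[1, 2], [3, 4]])       # shape [2, 2]
expanded = tf.expand_dims(t, axis=0)    # shape [1, 2, 2]
tiled = tf.tile(expanded, [3, 1, 1])    # shape [3, 2, 2]: 3 copies along the new dimension
print(tiled.shape)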
|
def _configure(self, session_config=None, cluster_spec=None, task_type=None, task_id=None):
if cluster_spec:
cluster_resolver = cluster_resolver_lib.SimpleClusterResolver(cluster_spec=multi_worker_util.normalize_cluster_spec(cluster_spec), task_type=task_type, task_id=task_id, num_accelerators={'GPU': self._num_gpus_per_worker})
self._initialize_multi_worker(cluster_resolver)
if session_config:
session_config.CopyFrom(self._update_config_proto(session_config))
|
Configures the strategy class with `cluster_spec`.
The strategy object will be re-initialized if `cluster_spec` is passed to
`configure` but was not passed when instantiating the strategy.
Args:
session_config: Session config object.
cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the
cluster configurations.
task_type: the current task type.
task_id: the current task id.
Raises:
ValueError: if `cluster_spec` is given but `task_type` or `task_id` is
not.
|
github-repos
|
def __init__(self, max_batch_size: int=5000, project: str=None, retry: Retry=None, timeout: float=120, metadata: Sequence[Tuple[str, str]]=(), catalog_name: str='default_catalog', event_store: str='default_event_store'):
self.max_batch_size = max_batch_size
self.project = project
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.catalog_name = catalog_name
self.event_store = event_store
|
Initializes a :class:`WriteUserEvent` transform.
Args:
max_batch_size (int): Required. Maximum number of catalog items
per request.
project (str): Optional. GCP project name in which the catalog
data will be imported.
retry: Optional. Designation of what
errors, if any, should be retried.
timeout (float): Optional. The amount of time, in seconds, to wait
for the request to complete.
metadata: Optional. Strings which
should be sent along with the request as metadata.
catalog_name (str): Optional. Name of the catalog.
Default: 'default_catalog'
event_store (str): Optional. Name of the event store.
Default: 'default_event_store'
|
github-repos
|