code | docstring | source
---|---|---|
def watch_key(self):
return _get_tensor_watch_key(self.node_name, self.output_slot, self.debug_op)
|
Watch key identifies a debug watch on a tensor.
Returns:
(`str`) A watch key, in the form of `tensor_name`:`debug_op`.
|
github-repos
|
def set_speech_text(self, text):
self.response.outputSpeech.type = 'PlainText'
self.response.outputSpeech.text = text
|
Set response output speech as plain text type.
Args:
text: str. Response speech used when type is 'PlainText'. Cannot exceed
8,000 characters.
|
juraj-google-style
|
def get_all(self, key=None):
key = self.definition.main_key if key is None else key
key = self.definition.key_synonyms.get(key, key)
entries = self._get_all(key)
if key in self.definition.scalar_nonunique_keys:
return set(entries)
return entries
|
Returns all data entries for a particular key. Default is the main key.
Args:
key (str): key whose values to return (default: main key)
Returns:
List of all data entries for the key
|
juraj-google-style
|
def encode_dataset(dataset, vocabulary):
def encode(features):
return {k: vocabulary.encode_tf(v) for (k, v) in features.items()}
return dataset.map(encode, num_parallel_calls=tf.data.experimental.AUTOTUNE)
|
Encode from strings to token ids.
Args:
dataset: a tf.data.Dataset with string values.
vocabulary: a mesh_tensorflow.transformer.Vocabulary
Returns:
a tf.data.Dataset with integer-vector values ending in EOS=1
|
codesearchnet
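A minimal usage sketch for `encode_dataset` above; `vocabulary` is assumed to be a mesh_tensorflow `Vocabulary` exposing `encode_tf`, and the example strings are placeholders.
import tensorflow as tf
# build a tiny string-valued dataset and encode every feature value to token ids
ds = tf.data.Dataset.from_tensor_slices({'targets': ['hello world', 'goodbye']})
encoded = encode_dataset(ds, vocabulary)  # `vocabulary` is an assumed Vocabulary instance
for features in encoded.take(1):
    print(features['targets'])  # an integer vector ending in EOS=1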
|
def _spin_product(variables):
multiplier, multiplicand, product, aux = variables
return BinaryQuadraticModel({multiplier: -.5,
multiplicand: -.5,
product: -.5,
aux: -1.},
{(multiplier, multiplicand): .5,
(multiplier, product): .5,
(multiplier, aux): 1.,
(multiplicand, product): .5,
(multiplicand, aux): 1.,
(product, aux): 1.},
2.,
Vartype.SPIN)
|
Create a bqm with a gap of 2 that represents the product of two variables.
Note that spin-product requires an auxiliary variable.
Args:
variables (list):
multiplier, multiplicand, product, aux
Returns:
:obj:`.BinaryQuadraticModel`
|
juraj-google-style
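An illustrative call of `_spin_product` above, assuming dimod's `BinaryQuadraticModel` and `Vartype` are imported as in the snippet; the variable labels are arbitrary.
# ground states of the returned BQM satisfy xy == x * y (in spin space),
# separated from all other states by an energy gap of 2
bqm = _spin_product(['x', 'y', 'xy', 'aux'])
print(bqm.vartype)         # Vartype.SPIN
print(len(bqm.quadratic))  # 6 pairwise couplings, as listed in the constructor call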
|
def _extract_variable_parts(variable_key, variable):
(name, offset, partitioned) = (None, None, False)
if variable._save_slice_info:
name = variable_key[:variable_key.rfind('/')]
if (not variable._save_slice_info.full_name.endswith(name)):
raise RuntimeError('Unexpected handling of partitioned variable.')
offset = variable._save_slice_info.var_offset[0]
partitioned = True
return (partitioned, name, offset)
|
Matches a variable to individual parts.
Args:
variable_key: String identifier of the variable in the module scope.
variable: Variable tensor.
Returns:
partitioned: Whether the variable is partitioned.
name: Name of the variable up to the partitioning.
offset: Offset of the variable into the full variable.
Raises:
RuntimeError: In case of unexpected variable format.
|
codesearchnet
|
def _on_response_message(self, sequence, topic, message):
try:
conn_key = self._find_connection(topic)
context = self.conns.get_context(conn_key)
except ArgumentError:
self._logger.warn("Dropping message that does not correspond with a known connection, message=%s", message)
return
if 'client' in message and message['client'] != self.name:
self._logger.debug("Dropping message that is for another client %s, we are %s", message['client'], self.name)
if messages.DisconnectionResponse.matches(message):
self.conns.finish_disconnection(conn_key, message['success'], message.get('failure_reason', None))
elif messages.OpenInterfaceResponse.matches(message):
self.conns.finish_operation(conn_key, message['success'], message.get('failure_reason', None))
elif messages.RPCResponse.matches(message):
rpc_message = messages.RPCResponse.verify(message)
self.conns.finish_operation(conn_key, rpc_message['success'], rpc_message.get('failure_reason', None), rpc_message.get('status', None), rpc_message.get('payload', None))
elif messages.ProgressNotification.matches(message):
progress_callback = context.get('progress_callback', None)
if progress_callback is not None:
progress_callback(message['done_count'], message['total_count'])
elif messages.ScriptResponse.matches(message):
if 'progress_callback' in context:
del context['progress_callback']
self.conns.finish_operation(conn_key, message['success'], message.get('failure_reason', None))
elif messages.DisconnectionNotification.matches(message):
try:
conn_key = self._find_connection(topic)
conn_id = self.conns.get_connection_id(conn_key)
except ArgumentError:
self._logger.warn("Dropping disconnect notification that does not correspond with a known connection, topic=%s", topic)
return
self.conns.unexpected_disconnect(conn_key)
self._trigger_callback('on_disconnect', self.id, conn_id)
else:
self._logger.warn("Invalid response message received, message=%s", message)
|
Process a response message received
Args:
sequence (int): The sequence number of the packet received
topic (string): The topic this message was received on
message (dict): The message itself
|
juraj-google-style
|
def load_feature_lists(self, feature_lists):
column_names = []
feature_ranges = []
running_feature_count = 0
for list_id in feature_lists:
feature_list_names = load_lines((self.features_dir + 'X_train_{}.names'.format(list_id)))
column_names.extend(feature_list_names)
start_index = running_feature_count
end_index = ((running_feature_count + len(feature_list_names)) - 1)
running_feature_count += len(feature_list_names)
feature_ranges.append([list_id, start_index, end_index])
X_train = np.hstack([load((self.features_dir + 'X_train_{}.pickle'.format(list_id))) for list_id in feature_lists])
X_test = np.hstack([load((self.features_dir + 'X_test_{}.pickle'.format(list_id))) for list_id in feature_lists])
df_train = pd.DataFrame(X_train, columns=column_names)
df_test = pd.DataFrame(X_test, columns=column_names)
return (df_train, df_test, feature_ranges)
|
Load pickled features for train and test sets, assuming they are saved
in the `features` folder along with their column names.
Args:
feature_lists: A list containing the names of the feature lists to load.
Returns:
A tuple containing 3 items: train dataframe, test dataframe,
and a list describing the index ranges for the feature lists.
|
codesearchnet
|
def from_dict(self, graph_dict):
self.reset_graph()
for new_node in graph_dict:
self.add_node(new_node)
for ind_node, dep_nodes in graph_dict.items():
if not isinstance(dep_nodes, collections.Iterable):
raise TypeError('%s: dict values must be lists' % ind_node)
for dep_node in dep_nodes:
self.add_edge(ind_node, dep_node)
|
Reset the graph and build it from the passed dictionary.
The dictionary takes the form of {node_name: [directed edges]}
Args:
graph_dict (dict): The dictionary used to create the graph.
Raises:
TypeError: Raised if the values of items in the dict are not lists.
|
juraj-google-style
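A usage sketch for `from_dict`; the enclosing graph class is not shown here, so the `DAG` constructor name is hypothetical.
dag = DAG()  # hypothetical instance of the class defining from_dict above
dag.from_dict({'a': ['b', 'c'],   # edges a->b and a->c
               'b': ['c'],        # edge b->c
               'c': []})          # 'c' has no outgoing edges
# a non-iterable value such as {'a': 1} raises TypeError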
|
def _set_input_tensors(self, interpreter: _interpreter.Interpreter, tensor_data: Sequence[np.ndarray], initialize: bool) -> None:
input_details = interpreter.get_input_details()
if len(input_details) != len(tensor_data):
raise ValueError('Number of inputs provided ({}) does not match number of inputs to the model ({})'.format(len(tensor_data), len(input_details)))
if initialize:
for input_detail, tensor in zip(input_details, tensor_data):
interpreter.resize_tensor_input(input_detail['index'], tensor.shape)
interpreter.allocate_tensors()
for input_detail, tensor in zip(input_details, tensor_data):
if tensor.dtype == np.float32 and input_detail['dtype'] == np.int8:
quant_params = _get_quant_params(input_detail)
if quant_params:
scale, zero_point = quant_params
tensor = np.round(tensor / scale + zero_point).astype(np.int8)
interpreter.set_tensor(input_detail['index'], tensor)
|
Sets input tensors into TFLite model Interpreter.
Args:
interpreter: a tf.lite.Interpreter object with allocated tensors.
tensor_data: a list of Numpy array data.
initialize: set to true when input is first set for the interpreter, to
set input shapes and allocate tensors.
Raises:
ValueError: when inputs can't be set, or size of provided inputs does not
match size of model inputs.
|
github-repos
|
def export(self, top=True):
out = []
if top:
out.append(self._internal_name)
out.append(self._to_str(self.typical_or_extreme_period_name))
out.append(self._to_str(self.typical_or_extreme_period_type))
out.append(self._to_str(self.period_start_day))
out.append(self._to_str(self.period_end_day))
return ",".join(out)
|
Exports object to its string representation.
Args:
top (bool): if True appends `internal_name` before values.
All non-list objects should be exported with top=True, while all
list objects that are embedded as fields in list objects should be
exported with `top`=False
Returns:
str: The objects string representation
|
juraj-google-style
|
async def find_deleted(self, seq_set: SequenceSet, selected: SelectedMailbox) -> Sequence[int]:
session_flags = selected.session_flags
return [msg.uid async for (_, msg) in self.find(seq_set, selected) if (Deleted in msg.get_flags(session_flags))]
|
Return all the active message UIDs that have the ``\\Deleted`` flag.
Args:
seq_set: The sequence set of the possible messages.
selected: The selected mailbox session.
|
codesearchnet
|
def take_screenshot(self, destination, prefix='screenshot', all_displays=False):
filename = self.generate_filename(prefix, extension_name='png')
filename_no_extension, _ = os.path.splitext(filename)
device_path = os.path.join('/storage/emulated/0/', filename)
self.adb.shell(['screencap', '-p', '-a' if all_displays else '', device_path], timeout=TAKE_SCREENSHOT_TIMEOUT_SECOND)
utils.create_dir(destination)
if all_displays:
pic_paths = []
png_files = [device_path]
png_files = self.adb.shell('ls /storage/emulated/0/*.png').decode('utf-8').split('\n')
for device_path in png_files:
if device_path.find(filename_no_extension) < 0:
continue
self.adb.pull([device_path, destination])
pic_paths.append(os.path.join(destination, os.path.basename(device_path)))
self.log.debug('Screenshot taken, saved on the host: %s', pic_paths[-1])
self.adb.shell(['rm', device_path])
return pic_paths
self.adb.pull([device_path, destination])
pic_path = os.path.join(destination, filename)
self.log.debug('Screenshot taken, saved on the host: %s', pic_path)
self.adb.shell(['rm', device_path])
return pic_path
|
Takes a screenshot of the device.
Args:
destination: string, full path to the directory to save in.
prefix: string, prefix file name of the screenshot.
all_displays: bool, if true will take a screenshot on all connected
displays, if false will take a screenshot on the default display.
Returns:
string, full path to the screenshot file on the host, or
list[str], when all_displays is True, the full paths to the screenshot
files on the host.
|
github-repos
|
def extend(self, *bindings):
self._bindings.extend(self._preprocess(bindings))
return self
|
Append the given bindings to this keymap.
Arguments:
*bindings (Binding): Bindings to be added.
Returns:
Keymap: self
|
codesearchnet
|
def init_test_examples_dependencies() -> Tuple[Dict[str, List[str]], List[str]]:
test_example_deps = {}
all_examples = []
for framework in ['flax', 'pytorch', 'tensorflow']:
test_files = list((PATH_TO_EXAMPLES / framework).glob('test_*.py'))
all_examples.extend(test_files)
examples = [f for f in (PATH_TO_EXAMPLES / framework).glob('**/*.py') if f.parent != PATH_TO_EXAMPLES / framework]
all_examples.extend(examples)
for test_file in test_files:
with open(test_file, 'r', encoding='utf-8') as f:
content = f.read()
test_example_deps[str(test_file.relative_to(PATH_TO_REPO))] = [str(e.relative_to(PATH_TO_REPO)) for e in examples if e.name in content]
test_example_deps[str(test_file.relative_to(PATH_TO_REPO))].append(str(test_file.relative_to(PATH_TO_REPO)))
return (test_example_deps, all_examples)
|
The test examples do not import from the examples (which are just scripts, not modules) so we need some extra
care initializing the dependency map, which is the goal of this function. It initializes the dependency map for
example files by linking each example to the example test file for the example framework.
Returns:
`Tuple[Dict[str, List[str]], List[str]]`: A tuple with two elements: the initialized dependency map which is a
dict test example file to list of example files potentially tested by that test file, and the list of all
example files (to avoid recomputing it later).
|
github-repos
|
def IsErrorSuppressedByNolint(category, linenum):
return (_global_error_suppressions.get(category, False) or
linenum in _error_suppressions.get(category, set()) or
linenum in _error_suppressions.get(None, set()))
|
Returns true if the specified error category is suppressed on this line.
Consults the global error_suppressions map populated by
ParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions.
Args:
category: str, the category of the error.
linenum: int, the current line number.
Returns:
bool, True iff the error should be suppressed due to a NOLINT comment or
global suppression.
|
juraj-google-style
|
def get_response(response: Dict[(str, Any)]) -> JSONRPCResponse:
if ('error' in response):
return ErrorResponse(**response)
return SuccessResponse(**response)
|
Converts a deserialized response into a JSONRPCResponse object.
The dictionary can be either an error or success response, never a notification.
Args:
response: Deserialized response dictionary. We can assume the response is valid
JSON-RPC here, since it passed the jsonschema validation.
|
codesearchnet
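Two illustrative payloads for `get_response`; the exact constructor fields accepted by `SuccessResponse` and `ErrorResponse` depend on the surrounding JSON-RPC client library and are assumed here.
ok = get_response({'jsonrpc': '2.0', 'result': 42, 'id': 1})   # -> SuccessResponse
err = get_response({'jsonrpc': '2.0', 'id': 1,
                    'error': {'code': -32601, 'message': 'Method not found'}})  # -> ErrorResponse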
|
def read(self, size=None):
if not self._is_open:
raise IOError('Not opened.')
if self._current_offset < 0:
raise IOError(
'Invalid current offset: {0:d} value less than zero.'.format(
self._current_offset))
if self._decoded_stream_size is None:
self._decoded_stream_size = self._GetDecodedStreamSize()
if self._decoded_stream_size < 0:
raise IOError('Invalid decoded stream size.')
if self._current_offset >= self._decoded_stream_size:
return b''
if self._realign_offset:
self._AlignDecodedDataOffset(self._current_offset)
self._realign_offset = False
if size is None:
size = self._decoded_stream_size
if self._current_offset + size > self._decoded_stream_size:
size = self._decoded_stream_size - self._current_offset
decoded_data = b''
if size == 0:
return decoded_data
while size > self._decoded_data_size:
decoded_data = b''.join([
decoded_data,
self._decoded_data[self._decoded_data_offset:]])
remaining_decoded_data_size = (
self._decoded_data_size - self._decoded_data_offset)
self._current_offset += remaining_decoded_data_size
size -= remaining_decoded_data_size
if self._current_offset >= self._decoded_stream_size:
break
read_count = self._ReadEncodedData(self._ENCODED_DATA_BUFFER_SIZE)
self._decoded_data_offset = 0
if read_count == 0:
break
if size > 0:
slice_start_offset = self._decoded_data_offset
slice_end_offset = slice_start_offset + size
decoded_data = b''.join([
decoded_data,
self._decoded_data[slice_start_offset:slice_end_offset]])
self._decoded_data_offset += size
self._current_offset += size
return decoded_data
|
Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
|
juraj-google-style
|
def modified_files(root, tracked_only=False, commit=None):
assert os.path.isabs(root), "Root has to be absolute, got: %s" % root
command = ['hg', 'status']
if commit:
command.append('--change=%s' % commit)
status_lines = subprocess.check_output(command).decode('utf-8').split(
os.linesep)
modes = ['M', 'A']
if not tracked_only:
modes.append(r'\?')
modes_str = '|'.join(modes)
modified_file_status = utils.filter_lines(
status_lines,
r'(?P<mode>%s) (?P<filename>.+)' % modes_str,
groups=('filename', 'mode'))
return dict((os.path.join(root, filename), mode)
for filename, mode in modified_file_status)
|
Returns a dictionary of files that have been modified since the last commit.
Args:
root: the root of the repository, it has to be an absolute path.
tracked_only: exclude untracked files when True.
commit: SHA1 of the commit. If None, it will get the modified files in the
working copy.
Returns: a dictionary with the modified files as keys, and additional
information as value. In this case it adds the status returned by
hg status.
|
juraj-google-style
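A small usage sketch for `modified_files`; the repository path is a placeholder.
# tracked files changed in the working copy of a Mercurial repository
changes = modified_files('/abs/path/to/repo', tracked_only=True)
for path, mode in changes.items():
    print(mode, path)  # e.g. "M /abs/path/to/repo/setup.py"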
|
def set_exception(self, exception):
if self.done():
raise RuntimeError('set_exception can only be called once.')
self._exception = exception
self._trigger()
|
Set the result of the future to the given exception.
Args:
exception (:exc:`Exception`): The exception raised.
|
codesearchnet
|
def process_update(x):
if callable(x):
update = lambda: process_update(x())
return update()
elif isinstance(x, ops.Operation):
update = x
elif hasattr(x, 'op'):
update = x.op
else:
update = tensor_conversion.convert_to_tensor_v2_with_dispatch(x)
reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, [update])
update._unconditional_update = update not in reachable
return update
|
Standardize update ops.
Args:
x: Tensor, op, or callable.
Returns:
An update op.
|
github-repos
|
def get_application_configurations(self, name=None):
if hasattr(self, 'applicationConfigurations'):
return self._get_elements(self.applicationConfigurations, 'applicationConfigurations', ApplicationConfiguration, None, name)
|
Retrieves application configurations for this instance.
Args:
name (str, optional): Only return application configurations containing property **name** that matches `name`. `name` can be a
regular expression. If `name` is not supplied, then all application configurations are returned.
Returns:
list(ApplicationConfiguration): A list of application configurations matching the given `name`.
.. versionadded 1.12
|
codesearchnet
|
def add_user(self, user_obj):
LOG.info('Adding user %s to the database', user_obj['email'])
if (not ('_id' in user_obj)):
user_obj['_id'] = user_obj['email']
try:
self.user_collection.insert_one(user_obj)
LOG.debug('User inserted')
except DuplicateKeyError as err:
raise IntegrityError('User {} already exists in database'.format(user_obj['email']))
return user_obj
|
Add a user object to the database
Args:
user_obj(scout.models.User): A dictionary with user information
Returns:
user_info(dict): a copy of what was inserted
|
codesearchnet
|
def wait_for_fresh_games(self, poll_interval=15.0):
wait_until_game = self.read_wait_cell()
if not wait_until_game:
return
latest_game = self.latest_game_number
last_latest = latest_game
while latest_game < wait_until_game:
utils.dbg('Latest game {} not yet at required game {} '
'(+{}, {:0.3f} games/sec)'.format(
latest_game,
wait_until_game,
latest_game - last_latest,
(latest_game - last_latest) / poll_interval
))
time.sleep(poll_interval)
last_latest = latest_game
latest_game = self.latest_game_number
|
Block caller until required new games have been played.
Args:
poll_interval: number of seconds to wait between checks
If the cell `table_state=metadata:wait_for_game_number` exists,
then block the caller, checking every `poll_interval` seconds,
until `table_state=metadata:game_counter` is at least the value
in that cell.
|
juraj-google-style
|
def remove_role(self, databaseName, roleName, collectionName=None):
role = {"databaseName" : databaseName,
"roleName" : roleName}
if collectionName:
role["collectionName"] = collectionName
if role in self.roles:
self.roles.remove(role)
|
Remove one role
Args:
databaseName (str): Database Name
roleName (RoleSpecs): role
Keyword Args:
collectionName (str): Collection
|
juraj-google-style
|
def _process_update(self, item, feed_item):
item['name'] = feed_item.get(FieldMap.CREATIVE_NAME, None)
self._associate_third_party_urls(feed_item, item)
self._associate_click_tags(feed_item, item)
|
Updates a creative based on the values from the feed.
Args:
item: Object representing the creative to be updated, this object is
updated directly.
feed_item: Feed item representing creative values from the Bulkdozer feed.
|
github-repos
|
def pixelate(x, severity=1):
c = [0.6, 0.5, 0.4, 0.3, 0.25][(severity - 1)]
shape = x.shape
x = tfds.core.lazy_imports.PIL_Image.fromarray(x.astype(np.uint8))
x = x.resize((int((shape[1] * c)), int((shape[0] * c))))
x = x.resize((shape[1], shape[0]))
return np.asarray(x)
|
Pixelate images.
Conduct pixelating corruptions to images by first shrinking the images and
then resizing to original size.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied pixelating
corruption.
|
codesearchnet
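A quick sketch applying `pixelate` to a synthetic image; it assumes `tensorflow_datasets` (whose lazy PIL import the function uses) is installed.
import numpy as np
img = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)
out = pixelate(img, severity=3)
print(out.shape, out.dtype)  # (224, 224, 3) uint8, visibly blockier than the input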
|
def generate_proxy_api_files(output_files: list[str], proxy_module_root: str, output_dir: str):
for file in output_files:
file_dir = os.path.dirname(file)
if not os.path.isdir(file_dir):
os.makedirs(file_dir)
module = get_module(file_dir, output_dir)
content = f'from {proxy_module_root}.{module} import *'
with open(file, 'w') as f:
f.write(content)
|
Creates __init__.py files in proxy format for the Python API.
Args:
output_files: List of __init__.py file paths to create.
proxy_module_root: Module root for proxy-import format. If specified, proxy
files with content like `from proxy_module_root.proxy_module import *`
will be created to enable import resolution under TensorFlow.
output_dir: output API root directory.
|
github-repos
|
def _prefix(self):
return self._checkpoint_prefix
|
A common prefix for all checkpoints saved with this manager.
For example, if `directory` (a constructor argument) were `"/tmp/tf-model"`,
`prefix` would be `"/tmp/tf-model/ckpt"` and checkpoints would generally be
numbered `"/tmp/tf-model/ckpt-1"`, `"/tmp/tf-model/ckpt-2"`, and so on. Each
checkpoint has several associated files
(e.g. `"/tmp/tf-model/ckpt-2.index"`).
Returns:
A string prefix.
|
github-repos
|
def finish_operation(self, conn_or_internal_id, success, *args):
data = {'id': conn_or_internal_id, 'success': success, 'callback_args': args}
action = ConnectionAction('finish_operation', data, sync=False)
self._actions.put(action)
|
Finish an operation on a connection.
Args:
conn_or_internal_id (string, int): Either an integer connection id or a string
internal_id
success (bool): Whether the operation was successful
failure_reason (string): Optional reason why the operation failed
result (dict): Optional dictionary containing the results of the operation
|
codesearchnet
|
def attach_bytes(key, the_bytes):
tf_v1.add_to_collection(_ATTACHMENT_COLLECTION_INTERNAL, module_attachment_pb2.ModuleAttachment(key=key, value=the_bytes))
|
Adds a ModuleAttachment to the current graph.
Args:
key: A string with the unique key of the attachment.
the_bytes: A bytes object with the serialized attachment.
|
codesearchnet
|
def flat_transforms_to_matrices(transforms):
with ops.name_scope('flat_transforms_to_matrices'):
transforms = ops.convert_to_tensor(transforms, name='transforms')
if transforms.shape.ndims not in (1, 2):
raise ValueError('Transforms should be 1D or 2D, got: %s' % transforms)
transforms = array_ops.reshape(transforms, constant_op.constant([-1, 8]))
num_transforms = array_ops.shape(transforms)[0]
return array_ops.reshape(array_ops.concat([transforms, array_ops.ones([num_transforms, 1])], axis=1), constant_op.constant([-1, 3, 3]))
|
Converts `tf.contrib.image` projective transforms to affine matrices.
Note that the output matrices map output coordinates to input coordinates. For
the forward transformation matrix, call `tf.linalg.inv` on the result.
Args:
transforms: Vector of length 8, or batches of transforms with shape `(N,
8)`.
Returns:
3D tensor of matrices with shape `(N, 3, 3)`. The output matrices map the
*output coordinates* (in homogeneous coordinates) of each transform to the
corresponding *input coordinates*.
Raises:
ValueError: If `transforms` have an invalid shape.
|
github-repos
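A worked example for `flat_transforms_to_matrices`: the 8-parameter identity transform becomes the 3x3 identity matrix once the function appends the trailing 1.
import tensorflow as tf  # assumed available alongside the TF internals used above
identity = tf.constant([[1., 0., 0., 0., 1., 0., 0., 0.]])
matrices = flat_transforms_to_matrices(identity)
print(matrices.shape)  # (1, 3, 3)
# matrices[0] == [[1, 0, 0], [0, 1, 0], [0, 0, 1]]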
|
def classify_format(f):
(l0, l1) = _get_two_lines(f)
if loader.glove.check_valid(l0, l1):
return _glove
elif loader.word2vec_text.check_valid(l0, l1):
return _word2vec_text
elif loader.word2vec_bin.check_valid(l0, l1):
return _word2vec_bin
else:
raise OSError('Invalid format')
|
Determine the format of a word embedding file by its content. This operation
only looks at the first two lines and does not check the sanity of input
file.
Args:
f (Filelike):
Returns:
class
|
codesearchnet
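A usage sketch for `classify_format`; the file name is a placeholder, and the file is opened in binary mode so the word2vec binary check can inspect raw bytes.
with open('embeddings.vec', 'rb') as f:  # placeholder path
    fmt = classify_format(f)             # one of the loader classes: glove, word2vec text, word2vec bin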
|
def get_pose_error(target_pose, current_pose):
error = np.zeros(6)
target_pos = target_pose[:3, 3]
current_pos = current_pose[:3, 3]
pos_err = target_pos - current_pos
r1 = current_pose[:3, 0]
r2 = current_pose[:3, 1]
r3 = current_pose[:3, 2]
r1d = target_pose[:3, 0]
r2d = target_pose[:3, 1]
r3d = target_pose[:3, 2]
rot_err = 0.5 * (np.cross(r1, r1d) + np.cross(r2, r2d) + np.cross(r3, r3d))
error[:3] = pos_err
error[3:] = rot_err
return error
|
Computes the error corresponding to target pose - current pose as a 6-dim vector.
The first 3 components correspond to translational error while the last 3 components
correspond to the rotational error.
Args:
target_pose: a 4x4 homogeneous matrix for the target pose
current_pose: a 4x4 homogeneous matrix for the current pose
Returns:
A 6-dim numpy array for the pose error.
|
juraj-google-style
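A worked example for `get_pose_error`: a pure 0.1 translation along x appears only in the translational part of the error, and identical rotations give zero rotational error.
import numpy as np
target = np.eye(4)
current = np.eye(4)
current[:3, 3] = [0.1, 0.0, 0.0]
err = get_pose_error(target, current)
print(err)  # [-0.1  0.   0.   0.   0.   0. ]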
|
def get_doc_dicts(self, doc_ids: np.ndarray) -> List[dict]:
raise NotImplementedError
|
Returns a list of dictionaries, containing titles and text of the retrieved documents.
Args:
doc_ids (`np.ndarray` of shape `(batch_size, n_docs)`):
A tensor of document indices.
|
github-repos
|
def _flatten_resource(self, resource: message.Message, select_expr: Mapping[str, python_compiled_expressions.PythonCompiledExpression]) -> Dict[str, Any]:
flat_resource = {}
for col_name, expr in select_expr.items():
messages = expr.evaluate(resource).messages
if len(messages) > 1:
flat_resource[col_name] = []
for msg in messages:
flat_resource[col_name].append(proto_utils.get_value_at_field(msg, 'value'))
elif len(messages) == 1:
flat_resource[col_name] = proto_utils.get_value_at_field(messages[0], 'value')
else:
flat_resource[col_name] = None
return flat_resource
|
Returns a dictionary representing a resource.
Each key matches a column name from the view config select provided by the
user. The corresponding value is the value found in the resource or a list
of matching values in the resource.
Args:
resource: a singular resource from the bundle returned from the FHIR
server.
select_expr: a dictionary representing the column name and compiled fhir
path for each select expression.
|
github-repos
|
def ParseCallsRow(self, parser_mediator, query, row, **unused_kwargs):
query_hash = hash(query)
call_type = self._GetRowValue(query_hash, row, 'type')
call_type = self.CALL_TYPE.get(call_type, 'UNKNOWN')
duration = self._GetRowValue(query_hash, row, 'duration')
timestamp = self._GetRowValue(query_hash, row, 'date')
event_data = AndroidCallEventData()
event_data.call_type = call_type
event_data.duration = self._GetRowValue(query_hash, row, 'duration')
event_data.name = self._GetRowValue(query_hash, row, 'name')
event_data.number = self._GetRowValue(query_hash, row, 'number')
event_data.offset = self._GetRowValue(query_hash, row, 'id')
event_data.query = query
date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, 'Call Started')
parser_mediator.ProduceEventWithEventData(event, event_data)
if duration:
if isinstance(duration, py2to3.STRING_TYPES):
try:
duration = int(duration, 10)
except ValueError:
duration = 0
timestamp += duration * 1000
date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, 'Call Ended')
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses a Call record row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
|
juraj-google-style
|
def resize(img, size, interpolation=Image.BILINEAR):
if (not _is_pil_image(img)):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
if (not (isinstance(size, int) or (isinstance(size, Iterable) and (len(size) == 2)))):
raise TypeError('Got inappropriate size arg: {}'.format(size))
if isinstance(size, int):
(w, h) = img.size
if (((w <= h) and (w == size)) or ((h <= w) and (h == size))):
return img
if (w < h):
ow = size
oh = int(((size * h) / w))
return img.resize((ow, oh), interpolation)
else:
oh = size
ow = int(((size * w) / h))
return img.resize((ow, oh), interpolation)
else:
return img.resize(size[::(- 1)], interpolation)
|
r"""Resize the input PIL Image to the given size.
Args:
img (PIL Image): Image to be resized.
size (sequence or int): Desired output size. If size is a sequence like
(h, w), the output size will be matched to this. If size is an int,
the smaller edge of the image will be matched to this number, maintaining
the aspect ratio, i.e., if height > width, then the image will be rescaled to
:math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`
interpolation (int, optional): Desired interpolation. Default is
``PIL.Image.BILINEAR``
Returns:
PIL Image: Resized image.
|
codesearchnet
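A usage sketch for `resize` showing the aspect-ratio-preserving integer case; the input image is synthetic.
from PIL import Image
img = Image.new('RGB', (640, 480))
out = resize(img, 224)   # smaller edge (the height, 480) is scaled to 224
print(out.size)          # (298, 224): width scaled by the same 224/480 factor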
|
def config(self, configlet=None, plane='sdr', **attributes):
begin = time.time()
label = self._chain.target_device.config(configlet, plane, **attributes)
elapsed = time.time() - begin
if label:
self.emit_message("Configuration change last {:.0f}s. Label: {}".format(elapsed, label),
log_level=logging.INFO)
else:
self.emit_message("Configuration failed.", log_level=logging.WARNING)
return label
|
Configure the device.
This method applies configuration to the device.
Args:
configlet (text): The configuration template.
plane (text): sdr or admin
attributes (dict): The dictionary of attributes used in template.
Returns:
A string with commit label or None
|
juraj-google-style
|
def pad(self, file, size=6):
for element in self.splitter.split(file):
if _validate_payload_size(element, size):
yield element
|
Group together as many records as possible to fit in the specified size
This SingleRecordStrategy will not group any record and will return them one by one as
long as they are within the maximum size.
Args:
file (str): file path to read the records from.
size (int): maximum size in MB that each group of records will be fitted to.
passing 0 means unlimited size.
Returns:
generator of records
|
juraj-google-style
|
def restore_or_initialize(self):
if self._latest_checkpoint is not None:
self._checkpoint.restore(self._latest_checkpoint)
if self._checkpoint_interval is not None:
self._last_checkpoint_step = _evaluate(self._step_counter)
return self._latest_checkpoint
if self._init_fn is not None:
self._init_fn()
logging.info('Customized initialization is done through the passed `init_fn`.')
return None
|
Restore items in `checkpoint` from the latest checkpoint file.
This method will first try to restore from the most recent checkpoint in
`directory`. If no checkpoints exist in `directory`, and `init_fn` is
specified, this method will call `init_fn` to do customized
initialization. This can be used to support initialization from pretrained
models.
Note that unlike `tf.train.Checkpoint.restore()`, this method doesn't return
a load status object that users can run assertions on
(e.g. assert_consumed()). Thus to run assertions, users should directly use
`tf.train.Checkpoint.restore()` method.
Returns:
The restored checkpoint path if the latest checkpoint is found and
restored. Otherwise None.
|
github-repos
|
def call(poly, args):
args = list(args)
if (len(args) < poly.dim):
args = (args + ([np.nan] * (poly.dim - len(args))))
elif (len(args) > poly.dim):
raise ValueError('too many arguments')
(x0, x1) = ([], [])
for (idx, arg) in enumerate(args):
if isinstance(arg, Poly):
poly_ = Poly({tuple(np.eye(poly.dim)[idx]): np.array(1)})
x0.append(poly_)
x1.append(arg)
args[idx] = np.nan
if x0:
poly = call(poly, args)
return substitute(poly, x0, x1)
masks = np.zeros(len(args), dtype=bool)
for (idx, arg) in enumerate(args):
if (np.ma.is_masked(arg) or np.any(np.isnan(arg))):
masks[idx] = True
args[idx] = 0
shape = np.array(args[np.argmax([np.prod(np.array(arg).shape) for arg in args])]).shape
args = np.array([(np.ones(shape, dtype=int) * arg) for arg in args])
A = {}
for key in poly.keys:
key_ = (np.array(key) * (1 - masks))
val = np.outer(poly.A[key], np.prod((args.T ** key_).T, axis=0))
val = np.reshape(val, (poly.shape + tuple(shape)))
val = np.where((val != val), 0, val)
mkey = tuple((np.array(key) * masks))
if (not (mkey in A)):
A[mkey] = val
else:
A[mkey] = (A[mkey] + val)
out = Poly(A, poly.dim, None, None)
if (out.keys and (not np.sum(out.keys))):
out = out.A[out.keys[0]]
elif (not out.keys):
out = np.zeros(out.shape, dtype=out.dtype)
return out
|
Evaluate a polynomial along specified axes.
Args:
poly (Poly):
Input polynomial.
args (numpy.ndarray):
Argument to be evaluated. Masked values keeps the variable intact.
Returns:
(Poly, numpy.ndarray):
If masked values are used the Poly is returned. Else a numpy array
matching the polynomial's shape is returned.
|
codesearchnet
|
def add_reciprocal_link(self, target, weight):
if (not isinstance(target, list)):
target_list = [target]
else:
target_list = target
for t in target_list:
self.add_link(t, weight)
t.add_link(self, weight)
|
Add links pointing in either direction between ``self`` and ``target``.
This creates a ``Link`` from ``self`` to ``target`` and a ``Link``
from ``target`` to ``self`` of equal weight. If ``target`` is a list
of ``Node`` 's, repeat this for each one.
Args:
target (Node or list[Node]):
weight (int or float):
Returns: None
Example:
>>> node_1 = Node('One')
>>> node_2 = Node('Two')
>>> node_1.add_reciprocal_link(node_2, 5)
>>> new_link_1 = node_1.link_list[0]
>>> new_link_2 = node_2.link_list[0]
>>> print(new_link_1)
node.Link instance pointing to node with value "Two" with weight 5
>>> print(new_link_2)
node.Link instance pointing to node with value "One" with weight 5
|
codesearchnet
|
def resolve_theme(self, name):
if (name not in settings.CODEMIRROR_THEMES):
msg = "Given theme name '{}' does not exists in 'settings.CODEMIRROR_THEMES'."
raise UnknowThemeError(msg.format(name))
return settings.CODEMIRROR_THEMES.get(name)
|
From given theme name, return theme file path from
``settings.CODEMIRROR_THEMES`` map.
Arguments:
name (string): Theme name.
Raises:
UnknowThemeError: When given name does not exist in
``settings.CODEMIRROR_THEMES``.
Returns:
string: Theme file path.
|
codesearchnet
|
def random_transform(self, x, seed=None):
params = self.get_random_transform(x.shape, seed)
return self.apply_transform(x, params)
|
Applies a random transformation to an image.
Args:
x: 3D tensor, single image.
seed: Random seed.
Returns:
A randomly transformed version of the input (same shape).
|
github-repos
|
def _parse_email(self, val):
ret = {'type': None, 'value': None}
try:
ret['type'] = val[1]['type']
except (KeyError, ValueError, TypeError):
pass
ret['value'] = val[3].strip()
try:
self.vars['email'].append(ret)
except AttributeError:
self.vars['email'] = []
self.vars['email'].append(ret)
|
The function for parsing the vcard email addresses.
Args:
val (:obj:`list`): The value to parse.
|
codesearchnet
|
def prepare_config(config: Optional[config_pb2.ConfigProto]) -> config_pb2.ConfigProto:
allow_soft_placement = not force_gpu
if config is None:
config = context.context().config
config.allow_soft_placement = allow_soft_placement
elif not allow_soft_placement and config.allow_soft_placement:
config_copy = context.context().config
config = config_copy
config.allow_soft_placement = False
config.graph_options.optimizer_options.opt_level = -1
config.graph_options.rewrite_options.constant_folding = rewriter_config_pb2.RewriterConfig.OFF
config.graph_options.rewrite_options.pin_to_host_optimization = rewriter_config_pb2.RewriterConfig.OFF
return config
|
Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
|
github-repos
|
def gibbs_binding_energy(self, eads=False):
n = self.get_unit_primitive_area
Nads = self.Nads_in_slab
BE = (self.energy - n * self.clean_entry.energy) / Nads - \
sum([ads.energy_per_atom for ads in self.adsorbates])
return BE * Nads if eads else BE
|
Returns the adsorption energy or Gibbs binding energy
of an adsorbate on a surface
Args:
eads (bool): Whether to calculate the adsorption energy
(True) or the binding energy (False) which is just
adsorption energy normalized by number of adsorbates.
|
juraj-google-style
|
def predict_classes(self, x, batch_size=32, verbose=0):
warnings.warn('`model.predict_classes()` is deprecated and will be removed after 2021-01-01. Please use instead:* `np.argmax(model.predict(x), axis=-1)`, if your model does multi-class classification (e.g. if it uses a `softmax` last-layer activation).* `(model.predict(x) > 0.5).astype("int32")`, if your model does binary classification (e.g. if it uses a `sigmoid` last-layer activation).')
proba = self.predict(x, batch_size=batch_size, verbose=verbose)
if proba.shape[-1] > 1:
return proba.argmax(axis=-1)
else:
return (proba > 0.5).astype('int32')
|
Generate class predictions for the input samples.
The input samples are processed batch by batch.
Args:
x: input data, as a Numpy array or list of Numpy arrays
(if the model has multiple inputs).
batch_size: integer.
verbose: verbosity mode, 0 or 1.
Returns:
A numpy array of class predictions.
|
github-repos
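A sketch of the replacement that the deprecation warning above recommends; `model` and `x_batch` are placeholders for an existing Keras model and input batch.
import numpy as np
proba = model.predict(x_batch)
classes = np.argmax(proba, axis=-1)        # multi-class models (softmax output)
# classes = (proba > 0.5).astype('int32')  # binary models (sigmoid output)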
|
def output(self, _filename):
for c in self.contracts:
(name, inheritance, var, func_summaries, modif_summaries) = c.get_summary()
txt = "\nContract %s"%name
txt += '\nContract vars: '+str(var)
txt += '\nInheritance:: '+str(inheritance)
table = PrettyTable(["Function",
"Visibility",
"Modifiers",
"Read",
"Write",
"Internal Calls",
"External Calls"])
for (_c_name, f_name, visi, modifiers, read, write, internal_calls, external_calls) in func_summaries:
read = self._convert(read)
write = self._convert(write)
internal_calls = self._convert(internal_calls)
external_calls = self._convert(external_calls)
table.add_row([f_name, visi, modifiers, read, write, internal_calls, external_calls])
txt += "\n \n"+str(table)
table = PrettyTable(["Modifiers",
"Visibility",
"Read",
"Write",
"Internal Calls",
"External Calls"])
for (_c_name, f_name, visi, _, read, write, internal_calls, external_calls) in modif_summaries:
read = self._convert(read)
write = self._convert(write)
internal_calls = self._convert(internal_calls)
external_calls = self._convert(external_calls)
table.add_row([f_name, visi, read, write, internal_calls, external_calls])
txt += "\n\n"+str(table)
txt += "\n"
self.info(txt)
|
_filename is not used
Args:
_filename(string)
|
juraj-google-style
|
def check_password_hash(password, password_hash, salt, N=1 << 14, r=8, p=1, buflen=64):
candidate_hash = generate_password_hash(password, salt, N, r, p, buflen)
return safe_str_cmp(password_hash, candidate_hash)
|
Given a password, hash, salt this function verifies the password is equal to hash/salt.
Args:
- ``password``: The password to perform check on.
- ``password_hash``: The stored hash to compare against.
- ``salt``: The salt that was used to generate ``password_hash``.
Returns:
- ``bool``
|
juraj-google-style
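A round-trip sketch for `check_password_hash`, assuming `generate_password_hash` from the same module accepts the same salt and default scrypt parameters; the salt value is a placeholder.
salt = b'random-salt'  # placeholder; use a securely generated salt in practice
pw_hash = generate_password_hash('hunter2', salt)
assert check_password_hash('hunter2', pw_hash, salt)
assert not check_password_hash('wrong-password', pw_hash, salt)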
|
def get_metrics_namespace(self) -> str:
return 'BeamML_HuggingFaceModelHandler_Tensor'
|
Returns:
A namespace for metrics collected by the RunInference transform.
|
github-repos
|
def _checkInt(inputvalue, minvalue=None, maxvalue=None, description='inputvalue'):
if not isinstance(description, str):
raise TypeError('The description should be a string. Given: {0!r}'.format(description))
if not isinstance(inputvalue, (int, long)):
raise TypeError('The {0} must be an integer. Given: {1!r}'.format(description, inputvalue))
if not isinstance(minvalue, (int, long, type(None))):
raise TypeError('The minvalue must be an integer or None. Given: {0!r}'.format(minvalue))
if not isinstance(maxvalue, (int, long, type(None))):
raise TypeError('The maxvalue must be an integer or None. Given: {0!r}'.format(maxvalue))
_checkNumerical(inputvalue, minvalue, maxvalue, description)
|
Check that the given integer is valid.
Args:
* inputvalue (int or long): The integer to be checked
* minvalue (int or long, or None): Minimum value of the integer
* maxvalue (int or long, or None): Maximum value of the integer
* description (string): Used in error messages for the checked inputvalue
Raises:
TypeError, ValueError
Note: Can not use the function :func:`_checkString`, as that function uses this function internally.
|
juraj-google-style
|
def int_shape(x):
try:
shape = x.shape
if not isinstance(shape, tuple):
shape = tuple(shape.as_list())
return shape
except ValueError:
return None
|
Returns the shape of tensor or variable as a tuple of int or None entries.
Args:
x: Tensor or variable.
Returns:
A tuple of integers (or None entries).
Examples:
>>> input = tf.keras.backend.placeholder(shape=(2, 4, 5))
>>> tf.keras.backend.int_shape(input)
(2, 4, 5)
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = tf.keras.backend.variable(value=val)
>>> tf.keras.backend.int_shape(kvar)
(2, 2)
|
github-repos
|
def parse_page(raw_page):
ret = {"title": get_title(raw_page), "id": get_id(raw_page)}
if ":" in ret["title"]:
return None
ret["revisions"] = get_revisions(raw_page)
return ret
|
Create a dictionary with title, id, and list of revisions.
The dictionary contains:
"title": a string
"id": an integer
"revisions": a list of strings
Args:
raw_page: a string
Returns:
a dictionary, or None in the case of an error.
|
juraj-google-style
|
def ToParameter(item: StackItem):
if isinstance(item, Array) or isinstance(item, Struct):
items = item.GetArray()
output = [ContractParameter.ToParameter(subitem) for subitem in items]
return ContractParameter(type=ContractParameterType.Array, value=output)
elif isinstance(item, Boolean):
return ContractParameter(type=ContractParameterType.Boolean, value=item.GetBoolean())
elif isinstance(item, ByteArray):
return ContractParameter(type=ContractParameterType.ByteArray, value=item.GetByteArray())
elif isinstance(item, Integer):
return ContractParameter(type=ContractParameterType.Integer, value=str(item.GetBigInteger()))
elif isinstance(item, InteropInterface):
return ContractParameter(type=ContractParameterType.InteropInterface, value=item.GetInterface())
|
Convert a StackItem to a ContractParameter object
Args:
item (neo.VM.InteropService.StackItem) The item to convert to a ContractParameter object
Returns:
ContractParameter
|
juraj-google-style
|
def GetSitelinksFromFeed(client, feed):
feed_mappings = GetFeedMapping(client, feed, PLACEHOLDER_TYPE_SITELINKS)
feed_items = {}
for feed_item in GetFeedItems(client, feed):
site_link_from_feed = {}
for attribute_value in feed_item['attributeValues']:
if attribute_value['feedAttributeId'] in feed_mappings:
for field_id in feed_mappings[attribute_value['feedAttributeId']]:
if field_id == SITE_LINK_FIELDS['TEXT']:
site_link_from_feed['text'] = attribute_value['stringValue']
elif field_id == SITE_LINK_FIELDS['URL']:
site_link_from_feed['url'] = attribute_value['stringValue']
elif field_id == SITE_LINK_FIELDS['FINAL_URLS']:
site_link_from_feed['finalUrls'] = attribute_value['stringValues']
elif field_id == SITE_LINK_FIELDS['FINAL_MOBILE_URLS']:
site_link_from_feed['finalMobileUrls'] = attribute_value[
'stringValues']
elif field_id == SITE_LINK_FIELDS['TRACKING_URL_TEMPLATE']:
site_link_from_feed['trackingUrlTemplate'] = attribute_value[
'stringValue']
elif field_id == SITE_LINK_FIELDS['LINE2']:
site_link_from_feed['line2'] = attribute_value['stringValue']
elif field_id == SITE_LINK_FIELDS['LINE3']:
site_link_from_feed['line3'] = attribute_value['stringValue']
else:
print('No applicable Site Link Field found for Id: %s' % field_id)
feed_items[feed_item['feedItemId']] = site_link_from_feed
return feed_items
|
Gets the sitelinks from a feed.
Args:
client: an AdWordsClient instance.
feed: the feed used to retrieve sitelinks.
Returns:
A dictionary mapping the feed item ID to SiteLinkFromFeed.
|
juraj-google-style
|
def runTemplate(id, data={}):
conn = Qubole.agent()
path = (str(id) + '/run')
res = conn.post(Template.element_path(path), data)
cmdType = res['command_type']
cmdId = res['id']
cmdClass = eval(cmdType)
cmd = cmdClass.find(cmdId)
while (not Command.is_done(cmd.status)):
time.sleep(Qubole.poll_interval)
cmd = cmdClass.find(cmd.id)
return Template.getResult(cmdClass, cmd)
|
Runs an existing Template and waits for the Result.
Prints result to stdout.
Args:
`id`: ID of the template to run
`data`: json data containing the input_vars
Returns:
An integer as status (0: success, 1: failure)
|
codesearchnet
|
def to_service(self, service, version):
service_url = self._service_locator.get_service_url(service, version)
return self.__copy_and_set('service_url', self.__strip_trailing_slashes(service_url))
|
Sets the service name and version the request should target
Args:
service (str): The name of the service as displayed in the services.json file
version (str): The version of the service as displayed in the services.json file
Returns:
The request builder instance in order to chain calls
|
codesearchnet
|
def _variable_on_cpu(name, shape, initializer):
with tf.device('/cpu:0'):
dtype = (tf.float16 if FLAGS.use_fp16 else tf.float32)
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
|
Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
|
codesearchnet
|
def create_domain(provider, context, **kwargs):
session = get_session(provider.region)
client = session.client("route53")
domain = kwargs.get("domain")
if not domain:
logger.error("domain argument or BaseDomain variable not provided.")
return False
zone_id = create_route53_zone(client, domain)
return {"domain": domain, "zone_id": zone_id}
|
Create a domain within route53.
Args:
provider (:class:`stacker.providers.base.BaseProvider`): provider
instance
context (:class:`stacker.context.Context`): context instance
Returns: boolean for whether or not the hook succeeded.
|
juraj-google-style
|
def combine_last_two_dimensions(x):
x_shape = common_layers.shape_list(x)
(a, b) = x_shape[(- 2):]
return tf.reshape(x, (x_shape[:(- 2)] + [(a * b)]))
|
Reshape x so that the last two dimension become one.
Args:
x: a Tensor with shape [..., a, b]
Returns:
a Tensor with shape [..., ab]
|
codesearchnet
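A shape-only sketch for `combine_last_two_dimensions`; it assumes the tensor2tensor `common_layers` helper referenced above is importable.
import tensorflow as tf
x = tf.zeros([2, 3, 4, 5])
y = combine_last_two_dimensions(x)
print(y.shape)  # (2, 3, 20)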
|
def sinh(x):
if any_symbolic_tensors((x,)):
return Sinh().symbolic_call(x)
return backend.numpy.sinh(x)
|
Hyperbolic sine, element-wise.
Arguments:
x: Input tensor.
Returns:
Output tensor of same shape as `x`.
|
github-repos
|
def is_disconnected(self, node_id):
conn = self._conns.get(node_id)
if conn is None:
return False
return conn.disconnected()
|
Check whether the node connection has been disconnected or failed.
A disconnected node has either been closed or has failed. Connection
failures are usually transient and can be resumed in the next ready()
call, but there are cases where transient failures need to be caught
and re-acted upon.
Arguments:
node_id (int): the id of the node to check
Returns:
bool: True iff the node exists and is disconnected
|
juraj-google-style
|
def get_favorite_radio_shows(self, start=0, max_items=100):
message = 'The output type of this method will probably change in the future to use SoCo data structures'
warnings.warn(message, stacklevel=2)
return self.__get_favorites(RADIO_SHOWS, start, max_items)
|
Get favorite radio shows from Sonos' Radio app.
Returns:
dict: A dictionary containing the total number of favorites, the
number of favorites returned, and the actual list of favorite radio
shows, represented as a dictionary with `title` and `uri` keys.
Depending on what you're building, you'll want to check to see if the
total number of favorites is greater than the amount you
requested (`max_items`), if it is, use `start` to page through and
get the entire list of favorites.
|
codesearchnet
|
def fts_match_all(self, fts, inv):
return all([self.fts_match(fts, s) for s in inv])
|
Return `True` if all segments in `inv` match the features in `fts`
Args:
fts (list): a collection of (value, feature) tuples
inv (list): a collection of IPA segments represented as Unicode
strings
Returns:
bool: `True` if all segments in `inv` matches the features in `fts`
|
codesearchnet
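An illustrative call of `fts_match_all`, assuming a panphon-style feature table instance; the feature name 'voi' (voicing) and the segments are examples only.
ft = FeatureTable()  # hypothetical instance of the class defining fts_match_all above
ft.fts_match_all([('+', 'voi')], ['b', 'd', 'g'])  # True: every segment is voiced
ft.fts_match_all([('+', 'voi')], ['b', 't'])       # False: 't' is voiceless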
|
def error(message):
fail = '\x1b[91m'
end = '\x1b[0m'
sys.exit(((fail + 'Error: {}'.format(message)) + end))
|
Throw an error with the given message and immediately quit.
Args:
message(str): The message to display.
|
codesearchnet
|
def __init__(self, axis=None):
super().__init__()
if axis is None:
axis = []
self._axis = axis
|
Initializes a Squeeze layer.
Args:
axis: An optional list of ints. Defaults to []. If specified, only
squeezes the dimensions listed. The dimension index starts at 0. It is
an error to squeeze a dimension that is not 1. Must be in the range
[-rank(input), rank(input)). Must be specified if input is
a RaggedTensor.
|
github-repos
|
def dispatch(self, msg):
logger.debug(f'Got message: {msg}')
futures = []
matched = False
for behaviour in (x for x in self.behaviours if x.match(msg)):
futures.append(self.submit(behaviour.enqueue(msg)))
logger.debug(f'Message enqueued to behaviour: {behaviour}')
self.traces.append(msg, category=str(behaviour))
matched = True
if (not matched):
logger.warning(f'No behaviour matched for message: {msg}')
self.traces.append(msg)
return futures
|
Dispatch the message to every behaviour that is waiting for
it using their templates match.
Args:
msg (spade.message.Message): the message to dispatch.
Returns:
list(asyncio.Future): a list of futures of the append of the message at each matched behaviour.
|
codesearchnet
|
def _create_environment(config, outdir):
if isinstance(config.env, str):
env = gym.make(config.env)
else:
env = config.env()
if not hasattr(env, 'spec'):
setattr(env, 'spec', getattr(env, 'spec', None))
if config.max_length:
env = tools.wrappers.LimitDuration(env, config.max_length)
env = gym.wrappers.Monitor(
env, outdir, lambda unused_episode_number: True)
if isinstance(env.action_space, gym.spaces.Box):
env = tools.wrappers.RangeNormalize(env)
env = tools.wrappers.ClipAction(env)
elif isinstance(env.action_space, gym.spaces.Discrete):
env = tools.wrappers.RangeNormalize(env, action=False)
else:
message = "Unsupported action space '{}'".format(type(env.action_space))
raise NotImplementedError(message)
env = tools.wrappers.ConvertTo32Bit(env)
env = tools.wrappers.CacheSpaces(env)
return env
|
Constructor for an instance of the environment.
Args:
config: Object providing configurations via attributes.
outdir: Directory to store videos in.
Raises:
NotImplementedError: For action spaces other than Box and Discrete.
Returns:
Wrapped OpenAI Gym environment.
|
juraj-google-style
|
def get_relevant_paths_and_versions(self, config: 'XLAConfigOptions'):
if self.ld_library_path is None:
self.ld_library_path = os.environ.get('LD_LIBRARY_PATH', None)
if config.host_compiler == HostCompiler.CLANG:
self.clang_path = _find_executable_or_die('clang', self.clang_path)
self.clang_major_version = self.clang_major_version or _get_clang_major_version(self.clang_path)
self.lld_path = self.lld_path or shutil.which('ld.lld')
elif config.host_compiler == HostCompiler.GCC:
self.gcc_path = _find_executable_or_die('gcc', self.gcc_path)
self.gcc_major_version = self.gcc_major_version or _get_gcc_major_version(self.gcc_path)
if config.backend == Backend.CUDA:
if config.cuda_compiler == CudaCompiler.CLANG:
self.clang_path = _find_executable_or_die('clang', self.clang_path)
if not self.cuda_compute_capabilities:
self.cuda_compute_capabilities = _get_cuda_compute_capabilities_or_die()
|
Gets paths and versions as needed by the config.
Args:
config: XLAConfigOptions instance that determines what paths and versions
to try to autoconfigure.
|
github-repos
|
def run(self, resources):
hwman = resources['connection']
con = hwman.hwman.controller()
test_interface = con.test_interface()
try:
test_interface.synchronize_clock()
print(('Time currently set at %s' % test_interface.current_time_str()))
except Exception:
raise ArgumentError('Error setting RTC time, check if controller actually has RTC or if iotile-support-lib-controller-3 is updated')
|
Sets the RTC timestamp to UTC.
Args:
resources (dict): A dictionary containing the required resources that
we needed access to in order to perform this step.
|
codesearchnet
|
def prefer_type(self, prefer, over):
self._write_lock.acquire()
try:
if self._preferred(preferred=over, over=prefer):
raise ValueError(('Type %r is already preferred over %r.' % (over, prefer)))
prefs = self._prefer_table.setdefault(prefer, set())
prefs.add(over)
finally:
self._write_lock.release()
|
Prefer one type over another type, all else being equivalent.
With abstract base classes (Python's abc module) it is possible for
a type to appear to be a subclass of another type without the supertype
appearing in the subtype's MRO. As such, the supertype has no order
with respect to other supertypes, and this may lead to amguity if two
implementations are provided for unrelated abstract types.
In such cases, it is possible to disambiguate by explictly telling the
function to prefer one type over the other.
Arguments:
prefer: Preferred type (class).
over: The type we don't like (class).
Raises:
ValueError: In case of logical conflicts.
|
codesearchnet
|
def from_stream(cls, stream):
fields = _magic_parser(stream, magic=cls.MAGIC)
if fields:
fields.pop('iter')
return cls(fields)
else:
return None
|
Read the first occurrence of ScfCycle from stream.
Returns:
None if no `ScfCycle` entry is found.
|
codesearchnet
|
def register(config_class, video_processor_class, exist_ok=False):
VIDEO_PROCESSOR_MAPPING.register(config_class, video_processor_class, exist_ok=exist_ok)
|
Register a new video processor for this class.
Args:
config_class ([`PretrainedConfig`]):
The configuration corresponding to the model to register.
video_processor_class ([`BaseVideoProcessor`]):
The video processor to register.
|
github-repos
|
def step(self, action):
self._agent.act(action)
self._handle_command_buffer()
self._client.release()
self._client.acquire()
return self._get_single_state()
|
Supplies an action to the main agent and tells the environment to tick once.
Primary mode of interaction for single agent environments.
Args:
action (np.ndarray): An action for the main agent to carry out on the next tick.
Returns:
tuple: The (state, reward, terminal, info) tuple for the agent. State is a dictionary
from sensor enum (see :obj:`holodeck.sensors.Sensors`) to np.ndarray.
Reward is the float reward returned by the environment.
Terminal is the bool terminal signal returned by the environment.
Info is any additional info, depending on the world. Defaults to None.
|
codesearchnet
|
def _Inject(self, position, call):
self.EnsureGdbPosition(position[0], position[1], None)
self.ClearBreakpoints()
self._AddThreadSpecificBreakpoint(position)
gdb.parse_and_eval(('%s = 1' % GdbCache.PENDINGCALLS_TO_DO))
gdb.parse_and_eval(('%s = 1' % GdbCache.PENDINGBUSY))
try:
self.Continue(position)
if (not gdb.selected_thread().is_stopped()):
raise RuntimeError('Gdb is not acting as expected, is it being run in async mode?')
finally:
gdb.parse_and_eval(('%s = 0' % GdbCache.PENDINGBUSY))
self.Call(position, call)
|
Injects evaluation of 'call' in a safe location in the inferior.
Due to the way these injected function calls work, gdb must not be killed
until the call has returned. If that happens, the inferior will be sent
SIGTRAP upon attempting to return from the dummy frame gdb constructs for
us, and will most probably crash.
Args:
position: array of pid, tid, framedepth specifying the requested position.
call: Any expression gdb can evaluate. Usually a function call.
Raises:
RuntimeError: if gdb is not being run in synchronous exec mode.
|
codesearchnet
|
def _construct_field_operator_expression_dict(expression_list):
between_operators = (u'<=', u'>=')
inverse_operator = {u'>=': u'<=', u'<=': u'>='}
local_field_to_expressions = {}
remaining_expression_list = deque([])
for expression in expression_list:
if all((isinstance(expression, BinaryComposition), (expression.operator in between_operators), (isinstance(expression.left, LocalField) or isinstance(expression.right, LocalField)))):
if isinstance(expression.right, LocalField):
new_operator = inverse_operator[expression.operator]
new_expression = BinaryComposition(new_operator, expression.right, expression.left)
else:
new_expression = expression
field_name = new_expression.left.field_name
expressions_dict = local_field_to_expressions.setdefault(field_name, {})
expressions_dict.setdefault(new_expression.operator, []).append(new_expression)
else:
remaining_expression_list.append(expression)
return (local_field_to_expressions, remaining_expression_list)
|
Construct a mapping from local fields to specified operators, and corresponding expressions.
Args:
expression_list: list of expressions to analyze
Returns:
local_field_to_expressions:
dict mapping local field names to "operator -> list of BinaryComposition" dictionaries,
for each BinaryComposition operator involving the LocalField
remaining_expression_list:
list of remaining expressions that were *not*
BinaryCompositions on a LocalField using any of the between operators
|
codesearchnet
|
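A self-contained illustration of the grouping idea in the entry above: inequality filters are bucketed per field, and the operator is flipped whenever the field appears on the right-hand side. The tiny Field/Expr stand-ins are placeholders, not the compiler's real LocalField/BinaryComposition classes.
from collections import namedtuple

Field = namedtuple("Field", "name")
Expr = namedtuple("Expr", "op left right")

def group_between_filters(expressions):
    inverse = {">=": "<=", "<=": ">="}
    grouped, remaining = {}, []
    for e in expressions:
        if e.op in inverse and (isinstance(e.left, Field) or isinstance(e.right, Field)):
            if isinstance(e.right, Field):       # normalize so the field is on the left
                e = Expr(inverse[e.op], e.right, e.left)
            grouped.setdefault(e.left.name, {}).setdefault(e.op, []).append(e)
        else:
            remaining.append(e)
    return grouped, remaining

exprs = [
    Expr(">=", Field("age"), 18),
    Expr("<=", Field("age"), 65),
    Expr(">=", 30, Field("score")),   # field on the right: becomes score <= 30
]
grouped, remaining = group_between_filters(exprs)
print(sorted(grouped))                # ['age', 'score']
print(list(grouped["age"]))           # ['>=', '<=']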
def declaration_path(decl):
if (not decl):
return []
if (not decl.cache.declaration_path):
result = [decl.name]
parent = decl.parent
while parent:
if parent.cache.declaration_path:
result.reverse()
decl.cache.declaration_path = (parent.cache.declaration_path + result)
return decl.cache.declaration_path
else:
result.append(parent.name)
parent = parent.parent
result.reverse()
decl.cache.declaration_path = result
return result
return decl.cache.declaration_path
|
Returns a list of parent declarations names.
Args:
decl (declaration_t): declaration for which declaration path
should be calculated.
Returns:
list[(str | basestring)]: list of names, where the first item is the
top parent name and the last item is the name of the
input declaration.
|
codesearchnet
|
def get_wells(self, uwis=None):
if uwis is None:
return Project(self.__list)
return Project([w for w in self if w.uwi in uwis])
|
Returns a new Project with only the wells named by UWI.
Args:
uwis (list): list or tuple of UWI strings.
Returns:
Project: a new Project containing only the wells whose UWI is in `uwis`.
|
juraj-google-style
|
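A short usage sketch for the entry above, assuming a welly-style Project; the LAS path and UWI strings are placeholders.
from welly import Project

project = Project.from_las("data/*.las")                 # assumed path
subset = project.get_wells(uwis=["20/08-1", "20/08-2"])  # keep only matching wells
print(len(subset))
everything = project.get_wells()                         # uwis=None returns a Project with all wells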
def _CopyFromDateTimeValues(self, date_time_values):
year = date_time_values.get('year', 0)
month = date_time_values.get('month', 0)
day_of_month = date_time_values.get('day_of_month', 0)
hours = date_time_values.get('hours', 0)
minutes = date_time_values.get('minutes', 0)
seconds = date_time_values.get('seconds', 0)
microseconds = date_time_values.get('microseconds', 0)
precision_helper = precisions.PrecisionHelperFactory.CreatePrecisionHelper(
self._precision)
fraction_of_second = precision_helper.CopyMicrosecondsToFractionOfSecond(
microseconds)
self._normalized_timestamp = None
self._number_of_seconds = self._GetNumberOfSecondsFromElements(
year, month, day_of_month, hours, minutes, seconds)
self._time_elements_tuple = (
year, month, day_of_month, hours, minutes, seconds)
self.fraction_of_second = fraction_of_second
self.is_local_time = False
|
Copies time elements from date and time values.
Args:
date_time_values (dict[str, int]): date and time values, such as year,
month, day of month, hours, minutes, seconds, microseconds.
Raises:
ValueError: if no helper can be created for the current precision.
|
juraj-google-style
|
def _CheckByteStreamSize(self, byte_stream, byte_offset, data_type_size):
try:
byte_stream_size = len(byte_stream)
except Exception as exception:
raise errors.MappingError(exception)
if ((byte_stream_size - byte_offset) < data_type_size):
raise errors.ByteStreamTooSmallError('Byte stream too small requested: {0:d} available: {1:d}'.format(data_type_size, byte_stream_size))
|
Checks if the byte stream is large enough for the data type.
Args:
byte_stream (bytes): byte stream.
byte_offset (int): offset into the byte stream where to start.
data_type_size (int): data type size.
Raises:
ByteStreamTooSmallError: if the byte stream is too small.
MappingError: if the size of the byte stream cannot be determined.
|
codesearchnet
|
def splitEkmDate(dateint):
date_str = str(dateint)
dt = namedtuple('EkmDate', ['yy', 'mm', 'dd', 'weekday', 'hh', 'minutes', 'ss'])
if (len(date_str) != 14):
dt.yy = dt.mm = dt.dd = dt.weekday = dt.hh = dt.minutes = dt.ss = 0
return dt
dt.yy = int(date_str[0:2])
dt.mm = int(date_str[2:4])
dt.dd = int(date_str[4:6])
dt.weekday = int(date_str[6:8])
dt.hh = int(date_str[8:10])
dt.minutes = int(date_str[10:12])
dt.ss = int(date_str[12:14])
return dt
|
Break out a date from Omnimeter read.
Note a corrupt date will raise an exception when you
convert it to int to hand to this method.
Args:
dateint (int): Omnimeter datetime as int.
Returns:
tuple: Named tuple which breaks out as follows:
========== =====================
yy Last 2 digits of year
mm Month 1-12
dd Day 1-31
weekday Zero based weekday
hh Hour 0-23
minutes Minutes 0-59
ss Seconds 0-59
========== =====================
|
codesearchnet
|
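A worked example for the entry above, assuming splitEkmDate is importable (e.g. from the ekmmeters module). The 14-digit layout is YY MM DD WW HH MM SS, with WW the zero-based weekday field.
dt = splitEkmDate(16052302143005)
print(dt.yy, dt.mm, dt.dd)                    # 16 5 23
print(dt.weekday, dt.hh, dt.minutes, dt.ss)   # 2 14 30 5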
def render(self, data):
renderers = {
"text/csv": self._render_as_csv,
"text/html": self._render_as_html,
None: self._render_as_html,
}
render = renderers[data.content_type]
return render(data)
|
Renders the reports based on data.content_type's value.
Arguments:
data (ReportViewRequestData): The report data. data.content_type
is used to determine how the reports are rendered.
Returns:
HTTPResponse: The rendered version of the report.
|
juraj-google-style
|
def get_hook(hook_name):
if not pkg_resources.resource_exists(__name__, hook_name):
raise HookNotFoundError
return pkg_resources.resource_string(__name__, hook_name)
|
Returns the specified hook.
Args:
hook_name (str)
Returns:
str - (the content of) the hook
Raises:
HookNotFoundError
|
juraj-google-style
|
def sheetNames(book=None):
if book:
if not book.lower() in [x.lower() for x in bookNames()]:
return False
else:
book=activeBook()
if not book:
return False
poBook=PyOrigin.WorksheetPages(book)
if not len(poBook):
return None
return [x.GetName() for x in poBook.Layers()]
|
Return the sheet names of a book.
Args:
book (str, optional): If a book is given, pull names from
that book. Otherwise, try the active one
Returns:
list of sheet names (typical case).
None if book has no sheets.
False if book doesn't exist.
|
juraj-google-style
|
def snow_depth(self, value=999.0):
if (value is not None):
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float for field `snow_depth`'.format(value))
self._snow_depth = value
|
Corresponds to IDD Field `snow_depth`
Args:
value (float): value for IDD Field `snow_depth`
Unit: cm
Missing value: 999.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
def write(self, data):
block_remaining = (_BLOCK_SIZE - (self.__position % _BLOCK_SIZE))
if (block_remaining < _HEADER_LENGTH):
self.__writer.write(('\x00' * block_remaining))
self.__position += block_remaining
block_remaining = _BLOCK_SIZE
if (block_remaining < (len(data) + _HEADER_LENGTH)):
first_chunk = data[:(block_remaining - _HEADER_LENGTH)]
self.__write_record(_RECORD_TYPE_FIRST, first_chunk)
data = data[len(first_chunk):]
while True:
block_remaining = (_BLOCK_SIZE - (self.__position % _BLOCK_SIZE))
if (block_remaining >= (len(data) + _HEADER_LENGTH)):
self.__write_record(_RECORD_TYPE_LAST, data)
break
else:
chunk = data[:(block_remaining - _HEADER_LENGTH)]
self.__write_record(_RECORD_TYPE_MIDDLE, chunk)
data = data[len(chunk):]
else:
self.__write_record(_RECORD_TYPE_FULL, data)
|
Write single record.
Args:
data: record data to write as string, byte array or byte sequence.
|
codesearchnet
|
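A standalone sketch of the block/chunk arithmetic used by the entry above: a record that does not fit in the current block is emitted as FIRST/MIDDLE/LAST chunks, each preceded by a header. The block size and header length below are illustrative values, not the module's real constants.
BLOCK_SIZE = 32
HEADER_LENGTH = 7

def split_into_chunks(position, data):
    """Return (record_type, payload_length) pairs for one logical record."""
    chunks = []
    block_remaining = BLOCK_SIZE - (position % BLOCK_SIZE)
    if block_remaining < HEADER_LENGTH:          # pad: even a header would not fit
        position += block_remaining
        block_remaining = BLOCK_SIZE
    if block_remaining < len(data) + HEADER_LENGTH:
        first = block_remaining - HEADER_LENGTH
        chunks.append(("FIRST", first))
        position += first + HEADER_LENGTH
        data = data[first:]
        while True:
            block_remaining = BLOCK_SIZE - (position % BLOCK_SIZE)
            if block_remaining >= len(data) + HEADER_LENGTH:
                chunks.append(("LAST", len(data)))
                break
            middle = block_remaining - HEADER_LENGTH
            chunks.append(("MIDDLE", middle))
            position += middle + HEADER_LENGTH
            data = data[middle:]
    else:
        chunks.append(("FULL", len(data)))
    return chunks

print(split_into_chunks(0, b"x" * 60))   # [('FIRST', 25), ('MIDDLE', 25), ('LAST', 10)]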
def license():
from os.path import join
with open(join(__path__[0], 'LICENSE.txt')) as lic:
print(lic.read())
|
Print the Bokeh license to the console.
Returns:
None
|
codesearchnet
|
def create_view(operations, operation):
operations.execute("CREATE VIEW %s AS %s" % (
operation.target.name,
operation.target.sqltext
))
|
Implements ``CREATE VIEW``.
Args:
operations: instance of ``alembic.operations.base.Operations``
operation: instance of :class:`.ReversibleOp`
Returns:
``None``
|
juraj-google-style
|
def has_apical_dendrite(neuron, min_number=1, treefun=_read_neurite_type):
types = [treefun(n) for n in neuron.neurites]
return CheckResult((types.count(NeuriteType.apical_dendrite) >= min_number))
|
Check if a neuron has apical dendrites
Arguments:
neuron(Neuron): The neuron object to test
min_number: minimum number of apical dendrites required
treefun: Optional function to calculate the tree type of neuron's
neurites
Returns:
CheckResult with result
|
codesearchnet
|
def transcripts(self, build='37', hgnc_id=None):
query = {'build': build}
if hgnc_id:
query['hgnc_id'] = hgnc_id
return self.transcript_collection.find(query)
|
Return all transcripts.
If a gene is specified return all transcripts for the gene
Args:
build(str)
hgnc_id(int)
Returns:
iterable(transcript)
|
juraj-google-style
|
def _bfd_multiplier(self, **kwargs):
method_name = 'rbridge_id_router_router_bgp_router_bgp_attributes_' \
'bfd_interval_multiplier'
bfd_multiplier = getattr(self._rbridge, method_name)
config = bfd_multiplier(**kwargs)
if kwargs['delete']:
tag = 'multiplier'
config.find(('.//*%s' % tag)).set('operation', 'delete')  # marks the element for deletion; reconstructed from a truncated line
return config
|
Return the BFD multiplier XML.
You should not use this method.
You probably want `BGP.bfd`.
Args:
multiplier (str): BFD multiplier value.
delete (bool): Remove the configuration if ``True``.
Returns:
XML to be passed to the switch.
Raises:
None
|
juraj-google-style
|
def content(self):
as_text = (self.content_type in _content_types.UTF8_TYPES)
return self.get_data(as_text=as_text)
|
The request's incoming data.
It is automatically decoded from UTF-8 when the content type is a known UTF-8 type.
Returns:
(obj): incoming data
|
codesearchnet
|
def product_name(self):
buf = (ctypes.c_char * self.MAX_BUF_SIZE)()
self._dll.JLINKARM_EMU_GetProductName(buf, self.MAX_BUF_SIZE)
return ctypes.string_at(buf).decode()
|
Returns the product name of the connected J-Link.
Args:
self (JLink): the ``JLink`` instance
Returns:
Product name.
|
codesearchnet
|
def chain_part_functions(fns: Sequence[PartFn], match_fns: Sequence[MatchFn] | None=None) -> PartFn:
return functools.partial(_chain_part_functions, _to_tuple_fns(fns, match_fns))
|
Chain the `fns` and execute them concurrently.
See file comment.
Args:
fns: sequence of part functions to chain.
match_fns: sequence of functions that return True if the part should be
processed by the part function. When the part should not be processed, the
part function will not be called and the part will be passed as is. When
match_fns is not provided, all parts are processed by default.
Returns:
Part function that is a chain of the provided Sequence of functions.
Raises:
ValueError: if the length of fns and match_fns is not the same (when
match_fns is provided).
|
github-repos
|
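A generic, self-contained sketch of the pattern behind the entry above: pre-binding a tuple of (match_fn, fn) pairs into a single callable with functools.partial. The real function operates on streams of parts and runs them concurrently; this sketch only shows the binding and matching idea on plain strings.
import functools
from typing import Callable, Sequence

def _chain(pairs, part: str) -> str:
    for match_fn, fn in pairs:
        if match_fn(part):
            part = fn(part)       # matched parts are transformed and fed onward
        # unmatched parts pass through unchanged
    return part

def chain(fns: Sequence[Callable[[str], str]], match_fns=None):
    if match_fns is None:
        match_fns = [lambda _part: True] * len(fns)
    if len(fns) != len(match_fns):
        raise ValueError("fns and match_fns must have the same length")
    return functools.partial(_chain, tuple(zip(match_fns, fns)))

pipeline = chain([str.strip, str.upper], [lambda p: True, lambda p: p.islower()])
print(pipeline("  hello  "))      # "HELLO"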
def get_structure_by_formula(self, formula, **kwargs):
structures = []
sql = ('select file, sg from data where formula="- %s -"' % Composition(formula).hill_formula)
text = self.query(sql).split('\n')
text.pop(0)
for l in text:
if l.strip():
(cod_id, sg) = l.split('\t')
r = requests.get(('http://www.crystallography.net/cod/%s.cif' % cod_id))  # URL reconstructed from a truncated line
try:
s = Structure.from_str(r.text, fmt='cif', **kwargs)
structures.append({'structure': s, 'cod_id': int(cod_id), 'sg': sg})
except Exception:
import warnings
warnings.warn(('\nStructure.from_str failed while parsing CIF file:\n%s' % r.text))
raise
return structures
|
Queries the COD for structures by formula. Requires mysql executable to
be in the path.
Args:
formula (str): Chemical formula.
kwargs: All kwargs supported by
:func:`pymatgen.core.structure.Structure.from_str`.
Returns:
A list of dict of the format
[{"structure": Structure, "cod_id": cod_id, "sg": "P n m a"}]
|
codesearchnet
|
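A usage sketch for the entry above via pymatgen's COD client; it needs network access, and the surrounding class also expects a mysql client on PATH for the id query.
from pymatgen.ext.cod import COD

entries = COD().get_structure_by_formula("Fe2O3")
for entry in entries[:3]:
    print(entry["cod_id"], entry["sg"], entry["structure"].composition)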
def add_maps(self, parent, root_path=''):
for mapsource in self.map_folders[root_path]['maps']:
parent.append(self.get_network_link(mapsource))
for folder in self.map_folders[root_path]['folders']:
kml_folder_obj = kml_folder(folder)
parent.append(kml_folder_obj)
self.add_maps(parent=kml_folder_obj, root_path=F_SEP.join((root_path, folder)))
|
Recursively add maps in a folder hierarchy.
Args:
parent (KMLElement): KMLElement to which child folders and map network links are appended
root_path (str): path of 'parent'
|
codesearchnet
|
def __init__(self, backend_wsgi_app, config_manager=None):
if config_manager is None:
config_manager = api_config_manager.ApiConfigManager()
self.config_manager = config_manager
self._backend = backend_wsgi_app
self._dispatchers = []
for base_path in self._backend.base_paths:
self._add_dispatcher('%sexplorer/?$' % base_path,
self.handle_api_explorer_request)
self._add_dispatcher('%sstatic/.*$' % base_path,
self.handle_api_static_request)
api_config_response = self.get_api_configs()
if api_config_response:
self.config_manager.process_api_config_response(api_config_response)
else:
raise api_exceptions.ApiConfigurationError('get_api_configs() returned no configs')
|
Constructor for EndpointsDispatcherMiddleware.
Args:
backend_wsgi_app: A WSGI server that serves the app's endpoints.
config_manager: An ApiConfigManager instance that allows a caller to
set up an existing configuration for testing.
|
juraj-google-style
|
def es_get_class_defs(cls_def, cls_name):
rtn_dict = {key: value for (key, value) in cls_def.items() if key.startswith('kds_es')}
for key in rtn_dict:
del cls_def[key]
return rtn_dict
|
Reads through the class defs and gets the related es class
definitions
Args:
-----
class_defs: RdfDataset of class definitions
|
codesearchnet
|