code (string, 20-4.93k chars) | docstring (string, 33-1.27k chars) | source (string, 3 classes)
---|---|---|
def _calculate_expected_result(dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config):
if config.use_gumbel_for_cells:
gumbel_dist = tfp.distributions.RelaxedBernoulli(config.temperature, logits=dist_per_cell.logits_parameter() * config.temperature)
scaled_probability_per_cell = gumbel_dist.sample()
else:
scaled_probability_per_cell = dist_per_cell.probs_parameter()
scaled_probability_per_cell = scaled_probability_per_cell / numeric_values_scale * input_mask_float
count_result = tf.reduce_sum(scaled_probability_per_cell, axis=1)
numeric_values_masked = tf.where(tf.math.is_nan(numeric_values), tf.zeros_like(numeric_values), numeric_values)
sum_result = tf.reduce_sum(scaled_probability_per_cell * numeric_values_masked, axis=1)
avg_approximation = config.average_approximation_function
if avg_approximation == AverageApproximationFunction.RATIO:
average_result = sum_result / (count_result + EPSILON_ZERO_DIVISION)
elif avg_approximation == AverageApproximationFunction.FIRST_ORDER:
ex = tf.reduce_sum(scaled_probability_per_cell, axis=1, keepdims=True) - scaled_probability_per_cell + 1
average_result = tf.reduce_sum(numeric_values_masked * scaled_probability_per_cell / ex, axis=1)
elif avg_approximation == AverageApproximationFunction.SECOND_ORDER:
ex = tf.reduce_sum(scaled_probability_per_cell, axis=1, keepdims=True) - scaled_probability_per_cell + 1
pointwise_var = scaled_probability_per_cell * (1 - scaled_probability_per_cell)
var = tf.reduce_sum(pointwise_var, axis=1, keepdims=True) - pointwise_var
multiplier = (var / tf.math.square(ex) + 1) / ex
average_result = tf.reduce_sum(numeric_values_masked * scaled_probability_per_cell * multiplier, axis=1)
else:
raise ValueError('Invalid average_approximation_function: %s' % config.average_approximation_function)
if config.use_gumbel_for_aggregation:
gumbel_dist = tfp.distributions.RelaxedOneHotCategorical(config.aggregation_temperature, logits=logits_aggregation[:, 1:])
aggregation_op_only_probs = gumbel_dist.sample()
else:
aggregation_op_only_probs = stable_softmax(logits_aggregation[:, 1:] / config.aggregation_temperature, axis=-1)
all_results = tf.concat([tf.expand_dims(sum_result, axis=1), tf.expand_dims(average_result, axis=1), tf.expand_dims(count_result, axis=1)], axis=1)
expected_result = tf.reduce_sum(all_results * aggregation_op_only_probs, axis=1)
return expected_result
|
Calculates the expected result given cell and aggregation probabilities.
Args:
dist_per_cell (`tfp.distributions.Bernoulli`):
Cell selection distribution for each cell.
numeric_values (`tf.Tensor` of shape `(batch_size, seq_length)`):
Numeric values of every token. NaN for tokens which are not numeric values.
numeric_values_scale (`tf.Tensor` of shape `(batch_size, seq_length)`):
Scale of the numeric values of every token.
input_mask_float (`tf.Tensor` of shape `(batch_size, seq_length)`):
Mask for the table, without question tokens and table headers.
logits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`):
Logits per aggregation operation.
config ([`TapasConfig`]):
Model configuration class with all the hyperparameters of the model
Returns:
expected_result (`tf.Tensor` of shape `(batch_size,)`): The expected result per example.
|
github-repos
|
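For illustration only (not part of the dataset): a small NumPy sketch of the RATIO and FIRST_ORDER average approximations used in `_calculate_expected_result` above, with made-up cell probabilities and values.

import numpy as np

EPSILON_ZERO_DIVISION = 1e-10  # assumed small constant, mirroring the epsilon used above

p = np.array([[0.9, 0.8, 0.1, 0.0]])        # scaled selection probability per table cell
values = np.array([[2.0, 4.0, 10.0, 7.0]])  # numeric value per cell (made up)

count_result = p.sum(axis=1)                # expected number of selected cells
sum_result = (p * values).sum(axis=1)       # expected sum over selected cells

# RATIO: expected sum divided by expected count.
avg_ratio = sum_result / (count_result + EPSILON_ZERO_DIVISION)

# FIRST_ORDER: each cell is weighted by p / (1 + expected count of the other cells).
ex = p.sum(axis=1, keepdims=True) - p + 1
avg_first_order = (values * p / ex).sum(axis=1)

print(count_result, sum_result, avg_ratio, avg_first_order)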
def load_resource(resource_url: str, forceupdate: bool=False):
log.info(f'Loading resource {resource_url}')
try:
fo = bel.utils.download_file(resource_url)
if (not fo):
log.error(f'Could not download and open file {resource_url}')
return 'Failed to download resource_url'
fo.seek(0)
with gzip.open(fo, 'rt') as f:
metadata = json.loads(f.__next__())
if ('metadata' not in metadata):
log.error(f'Missing metadata entry for {resource_url}')
return 'Cannot load resource file - missing metadata object in first line of file'
if (metadata['metadata']['type'] == 'namespace'):
bel.resources.namespace.load_terms(fo, metadata, forceupdate)
elif (metadata['metadata']['type'] == 'ortholog'):
bel.resources.ortholog.load_orthologs(fo, metadata)
finally:
if fo: fo.close()
|
Load BEL Resource file
Forceupdate will create a new index in Elasticsearch regardless of whether
an index with the resource version already exists.
Args:
resource_url: URL from which to download the resource to load into the BEL API
forceupdate: force full update - e.g. don't leave Elasticsearch indexes alone if their version ID matches
|
codesearchnet
|
def repository_contributors(self, **kwargs):
path = ('/projects/%s/repository/contributors' % self.get_id())
return self.manager.gitlab.http_list(path, **kwargs)
|
Return a list of contributors for the project.
Args:
all (bool): If True, return all the items, without pagination
per_page (int): Number of items to retrieve per request
page (int): ID of the page to return (starts with page 1)
as_list (bool): If set to False and no pagination option is
defined, return a generator instead of a list
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the server failed to perform the request
Returns:
list: The contributors
|
codesearchnet
|
def parse(cls, data: bytes) -> 'MessageContent':
lines = cls._find_lines(data)
view = memoryview(data)
return cls._parse(data, view, lines)
|
Parse the bytestring into message content.
Args:
data: The bytestring to parse.
|
codesearchnet
|
def convert_bytes_to_c_source(data, array_name, max_line_width=80, include_guard=None, include_path=None, use_tensorflow_license=False):
starting_pad = ' '
array_lines = []
array_line = starting_pad
for value in bytearray(data):
if len(array_line) + 4 > max_line_width:
array_lines.append(array_line + '\n')
array_line = starting_pad
array_line += ' 0x%02x,' % (value,)
if len(array_line) > len(starting_pad):
array_lines.append(array_line + '\n')
array_values = ''.join(array_lines)
if include_guard is None:
include_guard = 'TENSORFLOW_LITE_UTIL_' + array_name.upper() + '_DATA_H_'
if include_path is not None:
include_line = '#include "{include_path}"\n'.format(include_path=include_path)
else:
include_line = ''
if use_tensorflow_license:
license_text = '\n/* Copyright {year} The TensorFlow Authors. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the "License");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an "AS IS" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n==============================================================================*/\n'.format(year=datetime.date.today().year)
else:
license_text = ''
source_template = "{license_text}\n
source_text = source_template.format(array_name=array_name, array_length=len(data), array_values=array_values, license_text=license_text, include_line=include_line)
header_template = "\n{license_text}\n\n
header_text = header_template.format(array_name=array_name, include_guard=include_guard, license_text=license_text)
return (source_text, header_text)
|
Returns strings representing a C constant array containing `data`.
Args:
data: Byte array that will be converted into a C constant.
array_name: String to use as the variable name for the constant array.
max_line_width: The longest line length, for formatting purposes.
include_guard: Name to use for the include guard macro definition.
include_path: Optional path to include in the source file.
use_tensorflow_license: Whether to include the standard TensorFlow Apache2
license in the generated files.
Returns:
Text that can be compiled as a C source file to link in the data as a
literal array of values.
Text that can be used as a C header file to reference the literal array.
|
github-repos
|
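As an aside (hypothetical data, illustrative width), the hex line-wrapping pattern used by `convert_bytes_to_c_source` above can be exercised on its own:

data = bytes(range(10))
max_line_width = 24
starting_pad = '  '
lines, line = [], starting_pad
for value in data:
    # each entry ' 0xNN,' is 6 characters; wrap before exceeding the width budget
    if len(line) + 6 > max_line_width:
        lines.append(line + '\n')
        line = starting_pad
    line += ' 0x%02x,' % value
if len(line) > len(starting_pad):
    lines.append(line + '\n')
print(''.join(lines))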
def _MergeTaskStorage(self, storage_writer):
if self._processing_profiler:
self._processing_profiler.StartTiming('merge_check')
for task_identifier in storage_writer.GetProcessedTaskIdentifiers():
try:
task = self._task_manager.GetProcessedTaskByIdentifier(task_identifier)
self._task_manager.SampleTaskStatus(task, 'processed')
to_merge = self._task_manager.CheckTaskToMerge(task)
if (not to_merge):
storage_writer.RemoveProcessedTaskStorage(task)
self._task_manager.RemoveTask(task)
self._task_manager.SampleTaskStatus(task, 'removed_processed')
else:
storage_writer.PrepareMergeTaskStorage(task)
self._task_manager.UpdateTaskAsPendingMerge(task)
except KeyError:
logger.error('Unable to retrieve task: {0:s} to prepare it to be merged.'.format(task_identifier))
continue
if self._processing_profiler:
self._processing_profiler.StopTiming('merge_check')
task = None
if (not self._storage_merge_reader_on_hold):
task = self._task_manager.GetTaskPendingMerge(self._merge_task)
if (task or self._storage_merge_reader):
self._status = definitions.STATUS_INDICATOR_MERGING
if self._processing_profiler:
self._processing_profiler.StartTiming('merge')
if task:
if self._storage_merge_reader:
self._merge_task_on_hold = self._merge_task
self._storage_merge_reader_on_hold = self._storage_merge_reader
self._task_manager.SampleTaskStatus(self._merge_task_on_hold, 'merge_on_hold')
self._merge_task = task
try:
self._storage_merge_reader = storage_writer.StartMergeTaskStorage(task)
self._task_manager.SampleTaskStatus(task, 'merge_started')
except IOError as exception:
logger.error('Unable to merge results of task: {0:s} with error: {1!s}'.format(task.identifier, exception))
self._storage_merge_reader = None
if self._storage_merge_reader:
fully_merged = self._storage_merge_reader.MergeAttributeContainers(maximum_number_of_containers=self._MAXIMUM_NUMBER_OF_CONTAINERS)
else:
fully_merged = True
if self._processing_profiler:
self._processing_profiler.StopTiming('merge')
if fully_merged:
try:
self._task_manager.CompleteTask(self._merge_task)
except KeyError as exception:
logger.error('Unable to complete task: {0:s} with error: {1!s}'.format(self._merge_task.identifier, exception))
if (not self._storage_merge_reader_on_hold):
self._merge_task = None
self._storage_merge_reader = None
else:
self._merge_task = self._merge_task_on_hold
self._storage_merge_reader = self._storage_merge_reader_on_hold
self._merge_task_on_hold = None
self._storage_merge_reader_on_hold = None
self._task_manager.SampleTaskStatus(self._merge_task, 'merge_resumed')
self._status = definitions.STATUS_INDICATOR_RUNNING
self._number_of_produced_events = storage_writer.number_of_events
self._number_of_produced_sources = storage_writer.number_of_event_sources
self._number_of_produced_warnings = storage_writer.number_of_warnings
|
Merges a task storage with the session storage.
This function checks all task stores that are ready to merge and updates
the scheduled tasks. Note that to prevent this function holding up
the task scheduling loop only the first available task storage is merged.
Args:
storage_writer (StorageWriter): storage writer for a session storage used
to merge task storage.
|
codesearchnet
|
def _create_formatters(self, instrumentation_block, new_state):
formatters = []
if self._previous_block_never_completed(current_block=instrumentation_block, previous_block=instrumentation_block.previous_instrumentation_block, new_state=new_state):
instrumentation_block.previous_instrumentation_block.set_error_message(self.DEFAULT_INSTRUMENTATION_ERROR_MESSAGE)
formatters.append(_InstrumentationBlockFormatter(instrumentation_block.previous_instrumentation_block))
if not instrumentation_block.is_empty:
formatters.append(_InstrumentationBlockFormatter(instrumentation_block))
return formatters
|
Creates the _InstrumentationBlockFormatters for outputting the
instrumentation method blocks that have finished parsing.
Args:
instrumentation_block: _InstrumentationBlock, the current
instrumentation method block to create formatters based upon.
new_state: _InstrumentationBlockState, the next state that the
parser will transition to.
Returns:
A list of the formatters that need to create and add
TestResultRecords to the test results.
|
github-repos
|
def delta_E( self ):
site_delta_E = self.final_site.energy - self.initial_site.energy
if self.nearest_neighbour_energy:
site_delta_E += self.nearest_neighbour_delta_E()
if self.coordination_number_energy:
site_delta_E += self.coordination_number_delta_E()
return site_delta_E
|
The change in system energy if this jump were accepted.
Args:
None
Returns:
(Float): delta E
|
juraj-google-style
|
def create_bulk(self, resource, timeout=(- 1)):
uri = (self.URI + '/bulk')
default_values = self._get_default_values(self.BULK_DEFAULT_VALUES)
updated_data = self._helper.update_resource_fields(resource, default_values)
self._helper.create(updated_data, uri=uri, timeout=timeout)
return self.get_range(resource['namePrefix'], resource['vlanIdRange'])
|
Creates bulk Ethernet networks.
Args:
resource (dict): Specifications to create in bulk.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
list: List of created Ethernet Networks.
|
codesearchnet
|
def set_y_grid_info(self, y_low, y_high, num_y, yscale, yval_name):
self._set_grid_info('y', y_low, y_high, num_y, yscale, yval_name)
return
|
Set the grid values for y.
Create information for the grid of y values.
Args:
num_y (int): Number of points on axis.
y_low/y_high (float): Lowest/highest value for the axis.
yscale (str): Scale of the axis. Choices are 'log' or 'lin'.
yval_name (str): Name representing the axis. See GenerateContainer documentation
for options for the name.
|
codesearchnet
|
def Wget(src_url, tgt_name, tgt_root=None):
if tgt_root is None:
tgt_root = str(CFG["tmp_dir"])
from benchbuild.utils.cmd import wget
tgt_file = local.path(tgt_root) / tgt_name
if not source_required(tgt_file):
Copy(tgt_file, ".")
return
wget(src_url, "-O", tgt_file)
update_hash(tgt_file)
Copy(tgt_file, ".")
|
Download url, if required.
Args:
src_url (str): Our SOURCE url.
tgt_name (str): The filename we want to have on disk.
tgt_root (str): The TARGET directory for the download.
Defaults to ``CFG["tmpdir"]``.
|
juraj-google-style
|
def length(self, rows=None):
rows = tf.range(self._capacity) if rows is None else rows
return tf.gather(self._length, rows)
|
Tensor holding the current length of episodes.
Args:
rows: Episodes to select length from, defaults to all.
Returns:
Batch tensor of sequence lengths.
|
juraj-google-style
|
def make_scheduler(self, **kwargs):
from .launcher import PyFlowScheduler
if not kwargs:
sched = PyFlowScheduler.from_user_config()
else:
filepath = kwargs.pop("filepath", None)
if filepath is not None:
assert not kwargs
sched = PyFlowScheduler.from_file(filepath)
else:
sched = PyFlowScheduler(**kwargs)
sched.add_flow(self)
return sched
|
Build and return a :class:`PyFlowScheduler` to run the flow.
Args:
kwargs: if empty we use the user configuration file.
if `filepath` in kwargs we init the scheduler from filepath.
else pass **kwargs to :class:`PyFlowScheduler` __init__ method.
|
juraj-google-style
|
def run_conditional_decorators(self, context):
logger.debug("starting")
run_me = context.get_formatted_as_type(self.run_me, out_type=bool)
skip_me = context.get_formatted_as_type(self.skip_me, out_type=bool)
swallow_me = context.get_formatted_as_type(self.swallow_me,
out_type=bool)
if run_me:
if not skip_me:
try:
if self.retry_decorator:
self.retry_decorator.retry_loop(context,
self.invoke_step)
else:
self.invoke_step(context=context)
except Exception as ex_info:
if swallow_me:
logger.error(
f"{self.name} Ignoring error because swallow "
"is True for this step.\n"
f"{type(ex_info).__name__}: {ex_info}")
else:
raise
else:
logger.info(
f"{self.name} not running because skip is True.")
else:
logger.info(f"{self.name} not running because run is False.")
logger.debug("done")
|
Evaluate the step decorators to decide whether to run step or not.
Use pypyr.dsl.Step.run_step if you intend on executing the step the
same way pypyr does.
Args:
context: (pypyr.context.Context) The pypyr context. This arg will
mutate.
|
juraj-google-style
|
def num_mode_groups(self):
num = self._libinput.libinput_device_tablet_pad_get_num_mode_groups(self._handle)
if (num < 0):
raise AttributeError('This device is not a tablet pad device')
return num
|
Most devices only provide a single mode group, however devices
such as the Wacom Cintiq 22HD provide two mode groups.
If multiple mode groups are available, a caller should use
:meth:`~libinput.define.TabletPadModeGroup.has_button`,
:meth:`~libinput.define.TabletPadModeGroup.has_ring`
and :meth:`~libinput.define.TabletPadModeGroup.has_strip` to associate
each button, ring and strip with the correct mode group.
Returns:
int: The number of mode groups available on this device.
Raises:
AttributeError
|
codesearchnet
|
def validate_config_value(value, possible_values):
if value not in possible_values:
raise Exception('Invalid config value "%s". Possible values are '
'%s' % (value, ', '.join(e for e in possible_values)))
|
Validate a config value to make sure it is one of the possible values.
Args:
value: the config value to validate.
possible_values: the possible values the value can be
Raises:
Exception if the value is not one of possible values.
|
juraj-google-style
|
def __init__(self, key_path):
super(WindowsRegistryKeyPathFilter, self).__init__()
key_path = key_path.rstrip('\\')
self._key_path = key_path
key_path = key_path.upper()
self._key_path_upper = key_path
self._wow64_key_path = None
self._wow64_key_path_upper = None
if key_path.startswith(self._CONTROL_SET_PREFIX.upper()):
self._key_path_prefix, _, self._key_path_suffix = key_path.partition(
'CurrentControlSet'.upper())
else:
self._key_path_prefix = None
self._key_path_suffix = None
wow64_prefix = None
for key_path_prefix in self._WOW64_PREFIXES:
if key_path.startswith(key_path_prefix.upper()):
wow64_prefix = key_path_prefix
break
if wow64_prefix:
key_path_suffix = self._key_path[len(wow64_prefix):]
if key_path_suffix.startswith('\\'):
key_path_suffix = key_path_suffix[1:]
self._wow64_key_path = '\\'.join([
wow64_prefix, 'Wow6432Node', key_path_suffix])
self._wow64_key_path_upper = self._wow64_key_path.upper()
|
Initializes a Windows Registry key filter.
Args:
key_path (str): key path.
|
juraj-google-style
|
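A standalone illustration (hypothetical key path and prefix) of the Wow6432Node rewrite that the `__init__` above performs:

wow64_prefix = 'HKEY_LOCAL_MACHINE\\Software'
key_path = 'HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion'
suffix = key_path[len(wow64_prefix):].lstrip('\\')
wow64_key_path = '\\'.join([wow64_prefix, 'Wow6432Node', suffix])
print(wow64_key_path)  # HKEY_LOCAL_MACHINE\Software\Wow6432Node\Microsoft\Windows\CurrentVersion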
def replace_composites_with_components(structure):
if isinstance(structure, CompositeTensor):
return replace_composites_with_components(structure._type_spec._to_components(structure))
elif not nest.is_nested(structure):
return structure
else:
return nest.map_structure(replace_composites_with_components, structure, expand_composites=False)
|
Recursively replaces CompositeTensors with their components.
Args:
structure: A `nest`-compatible structure, possibly containing composite
tensors.
Returns:
A copy of `structure`, where each composite tensor has been replaced by
its components. The result will contain no composite tensors.
Note that `nest.flatten(replace_composites_with_components(structure))`
returns the same value as `nest.flatten(structure)`.
|
github-repos
|
def _get_localized_fn(path, root_dir):
local_fn = path
if path.startswith(root_dir):
local_fn = path.replace(root_dir, '', 1)
if (not local_fn.startswith('/')):
return ('/' + local_fn)
return local_fn
|
Return `path` relative to `root_dir`, expressed as an absolute path.
When `path` == ``/home/xex/somefile.txt`` and `root_dir` == ``/home``,
returned path will be ``/xex/somefile.txt``.
Args:
path (str): Absolute path beginning in `root_dir`.
root_dir (str): Absolute path containing `path` argument.
Returns:
str: Local `path` when `root_dir` is considered as root of FS.
|
codesearchnet
|
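The docstring example above, worked through as a standalone snippet:

path = '/home/xex/somefile.txt'
root_dir = '/home'
local_fn = path.replace(root_dir, '', 1) if path.startswith(root_dir) else path
if not local_fn.startswith('/'):
    local_fn = '/' + local_fn
print(local_fn)  # /xex/somefile.txt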
def _PrintProcessingTime(self, processing_status):
if not processing_status:
processing_time = '00:00:00'
else:
processing_time = time.time() - processing_status.start_time
time_struct = time.gmtime(processing_time)
processing_time = time.strftime('%H:%M:%S', time_struct)
self._output_writer.Write(
'Processing time\t\t: {0:s}\n'.format(processing_time))
|
Prints the processing time.
Args:
processing_status (ProcessingStatus): processing status.
|
juraj-google-style
|
def get_graphs(self, run_key, debug=False):
graph_dict = (self._run_key_to_debug_graphs if debug else self._run_key_to_original_graphs)
graph_wrappers = graph_dict.get(run_key, {})
graph_defs = dict()
for (device_name, wrapper) in graph_wrappers.items():
graph_defs[device_name] = wrapper.graph_def
return graph_defs
|
Get the runtime GraphDef protos associated with a run key.
Args:
run_key: A Session.run key.
debug: Whether the debugger-decorated graph is to be retrieved.
Returns:
A `dict` mapping device name to `GraphDef` protos.
|
codesearchnet
|
def get_all_existing(self, server_group):
self.log.info('Checking for existing scaling policy')
url = '{0}/applications/{1}/clusters/{2}/{1}/serverGroups'.format(API_URL, self.app, self.env)
response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
assert response.ok, 'Error looking for existing Autoscaling Policy for {0}: {1}'.format(self.app, response.text)
scalingpolicies = []
for servergroup in response.json():
if (servergroup['scalingPolicies'] and (servergroup['asg']['autoScalingGroupName'] == server_group)):
self.log.info('Found policies on %s', server_group)
scalingpolicies.append(servergroup['scalingPolicies'])
self.log.debug('Scaling policies: %s', scalingpolicies)
return scalingpolicies
|
Finds all existing scaling policies for an application.
Args:
server_group (str): Name of the server group to look up.
Returns:
scalingpolicies (list): List of all existing scaling policies for the application
|
codesearchnet
|
def create_variable(self, feature_column, name, shape, dtype=None, trainable=True, use_resource=True, initializer=None):
del feature_column, name, shape, dtype, trainable, use_resource, initializer
raise NotImplementedError('StateManager.create_variable')
|
Creates a new variable.
Args:
feature_column: A `FeatureColumn` object this variable corresponds to.
name: variable name.
shape: variable shape.
dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
trainable: Whether this variable is trainable or not.
use_resource: If true, we use resource variables. Otherwise we use
RefVariable.
initializer: initializer instance (callable).
Returns:
The created variable.
|
github-repos
|
def _update_dict(self, to_dict, from_dict):
for (key, value) in from_dict.items():
if ((key in to_dict) and isinstance(to_dict[key], dict) and isinstance(from_dict[key], dict)):
self._update_dict(to_dict[key], from_dict[key])
else:
to_dict[key] = from_dict[key]
|
Recursively merges the fields for two dictionaries.
Args:
to_dict (dict): The dictionary onto which the merge is executed.
from_dict (dict): The dictionary merged into to_dict
|
codesearchnet
|
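A module-level sketch of the same recursive merge (names are illustrative; the method above does this on `self`):

def update_dict(to_dict, from_dict):
    # merge nested dicts recursively, otherwise overwrite the value
    for key, value in from_dict.items():
        if key in to_dict and isinstance(to_dict[key], dict) and isinstance(value, dict):
            update_dict(to_dict[key], value)
        else:
            to_dict[key] = value

base = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
override = {'db': {'port': 5433}, 'debug': True}
update_dict(base, override)
print(base)  # {'db': {'host': 'localhost', 'port': 5433}, 'debug': True}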
def __init__(self, num_agents, observation_spec, action_spec):
self._num_agents = num_agents
self._observation_spec = observation_spec
self._action_spec = action_spec
self._episode_steps = 0
self.next_timestep = [
environment.TimeStep(
step_type=environment.StepType.MID,
reward=0.,
discount=1.,
observation=self._default_observation(obs_spec, agent_index))
for agent_index, obs_spec in enumerate(observation_spec)]
self.episode_length = float('inf')
|
Initializes the TestEnvironment.
The `next_timestep` is initialized to be reward = 0., discount = 1.,
and an appropriately sized observation of all zeros. `episode_length` is set
to `float('inf')`.
Args:
num_agents: The number of agents.
observation_spec: The observation specs for each player.
action_spec: The action specs for each player.
|
juraj-google-style
|
def write_uint64(self, value, little_endian=True):
if little_endian:
endian = '<'
else:
endian = '>'
return self.pack(('%sQ' % endian), value)
|
Pack the value as an unsigned integer and write 8 bytes to the stream.
Args:
value (int): unsigned integer value to pack and write.
little_endian (bool): specify the endianness. (Default) Little endian.
Returns:
int: the number of bytes written.
|
codesearchnet
|
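The underlying packing, shown with the standard library directly (illustrative value):

import struct

value = 0x0102030405060708
little = struct.pack('<Q', value)  # least-significant byte first
big = struct.pack('>Q', value)     # most-significant byte first
print(little.hex())  # 0807060504030201
print(big.hex())     # 0102030405060708
print(len(little))   # 8 bytes written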
def end_of_chunk(prev_tag, tag, prev_type, type_):
chunk_end = False
if prev_tag == 'E': chunk_end = True
if prev_tag == 'S': chunk_end = True
if prev_tag == 'B' and tag == 'B': chunk_end = True
if prev_tag == 'B' and tag == 'S': chunk_end = True
if prev_tag == 'B' and tag == 'O': chunk_end = True
if prev_tag == 'I' and tag == 'B': chunk_end = True
if prev_tag == 'I' and tag == 'S': chunk_end = True
if prev_tag == 'I' and tag == 'O': chunk_end = True
if prev_tag != 'O' and prev_tag != '.' and prev_type != type_:
chunk_end = True
return chunk_end
|
Checks if a chunk ended between the previous and current word.
Args:
prev_tag: previous chunk tag.
tag: current chunk tag.
prev_type: previous type.
type_: current type.
Returns:
chunk_end: boolean.
|
juraj-google-style
|
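A quick usage sketch (assuming the `end_of_chunk` function above is in scope; the tag sequence is made up):

tags = ['B', 'I', 'I', 'O', 'B']
types = ['PER', 'PER', 'PER', 'O', 'LOC']
prev_tag, prev_type = 'O', 'O'
for tag, type_ in zip(tags, types):
    # True exactly where a chunk closed before this token (here: at the 'O' after 'B I I')
    print(tag, end_of_chunk(prev_tag, tag, prev_type, type_))
    prev_tag, prev_type = tag, type_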
def ParseFileEntry(self, parser_mediator, file_entry):
index_file_parser = ChromeCacheIndexFileParser()
file_object = file_entry.GetFileObject()
try:
index_file_parser.ParseFileObject(parser_mediator, file_object)
except (IOError, errors.ParseError) as exception:
file_object.close()
display_name = parser_mediator.GetDisplayName()
raise errors.UnableToParseFile('[{0:s}] unable to parse index file {1:s} with error: {2!s}'.format(self.NAME, display_name, exception))
try:
file_system = file_entry.GetFileSystem()
self._ParseIndexTable(parser_mediator, file_system, file_entry, index_file_parser.index_table)
finally:
file_object.close()
|
Parses Chrome Cache files.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_entry (dfvfs.FileEntry): file entry.
Raises:
UnableToParseFile: when the file cannot be parsed.
|
codesearchnet
|
def python_value(self, value):
value = super(ArrowDateTimeField, self).python_value(value)
if isinstance(value, (datetime.datetime, datetime.date, string_types)):
return arrow.get(value)
return value
|
Return the value in the database as an arrow object.
Returns:
arrow.Arrow: An instance of arrow with the field filled in.
|
codesearchnet
|
def recipe_sheets_clear(config, auth_read, sheets_sheet, sheets_tab, sheets_range):
sheets(config, {'auth': auth_read, 'sheet': sheets_sheet, 'tab': sheets_tab, 'range': sheets_range, 'clear': True})
|
Clear data from a sheet.
Args:
auth_read (authentication) - Credentials used for reading data.
sheets_sheet (string) - NA
sheets_tab (string) - NA
sheets_range (string) - NA
|
github-repos
|
def size(self, path):
try:
return os.path.getsize(path)
except Exception as e:
raise BeamIOError('Size operation failed', {path: e})
|
Get size of path on the FileSystem.
Args:
path: string path in question.
Returns: int size of path according to the FileSystem.
Raises:
``BeamIOError``: if path doesn't exist.
|
github-repos
|
async def _on_trace_notification(self, trace_event):
conn_string = trace_event.get('connection_string')
payload = trace_event.get('payload')
await self.notify_event(conn_string, 'trace', payload)
|
Callback function called when a trace chunk is received.
Args:
trace_event (dict): The received trace event information
|
juraj-google-style
|
def _expand_terms(self, terms):
ret = {
'keywords': list(),
'doc': list()}
if not isinstance(terms, dict):
stp = SearchTermParser()
terms = stp.parse(terms, term_join=self.backend._and_join)
if 'about' in terms:
ret['doc'].append(terms['about'])
if 'source' in terms:
ret['keywords'].append(terms['source'])
return ret
|
Expands terms of the dataset to the appropriate fields. It will parse the search phrase
and return only the search term components that are applicable to a Dataset query.
Args:
terms (dict or str):
Returns:
dict: keys are field names, values are query strings
|
juraj-google-style
|
def tf_baseline_loss(self, states, internals, reward, update, reference=None):
if self.baseline_mode == 'states':
loss = self.baseline.loss(
states=states,
internals=internals,
reward=reward,
update=update,
reference=reference
)
elif self.baseline_mode == 'network':
loss = self.baseline.loss(
states=self.network.apply(x=states, internals=internals, update=update),
internals=internals,
reward=reward,
update=update,
reference=reference
)
regularization_loss = self.baseline.regularization_loss()
if regularization_loss is not None:
loss += regularization_loss
return loss
|
Creates the TensorFlow operations for calculating the baseline loss of a batch.
Args:
states: Dict of state tensors.
internals: List of prior internal state tensors.
reward: Reward tensor.
update: Boolean tensor indicating whether this call happens during an update.
reference: Optional reference tensor(s), in case of a comparative loss.
Returns:
Loss tensor.
|
juraj-google-style
|
def setModelData(self, spinBox, model, index):
spinBox.interpretText()
value = spinBox.value()
model.setData(index, value, QtCore.Qt.EditRole)
|
Gets data from the editor widget and stores it in the specified model at the item index.
Args:
spinBox (QDoubleSpinBox): editor widget.
model (QAbstractItemModel): parent model.
index (QModelIndex): model data index.
|
juraj-google-style
|
def _CheckIsDirectory(self, file_entry):
if definitions.FILE_ENTRY_TYPE_DIRECTORY not in self._file_entry_types:
return False
return file_entry.IsDirectory()
|
Checks the is_directory find specification.
Args:
file_entry (FileEntry): file entry.
Returns:
bool: True if the file entry matches the find specification, False if not.
|
juraj-google-style
|
def sin(cls, x: 'TensorFluent') -> 'TensorFluent':
return cls._unary_op(x, tf.sin, tf.float32)
|
Returns a TensorFluent for the sin function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the sin function.
|
codesearchnet
|
def grid_deploy(site, nodes, options):
gk = get_api_client()
environment = options.pop("env_name")
options.update(environment=environment)
options.update(nodes=nodes)
key_path = DEFAULT_SSH_KEYFILE
options.update(key=key_path.read_text())
logger.info("Deploying %s with options %s" % (nodes, options))
deployment = gk.sites[site].deployments.create(options)
while deployment.status not in ["terminated", "error"]:
deployment.refresh()
print("Waiting for the end of deployment [%s]" % deployment.uid)
time.sleep(10)
deploy = []
undeploy = []
if deployment.status == "terminated":
deploy = [node for node, v in deployment.result.items()
if v["state"] == "OK"]
undeploy = [node for node, v in deployment.result.items()
if v["state"] == "KO"]
elif deployment.status == "error":
undeploy = nodes
return deploy, undeploy
|
Deploy and wait for the deployment to be finished.
Args:
site(str): the site
nodes(list): list of nodes (str) to deploy
options(dict): option of the deployment (refer to the Grid'5000 API
Specifications)
Returns:
tuple of deployed(list), undeployed(list) nodes.
|
juraj-google-style
|
def testBroadcastDimension(self, axis, row_length, original_dim_sizes, broadcast_dim_sizes):
original_shape = RaggedTensorDynamicShape.from_dim_sizes(original_dim_sizes)
bcast_shape = RaggedTensorDynamicShape.from_dim_sizes(broadcast_dim_sizes)
self.assertEqual(original_shape.rank, bcast_shape.rank)
bcast1 = original_shape.broadcast_dimension(axis, row_length)
bcast2 = bcast_shape.broadcast_dimension(axis, row_length)
bcast3 = bcast_shape.broadcast_dimension(axis, 1)
self.assertShapeEq(bcast1, bcast_shape)
self.assertShapeEq(bcast2, bcast_shape)
self.assertShapeEq(bcast3, bcast_shape)
|
Tests for the broadcast_dimension method.
Verifies that:
* `original.broadcast_dimension(axis, row_length) == broadcast`
* `broadcast.broadcast_dimension(axis, row_length) == broadcast`
* `broadcast.broadcast_dimension(axis, 1) == broadcast`
Args:
axis: The axis to broadcast
row_length: The slice lengths to broadcast to.
original_dim_sizes: The dimension sizes before broadcasting.
original_dim_sizes[axis] should be equal to `1` or `row_length`.
broadcast_dim_sizes: The dimension sizes after broadcasting.
|
github-repos
|
def __init__(self, granularity: Granularity) -> None:
super().__init__()
self.chunks = ['']
self.row = 0
self.col = 0
self.current_word = ''
self.on_split_row = False
self.granularity = granularity
|
Initializes the HTML parser for the KNBC corpus.
Args:
granularity: Granularity of the output chunks.
|
github-repos
|
def get_tensor_by_name(self, name) -> tensor_lib.Tensor:
if not isinstance(name, str):
raise TypeError('Tensor names are strings (or similar), not %s.' % type(name).__name__)
tensor = cast(tensor_lib.Tensor, self.as_graph_element(name, allow_tensor=True, allow_operation=False))
return tensor
|
Returns the `Tensor` with the given `name`.
This method may be called concurrently from multiple threads.
Args:
name: The name of the `Tensor` to return.
Returns:
The `Tensor` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to a tensor in this graph.
|
github-repos
|
def columns(self, dimensions=None):
if dimensions is None:
dimensions = self.dimensions()
else:
dimensions = [self.get_dimension(d, strict=True) for d in dimensions]
return OrderedDict([(d.name, self.dimension_values(d)) for d in dimensions])
|
Convert dimension values to a dictionary.
Returns a dictionary of column arrays along each dimension
of the element.
Args:
dimensions: Dimensions to return as columns
Returns:
Dictionary of arrays for each dimension
|
juraj-google-style
|
def encode(self, s):
try:
import matplotlib.image as im
except ImportError as e:
tf.logging.warning(
"Reading an image requires matplotlib to be installed: %s", e)
raise NotImplementedError("Image reading not implemented.")
return im.imread(s)
|
Transform a string with a filename into a list of RGB integers.
Args:
s: path to the file with an image.
Returns:
ids: the image's RGB values as a numpy array (as returned by `matplotlib.image.imread`)
|
juraj-google-style
|
def acos(cls, x: 'TensorFluent') -> 'TensorFluent':
return cls._unary_op(x, tf.acos, tf.float32)
|
Returns a TensorFluent for the arccos function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the arccos function.
|
codesearchnet
|
def __fa_process_sequence(self, sequence, avoid, initial_state, execution_state, trace_current, next_addr):
ip = sequence.address
next_ip = None
while ip:
try:
instr = sequence.fetch(ip)
except ReilSequenceInvalidAddressError:
assert split_address(ip)[1] == 0x0
next_ip = ip
break
try:
target_addr = sequence.get_next_address(ip)
except ReilSequenceInvalidAddressError:
target_addr = next_addr
next_ip = self.__process_instr(instr, avoid, target_addr, initial_state, execution_state, trace_current)
try:
ip = next_ip if next_ip else sequence.get_next_address(ip)
except ReilSequenceInvalidAddressError:
break
return next_ip
|
Process a REIL sequence.
Args:
sequence (ReilSequence): A REIL sequence to process.
avoid (list): List of address to avoid.
initial_state: Initial state.
execution_state: Execution state queue.
trace_current (list): Current trace.
next_addr: Address of the next instruction following the current one.
Returns:
Returns the next instruction to execute in case there is one, otherwise returns None.
|
juraj-google-style
|
def ParseTable(table):
precondition.AssertIterableType(table, dict)
result = rdf_osquery.OsqueryTable()
result.header = ParseHeader(table)
for row in table:
result.rows.append(ParseRow(result.header, row))
return result
|
Parses table of osquery output.
Args:
table: A table in a "parsed JSON" representation.
Returns:
A parsed `rdf_osquery.OsqueryTable` instance.
|
juraj-google-style
|
def parse_statement(self, statement, orig_contents):
children = []
is_block = False
name = statement.getName()
if name == 'block':
children_statements = statement[1]
for child in children_statements:
parsed = self.parse_statement(child, orig_contents=orig_contents)
children.append(parsed)
locn = statement[0]['location']
statement = statement[0][1]
name = statement.getName()
is_block = True
else:
stmt_language = get_statement()
locn = statement['location']
statement = statement['match']
statement_string = str(u"".join(statement.asList()))
try:
statement = stmt_language.parseString(statement_string)[0]
except (pyparsing.ParseException, pyparsing.ParseSyntaxException) as exc:
raise SensorGraphSyntaxError("Error parsing statement in sensor graph file", message=exc.msg, line=pyparsing.line(locn, orig_contents).strip(), line_number=pyparsing.lineno(locn, orig_contents), column=pyparsing.col(locn, orig_contents))
except SensorGraphSemanticError as exc:
raise SensorGraphSemanticError(exc.msg, line=pyparsing.line(locn, orig_contents).strip(), line_number=pyparsing.lineno(locn, orig_contents), **exc.params)
name = statement.getName()
if name not in statement_map:
raise ArgumentError("Unknown statement in sensor graph file", parsed_statement=statement, name=name)
line = pyparsing.line(locn, orig_contents).strip()
line_number = pyparsing.lineno(locn, orig_contents)
column = pyparsing.col(locn, orig_contents)
location_info = LocationInfo(line, line_number, column)
if is_block:
return statement_map[name](statement, children=children, location=location_info)
return statement_map[name](statement, location_info)
|
Parse a statement, possibly called recursively.
Args:
statement (int, ParseResult): The pyparsing parse result that
contains one statement prepended with the match location
orig_contents (str): The original contents of the file that we're
parsing in case we need to convert an index into a line, column
pair.
Returns:
SensorGraphStatement: The parsed statement.
|
juraj-google-style
|
def from_dict(cls, config_dict: dict[str, Any], **kwargs) -> 'PretrainedConfig':
return_unused_kwargs = kwargs.pop('return_unused_kwargs', False)
kwargs.pop('_from_auto', None)
kwargs.pop('_from_pipeline', None)
if '_commit_hash' in kwargs and '_commit_hash' in config_dict:
kwargs['_commit_hash'] = config_dict['_commit_hash']
config_dict['attn_implementation'] = kwargs.pop('attn_implementation', None)
config = cls(**config_dict)
if hasattr(config, 'pruned_heads'):
config.pruned_heads = {int(key): value for key, value in config.pruned_heads.items()}
if 'num_labels' in kwargs and 'id2label' in kwargs:
num_labels = kwargs['num_labels']
id2label = kwargs['id2label'] if kwargs['id2label'] is not None else []
if len(id2label) != num_labels:
raise ValueError(f"You passed along `num_labels={num_labels}` with an incompatible id to label map: {kwargs['id2label']}. Since those arguments are inconsistent with each other, you should remove one of them.")
to_remove = []
for key, value in kwargs.items():
if hasattr(config, key):
current_attr = getattr(config, key)
if isinstance(current_attr, PretrainedConfig) and isinstance(value, dict):
value = current_attr.__class__(**value)
setattr(config, key, value)
if key != 'torch_dtype':
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
logger.info(f'Model config {config}')
if return_unused_kwargs:
return (config, kwargs)
else:
return config
|
Instantiates a [`PretrainedConfig`] from a Python dictionary of parameters.
Args:
config_dict (`Dict[str, Any]`):
Dictionary that will be used to instantiate the configuration object. Such a dictionary can be
retrieved from a pretrained checkpoint by leveraging the [`~PretrainedConfig.get_config_dict`] method.
kwargs (`Dict[str, Any]`):
Additional parameters from which to initialize the configuration object.
Returns:
[`PretrainedConfig`]: The configuration object instantiated from those parameters.
|
github-repos
|
def numeric_task_id(task_id):
if task_id is not None:
if task_id.startswith('task-'):
return int(task_id[len('task-'):])
else:
return int(task_id)
|
Converts a task-id to the numeric task-id.
Args:
task_id: task-id in either task-n or n format
Returns:
n
|
juraj-google-style
|
def _get_fans(shape):
if len(shape) == 2:
fan_in = shape[0]
fan_out = shape[1]
elif len(shape) == 4 or len(shape) == 5:
kernel_size = np.prod(shape[:2])
fan_in = shape[-2] * kernel_size
fan_out = shape[-1] * kernel_size
else:
fan_in = np.sqrt(np.prod(shape))
fan_out = np.sqrt(np.prod(shape))
return fan_in, fan_out
|
r"""Returns the size of input dimension and output dimension, given `shape`.
Args:
shape: A list of integers.
Returns:
fan_in: An int. The value of input dimension.
fan_out: An int. The value of output dimension.
|
juraj-google-style
|
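Worked numbers for the two main branches above (illustrative shapes):

import numpy as np

dense_shape = [128, 64]      # 2-D weights: fan_in=128, fan_out=64
conv_shape = [3, 3, 16, 32]  # 4-D conv kernel: 3x3 kernel, 16 in, 32 out channels
kernel_size = np.prod(conv_shape[:2])                               # 9
print(dense_shape[0], dense_shape[1])                               # 128 64
print(conv_shape[-2] * kernel_size, conv_shape[-1] * kernel_size)   # 144 288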
def upsert_run(self, id=None, name=None, project=None, host=None, group=None, tags=None, config=None, description=None, entity=None, state=None, repo=None, job_type=None, program_path=None, commit=None, sweep_name=None, summary_metrics=None, num_retries=None):
mutation = gql('\n mutation UpsertBucket(\n $id: String, $name: String,\n $project: String,\n $entity: String!,\n $groupName: String,\n $description: String,\n $commit: String,\n $config: JSONString,\n $host: String,\n $debug: Boolean,\n $program: String,\n $repo: String,\n $jobType: String,\n $state: String,\n $sweep: String,\n $tags: [String!],\n $summaryMetrics: JSONString,\n ) {\n upsertBucket(input: {\n id: $id,\n name: $name,\n groupName: $groupName,\n modelName: $project,\n entityName: $entity,\n description: $description,\n config: $config,\n commit: $commit,\n host: $host,\n debug: $debug,\n jobProgram: $program,\n jobRepo: $repo,\n jobType: $jobType,\n state: $state,\n sweep: $sweep,\n tags: $tags,\n summaryMetrics: $summaryMetrics,\n }) {\n bucket {\n id\n name\n description\n config\n project {\n id\n name\n entity {\n id\n name\n }\n }\n }\n }\n }\n ')
if (config is not None):
config = json.dumps(config)
if (not description):
description = None
kwargs = {}
if (num_retries is not None):
kwargs['num_retries'] = num_retries
variable_values = {'id': id, 'entity': (entity or self.settings('entity')), 'name': name, 'project': project, 'groupName': group, 'tags': tags, 'description': description, 'config': config, 'commit': commit, 'host': host, 'debug': env.is_debug(), 'repo': repo, 'program': program_path, 'jobType': job_type, 'state': state, 'sweep': sweep_name, 'summaryMetrics': summary_metrics}
response = self.gql(mutation, variable_values=variable_values, **kwargs)
run = response['upsertBucket']['bucket']
project = run.get('project')
if project:
self.set_setting('project', project['name'])
entity = project.get('entity')
if entity:
self.set_setting('entity', entity['name'])
return response['upsertBucket']['bucket']
|
Update a run
Args:
id (str, optional): The existing run to update
name (str, optional): The name of the run to create
group (str, optional): Name of the group this run is a part of
project (str, optional): The name of the project
config (dict, optional): The latest config params
description (str, optional): A description of this project
entity (str, optional): The entity to scope this project to.
repo (str, optional): Url of the program's repository.
state (str, optional): State of the program.
job_type (str, optional): Type of job, e.g 'train'.
program_path (str, optional): Path to the program.
commit (str, optional): The Git SHA to associate the run with
summary_metrics (str, optional): The JSON summary metrics
|
codesearchnet
|
def last(series, order_by=None):
if (order_by is not None):
series = order_series_by(series, order_by)
last_s = series.iloc[(series.size - 1)]
return last_s
|
Returns the last value of a series.
Args:
series (pandas.Series): column to summarize.
Kwargs:
order_by: a pandas.Series or list of series (can be symbolic) to order
the input series by before summarization.
|
codesearchnet
|
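The same summarisation in plain pandas (illustrative data; `order_series_by` above is the dplyr-style helper this sketch replaces with `argsort`):

import pandas as pd

s = pd.Series([10, 20, 30])
print(s.iloc[s.size - 1])  # 30, the unordered last value

order = pd.Series([2, 0, 1])
reordered = s.iloc[order.argsort().values]
print(reordered.iloc[-1])  # 10, the last value after ordering by `order`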
def get_image_features(self, pixel_values: torch.FloatTensor, qformer_input_ids: torch.LongTensor, qformer_attention_mask: Optional[torch.LongTensor]=None, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=False):
vision_outputs = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True)
image_embeds = vision_outputs[0]
image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device)
if qformer_attention_mask is None:
qformer_attention_mask = torch.ones_like(qformer_input_ids)
qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1)
query_outputs = self.qformer(input_ids=qformer_input_ids, attention_mask=qformer_attention_mask, query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=True)
query_output = query_outputs[0][:, :query_tokens.size(1), :]
language_model_inputs = self.language_projection(query_output)
if return_dict:
return (language_model_inputs, vision_outputs, query_outputs)
return language_model_inputs
|
Encodes images into continuous embeddings that can be forwarded to the language model.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The tensors corresponding to the input images.
|
github-repos
|
def distance_between(self, u, v):
if not isinstance(u, Node):
raise TypeError("u must be a Node")
if not isinstance(v, Node):
raise TypeError("v must be a Node")
if u == v:
return 0.
u_dists = {u:0.}; v_dists = {v:0.}
c = u; p = u.parent
while p is not None:
u_dists[p] = u_dists[c]
if c.edge_length is not None:
u_dists[p] += c.edge_length
c = p; p = p.parent
c = v; p = v.parent
while p is not None:
v_dists[p] = v_dists[c]
if c.edge_length is not None:
v_dists[p] += c.edge_length
if p in u_dists:
return u_dists[p] + v_dists[p]
c = p; p = p.parent
raise RuntimeError("u and v are not in the same Tree")
|
Return the distance between nodes ``u`` and ``v`` in this ``Tree``
Args:
``u`` (``Node``): Node ``u``
``v`` (``Node``): Node ``v``
Returns:
``float``: The distance between nodes ``u`` and ``v``
|
juraj-google-style
|
def construct(parent=None, defaults=None, **kwargs):
for key in kwargs:
assert (key in LEGAL_ATTRS), '{} is not legal input'.format(key)
if (parent is not None):
for (key, value) in LEGAL_ATTRS.items():
if ((key not in kwargs) and hasattr(parent, value)):
kwargs[key] = getattr(parent, value)
assert ('cdf' in kwargs), 'cdf function must be defined'
assert ('bnd' in kwargs), 'bnd function must be defined'
if (('str' in kwargs) and isinstance(kwargs['str'], str)):
string = kwargs.pop('str')
kwargs['str'] = (lambda *args, **kwargs: string)
defaults = (defaults if defaults else {})
for key in defaults:
assert (key in LEGAL_ATTRS), 'invalid default value {}'.format(key)
def custom_distribution(**kws):
prm = defaults.copy()
prm.update(kws)
dist = Dist(**prm)
for (key, function) in kwargs.items():
attr_name = LEGAL_ATTRS[key]
setattr(dist, attr_name, types.MethodType(function, dist))
return dist
if ('doc' in kwargs):
custom_distribution.__doc__ = kwargs['doc']
return custom_distribution
|
Random variable constructor.
Args:
cdf:
Cumulative distribution function. Optional if ``parent`` is used.
bnd:
Boundary interval. Optional if ``parent`` is used.
parent (Dist):
Distribution used as basis for new distribution. Any other argument
that is omitted will instead take is function from ``parent``.
doc (str]):
Documentation for the distribution.
str (str, :py:data:typing.Callable):
Pretty print of the variable.
pdf:
Probability density function.
ppf:
Point percentile function.
mom:
Raw moment generator.
ttr:
Three terms recursion coefficient generator.
init:
Custom initialiser method.
defaults (dict):
Default values to provide to initialiser.
Returns:
(Dist):
New custom distribution.
|
codesearchnet
|
def postings(self, quarter, stats_counter=None):
logging.info('Finding postings for %s', quarter)
for posting in self._iter_postings(quarter):
transformed = self._transform(posting)
transformed['id'] = '{}_{}'.format(
self.partner_id,
self._id(posting)
)
if stats_counter:
stats_counter.track(
input_document=posting,
output_document=transformed
)
yield transformed
|
Yield job postings in common schema format
Args:
quarter (str) The quarter, in format '2015Q1'
stats_counter (object, optional) A counter that can track both
input and output documents using a 'track' method.
|
juraj-google-style
|
def mounts(prefix, __mounts):
i = 0
mntpoints = []
for mount in __mounts:
if not isinstance(mount, dict):
mntpoint = "{0}/{1}".format(prefix, str(i))
mntpoints.append(mntpoint)
i = i + 1
return mntpoints
|
Compute the mountpoints of the current user.
Args:
prefix: Define where the job was running if it ran on a cluster.
__mounts: All mounts the user currently uses in his file system.
Returns:
mntpoints
|
juraj-google-style
|
def FormatCode(unformatted_source, filename='<unknown>', style_config=None, lines=None, print_diff=False):
try:
tree = pytree_utils.ParseCodeToTree(unformatted_source)
except Exception as e:
e.filename = filename
raise errors.YapfError(errors.FormatErrorMsg(e))
reformatted_source = FormatTree(tree, style_config=style_config, lines=lines)
if unformatted_source == reformatted_source:
return ('' if print_diff else reformatted_source, False)
if print_diff:
code_diff = _GetUnifiedDiff(unformatted_source, reformatted_source, filename=filename)
return (code_diff, code_diff.strip() != '')
return (reformatted_source, True)
|
Format a string of Python code.
This provides an alternative entry point to YAPF.
Arguments:
unformatted_source: (unicode) The code to format.
filename: (unicode) The name of the file being reformatted.
style_config: (string) Either a style name or a path to a file that contains
formatting style settings. If None is specified, use the default style
as set in style.DEFAULT_STYLE_FACTORY
lines: (list of tuples of integers) A list of tuples of lines, [start, end],
that we want to format. The lines are 1-based indexed. It can be used by
third-party code (e.g., IDEs) when reformatting a snippet of code rather
than a whole file.
print_diff: (bool) Instead of returning the reformatted source, return a
diff that turns the unformatted source into the reformatted source.
Returns:
Tuple of (reformatted_source, changed). reformatted_source conforms to the
desired formatting style. changed is True if the source changed.
|
github-repos
|
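yapf exposes this as its library entry point; a minimal usage sketch, assuming yapf is installed (import path as published by yapf):

from yapf.yapflib.yapf_api import FormatCode

source = "x = {  'a':37,'b':42}\n"
formatted, changed = FormatCode(source, style_config='pep8')
print(changed)    # True
print(formatted)  # x = {'a': 37, 'b': 42}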
def __init__(self, map_task, counter_factory, state_sampler, test_shuffle_source=None, test_shuffle_sink=None):
self._map_task = map_task
self._counter_factory = counter_factory
self._ops = []
self._state_sampler = state_sampler
self._test_shuffle_source = test_shuffle_source
self._test_shuffle_sink = test_shuffle_sink
|
Initializes SimpleMapTaskExecutor.
Args:
map_task: The map task we are to run. The maptask contains a list of
operations, and aligned lists for step_names, original_names,
system_names of pipeline steps.
counter_factory: The CounterFactory instance for the work item.
state_sampler: The StateSampler tracking the execution step.
test_shuffle_source: Used during tests for dependency injection into
shuffle read operation objects.
test_shuffle_sink: Used during tests for dependency injection into
shuffle write operation objects.
|
github-repos
|
def get_credentials_for_url(url, opts, force_user=None):
creds = None
verbose = int(opts.get('verbose'))
force_prompt = opts.get('prompt', False)
allow_prompt = (not opts.get('no_prompt', True))
allow_keyring = ((not opts.get('no_keyring', False)) and (not force_user))
allow_netrc = ((not opts.get('no_netrc', False)) and (not force_user))
if (force_user and (not allow_prompt)):
raise RuntimeError('Cannot get credentials for a distinct user ({}) from keyring or .netrc and prompting is disabled.'.format(force_user))
home_path = os.path.expanduser('~')
file_path = os.path.join(home_path, DEFAULT_CREDENTIAL_STORE)
if os.path.isfile(file_path):
raise RuntimeError('Custom password files are no longer supported. Delete {} and use .netrc instead.'.format(file_path))
if ((creds is None) and keyring and allow_keyring):
try:
c = keyring.get_password('pyftpsync', url)
if (c is not None):
creds = c.split(':', 1)
write("Using credentials from keyring('pyftpsync', '{}'): {}:***.".format(url, creds[0]))
elif (verbose >= 4):
write("No credentials found in keyring('pyftpsync', '{}').".format(url))
except Exception as e:
write_error('Could not get password from keyring {}'.format(e))
if ((creds is None) and allow_netrc):
try:
authenticators = None
authenticators = netrc.netrc().authenticators(url)
except CompatFileNotFoundError:
if (verbose >= 4):
write('Could not get password (no .netrc file).')
except Exception as e:
write_error('Could not read .netrc: {}.'.format(e))
if authenticators:
creds = (authenticators[0], authenticators[2])
write('Using credentials from .netrc file: {}:***.'.format(creds[0]))
elif (verbose >= 4):
write("Could not find entry for '{}' in .netrc file.".format(url))
if allow_prompt:
if (creds is None):
creds = prompt_for_password(url)
elif force_prompt:
creds = prompt_for_password(url, default_user=creds[0])
return creds
|
Lookup credentials for a given target in keyring and .netrc.
Optionally prompts for credentials if not found.
Returns:
2-tuple (username, password) or None
|
codesearchnet
|
def inferred_steps(self):
return self._inferred_steps
|
The inferred steps per epoch of the created `Dataset`.
This will be `None` in the case where:
(1) A `Dataset` of unknown cardinality was passed to the `DataHandler`, and
(2) `steps_per_epoch` was not provided, and
(3) The first epoch of iteration has not yet completed.
Returns:
The inferred steps per epoch of the created `Dataset`.
|
github-repos
|
def forward(self, input_ids: torch.Tensor, cache_position: torch.Tensor) -> torch.Tensor:
return self.model.forward(input_ids, cache_position)
|
Forward pass of the module, which is compatible with the ExecuTorch llm runner.
Args:
input_ids (`torch.Tensor`): Tensor representing current input token id to the module.
cache_position (`torch.Tensor`): Tensor representing current input position in the cache.
Returns:
torch.Tensor: Logits output from the model.
|
github-repos
|
def get_node(self, index: int) -> Optional[Node]:
return self._nodes.get(index)
|
Returns the node with the given index if such a node currently exists in the node list.
Arguments:
index (int): The index of the queried node.
Returns:
The node with the given index if such a node currently exists in the node list,
`None` otherwise.
|
juraj-google-style
|
def list_container_instance_groups_sub(access_token, subscription_id):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/providers/Microsoft.ContainerInstance/ContainerGroups',
'?api-version=', CONTAINER_API])
return do_get(endpoint, access_token)
|
List the container groups in a subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON list of container groups and their properties.
|
juraj-google-style
|
def _AlignDecryptedDataOffset(self, decrypted_data_offset):
self._file_object.seek(0, os.SEEK_SET)
self._decrypter = self._GetDecrypter()
self._decrypted_data = b''
encrypted_data_offset = 0
encrypted_data_size = self._file_object.get_size()
while (encrypted_data_offset < encrypted_data_size):
read_count = self._ReadEncryptedData(self._ENCRYPTED_DATA_BUFFER_SIZE)
if (read_count == 0):
break
encrypted_data_offset += read_count
if (decrypted_data_offset < self._decrypted_data_size):
self._decrypted_data_offset = decrypted_data_offset
break
decrypted_data_offset -= self._decrypted_data_size
|
Aligns the encrypted file with the decrypted data offset.
Args:
decrypted_data_offset (int): decrypted data offset.
|
codesearchnet
|
def satellites_used(feed):
total_satellites = 0
used_satellites = 0
if not isinstance(feed, list):
return 0, 0
for satellites in feed:
total_satellites += 1
if satellites['used'] is True:
used_satellites += 1
return total_satellites, used_satellites
|
Counts number of satellites used in calculation from total visible satellites
Arguments:
feed: satellite list, e.g. ``data_stream.TPV['satellites']``
Returns:
total_satellites(int):
used_satellites (int):
|
juraj-google-style
|
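With a hypothetical TPV satellite feed, the counting above reduces to:

feed = [{'used': True}, {'used': False}, {'used': True}, {'used': True}]
total_satellites = len(feed)
used_satellites = sum(1 for sat in feed if sat['used'] is True)
print(total_satellites, used_satellites)  # 4 3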
def check_symmetry(A):
A = asanyarray(A)
if (A.ndim != 2):
raise ValueError('Checks symmetry only for bi-dimensional arrays.')
if (A.shape[0] != A.shape[1]):
return False
return (abs((A - A.T)).max() < sqrt(finfo(float).eps))
|
Check if ``A`` is a symmetric matrix.
Args:
A (array_like): Matrix.
Returns:
bool: ``True`` if ``A`` is symmetric; ``False`` otherwise.
|
codesearchnet
|
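The same tolerance-based symmetry test in plain NumPy (illustrative matrices):

import numpy as np

A = np.array([[1.0, 2.0], [2.0, 1.0]])  # symmetric
B = np.array([[1.0, 2.0], [3.0, 1.0]])  # not symmetric
tol = np.sqrt(np.finfo(float).eps)
print(np.abs(A - A.T).max() < tol)  # True
print(np.abs(B - B.T).max() < tol)  # False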
def linear(x):
return x
|
Linear activation function (pass-through).
For example:
>>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32)
>>> b = tf.keras.activations.linear(a)
>>> b.numpy()
array([-3., -1., 0., 1., 3.], dtype=float32)
Args:
x: Input tensor.
Returns:
The input, unmodified.
|
github-repos
|
def parse_lxml(self, file, encoding=None, target_class=HTMLParserTarget, parser_type='html'):
if encoding:
lxml_encoding = (to_lxml_encoding(encoding) or 'latin1')
else:
lxml_encoding = encoding
elements = []
callback_func = elements.append
target = target_class(callback_func)
if (parser_type == 'html'):
parser = lxml.html.HTMLParser(encoding=lxml_encoding, target=target)
elif (parser_type == 'xhtml'):
parser = lxml.html.XHTMLParser(encoding=lxml_encoding, target=target, recover=True)
else:
parser = lxml.etree.XMLParser(encoding=lxml_encoding, target=target, recover=True)
if (parser_type == 'html'):
for dummy in range(3):
parser.feed('<html>'.encode(encoding))
while True:
data = file.read(self.BUFFER_SIZE)
if (not data):
break
parser.feed(data)
for element in elements:
(yield element)
del elements[:]
parser.close()
for element in elements:
(yield element)
|
Return an iterator of elements found in the document.
Args:
file: A file object containing the document.
encoding (str): The encoding of the document.
target_class: A class to be used for target parsing.
parser_type (str): The type of parser to use. Accepted values:
``html``, ``xhtml``, ``xml``.
Returns:
iterator: Each item is an element from
:mod:`.document.htmlparse.element`
|
codesearchnet
|
def get_mutations(aln_df):
mutation_df = aln_df[aln_df['type'] == 'mutation']
tuples = []
if not mutation_df.empty:
subset = mutation_df[['id_a_aa', 'id_a_pos', 'id_b_aa']]
subset['id_a_pos'] = subset['id_a_pos'].astype(int)
tuples = [tuple(x) for x in subset.values]
return tuples
|
Get a list of mutations (in the original sequence's numbering) from an alignment DataFrame.
Args:
aln_df (DataFrame): Alignment DataFrame
Returns:
list: Tuples of (original_residue, resnum, mutated_residue) for each mutated position
|
juraj-google-style
|
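A minimal usage sketch for get_mutations above, assuming the function is in scope; the column names follow the alignment DataFrame layout implied by the function body.
import pandas as pd

aln_df = pd.DataFrame([
    {'type': 'match', 'id_a_aa': 'A', 'id_a_pos': 1, 'id_b_aa': 'A'},
    {'type': 'mutation', 'id_a_aa': 'G', 'id_a_pos': 2, 'id_b_aa': 'D'},
    {'type': 'mutation', 'id_a_aa': 'K', 'id_a_pos': 7, 'id_b_aa': 'R'},
])
print(get_mutations(aln_df))  # -> [('G', 2, 'D'), ('K', 7, 'R')]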
def verifymessage(self, address, signature, message):
verified = self.rpc.call("verifymessage", address, signature, message)
self.logger.debug("Signature verified: %s" % str(verified))
return verified
|
Verifies that a message has been signed by an address.
Args:
address (str): address claiming to have signed the message
signature (str): ECDSA signature
message (str): plaintext message which was signed
Returns:
bool: True if the address signed the message, False otherwise
|
juraj-google-style
|
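A hypothetical usage sketch for verifymessage above: `wallet` stands in for an instance of the class that defines it, backed by a running RPC node, and the address and signature strings are placeholders only.
ok = wallet.verifymessage(
    '1BoatSLRHtKNngkdXEeobR76b53LETtpyT',   # placeholder address
    'IBase64EncodedSignature=',              # placeholder ECDSA signature
    'hello world')                           # the plaintext that was signed
print(ok)  # True only if the address really signed the message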
def estimate_cpdag(skel_graph, sep_set):
dag = skel_graph.to_directed()
node_ids = skel_graph.nodes()
for (i, j) in combinations(node_ids, 2):
adj_i = set(dag.successors(i))
if j in adj_i:
continue
adj_j = set(dag.successors(j))
if i in adj_j:
continue
if sep_set[i][j] is None:
continue
common_k = adj_i & adj_j
for k in common_k:
if k not in sep_set[i][j]:
if dag.has_edge(k, i):
_logger.debug('S: remove edge (%s, %s)' % (k, i))
dag.remove_edge(k, i)
if dag.has_edge(k, j):
_logger.debug('S: remove edge (%s, %s)' % (k, j))
dag.remove_edge(k, j)
def _has_both_edges(dag, i, j):
return dag.has_edge(i, j) and dag.has_edge(j, i)
def _has_any_edge(dag, i, j):
return dag.has_edge(i, j) or dag.has_edge(j, i)
def _has_one_edge(dag, i, j):
return ((dag.has_edge(i, j) and (not dag.has_edge(j, i))) or
(not dag.has_edge(i, j)) and dag.has_edge(j, i))
def _has_no_edge(dag, i, j):
return (not dag.has_edge(i, j)) and (not dag.has_edge(j, i))
old_dag = dag.copy()
while True:
for (i, j) in combinations(node_ids, 2):
if _has_both_edges(dag, i, j):
for k in dag.predecessors(i):
if dag.has_edge(i, k):
continue
if _has_any_edge(dag, k, j):
continue
_logger.debug('R1: remove edge (%s, %s)' % (j, i))
dag.remove_edge(j, i)
break
if _has_both_edges(dag, i, j):
succs_i = set()
for k in dag.successors(i):
if not dag.has_edge(k, i):
succs_i.add(k)
preds_j = set()
for k in dag.predecessors(j):
if not dag.has_edge(j, k):
preds_j.add(k)
if len(succs_i & preds_j) > 0:
_logger.debug('R2: remove edge (%s, %s)' % (j, i))
dag.remove_edge(j, i)
if _has_both_edges(dag, i, j):
adj_i = set()
for k in dag.successors(i):
if dag.has_edge(k, i):
adj_i.add(k)
for (k, l) in combinations(adj_i, 2):
if _has_any_edge(dag, k, l):
continue
if dag.has_edge(j, k) or (not dag.has_edge(k, j)):
continue
if dag.has_edge(j, l) or (not dag.has_edge(l, j)):
continue
_logger.debug('R3: remove edge (%s, %s)' % (j, i))
dag.remove_edge(j, i)
break
if nx.is_isomorphic(dag, old_dag):
break
old_dag = dag.copy()
return dag
|
Estimate a CPDAG from the skeleton graph and separation sets
returned by the estimate_skeleton() function.
Args:
skel_graph: A skeleton graph (an undirected networkx.Graph).
sep_set: A 2D array of separation sets,
indexed as sep_set[i][j] = set([k, l, m]).
Returns:
The estimated CPDAG (a networkx.DiGraph; an undirected edge is represented by a pair of directed edges in both directions).
|
juraj-google-style
|
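A minimal usage sketch for estimate_cpdag above, assuming the function is in scope; the skeleton of a collider X -> Z <- Y is built by hand instead of calling estimate_skeleton(), so the separation sets are filled in manually.
import networkx as nx

skel = nx.Graph()
skel.add_edges_from([(0, 2), (1, 2)])   # X - Z - Y, no X - Y edge

sep_set = [[None] * 3 for _ in range(3)]
sep_set[0][1] = sep_set[1][0] = set()   # X and Y are independent given {}

cpdag = estimate_cpdag(skel, sep_set)
print(sorted(cpdag.edges()))            # -> [(0, 2), (1, 2)]: both edges point into Z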
def ParseFileObject(self, parser_mediator, file_object):
page_header_map = self._GetDataTypeMap('dls_page_header')
try:
(page_header, file_offset) = self._ReadStructureFromFileObject(file_object, 0, page_header_map)
except (ValueError, errors.ParseError) as exception:
raise errors.UnableToParseFile('Unable to parse page header with error: {0!s}'.format(exception))
if (page_header.signature not in self._DLS_SIGNATURES):
raise errors.UnableToParseFile('Invalid file signature')
current_page_end = page_header.page_size
file_entry = parser_mediator.GetFileEntry()
date_time = self._GetParentModificationTime(file_entry)
if date_time:
timestamp_description = definitions.TIME_DESCRIPTION_RECORDED
else:
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
timestamp_description = definitions.TIME_DESCRIPTION_NOT_A_TIME
event = time_events.DateTimeValuesEvent(date_time, timestamp_description)
file_size = file_object.get_size()
while (file_offset < file_size):
if (file_offset >= current_page_end):
try:
(page_header, header_size) = self._ParseDLSPageHeader(file_object, file_offset)
except errors.ParseError as exception:
parser_mediator.ProduceExtractionWarning('Unable to parse page header with error: {0!s}'.format(exception))
break
current_page_end += page_header.page_size
file_offset += header_size
continue
if (page_header.signature == self._DLS_V1_SIGNATURE):
record_map = self._GetDataTypeMap('dls_record_v1')
else:
record_map = self._GetDataTypeMap('dls_record_v2')
try:
(record, record_length) = self._ReadStructureFromFileObject(file_object, file_offset, record_map)
file_offset += record_length
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning('Unable to parse page record with error: {0!s}'.format(exception))
break
event_data = self._BuildEventData(record)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses an fseventsd file.
Args:
parser_mediator (ParserMediator): parser mediator.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the header cannot be parsed.
|
codesearchnet
|
def GetDefaultContract(self):
try:
return self.GetContracts()[0]
except Exception as e:
logger.error(('Could not find default contract: %s' % str(e)))
raise
|
Get the default contract.
Returns:
contract (Contract): the default contract, of type neo.SmartContract.Contract, if successful.
Raises:
Exception: if no default contract is found.
Note:
Logs an error if the default contract could not be found.
|
codesearchnet
|
def port_add(br, port, may_exist=False, internal=False):
param_may_exist = _param_may_exist(may_exist)
cmd = 'ovs-vsctl {2}add-port {0} {1}'.format(br, port, param_may_exist)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
|
Creates a new port named port on bridge br.
Returns:
True on success, else False.
Args:
br: A string - bridge name
port: A string - port name
may_exist: Bool, if False - attempting to create a port that exists returns False.
internal: A boolean to create an internal interface if one does not exist.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_add br0 8080
|
juraj-google-style
|
def __init__(self, keys=None):
if not keys:
raise errors.FormatError('Missing keys value.')
if not isinstance(keys, list):
raise errors.FormatError('keys must be a list')
for key in keys:
self.ValidateKey(key)
super(WindowsRegistryKeySourceType, self).__init__()
self.keys = keys
|
Initializes a source type.
Args:
keys (Optional[list[str]]): key paths relative to the root of
the Windows Registry.
Raises:
FormatError: when keys is not set.
|
juraj-google-style
|
def send_message(self, message):
try:
if _message_test_port is not None:
_message_test_port.sent.append(message)
yield message.send(self)
except (WebSocketClosedError, StreamClosedError):
log.warning("Failed sending message as connection was closed")
raise gen.Return(None)
|
Send a Bokeh Server protocol message to the connected client.
Args:
message (Message) : a message to send
|
juraj-google-style
|
def build_graph(self):
import tensorflow as tf
input_jpeg = tf.placeholder(tf.string, shape=None)
image = tf.image.decode_jpeg(input_jpeg, channels=self.CHANNELS)
image = tf.expand_dims(image, 0)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image = tf.image.resize_bilinear(image, [self.HEIGHT, self.WIDTH], align_corners=False)
image = tf.subtract(image, 0.5)
inception_input = tf.multiply(image, 2.0)
with tf.contrib.slim.arg_scope(_inceptionlib.inception_v3_arg_scope()):
(_, end_points) = _inceptionlib.inception_v3(inception_input, is_training=False)
embedding = end_points['PreLogits']
return (input_jpeg, embedding)
|
Forms the core by building a wrapper around the inception graph.
Here we add the necessary input & output tensors, to decode jpegs,
serialize embeddings, restore from checkpoint etc.
To use other Inception models modify this file. Note that to use other
models besides Inception, you should make sure input_shape matches
their input. Resizing or other modifications may be necessary as well.
See tensorflow/contrib/slim/python/slim/nets/inception_v3.py for
details about InceptionV3.
Returns:
input_jpeg: A tensor containing raw image bytes as the input layer.
embedding: The embeddings tensor, that will be materialized later.
|
codesearchnet
|
def show_warning_messages(self, title=_(u"Incorrect Operation"), box_type='warning'):
msg = self.current.task_data['msg']
self.current.output['msgbox'] = {'type': box_type, "title": title, "msg": msg}
del self.current.task_data['msg']
|
Shows message boxes for incorrect operations or for successful operations.
Args:
title (string): title of message box
box_type (string): type of message box (warning, info)
|
juraj-google-style
|
def start_site(name):
ps_cmd = ['Start-WebSite', r"'{0}'".format(name)]
cmd_ret = _srvmgr(ps_cmd)
return cmd_ret['retcode'] == 0
|
Start a Web Site in IIS.
.. versionadded:: 2017.7.0
Args:
name (str): The name of the website to start.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.start_site name='My Test Site'
|
juraj-google-style
|
def get_metalpdb_info(metalpdb_lig_file):
pdb_metals = ['CU', 'ZN', 'MN', 'FE', 'MG', 'CO', 'SE', 'YB', 'SF4', 'FES', 'F3S', 'NI', 'FE2']
coordination_number = 0
endogenous_ligands = []
exogenous_ligands = []
ss = StructProp(ident='metalpdb', structure_path=metalpdb_lig_file, file_type='pdb')
chain_id = op.basename(metalpdb_lig_file)[5]
metal_id = (op.basename(metalpdb_lig_file).split('_')[2], op.basename(metalpdb_lig_file).split('_')[3])
for r in ss.parse_structure().first_model.get_residues():
return_id = (r.get_id(), r.get_resname())
if (r.get_id()[0] != ' '):
if ((not (r.resname.strip() in pdb_metals)) and (r.resname != 'HOH')):
exogenous_ligands.append(return_id)
else:
endogenous_ligands.append(return_id)
for a in r.get_atom():
if (not (a.element in pdb_metals)):
coordination_number += 1
infodict = {metal_id: {'endogenous_ligands': endogenous_ligands, 'exogenous_ligands': exogenous_ligands, 'coordination_number': coordination_number}}
return (chain_id, infodict)
|
Parse a MetalPDB .lig file and return a tuple of the chain ID it represents, along with metal binding information.
Args:
metalpdb_lig_file (str): Path to .lig file
Returns:
tuple: (str, dict) of the chain ID and the parsed metal binding site information
|
codesearchnet
|
def single_offset(self, shape):
single_slice_dim = self.single_slice_dim(shape)
if single_slice_dim is None:
return 0
return self.var_offset[single_slice_dim]
|
Returns the offset when the variable is partitioned in at most one dim.
Args:
shape: Tuple or list of `int` indicating the shape of one specific
variable partition.
Returns:
`int` representing the offset in the dimension along which the variable is
partitioned. Returns 0 if the variable is not being partitioned.
Raises:
ValueError: Depending on self.single_slice_dim().
|
github-repos
|
def get_filelikeobject(filename: str=None, blob: bytes=None) -> BinaryIO:
if ((not filename) and (not blob)):
raise ValueError('no filename and no blob')
if (filename and blob):
raise ValueError('specify either filename or blob')
if filename:
return open(filename, 'rb')
else:
return io.BytesIO(blob)
|
Open a file-like object.
Guard the use of this function with ``with``.
Args:
filename: for specifying via a filename
blob: for specifying via an in-memory ``bytes`` object
Returns:
a :class:`BinaryIO` object
|
codesearchnet
|
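A minimal usage sketch for get_filelikeobject above, assuming the function is in scope; per the docstring the returned object is guarded with a `with` block.
blob = b'hello, world'
with get_filelikeobject(blob=blob) as f:
    print(f.read())   # -> b'hello, world'
# Passing filename='some_file.bin' (a hypothetical path) instead would open that file in binary mode.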
def process_buffer(buffer, n_channels):
samples = np.concatenate(buffer)
if n_channels > 1:
samples = samples.reshape((-1, n_channels)).T
samples = librosa.to_mono(samples)
return samples
|
Merge the read blocks and down-mix to mono if necessary.
Args:
buffer (list): A list of blocks of samples.
n_channels (int): The number of channels of the input data.
Returns:
np.array: The samples
|
juraj-google-style
|
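A minimal usage sketch for process_buffer above, assuming the function is in scope and librosa is installed; two interleaved stereo blocks are merged and down-mixed to a single mono channel.
import numpy as np

block_a = np.array([0.1, 0.3, 0.2, 0.4], dtype=np.float32)   # L, R, L, R
block_b = np.array([0.0, 0.2], dtype=np.float32)             # L, R
mono = process_buffer([block_a, block_b], n_channels=2)
print(mono.shape)   # -> (3,): one mono sample per stereo frame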
async def get_person(self, id_):
data = (await self._get_person_json(id_, OrderedDict(append_to_response='movie_credits')))
return Person.from_json(data, self.config['data'].get('images'))
|
Retrieve person data by ID.
Arguments:
id_ (:py:class:`int`): The person's TMDb ID.
Returns:
:py:class:`~.Person`: The requested person.
|
codesearchnet
|
def CompleteTask(self, task):
with self._lock:
if (task.identifier not in self._tasks_merging):
raise KeyError('Task {0:s} was not merging.'.format(task.identifier))
self.SampleTaskStatus(task, 'completed')
del self._tasks_merging[task.identifier]
logger.debug('Completed task {0:s}.'.format(task.identifier))
|
Completes a task.
The task is complete and can be removed from the task manager.
Args:
task (Task): task.
Raises:
KeyError: if the task was not merging.
|
codesearchnet
|
def convert_softmax(params, w_name, scope_name, inputs, layers, weights, names):
print('Converting softmax ...')
if names == 'short':
tf_name = 'SMAX' + random_string(4)
elif names == 'keep':
tf_name = w_name
else:
tf_name = w_name + str(random.random())
def target_layer(x, dim=params['dim']):
import keras
return keras.activations.softmax(x, axis=dim)
lambda_layer = keras.layers.Lambda(target_layer)
layers[scope_name] = lambda_layer(layers[inputs[0]])
|
Convert softmax layer.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
|
juraj-google-style
|
def __init__(self, structure, element):
self.structure = structure
self.element = element
sga = SpacegroupAnalyzer(self.structure)
self.symm_structure = sga.get_symmetrized_structure()
self.equiv_sub = []
for equiv_site_set in list(self.symm_structure.equivalent_sites):
vac_site = equiv_site_set[0]
if isinstance(element, str):
vac_specie = vac_site.specie.symbol
else:
vac_specie = vac_site.specie
if element != vac_specie:
defect_site = PeriodicSite(element, vac_site.coords, structure.lattice, coords_are_cartesian=True)
sub = Substitution(structure, defect_site)
self.equiv_sub.append(sub)
|
Initializes a Substitution Generator
note: an Antisite is considered a type of substitution
Args:
structure(Structure): pymatgen structure object
element (str or Element or Specie): element for the substitution
|
juraj-google-style
|
def _ufunc_dispatch(ufunc, method, i, inputs, **kwargs):
if 'out' in kwargs and kwargs['out'] is not None:
raise Error('for distributed ufuncs `out=` is not yet implemented')
nin = 2 if ufunc is np.dot else ufunc.nin
if nin == 1 and method == '__call__':
return vectorize(ufunc.__call__)(inputs[0], **kwargs)
elif nin == 2 and method == '__call__':
from distob import engine
here = engine.eid
locs, weights = zip(*[_engine_affinity(a) for a in inputs])
bshape = _broadcast_shape(*inputs)
locs = list(locs)
for i, loc in enumerate(locs):
if isinstance(loc, _TupleType):
num_new_axes = len(bshape) - inputs[i].ndim
if num_new_axes > 0:
locs[i] = (locs[i][0], locs[i][1] + num_new_axes)
if ufunc is np.dot:
locs = [here if isinstance(m, _TupleType) else m for m in locs]
if locs[0] == locs[1]:
location = locs[0]
else:
smallest = 0 if weights[0] <= weights[1] else 1
largest = 1 - smallest
if locs[0] is here or locs[1] is here:
location = here if weights[0] == weights[1] else locs[largest]
else:
if weights[smallest]*2 < weights[largest] + weights[smallest]:
location = locs[largest]
else:
location = here
inputs = [_ufunc_move_input(a, location, bshape) for a in inputs]
if location is here:
return ufunc.__call__(inputs[0], inputs[1], **kwargs)
else:
if isinstance(location, numbers.Integral):
return call(ufunc.__call__, inputs[0], inputs[1], **kwargs)
else:
engine_ids, distaxis = location
n = len(engine_ids)
is_dist = tuple(isinstance(a, DistArray) for a in inputs)
assert(is_dist[0] or is_dist[1])
for i in 0, 1:
if is_dist[i]:
ndim = inputs[i].ndim
assert(inputs[i]._distaxis == distaxis)
assert(inputs[i]._n == n)
def _remote_ucall(inputs, **kwargs):
return ufunc.__call__(inputs[0], inputs[1], **kwargs)
results = []
kwargs = kwargs.copy()
kwargs['block'] = False
kwargs['prefer_local'] = False
for j in range(n):
subinputs = tuple(inputs[i]._subarrays[j] if
is_dist[i] else inputs[i] for i in (0, 1))
results.append(call(_remote_ucall, subinputs, **kwargs))
results = [convert_result(ar) for ar in results]
return DistArray(results, distaxis)
elif ufunc.nin > 2:
raise Error(u'Distributing ufuncs with >2 inputs is not yet supported')
else:
raise Error(u'Distributed ufunc.%s() is not yet implemented' % method)
|
Route ufunc execution intelligently to local host or remote engine(s)
depending on where the inputs are, to minimize the need to move data.
Args:
see numpy documentation for __numpy_ufunc__
|
juraj-google-style
|
def _is_injective(self):
return True
|
Returns true iff the forward map `g` is injective (one-to-one function).
**WARNING** This hidden property and its behavior are subject to change.
Note: Non-injective maps `g` are supported, provided their domain `D` can
be partitioned into `k` disjoint subsets, `Union{D1, ..., Dk}`, such that,
ignoring sets of measure zero, the restriction of `g` to each subset is a
differentiable bijection onto `g(D)`.
Returns:
is_injective: Python `bool`.
|
github-repos
|
def accumulate_dict_from_superclasses(cls, propname):
cachename = "__cached_all" + propname
if cachename not in cls.__dict__:
d = dict()
for c in inspect.getmro(cls):
if issubclass(c, HasProps) and hasattr(c, propname):
base = getattr(c, propname)
for k,v in base.items():
if k not in d:
d[k] = v
setattr(cls, cachename, d)
return cls.__dict__[cachename]
|
Traverse the class hierarchy and accumulate the special dicts
``MetaHasProps`` stores on classes:
Args:
propname (str) : name of the special attribute to collect.
Typically meaningful values are: ``__dataspecs__``,
``__overridden_defaults__``
|
juraj-google-style
|
def match_objects(self, set_a, set_b, time_a, time_b):
costs = (self.cost_matrix(set_a, set_b, time_a, time_b) * 100)
min_row_costs = costs.min(axis=1)
min_col_costs = costs.min(axis=0)
good_rows = np.where((min_row_costs < 100))[0]
good_cols = np.where((min_col_costs < 100))[0]
assignments = []
if ((len(good_rows) > 0) and (len(good_cols) > 0)):
munk = Munkres()
initial_assignments = munk.compute(costs[tuple(np.meshgrid(good_rows, good_cols, indexing='ij'))].tolist())
initial_assignments = [(good_rows[x[0]], good_cols[x[1]]) for x in initial_assignments]
for a in initial_assignments:
if (costs[(a[0], a[1])] < 100):
assignments.append(a)
return assignments
|
Match two sets of objects at particular times.
Args:
set_a: list of STObjects
set_b: list of STObjects
time_a: time at which set_a is being evaluated for matching
time_b: time at which set_b is being evaluated for matching
Returns:
List of tuples containing (set_a index, set_b index) for each match
|
codesearchnet
|
def process_arguments(self, func, args):
pos_args = []
kw_args = {}
while (len(args) > 0):
if (func.metadata.spec_filled(pos_args, kw_args) and (not self._is_flag(args[0]))):
break
arg = args.pop(0)
if (arg == '--'):
break
elif self._is_flag(arg):
arg_value = None
arg_name = None
if (len(arg) == 2):
arg_name = func.metadata.match_shortname(arg[1:], filled_args=pos_args)
else:
if (not arg.startswith('--')):
raise ArgumentError('Invalid method of specifying keyword argument that did not start with --', argument=arg)
arg = arg[2:]
if ('=' in arg):
(arg, arg_value) = arg.split('=', 1)
arg_name = func.metadata.match_shortname(arg, filled_args=pos_args)
arg_type = func.metadata.param_type(arg_name)
if (arg_type is None):
raise ArgumentError('Attempting to set a parameter from command line that does not have type information', argument=arg_name)
if (arg_value is None):
arg_value = self._extract_arg_value(arg_name, arg_type, args)
kw_args[arg_name] = arg_value
else:
pos_args.append(arg)
if ((len(args) > 0) and (args[0] == '--')):
args.pop(0)
return (pos_args, kw_args, args)
|
Process arguments from the command line into positional and kw args.
Arguments are consumed until the argument spec for the function is filled
or a -- is found or there are no more arguments. Keyword arguments can be
specified using --field=value, -f value or --field value. Positional
arguments are specified just on the command line itself.
If a keyword argument (`field`) is a boolean, it can be set to True by just passing
--field or -f without needing to explicitly pass True unless this would cause
ambiguity in parsing since the next expected positional argument is also a boolean
or a string.
Args:
func (callable): A function previously annotated with type information
args (list): A list of all of the potential arguments to this function.
Returns:
(args, kw_args, unused args): A tuple with a list of args, a dict of
keyword args and a list of any unused args that were not processed.
|
codesearchnet
|
def long_click(self, pos, duration=2.0):
try:
duration = float(duration)
except ValueError:
raise ValueError('Argument `duration` should be <float>. Got {}'.format(repr(duration)))
if not (0 <= pos[0] <= 1) or not (0 <= pos[1] <= 1):
raise InvalidOperationException('Click position out of screen. {}'.format(repr(pos)))
return self.agent.input.longClick(pos[0], pos[1], duration)
|
Similar to click, but presses the screen for the given time interval and then releases it.
Args:
pos (:obj:`2-list/2-tuple`): coordinates (x, y) in range from 0 to 1
duration (float): how long to press the screen, in seconds
|
juraj-google-style
|
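A hypothetical usage sketch for long_click above: `device` stands in for an object exposing the method (a Poco-style UI automation agent), and coordinates are normalized to the 0..1 screen range.
device.long_click((0.5, 0.5), duration=1.5)   # press the screen centre for 1.5 s
device.long_click((0.9, 0.1))                 # default 2 s press near the top-right corner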
def _build(self, inputs):
shape_inputs = inputs.get_shape().as_list()
rank = len(shape_inputs)
full_multiples = [1] * rank
for dim, multiple in zip(self._dims, self._multiples):
full_multiples[dim] = multiple
return tf.tile(inputs, multiples=full_multiples)
|
Connects the `TileByDim` module into the graph.
Args:
inputs: `Tensor` to tile.
Returns:
The tiled tensor.
|
juraj-google-style
|
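A minimal sketch of the underlying op rather than the module itself: _build above expands the configured dims/multiples into a full `multiples` vector and delegates to tf.tile, shown here directly for dims=[1], multiples=[3] on a rank-2 input.
import tensorflow as tf

x = tf.constant([[1, 2],
                 [3, 4]])
full_multiples = [1, 3]   # rank-2 input, tile only dimension 1
print(tf.tile(x, multiples=full_multiples))
# [[1 2 1 2 1 2]
#  [3 4 3 4 3 4]]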
def load(self,cache_genotype=False,cache_phenotype=True):
self.f = h5py.File(self.file_name,'r')
self.pheno = self.f['phenotype']
self.geno = self.f['genotype']
self.genoM = self.geno['matrix']
self.phenoM = self.pheno['matrix']
self.sample_ID = self.geno['row_header']['sample_ID'][:]
self.genoChrom = self.geno['col_header']['chrom'][:]
self.genoPos = self.geno['col_header']['pos'][:]
if 'pos_cum' in list(self.geno['col_header'].keys()):
self.genoPos_cum = self.geno['col_header']['pos_cum'][:]
else:
self.genoPos_cum = None
self.phenotype_ID = self.pheno['col_header']['phenotype_ID'][:]
if cache_genotype:
self.genoM = self.genoM[:]
if cache_phenotype:
self.phenoM = self.phenoM[:]
headers = list(self.pheno['col_header'].keys())
if 'gene_ID' in headers:
self.eqtl = True
self.geneID = self.pheno['col_header']['gene_ID'][:]
self.gene_pos = SP.array([self.pheno['col_header']['gene_chrom'][:],self.pheno['col_header']['gene_start'][:],self.pheno['col_header']['gene_end']],dtype='int').T
self.geneIDs= list(set(self.geneID))
else:
self.eqtl = False
if 'environment' in headers:
self.E = self.pheno['col_header/environment'][:]
self.Es = list(set(self.E))
else:
self.E = None
self.N = self.genoM.shape[0]
self.S = self.genoM.shape[1]
self.P = self.phenoM.shape[1]
assert (self.genoM.shape[0]==self.phenoM.shape[0]), 'dimension mismatch'
|
Load the data file.
Args:
cache_genotype: load genotypes fully into memory (default: False)
cache_phenotype: load phenotypes fully into memory (default: True)
|
juraj-google-style
|
def _receive_signal(self, progress_subscript):
self.progress = self._estimate_progress()
self.updateProgress.emit(int(self.progress))
|
Handles the progress signals emitted by the subscripts and updates the overall progress estimate.
Args:
progress_subscript: progress of the subscript that emitted the signal
|
juraj-google-style
|
def from_json(cls, json):
params = dict((str(k), v) for k, v in json.iteritems()
if k in cls._PARAMS)
if cls._OFFSET_PARAM in params:
params[cls._OFFSET_PARAM] = base64.b64decode(params[cls._OFFSET_PARAM])
return cls(**params)
|
Creates an instance of the InputReader for the given input shard's state.
Args:
json: The InputReader state as a dict-like object.
Returns:
An instance of the InputReader configured using the given JSON parameters.
|
juraj-google-style
|
def ignore():
def parse_line(line):
if (not isinstance(line, string_types)):
line = line.decode('utf-8')
# drop inline comments and surrounding whitespace
line = line.split('#', 1)[0].strip()
return line
ignore_files = [conf.proj_path('.gitignore'), conf.proj_path('.git/info/exclude'), config().get('core.excludesfile')]
result = []
for ignore_file in ignore_files:
if (not (ignore_file and os.path.exists(ignore_file))):
continue
with open(ignore_file) as fp:
parsed = (parse_line(l) for l in fp.readlines())
result += [x for x in parsed if x]
return result
|
Return a list of patterns in the project .gitignore
Returns:
list[str]: List of patterns set to be ignored by git.
|
codesearchnet
|
def register_gpt_plugin(self, fs_guid, plugin):
key = uuid.UUID(fs_guid.lower())
self.logger.debug('GPT: {}, GUID: {}'
.format(self.__get_plugin_name(plugin), fs_guid))
self.__gpt_plugins[key].append(plugin)
|
Used in a plugin's registration routine to associate its
detection method with the given filesystem GUID.
Args:
fs_guid: filesystem guid that is read from GPT partition entry
plugin: plugin that supports this filesystem
|
juraj-google-style
|
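A hypothetical registration sketch for register_gpt_plugin above: `manager` stands in for the object exposing the method and `ExtFsPlugin` for a plugin class; the GUID shown is the standard Linux filesystem-data partition type GUID, and case does not matter because the key is normalized with uuid.UUID(fs_guid.lower()).
manager.register_gpt_plugin('0fc63daf-8483-4772-8e79-3d69d8477de4',  # Linux filesystem data GUID
                            ExtFsPlugin())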