code (string, lengths 20–4.93k) | docstring (string, lengths 33–1.27k) | source (string, 3 classes) |
---|---|---|
def add_module(self, module_name, module_ui):
m_button = tk.Label(self.module_selection, text=module_name, bg="white", anchor="w")
m_button.grid(column=0, row=len(self.module_selection.winfo_children()), padx=0, pady=0, sticky="W E N S")
self.module_buttons[module_name] = m_button
m_button.bind("<Button-1>", lambda e: self.module_selected(module_name, module_ui))
|
Adds a module to the list
Args:
module_name (str): The name of the module
module_ui: The function to call to create the module's UI
|
juraj-google-style
|
def _PrintExtractionStatusUpdateLinear(self, processing_status):
for worker_status in processing_status.workers_status:
status_line = '{0:s} (PID: {1:d}) - events produced: {2:d} - file: {3:s} - running: {4!s}\n'.format(worker_status.identifier, worker_status.pid, worker_status.number_of_produced_events, worker_status.display_name, (worker_status.status not in definitions.ERROR_STATUS_INDICATORS))
self._output_writer.Write(status_line)
|
Prints an extraction status update in linear mode.
Args:
processing_status (ProcessingStatus): processing status.
|
codesearchnet
|
def status(self, job_ids):
statuses = []
for job_id in job_ids:
instance = self.client.instances().get(instance=job_id, project=self.project_id, zone=self.zone).execute()
self.resources[job_id]['status'] = translate_table[instance['status']]
statuses.append(translate_table[instance['status']])
return statuses
|
Get the status of a list of jobs identified by the job identifiers
returned from the submit request.
Args:
- job_ids (list) : A list of job identifiers
Returns:
- A list of status from ['PENDING', 'RUNNING', 'CANCELLED', 'COMPLETED',
'FAILED', 'TIMEOUT'] corresponding to each job_id in the job_ids list.
Raises:
- ExecutionProviderException or its subclasses
|
juraj-google-style
|
def _compute_inside_group(df):
inside_group = df.copy()
inside_group['type'] = 'child'
inside_group['variation'] = inside_group['value'] / inside_group[
'value_start']
inside_group.drop(['upperGroup_label', 'insideGroup', 'value_start'],
axis=1, inplace=True)
inside_group.rename(columns={'insideGroup_label': 'label'},
inplace=True)
return inside_group
|
Compute inside Group
Args:
df(dataframe):
Returns: Dataframe
|
juraj-google-style
|
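A minimal usage sketch for `_compute_inside_group`; the column layout below is inferred from the `drop`/`rename` calls in the code and is an assumption, not documented in the source.
import pandas as pd

df = pd.DataFrame({
    'value': [120, 80],
    'value_start': [100, 100],
    'upperGroup_label': ['Europe', 'Europe'],
    'insideGroup': ['fr', 'de'],
    'insideGroup_label': ['France', 'Germany'],
})
inside = _compute_inside_group(df)
# 'inside' keeps 'value', gains type='child' and variation=value/value_start
# (1.2 and 0.8 here), and 'insideGroup_label' is renamed to 'label'.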
def encode(self, builder: expressions.Builder, select_scalars_as_array: bool=True, use_resource_alias: bool=False) -> str:
self._use_resource_alias = use_resource_alias
result = self.visit(builder.node)
if select_scalars_as_array or _fhir_path_data_types.returns_collection(builder.node.return_type):
return f'ARRAY(SELECT {result.sql_alias}\nFROM {result.to_subquery()}\nWHERE {result.sql_alias} IS NOT NULL)'
else:
return f'{result.to_subquery()}'
|
Returns a Standard SQL encoding of a FHIRPath expression.
If select_scalars_as_array is True, the resulting Standard SQL encoding
always returns a top-level `ARRAY`, whose elements are non-`NULL`. Otherwise
the resulting SQL will attempt to return a scalar when possible and only
return an `ARRAY` for actual collections.
Args:
builder: The FHIR Path builder to encode as a SQL string.
select_scalars_as_array: When True, always builds SQL selecting results in
an array. When False, attempts to build SQL returning scalars where
possible.
use_resource_alias: Determines whether it is necessary to call the
resource table directly through an alias.
Returns:
A Standard SQL representation of the provided FHIRPath expression.
|
github-repos
|
def _is_node_return_ended(self, node):
if isinstance(node, astroid.Return):
return True
if isinstance(node, astroid.Call):
try:
funcdef_node = node.func.inferred()[0]
if self._is_function_def_never_returning(funcdef_node):
return True
except astroid.InferenceError:
pass
if isinstance(node, astroid.While):
return True
if isinstance(node, astroid.Raise):
if not node.exc:
return True
if not utils.is_node_inside_try_except(node):
return True
exc = utils.safe_infer(node.exc)
if exc is None or exc is astroid.Uninferable:
return False
exc_name = exc.pytype().split(".")[-1]
handlers = utils.get_exception_handlers(node, exc_name)
handlers = list(handlers) if handlers is not None else []
if handlers:
return any(
self._is_node_return_ended(_handler) for _handler in handlers
)
return True
if isinstance(node, astroid.If):
is_orelse_returning = any(
self._is_node_return_ended(_ore)
for _ore in node.orelse
if not isinstance(_ore, astroid.FunctionDef)
)
is_if_returning = any(
self._is_node_return_ended(_ifn)
for _ifn in node.body
if not isinstance(_ifn, astroid.FunctionDef)
)
return is_if_returning and is_orelse_returning
return any(
self._is_node_return_ended(_child)
for _child in node.get_children()
if not isinstance(_child, astroid.ExceptHandler)
)
|
Check if the node ends with an explicit return statement.
Args:
node (astroid.NodeNG): node to be checked.
Returns:
bool: True if the node ends with an explicit return statement, False otherwise.
|
juraj-google-style
|
def xw_plus_b_v1(x, weights, biases, name=None):
with ops.name_scope(name, 'xw_plus_b_v1', [x, weights, biases]) as name:
x = ops.convert_to_tensor(x, name='x')
weights = ops.convert_to_tensor(weights, name='weights')
biases = ops.convert_to_tensor(biases, name='biases')
mm = math_ops.matmul(x, weights)
return bias_add_v1(mm, biases, name=name)
|
Computes matmul(x, weights) + biases.
This is a deprecated version of `xw_plus_b` that will soon be removed.
Args:
x: a 2D tensor. Dimensions typically: batch, in_units
weights: a 2D tensor. Dimensions typically: in_units, out_units
biases: a 1D tensor. Dimensions: out_units
name: A name for the operation (optional). If not specified
"xw_plus_b_v1" is used.
Returns:
A 2-D Tensor computing matmul(x, weights) + biases.
Dimensions typically: batch, out_units.
|
github-repos
|
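A small NumPy sketch of the computation `xw_plus_b_v1` performs, to make the dimension conventions from the docstring concrete; this is an illustration, not the TensorFlow implementation.
import numpy as np

x = np.random.rand(4, 3)        # batch=4, in_units=3
weights = np.random.rand(3, 2)  # in_units=3, out_units=2
biases = np.random.rand(2)      # out_units=2
y = x @ weights + biases        # shape (4, 2): matmul(x, weights) + biases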
def serializable_value(self, obj):
value = self.__get__(obj, obj.__class__)
return self.property.serialize_value(value)
|
Produce the value as it should be serialized.
Sometimes it is desirable for the serialized value to differ from
the ``__get__`` in order for the ``__get__`` value to appear simpler
for user or developer convenience.
Args:
obj (HasProps) : the object to get the serialized attribute for
Returns:
JSON-like
|
codesearchnet
|
def is_control(input, model_file=None, model_proto=None, name=None):
return _gen_sentencepiece_processor_op.sentencepiece_get_piece_type(
input, model_file=model_file, model_proto=model_proto, name=name,
piece_type=1)
|
Returns true if input id is control piece.
Args:
input: An arbitrary tensor of int32.
model_file: The sentencepiece model file path.
model_proto: The sentencepiece model serialized proto.
Either `model_file` or `model_proto` must be set.
name: The name argument that is passed to the op function.
Returns:
A tensor of bool with the same shape as input.
|
juraj-google-style
|
def set_hash_value(self, key, field, value, pipeline=False):
if pipeline:
self._pipeline.hset(key, field, str(value))
else:
self._db.hset(key, field, str(value))
|
Set the value of field in a hash stored at key.
Args:
key (str): key (name) of the hash
field (str): Field within the hash to set
value: Value to set
pipeline (bool): If True, queue the command on the pipeline (transaction block). Defaults to False.
|
codesearchnet
|
def get_command_from_result(script, result, debug=False):
if (not debug):
command = (((('python waf --run "' + script) + ' ') + ' '.join([('--%s=%s' % (param, value)) for (param, value) in result['params'].items()])) + '"')
else:
command = ((((('python waf --run ' + script) + ' --command-template="') + 'gdb --args %s ') + ' '.join([('--%s=%s' % (param, value)) for (param, value) in result['params'].items()])) + '"')
return command
|
Return the command that is needed to obtain a certain result.
Args:
script (str): The script to run.
result (dict): Result entry whose 'params' key maps parameter names to values.
debug (bool): Whether the command should include the debugging
template.
|
codesearchnet
|
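A usage sketch with a hypothetical result entry; the 'params' layout is taken from the code above, and the script name is made up.
result = {'params': {'nWifi': 5, 'simulationTime': 10}}
print(get_command_from_result('wifi-example', result))
# python waf --run "wifi-example --nWifi=5 --simulationTime=10"
print(get_command_from_result('wifi-example', result, debug=True))
# python waf --run wifi-example --command-template="gdb --args %s --nWifi=5 --simulationTime=10"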
async def debug(self, conn_id, name, cmd_args):
device = self._get_property(conn_id, 'device')
retval = None
try:
if name == 'dump_state':
retval = device.dump_state()
elif name == 'restore_state':
state = cmd_args['snapshot']
device.restore_state(state)
elif name == 'load_scenario':
scenario = cmd_args['scenario']
device.load_metascenario(scenario)
elif name == 'track_changes':
if cmd_args['enabled']:
device.state_history.enable()
else:
device.state_history.disable()
elif name == 'dump_changes':
outpath = cmd_args['path']
device.state_history.dump(outpath)
else:
reason = "Unknown command %s" % name
raise DeviceAdapterError(conn_id, 'debug {}'.format(name), reason)
except Exception as exc:
self._logger.exception("Error processing debug command %s: args=%s", name, cmd_args)
reason = "Exception %s occurred during processing" % str(exc)
raise DeviceAdapterError(conn_id, 'debug {}'.format(name), reason) from exc
return retval
|
Asynchronously complete a named debug command.
The command name and arguments are passed to the underlying device adapter
and interpreted there.
Args:
conn_id (int): A unique identifer that will refer to this connection
name (string): the name of the debug command we want to invoke
cmd_args (dict): any arguments that we want to send with this command.
|
juraj-google-style
|
async def update_flags(self, messages: Sequence[MessageT],
flag_set: FrozenSet[Flag], mode: FlagOp) -> None:
...
|
Update the permanent flags of each message.
Args:
messages: The message objects.
flag_set: The set of flags for the update operation.
mode: The mode to change the flags.
|
juraj-google-style
|
def parse(self, filepath, content):
try:
parsed = json.loads(content)
except ValueError:
msg = "No JSON object could be decoded from file: {}"
raise SettingsBackendError(msg.format(filepath))
return parsed
|
Parse opened settings content using JSON parser.
Args:
filepath (str): Path of the settings file, used in error messages;
depends on the backend.
content (str): Settings content from the opened file; depends on the
backend.
Raises:
boussole.exceptions.SettingsBackendError: If parser can not decode
a valid JSON object.
Returns:
dict: Dictionary containing parsed setting elements.
|
juraj-google-style
|
def ParseFileObject(self, parser_mediator, file_object):
scca_file = pyscca.file()
try:
scca_file.open_file_object(file_object)
except IOError as exception:
parser_mediator.ProduceExtractionWarning(
'unable to open file with error: {0!s}'.format(exception))
return
format_version = scca_file.format_version
executable_filename = scca_file.executable_filename
prefetch_hash = scca_file.prefetch_hash
run_count = scca_file.run_count
number_of_volumes = scca_file.number_of_volumes
volume_serial_numbers = []
volume_device_paths = []
path = ''
for volume_information in iter(scca_file.volumes):
volume_serial_number = volume_information.serial_number
volume_device_path = volume_information.device_path
volume_serial_numbers.append(volume_serial_number)
volume_device_paths.append(volume_device_path)
timestamp = volume_information.get_creation_time_as_integer()
if timestamp:
event_data = windows_events.WindowsVolumeEventData()
event_data.device_path = volume_device_path
event_data.origin = parser_mediator.GetFilename()
event_data.serial_number = volume_serial_number
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
for filename in iter(scca_file.filenames):
if not filename:
continue
if (filename.startswith(volume_device_path) and
filename.endswith(executable_filename)):
_, _, path = filename.partition(volume_device_path)
mapped_files = []
for entry_index, file_metrics in enumerate(scca_file.file_metrics_entries):
mapped_file_string = file_metrics.filename
if not mapped_file_string:
parser_mediator.ProduceExtractionWarning(
'missing filename for file metrics entry: {0:d}'.format(
entry_index))
continue
file_reference = file_metrics.file_reference
if file_reference:
mapped_file_string = (
'{0:s} [MFT entry: {1:d}, sequence: {2:d}]').format(
mapped_file_string, file_reference & 0xffffffffffff,
file_reference >> 48)
mapped_files.append(mapped_file_string)
event_data = WinPrefetchExecutionEventData()
event_data.executable = executable_filename
event_data.mapped_files = mapped_files
event_data.number_of_volumes = number_of_volumes
event_data.path = path
event_data.prefetch_hash = prefetch_hash
event_data.run_count = run_count
event_data.version = format_version
event_data.volume_device_paths = volume_device_paths
event_data.volume_serial_numbers = volume_serial_numbers
timestamp = scca_file.get_last_run_time_as_integer(0)
if not timestamp:
parser_mediator.ProduceExtractionWarning('missing last run time')
date_time = dfdatetime_semantic_time.SemanticTime('Not set')
else:
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_RUN)
parser_mediator.ProduceEventWithEventData(event, event_data)
if format_version >= 26:
for last_run_time_index in range(1, 8):
timestamp = scca_file.get_last_run_time_as_integer(last_run_time_index)
if not timestamp:
continue
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
date_time_description = 'Previous {0:s}'.format(
definitions.TIME_DESCRIPTION_LAST_RUN)
event = time_events.DateTimeValuesEvent(
date_time, date_time_description)
parser_mediator.ProduceEventWithEventData(event, event_data)
scca_file.close()
|
Parses a Windows Prefetch file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
|
juraj-google-style
|
def export_model(self, export_formats, export_dir=None):
export_dir = (export_dir or self.logdir)
return self._export_model(export_formats, export_dir)
|
Exports model based on export_formats.
Subclasses should override _export_model() to actually
export model to local directory.
Args:
export_formats (list): List of formats that should be exported.
export_dir (str): Optional dir to place the exported model.
Defaults to self.logdir.
Returns:
A dict that maps ExportFormats to successfully exported models.
|
codesearchnet
|
def map_texture_to_surface(texture, surface):
texture_x, texture_y = texture
surface_h, surface_w = surface.shape
surface_x = np.clip(
np.int32(surface_w * texture_x - 1e-9), 0, surface_w - 1)
surface_y = np.clip(
np.int32(surface_h * texture_y - 1e-9), 0, surface_h - 1)
surface_z = surface[surface_y, surface_x]
return surface_z
|
Returns values on a surface for points on a texture.
Args:
texture (texture): the texture to trace over the surface
surface (surface): the surface to trace along
Returns:
an array of surface heights for each point in the
texture. Line separators (i.e. values that are ``nan`` in
the texture) will be ``nan`` in the output, so the output
will have the same dimensions as the x/y axes in the
input texture.
|
juraj-google-style
|
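A minimal sketch of how `map_texture_to_surface` is called, assuming the texture is a pair of coordinate arrays in [0, 1) and the surface is a 2-D height map.
import numpy as np

texture = (np.array([0.1, 0.5, 0.9]), np.array([0.2, 0.5, 0.8]))  # (x, y) points
surface = np.linspace(0.0, 1.0, 16).reshape(4, 4)                 # 4x4 height map
heights = map_texture_to_surface(texture, surface)
# heights[i] is the surface value of the grid cell covering texture point i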
def _is_definition_section(source):
try:
definitions = textwrap.dedent(source).split('\n', 1)[1].splitlines()
return all(
re.match(r'\s\s+((?!\s\s).+)\s\s+.+', s) for s in definitions)
except IndexError:
return False
|
Determine if the source is a definition section.
Args:
source: The usage string source that may be a section.
Returns:
True if the source describes a definition section; otherwise, False.
|
juraj-google-style
|
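A hedged example of the section shape the regex accepts: a header line followed by indented `name  description` rows separated by two or more spaces.
section = """Options:
  --verbose  Print more output.
  --quiet    Print less output.
"""
_is_definition_section(section)                   # True
_is_definition_section("Usage: prog [options]")   # False (no definition rows)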
def broadcast_row_partition(self, rp):
if not rp.is_uniform():
return RowPartition.from_row_lengths(self.broadcast_tensor(rp.row_lengths()))
else:
return RowPartition.from_uniform_row_length(rp.uniform_row_length(), nvals=rp.uniform_row_length() * self.dest_nrows(), nrows=self.dest_nrows())
|
Return a new shape where the rows are broadcasted.
*--self--->*
| |
rp result
| |
V V
*--------->*
This is equivalent to:
return RowPartition.from_row_lengths(self.broadcast(rp.row_lengths()))
However, if the shape has uniform row length, then that property is
maintained.
Args:
rp: a row partition.
Returns:
a RowPartition representing a broadcast version of this row partition.
|
github-repos
|
def add_pending(self, panel_obj, hgnc_gene, action, info=None):
valid_actions = ['add', 'delete', 'edit']
if (action not in valid_actions):
raise ValueError('Invalid action {0}'.format(action))
info = (info or {})
pending_action = {'hgnc_id': hgnc_gene['hgnc_id'], 'action': action, 'info': info, 'symbol': hgnc_gene['hgnc_symbol']}
updated_panel = self.panel_collection.find_one_and_update({'_id': panel_obj['_id']}, {'$addToSet': {'pending': pending_action}}, return_document=pymongo.ReturnDocument.AFTER)
return updated_panel
|
Add a pending action to a gene panel
Store the pending actions in panel.pending
Args:
panel_obj(dict): The panel that is about to be updated
hgnc_gene(dict)
action(str): choices=['add','delete','edit']
info(dict): additional gene info (disease_associated_transcripts,
reduced_penetrance, mosaicism, database_entry_version ,
inheritance_models, comment)
Returns:
updated_panel(dict):
|
codesearchnet
|
def embed(self, x):
shape_x = common_layers.shape_list(x)
x_flat = tf.reshape(x, [-1, 1])
c = self.int_to_bit(x_flat, num_bits=self.hparams.z_size, base=2)
shape = common_layers.shape_list(c)
new_shape = shape
new_shape.append(self.hparams.num_blocks)
new_shape.append(int(self.hparams.z_size / self.hparams.num_blocks))
c = tf.to_int32(tf.reshape(c, shape=new_shape))
h1_shape = shape_x
h1_shape.append(self.hparams.hidden_size)
h1 = tf.zeros(dtype=tf.float32, shape=h1_shape)
c_int = self.bit_to_int(
c, num_bits=int(self.hparams.z_size / self.hparams.num_blocks), base=2)
c_hot = tf.one_hot(c_int, depth=self.hparams.block_v_size, axis=-1)
c_hot_flat = tf.reshape(
c_hot, shape=[-1, self.hparams.num_blocks, self.hparams.block_v_size])
h1 = tf.matmul(tf.transpose(c_hot_flat, perm=[1, 0, 2]), self.means)
h1 = tf.transpose(h1, perm=[1, 0, 2])
h1 = tf.reshape(h1, shape=h1_shape)
h1_shape[0] = self.hparams.batch_size
h2 = tf.layers.dense(tf.nn.relu(h1), self.hparams.filter_size, name="vch2")
res = tf.layers.dense(
tf.nn.relu(h2), self.hparams.hidden_size, name="vcfin")
return res
|
Embedding function that takes discrete latent and returns embedding.
Args:
x: Input to the discretization bottleneck.
Returns:
Continuous embedding to be passed on to the decoder.
Raises:
ValueError: For unknown or missing arguments.
|
juraj-google-style
|
def download_aspera(self, user, host, silent=False):
aspera_home = os.environ.get('ASPERA_HOME', None)
if (not aspera_home):
raise ValueError('environment variable $ASPERA_HOME not set')
if (not os.path.exists(aspera_home)):
raise ValueError('$ASPERA_HOME directory {} does not exist'.format(aspera_home))
ascp = os.path.join(aspera_home, 'connect/bin/ascp')
key = os.path.join(aspera_home, 'connect/etc/asperaweb_id_dsa.openssh')
if (not os.path.exists(ascp)):
raise ValueError('could not find ascp binary')
if (not os.path.exists(key)):
raise ValueError('could not find openssh key')
parsed_url = urlparse(self.url)
cmd = '{} -i {} -k1 -T -l400m {}@{}:{} {}'.format(ascp, key, user, host, parsed_url.path, self._temp_file_name)
logger.debug(cmd)
try:
pr = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)
(stdout, stderr) = pr.communicate()
if (not silent):
logger.debug(('Aspera stdout: ' + str(stdout)))
logger.debug(('Aspera stderr: ' + str(stderr)))
if (pr.returncode == 0):
logger.debug(('Moving %s to %s' % (self._temp_file_name, self.destination)))
shutil.move(self._temp_file_name, self.destination)
logger.debug(('Successfully downloaded %s' % self.url))
else:
logger.error(('Failed to download %s using Aspera Connect' % self.url))
finally:
try:
os.remove(self._temp_file_name)
except OSError:
pass
|
Download file with Aspera Connect.
For details see the documentation of Aspera Connect.
Args:
user (:obj:`str`): FTP user.
host (:obj:`str`): FTP host. Defaults to "ftp-trace.ncbi.nlm.nih.gov".
|
codesearchnet
|
def build_as_function_and_v1_graph(func: Callable[..., Any]) -> Callable[..., None]:
if tf_inspect.isclass(func):
raise ValueError('`run_in_graph_mode_and_function` only supports test methods.')
@parameterized.named_parameters(('_v1_graph', 'v1_graph'), ('_function', 'function'))
@functools.wraps(func)
def decorated(self: 'TensorFlowTestCase', run_mode: str, *args, **kwargs) -> None:
if run_mode == 'v1_graph':
with ops.Graph().as_default():
func(self, *args, **kwargs)
elif run_mode == 'function':
@def_function.function
def function_in_eager():
func(self, *args, **kwargs)
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
function_in_eager()
ops.dismantle_graph(graph_for_eager_test)
else:
raise ValueError('Unknown run mode %s' % run_mode)
return decorated
|
Run a test case in v1 graph mode and inside tf.function in eager mode.
WARNING: This decorator can only be used in test cases that statically check the
generated graph. Attempting to evaluate graph or function results via
session.run() or self.evaluate() will fail.
WARNING: This decorator can only be used for test cases that inherit from
absl.testing.parameterized.TestCase.
Args:
func: Test case function to be decorated.
Returns:
Decorated test case function.
|
github-repos
|
def _ScheduleTasks(self, storage_writer):
logger.debug('Task scheduler started')
self._status = definitions.STATUS_INDICATOR_RUNNING
event_source_heap = _EventSourceHeap()
self._FillEventSourceHeap(storage_writer, event_source_heap, start_with_first=True)
event_source = event_source_heap.PopEventSource()
task = None
while (event_source or self._task_manager.HasPendingTasks()):
if self._abort:
break
try:
if (not task):
task = self._task_manager.CreateRetryTask()
if ((not task) and event_source):
task = self._task_manager.CreateTask(self._session_identifier)
task.file_entry_type = event_source.file_entry_type
task.path_spec = event_source.path_spec
event_source = None
self._number_of_consumed_sources += 1
if self._guppy_memory_profiler:
self._guppy_memory_profiler.Sample()
if task:
if self._ScheduleTask(task):
logger.debug('Scheduled task {0:s} for path specification {1:s}'.format(task.identifier, task.path_spec.comparable))
self._task_manager.SampleTaskStatus(task, 'scheduled')
task = None
else:
self._task_manager.SampleTaskStatus(task, 'schedule_attempted')
self._MergeTaskStorage(storage_writer)
if (not event_source_heap.IsFull()):
self._FillEventSourceHeap(storage_writer, event_source_heap)
if ((not task) and (not event_source)):
event_source = event_source_heap.PopEventSource()
except KeyboardInterrupt:
self._abort = True
self._processing_status.aborted = True
if self._status_update_callback:
self._status_update_callback(self._processing_status)
for task in self._task_manager.GetFailedTasks():
warning = warnings.ExtractionWarning(message='Worker failed to process path specification', path_spec=task.path_spec)
self._storage_writer.AddWarning(warning)
self._processing_status.error_path_specs.append(task.path_spec)
self._status = definitions.STATUS_INDICATOR_IDLE
if self._abort:
logger.debug('Task scheduler aborted')
else:
logger.debug('Task scheduler stopped')
|
Schedules tasks.
Args:
storage_writer (StorageWriter): storage writer for a session storage.
|
codesearchnet
|
def _open_ring_2d(x_size: int, y_size: int, z_coord: int) -> List[Tuple[int, int, int]]:
ret = []
for i in range(y_size // 2):
for j in range(1, x_size):
ret.append((j, 2 * i, z_coord))
for j in range(x_size - 1, 0, -1):
ret.append((j, 2 * i + 1, z_coord))
for i in range(y_size - 1, 0, -1):
ret.append((0, i, z_coord))
return ret
|
Ring-order of a X by Y mesh, with a fixed Z coordinate.
For example, in a 4x4 mesh, this returns the following order.
0 -- 1 -- 2 -- 3
| | | |
15-- 6 -- 5 -- 4
| | | |
14-- 7 -- 8 -- 9
| | | |
13-- 12-- 11-- 10
Note that chip 0 is not included in the output.
Args:
x_size: An integer represents the mesh size in the x-dimension. Must be
larger than 1.
y_size: An integer represents the mesh size in the y-dimension. Must be
larger than 1.
z_coord: An integer represents the z-coordinate to use for the chips in the
ring.
Returns:
A list of (x,y,z) triples in ring order.
|
github-repos
|
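Calling the function for the 4x4 mesh from the docstring (z fixed at 0) reproduces the pictured ring order, with chip (0, 0) excluded.
ring = _open_ring_2d(4, 4, 0)
# [(1, 0, 0), (2, 0, 0), (3, 0, 0),   # 1, 2, 3
#  (3, 1, 0), (2, 1, 0), (1, 1, 0),   # 4, 5, 6
#  (1, 2, 0), (2, 2, 0), (3, 2, 0),   # 7, 8, 9
#  (3, 3, 0), (2, 3, 0), (1, 3, 0),   # 10, 11, 12
#  (0, 3, 0), (0, 2, 0), (0, 1, 0)]   # 13, 14, 15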
def parse_sv_frequencies(variant):
frequency_keys = [
'clingen_cgh_benignAF',
'clingen_cgh_benign',
'clingen_cgh_pathogenicAF',
'clingen_cgh_pathogenic',
'clingen_ngi',
'clingen_ngiAF',
'swegen',
'swegenAF',
'decipherAF',
'decipher'
]
sv_frequencies = {}
for key in frequency_keys:
value = variant.INFO.get(key, 0)
if 'AF' in key:
value = float(value)
else:
value = int(value)
if value > 0:
sv_frequencies[key] = value
return sv_frequencies
|
Parsing of some custom SV frequencies.
These are very specific at the moment; this will hopefully improve over time as the
field of structural variants becomes more developed.
Args:
variant(cyvcf2.Variant)
Returns:
sv_frequencies(dict)
|
juraj-google-style
|
def has_no_title(self, title, **kwargs):
try:
self.assert_no_title(title, **kwargs)
return True
except ExpectationNotMet:
return False
|
Checks if the page doesn't have the given title.
Args:
title (str | RegexObject): The string that the title should include.
**kwargs: Arbitrary keyword arguments for :class:`TitleQuery`.
Returns:
bool: Whether it doesn't match.
|
juraj-google-style
|
def namespace_for_prefix(self, prefix):
try:
ni = self.__lookup_prefix(prefix)
except PrefixNotFoundError:
return None
else:
return ni.uri
|
Get the namespace the given prefix maps to.
Args:
prefix (str): The prefix
Returns:
str: The namespace, or None if the prefix isn't mapped to
anything in this set.
|
juraj-google-style
|
def schema_from_json(self, file_or_path):
if isinstance(file_or_path, io.IOBase):
return self._schema_from_json_file_object(file_or_path)
with open(file_or_path) as file_obj:
return self._schema_from_json_file_object(file_obj)
|
Takes a file object or file path that contains json that describes
a table schema.
Returns:
List of schema field objects.
|
codesearchnet
|
def video_augmentation(features, hue=False, saturate=False, contrast=False):
(inputs, targets) = (features['inputs'], features['targets'])
in_steps = common_layers.shape_list(inputs)[0]
video = tf.concat((inputs, targets), axis=0)
if hue:
video = tf.image.random_hue(video, max_delta=0.2)
if saturate:
video = tf.image.random_saturation(video, lower=0.5, upper=1.5)
if contrast:
video = tf.image.random_contrast(video, lower=0.5, upper=1.5)
(features['inputs'], features['targets']) = (video[:in_steps], video[in_steps:])
return features
|
Augments video with optional hue, saturation and contrast.
Args:
features: dict, with keys "inputs", "targets".
features["inputs"], 4-D Tensor, shape=(THWC)
features["targets"], 4-D Tensor, shape=(THWC)
hue: bool, apply hue_transform.
saturate: bool, apply saturation transform.
contrast: bool, apply contrast transform.
Returns:
augment_features: dict with transformed "inputs" and "targets".
|
codesearchnet
|
def minimize(self, minimize):
self._minimize = minimize
self._logger.log('debug', 'Minimize set to {}'.format(minimize))
|
Configures the ABC to minimize fitness function return value or
derived score
Args:
minimize (bool): if True, minimizes fitness function return value;
if False, minimizes derived score
|
juraj-google-style
|
def dump(o, f):
if (not f.write):
raise TypeError('You can only dump an object to a file descriptor')
d = dumps(o)
f.write(d)
return d
|
Writes out dict as toml to a file
Args:
o: Object to dump into toml
f: File descriptor where the toml should be stored
Returns:
String containing the toml corresponding to dictionary
Raises:
TypeError: When anything other than file descriptor is passed
|
codesearchnet
|
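A minimal usage sketch; the file name is arbitrary and `dumps` is the serializer already used inside `dump`.
config = {'server': {'host': 'localhost', 'port': 8080}}
with open('config.toml', 'w') as f:
    text = dump(config, f)
# text holds the same TOML string that was written to config.toml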
def get_axis_value(self, axis):
if (self.type != EventType.POINTER_AXIS):
raise AttributeError(_wrong_meth.format(self.type))
return self._libinput.libinput_event_pointer_get_axis_value(self._handle, axis)
|
Return the axis value of the given axis.
The interpretation of the value depends on the axis. For the two
scrolling axes :attr:`~libinput.constant.PointerAxis.SCROLL_VERTICAL`
and :attr:`~libinput.constant.PointerAxis.SCROLL_HORIZONTAL`, the value
of the event is in relative scroll units, with the positive direction
being down or right, respectively. For the interpretation of the value,
see :attr:`axis_source`.
If :meth:`has_axis` returns False for an axis, this method returns 0
for that axis.
For pointer events that are not of type
:attr:`~libinput.constant.Event.POINTER_AXIS`, this method raises
:exc:`AttributeError`.
Args:
axis (~libinput.constant.PointerAxis): The axis whose value to get.
Returns:
float: The axis value of this event.
Raises:
AttributeError
|
codesearchnet
|
def list_pull_requests(self, username, page, status=None):
request_url = '{}/api/0/user/{}/requests/filed'.format(self.instance, username)
payload = {}
if (username is not None):
payload['username'] = username
if (page is not None):
payload['page'] = page
if (status is not None):
payload['status'] = status
return_value = self._call_api(request_url, params=payload)
return return_value['requests']
|
List pull-requests filed by user.
Params:
username (string): filters the username of the user whose activity you are interested in.
page (integer): the page requested. Defaults to 1.
status (string): filter the status of pull requests. Default: Open,
can be Closed, Merged, All.
Returns:
list: A list of Pull-Requests filed by a given user for all the
projects for given Pagure instance.
|
codesearchnet
|
def find_primitive(self):
(lattice, scaled_positions, numbers) = spglib.find_primitive(self._cell, symprec=self._symprec)
species = [self._unique_species[(i - 1)] for i in numbers]
return Structure(lattice, species, scaled_positions, to_unit_cell=True).get_reduced_structure()
|
Find a primitive version of the unit cell.
Returns:
A primitive cell in the input cell is searched and returned
as an Structure object. If no primitive cell is found, None is
returned.
|
codesearchnet
|
def _hard_upsample(self, hidden_states, durations):
if hidden_states.size(0) == 1:
hidden_states = torch.repeat_interleave(hidden_states, durations.view(-1), dim=1)
else:
if hidden_states.shape[0] > 1 and self.training:
logger.warning_once('`self.training=True` and you use batching. You lose parallelism during the hifigan\n forward pass because the samples are interleaved.')
hidden_states = [torch.repeat_interleave(hidden_state, duration, dim=0) for hidden_state, duration in zip(hidden_states, durations)]
hidden_states = nn.utils.rnn.pad_sequence(hidden_states, batch_first=True)
return hidden_states
|
Repeats the time dimension of each sample in the batch based on the corresponding duration.
Args:
hidden_states (`torch.Tensor` of shape `(batch_size, sequence_length, *)`, *optional*):
The sequence to repeat, where `*` is any number of sequence-specific dimensions including none.
durations (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Indicates how many times to repeat time segments.
|
github-repos
|
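A small sketch of the single-sample branch using `torch.repeat_interleave` directly, with made-up shapes, to show how the durations expand the time dimension.
import torch

hidden_states = torch.arange(6.0).view(1, 3, 2)   # (batch=1, seq_len=3, dim=2)
durations = torch.tensor([[1, 2, 3]])             # repeat counts per timestep
upsampled = torch.repeat_interleave(hidden_states, durations.view(-1), dim=1)
# upsampled.shape == (1, 6, 2): frame 0 once, frame 1 twice, frame 2 three times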
def _update_inplace(self, new_query_compiler):
old_query_compiler = self._query_compiler
self._query_compiler = new_query_compiler
old_query_compiler.free()
|
Updates the current DataFrame inplace.
Args:
new_query_compiler: The new QueryCompiler to use to manage the data
|
juraj-google-style
|
def write_config_file(config_instance, appdirs=DEFAULT_APPDIRS,
file_name=DEFAULT_CONFIG_FILENAME):
path = get_config_path(appdirs, file_name)
with open(path, 'w') as fobj:
config_instance.write(fobj)
return config_instance
|
Write a ConfigParser instance to file at the correct location.
Args:
config_instance: Config instance to save to file.
appdirs (HamsterAppDirs, optional): ``HamsterAppDirs`` instance storing app/user specific
path information.
file_name (text_type, optional): Name of the config file. Defaults to
``DEFAULT_CONFIG_FILENAME``.
Returns:
SafeConfigParser: Instance written to file.
|
juraj-google-style
|
def _get_corrupted_example(self, x):
corruption_type = self.builder_config.corruption_type
severity = self.builder_config.severity
return {'gaussian_noise': corruptions.gaussian_noise, 'shot_noise': corruptions.shot_noise, 'impulse_noise': corruptions.impulse_noise, 'defocus_blur': corruptions.defocus_blur, 'frosted_glass_blur': corruptions.frosted_glass_blur, 'zoom_blur': corruptions.zoom_blur, 'fog': corruptions.fog, 'brightness': corruptions.brightness, 'contrast': corruptions.contrast, 'elastic': corruptions.elastic, 'pixelate': corruptions.pixelate, 'jpeg_compression': corruptions.jpeg_compression}[corruption_type](x, severity)
|
Return corrupted images.
Args:
x: numpy array, uncorrupted image.
Returns:
numpy array, corrupted images.
|
codesearchnet
|
def cos(times: np.ndarray, amp: complex, freq: float, phase: float=0) -> np.ndarray:
return (amp * np.cos(((((2 * np.pi) * freq) * times) + phase)).astype(np.complex_))
|
Continuous cosine wave.
Args:
times: Times to output wave for.
amp: Pulse amplitude.
freq: Pulse frequency, units of 1/dt.
phase: Pulse phase.
|
codesearchnet
|
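A usage sketch producing a sampled cosine pulse; note the implementation above relies on `np.complex_`, so this assumes NumPy < 2.0.
import numpy as np

times = np.arange(0, 10)                                # sample times, units of dt
pulse = cos(times, amp=0.5, freq=0.1, phase=np.pi / 2)
# pulse[t] == 0.5 * cos(2*pi*0.1*t + pi/2), returned as a complex array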
def CreateUnit(self, parent=None, value=None, bid_amount=None):
unit = {'xsi_type': 'ProductPartition', 'partitionType': 'UNIT'}
if (parent is not None):
unit['parentCriterionId'] = parent['id']
unit['caseValue'] = value
if ((bid_amount is not None) and (bid_amount > 0)):
bidding_strategy_configuration = {'bids': [{'xsi_type': 'CpcBid', 'bid': {'xsi_type': 'Money', 'microAmount': str(bid_amount)}}]}
adgroup_criterion = {'xsi_type': 'BiddableAdGroupCriterion', 'biddingStrategyConfiguration': bidding_strategy_configuration}
else:
adgroup_criterion = {'xsi_type': 'NegativeAdGroupCriterion'}
adgroup_criterion['adGroupId'] = self.adgroup_id
adgroup_criterion['criterion'] = unit
self.CreateAddOperation(adgroup_criterion)
return unit
|
Creates a unit node.
Args:
parent: The node that should be this node's parent.
value: The value being partitioned on.
bid_amount: The amount to bid for matching products, in micros.
Returns:
A new unit node.
|
codesearchnet
|
def normalize(code):
if (len(code) == 3):
return code
normalized = translate(code)
if normalized:
return normalized
country = countries.get(code, None)
if country:
return country.alpha3.lower()
return code
|
Normalize language codes to ISO 639-2. If all conversions fail, return the
`code` as it was given.
Args:
code (str): Language / country code.
Returns:
str: ISO 639-2 country code.
|
codesearchnet
|
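Illustrative calls; the exact mappings depend on the module's `translate` table and `countries` data, so the middle result is an assumption.
normalize('eng')   # already 3 letters, returned unchanged: 'eng'
normalize('cs')    # 2-letter code resolved via translate()/countries, e.g. 'cze'
normalize('??')    # nothing matches, returned as given: '??'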
def unpack_small_tensors(tower_grads, packing):
if not packing:
return tower_grads
new_tower_grads = []
num_devices = len(tower_grads)
num_packed = len(packing.keys())
for dev_idx, gv_list in enumerate(tower_grads):
new_gv_list = gv_list[num_packed:]
for i in xrange(0, num_packed):
k = "%d:%d" % (dev_idx, i)
gpt = packing[k]
gv = unpack_grad_tuple(gv_list[i], gpt)
for gi, idx in enumerate(gpt.indices):
assert idx == gpt.indices[gi]
new_gv_list.insert(idx, gv[gi])
new_tower_grads.append(new_gv_list)
return new_tower_grads
|
Undo the structure alterations to tower_grads done by pack_small_tensors.
Args:
tower_grads: List of List of (grad, var) tuples.
packing: A dict generated by pack_small_tensors describing the changes
it made to tower_grads.
Returns:
new_tower_grads: identical to tower_grads except that concatenations
of small tensors have been split apart and returned to their original
positions, paired with their original variables.
|
juraj-google-style
|
def vcf_records(self, qualified=False):
if qualified:
sample_names = self.qualified_sample_names
else:
sample_names = self.sample_names
for line in self._file_reader.read_lines():
if line.startswith("
continue
yield VcfRecord.parse_record(line, sample_names)
|
Generates parsed VcfRecord objects.
Typically called in a for loop to process each vcf record in a
VcfReader. VcfReader must be opened in advance and closed when
complete. Skips all headers.
Args:
qualified: When True, sample names are prefixed with file name
Returns:
Parsed VcfRecord
Raises:
StopIteration: when reader is exhausted.
TypeError: if reader is closed.
|
juraj-google-style
|
def convert(self, vroot, entry_variables):
self.graph_info = GraphInfo(vroot)
self.entry_variables = entry_variables
cnt = 0
with nn.parameter_scope(self.name):
for t, func in enumerate(self.graph_info.funcs):
if func.name == "BatchNormalization":
bn_func = func
if bn_func.info.args["batch_stat"] == False:
o = self._bn_linear_conversion(bn_func, cnt)
cnt += 1
continue
o = self._identity_conversion(func)
self.end_variable = o
return self.end_variable
|
All functions are replaced with the same `new` function.
Args:
vroot (:obj:`Variable`): NNabla Variable
entry_variables (:obj:`Variable`): Entry variable from which the conversion starts.
|
juraj-google-style
|
def _compute_upper_group(df):
upper_group = df.groupby(['groups']).agg({
'value': sum,
'value_start': sum,
'upperGroup_label': 'first',
'upperGroup_order': 'first'
}).reset_index()
upper_group['type'] = 'parent'
upper_group['variation'] = upper_group['value'] / upper_group[
'value_start']
upper_group.drop(['value_start'], axis=1, inplace=True)
upper_group.rename(columns={'upperGroup_label': 'label'}, inplace=True)
return upper_group
|
Compute upperGroup
Args:
df (Dataframe):
Returns: Dataframe
|
juraj-google-style
|
def CreateBiddingStrategy(client):
bidding_strategy_service = client.GetService(
'BiddingStrategyService', version='v201809')
shared_bidding_strategy = {
'name': 'Maximize Clicks %s' % uuid.uuid4(),
'biddingScheme': {
'xsi_type': 'TargetSpendBiddingScheme',
'bidCeiling': {
'microAmount': '2000000'
}
}
}
operation = {
'operator': 'ADD',
'operand': shared_bidding_strategy
}
response = bidding_strategy_service.mutate([operation])
new_bidding_strategy = response['value'][0]
print ('Shared bidding strategy with name "%s" and ID "%s" of type "%s" '
'was created.' %
(new_bidding_strategy['name'], new_bidding_strategy['id'],
new_bidding_strategy['biddingScheme']['BiddingScheme.Type']))
return new_bidding_strategy
|
Creates a bidding strategy object.
Args:
client: AdWordsClient the client to run the example with.
Returns:
dict An object representing a bidding strategy.
|
juraj-google-style
|
def apply2(self, func, *args, **kwargs):
ret = func(args[0], self._t, *args[1:], **kwargs)
return LinearWrap(ret)
|
Apply a function on the wrapped tensor. The tensor
will be the second argument of func.
This is because many symbolic functions
(such as tensorpack's layers) take 'scope' as the first argument.
Returns:
LinearWrap: ``LinearWrap(func(args[0], self.tensor(), *args[1:], **kwargs))``.
|
codesearchnet
|
def _avro_rows(block, avro_schema):
blockio = six.BytesIO(block.avro_rows.serialized_binary_rows)
while True:
try:
yield fastavro.schemaless_reader(blockio, avro_schema)
except StopIteration:
break
|
Parse all rows in a stream block.
Args:
block ( \
~google.cloud.bigquery_storage_v1beta1.types.ReadRowsResponse \
):
A block containing Avro bytes to parse into rows.
avro_schema (fastavro.schema):
A parsed Avro schema, used to deserialize the bytes in the
block.
Returns:
Iterable[Mapping]:
A sequence of rows, represented as dictionaries.
|
juraj-google-style
|
def execute_show(args, root_dir):
key = None
if args.get('key'):
key = args['key']
status = command_factory('status')({}, root_dir=root_dir)
if ((key not in status['data']) or (status['data'][key]['status'] != 'running')):
print('No running process with this key, use `log` to show finished processes.')
return
else:
status = command_factory('status')({}, root_dir=root_dir)
if isinstance(status['data'], str):
print(status['data'])
return
for k in sorted(status['data'].keys()):
if (status['data'][k]['status'] == 'running'):
key = k
break
if (key is None):
print('No running process, use `log` to show finished processes.')
return
config_dir = os.path.join(root_dir, '.config/pueue')
stdoutFile = os.path.join(config_dir, 'pueue_process_{}.stdout'.format(key))
stderrFile = os.path.join(config_dir, 'pueue_process_{}.stderr'.format(key))
stdoutDescriptor = open(stdoutFile, 'r')
stderrDescriptor = open(stderrFile, 'r')
running = True
if args['watch']:
stdscr = curses.initscr()
curses.noecho()
curses.cbreak()
curses.curs_set(2)
stdscr.keypad(True)
stdscr.refresh()
try:
while running:
stdscr.clear()
stdoutDescriptor.seek(0)
message = stdoutDescriptor.read()
stdscr.addstr(0, 0, message)
stdscr.refresh()
time.sleep(2)
except Exception:
curses.nocbreak()
stdscr.keypad(False)
curses.echo()
curses.endwin()
else:
print('Stdout output:\n')
stdoutDescriptor.seek(0)
print(get_descriptor_output(stdoutDescriptor, key))
print('\n\nStderr output:\n')
stderrDescriptor.seek(0)
print(get_descriptor_output(stderrDescriptor, key))
|
Print stderr and stdout of the current running process.
Args:
args['watch'] (bool): If True, we open a curses session and tail
the output live in the console.
root_dir (string): The path to the root directory the daemon is running in.
|
codesearchnet
|
def compute_loss(self, model: nn.Module, inputs: dict[str, Union[torch.Tensor, Any]], return_outputs: bool=False, num_items_in_batch: Optional[torch.Tensor]=None):
if (self.label_smoother is not None or self.compute_loss_func is not None) and 'labels' in inputs:
labels = inputs.pop('labels')
else:
labels = None
if self.model_accepts_loss_kwargs:
loss_kwargs = {}
if num_items_in_batch is not None:
loss_kwargs['num_items_in_batch'] = num_items_in_batch
inputs = {**inputs, **loss_kwargs}
outputs = model(**inputs)
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if labels is not None:
unwrapped_model = self.accelerator.unwrap_model(model)
if _is_peft_model(unwrapped_model):
model_name = unwrapped_model.base_model.model._get_name()
else:
model_name = unwrapped_model._get_name()
if self.compute_loss_func is not None:
loss = self.compute_loss_func(outputs, labels, num_items_in_batch=num_items_in_batch)
elif model_name in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values():
loss = self.label_smoother(outputs, labels, shift_labels=True)
else:
loss = self.label_smoother(outputs, labels)
else:
if isinstance(outputs, dict) and 'loss' not in outputs:
raise ValueError(f"The model did not return a loss from the inputs, only the following keys: {','.join(outputs.keys())}. For reference, the inputs it received are {','.join(inputs.keys())}.")
loss = outputs['loss'] if isinstance(outputs, dict) else outputs[0]
if self.args.average_tokens_across_devices and (self.model_accepts_loss_kwargs or self.compute_loss_func) and (num_items_in_batch is not None):
loss *= self.accelerator.num_processes
return (loss, outputs) if return_outputs else loss
|
How the loss is computed by Trainer. By default, all models return the loss in the first element.
Args:
model (`nn.Module`):
The model to compute the loss for.
inputs (`Dict[str, Union[torch.Tensor, Any]]`):
The input data for the model.
return_outputs (`bool`, *optional*, defaults to `False`):
Whether to return the model outputs along with the loss.
num_items_in_batch (Optional[torch.Tensor], *optional*):
The number of items in the batch. If num_items_in_batch is not passed,
Returns:
The loss of the model along with its output if return_outputs was set to True
Subclass and override for custom behavior. If you are not using `num_items_in_batch` when computing your loss,
make sure to set `self.model_accepts_loss_kwargs` to `False`. Otherwise, the loss calculation might be slightly inaccurate when performing gradient accumulation.
|
github-repos
|
def proxy_num(self, protocol=None):
http_num = len(self.proxies['http'])
https_num = len(self.proxies['https'])
if (protocol == 'http'):
return http_num
elif (protocol == 'https'):
return https_num
else:
return (http_num + https_num)
|
Get the number of proxies in the pool
Args:
protocol (str, optional): 'http' or 'https' or None. (default None)
Returns:
If protocol is None, return the total number of proxies, otherwise,
return the number of proxies of corresponding protocol.
|
codesearchnet
|
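Usage sketch, assuming `pool` is an instance of the proxy-pool class this method belongs to.
pool.proxy_num()          # total number of proxies (http + https)
pool.proxy_num('http')    # number of http proxies only
pool.proxy_num('https')   # number of https proxies only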
def get_njobs_in_queue(self, username=None):
if (username is None):
username = getpass.getuser()
(njobs, process) = self._get_njobs_in_queue(username=username)
if ((process is not None) and (process.returncode != 0)):
err_msg = ('Error trying to get the number of jobs in the queue. ' + 'The error response reads:\n {}'.format(process.stderr.read()))
logger.critical(err_msg)
if (not isinstance(self, ShellAdapter)):
logger.info('The number of jobs currently in the queue is: {}'.format(njobs))
return njobs
|
Returns the number of jobs in the queue, probably using subprocess or shutil to
call a command like 'qstat'. Returns None when the number of jobs cannot be determined.
Args:
username: (str) the username of the jobs to count (default is to autodetect)
|
codesearchnet
|
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: Optional[torch.Tensor]=None, output_attentions: bool=False):
residual = hidden_states
query = key = self.with_pos_embed(hidden_states, position_embeddings)
hidden_states = self.self_attn(queries=query, keys=key, values=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)
hidden_states, attentions = hidden_states if output_attentions else (hidden_states[0], None)
hidden_states = self.dropout(hidden_states)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = self.encoder_feedforward_dropout(hidden_states)
hidden_states = self.fc2(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if self.training:
if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
if output_attentions:
return (hidden_states, attentions)
return (hidden_states,)
|
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
values.
position_embeddings (`torch.FloatTensor`, *optional*):
Object queries (also called content embeddings), to be added to the hidden states.
output_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
|
github-repos
|
def CacheFileSystem(self, path_spec, file_system):
identifier = self._GetFileSystemCacheIdentifier(path_spec)
self._file_system_cache.CacheObject(identifier, file_system)
|
Caches a file system object based on a path specification.
Args:
path_spec (PathSpec): path specification.
file_system (FileSystem): file system object.
|
codesearchnet
|
def insert_tokenizer_in_auto_module(old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns):
if old_model_patterns.tokenizer_class is None or new_model_patterns.tokenizer_class is None:
return
with open(TRANSFORMERS_PATH / 'models' / 'auto' / 'tokenization_auto.py', 'r', encoding='utf-8') as f:
content = f.read()
pattern_tokenizer = re.compile('^\\s*TOKENIZER_MAPPING_NAMES\\s*=\\s*OrderedDict\\b')
lines = content.split('\n')
idx = 0
while not pattern_tokenizer.search(lines[idx]):
idx += 1
idx += 1
while not lines[idx].startswith('TOKENIZER_MAPPING = _LazyAutoMapping'):
if lines[idx].endswith(','):
block = lines[idx]
else:
block = []
while not lines[idx].startswith(' ),'):
block.append(lines[idx])
idx += 1
block.append(lines[idx])
block = '\n'.join(block)
idx += 1
if f'"{old_model_patterns.model_type}"' in block and old_model_patterns.tokenizer_class in block:
break
new_block = block.replace(old_model_patterns.model_type, new_model_patterns.model_type)
new_block = new_block.replace(old_model_patterns.tokenizer_class, new_model_patterns.tokenizer_class)
new_lines = lines[:idx] + [new_block] + lines[idx:]
with open(TRANSFORMERS_PATH / 'models' / 'auto' / 'tokenization_auto.py', 'w', encoding='utf-8') as f:
f.write('\n'.join(new_lines))
|
Add a tokenizer to the relevant mappings in the auto module.
Args:
old_model_patterns (`ModelPatterns`): The patterns for the old model.
new_model_patterns (`ModelPatterns`): The patterns for the new model.
|
github-repos
|
def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> GenerationConfig:
if isinstance(gen_config_arg, GenerationConfig):
gen_config = deepcopy(gen_config_arg)
else:
pretrained_model_name = Path(gen_config_arg) if isinstance(gen_config_arg, str) else gen_config_arg
config_file_name = None
if pretrained_model_name.is_file():
config_file_name = pretrained_model_name.name
pretrained_model_name = pretrained_model_name.parent
elif pretrained_model_name.is_dir():
pass
else:
pretrained_model_name = gen_config_arg
gen_config = GenerationConfig.from_pretrained(pretrained_model_name, config_file_name)
try:
gen_config.validate(strict=True)
except ValueError as exc:
raise ValueError(str(exc) + '\n\nFix these issues to train your model.')
return gen_config
|
Loads a `~generation.GenerationConfig` from the `Seq2SeqTrainingArguments.generation_config` arguments.
Args:
gen_config_arg (`str` or [`~generation.GenerationConfig`]):
`Seq2SeqTrainingArguments.generation_config` argument.
Returns:
A `~generation.GenerationConfig`.
|
github-repos
|
def from_linearized(first, second, intersections):
(s, t, success) = segment_intersection(first.start_node, first.end_node, second.start_node, second.end_node)
bad_parameters = False
if success:
if (not (_helpers.in_interval(s, 0.0, 1.0) and _helpers.in_interval(t, 0.0, 1.0))):
bad_parameters = True
else:
if ((first.error == 0.0) and (second.error == 0.0)):
raise ValueError(_UNHANDLED_LINES)
bad_parameters = True
s = 0.5
t = 0.5
if bad_parameters:
if (not convex_hull_collide(first.curve.nodes, second.curve.nodes)):
return
orig_s = (((1 - s) * first.curve.start) + (s * first.curve.end))
orig_t = (((1 - t) * second.curve.start) + (t * second.curve.end))
(refined_s, refined_t) = _intersection_helpers.full_newton(orig_s, first.curve.original_nodes, orig_t, second.curve.original_nodes)
(refined_s, success) = _helpers.wiggle_interval(refined_s)
if (not success):
return
(refined_t, success) = _helpers.wiggle_interval(refined_t)
if (not success):
return
add_intersection(refined_s, refined_t, intersections)
|
Determine curve-curve intersection from pair of linearizations.
.. note::
This assumes that at least one of ``first`` and ``second`` is
not a line. The line-line case should be handled "early"
by :func:`check_lines`.
.. note::
This assumes the caller has verified that the bounding boxes
for ``first`` and ``second`` actually intersect.
If there is an intersection along the segments, adds that intersection
to ``intersections``. Otherwise, returns without doing anything.
Args:
first (Linearization): First curve being intersected.
second (Linearization): Second curve being intersected.
intersections (list): A list of existing intersections.
Raises:
ValueError: If ``first`` and ``second`` both have linearization error
of ``0.0`` (i.e. they are both lines). This is because this
function expects the caller to have used :func:`check_lines`
already.
|
codesearchnet
|
def transform(self, path):
if ((path is None) or (not path)):
return None
obj_parent_modules = path.split('.')
objects = [obj_parent_modules.pop((- 1))]
while True:
try:
parent_module_path = '.'.join(obj_parent_modules)
parent_module = importlib.import_module(parent_module_path)
break
except ImportError:
if (len(obj_parent_modules) == 1):
raise ImportError(("No module named '%s'" % obj_parent_modules[0]))
objects.insert(0, obj_parent_modules.pop((- 1)))
current_object = parent_module
for obj in objects:
current_object = getattr(current_object, obj)
return current_object
|
Transform a path into an actual Python object.
The path can be arbitrary long. You can pass the path to a package,
a module, a class, a function or a global variable, as deep as you
want, as long as the deepest module is importable through
``importlib.import_module`` and each object is obtainable through
the ``getattr`` method. Local objects will not work.
Args:
path (str): the dot-separated path of the object.
Returns:
object: the imported module or obtained object.
|
codesearchnet
|
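Usage sketch, assuming `transformer` is an instance of the class that defines `transform`.
transformer.transform('os.path.join')              # -> the os.path.join function
transformer.transform('collections.OrderedDict')   # -> the OrderedDict class
transformer.transform('')                          # -> None (empty path)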
def list_leases(self, uuid=None):
try:
lease_files = os.listdir(self.path)
except OSError as e:
raise_from(LagoSubnetLeaseBadPermissionsException(self.path, e.strerror), e)
leases = [self.create_lease_object_from_idx(lease_file.split('.')[0]) for lease_file in lease_files if (lease_file != LOCK_NAME)]
if (not uuid):
return leases
else:
return [lease for lease in leases if (lease.uuid == uuid)]
|
List current subnet leases
Args:
uuid(str): Filter the leases by uuid
Returns:
list of :class:~Lease: current leases
|
codesearchnet
|
async def download_cot_artifact(chain, task_id, path):
link = chain.get_link(task_id)
log.debug('Verifying {} is in {} cot artifacts...'.format(path, task_id))
if (not link.cot):
log.warning('Chain of Trust for "{}" in {} does not exist. See above log for more details. Skipping download of this artifact'.format(path, task_id))
return
if (path not in link.cot['artifacts']):
raise CoTError('path {} not in {} {} chain of trust artifacts!'.format(path, link.name, link.task_id))
url = get_artifact_url(chain.context, task_id, path)
loggable_url = get_loggable_url(url)
log.info('Downloading Chain of Trust artifact:\n{}'.format(loggable_url))
(await download_artifacts(chain.context, [url], parent_dir=link.cot_dir, valid_artifact_task_ids=[task_id]))
full_path = link.get_artifact_full_path(path)
for (alg, expected_sha) in link.cot['artifacts'][path].items():
if (alg not in chain.context.config['valid_hash_algorithms']):
raise CoTError('BAD HASH ALGORITHM: {}: {} {}!'.format(link.name, alg, full_path))
real_sha = get_hash(full_path, hash_alg=alg)
if (expected_sha != real_sha):
raise CoTError('BAD HASH on file {}: {}: Expected {} {}; got {}!'.format(full_path, link.name, alg, expected_sha, real_sha))
log.debug('{} matches the expected {} {}'.format(full_path, alg, expected_sha))
return full_path
|
Download an artifact and verify its SHA against the chain of trust.
Args:
chain (ChainOfTrust): the chain of trust object
task_id (str): the task ID to download from
path (str): the relative path to the artifact to download
Returns:
str: the full path of the downloaded artifact
Raises:
CoTError: on failure.
|
codesearchnet
|
def _format_line(headers, fields):
assert len(fields) == len(headers), (fields, headers)
fields = ["%2.4f" % field if isinstance(field, float) else str(field)
for field in fields]
return ' '.join(' ' * max(0, len(header) - len(field)) + field
for (header, field) in zip(headers, fields))
|
Format a line of a table.
Arguments:
headers: A list of strings that are used as the table headers.
fields: A list of the same length as `headers` where `fields[i]` is
the entry for `headers[i]` in this row. Elements can be of
arbitrary types. Pass `headers` to print the header row.
Returns:
A pretty string.
|
juraj-google-style
|
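A short usage sketch: passing the headers themselves prints the header row, then each row of fields is right-aligned under its header.
headers = ['step', 'loss', 'accuracy']
print(_format_line(headers, headers))              # step loss accuracy
print(_format_line(headers, [10, 0.12345, 0.9]))   # floats formatted as %2.4f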
def eig_one_step(current_vector, learning_rate, vector_prod_fn):
grad = 2*vector_prod_fn(current_vector)
current_objective = tf.reshape(tf.matmul(tf.transpose(current_vector),
grad) / 2., shape=())
grad = grad - current_vector*tf.matmul(tf.transpose(current_vector), grad)
grad_norm = tf.norm(grad)
grad_norm_sq = tf.square(grad_norm)
norm_grad = grad / grad_norm
directional_second_derivative = (
tf.reshape(2*tf.matmul(tf.transpose(norm_grad),
vector_prod_fn(norm_grad)),
shape=()))
grad_m_grad = directional_second_derivative*grad_norm_sq / 2
if directional_second_derivative / 2. < current_objective:
return norm_grad
if directional_second_derivative > 0.:
step = -1. * grad_norm / directional_second_derivative
else:
if grad_norm_sq <= 1e-16:
step = 0.0
else:
step = -2. * tf.reduce_sum(current_vector*grad) / grad_norm_sq
gain = -(2 * tf.reduce_sum(current_vector*grad) +
(step*step) * grad_m_grad)
if gain < 0.:
step = -learning_rate * grad_norm
current_vector = current_vector + step * norm_grad
return tf.nn.l2_normalize(current_vector)
|
Function that performs one step of gd (variant) for min eigen value.
Args:
current_vector: current estimate of the eigen vector with minimum eigen
value.
learning_rate: learning rate.
vector_prod_fn: function which returns product H*x, where H is a matrix for
which we computing eigenvector.
Returns:
updated vector after one step
|
juraj-google-style
|
def sg_regularizer_loss(scale=1.0):
return (scale * tf.reduce_mean(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)))
|
r""" Get regularizer losss
Args:
scale: A scalar. A weight applied to regularizer loss
|
codesearchnet
|
def RegisterDefinition(self, artifact_definition):
artifact_definition_name = artifact_definition.name.lower()
if artifact_definition_name in self._artifact_definitions:
raise KeyError(
'Artifact definition already set for name: {0:s}.'.format(
artifact_definition.name))
self._artifact_definitions[artifact_definition_name] = artifact_definition
self._defined_artifact_names.add(artifact_definition.name)
for source in artifact_definition.sources:
if source.type_indicator == definitions.TYPE_INDICATOR_ARTIFACT_GROUP:
self._artifact_name_references.update(source.names)
|
Registers an artifact definition.
Artifact definitions are identified based on their lower case name.
Args:
artifact_definition (ArtifactDefinition): an artifact definition.
Raises:
KeyError: if artifact definition is already set for the corresponding
name.
|
juraj-google-style
|
def __init__(self, unicodeHexValue, block):
if unicodeHexValue < 0 or unicodeHexValue > 0x10FFFF:
raise ValueError("numeric value outside Unicode range")
self.unicodeHexValue = unicodeHexValue
self.chr = chr(self.unicodeHexValue)
self.name = unicodedata.name(self.chr)
self.equivalents = {}
self._block = block
|
Set up a unicode character.
Arguments:
unicodeHexValue -- an integer that should correspond to a
Unicode code point.
block -- the CharacterBlock this character belongs to.
Raises:
ValueError -- if unicodeHexValue is not a valid code point.
|
juraj-google-style
|
def list_json_files(directory, recursive=False):
json_files = []
for (top, dirs, files) in os.walk(directory):
dirs.sort()
paths = (os.path.join(top, f) for f in sorted(files))
json_files.extend((x for x in paths if is_json(x)))
if (not recursive):
break
return json_files
|
Return a list of file paths for JSON files within `directory`.
Args:
directory: A path to a directory.
recursive: If ``True``, this function will descend into all
subdirectories.
Returns:
A list of JSON file paths directly under `directory`.
|
codesearchnet
|
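A small usage sketch, assuming `list_json_files` (and the `is_json` helper it relies on) are importable from the module above:
import json
import os
import tempfile

with tempfile.TemporaryDirectory() as root:
    os.makedirs(os.path.join(root, 'nested'))
    for name in ('a.json', os.path.join('nested', 'b.json')):
        with open(os.path.join(root, name), 'w') as f:
            json.dump({}, f)

    print(list_json_files(root))                  # only root/a.json
    print(list_json_files(root, recursive=True))  # root/a.json and root/nested/b.json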
def run(self):
accounts = list(AWSAccount.get_all(include_disabled=False).values())
for account in accounts:
self.log.debug('Updating VPC Flow Logs for {}'.format(account))
self.session = get_aws_session(account)
role_arn = self.confirm_iam_role(account)
for aws_region in AWS_REGIONS:
try:
vpc_list = VPC.get_all(account, aws_region).values()
need_vpc_flow_logs = [x for x in vpc_list if (x.vpc_flow_logs_status != 'ACTIVE')]
for vpc in need_vpc_flow_logs:
if self.confirm_cw_log(account, aws_region, vpc.id):
self.create_vpc_flow_logs(account, aws_region, vpc.id, role_arn)
else:
self.log.info('Failed to confirm log group for {}/{}'.format(account, aws_region))
except Exception:
self.log.exception('Failed processing VPCs for {}/{}.'.format(account, aws_region))
db.session.commit()
|
Main entry point for the auditor worker.
Returns:
`None`
|
codesearchnet
|
def multiply(x1, x2, output_shape=None, name=None):
if (not isinstance(x2, Tensor)):
return ScalarMultiplyOperation(x1, x2).outputs[0]
with tf.name_scope(name, default_name='mul'):
(x1, x2) = binary_arguments_to_tensors(x1, x2)
return einsum([x1, x2], output_shape=_infer_binary_broadcast_shape(x1.shape, x2.shape, output_shape))
|
Binary multiplication with broadcasting.
Args:
x1: a Tensor
x2: a Tensor
output_shape: an optional Shape
name: an optional string
Returns:
a Tensor
|
codesearchnet
|
def relu(x):
if any_symbolic_tensors((x,)):
return Relu().symbolic_call(x)
return backend.nn.relu(x)
|
Rectified linear unit activation function.
It is defined as `f(x) = max(0, x)`.
Args:
x: Input tensor.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x1 = keras.ops.convert_to_tensor([-1.0, 0.0, 1.0, 0.2])
>>> keras.ops.relu(x1)
array([0.0, 0.0, 1.0, 0.2], dtype=float32)
|
github-repos
|
def replace_characters(self, text, characters, replacement=''):
if (not characters):
return text
characters = ''.join(sorted(characters))
if (characters in self._characters_regexes):
characters_regex = self._characters_regexes[characters]
else:
characters_regex = re.compile(('[%s]' % re.escape(characters)))
self._characters_regexes[characters] = characters_regex
return characters_regex.sub(replacement, text)
|
Remove characters from text.
Removes custom characters from input text or replaces them
with a string if specified.
Args:
text: The text to be processed.
characters: Characters that will be replaced.
replacement: New text that will replace the custom characters.
Returns:
The text without the given characters.
|
codesearchnet
|
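A standalone sketch of the same idea (compile one character-class regex per distinct character set and cache it); the module-level `_regex_cache` name is made up, and the real method keeps the cache on the instance instead:
import re

_regex_cache = {}

def replace_characters(text, characters, replacement=''):
    if not characters:
        return text
    key = ''.join(sorted(characters))
    regex = _regex_cache.get(key)
    if regex is None:
        regex = re.compile('[%s]' % re.escape(key))
        _regex_cache[key] = regex
    return regex.sub(replacement, text)

print(replace_characters('f,o;o!', ',;!'))        # 'foo'
print(replace_characters('a-b-c', '-', ' '))      # 'a b c'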
def regroup(values, wrap_class=values_lib.PerReplica, always_wrap=False):
v0 = values[0]
if isinstance(v0, list):
for v in values[1:]:
assert isinstance(v, list)
assert len(v) == len(v0), 'len(v) == %d, len(v0) == %d, v: %s, v0: %s' % (len(v), len(v0), v, v0)
return [regroup(tuple((v[i] for v in values)), wrap_class, always_wrap) for i in range(len(v0))]
if isinstance(v0, tuple):
for v in values[1:]:
assert isinstance(v, tuple)
assert len(v) == len(v0), f'Values to regroup had different lengths: len(v) == {len(v)}, len(v0) == {len(v0)}, v: {v}, v0: {v0}'
regrouped_tuple = tuple((regroup(tuple((v[i] for v in values)), wrap_class, always_wrap) for i in range(len(v0))))
if hasattr(v0, '_fields'):
assert hasattr(v0, '_make')
return v0._make(regrouped_tuple)
else:
return regrouped_tuple
if isinstance(v0, abc.Mapping):
v0keys = v0.keys()
for v in values[1:]:
assert isinstance(v, abc.Mapping), 'v[0]: %r v[i]: %r' % (v0, v)
assert set(v.keys()) == set(v0keys), 'v[0].keys: %s v[i].keys: %s' % (set(v0keys), set(v.keys()))
return type(v0)({key: regroup(tuple((v[key] for v in values)), wrap_class, always_wrap) for key in v0keys})
same_id = True
for v in values[1:]:
if v is not v0:
same_id = False
break
if same_id and isinstance(v0, values_lib.DistributedVariable):
return v0
if same_id and (not always_wrap) and (value_container(v0) is v0):
return v0
if not isinstance(v0, resource_variable_ops._UnreadVariable) and value_container(v0) is not v0:
assert not isinstance(v0, values_lib.MirroredVariable), 'ids = %s, values = %s' % ([id(v) for v in values], values)
distributed_container = value_container(v0)
assert distributed_container is not None
for v in values[1:]:
assert distributed_container is value_container(v)
return distributed_container
return wrap_class(values)
|
Makes a nest per-replica into a nest of PerReplica/Mirrored values.
Args:
values: Values to regroup
wrap_class: Class that `values` will be wrapped in.
always_wrap: Always wrap the `values` in `wrap_class` even if the values
are the same, except for DistributedVariable.
Returns:
Wrapped `values`.
|
github-repos
|
def authenticate(self, username, password):
if self.config.get('LDAP_BIND_DIRECT_CREDENTIALS'):
result = self.authenticate_direct_credentials(username, password)
elif not self.config.get('LDAP_ALWAYS_SEARCH_BIND') and \
self.config.get('LDAP_USER_RDN_ATTR') == \
self.config.get('LDAP_USER_LOGIN_ATTR'):
result = self.authenticate_direct_bind(username, password)
else:
result = self.authenticate_search_bind(username, password)
return result
|
An abstracted authentication method. Decides whether to perform a
direct bind or a search bind based upon the login attribute configured
in the config.
Args:
username (str): Username of the user to bind
password (str): User's password to bind with.
Returns:
AuthenticationResponse
|
juraj-google-style
|
def get_metrics_for_resource(access_token, subscription_id, resource_group, resource_provider, resource_type, resource_name):
endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/', resource_provider, '/', resource_type, '/', resource_name, '/providers/microsoft.insights', '/metrics?api-version=', INSIGHTS_PREVIEW_API])
return do_get(endpoint, access_token)
|
Get the monitoring metrics for a resource.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
resource_provider (str): Name of the resource provider.
resource_type (str): Type of resource.
resource_name (str): Name of resource.
Returns:
HTTP response. JSON body of resource metrics.
|
codesearchnet
|
def get_posts(self, num=None, tag=None, private=False):
posts = self.posts
if not private:
posts = [post for post in posts if post.public]
if tag:
posts = [post for post in posts if tag in post.tags]
if num:
return posts[:num]
return posts
|
Get all the posts added to the blog.
Args:
num (int): Optional. If provided, only return N posts (sorted by date,
most recent first).
tag (Tag): Optional. If provided, only return posts that have a
specific tag.
private (bool): By default (if False), private posts are not included.
If set to True, private posts will also be included.
|
juraj-google-style
|
def load_steps(working_dir=None, steps_dir=None, step_file=None,
step_list=None):
if steps_dir is not None:
step_files = glob.glob(os.path.join(steps_dir, '*.cwl'))
elif step_file is not None:
step_files = [step_file]
elif step_list is not None:
step_files = []
for path in step_list:
if os.path.isdir(path):
step_files += glob.glob(os.path.join(path, '*.cwl'))
else:
step_files.append(path)
else:
step_files = []
if working_dir is not None:
step_files = sort_loading_order(step_files)
steps = {}
for f in step_files:
if working_dir is not None:
if not working_dir == os.path.dirname(f) and not is_url(f):
copied_file = os.path.join(working_dir, os.path.basename(f))
shutil.copy2(f, copied_file)
f = copied_file
try:
s = Step(f)
steps[s.name] = s
except (NotImplementedError, ValidationException,
PackedWorkflowException) as e:
logger.warning(e)
return steps
|
Return a dictionary containing Steps read from file.
Args:
working_dir (str, optional): directory into which the CWL files are
copied before loading.
steps_dir (str, optional): path to directory containing CWL files.
step_file (str, optional): path or http(s) url to a single CWL file.
step_list (list, optional): a list of directories, urls or local file
paths to CWL files or directories containing CWL files.
Returns:
dict containing (name, Step) entries.
|
juraj-google-style
|
def moveRel(xOffset=None, yOffset=None, duration=0.0, tween=linear, pause=None, _pause=True):
_failSafeCheck()
(xOffset, yOffset) = _unpackXY(xOffset, yOffset)
_mouseMoveDrag('move', None, None, xOffset, yOffset, duration, tween)
_autoPause(pause, _pause)
|
Moves the mouse cursor to a point on the screen, relative to its current
position.
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at the edge of
the screen.
Args:
xOffset (int, float, None, tuple, optional): How far left (for negative values) or
right (for positive values) to move the cursor. 0 by default. If tuple, this is used for xOffset and yOffset.
yOffset (int, float, None, optional): How far up (for negative values) or
down (for positive values) to move the cursor. 0 by default.
duration (float, optional): The amount of time it takes to move the mouse
cursor to the new xy coordinates. If 0, then the mouse cursor is moved
instantaneously. 0.0 by default.
tween (func, optional): The tweening function used if the duration is not
0. A linear tween is used by default. See the tweens.py file for
details.
Returns:
None
|
codesearchnet
|
def Kdp(scatterer):
if (scatterer.thet0 != scatterer.thet) or \
(scatterer.phi0 != scatterer.phi):
raise ValueError("A forward scattering geometry is needed to " + \
"compute the specific differential phase.")
S = scatterer.get_S()
return 1e-3 * (180.0/np.pi) * scatterer.wavelength * (S[1,1]-S[0,0]).real
|
Specific differential phase (K_dp) for the current setup.
Args:
scatterer: a Scatterer instance.
Returns:
K_dp [deg/km].
NOTE: This only returns the correct value if the particle diameter and
wavelength are given in [mm]. The scatterer object should be set to
forward scattering geometry before calling this function.
|
juraj-google-style
|
def _ircounts2radiance(counts, scale, offset):
rad = ((counts - offset) / scale)
return rad.clip(min=0)
|
Convert IR counts to radiance
Reference: [IR].
Args:
counts: Raw detector counts
scale: Scale [mW-1 m2 cm sr]
offset: Offset [1]
Returns:
Radiance [mW m-2 cm-1 sr-1]
|
codesearchnet
|
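A small numeric sketch with made-up calibration coefficients, assuming the function above is in scope and `counts` is a NumPy array:
import numpy as np

counts = np.array([0, 500, 1000], dtype=float)
scale, offset = 200.0, 400.0
rad = _ircounts2radiance(counts, scale, offset)
# (counts - offset) / scale, clipped at zero -> [0.0, 0.5, 3.0]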
def random_hermitian_matrix(num_qubits):
dim = 2 ** num_qubits
val_range = 2
random_real = tf.cast(tf.random.uniform([dim, dim], -val_range, val_range), tf.complex128)
random_imag = 1j * tf.cast(tf.random.uniform([dim, dim], -val_range, val_range), tf.complex128)
random_matrix = random_real + random_imag
return random_matrix + tf.linalg.adjoint(random_matrix)
|
Returns a random Hermitian matrix.
Uses the property that A + A* is Hermitian for any matrix A.
Args:
num_qubits: Number of qubits on which the matrix acts.
|
github-repos
|
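A quick sanity check for the snippet above: the returned matrix has dimension 2**num_qubits and equals its own conjugate transpose:
import tensorflow as tf

m = random_hermitian_matrix(num_qubits=2)            # 4 x 4, complex128
err = tf.reduce_max(tf.abs(m - tf.linalg.adjoint(m)))
print(m.shape, float(err))                           # (4, 4) 0.0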
def select_char_code_table(self, table):
tables = {'standard': 0,
'eastern european': 1,
'western european': 2,
'spare': 3
}
if table in tables:
self.send(chr(27)+'t'+chr(tables[table]))
else:
raise RuntimeError('Invalid char table.')
|
Select a character code table from the built-in ones.
Args:
table: The desired character code table. Choose from 'standard', 'eastern european', 'western european', and 'spare'
Returns:
None
Raises:
RuntimeError: Invalid char table.
|
juraj-google-style
|
def mols_to_file(mols, path):
with open(path, 'w') as f:
f.write(mols_to_text(mols))
|
Save molecules to the SDFile format file
Args:
mols: list of molecule objects
path: file path to save
|
codesearchnet
|
def create(labels=None, **kw):
if labels is not None:
kw[u'labels'] = encoding.PyValueToMessage(MetricValue.LabelsValue,
labels)
return MetricValue(**kw)
|
Constructs a new metric value.
This acts as an alternative to the MetricValue constructor that
simplifies the specification of labels. Rather than having to create
a MetricValue.LabelsValue instance, it is enough to pass the labels as a
plain dict of strings.
Args:
labels (dict[string, string]): labels to attach to the metric value.
**kw: any other keyword args valid in the MetricValue constructor
Returns:
:class:`MetricValue`: the created instance
|
juraj-google-style
|
def __init__(self, batch_env, step, is_training, should_log, config):
self._batch_env = batch_env
self._step = step
self._is_training = is_training
self._should_log = should_log
self._config = config
self._observ_filter = parts.StreamingNormalize(
self._batch_env.observ[0], center=True, scale=True, clip=5,
name='normalize_observ')
self._reward_filter = parts.StreamingNormalize(
self._batch_env.reward[0], center=False, scale=True, clip=10,
name='normalize_reward')
self._use_gpu = self._config.use_gpu and utility.available_gpus()
policy_params, state = self._initialize_policy()
self._initialize_memory(policy_params)
with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):
self._optimizer = self._config.optimizer(self._config.learning_rate)
self._penalty = tf.Variable(
self._config.kl_init_penalty, False, dtype=tf.float32)
with tf.variable_scope('ppo_temporary'):
with tf.device('/gpu:0'):
if state is None:
self._last_state = None
else:
var_like = lambda x: tf.Variable(lambda: tf.zeros_like(x), False)
self._last_state = tools.nested.map(var_like, state)
with tf.variable_scope('ppo_temporary'):
self._last_action = tf.Variable(
tf.zeros_like(self._batch_env.action), False, name='last_action')
self._last_policy = tools.nested.map(
lambda x: tf.Variable(tf.zeros_like(x[:, 0]), False), policy_params)
|
Create an instance of the PPO algorithm.
Args:
batch_env: In-graph batch environment.
step: Integer tensor holding the current training step.
is_training: Boolean tensor for whether the algorithm should train.
should_log: Boolean tensor for whether summaries should be returned.
config: Object containing the agent configuration as attributes.
|
juraj-google-style
|
def __init__(self, filename, content_generator=None, content_length=None):
precondition.AssertType(filename, Text)
self.filename = filename
self.content_length = content_length
if content_generator is None:
raise ValueError("content_generator can't be None")
self.content_generator = content_generator
|
ApiBinaryStream constructor.
Args:
filename: A file name to be used by the browser when user downloads the
file.
content_generator: A generator that yields byte chunks (of any size) to
be streamed to the user.
content_length: The length of the stream, if known upfront.
Raises:
ValueError: if content_generator is None.
|
juraj-google-style
|
def _create_w_objective(m, X, R):
genes, clusters = m.shape
cells = X.shape[1]
R1 = R.reshape((genes, 1)).dot(np.ones((1, cells)))
def objective(w):
w = w.reshape((m.shape[1], X.shape[1]))
d = m.dot(w)+eps
return np.sum((X + R1)*np.log(d + R1) - X*np.log(d))/genes
def deriv(w):
w2 = w.reshape((m.shape[1], X.shape[1]))
d = m.dot(w2)+eps
temp = X/d
temp2 = (X+R1)/(d+R1)
m1 = m.T.dot(temp2)
m2 = m.T.dot(temp)
deriv = m1 - m2
return deriv.flatten()/genes
return objective, deriv
|
Creates an objective function and its derivative for W, given M and X (data)
Args:
m (array): genes x clusters
X (array): genes x cells
R (array): 1 x genes
|
juraj-google-style
|
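A rough sketch of how the returned (objective, gradient) pair can be handed to a SciPy solver; the shapes are made up, `eps` is assumed to be defined at module level next to the function, and L-BFGS-B with non-negativity bounds is just one reasonable choice:
import numpy as np
from scipy.optimize import minimize

genes, clusters, cells = 50, 3, 20
m = np.random.rand(genes, clusters)
X = np.random.poisson(5.0, size=(genes, cells)).astype(float)
R = np.random.rand(genes) + 1.0

objective, deriv = _create_w_objective(m, X, R)
w0 = np.random.rand(clusters * cells)
res = minimize(objective, w0, jac=deriv, method='L-BFGS-B',
               bounds=[(0, None)] * w0.size)
W = res.x.reshape(clusters, cells)                   # clusters x cells weights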
def all_distances(coords1, coords2):
c1 = np.array(coords1)
c2 = np.array(coords2)
z = (c1[:, None, :] - c2[None, :, :]) ** 2
return np.sum(z, axis=-1) ** 0.5
|
Returns the distances between two lists of coordinates
Args:
coords1: First set of cartesian coordinates.
coords2: Second set of cartesian coordinates.
Returns:
2d array of cartesian distances. E.g the distance between
coords1[i] and coords2[j] is distances[i,j]
|
juraj-google-style
|
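A small numeric example for the function above:
import numpy as np

coords1 = [[0, 0, 0], [1, 0, 0]]
coords2 = [[0, 0, 0], [0, 3, 4]]
print(all_distances(coords1, coords2))
# [[0.    5.   ]
#  [1.    ~5.099]]   (distances[i, j] is the distance between coords1[i] and coords2[j])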
def write_index(self, overwrite: bool=False, mock: bool=False) -> None:
write_if_allowed(self.index_filename, self.index_content(), overwrite=overwrite, mock=mock)
|
Writes the index file, if permitted.
Args:
overwrite: allow existing files to be overwritten?
mock: pretend to write, but don't
|
codesearchnet
|
def tournament_number2name(self, number):
tournaments = self.get_tournaments()
d = {t['tournament']: t['name'] for t in tournaments}
return d.get(number, None)
|
Translate tournament number to tournament name.
Args:
number (int): tournament number to translate
Returns:
name (str): name of the tournament or `None` if unknown.
Examples:
>>> NumerAPI().tournament_number2name(4)
'delta'
>>> NumerAPI().tournament_number2name(99)
None
|
juraj-google-style
|
def build_ellipse(X, Y):
x_mean = np.mean(X)
y_mean = np.mean(Y)
cov_matrix = np.cov(np.vstack((X, Y)))
(U, s, V) = linalg.svd(cov_matrix, full_matrices=False)
chi_95 = np.sqrt(4.61)
width = ((np.sqrt(cov_matrix[0][0]) * chi_95) * 2)
height = ((np.sqrt(cov_matrix[1][1]) * chi_95) * 2)
eigenvector = V.T[0]
angle = np.arctan((eigenvector[1] / eigenvector[0]))
return (x_mean, y_mean, width, height, angle)
|
Construct ellipse coordinates from two arrays of numbers.
Args:
X (1D array_like)
Y (1D array_like)
Returns:
float: The mean of `X`.
float: The mean of `Y`.
float: The width of the ellipse.
float: The height of the ellipse.
float: The angle of orientation of the ellipse.
|
codesearchnet
|
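A usage sketch for the snippet above, drawing the returned 95% ellipse over correlated samples with matplotlib; note the returned angle is in radians, while matplotlib's Ellipse patch expects degrees:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse

rng = np.random.default_rng(0)
X, Y = rng.multivariate_normal([0, 0], [[2.0, 1.2], [1.2, 1.0]], size=500).T

x_mean, y_mean, width, height, angle = build_ellipse(X, Y)

fig, ax = plt.subplots()
ax.scatter(X, Y, s=5)
ax.add_patch(Ellipse((x_mean, y_mean), width, height,
                     angle=np.degrees(angle), fill=False, color='red'))
plt.show()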
def record(*fields):
@six.add_metaclass(_RecordMetaClass)
class RecordType(object):
_record_sentinel = True
_record_fields = fields
return RecordType
|
Constructs a type that can be extended to create immutable, value types.
Examples:
A typical declaration looks like::
class MyRecord(record('a', ('b', 1))):
pass
The above would make a sub-class of ``collections.namedtuple`` that was named ``MyRecord`` with
a constructor that had the ``b`` field set to 1 by default.
Note:
This uses meta-class machinery to rewrite the inheritance hierarchy.
This is done in order to make sure that the underlying ``namedtuple`` instance is
bound to the right type name and to make sure that the synthetic class that is generated
to enable this machinery is not enabled for sub-classes of a user's record class.
Args:
fields (list[str | (str, any)]): A sequence of str or (str, default) pairs that
name the record's fields and, optionally, give their default values.
|
codesearchnet
|
def remove_site(name):
current_sites = list_sites()
if name not in current_sites:
log.debug('Site already absent: %s', name)
return True
ps_cmd = ['Remove-WebSite', '-Name', r"'{0}'".format(name)]
cmd_ret = _srvmgr(ps_cmd)
if cmd_ret['retcode'] != 0:
msg = 'Unable to remove site: {0}\nError: {1}' \
''.format(name, cmd_ret['stderr'])
raise CommandExecutionError(msg)
log.debug('Site removed successfully: %s', name)
return True
|
Delete a website from IIS.
Args:
name (str): The IIS site name.
Returns:
bool: True if successful, otherwise False
.. note::
This will not remove the application pool used by the site.
CLI Example:
.. code-block:: bash
salt '*' win_iis.remove_site name='My Test Site'
|
juraj-google-style
|
def get_likelihood(self, uni_matrix):
uni_dim = uni_matrix.shape[1]
num_edge = len(self.edges)
values = np.zeros([1, num_edge])
new_uni_matrix = np.empty([uni_dim, uni_dim])
for i in range(num_edge):
edge = self.edges[i]
value, left_u, right_u = edge.get_likelihood(uni_matrix)
new_uni_matrix[edge.L, edge.R] = left_u
new_uni_matrix[edge.R, edge.L] = right_u
values[0, i] = np.log(value)
return np.sum(values), new_uni_matrix
|
Compute likelihood of the tree given an U matrix.
Args:
uni_matrix(numpy.array): univariate matrix to evaluate likelihood on.
Returns:
tuple[float, numpy.array]:
likelihood of the current tree, next level conditional univariate matrix
|
juraj-google-style
|
def updateParams(self, newvalues):
for (param, value) in newvalues.items():
if param not in self.model.freeparams:
raise RuntimeError("Can't handle param: {0}".format(
param))
if newvalues:
self.model.updateParams(newvalues)
self._updateInternals()
self._paramsarray = None
|
Update model parameters and re-compute likelihoods.
This method is the **only** acceptable way to update model
parameters. The likelihood is re-computed as needed
by this method.
Args:
`newvalues` (dict)
A dictionary keyed by param name, with each value giving the new
value to set. Each parameter name must be a
valid model parameter (in `model.freeparams`).
|
juraj-google-style
|
class ByteRewriter:
LEAF = '[LEAF]'
def __init__(self, rewriting_rules: Union[str, Dict[str, str]]):
if isinstance(rewriting_rules, str):
with open(rewriting_rules, 'r') as f:
rewriting_rules = json.load(f)
elif not isinstance(rewriting_rules, dict):
raise ValueError(f'rewriting_rules should be either a path to json file or a dict, got {type(rewriting_rules)}')
self.hash_tree = self.construct_hash_tree(rewriting_rules)
reverse_rewriting_rules = {v: k for k, v in rewriting_rules.items()}
self.reverse_hash_tree = self.construct_hash_tree(reverse_rewriting_rules)
def add_leaf(self, hash_tree: Dict[str, Union[dict, List[str]]], byte_in_sequence: str, byte_out_sequence: str):
byte_in_list = byte_in_sequence.split(' ')
byte_out_list = byte_out_sequence.split(' ')
tree_pointer = hash_tree
for b in byte_in_list:
if b not in tree_pointer:
tree_pointer[b] = {}
tree_pointer = tree_pointer[b]
tree_pointer[self.LEAF] = byte_out_list
def construct_hash_tree(self, rewriting_rules: Dict[str, str]) -> Dict[str, Union[dict, List[str]]]:
hash_tree = defaultdict(dict)
for b in (f'{x:02x}' for x in range(256)):
hash_tree[b][self.LEAF] = [b]
for in_sequence, out_sequence in rewriting_rules.items():
self.add_leaf(hash_tree, in_sequence, out_sequence)
return hash_tree
def search_hash_tree(self, byte_sequence: List[str]) -> Union[None, List[str]]:
tree_pointer = self.hash_tree
for b in byte_sequence:
if b in tree_pointer:
tree_pointer = tree_pointer[b]
else:
return None
return tree_pointer[self.LEAF]
def rewrite_bytes(self, in_bytes: List[str], reverse=False) -> List[str]:
out_bytes = []
b_start = 0
b_end = 0
while b_start < len(in_bytes):
tree_pointer = self.hash_tree if not reverse else self.reverse_hash_tree
for j in range(b_start, len(in_bytes)):
b = in_bytes[j]
if b in tree_pointer:
tree_pointer = tree_pointer[b]
elif j == b_start:
cur_leaf = [b]
b_end = j
break
else:
break
if self.LEAF in tree_pointer:
cur_leaf = tree_pointer[self.LEAF]
b_end = j
out_bytes.extend(cur_leaf)
b_start = b_end + 1
return out_bytes
|
Byte rewriter class for MyT5 tokenizer.
This class is used to rewrite bytes using a hash tree. The hash tree is constructed from a set of rewriting rules.
Args:
rewriting_rules (`str` or `Dict[str, str]`):
A path to a json file containing the rewriting rules or a dictionary containing the rewriting rules.
|
github-repos
|
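A small usage sketch for the class above with a single hypothetical rule: the two-byte sequence "61 62" rewrites to the single byte "00", and any byte without a rule maps to itself:
rewriter = ByteRewriter({"61 62": "00"})

print(rewriter.rewrite_bytes(["61", "62"]))          # ['00']
print(rewriter.rewrite_bytes(["63"]))                # ['63'] (identity)
print(rewriter.rewrite_bytes(["00"], reverse=True))  # ['61', '62']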
def load_compositors(self, sensor_names):
comps = {}
mods = {}
for sensor_name in sensor_names:
if (sensor_name not in self.compositors):
self.load_sensor_composites(sensor_name)
if (sensor_name in self.compositors):
comps[sensor_name] = DatasetDict(self.compositors[sensor_name].copy())
mods[sensor_name] = self.modifiers[sensor_name].copy()
return (comps, mods)
|
Load all compositor configs for the provided sensors.
Args:
sensor_names (list of strings): Sensor names that have matching
``sensor_name.yaml`` config files.
Returns:
(comps, mods): Where `comps` is a dictionary:
sensor_name -> composite ID -> compositor object
And `mods` is a dictionary:
sensor_name -> modifier name -> (modifier class,
modifiers options)
Note that these dictionaries are copies of those cached in
this object.
|
codesearchnet
|
def DeleteSignedBinary(binary_urn,
token = None):
if _ShouldUseLegacyDatastore():
try:
aff4.FACTORY.Open(
binary_urn, aff4_type=aff4.AFF4Stream, mode="r", token=token)
except aff4.InstantiationError:
raise SignedBinaryNotFoundError(binary_urn)
aff4.FACTORY.Delete(binary_urn, token=token)
if data_store.RelationalDBEnabled():
try:
data_store.REL_DB.ReadSignedBinaryReferences(
_SignedBinaryIDFromURN(binary_urn))
except db.UnknownSignedBinaryError:
if _ShouldUseLegacyDatastore():
return
else:
raise SignedBinaryNotFoundError(binary_urn)
data_store.REL_DB.DeleteSignedBinaryReferences(
_SignedBinaryIDFromURN(binary_urn))
|
Deletes the binary with the given urn from the datastore.
Args:
binary_urn: RDFURN that serves as a unique identifier for the binary.
token: ACL token to use with the legacy (non-relational) datastore.
Raises:
SignedBinaryNotFoundError: If the signed binary does not exist.
|
juraj-google-style
|
def appliance_device_snmp_v3_trap_destinations(self):
if (not self.__appliance_device_snmp_v3_trap_destinations):
self.__appliance_device_snmp_v3_trap_destinations = ApplianceDeviceSNMPv3TrapDestinations(self.__connection)
return self.__appliance_device_snmp_v3_trap_destinations
|
Gets the ApplianceDeviceSNMPv3TrapDestinations API client.
Returns:
ApplianceDeviceSNMPv3TrapDestinations:
|
codesearchnet
|
def mark_flags_as_mutual_exclusive(flag_names, required=False, flag_values=_flagvalues.FLAGS):
for flag_name in flag_names:
if (flag_values[flag_name].default is not None):
warnings.warn('Flag --{} has a non-None default value. That does not make sense with mark_flags_as_mutual_exclusive, which checks whether the listed flags have a value other than None.'.format(flag_name))
def validate_mutual_exclusion(flags_dict):
flag_count = sum((1 for val in flags_dict.values() if (val is not None)))
if ((flag_count == 1) or ((not required) and (flag_count == 0))):
return True
raise _exceptions.ValidationError('{} one of ({}) must have a value other than None.'.format(('Exactly' if required else 'At most'), ', '.join(flag_names)))
register_multi_flags_validator(flag_names, validate_mutual_exclusion, flag_values=flag_values)
|
Ensures that only one flag among flag_names is not None.
Important note: This validator checks if flag values are None, and it does not
distinguish between default and explicit values. Therefore, this validator
does not make sense when applied to flags with default values other than None,
including other false values (e.g. False, 0, '', []). That includes multi
flags with a default value of [] instead of None.
Args:
flag_names: [str], names of the flags.
required: bool. If true, exactly one of the flags must have a value other
than None. Otherwise, at most one of the flags can have a value other
than None, and it is valid for all of the flags to be None.
flag_values: flags.FlagValues, optional FlagValues instance where the flags
are defined.
|
codesearchnet
|
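A usage sketch assuming the absl-py package, where this validator is exposed as `flags.mark_flags_as_mutual_exclusive`; both flags default to None, so the check behaves as documented:
from absl import app, flags

FLAGS = flags.FLAGS
flags.DEFINE_string('input_path', None, 'Read input from a local file.')
flags.DEFINE_string('input_url', None, 'Read input from a URL.')
# With required=True, exactly one of the two must be set on the command line.
flags.mark_flags_as_mutual_exclusive(['input_path', 'input_url'], required=True)

def main(argv):
    del argv  # unused
    print(FLAGS.input_path or FLAGS.input_url)

if __name__ == '__main__':
    app.run(main)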
def __init__(self, client=None, workingdir='/workingdir'):
self.client = self.connect_to_docker(client)
self.default_wdir = workingdir
self.hostname = self.client.base_url
|
Initialization:
Args:
client (docker.Client): a docker-py client. If not passed, we will try to create the
client from the job's environment variables
workingdir (str): default working directory to create in the containers
|
juraj-google-style
|