code (string, 20 to 4.93k chars) | docstring (string, 33 to 1.27k chars) | source (3 classes) |
---|---|---|
def _get_event_id(object_type: str) -> str:
key = _keys.event_counter(object_type)
DB.watch(key, pipeline=True)
count = DB.get_value(key)
DB.increment(key)
DB.execute()
if (count is None):
count = 0
return '{}_event_{:08d}'.format(object_type, int(count))
|
Return an event key for the event on the object type.
This must be a unique event id for the object.
Args:
object_type (str): Type of object
Returns:
str, event id
|
codesearchnet
|
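A minimal sketch of the id format the function above produces; the object type and counter value here are hypothetical, since the real count comes from the shared DB counter.

```python
# Hypothetical values; the real count is read from the watched DB key.
object_type = 'scheduling_block'
count = 12
print('{}_event_{:08d}'.format(object_type, int(count)))
# scheduling_block_event_00000012
```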
def _compile_output_step(outputs):
if not outputs:
raise GraphQLCompilationError(u'No fields were selected for output! Please mark at least '
u'one field with the @output directive.')
output_fields = {}
for output_name, output_context in six.iteritems(outputs):
location = output_context['location']
optional = output_context['optional']
graphql_type = output_context['type']
expression = None
existence_check = None
if isinstance(location, FoldScopeLocation):
if optional:
raise AssertionError(u'Unreachable state reached, optional in fold: '
u'{}'.format(output_context))
if location.field == COUNT_META_FIELD_NAME:
expression = expressions.FoldCountContextField(location)
else:
expression = expressions.FoldedContextField(location, graphql_type)
else:
expression = expressions.OutputContextField(location, graphql_type)
if optional:
existence_check = expressions.ContextFieldExistence(location.at_vertex())
if existence_check:
expression = expressions.TernaryConditional(
existence_check, expression, expressions.NullLiteral)
output_fields[output_name] = expression
return blocks.ConstructResult(output_fields)
|
Construct the final ConstructResult basic block that defines the output format of the query.
Args:
outputs: dict, output name (string) -> output data dict, specifying the location
from where to get the data, and whether the data is optional (and therefore
may be missing); missing optional data is replaced with 'null'
Returns:
a ConstructResult basic block that constructs appropriate outputs for the query
|
juraj-google-style
|
def fasta_files_equal(seq_file1, seq_file2):
seq1 = SeqIO.read(open(seq_file1), 'fasta')
seq2 = SeqIO.read(open(seq_file2), 'fasta')
if str(seq1.seq) == str(seq2.seq):
return True
else:
return False
|
Check equality of a FASTA file to another FASTA file
Args:
seq_file1: Path to a FASTA file
seq_file2: Path to another FASTA file
Returns:
bool: If the sequences are the same
|
juraj-google-style
|
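A brief usage sketch of fasta_files_equal; the file paths are hypothetical and each file is assumed to contain a single record, as required by SeqIO.read.

```python
# Hypothetical paths; each FASTA file must hold exactly one record.
if fasta_files_equal('query.fasta', 'reference.fasta'):
    print('Sequences are identical')
else:
    print('Sequences differ')
```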
def add_arguments(self, parser, bootstrap=False):
[item.add_argument(parser, bootstrap)
for item in self._get_items(bootstrap=False)]
|
Adds all items to the parser passed in.
Args:
parser (argparse.ArgumentParser): The parser to add all items to.
bootstrap (bool): Flag to indicate whether you only want to mark
bootstrapped items as required on the command-line.
|
juraj-google-style
|
def all_indices_partitioned(self):
return self._all_indices_partitioned
|
all_indices_partitioned property.
Returns:
True if we are inside a control flow construct and not all pfor iterations
may be active.
|
github-repos
|
def add_nested_compat_imports(module_builder, compat_api_versions, output_package):
imported_modules = module_builder.get_destination_modules()
for v in compat_api_versions:
for sv in compat_api_versions:
subcompat_module = _SUBCOMPAT_MODULE_TEMPLATE % (v, sv)
compat_module = _COMPAT_MODULE_TEMPLATE % sv
module_builder.copy_imports(compat_module, subcompat_module)
module_builder.copy_imports('%s.compat' % compat_module, '%s.compat' % subcompat_module)
compat_prefixes = tuple((_COMPAT_MODULE_TEMPLATE % v + '.' for v in compat_api_versions))
for imported_module in imported_modules:
if not imported_module.startswith(compat_prefixes):
continue
module_split = imported_module.split('.')
if len(module_split) > 3 and module_split[2] == 'compat':
src_module = '.'.join(module_split[:3])
src_name = module_split[3]
assert src_name != 'v1' and src_name != 'v2', imported_module
else:
src_module = '.'.join(module_split[:2])
src_name = module_split[2]
if src_name == 'compat':
continue
for compat_api_version in compat_api_versions:
module_builder.add_import(symbol=None, source_module_name='%s.%s' % (output_package, src_module), source_name=src_name, dest_module_name='compat.v%d.%s' % (compat_api_version, src_module), dest_name=src_name)
|
Adds compat.vN.compat.vK modules to module builder.
To avoid circular imports, we want to add __init__.py files under
compat.vN.compat.vK and under compat.vN.compat.vK.compat. For all other
imports, we point to corresponding modules under compat.vK.
Args:
module_builder: `_ModuleInitCodeBuilder` instance.
compat_api_versions: Supported compatibility versions.
output_package: Base output python package where generated API will be
added.
|
github-repos
|
def get_actions(self, issues):
actions = []
try:
for issue in issues:
action_item = self.determine_action(issue)
if action_item['action'] != AuditActions.IGNORE:
action_item['owners'] = self.get_contacts(issue)
actions.append(action_item)
finally:
db.session.rollback()
return actions
|
Returns a list of actions to be executed
Args:
issues (`list` of :obj:`RequiredTagsIssue`): List of issues
Returns:
`list` of `dict`
|
juraj-google-style
|
def verify_response(response, status_code, content_type=None):
status = int(response.status.split(' ', 1)[0])
if (status != status_code):
return False
if (content_type is None):
return True
for (header, value) in response.headers:
if (header.lower() == 'content-type'):
return (value == content_type)
return False
|
Verifies that a response has the expected status and content type.
Args:
response: The ResponseTuple to be checked.
status_code: An int, the HTTP status code to be compared with response
status.
content_type: A string with the acceptable Content-Type header value.
None allows any content type.
Returns:
True if both status_code and content_type match, else False.
|
codesearchnet
|
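A usage sketch of verify_response, assuming a simple namedtuple stand-in for the ResponseTuple mentioned in the docstring (the real type comes from the surrounding test utilities).

```python
import collections

# Hypothetical stand-in: status is an HTTP status line, headers a list of pairs.
ResponseTuple = collections.namedtuple('ResponseTuple', ['status', 'headers', 'content'])
response = ResponseTuple('200 OK', [('Content-Type', 'application/json')], b'{}')

assert verify_response(response, 200, 'application/json')
assert not verify_response(response, 404)
```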
def to_timestamp(self, data):
result = pd.Series(index=data.index)
_slice = ~data[self.col_name].isnull()
result[_slice] = data[_slice][self.col_name].astype('int64')
return result
|
Transform a datetime series into linux epoch.
Args:
data(pandas.DataFrame): DataFrame containing a column named `self.col_name`.
Returns:
pandas.Series
|
juraj-google-style
|
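A standalone sketch of the same datetime-to-epoch conversion on a throwaway DataFrame; the column name is hypothetical and, as in the method above, astype('int64') yields nanoseconds since the Unix epoch.

```python
import pandas as pd

df = pd.DataFrame({'created_at': pd.to_datetime(['2019-01-01', None, '2019-06-15'])})

result = pd.Series(index=df.index, dtype='object')
mask = ~df['created_at'].isnull()
result[mask] = df.loc[mask, 'created_at'].astype('int64')  # ns since epoch; NaT rows stay empty
```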
def _cleanup_keys_with_confirmation(self, keys_to_delete):
print('Round name: ', self.round_name)
print('Number of entities to be deleted: ', len(keys_to_delete))
if not keys_to_delete:
return
if self.verbose:
print('Entities to delete:')
idx = 0
prev_key_prefix = None
dots_printed_after_same_prefix = False
for k in keys_to_delete:
if idx >= 20:
print(' ...')
print(' ...')
break
key_prefix = (k.flat_path[0:1]
if k.flat_path[0] in [u'SubmissionType', u'WorkType']
else k.flat_path[0])
if prev_key_prefix == key_prefix:
if not dots_printed_after_same_prefix:
print(' ...')
dots_printed_after_same_prefix = True
else:
print(' ', k)
dots_printed_after_same_prefix = False
idx += 1
prev_key_prefix = key_prefix
print()
inp = input_str('Are you sure? (type "yes" without quotes to confirm): ')
if inp != 'yes':
return
with self.datastore_client.no_transact_batch() as batch:
for k in keys_to_delete:
batch.delete(k)
print('Data deleted')
|
Asks for confirmation and then deletes entries with the given keys.
Args:
keys_to_delete: list of datastore keys for which entries should be deleted
|
juraj-google-style
|
def compute_actor_handle_id_non_forked(actor_handle_id, current_task_id):
assert isinstance(actor_handle_id, ActorHandleID)
assert isinstance(current_task_id, TaskID)
handle_id_hash = hashlib.sha1()
handle_id_hash.update(actor_handle_id.binary())
handle_id_hash.update(current_task_id.binary())
handle_id = handle_id_hash.digest()
return ActorHandleID(handle_id)
|
Deterministically compute an actor handle ID in the non-forked case.
This code path is used whenever an actor handle is pickled and unpickled
(for example, if a remote function closes over an actor handle). Then,
whenever the actor handle is used, a new actor handle ID will be generated
on the fly as a deterministic function of the actor ID, the previous actor
handle ID and the current task ID.
TODO(rkn): It may be possible to cause problems by closing over multiple
actor handles in a remote function, which then get unpickled and give rise
to the same actor handle IDs.
Args:
actor_handle_id: The original actor handle ID.
current_task_id: The ID of the task that is unpickling the handle.
Returns:
An ID for the new actor handle.
|
codesearchnet
|
def check_hardware(self, expected):
if (len(expected) < 10):
expected += ('\x00' * (10 - len(expected)))
(err,) = self.rpc(0, 3, expected, result_format='L')
if (err == 0):
return True
return False
|
Make sure the hardware version is what we expect.
This convenience function is meant for ensuring that we are talking to
a tile that has the correct hardware version.
Args:
expected (str): The expected hardware string that is compared
against what is reported by the hardware_version RPC.
Returns:
bool: true if the hardware is the expected version, false otherwise
|
codesearchnet
|
def _ParseCachedEntry8(self, value_data, cached_entry_offset):
try:
cached_entry = self._ReadStructureFromByteStream(
value_data[cached_entry_offset:], cached_entry_offset,
self._cached_entry_data_type_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(
'Unable to parse cached entry value with error: {0!s}'.format(
exception))
if cached_entry.signature not in (
self._CACHED_ENTRY_SIGNATURE_8_0, self._CACHED_ENTRY_SIGNATURE_8_1):
raise errors.ParseError('Unsupported cache entry signature')
cached_entry_data = value_data[cached_entry_offset:]
if cached_entry.signature == self._CACHED_ENTRY_SIGNATURE_8_0:
data_type_map_name = 'appcompatcache_cached_entry_body_8_0'
elif cached_entry.signature == self._CACHED_ENTRY_SIGNATURE_8_1:
data_type_map_name = 'appcompatcache_cached_entry_body_8_1'
data_type_map = self._GetDataTypeMap(data_type_map_name)
context = dtfabric_data_maps.DataTypeMapContext()
try:
cached_entry_body = self._ReadStructureFromByteStream(
cached_entry_data[12:], cached_entry_offset + 12,
data_type_map, context=context)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(
'Unable to parse cached entry body with error: {0!s}'.format(
exception))
data_offset = context.byte_size
data_size = cached_entry_body.data_size
cached_entry_object = AppCompatCacheCachedEntry()
cached_entry_object.cached_entry_size = (
12 + cached_entry.cached_entry_data_size)
cached_entry_object.insertion_flags = cached_entry_body.insertion_flags
cached_entry_object.last_modification_time = (
cached_entry_body.last_modification_time)
cached_entry_object.path = cached_entry_body.path
cached_entry_object.shim_flags = cached_entry_body.shim_flags
if data_size > 0:
cached_entry_object.data = cached_entry_data[
data_offset:data_offset + data_size]
return cached_entry_object
|
Parses a Windows 8.0 or 8.1 cached entry.
Args:
value_data (bytes): value data.
cached_entry_offset (int): offset of the first cached entry data
relative to the start of the value data.
Returns:
AppCompatCacheCachedEntry: cached entry.
Raises:
ParseError: if the value data could not be parsed.
|
juraj-google-style
|
def remove_handler(self, name):
index = None
for i, h in enumerate(self.capture_handlers):
if h['name'] == name:
index = i
if index is not None:
self.capture_handlers[index]['logger'].close()
del self.capture_handlers[index]
|
Remove a handler given a name.
Note: if multiple handlers have the same name, the last matching
instance in the handler list will be removed.
Args:
name:
The name of the handler to remove
|
juraj-google-style
|
def _refresh(self, http):
self.devshell_response = _SendRecv()
self.access_token = self.devshell_response.access_token
expires_in = self.devshell_response.expires_in
if expires_in is not None:
delta = datetime.timedelta(seconds=expires_in)
self.token_expiry = client._UTCNOW() + delta
else:
self.token_expiry = None
|
Refreshes the access token.
Args:
http: unused HTTP object
|
juraj-google-style
|
def process(self):
print('Artifacts to be collected: {0!s}'.format(self.artifacts))
hunt_args = flows_pb2.ArtifactCollectorFlowArgs(artifact_list=self.artifacts, use_tsk=self.use_tsk, ignore_interpolation_errors=True, apply_parsers=False)
return self._create_hunt('ArtifactCollectorFlow', hunt_args)
|
Construct and start new Artifact Collection hunt.
Returns:
The newly created GRR hunt object.
Raises:
RuntimeError: if no items specified for collection.
|
codesearchnet
|
def __init__(self, context):
self._event_multiplexer = context.multiplexer
self._logdir = context.logdir
self._debugger_data_server = None
self._grpc_port = None
|
Constructs a debugger plugin for TensorBoard.
This plugin adds handlers for retrieving debugger-related data. The plugin
also starts a debugger data server once the log directory is passed to the
plugin via the call to get_plugin_apps.
Args:
context: A base_plugin.TBContext instance.
|
juraj-google-style
|
def _add_query_parameter(url, name, value):
if value is None:
return url
else:
return update_query_params(url, {name: value})
|
Adds a query parameter to a url.
Replaces the current value if it already exists in the URL.
Args:
url: string, url to add the query parameter to.
name: string, query parameter name.
value: string, query parameter value.
Returns:
The updated url. Does not update the url if value is None.
|
juraj-google-style
|
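update_query_params belongs to the surrounding library; a rough standard-library equivalent of the helper above might look like the following sketch.

```python
from urllib.parse import parse_qsl, urlencode, urlsplit, urlunsplit

def add_query_parameter(url, name, value):
    # Return the url unchanged when value is None, otherwise set/replace the parameter.
    if value is None:
        return url
    parts = urlsplit(url)
    query = dict(parse_qsl(parts.query))
    query[name] = value
    return urlunsplit(parts._replace(query=urlencode(query)))

add_query_parameter('https://example.com/path?a=1', 'b', '2')
# 'https://example.com/path?a=1&b=2'
```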
def copy_binary(directory, origin_tag, new_tag, version, package):
print('Rename and copy binaries with %s to %s.' % (origin_tag, new_tag))
origin_binary = BINARY_STRING_TEMPLATE % (package, version, origin_tag)
new_binary = BINARY_STRING_TEMPLATE % (package, version, new_tag)
zip_ref = zipfile.ZipFile(os.path.join(directory, origin_binary), 'r')
try:
tmpdir = tempfile.mkdtemp()
os.chdir(tmpdir)
zip_ref.extractall()
zip_ref.close()
old_py_ver = re.search('(cp\\d\\d-cp\\d\\d)', origin_tag).group(1)
new_py_ver = re.search('(cp\\d\\d-cp\\d\\d)', new_tag).group(1)
wheel_file = os.path.join(tmpdir, '%s-%s.dist-info' % (package, version), 'WHEEL')
with open(wheel_file, 'r') as f:
content = f.read()
with open(wheel_file, 'w') as f:
f.write(content.replace(old_py_ver, new_py_ver))
zout = zipfile.ZipFile(directory + new_binary, 'w', zipfile.ZIP_DEFLATED)
zip_these_files = ['%s-%s.dist-info' % (package, version), '%s-%s.data' % (package, version), 'tensorflow', 'tensorflow_core']
for dirname in zip_these_files:
for root, _, files in os.walk(dirname):
for filename in files:
zout.write(os.path.join(root, filename))
zout.close()
finally:
shutil.rmtree(tmpdir)
|
Rename and copy binaries for different python versions.
Args:
directory: string of directory
origin_tag: str of the old python version tag
new_tag: str of the new tag
version: the version of the package
package: str, name of the package
|
github-repos
|
def fast_tpu_gather(params, indices, name=None):
with tf.name_scope(name):
dtype = params.dtype
def _gather(params, indices):
'Fast gather using one_hot and batch matmul.'
if (dtype != tf.float32):
params = tf.to_float(params)
shape = common_layers.shape_list(params)
indices_shape = common_layers.shape_list(indices)
ndims = params.shape.ndims
if (ndims == 2):
params = tf.expand_dims(params, axis=(- 1))
if (ndims > 3):
params = tf.reshape(params, [shape[0], shape[1], (- 1)])
gather_result = tf.matmul(tf.one_hot(indices, shape[1], dtype=params.dtype), params)
if (ndims == 2):
gather_result = tf.squeeze(gather_result, axis=(- 1))
if (ndims > 3):
shape[1] = indices_shape[1]
gather_result = tf.reshape(gather_result, shape)
if (dtype != tf.float32):
gather_result = tf.cast(gather_result, dtype)
return gather_result
if dtype.is_integer:
gather_result = tf.batch_gather(params, indices)
else:
gather_result = _gather(params, indices)
return gather_result
|
Fast gather implementation for models running on TPU.
This function uses one_hot and batch matmul to do the gather, which is faster
than gather_nd on TPU. For params that have dtype of int32 (sequences to
gather from), batch_gather is used to keep accuracy.
Args:
params: A tensor from which to gather values.
[batch_size, original_size, ...]
indices: A tensor used as the index to gather values.
[batch_size, selected_size].
name: A string, name of the operation (optional).
Returns:
gather_result: A tensor that has the same rank as params.
[batch_size, selected_size, ...]
|
codesearchnet
|
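The core trick in fast_tpu_gather, gathering along axis 1 with a one-hot batched matmul, can be checked with a small NumPy sketch (NumPy is used here only for illustration; the shapes are arbitrary).

```python
import numpy as np

batch, original, depth, selected = 2, 5, 3, 4
params = np.random.rand(batch, original, depth).astype(np.float32)
indices = np.random.randint(0, original, size=(batch, selected))

# one_hot(indices) has shape [batch, selected, original]; a batched matmul with
# params [batch, original, depth] picks out the rows named by indices.
one_hot = np.eye(original, dtype=np.float32)[indices]
gathered = one_hot @ params

expected = np.take_along_axis(params, indices[..., None], axis=1)
assert np.allclose(gathered, expected)
```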
def available_readers(as_dict=False):
readers = []
for reader_configs in configs_for_reader():
try:
reader_info = read_reader_config(reader_configs)
except (KeyError, IOError, yaml.YAMLError):
LOG.warning("Could not import reader config from: %s", reader_configs)
LOG.debug("Error loading YAML", exc_info=True)
continue
readers.append(reader_info if as_dict else reader_info['name'])
return readers
|
Available readers based on current configuration.
Args:
as_dict (bool): Optionally return reader information as a dictionary.
Default: False
Returns: List of available reader names. If `as_dict` is `True` then
a list of dictionaries including additional reader information
is returned.
|
juraj-google-style
|
def get_subnets(
target='ec2',
purpose='internal',
env='',
region='', ):
account_az_dict = defaultdict(defaultdict)
subnet_id_dict = defaultdict(defaultdict)
subnet_url = '{0}/subnets/aws'.format(API_URL)
subnet_response = requests.get(subnet_url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
if not subnet_response.ok:
raise SpinnakerTimeout(subnet_response.text)
subnet_list = subnet_response.json()
for subnet in subnet_list:
LOG.debug('Subnet: %(account)s\t%(region)s\t%(target)s\t%(vpcId)s\t' '%(availabilityZone)s', subnet)
if subnet.get('target', '') == target:
availability_zone = subnet['availabilityZone']
account = subnet['account']
subnet_region = subnet['region']
subnet_id = subnet['id']
try:
if availability_zone not in account_az_dict[account][subnet_region]:
account_az_dict[account][subnet_region].append(availability_zone)
except KeyError:
account_az_dict[account][subnet_region] = [availability_zone]
if subnet['purpose'] == purpose:
try:
subnet_id_dict[account][subnet_region].append(subnet_id)
except KeyError:
subnet_id_dict[account][subnet_region] = [subnet_id]
LOG.debug('%s regions: %s', account, list(account_az_dict[account].keys()))
if all([env, region]):
try:
region_dict = {region: account_az_dict[env][region]}
region_dict['subnet_ids'] = {region: subnet_id_dict[env][region]}
LOG.debug('Region dict: %s', region_dict)
return region_dict
except KeyError:
raise SpinnakerSubnetError(env=env, region=region)
LOG.debug('AZ dict:\n%s', pformat(dict(account_az_dict)))
return account_az_dict
|
Get all availability zones for a given target.
Args:
target (str): Type of subnets to look up (ec2 or elb).
env (str): Environment to look up.
region (str): AWS Region to find Subnets for.
Returns:
az_dict: dictionary of availability zones, structured like
{ $region: [ $availabilityzones ] }
or
{ $account: { $region: [ $availabilityzones ] } }
|
juraj-google-style
|
def fmtVersion(*vsnparts):
if (len(vsnparts) < 1):
raise s_exc.BadTypeValu(valu=repr(vsnparts), name='fmtVersion', mesg='Not enough version parts to form a version string with.')
ret = '.'.join([str(part).lower() for part in vsnparts])
return ret
|
Join a sequence of parts together with a '.' separator.
Args:
*vsnparts: Version parts to join; each part is str()'d and lower-cased.
Returns:
str: The version parts joined with '.'.
|
codesearchnet
|
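A quick illustration of the join performed by fmtVersion:

```python
fmtVersion(1, 2, 3)      # '1.2.3'
fmtVersion(0, 1, 'B2')   # '0.1.b2'  (parts are str()'d and lower-cased)
fmtVersion()             # raises s_exc.BadTypeValu
```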
def find_proxy_plugin(component, plugin_name):
reg = ComponentRegistry()
plugins = reg.load_extensions('iotile.proxy_plugin', comp_filter=component, class_filter=TileBusProxyPlugin,
product_name='proxy_plugin')
for _name, plugin in plugins:
if plugin.__name__ == plugin_name:
return plugin
raise DataError("Could not find proxy plugin module in registered components or installed distributions",
component=component, name=plugin_name)
|
Attempt to find a proxy plugin provided by a specific component
Args:
component (string): The name of the component that provides the plugin
plugin_name (string): The name of the plugin to load
Returns:
TileBusProxyPlugin: The plugin, if found, otherwise raises DataError
|
juraj-google-style
|
def pre_save(self, instance, add: bool):
if not isinstance(instance, AtomicSlugRetryMixin):
raise ImproperlyConfigured((
'Model \'%s\' does not inherit from AtomicSlugRetryMixin. '
'Without this, the LocalizedUniqueSlugField will not work.'
) % type(instance).__name__)
slugs = LocalizedValue()
for lang_code, value in self._get_populate_values(instance):
if not value:
continue
slug = slugify(value, allow_unicode=True)
if instance.pk is not None:
current_slug = getattr(instance, self.name).get(lang_code)
if current_slug is not None:
stripped_slug = current_slug[0:current_slug.rfind('-')]
if slug == stripped_slug:
slugs.set(lang_code, current_slug)
continue
if self.include_time:
slug += '-%d' % datetime.now().microsecond
retries = getattr(instance, 'retries', 0)
if retries > 0:
if not self.include_time:
slug += '-'
slug += '%d' % retries
slugs.set(lang_code, slug)
setattr(instance, self.name, slugs)
return slugs
|
Ran just before the model is saved, allows us to build
the slug.
Arguments:
instance:
The model that is being saved.
add:
Indicates whether this is a new entry
to the database or an update.
Returns:
The localized slug that was generated.
|
juraj-google-style
|
def __init__(self, layers, scope='layered-network', summary_labels=()):
self.layers_spec = layers
super(LayeredNetwork, self).__init__(scope=scope, summary_labels=summary_labels)
self.parse_layer_spec(layer_spec=self.layers_spec, layer_counter=Counter())
|
Single-stack layered network.
Args:
layers: List of layer specification dicts.
|
juraj-google-style
|
def save(self, filething=None, padding=None):
try:
self.tags._inject(filething.fileobj, padding)
except (IOError, error) as e:
reraise(self._Error, e, sys.exc_info()[2])
except EOFError:
raise self._Error("no appropriate stream found")
|
save(filething=None, padding=None)
Save a tag to a file.
If no filename is given, the one most recently loaded is used.
Args:
filething (filething)
padding (:obj:`mutagen.PaddingFunction`)
Raises:
mutagen.MutagenError
|
juraj-google-style
|
def __init__(self, preread=None, precompile=None, file_path=None):
self.index = None
self.compiled = None
if file_path:
self._index_file = file_path
self._index_handle = open(self._index_file, "r")
self._ParseIndex(preread, precompile)
|
Create new IndexTable object.
Args:
preread: func, Pre-processing, applied to each field as it is read.
precompile: func, Pre-compilation, applied to each field before compiling.
file_path: String, Location of file to use as input.
|
juraj-google-style
|
def build_variant(variant, case_obj, case_id=None, gq_treshold=None):
variant_obj = None
sv = False
if variant.var_type == 'sv':
sv = True
variant_id = get_variant_id(variant)
ref = variant.REF
alt = variant.ALT[0]
coordinates = get_coords(variant)
chrom = coordinates['chrom']
pos = coordinates['pos']
found_homozygote = 0
found_hemizygote = 0
if sv:
found_variant = True
else:
found_variant = False
for ind_obj in case_obj['individuals']:
ind_id = ind_obj['ind_id']
ind_pos = ind_obj['ind_index']
gq = int(variant.gt_quals[ind_pos])
if (gq_treshold and gq < gq_treshold):
continue
genotype = GENOTYPE_MAP[variant.gt_types[ind_pos]]
if genotype in ['het', 'hom_alt']:
LOG.debug("Found variant")
found_variant = True
if chrom in ['X','Y'] and ind_obj['sex'] == 1:
if not check_par(chrom, pos):
LOG.debug("Found hemizygous variant")
found_hemizygote = 1
if genotype == 'hom_alt':
LOG.debug("Found homozygote alternative variant")
found_homozygote = 1
if found_variant:
variant_obj = Variant(
variant_id=variant_id,
chrom=chrom,
pos=pos,
end=coordinates['end'],
ref=ref,
alt=alt,
end_chrom=coordinates['end_chrom'],
sv_type = coordinates['sv_type'],
sv_len = coordinates['sv_length'],
case_id = case_id,
homozygote = found_homozygote,
hemizygote = found_hemizygote,
is_sv = sv,
id_column = variant.ID,
)
return variant_obj
|
Return a Variant object
Take a cyvcf2 formatted variant line and return a models.Variant.
If criteria are not fulfilled, e.g. the variant has no GT call or quality
is below the GQ threshold, then return None.
Args:
variant(cyvcf2.Variant)
case_obj(Case): We need the case object to check individuals sex
case_id(str): The case id
gq_treshold(int): Genotype quality threshold
Return:
formated_variant(models.Variant): A variant dictionary
|
juraj-google-style
|
def _on_skip(self, record):
logging.info('Reason to skip: %s', record.details)
logging.info(RESULT_LINE_TEMPLATE, record.test_name, record.result)
self.on_skip(record)
|
Proxy function to guarantee the base implementation of on_skip is
called.
Args:
record: records.TestResultRecord, a copy of the test record for
this test, containing all information of the test execution
including exception objects.
|
github-repos
|
def from_preset(preset):
if (preset == 'vesta_2019'):
cut_offs = loadfn(os.path.join(_directory, 'vesta_cutoffs.yaml'))
return CutOffDictNN(cut_off_dict=cut_offs)
else:
raise ValueError('Unrecognised preset: {}'.format(preset))
|
Initialise a CutOffDictNN according to a preset set of cut-offs.
Args:
preset (str): A preset name. The list of supported presets are:
- "vesta_2019": The distance cut-offs used by the VESTA
visualisation program.
Returns:
A CutOffDictNN using the preset cut-off dictionary.
|
codesearchnet
|
def expand_value_set_url(self, url: str) -> Optional[value_set_pb2.ValueSet]:
for resolver in self._resolvers:
expanded_value_set = resolver.expand_value_set_url(url)
if expanded_value_set is not None:
return expanded_value_set
return None
|
Retrieves the expanded value set definition for the given URL.
Attempts to expand the value set using definitions available to the
instance's package manager. If the expansion can not be performed with
available resources, makes network calls to a terminology service to perform
the expansion.
Args:
url: The URL of the value set to expand.
Returns:
A value set protocol buffer expanded to include the codes it represents.
|
github-repos
|
def run(self, sensor_graph, model):
did_downgrade = False
for node, inputs, _outputs in sensor_graph.iterate_bfs():
can_downgrade = False
if node.func_name != u'copy_all_a':
continue
input_a, trigger_a = node.inputs[0]
if input_a.selector.match_type in (DataStream.InputType, DataStream.UnbufferedType):
can_downgrade = True
elif isinstance(trigger_a, InputTrigger) and trigger_a.comp_string == u'==' and trigger_a.use_count and trigger_a.reference == 1:
can_downgrade = True
elif isinstance(trigger_a, TrueTrigger) and not input_a.selector.buffered:
can_downgrade = True
for in_node in inputs:
if input_a.matches(in_node.stream) and in_node.func_name == u'copy_all_a' and in_node.input_a.match_type not in (DataStream.InputType, DataStream.UnbufferedType):
can_downgrade = False
break
if can_downgrade:
did_downgrade = True
node.set_func(u'copy_latest_a', sensor_graph.find_processing_function(u'copy_latest_a'))
return did_downgrade
|
Run this optimization pass on the sensor graph
If necessary, information on the device model being targeted
can be found in the associated model argument.
Args:
sensor_graph (SensorGraph): The sensor graph to optimize
model (DeviceModel): The device model we're using
|
juraj-google-style
|
def pseudos_with_symbols(self, symbols):
pseudos = self.select_symbols(symbols, ret_list=True)
found_symbols = [p.symbol for p in pseudos]
duplicated_elements = [s for (s, o) in collections.Counter(found_symbols).items() if (o > 1)]
if duplicated_elements:
raise ValueError(('Found multiple occurrences of symbol(s) %s' % ', '.join(duplicated_elements)))
missing_symbols = [s for s in symbols if (s not in found_symbols)]
if missing_symbols:
raise ValueError(('Missing data for symbol(s) %s' % ', '.join(missing_symbols)))
return pseudos
|
Return the pseudos with the given chemical symbols.
Raises:
ValueError if one of the symbols is not found or multiple occurrences are present.
|
codesearchnet
|
def select(self, selector):
if self.closed():
raise ValueError('Attempt to call select() on a closed Queryable.')
try:
selector = make_selector(selector)
except ValueError:
raise TypeError('select() parameter selector={selector} cannot be converted into a callable selector'.format(selector=repr(selector)))
if (selector is identity):
return self
return self._create(imap(selector, self))
|
Transforms each element of a sequence into a new form.
Each element of the source is transformed through a selector function
to produce a corresponding element in the result sequence.
If the selector is identity the method will return self.
Note: This method uses deferred execution.
Args:
selector: A unary function mapping a value in the source sequence
to the corresponding value in the generated sequence.
The single positional argument to the selector function is the
element value. The return value of the selector function
should be the corresponding element of the result sequence.
Returns:
A Queryable over generated sequence whose elements are the result
of invoking the selector function on each element of the source
sequence.
Raises:
ValueError: If this Queryable has been closed.
TypeError: If selector is not callable.
|
codesearchnet
|
def _load_config_file(path):
with io.open(utils.abs_path(path), 'r', encoding='utf-8') as f:
conf = yaml.load(f)
return conf
|
Loads a test config file.
The test config file has to be in YAML format.
Args:
path: A string that is the full path to the config file, including the
file name.
Returns:
A dict that represents info in the config file.
|
juraj-google-style
|
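A hedged standalone equivalent of _load_config_file; newer PyYAML versions require an explicit Loader for yaml.load, so safe_load is used here instead, and the path is hypothetical.

```python
import io
import yaml

with io.open('sample_config.yml', 'r', encoding='utf-8') as f:
    conf = yaml.safe_load(f)  # dict representing the test config
```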
def CopyFromDateTimeString(self, time_string):
super(APFSTime, self)._CopyFromDateTimeString(time_string)
if (self._timestamp is None or self._timestamp < self._INT64_MIN or
self._timestamp > self._INT64_MAX):
raise ValueError('Date time value not supported.')
|
Copies an APFS timestamp from a date and time string.
Args:
time_string (str): date and time value formatted as:
YYYY-MM-DD hh:mm:ss.######[+-]##:##
Where # are numeric digits ranging from 0 to 9 and the seconds
fraction can be either 3 or 6 digits. The time of day, seconds
fraction and time zone offset are optional. The default time zone
is UTC.
Raises:
ValueError: if the date and time value is not supported.
|
juraj-google-style
|
def _find_dtype_iterable(iterable: Iterable[Any], dtype: Optional[dtypes.DType]) -> Optional[dtypes.DType]:
if dtype is not None:
return dtype
for x in iterable:
dtype = _find_dtype(x, dtype)
return dtype
|
Find the preferred dtype of a list of objects.
This will go over the iterable, and use the first object with a preferred
dtype. The dtype passed has highest priority if it is not None.
Args:
iterable: an iterable with things that might have a dtype.
dtype: an overriding dtype, or None.
Returns:
an optional dtype.
|
github-repos
|
def get_pdbs_for_gene(bigg_model, bigg_gene, cache_dir=tempfile.gettempdir(), force_rerun=False):
my_structures = []
gene = ssbio.utils.request_json(link='http:
outfile='{}_{}.json'.format(bigg_model, bigg_gene),
outdir=cache_dir,
force_rerun_flag=force_rerun)
uniprots = []
if 'database_links' in gene:
if 'UniProt' in gene['database_links']:
uniprots = [x['id'] for x in gene['database_links']['UniProt']]
elif 'NCBI GI' in gene['database_links']:
uniprots = []
gis = [x['id'] for x in gene['database_links']['NCBI GI']]
gi_uniprots = bs_unip.mapping(fr='P_GI', to='ACC', query=gis).values()
uniprots.extend(gi_uniprots)
uniprots = ssbio.utils.flatlist_dropdup(uniprots)
uniprots = [x for x in uniprots if ssbio.databases.uniprot.is_valid_uniprot_id(x)]
if uniprots:
for u in uniprots:
get_best_structure = ssbio.databases.pdb.best_structures(uniprot_id=u, outdir=cache_dir)
if get_best_structure:
for best_structure in get_best_structure:
my_structures.append((best_structure['pdb_id'], best_structure['chain_id']))
return my_structures
|
Attempt to get a rank-ordered list of available PDB structures for a BiGG Model and its gene.
Args:
bigg_model: BiGG Model ID
bigg_gene: BiGG Gene ID
Returns:
list: rank-ordered list of tuples of (pdb_id, chain_id)
|
juraj-google-style
|
def __init__(self, bucket, key, info=None, context=None):
if context is None:
context = google.datalab.Context.default()
self._context = context
self._api = _api.Api(context)
self._bucket = bucket
self._key = key
self._info = info
|
Initializes an instance of an Object.
Args:
bucket: the name of the bucket containing the object.
key: the key of the object.
info: the information about the object if available.
context: an optional Context object providing project_id and credentials. If a specific
project id or credentials are unspecified, the default ones configured at the global
level are used.
|
juraj-google-style
|
def _GetTitleFromChromeWebStore(self, extension_identifier):
if (extension_identifier in self._extensions):
return self._extensions.get(extension_identifier)
page_content = self._GetChromeWebStorePage(extension_identifier)
if (not page_content):
logger.warning('[{0:s}] no data returned for extension identifier: {1:s}'.format(self.NAME, extension_identifier))
return None
(first_line, _, _) = page_content.partition('\n')
match = self._TITLE_RE.search(first_line)
name = None
if match:
title = match.group(1)
if title.startswith('Chrome Web Store - '):
name = title[19:]
elif title.endswith('- Chrome Web Store'):
name = title[:(- 19)]
if (not name):
self._extensions[extension_identifier] = 'UNKNOWN'
return None
self._extensions[extension_identifier] = name
return name
|
Retrieves the name of the extension from the Chrome store website.
Args:
extension_identifier (str): Chrome extension identifier.
Returns:
str: name of the extension or None.
|
codesearchnet
|
def __init__(self, max_batch_tokens: int):
self.max_batch_tokens = max_batch_tokens
self._setup_metrics()
|
Initialize metrics for continuous batch processor.
Args:
max_batch_tokens: Maximum number of tokens in a batch
|
github-repos
|
def update(self, car_id=None, wake_if_asleep=False, force=False):
cur_time = time.time()
with self.__lock:
last_update = self._last_attempted_update_time
if (force or ((cur_time - last_update) > self.update_interval)):
cars = self.get_vehicles()
for car in cars:
self.car_online[car['id']] = (car['state'] == 'online')
self._last_attempted_update_time = cur_time
update_succeeded = False
for (id_, value) in self.car_online.items():
if ((car_id is not None) and (car_id != id_)):
continue
if (value and ((id_ in self.__update) and self.__update[id_]) and (force or (id_ not in self._last_update_time) or ((cur_time - self._last_update_time[id_]) > self.update_interval))):
try:
data = self.get(id_, 'data', wake_if_asleep)
except TeslaException:
data = None
if (data and data['response']):
response = data['response']
self.__climate[car_id] = response['climate_state']
self.__charging[car_id] = response['charge_state']
self.__state[car_id] = response['vehicle_state']
self.__driving[car_id] = response['drive_state']
self.__gui[car_id] = response['gui_settings']
self.car_online[car_id] = (response['state'] == 'online')
self._last_update_time[car_id] = time.time()
update_succeeded = True
return update_succeeded
|
Update all vehicle attributes in the cache.
This command will connect to the Tesla API and first update the list of
online vehicles assuming no attempt for at least the [update_interval].
It will then update all the cached values for cars that are awake
assuming no update has occurred for at least the [update_interval].
Args:
inst (Controller): The instance of a controller
car_id (string): The vehicle to update. If None, all cars are updated.
wake_if_asleep (bool): Keyword arg to force a vehicle awake. This is
processed by the wake_up decorator.
force (bool): Keyword arg to force a vehicle update regardless of the
update_interval
Returns:
True if any update succeeded for any vehicle else false
Throws:
RetryLimitError
|
codesearchnet
|
def wiki_request(self, params):
params["format"] = "json"
if "action" not in params:
params["action"] = "query"
limit = self._rate_limit
last_call = self._rate_limit_last_call
if limit and last_call and last_call + self._min_wait > datetime.now():
wait_time = (last_call + self._min_wait) - datetime.now()
time.sleep(int(wait_time.total_seconds()))
req = self._get_response(params)
if self._rate_limit:
self._rate_limit_last_call = datetime.now()
return req
|
Make a request to the MediaWiki API using the given search
parameters
Args:
params (dict): Request parameters
Returns:
A parsed dict of the JSON response
Note:
Useful when wanting to query the MediaWiki site for some
value that is not part of the wrapper API
|
juraj-google-style
|
def _get_password(params):
user_name = params['user']
service_name = params['host'] + ':' + params['driver']
return keyring.get_password(service_name=service_name,
username=user_name)
|
Get the password for a database connection from :mod:`keyring`
Args:
params (dict): database configuration, as defined in :mod:`ozelot.config`
Returns:
str: password
|
juraj-google-style
|
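A sketch of the keyring round-trip that _get_password relies on; the service, user, and password values are hypothetical.

```python
import keyring

# Store a password once (e.g. during project setup)...
keyring.set_password('db.example.com:postgresql', 'analytics', 's3cret')

# ...and read it back the same way _get_password does.
params = {'user': 'analytics', 'host': 'db.example.com', 'driver': 'postgresql'}
password = keyring.get_password(service_name=params['host'] + ':' + params['driver'],
                                username=params['user'])
```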
class TFMinLengthLogitsProcessor(TFLogitsProcessor):
def __init__(self, min_length: int, eos_token_id: int):
if not isinstance(min_length, int) or min_length < 0:
raise ValueError(f'`min_length` has to be a positive integer, but is {min_length}')
if not isinstance(eos_token_id, int) or eos_token_id < 0:
raise ValueError(f'`eos_token_id` has to be a positive integer, but is {eos_token_id}')
self.min_length = min_length
self.eos_token_id = eos_token_id
def _apply_eos_token_mask(self, scores: tf.Tensor) -> tf.Tensor:
eos_token_id_mask = tf.range(scores.shape[-1]) == self.eos_token_id
scores = tf.where(eos_token_id_mask, float('-inf'), scores)
return scores
def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
scores = tf.cond(tf.less(cur_len, self.min_length), lambda: self._apply_eos_token_mask(scores), lambda: tf.identity(scores))
return scores
|
[`TFLogitsProcessor`] enforcing a min-length by setting EOS probability to 0.
Args:
min_length (`int`):
The minimum length below which the score of `eos_token_id` is set to `-float("Inf")`.
eos_token_id (`int`):
The id of the *end-of-sequence* token.
|
github-repos
|
def _extract_namespace_ast_node(self, desc):
if len(desc) == 0 or not isinstance(desc[0], AstNamespace):
if self._debug:
self._logger.info('Description: %r', desc)
raise InvalidSpec('First declaration in a stone must be '
'a namespace. Possibly caused by preceding '
'errors.', desc[0].lineno, desc[0].path)
for item in desc[1:]:
if isinstance(item, AstNamespace):
raise InvalidSpec('Only one namespace declaration per file.',
item[0].lineno, item[0].path)
return desc.pop(0)
|
Checks that the namespace is declared first in the spec, and that only
one namespace is declared.
Args:
desc (List[stone.stone.parser.ASTNode]): All AST nodes in a spec
file in the order they were defined.
Return:
stone.frontend.ast.AstNamespace: The namespace AST node.
|
juraj-google-style
|
def get_functionalHome(self, functionalHomeType: type) -> FunctionalHome:
for x in self.functionalHomes:
if isinstance(x, functionalHomeType):
return x
return None
|
gets the specified functionalHome
Args:
functionalHome(type): the type of the functionalHome which should be returned
Returns:
the FunctionalHome or None if it couldn't be found
|
juraj-google-style
|
def cvt2frames(self, frame_dir, file_start=0, filename_tmpl='{:06d}.jpg', start=0, max_num=0, show_progress=True):
mkdir_or_exist(frame_dir)
if (max_num == 0):
task_num = (self.frame_cnt - start)
else:
task_num = min((self.frame_cnt - start), max_num)
if (task_num <= 0):
raise ValueError('start must be less than total frame number')
if (start > 0):
self._set_real_position(start)
def write_frame(file_idx):
img = self.read()
filename = osp.join(frame_dir, filename_tmpl.format(file_idx))
cv2.imwrite(filename, img)
if show_progress:
track_progress(write_frame, range(file_start, (file_start + task_num)))
else:
for i in range(task_num):
img = self.read()
if (img is None):
break
filename = osp.join(frame_dir, filename_tmpl.format((i + file_start)))
cv2.imwrite(filename, img)
|
Convert a video to frame images
Args:
frame_dir (str): Output directory to store all the frame images.
file_start (int): Filenames will start from the specified number.
filename_tmpl (str): Filename template with the index as the
placeholder.
start (int): The starting frame index.
max_num (int): Maximum number of frames to be written.
show_progress (bool): Whether to show a progress bar.
|
codesearchnet
|
def get_type(mime=None, ext=None):
for kind in types:
if kind.extension is ext or kind.mime is mime:
return kind
return None
|
Returns the file type instance searching by
MIME type or file extension.
Args:
ext: file extension string. E.g: jpg, png, mp4, mp3
mime: MIME string. E.g: image/jpeg, video/mpeg
Returns:
The matched file type instance. Otherwise None.
|
juraj-google-style
|
def add_record(self, record):
record.update_record()
if record.result == TestResultEnums.TEST_RESULT_SKIP:
self.skipped.append(record)
return
self.executed.append(record)
if record.result == TestResultEnums.TEST_RESULT_FAIL:
self.failed.append(record)
elif record.result == TestResultEnums.TEST_RESULT_PASS:
self.passed.append(record)
else:
self.error.append(record)
|
Adds a test record to test result.
A record is considered executed once it's added to the test result.
Adding the record finalizes the content of a record, so no change
should be made to the record afterwards.
Args:
record: A test record object to add.
|
juraj-google-style
|
def create_downloader_of_type(type_name):
downloaders = available_downloaders()
if type_name not in downloaders.keys():
raise UnknownDownloaderException('Unknown downloader: %s' % (type_name,))
return downloaders[type_name]()
|
Create an instance of the downloader with the given name.
Args:
type_name: The name of a downloader.
Returns:
An instance of the downloader with the given type.
|
juraj-google-style
|
def run_commands(self, program_language, program_main):
if program_language == 'python':
python_exe = sys.executable
ptvsd_host = 'localhost'
if self.args.docker:
python_exe = 'python'
ptvsd_host = '0.0.0.0'
if self.args.vscd:
self.update_environment()
command = [
python_exe,
'-m',
'ptvsd',
'--host',
ptvsd_host,
'--port',
self.args.vscd_port,
'--wait',
'{}.py'.format(program_main),
]
else:
command = [python_exe, '.', program_main]
cli_command = [str(s) for s in command + self.profile.get('profile_args').standard]
print_command = ' '.join(
str(s) for s in command + self.profile.get('profile_args').masked
)
if self.args.unmask:
print_command = ' '.join(
str(s) for s in command + self.profile.get('profile_args').quoted
)
elif program_language == 'java':
if self.args.docker:
command = ['java', '-cp', self.tcex_json.get('class_path', './target/*')]
else:
command = [
self.tcex_json.get('java_path', program_language),
'-cp',
self.tcex_json.get('class_path', './target/*'),
]
cli_command = command + self.profile.get('profile_args').standard + [program_main]
print_command = ' '.join(
command + self.profile.get('profile_args').masked + [program_main]
)
if self.args.unmask:
print_command = ' '.join(
command + self.profile.get('profile_args').quoted + [program_main]
)
return {'cli_command': cli_command, 'print_command': print_command}
|
Return the run Print Command.
Args:
program_language (str): The language of the current App/Project.
program_main (str): The executable name.
Returns:
dict: A dictionary containing the run command and a printable version of the command.
|
juraj-google-style
|
def convert(model_flags: _model_flags_pb2.ModelFlags, conversion_flags: _conversion_flags_pb2.ConverterFlags, input_data_str: Optional[str]=None, debug_info_str: Optional[str]=None):
try:
return wrap_converter.wrapped_convert(model_flags.SerializeToString(), conversion_flags.SerializeToString(), input_data_str, debug_info_str)
except Exception as e:
converter_error = ConverterError(str(e))
for error_data in _metrics_wrapper.retrieve_collected_errors():
converter_error.append_error(error_data)
if error_data.error_code == converter_error_data_pb2.ConverterErrorData.ERROR_STATEFUL_PARTITIONED_CALL_IN_FINAL_IR and (not conversion_flags.guarantee_all_funcs_one_use):
conversion_flags.guarantee_all_funcs_one_use = True
return convert(model_flags, conversion_flags, input_data_str, debug_info_str)
raise converter_error
|
Converts `input_data_str` to a TFLite model.
Args:
model_flags: Proto describing model properties, see `model_flags.proto`.
conversion_flags: Proto describing conversion properties, see
`compiler/mlir/lite/converter_flags.proto`.
input_data_str: Input data in serialized form (e.g. a graphdef is common, or
it can be hlo text or proto)
debug_info_str: Serialized `GraphDebugInfo` proto describing logging
information.
Returns:
Converted model in serialized form (e.g. a TFLITE model is common).
Raises:
ConverterError: When conversion fails in TFLiteConverter, usually due to
ops not being supported.
|
github-repos
|
def get_gcc_version():
key = 'gcc_ver'
out, err = run_shell_cmd(cmds_all[PLATFORM.lower()][key])
if err and FLAGS.debug:
print('Error in detecting GCC version:\n %s' % str(err))
return out.strip(b'\n')
|
Retrieves version of GCC detected.
Returns:
String that is the version of GCC.
e.g. '7.3.0'
|
github-repos
|
def _CalculateHashesFileEntry(self, file_system, file_entry, parent_full_path, output_writer):
full_path = file_system.JoinPath([parent_full_path, file_entry.name])
for data_stream in file_entry.data_streams:
hash_value = self._CalculateHashDataStream(file_entry, data_stream.name)
display_path = self._GetDisplayPath(file_entry.path_spec, full_path, data_stream.name)
output_writer.WriteFileHash(display_path, (hash_value or 'N/A'))
for sub_file_entry in file_entry.sub_file_entries:
self._CalculateHashesFileEntry(file_system, sub_file_entry, full_path, output_writer)
|
Recursively calculates hashes starting with the file entry.
Args:
file_system (dfvfs.FileSystem): file system.
file_entry (dfvfs.FileEntry): file entry.
parent_full_path (str): full path of the parent file entry.
output_writer (StdoutWriter): output writer.
|
codesearchnet
|
def Add(self, entry):
if not isinstance(entry, PasswdMapEntry):
raise TypeError
return super(PasswdMap, self).Add(entry)
|
Add a new object, verify it is a PasswdMapEntry instance.
Args:
entry: A PasswdMapEntry instance.
Returns:
True if added successfully, False otherwise.
Raises:
TypeError: The argument is of the wrong type.
|
github-repos
|
def collective_leader(cluster_spec, task_type, task_id):
cluster_spec = normalize_cluster_spec(cluster_spec)
if not cluster_spec.as_dict():
return ''
_validate_cluster_spec(cluster_spec, task_type, task_id)
if task_type == 'evaluator':
return ''
if 'chief' in cluster_spec.jobs:
return '/job:chief/replica:0/task:0'
assert 'worker' in cluster_spec.jobs
return '/job:worker/replica:0/task:0'
|
Return the job name for the leader of collective ops.
Args:
cluster_spec: a dict, `ClusterDef` or `ClusterSpec` object specifying the
cluster configurations.
task_type: the task type in the cluster.
task_id: the task id in the cluster.
Returns:
a string indicating the leader job name or empty string if no need to set
leader job.
|
github-repos
|
def split_by_criteria(dictionary, keys=None, prefix=None):
keys = (keys or [])
keys = set(keys)
included_items = {k: dictionary[k] for k in dictionary.keys() if ((k in keys) or (prefix and k.startswith(prefix)))}
excluded_items = {k: dictionary[k] for k in dictionary.keys() if (k not in included_items)}
return SplitResultSpec(included=included_items, excluded=excluded_items)
|
Split a dictionary in two by the provided keys.
Args:
dictionary (dict[str, object]): A Python dictionary
keys (sequence [str]): A sequence of keys which will be added to the split criteria
prefix (str): A prefix which will be added to the split criteria
Returns:
`SplitResultSpec` : A collections.namedtuple with the following attributes:
* Args:
included (dict[str, object]): A dictionary with the keys included in the criteria.
excluded (dict[str, object]): A dictionary with the keys not included in the criteria.
|
codesearchnet
|
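A usage sketch of split_by_criteria on a made-up hyperparameter dictionary; the expected contents of the named tuple are shown as comments.

```python
hyperparameters = {'epochs': 10, 'batch_size': 64,
                   'sagemaker_program': 'train.py', 'sagemaker_region': 'us-east-1'}

split = split_by_criteria(hyperparameters, keys=['epochs'], prefix='sagemaker_')
# split.included -> {'epochs': 10, 'sagemaker_program': 'train.py',
#                    'sagemaker_region': 'us-east-1'}
# split.excluded -> {'batch_size': 64}
```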
def RegisterDecompressor(cls, decompressor):
compression_method = decompressor.COMPRESSION_METHOD.lower()
if compression_method in cls._decompressors:
raise KeyError(
'Decompressor for compression method: {0:s} already set.'.format(
decompressor.COMPRESSION_METHOD))
cls._decompressors[compression_method] = decompressor
|
Registers a decompressor for a specific compression method.
Args:
decompressor (type): decompressor class.
Raises:
KeyError: if the corresponding decompressor is already set.
|
juraj-google-style
|
def __init__(self, channel):
self.GetChanges = channel.unary_stream(
'/pb.Data/GetChanges',
request_serializer=lookout_dot_sdk_dot_service__data__pb2.ChangesRequest.SerializeToString,
response_deserializer=lookout_dot_sdk_dot_service__data__pb2.Change.FromString,
)
self.GetFiles = channel.unary_stream(
'/pb.Data/GetFiles',
request_serializer=lookout_dot_sdk_dot_service__data__pb2.FilesRequest.SerializeToString,
response_deserializer=lookout_dot_sdk_dot_service__data__pb2.File.FromString,
)
|
Constructor.
Args:
channel: A grpc.Channel.
|
juraj-google-style
|
def _invalid_string_quote(self, quote, row, correct_quote=None, col=None):
if not correct_quote:
correct_quote = SMART_QUOTE_OPTS.get(self.config.string_quote)
self.add_message(
'invalid-string-quote',
line=row,
args=(quote, correct_quote),
**self.get_offset(col)
)
|
Add a message for an invalid string literal quote.
Args:
quote: The quote characters that were found.
row: The row number the quote character was found on.
correct_quote: The quote characters that is required. If None
(default), will use the one from the config.
col: The column the quote characters were found on.
|
juraj-google-style
|
def square(times: np.ndarray, amp: complex, period: float, phase: float = 0) -> np.ndarray:
x = times/period+phase/np.pi
return amp*(2*(2*np.floor(x) - np.floor(2*x)) + 1).astype(np.complex_)
|
Continuous square wave.
Args:
times: Times to output wave for.
amp: Pulse amplitude. Wave range is [-amp, amp].
period: Pulse period, units of dt.
phase: Pulse phase.
|
juraj-google-style
|
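A small numerical check of the square-wave formula above; the sample times are arbitrary.

```python
import numpy as np

times = np.array([0.0, 0.25, 0.5, 0.75, 1.0])
square(times, amp=1.0, period=1.0)
# array([ 1.+0.j,  1.+0.j, -1.+0.j, -1.+0.j,  1.+0.j])
# First half of each period is +amp, second half is -amp.
```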
def all_label_values(self, label_list_ids=None):
values = set()
for label_list in self.label_lists.values():
if ((label_list_ids is None) or (label_list.idx in label_list_ids)):
values = values.union(label_list.label_values())
return values
|
Return a set of all label-values occurring in this utterance.
Args:
label_list_ids (list): If not None, only label-values from
label-lists with an id contained in this list
are considered.
Returns:
:class:`set`: A set of distinct label-values.
|
codesearchnet
|
def unset(entity, *types):
if (not types):
types = (TypedField,)
fields = list(entity._fields.keys())
remove = (x for x in fields if isinstance(x, types))
for field in remove:
del entity._fields[field]
|
Unset the TypedFields on the input `entity`.
Args:
entity: A mixbox.Entity object.
*types: A variable-length list of TypedField subclasses. If not
provided, defaults to TypedField.
|
codesearchnet
|
def feedback(self, dna: DNA, reward: Union[float, Tuple[float]]) -> None:
if self.needs_feedback:
if self.multi_objective and isinstance(reward, float):
reward = (reward,)
elif not self.multi_objective and isinstance(reward, tuple):
if len(reward) != 1:
raise ValueError(f'{self!r} is single objective, but the reward {reward!r} contains multiple objectives.')
reward = reward[0]
self._feedback(dna, reward)
self._num_feedbacks += 1
|
Feedback a completed trial to the algorithm.
Args:
dna: a DNA object.
reward: reward for the DNA. It is a float if `self.multi_objective`
returns False, otherwise it's a tuple of floats.
|
github-repos
|
def __init__(self, plugin_name=None, text=None):
super(AnalysisReport, self).__init__()
self.filter_string = None
self.plugin_name = plugin_name
self.report_array = None
self.report_dict = None
self.text = text
self.time_compiled = None
|
Initializes the analysis report.
Args:
plugin_name (Optional[str]): name of the analysis plugin that generated
the report.
text (Optional[str]): report text.
|
juraj-google-style
|
def _add_strings_to_commastring(self, field, strings):
allstringsadded = True
for string in strings:
if (not self._add_string_to_commastring(field, string)):
allstringsadded = False
return allstringsadded
|
Add a list of strings to a comma separated list of strings
Args:
field (str): Field containing comma separated list
strings (List[str]): list of strings to add
Returns:
bool: True if all strings added or False if any already present.
|
codesearchnet
|
def load(file_path, parse_line_fn):
vocabulary = []
embeddings = []
embeddings_dim = None
for line in tf.gfile.GFile(file_path):
(token, embedding) = parse_line_fn(line)
if (not embeddings_dim):
embeddings_dim = len(embedding)
elif (embeddings_dim != len(embedding)):
raise ValueError('Inconsistent embedding dimension detected, %d != %d for token %s', embeddings_dim, len(embedding), token)
vocabulary.append(token)
embeddings.append(embedding)
return (vocabulary, np.array(embeddings))
|
Loads a text embedding into memory as a numpy matrix.
Args:
file_path: Path to the text embedding file.
parse_line_fn: callback function to parse each file line.
Returns:
A tuple of (list of vocabulary tokens, numpy matrix of embedding vectors).
Raises:
ValueError: if the data in the sstable is inconsistent.
|
codesearchnet
|
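A hedged example of a parse_line_fn for whitespace-separated, GloVe-style embedding files; the file name is hypothetical.

```python
import numpy as np

def parse_space_separated_line(line):
    # A line looks like: "cat 0.1 0.2 0.3"
    columns = line.strip().split()
    token = columns[0]
    embedding = np.array([float(v) for v in columns[1:]], dtype=np.float32)
    return token, embedding

# vocabulary, embeddings = load('embeddings.txt', parse_space_separated_line)
```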
def _fn(self, arg0, arg1):
return arg0 + arg1
|
fn doc.
Args:
arg0: Arg 0.
arg1: Arg 1.
Returns:
Sum of args.
|
github-repos
|
def ed25519_private_key_from_string(string):
try:
return Ed25519PrivateKey.from_private_bytes(base64.b64decode(string))
except (UnsupportedAlgorithm, Base64Error) as exc:
raise ScriptWorkerEd25519Error("Can't create Ed25519PrivateKey: {}!".format(str(exc)))
|
Create an ed25519 private key from ``string``, which is a seed.
Args:
string (str): the string to use as a seed.
Returns:
Ed25519PrivateKey: the private key
|
codesearchnet
|
def CopyToDateTimeString(self):
if ((self._timestamp is None) or (self._timestamp < self._INT64_MIN) or (self._timestamp > self._INT64_MAX)):
return None
return super(APFSTime, self)._CopyToDateTimeString()
|
Copies the APFS timestamp to a date and time string.
Returns:
str: date and time value formatted as: "YYYY-MM-DD hh:mm:ss.#########" or
None if the timestamp is missing or invalid.
|
codesearchnet
|
async def _sync_all_conversations(client):
conv_states = []
sync_timestamp = None
request = hangouts_pb2.SyncRecentConversationsRequest(
request_header=client.get_request_header(),
max_conversations=CONVERSATIONS_PER_REQUEST,
max_events_per_conversation=1,
sync_filter=[
hangouts_pb2.SYNC_FILTER_INBOX,
hangouts_pb2.SYNC_FILTER_ARCHIVED,
]
)
for _ in range(MAX_CONVERSATION_PAGES):
logger.info(
'Requesting conversations page %s', request.last_event_timestamp
)
response = await client.sync_recent_conversations(request)
conv_states = list(response.conversation_state) + conv_states
sync_timestamp = parsers.from_timestamp(
response.response_header.current_server_time
)
if response.continuation_end_timestamp == 0:
logger.info('Reached final conversations page')
break
else:
request.last_event_timestamp = response.continuation_end_timestamp
else:
logger.warning('Exceeded maximum number of conversation pages')
logger.info('Synced %s total conversations', len(conv_states))
return conv_states, sync_timestamp
|
Sync all conversations by making paginated requests.
Conversations are ordered by ascending sort timestamp.
Args:
client (Client): Connected client.
Raises:
NetworkError: If the requests fail.
Returns:
tuple of list of ``ConversationState`` messages and sync timestamp
|
juraj-google-style
|
def __init__(self, config_reader=None):
if config_reader:
self._config = config_reader
else:
self._config = WTF_CONFIG_READER
|
Constructor
Args:
config_reader (ConfigReader) - override default config reader.
|
juraj-google-style
|
def from_string(cls, prjs):
def parse(v):
try:
return int(v)
except ValueError:
pass
try:
return float(v)
except ValueError:
return v
parts = [o.lstrip('+') for o in prjs.strip().split()]
items = map((lambda kv: (((len(kv) == 2) and (kv[0], parse(kv[1]))) or (kv[0], True))), (p.split('=') for p in parts))
return cls({k: v for (k, v) in items if (('+' + k) in PROJ4_PARAMS.keys())})
|
Turn a PROJ.4 string into a mapping of parameters. Bare parameters
like "+no_defs" are given a value of ``True``. All keys are checked
against the ``all_proj_keys`` list.
Args:
prjs (str): A PROJ4 string.
|
codesearchnet
|
def create_asset_accesspolicy(access_token, name, duration, permission="1"):
path = '/AccessPolicies'
endpoint = ''.join([ams_rest_endpoint, path])
body = '{ \
"Name": "' + str(name) + '", \
"DurationInMinutes": "' + duration + '", \
"Permissions": "' + permission + '" \
}'
return do_ams_post(endpoint, path, body, access_token)
|
Create Media Service Asset Access Policy.
Args:
access_token (str): A valid Azure authentication token.
name (str): A Media Service Asset Access Policy Name.
duration (str): Access policy duration in minutes (sent as ``DurationInMinutes``).
permission (str): A Media Service permission.
Returns:
HTTP response. JSON body.
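Example usage (hedged sketch; ``access_token`` is a hypothetical valid AMS token, the
permission value '1' is assumed to mean read access, and the return is assumed to be a
requests-style response)::
    policy = create_asset_accesspolicy(access_token, 'ReadPolicy', '120', permission='1')
    print(policy.status_code, policy.json())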
|
juraj-google-style
|
def check_dummies(overwrite: bool=False):
dummy_files = create_dummy_files()
short_names = {'torch': 'pt'}
path = os.path.join(PATH_TO_TRANSFORMERS, 'utils')
dummy_file_paths = {backend: os.path.join(path, f'dummy_{short_names.get(backend, backend)}_objects.py') for backend in dummy_files.keys()}
actual_dummies = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(file_path):
with open(file_path, 'r', encoding='utf-8', newline='\n') as f:
actual_dummies[backend] = f.read()
else:
actual_dummies[backend] = ''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(f'Updating transformers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main __init__ has new objects.')
with open(dummy_file_paths[backend], 'w', encoding='utf-8', newline='\n') as f:
f.write(dummy_files[backend])
else:
found = False
for _actual, _dummy in zip(actual_dummies['torch'].split('class'), dummy_files['torch'].split('class')):
if _actual != _dummy:
actual_broken = _actual
dummy_broken = _dummy
found = True
break
if not found:
print('A transient error was found with the dummies, please investigate.')
continue
raise ValueError(f'The main __init__ has objects that are not present in transformers.utils.dummy_{short_names.get(backend, backend)}_objects.py.\n It is likely the following objects are responsible, see these excerpts: \n---------------------------------- Actual -------------------------------------\n \n {actual_broken} \n---------------------------------- Dummy -------------------------------------\n \n {dummy_broken} \nRun `make fix-copies` to fix this.')
|
Check if the dummy files are up to date and maybe `overwrite` with the right content.
Args:
overwrite (`bool`, *optional*, default to `False`):
Whether or not to overwrite the content of the dummy files. Will raise an error if they are not up to date
when `overwrite=False`.
|
github-repos
|
def GetValueByPath(self, path_segments):
key = self.root_key
for path_segment in path_segments:
if isinstance(key, dict):
try:
key = key[path_segment]
except KeyError:
return None
elif isinstance(key, list):
try:
list_index = int(path_segment, 10)
except ValueError:
return None
key = key[list_index]
else:
return None
if not key:
return None
return key
|
Retrieves a plist value by path.
Args:
path_segments (list[str]): path segment strings relative to the root
of the plist.
Returns:
object: The value of the key specified by the path or None.
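Example (illustrative sketch; ``plist`` is the parsed plist object exposing this method)::
    # with root_key == {'Users': [{'name': 'alice'}, {'name': 'bob'}]}
    plist.GetValueByPath(['Users', '1', 'name'])   # -> 'bob'
    plist.GetValueByPath(['Users', 'missing'])     # -> None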
|
juraj-google-style
|
def flash_write(self, addr, data, nbits=None, flags=0):
self._dll.JLINKARM_BeginDownload(flags)
self.memory_write(addr, data, nbits=nbits)
bytes_flashed = self._dll.JLINKARM_EndDownload()
if bytes_flashed < 0:
raise errors.JLinkFlashException(bytes_flashed)
return bytes_flashed
|
Writes data to the flash region of a device.
The given number of bits, if provided, must be either ``8``, ``16``, or
``32``.
Args:
self (JLink): the ``JLink`` instance
addr (int): starting flash address to write to
data (list): list of data units to write
nbits (int): number of bits to use for each unit
flags (int): optional flags passed on to ``JLINKARM_BeginDownload``
Returns:
Number of bytes written to flash.
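Example usage (hedged sketch; ``jlink`` is a connected ``JLink`` instance and the flash
address and payload are hypothetical, device-specific values)::
    data = [0xFF] * 256                           # 256 bytes of filler data
    written = jlink.flash_write(0x08000000, data, nbits=8)
    assert written >= 0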
|
juraj-google-style
|
def layer_postprocess(layer_input, layer_output, hparams):
return layer_prepostprocess(layer_input, layer_output, sequence=hparams.layer_postprocess_sequence, dropout_rate=hparams.layer_prepostprocess_dropout, norm_type=hparams.norm_type, depth=None, epsilon=hparams.norm_epsilon, dropout_broadcast_dims=comma_separated_string_to_integer_list(getattr(hparams, 'layer_prepostprocess_dropout_broadcast_dims', '')), default_name='layer_postprocess')
|
Apply layer postprocessing.
See layer_prepostprocess() for details.
A hyperparameters object is passed for convenience. The hyperparameters
that may be used are:
layer_postprocess_sequence
layer_prepostprocess_dropout
norm_type
hidden_size
norm_epsilon
Args:
layer_input: a Tensor
layer_output: a Tensor
hparams: a hyperparameters object.
Returns:
a Tensor
|
codesearchnet
|
def software_breakpoint(self):
software_types = [
enums.JLinkBreakpoint.SW_RAM,
enums.JLinkBreakpoint.SW_FLASH,
enums.JLinkBreakpoint.SW
]
return any(self.Type & stype for stype in software_types)
|
Returns whether this is a software breakpoint.
Args:
self (JLinkBreakpointInfo): the ``JLinkBreakpointInfo`` instance
Returns:
``True`` if the breakpoint is a software breakpoint, otherwise
``False``.
|
juraj-google-style
|
def inception_v3_arg_scope(weight_decay=4e-05, stddev=0.1, batch_norm_var_collection='moving_vars'):
batch_norm_params = {'decay': 0.9997, 'epsilon': 0.001, 'updates_collections': tf.GraphKeys.UPDATE_OPS, 'variables_collections': {'beta': None, 'gamma': None, 'moving_mean': [batch_norm_var_collection], 'moving_variance': [batch_norm_var_collection]}}
with slim.arg_scope([slim.conv2d, slim.fully_connected], weights_regularizer=slim.l2_regularizer(weight_decay)):
with slim.arg_scope([slim.conv2d], weights_initializer=tf.truncated_normal_initializer(stddev=stddev), activation_fn=tf.nn.relu, normalizer_fn=slim.batch_norm, normalizer_params=batch_norm_params) as sc:
return sc
|
Defines the default InceptionV3 arg scope.
Args:
weight_decay: The weight decay to use for regularizing the model.
stddev: The standard deviation of the truncated normal weight initializer.
batch_norm_var_collection: The name of the collection for the batch norm
variables.
Returns:
An `arg_scope` to use for the inception v3 model.
|
codesearchnet
|
def defer(coro, delay=1):
assert_corofunction(coro=coro)
@asyncio.coroutine
def wrapper(*args, **kw):
(yield from asyncio.sleep(delay))
return (yield from coro(*args, **kw))
return wrapper
|
Returns a coroutine function wrapper that will defer the given coroutine
execution for a certain amount of seconds in a non-blocking way.
This function can be used as decorator.
Arguments:
coro (coroutinefunction): coroutine function to defer.
delay (int/float): number of seconds to defer execution.
Raises:
TypeError: if coro argument is not a coroutine function.
Returns:
coroutinefunction: a wrapper that defers execution of the given coroutine.
Usage::
# Usage as function
await paco.defer(coro, delay=1)
await paco.defer(coro, delay=0.5)
# Usage as decorator
@paco.defer(delay=1)
async def mul_2(num):
return num * 2
await mul_2(2)
# => 4
|
codesearchnet
|
def ReadPreprocessingInformation(self, knowledge_base):
if not self._storage_file:
raise IOError('Unable to read from closed storage writer.')
self._storage_file.ReadPreprocessingInformation(knowledge_base)
|
Reads preprocessing information.
The preprocessing information contains the system configuration which
contains information about various system specific configuration data,
for example the user accounts.
Args:
knowledge_base (KnowledgeBase): is used to store the preprocessing
information.
Raises:
IOError: when the storage writer is closed.
OSError: when the storage writer is closed.
|
juraj-google-style
|
def _convert_dynamic_dimension_to_zero(shape):
if shape.rank is None:
return shape
return tensor_shape.TensorShape([0 if d is None else d for d in shape.as_list()])
|
Converts dynamic dimensions in `shape` to zero.
The fake params created to match the intermediates captured in other branches
could have dynamic dimensions. But the XLA shape is not able to handle
dynamic dimensions in TF TensorShape. Setting the dynamic dimensions to
size zero will help avoid failing safety checks in bridge. When XLA
DynamicConditional op reconciles branch differences, XLA will replace the
dimension size 0 with a bounded dimension determined from the shape of
real argument in the other branch.
Note: Rank unknown shapes are returned as they are.
Args:
shape: The TensorShape of fake param.
Returns:
The new TensorShape with dynamic dimensions set to zero.
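Example (illustrative sketch)::
    from tensorflow.python.framework import tensor_shape
    _convert_dynamic_dimension_to_zero(tensor_shape.TensorShape([None, 128]))
    # -> TensorShape([0, 128])
    _convert_dynamic_dimension_to_zero(tensor_shape.TensorShape(None))
    # -> unknown-rank shape, returned unchanged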
|
github-repos
|
def strip_iterable(self) -> 'IOTypeHints':
if self.output_types is None or not self.has_simple_output_type():
return self
output_type = self.output_types[0][0]
if output_type is None or isinstance(output_type, type(None)):
return self
if isinstance(output_type, typehints.UnionConstraint):
types = list(output_type.union_types)
if len(types) == 2:
try:
types.remove(type(None))
output_type = types[0]
except ValueError:
pass
if isinstance(output_type, typehints.TypeVariable):
return self._replace(output_types=((typehints.Any,), {}), origin=self._make_origin([self], tb=False, msg=['strip_iterable()']))
yielded_type = typehints.get_yielded_type(output_type)
return self._replace(output_types=((yielded_type,), {}), origin=self._make_origin([self], tb=False, msg=['strip_iterable()']))
|
Removes outer Iterable (or equivalent) from output type.
Only affects instances with simple output types, otherwise is a no-op.
Does not modify self.
Designed to be used with type hints from callables of ParDo, FlatMap, DoFn.
Output type may be Optional[T], in which case the result of stripping T is
used as the output type.
Output type may be None/NoneType, in which case nothing is done.
Example: Generator[Tuple(int, int)] becomes Tuple(int, int)
Returns:
A copy of this instance with a possibly different output type.
Raises:
ValueError if output type is simple and not iterable.
|
github-repos
|
def add_timeout_callback(self, callback, timeout_milliseconds):
from ..server.callbacks import TimeoutCallback
cb = TimeoutCallback(self, None, timeout_milliseconds)
return self._add_session_callback(cb, callback, one_shot=True, originator=self.add_timeout_callback)
|
Add callback to be invoked once, after a specified timeout passes.
Args:
callback (callable) :
A callback function to execute after timeout
timeout_milliseconds (int) :
Number of milliseconds before callback execution.
Returns:
TimeoutCallback : can be used with ``remove_timeout_callback``
.. note::
Timeout callbacks only work within the context of a Bokeh server
session. This function has no effect when Bokeh outputs to
standalone HTML or Jupyter notebook cells.
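Example (illustrative sketch; ``doc`` is assumed to be the ``Document`` of a running
Bokeh server session)::
    def warn_idle():
        print('no activity for 30 seconds')
    cb = doc.add_timeout_callback(warn_idle, 30000)
    # doc.remove_timeout_callback(cb)   # cancel before it fires, if needed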
|
codesearchnet
|
def compose_object(self, file_list, destination_file, content_type):
xml_setting_list = ['<ComposeRequest>']
for meta_data in file_list:
xml_setting_list.append('<Component>')
        for key, val in meta_data.items():
xml_setting_list.append('<%s>%s</%s>' % (key, val, key))
xml_setting_list.append('</Component>')
xml_setting_list.append('</ComposeRequest>')
xml = ''.join(xml_setting_list)
if content_type is not None:
headers = {'Content-Type': content_type}
else:
headers = None
status, resp_headers, content = self.put_object(
api_utils._quote_filename(destination_file) + '?compose',
payload=xml,
headers=headers)
errors.check_status(status, [200], destination_file, resp_headers,
body=content)
|
COMPOSE multiple objects together.
Using the given list of files, calls the put object with the compose flag.
This call merges all the files into the destination file.
Args:
file_list: list of dicts with the file name.
destination_file: Path to the destination file.
content_type: Content type for the destination file.
|
juraj-google-style
|
def __convertRlocToRouterId(self, xRloc16):
    routerList = self.__sendCommand(WPANCTL_CMD + 'getprop -v Thread:RouterTable')
    print(routerList)
    print(xRloc16)
    for line in routerList:
        if re.match(r'\[|\]', line):
            continue
        if re.match(WPAN_CARRIER_PROMPT, line, re.M | re.I):
            break
        router = self.__stripValue(line).split(',')
        routerid = None
        rloc16 = None
        for item in router:
            if 'RouterId' in item:
                routerid = item.split(':')[1]
            elif 'RLOC16' in item:
                # parse the RLOC16 value from this field, not from the whole line
                rloc16 = item.split(':')[1]
        if rloc16 is None:
            continue
        if isinstance(xRloc16, str):
            if '0x' + rloc16 == xRloc16:
                return routerid
        elif isinstance(xRloc16, int):
            if int(rloc16, 16) == xRloc16:
                return routerid
    return None
|
Map an RLOC16 short address to the corresponding router id.
Args:
xRloc16: hex rloc16 short address
Returns:
actual router id allocated by leader
|
juraj-google-style
|
def gradient_tensor(self, x_tensor):
x_tensor_name = self._get_tensor_name(x_tensor)
if x_tensor_name not in self._gradient_tensors:
raise LookupError('This GradientsDebugger has not received any gradient tensor for x-tensor %s' % x_tensor_name)
return self._gradient_tensors[x_tensor_name]
|
Get the gradient tensor of an x-tensor.
Args:
x_tensor: (`tf.Tensor`, `tf.Variable` or `str`) The x-tensor object or its
name. x-tensor refers to the independent `tf.Tensor`, i.e., the tensor
on the denominator of the differentiation.
Returns:
If found, the gradient tensor.
Raises:
TypeError: If `x_tensor` is not a `tf.Tensor`, `tf.Variable` or `str`.
LookupError: If the `x_tensor` has not been registered with a gradient
tensor.
|
github-repos
|
def load(self, filename, bs=512):
self.__filename = filename
self.__volumes = []
self.__partition_scheme = rawdisk.scheme.common.detect_scheme(filename)
plugin_objects = [plugin.plugin_object for plugin in self.__fs_plugins]
fs_detector = FilesystemDetector(fs_plugins=plugin_objects)
if self.__partition_scheme == PartitionScheme.SCHEME_MBR:
self.__load_mbr_volumes(filename, fs_detector, bs)
elif self.__partition_scheme == PartitionScheme.SCHEME_GPT:
self.__load_gpt_volumes(filename, fs_detector, bs)
else:
self.logger.warning('Partitioning scheme could not be determined.')
volume = fs_detector.detect_standalone(filename, offset=0)
if volume is not None:
volume.load(filename, offset=0)
self.__volumes.append(volume)
else:
self.logger.warning(
'Were not able to detect standalone volume type')
|
Starts filesystem analysis. Detects supported filesystems and loads :attr:`partitions` array.
Args:
filename - Path to file or device for reading.
bs - Block size in bytes (default: 512).
Raises:
IOError - File/device does not exist or is not readable.
|
juraj-google-style
|
def compose_auth_header(
auth: Union[MutableMapping, str, bytes], registry_addr: str = None
) -> str:
if isinstance(auth, Mapping):
if "identitytoken" in auth:
pass
elif "auth" in auth:
return compose_auth_header(auth["auth"], registry_addr)
else:
if registry_addr:
auth["serveraddress"] = registry_addr
auth_json = json.dumps(auth).encode("utf-8")
elif isinstance(auth, (str, bytes)):
if isinstance(auth, bytes):
auth = auth.decode("utf-8")
s = base64.b64decode(auth)
username, passwd = s.split(b":", 1)
config = {
"username": username.decode("utf-8"),
"password": passwd.decode("utf-8"),
"email": None,
"serveraddress": registry_addr,
}
auth_json = json.dumps(config).encode("utf-8")
else:
raise TypeError("auth must be base64 encoded string/bytes or a dictionary")
auth = base64.b64encode(auth_json).decode("ascii")
return auth
|
Validate and compose base64-encoded authentication header
with an optional support for parsing legacy-style "user:password"
strings.
Args:
auth: Authentication information
registry_addr: An address of the registry server
Returns:
A base64-encoded X-Registry-Auth header value
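Example usage (hedged sketch; the registry address and credentials are hypothetical)::
    import base64
    legacy = base64.b64encode(b'user:s3cret').decode('ascii')
    header = compose_auth_header(legacy, 'registry.example.com:5000')
    # equivalently, from a config mapping:
    header = compose_auth_header({'username': 'user', 'password': 's3cret'},
                                 'registry.example.com:5000')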
|
juraj-google-style
|
def set_state_vector(self, state: Union[(int, np.ndarray)]):
self._stepper.reset_state(state)
|
Updates the state of the simulator to the given new state.
Args:
state: If this is an int, then this is the state to reset
the stepper to, expressed as an integer of the computational basis.
Integer to bitwise indices is little endian. Otherwise if this is
a np.ndarray this must be the correct size and have dtype of
np.complex64.
Raises:
ValueError if the state is incorrectly sized or not of the correct
dtype.
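Example (illustrative sketch; ``simulator`` is assumed to be a two-qubit instance
exposing this method)::
    import numpy as np
    simulator.set_state_vector(0)   # reset to computational basis state 0
    simulator.set_state_vector(np.array([0, 1, 0, 0], dtype=np.complex64))  # basis state 1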
|
codesearchnet
|
def write(self, file_prefix: str) -> str:
    # Interface method: concrete implementations serialize the proto and return the written path.
    raise NotImplementedError()
|
Serializes proto to disk.
Args:
file_prefix: string prefix of the filepath.
Returns:
The actual path the proto is written to.
|
github-repos
|
def load_yaml(path):
with open(path, 'rt') as f:
yamldict = yaml.load(f.read(), Loader=yamlloader.ordereddict.CSafeLoader)
if not yamldict:
raise (LoadError('YAML file: %s is empty!' % path))
return yamldict
|
Load YAML file into an ordered dictionary
Args:
path (str): Path to YAML file
Returns:
OrderedDict: Ordered dictionary containing loaded YAML file
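Example usage (hedged sketch; the file path is hypothetical)::
    config = load_yaml('config.yml')
    for key, value in config.items():   # keys keep the order they appear in the file
        print(key, value)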
|
juraj-google-style
|
def process(filename, args, detector_classes, printer_classes):
ast = '--ast-compact-json'
if args.legacy_ast:
ast = '--ast-json'
args.filter_paths = parse_filter_paths(args)
slither = Slither(filename, ast_format=ast, **vars(args))
return _process(slither, detector_classes, printer_classes)
|
The core high-level code for running Slither static analysis.
Returns:
list(result), int: Result list and number of contracts analyzed
|
codesearchnet
|
def parse_args(test: typing.Optional[typing.List[str]]=None) -> argparse.Namespace:
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('weight_file', help='A file path for the learned weights.')
parser.add_argument('-o', '--outfile', help='A file path to export a model file. (default: model.json)', default='model.json', type=str)
parser.add_argument('--scale', help='A scale factor for the output scores', default=1000, type=int)
if test is None:
return parser.parse_args()
else:
return parser.parse_args(test)
|
Parses commandline arguments.
Args:
test (typing.Optional[typing.List[str]], optional): Commandline args for
testing. Defaults to None.
Returns:
Parsed arguments (argparse.Namespace).
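Example (illustrative sketch using the testing hook)::
    args = parse_args(['weights.txt', '-o', 'out.json', '--scale', '500'])
    assert args.weight_file == 'weights.txt'
    assert args.outfile == 'out.json' and args.scale == 500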
|
github-repos
|
def _add_state_variable(self, name, shape, dtype, initializer=None, partitioner=None, use_resource=None, **kwargs):
weight = self.add_weight(name=name, shape=shape, dtype=dtype, initializer=initializer, regularizer=None, trainable=False, constraint=None, partitioner=partitioner, use_resource=use_resource, **kwargs)
self.state_variables[name] = weight
return weight
|
Add a variable that can hold state which is updated during adapt().
Args:
name: Variable name.
shape: Variable shape. Defaults to scalar if unspecified.
dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
initializer: initializer instance (callable).
partitioner: Partitioner to be passed to the `Trackable` API.
use_resource: Whether to use `ResourceVariable`
**kwargs: Additional keyword arguments. Accepted values are `getter` and
`collections`.
Returns:
The created variable.
|
github-repos
|
def select_segments(self, jsonpath: str) -> List[Segment]:
path = self.etk.parse_json_path(jsonpath)
matches = path.find(self.cdr_document)
segments = list()
for a_match in matches:
this_segment = Segment(str(a_match.full_path), a_match.value, self)
segments.append(this_segment)
return segments
|
Dereferences the json_path inside the document and returns the selected elements.
This method should compile and cache the compiled json_path in case the same path
is reused by multiple extractors.
Args:
jsonpath (str): a valid JSON path.
Returns: A list of Segment objects containing the elements selected by the JSON path.
|
codesearchnet
|
def Sample(self, operation, description, data_size, compressed_data_size):
sample_time = time.time()
sample = '{0:f}\t{1:s}\t{2:s}\t{3:d}\t{4:d}\n'.format(
sample_time, operation, description, data_size, compressed_data_size)
self._WritesString(sample)
|
Takes a sample of data read or written for profiling.
Args:
operation (str): operation, either 'read' or 'write'.
description (str): description of the data read.
data_size (int): size of the data read in bytes.
compressed_data_size (int): size of the compressed data read in bytes.
|
juraj-google-style
|