code (stringlengths 20-4.93k) | docstring (stringlengths 33-1.27k) | source (stringclasses, 3 values)
---|---|---|
def from_tensor_list(element_spec, tensor_list):
return _from_tensor_list_helper(lambda spec, value: spec._from_tensor_list(value), element_spec, tensor_list)
|
Returns an element constructed from the given spec and tensor list.
Args:
element_spec: A nested structure of `tf.TypeSpec` objects representing the
element type specification.
tensor_list: A list of tensors to use for constructing the value.
Returns:
An element constructed from the given spec and tensor list.
Raises:
ValueError: If the number of tensors needed to construct an element for
the given spec does not match the given number of tensors or the given
spec is not compatible with the tensor list.
|
github-repos
|
def __init__(self, interpreter=None, signature_key=None):
if not interpreter:
raise ValueError('None interpreter provided.')
if not signature_key:
raise ValueError('None signature_key provided.')
self._interpreter = interpreter
self._interpreter_wrapper = interpreter._interpreter
self._signature_key = signature_key
signature_defs = interpreter._get_full_signature_list()
if signature_key not in signature_defs:
raise ValueError(f'Invalid signature_key provided: "{signature_key}".')
self._signature_def = signature_defs[signature_key]
self._outputs = self._signature_def['outputs'].items()
self._inputs = self._signature_def['inputs']
self._subgraph_index = self._interpreter_wrapper.GetSubgraphIndexFromSignature(self._signature_key)
|
Constructor.
Args:
interpreter: Interpreter object that is already initialized with the
requested model.
signature_key: SignatureDef key to be used.
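A minimal usage sketch; the model path and signature key below are placeholders, and in practice this object is usually obtained via `tf.lite.Interpreter.get_signature_runner` rather than constructed directly:
import tensorflow as tf
interpreter = tf.lite.Interpreter(model_path='model.tflite')  # hypothetical model file
runner = interpreter.get_signature_runner('serving_default')  # constructs this object internally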
|
github-repos
|
def do_state(args):
rest_client = RestClient(args.url, args.user)
if args.subcommand == 'list':
response = rest_client.list_state(args.subtree, args.head)
leaves = response['data']
head = response['head']
keys = ('address', 'size', 'data')
headers = tuple(k.upper() for k in keys)
def parse_leaf_row(leaf, decode=True):
decoded = b64decode(leaf['data'])
return (
leaf['address'],
len(decoded),
str(decoded) if decode else leaf['data'])
if args.format == 'default':
fmt.print_terminal_table(headers, leaves, parse_leaf_row)
print('HEAD BLOCK: "{}"'.format(head))
elif args.format == 'csv':
fmt.print_csv(headers, leaves, parse_leaf_row)
print('(data for head block: "{}")'.format(head))
elif args.format == 'json' or args.format == 'yaml':
state_data = {
'head': head,
'data': [{k: d for k, d in zip(keys, parse_leaf_row(l, False))}
for l in leaves]}
if args.format == 'yaml':
fmt.print_yaml(state_data)
elif args.format == 'json':
fmt.print_json(state_data)
else:
raise AssertionError('Missing handler: {}'.format(args.format))
else:
raise AssertionError('Missing handler: {}'.format(args.format))
if args.subcommand == 'show':
output = rest_client.get_leaf(args.address, args.head)
if output is not None:
print('DATA: "{}"'.format(b64decode(output['data'])))
print('HEAD: "{}"'.format(output['head']))
else:
raise CliException('No data available at {}'.format(args.address))
|
Runs the state list or state show command, printing output to the
console
Args:
args: The parsed arguments sent to the command at runtime
|
juraj-google-style
|
def format(obj, options):
formatters = {
float_types: lambda x: '{:.{}g}'.format(x, options.digits),
}
for _types, fmtr in formatters.items():
if isinstance(obj, _types):
return fmtr(obj)
try:
if six.PY2 and isinstance(obj, six.string_types):
return str(obj.encode('utf-8'))
return str(obj)
except:
return 'OBJECT'
|
Return a string representation of the Python object
Args:
obj: The Python object
options: Format options
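A minimal sketch of a call, assuming the module defines `float_types = (float,)` and that `options` only needs a `digits` attribute; the `Options` namedtuple here is illustrative, not from the source:
from collections import namedtuple
Options = namedtuple('Options', ['digits'])
float_types = (float,)  # assumed module-level definition
format(3.14159265, Options(digits=3))  # -> '3.14'
format([1, 2, 3], Options(digits=3))   # -> '[1, 2, 3]'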
|
juraj-google-style
|
def normalize_docroot(app, root):
srcdir = app.env.srcdir
default_version = app.config.javalink_default_version
if isinstance(root, basestring):
(url, base) = _parse_docroot_str(srcdir, root)
return {'root': url, 'base': base, 'version': default_version}
else:
normalized = {}
normalized['root'] = _parse_docroot_str(srcdir, root['root'])[0]
if ('base' in root):
normalized['base'] = _parse_docroot_str(srcdir, root['base'])[1]
else:
normalized['base'] = _parse_docroot_str(srcdir, root['root'])[1]
if ('version' in root):
normalized['version'] = root['version']
else:
normalized['version'] = default_version
return normalized
|
Creates a package-list URL and a link base from a docroot element.
Args:
app: the global app object
root: the docroot element [string or dictionary]
|
codesearchnet
|
def _reshape(self, input_dims=None, output_dims=None):
if (input_dims is not None):
if (np.product(input_dims) != self._input_dim):
raise QiskitError('Reshaped input_dims are incompatible with combined input dimension.')
self._input_dims = tuple(input_dims)
if (output_dims is not None):
if (np.product(output_dims) != self._output_dim):
raise QiskitError('Reshaped output_dims are incompatible with combined output dimension.')
self._output_dims = tuple(output_dims)
return self
|
Reshape input and output dimensions of operator.
Args:
input_dims (tuple): new subsystem input dimensions.
output_dims (tuple): new subsystem output dimensions.
Returns:
Operator: returns self with reshaped input and output dimensions.
Raises:
QiskitError: if the combined size of the new subsystem input dimensions or
output dimensions does not match the original combined dimension.
|
codesearchnet
|
def request(self, batch: Sequence[Any], model: genai.Client, inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:
if inference_args is None:
inference_args = {}
responses = self.request_fn(self.model_name, batch, model, inference_args)
return utils._convert_to_result(batch, responses, self.model_name)
|
Sends a prediction request to a Gemini service containing a batch
of inputs and matches that input with the prediction response from
the endpoint as an iterable of PredictionResults.
Args:
batch: a sequence of any values to be passed to the Gemini service.
Should be inputs accepted by the provided inference function.
model: a genai.Client object configured to access the desired service.
inference_args: any additional arguments to send as part of the
prediction request.
Returns:
An iterable of PredictionResults.
|
github-repos
|
def DirnamePath(self, path):
if path.endswith(self.PATH_SEPARATOR):
path = path[:(- 1)]
if (not path):
return None
(dirname, _, _) = path.rpartition(self.PATH_SEPARATOR)
return dirname
|
Determines the directory name of the path.
The file system root is represented by an empty string.
Args:
path (str): path.
Returns:
str: directory name of the path or None.
|
codesearchnet
|
def download_tabular_rows_as_dicts(self, url, headers=1, keycolumn=1, **kwargs):
kwargs['headers'] = headers
stream = self.get_tabular_stream(url, **kwargs)
output_dict = dict()
headers = stream.headers
key_header = headers[(keycolumn - 1)]
for row in stream.iter(keyed=True):
first_val = row[key_header]
output_dict[first_val] = dict()
for header in row:
if (header == key_header):
continue
else:
output_dict[first_val][header] = row[header]
return output_dict
|
Download multicolumn csv from url and return dictionary where keys are first column and values are
dictionaries with keys from column headers and values from columns beneath
Args:
url (str): URL to download
headers (Union[int, List[int], List[str]]): Number of row(s) containing headers or list of headers. Defaults to 1.
keycolumn (int): Number of column to be used for key. Defaults to 1.
**kwargs:
file_type (Optional[str]): Type of file. Defaults to inferring.
delimiter (Optional[str]): Delimiter used for values in each row. Defaults to inferring.
Returns:
Dict[Dict]: Dictionary where keys are first column and values are dictionaries with keys from column
headers and values from columns beneath
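A hypothetical sketch of the expected output shape; the URL, CSV contents and the `Download` instance are placeholders, not from the source:
downloader = Download()  # assumed hdx.utilities.downloader.Download instance
# Suppose the CSV at the URL contains:
#   name,age,city
#   alice,30,oslo
#   bob,25,bergen
rows = downloader.download_tabular_rows_as_dicts('https://example.com/people.csv')
# rows -> {'alice': {'age': '30', 'city': 'oslo'},
#          'bob': {'age': '25', 'city': 'bergen'}}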
|
codesearchnet
|
def create_position_ids_from_inputs_embeds(self, inputs_embeds, past_key_values_length):
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device)
return position_ids.unsqueeze(0).expand(input_shape).contiguous() + past_key_values_length
|
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
past_key_values_length: int, length of any cached key/values (added to the generated position ids)
Returns: torch.Tensor
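A minimal sketch of the computation outside the class, assuming `padding_idx=1` and no past key values (both are assumptions, not fixed by the source):
import torch
inputs_embeds = torch.zeros(2, 4, 8)   # (batch, seq_len, hidden)
padding_idx, past_key_values_length = 1, 0
seq_len = inputs_embeds.size(1)
position_ids = torch.arange(padding_idx + 1, seq_len + padding_idx + 1, dtype=torch.long)
position_ids = position_ids.unsqueeze(0).expand(inputs_embeds.size()[:-1]) + past_key_values_length
# position_ids -> tensor([[2, 3, 4, 5], [2, 3, 4, 5]])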
|
github-repos
|
def get_transcript_ids(ensembl, gene_id):
ensembl_genes = ensembl.get_genes_for_hgnc_id(gene_id)
transcript_ids = ensembl.get_transcript_ids_for_ensembl_gene_ids(ensembl_genes, [gene_id])
alt_symbols = []
if len(transcript_ids) == 0:
alt_symbols = ensembl.get_previous_symbol(gene_id)
genes = [ensembl.get_genes_for_hgnc_id(symbol) for symbol in alt_symbols]
genes = [item for sublist in genes for item in sublist]
ensembl_genes += genes
symbols = [gene_id] + alt_symbols
transcript_ids = ensembl.get_transcript_ids_for_ensembl_gene_ids(ensembl_genes, symbols)
return get_transcript_lengths(ensembl, transcript_ids)
|
Gets transcript IDs for a gene.
Args:
ensembl: EnsemblRequest object to request data from ensembl
gene_id: HGNC symbol for gene
Returns:
dictionary of transcript ID: transcript lengths for all transcripts
for a given HGNC symbol.
|
juraj-google-style
|
def add_object(self, file_path, file_object, error_fct=None):
error_fct = error_fct or self.raise_os_error
if not file_path:
target_directory = self.root
else:
target_directory = self.resolve(file_path)
if not S_ISDIR(target_directory.st_mode):
error = errno.ENOENT if self.is_windows_fs else errno.ENOTDIR
error_fct(error, file_path)
target_directory.add_entry(file_object)
|
Add a fake file or directory into the filesystem at file_path.
Args:
file_path: The path to the file to be added relative to self.
file_object: File or directory to add.
error_fct: The error function to be called if file_path does
not correspond to a directory (used internally).
Raises:
IOError or OSError: if file_path does not correspond to a
directory.
|
juraj-google-style
|
def external_ids(self, **kwargs):
path = self._get_series_id_season_number_path('external_ids')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
|
Get the external ids that we have stored for a TV season by season
number.
Args:
language: (optional) ISO 639 code.
Returns:
A dict representation of the JSON returned from the API.
|
codesearchnet
|
def isin(self, values):
return self.__constructor__(
query_compiler=self._query_compiler.isin(values=values)
)
|
Fill a DataFrame with booleans for cells contained in values.
Args:
values (iterable, DataFrame, Series, or dict): The values to find.
Returns:
A new DataFrame with booleans representing whether or not a cell
is in values.
True: cell is contained in values.
False: otherwise
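A small illustrative call, assuming the DataFrame comes from modin (`import modin.pandas as pd`); the semantics mirror pandas:
import modin.pandas as pd
df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
df.isin([1, 5])
#        a      b
# 0   True  False
# 1  False   True
# 2  False  False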
|
juraj-google-style
|
def skip_while(self, predicate):
if self.closed():
raise ValueError('Attempt to call skip_while() on a closed Queryable.')
if (not is_callable(predicate)):
raise TypeError('skip_while() parameter predicate={0} is not callable'.format(repr(predicate)))
return self._create(itertools.dropwhile(predicate, self))
|
Omit elements from the start for which a predicate is True.
Note: This method uses deferred execution.
Args:
predicate: A single argument predicate function.
Returns:
A Queryable over the sequence of elements beginning with the first
element for which the predicate returns False.
Raises:
ValueError: If the Queryable is closed().
TypeError: If predicate is not callable.
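A small illustrative query; the `asq.query` initiator and `to_list` call are assumed entry points of the same library:
from asq import query
query([1, 2, 5, 1, 2]).skip_while(lambda x: x < 3).to_list()
# -> [5, 1, 2]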
|
codesearchnet
|
def get_nodes(self):
nodes = []
for (age, level) in enumerate(self.nodes):
nodes.append([])
for node in level:
nodes[age].append(node.get_tuple())
return nodes
|
Get the tree nodes as list.
Returns:
list: A 2d-list holding the grown nodes' coordinates as tuples for every age.
Example:
[
[(10, 40)],
[(20, 80), (100, 30)],
[(100, 90), (120, 40), ...],
...
]
|
codesearchnet
|
def __init__(self, object_type: str, object_id: str = None):
if object_type not in [PB_KEY, SBI_KEY]:
raise RuntimeError('Invalid object type')
self._type = object_type
self._id = object_id
self._key = self.get_key(object_type, object_id)
self._check_object_exists()
|
Initialise variables.
Args:
object_type (str): Type of object.
object_id (str): ID of the object.
|
juraj-google-style
|
def check_info_annotation(annotation, info, extra_info, alternatives, individuals=[]):
number = extra_info['Number']
if is_number(number):
number_of_entrys = float(number)
if number_of_entrys != 0:
if len(annotation) != number_of_entrys:
raise SyntaxError("Info field {0} has the wrong "\
"number of entrys according to the vcf header."\
" Vcf header specifies {1} should have {2} entry(s)".format(
'='.join([info, ','.join(annotation)]),
info,
number
))
elif number == 'A':
if len(annotation) != len(alternatives):
raise SyntaxError("Info field {0} has the wrong "\
"number of entrys according to the vcf header."\
"Vcf header specifies {1} should have {2} entry(s)".format(
'='.join([info, ','.join(annotation)]),
info,
number
))
elif number == 'R':
if len(annotation) != (len(alternatives) + 1):
raise SyntaxError("Info field {0} has the wrong "\
"number of entrys according to the vcf header."\
"Vcf header specifies {1} should have {2} entry(s)".format(
'='.join([info, ','.join(annotation)]),
info,
number
))
elif number == 'G':
if len(annotation) != len(individuals):
raise SyntaxError("Info field {0} has the wrong "\
"number of entrys according to the vcf header."\
"Vcf header specifies {1} should have {2} entry(s)".format(
'='.join([info, ','.join(annotation)]),
info,
number
))
return True
|
Check if the info annotation corresponds to the metadata specification
Arguments:
annotation (list): The annotation from the vcf file
info (str): Name of the info field
extra_info (dict): The metadata specification
alternatives (list): A list with the alternative variants
individuals (list): a list with the individuals
Returns:
bool: If the annotation is correct or not
|
juraj-google-style
|
def _gather_beams(nested, beam_indices, batch_size, new_beam_size):
batch_pos = tf.range(batch_size * new_beam_size) // new_beam_size
batch_pos = tf.reshape(batch_pos, [batch_size, new_beam_size])
coordinates = tf.stack([batch_pos, beam_indices], axis=2)
return nest.map_structure((lambda state: tf.gather_nd(state, coordinates)), nested)
|
Gather beams from nested structure of tensors.
Each tensor in nested represents a batch of beams, where beam refers to a
single search state (beam search involves searching through multiple states
in parallel).
This function is used to gather the top beams, specified by
beam_indices, from the nested tensors.
Args:
nested: Nested structure (tensor, list, tuple or dict) containing tensors
with shape [batch_size, beam_size, ...].
beam_indices: int32 tensor with shape [batch_size, new_beam_size]. Each
value in beam_indices must be between [0, beam_size), and are not
necessarily unique.
batch_size: int size of batch
new_beam_size: int number of beams to be pulled from the nested tensors.
Returns:
Nested structure containing tensors with shape
[batch_size, new_beam_size, ...]
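A minimal sketch with concrete shapes (values chosen purely for illustration):
import tensorflow as tf
states = tf.reshape(tf.range(2 * 3 * 4), [2, 3, 4])   # [batch_size=2, beam_size=3, 4]
beam_indices = tf.constant([[0, 2], [1, 0]])           # [batch_size, new_beam_size=2]
gathered = _gather_beams(states, beam_indices, 2, 2)
# gathered.shape -> [2, 2, 4]; beams 0 and 2 kept for batch item 0, beams 1 and 0 for batch item 1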
|
codesearchnet
|
def hook(self, function, event, dependencies):
if (event is None):
for e in self._events.keys():
self.hook(function, e, dependencies)
return
if ((not isinstance(event, str)) and isinstance(event, Iterable)):
for e in event:
self.hook(function, e, dependencies)
return
event_list = self._events.get(event, None)
if (event_list is None):
raise NameError(("Invalid key provided '%s'. Valid options: %s" % (event, ', '.join(self._events.keys()))))
return event_list.hook(function, dependencies)
|
Tries to load the hook to the event
Args:
function (func): Function that will be called when the event is called
Kwargs:
dependencies (str): String or Iterable with modules whose hooks should be called before this one
Raises:
NameError
Note that the dependencies are module-wide, that means that if
`parent.foo` and `parent.bar` are both subscribed to `example` event
and `child` enumerates `parent` as dependency, **both** `foo` and `bar`
must be called in order for the dependency to get resolved.
|
codesearchnet
|
def __new__(cls, input_array, tol=1e-3):
obj = super().__new__(cls, input_array, check_rank=3)
if not (obj - np.transpose(obj, (0, 2, 1)) < tol).all():
warnings.warn("Input piezo tensor does "
"not satisfy standard symmetries")
return obj.view(cls)
|
Create a PiezoTensor object. The constructor throws an error if
the shape of the input_matrix argument is not 3x3x3, i. e. in true
tensor notation. Note that the constructor uses __new__ rather than
__init__ according to the standard method of subclassing numpy
ndarrays.
Args:
input_array (3x3x3 array-like): the 3x3x3 array-like
representing the piezo tensor
|
juraj-google-style
|
def names(self):
result = []
for (key, value) in self.iteritems():
if (value & self.bitmask):
result.append(key)
return result
|
List of selected enum names.
Returns:
list: Enum names.
|
codesearchnet
|
def exp2(x):
if any_symbolic_tensors((x,)):
return Exp2().symbolic_call(x)
return backend.numpy.exp2(x)
|
Calculate the base-2 exponential of all elements in the input tensor.
Args:
x: Input tensor.
Returns:
Output tensor, element-wise base-2 exponential of `x`.
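A quick illustrative call, assuming Keras 3 with the `keras.ops` namespace:
import numpy as np
from keras import ops
ops.exp2(np.array([0.0, 1.0, 3.0]))   # -> [1., 2., 8.] as a backend tensor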
|
github-repos
|
def grid(self, force_rerun=False):
log.debug('{}: running grid maker...'.format(self.id))
if not self.receptormol2_path or not self.box_path:
raise ValueError('Please run protein_only_and_noH and showbox')
gridscript = op.join(self.dock_dir, "{}_grid.in".format(self.id))
out_name = op.join(self.dock_dir, "{}_grid.out".format(self.id))
if ssbio.utils.force_rerun(flag=force_rerun, outfile=out_name):
with open(gridscript, "w") as f:
grid_text = .format(op.basename(self.receptormol2_path), op.basename(self.box_path), self.amb_file, self.id)
f.write(grid_text)
os.chdir(self.dock_dir)
cmd = "grid -i {} -o {}".format(op.basename(gridscript), op.basename(out_name))
os.system(cmd)
if ssbio.utils.is_non_zero_file(out_name):
self.grid_path = out_name
log.debug('{}: successful grid creation'.format(self.grid_path))
else:
log.critical('{}: grid failed to run on receptor + box file'.format(self.box_path))
|
Create the scoring grid within the dummy box.
Args:
force_rerun (bool): If method should be rerun even if output file exists
|
juraj-google-style
|
def from_bank_code(cls, country_code, bank_code):
try:
return cls(registry.get('bank_code')[(country_code, bank_code)]['bic'])
except KeyError:
raise ValueError('Invalid bank code {!r} for country {!r}'.format(bank_code, country_code))
|
Create a new BIC object from country- and bank-code.
Examples:
>>> bic = BIC.from_bank_code('DE', '20070000')
>>> bic.country_code
'DE'
>>> bic.bank_code
'DEUT'
>>> bic.location_code
'HH'
>>> BIC.from_bank_code('DE', '01010101')
Traceback (most recent call last):
...
ValueError: Invalid bank code '01010101' for country 'DE'
Args:
country_code (str): ISO 3166 alpha2 country-code.
bank_code (str): Country specific bank-code.
Returns:
BIC: a BIC object generated from the given country code and bank code.
Raises:
ValueError: If the given bank code wasn't found in the registry
Note:
This currently only works for German bank-codes.
|
codesearchnet
|
def add_topic(self, topic):
if topic in self._topics:
return Future().success(set(self._topics))
self._topics.add(topic)
return self.cluster.request_update()
|
Add a topic to the list of topics tracked via metadata.
Arguments:
topic (str): topic to track
Returns:
Future: resolves after metadata request/response
|
juraj-google-style
|
def sum(vari, axis=None):
if isinstance(vari, Poly):
core = vari.A.copy()
for key in vari.keys:
core[key] = sum(core[key], axis)
return Poly(core, vari.dim, None, vari.dtype)
return np.sum(vari, axis)
|
Sum the components of a shapeable quantity along a given axis.
Args:
vari (chaospy.poly.base.Poly, numpy.ndarray):
Input data.
axis (int):
Axis over which the sum is taken. By default ``axis`` is None, and
all elements are summed.
Returns:
(chaospy.poly.base.Poly, numpy.ndarray):
Polynomial array with same shape as ``vari``, with the specified
axis removed. If ``vari`` is an 0-d array, or ``axis`` is None,
a (non-iterable) component is returned.
Examples:
>>> vari = cp.prange(3)
>>> print(vari)
[1, q0, q0^2]
>>> print(cp.sum(vari))
q0^2+q0+1
|
codesearchnet
|
def load(self, source, mode='create', source_format='csv', csv_options=None, ignore_unknown_values=False, max_bad_records=0):
job = self.load_async(source, mode=mode, source_format=source_format, csv_options=csv_options, ignore_unknown_values=ignore_unknown_values, max_bad_records=max_bad_records)
if (job is not None):
job.wait()
return job
|
Load the table from GCS.
Args:
source: the URL of the source object(s). Can include a wildcard '*' at the end of the item
name. Can be a single source or a list.
mode: one of 'create', 'append', or 'overwrite'. 'append' or 'overwrite' will fail if the
table does not already exist, while 'create' will fail if it does. The default is
'create'. If 'create' the schema will be inferred if necessary.
source_format: the format of the data, 'csv' or 'json'; default 'csv'.
csv_options: if source format is 'csv', additional options as a CSVOptions object.
ignore_unknown_values: if True, accept rows that contain values that do not match the schema;
the unknown values are ignored (default False).
max_bad_records: the maximum number of bad records that are allowed (and ignored) before
returning an 'invalid' error in the Job result (default 0).
Returns:
A Job object for the completed load Job if it was started successfully; else None.
|
codesearchnet
|
def add_response(self, req, resp):
if self._cache is None:
return
signature = sign(req.checkRequest)
with self._cache as c:
now = self._timer()
quota_scale = 0
item = c.get(signature)
if item is None:
c[signature] = CachedItem(
resp, self.service_name, now, quota_scale)
else:
item.last_check_time = now
item.response = resp
item.quota_scale = quota_scale
item.is_flushing = False
c[signature] = item
|
Adds the response from sending to `req` to this instance's cache.
Args:
req (`ServicecontrolServicesCheckRequest`): the request
resp (CheckResponse): the response from sending the request
|
juraj-google-style
|
def fill_slot(self, filler_pipeline_key, slot, value):
if (not isinstance(filler_pipeline_key, db.Key)):
filler_pipeline_key = db.Key(filler_pipeline_key)
if _TEST_MODE:
slot._set_value_test(filler_pipeline_key, value)
else:
encoded_value = json.dumps(value, sort_keys=True, cls=mr_util.JsonEncoder)
value_text = None
value_blob = None
if (len(encoded_value) <= _MAX_JSON_SIZE):
value_text = db.Text(encoded_value)
else:
value_blob = _write_json_blob(encoded_value, filler_pipeline_key.name())
def txn():
slot_record = db.get(slot.key)
if (slot_record is None):
raise UnexpectedPipelineError(('Tried to fill missing slot "%s" by pipeline ID "%s" with value: %r' % (slot.key, filler_pipeline_key.name(), value)))
slot_record.filler = filler_pipeline_key
slot_record.value_text = value_text
slot_record.value_blob = value_blob
slot_record.status = _SlotRecord.FILLED
slot_record.fill_time = self._gettime()
slot_record.put()
task = taskqueue.Task(url=self.barrier_handler_path, params=dict(slot_key=slot.key, use_barrier_indexes=True), headers={'X-Ae-Slot-Key': slot.key, 'X-Ae-Filler-Pipeline-Key': filler_pipeline_key})
task.add(queue_name=self.queue_name, transactional=True)
db.run_in_transaction_options(db.create_transaction_options(propagation=db.ALLOWED), txn)
self.session_filled_output_names.add(slot.name)
|
Fills a slot, enqueueing a task to trigger pending barriers.
Args:
filler_pipeline_key: db.Key or stringified key of the _PipelineRecord
that filled this slot.
slot: The Slot instance to fill.
value: The serializable value to assign.
Raises:
UnexpectedPipelineError if the _SlotRecord for the 'slot' could not
be found in the Datastore.
|
codesearchnet
|
def run(self):
while self.should_run:
try:
self.logger.debug('Sending heartbeat, seq ' + last_sequence)
self.ws.send(json.dumps({
'op': 1,
'd': last_sequence
}))
except Exception as e:
self.logger.error(f'Got error in heartbeat: {str(e)}')
finally:
elapsed = 0.0
while elapsed < self.interval and self.should_run:
time.sleep(self.TICK_INTERVAL)
elapsed += self.TICK_INTERVAL
|
Runs the thread
This method handles sending the heartbeat to the Discord websocket server, so the connection
can remain open and the bot remain online for those commands that require it to be.
Args:
None
|
juraj-google-style
|
def bots(self):
json = self.skype.conn('GET', '{0}/agents'.format(SkypeConnection.API_BOT), auth=SkypeConnection.Auth.SkypeToken).json().get('agentDescriptions', [])
return [self.merge(SkypeBotUser.fromRaw(self.skype, raw)) for raw in json]
|
Retrieve a list of all known bots.
Returns:
SkypeBotUser list: resulting bot user objects
|
codesearchnet
|
def _ParseCachedEntryVista(self, value_data, cached_entry_offset):
try:
cached_entry = self._ReadStructureFromByteStream(value_data[cached_entry_offset:], cached_entry_offset, self._cached_entry_data_type_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError('Unable to parse cached entry value with error: {0!s}'.format(exception))
path_size = cached_entry.path_size
maximum_path_size = cached_entry.maximum_path_size
path_offset = cached_entry.path_offset
if ((path_offset > 0) and (path_size > 0)):
path_size += path_offset
maximum_path_size += path_offset
try:
path = value_data[path_offset:path_size].decode('utf-16-le')
except UnicodeDecodeError:
raise errors.ParseError('Unable to decode cached entry path to string')
cached_entry_object = AppCompatCacheCachedEntry()
cached_entry_object.cached_entry_size = self._cached_entry_data_type_map.GetByteSize()
cached_entry_object.insertion_flags = cached_entry.insertion_flags
cached_entry_object.last_modification_time = cached_entry.last_modification_time
cached_entry_object.path = path
cached_entry_object.shim_flags = cached_entry.shim_flags
return cached_entry_object
|
Parses a Windows Vista cached entry.
Args:
value_data (bytes): value data.
cached_entry_offset (int): offset of the first cached entry data
relative to the start of the value data.
Returns:
AppCompatCacheCachedEntry: cached entry.
Raises:
ParseError: if the value data could not be parsed.
|
codesearchnet
|
def Copy(self):
result = QueueManager(store=self.data_store, token=self.token)
result.prev_frozen_timestamps = self.prev_frozen_timestamps
result.frozen_timestamp = self.frozen_timestamp
return result
|
Return a copy of the queue manager.
Returns:
Copy of the QueueManager object.
NOTE: pending writes/deletions are not copied. On the other hand, if the
original object has a frozen timestamp, a copy will have it as well.
|
codesearchnet
|
def _get_back_up_generator(frame_function, *args, **kwargs):
lines = next(frame_function(*args, **kwargs)).split('\n')
width = len(lines[0])
height = len(lines)
if (height == 1):
return util.BACKSPACE_GEN(width)
return util.BACKLINE_GEN(height)
|
Create a generator for the provided animation function that backs up
the cursor after a frame. Assumes that the animation function provides
a generator that yields strings of constant width and height.
Args:
frame_function: A function that returns a FrameGenerator.
args: Arguments for frame_function.
kwargs: Keyword arguments for frame_function.
Returns:
a generator that generates backspace/backline characters for
the animation func generator.
|
codesearchnet
|
def parse_global_args(argv):
parser = create_parser()
args = parser.parse_args(argv)
should_log = args.include or args.exclude or (args.verbose > 0)
verbosity = args.verbose
root = logging.getLogger()
if should_log:
formatter = logging.Formatter('%(asctime)s.%(msecs)03d %(levelname).3s %(name)s %(message)s',
'%y-%m-%d %H:%M:%S')
if args.logfile:
handler = logging.FileHandler(args.logfile)
else:
handler = logging.StreamHandler()
handler.setFormatter(formatter)
if args.include and args.exclude:
print("You cannot combine whitelisted (-i) and blacklisted (-e) loggers, you must use one or the other.")
sys.exit(1)
loglevels = [logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]
if verbosity >= len(loglevels):
verbosity = len(loglevels) - 1
level = loglevels[verbosity]
if args.include:
for name in args.include:
logger = logging.getLogger(name)
logger.setLevel(level)
logger.addHandler(handler)
root.addHandler(logging.NullHandler())
else:
for name in args.exclude:
logger = logging.getLogger(name)
logger.disabled = True
root.setLevel(level)
root.addHandler(handler)
else:
root.addHandler(logging.NullHandler())
return args
|
Parse all global iotile tool arguments.
Any flag based argument at the start of the command line is considered as
a global flag and parsed. The first non flag argument starts the commands
that are passed to the underlying hierarchical shell.
Args:
argv (list): The command line for this command
Returns:
Namespace: The parsed arguments, with all of the commands that should
be executed in an iotile shell as the attribute 'commands'
|
juraj-google-style
|
def tf_broadcast(*args):
if len(args) <= 1:
return args
sh = array_ops.shape(args[0])
for arg in args[1:]:
sh = array_ops.broadcast_dynamic_shape(sh, array_ops.shape(arg))
return [array_ops.broadcast_to(arg, sh) for arg in args]
|
Broadcast tensors.
Args:
*args: a list of tensors whose shapes are broadcastable against each other.
Returns:
Tensors broadcasted to the common shape.
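A minimal sketch with two broadcastable shapes:
import tensorflow as tf
a = tf.constant([[1.0], [2.0]])      # shape [2, 1]
b = tf.constant([10.0, 20.0, 30.0])  # shape [3]
a2, b2 = tf_broadcast(a, b)
# a2.shape == b2.shape == [2, 3]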
|
github-repos
|
def Spearman(poly, dist, sample=10000, retall=False, **kws):
samples = dist.sample(sample, **kws)
poly = polynomials.flatten(poly)
Y = poly(*samples)
if retall:
return spearmanr(Y.T)
return spearmanr(Y.T)[0]
|
Calculate Spearman's rank-order correlation coefficient.
Args:
poly (Poly):
Polynomial of interest.
dist (Dist):
Defines the space where correlation is taken.
sample (int):
Number of samples used in estimation.
retall (bool):
If true, return p-value as well.
Returns:
(float, numpy.ndarray):
Correlation output ``rho``. Of type float if two-dimensional problem.
Correleation matrix if larger.
(float, numpy.ndarray):
The two-sided p-value for a hypothesis test whose null hypothesis
is that two sets of data are uncorrelated, has same dimension as
``rho``.
|
codesearchnet
|
def CompileReport(self, mediator):
lines_of_text = []
if self._output_format == 'yaml':
lines_of_text.append(
yaml.safe_dump_all(self._service_collection.services))
else:
lines_of_text.append('Listing Windows Services')
for service in self._service_collection.services:
lines_of_text.append(self._FormatServiceText(service))
lines_of_text.append('')
lines_of_text.append('')
report_text = '\n'.join(lines_of_text)
return reports.AnalysisReport(plugin_name=self.NAME, text=report_text)
|
Compiles an analysis report.
Args:
mediator (AnalysisMediator): mediates interactions between analysis
plugins and other components, such as storage and dfvfs.
Returns:
AnalysisReport: report.
|
juraj-google-style
|
def parse_timing(self, nids=None):
paths = [task.output_file.path for task in self.iflat_tasks(nids=nids)]
from .abitimer import AbinitTimerParser
parser = AbinitTimerParser()
read_ok = parser.parse(paths)
if read_ok:
return parser
return None
|
Parse the timer data in the main output file(s) of Abinit.
Requires timopt /= 0 in the input file (usually timopt = -1)
Args:
nids: optional list of node identifiers used to filter the tasks.
Return: :class:`AbinitTimerParser` instance, None if error.
|
juraj-google-style
|
def log_images(self, name, images, step=None):
if isinstance(images, six.string_types):
raise TypeError('"images" should be a list of ndarrays, got {}'
.format(type(images)))
self._check_step(step)
tf_name = self._ensure_tf_name(name)
summary = self._image_summary(tf_name, images, step=step)
self._log_summary(tf_name, summary, images, step=step)
|
Log new images for given name on given step.
Args:
name (str): name of the variable (it will be converted to a valid
tensorflow summary name).
images (list): list of images to visualize
step (int): non-negative integer used for visualization
|
juraj-google-style
|
def _string_from_ip_int(cls, ip_int):
return '.'.join(_compat_str(struct.unpack(b'!B', b)[0]
if isinstance(b, bytes)
else b)
for b in _compat_to_bytes(ip_int, 4, 'big'))
|
Turns a 32-bit integer into dotted decimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
The IP address as a string in dotted decimal notation.
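The conversion is equivalent to the following standard-library round trip, shown here only for illustration since the method itself is a private classmethod:
import socket
import struct
socket.inet_ntoa(struct.pack('!I', 3232235777))   # -> '192.168.1.1'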
|
juraj-google-style
|
def easeInElastic(n, amplitude=1, period=0.3):
_checkRange(n)
return 1 - easeOutElastic(1-n, amplitude=amplitude, period=period)
|
An elastic tween function that begins with an increasing wobble and then snaps into the destination.
Args:
n (float): The time progress, starting at 0.0 and ending at 1.0.
amplitude (float): The amplitude of the elastic wobble. Defaults to 1.
period (float): The period of the elastic wobble. Defaults to 0.3.
Returns:
(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
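Boundary values for a quick check, assuming the companion easeOutElastic maps 0.0 to 0.0 and 1.0 to 1.0 as in standard tweening libraries:
easeInElastic(0.0)   # -> 0.0
easeInElastic(1.0)   # -> 1.0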
|
juraj-google-style
|
def _configure_from_mapping(self, item, whitelist_keys=False, whitelist=None):
if (whitelist is None):
whitelist = self.config.keys()
if whitelist_keys:
item = {k: v for (k, v) in item.items() if (k in whitelist)}
self.config.from_mapping(item)
return self
|
Configure from a mapping, or dict, like object.
Args:
item (dict):
A dict-like object that we can pluck values from.
Keyword Args:
whitelist_keys (bool):
Should we whitelist the keys before adding them to the
configuration? If no whitelist is provided, we use the
pre-existing config keys as a whitelist.
whitelist (list[str]):
An explicit list of keys that should be allowed. If provided
and ``whitelist_keys`` is true, we will use that as our
whitelist instead of pre-existing app config keys.
Returns:
fleaker.App:
Returns itself.
|
codesearchnet
|
def get_subject_with_local_validation(jwt_bu64, cert_obj):
try:
jwt_dict = validate_and_decode(jwt_bu64, cert_obj)
except JwtException as e:
return log_jwt_bu64_info(logging.error, str(e), jwt_bu64)
try:
return jwt_dict['sub']
except LookupError:
log_jwt_dict_info(logging.error, 'Missing "sub" key', jwt_dict)
|
Validate the JWT and return the subject it contains.
- The JWT is validated by checking that it was signed with a CN certificate.
- The returned subject can be trusted for authz and authn operations.
- Possible validation errors include:
- A trusted (TLS/SSL) connection could not be made to the CN holding the
signing certificate.
- The JWT could not be decoded.
- The JWT signature was invalid.
- The JWT claim set contains invalid "Not Before" or "Expiration Time" claims.
Args:
jwt_bu64: bytes
The JWT encoded using a URL safe flavor of Base64.
cert_obj: cryptography.Certificate
Public certificate used for signing the JWT (typically the CN cert).
Returns:
- On successful validation, the subject contained in the JWT is returned.
- If validation fails for any reason, errors are logged and None is returned.
|
codesearchnet
|
def group(self, group_id):
self._validate_group_id(group_id)
return self._Context(self, group_id)
|
Enter a context where the lock is with group `group_id`.
Args:
group_id: The group for which to acquire and release the lock.
Returns:
A context manager which will acquire the lock for `group_id`.
|
github-repos
|
def _next_dna(self, dna: Optional['DNA']=None) -> Optional['DNA']:
if dna is None:
return DNA(self.min_value)
raise NotImplementedError('`next_dna` is not supported on `Float` yet.')
|
Returns the next DNA in the space represented by this spec.
Args:
dna: The DNA whose next will be returned. If None, `next_dna` will return
the first DNA.
Returns:
The next DNA or None if there is no next DNA.
|
github-repos
|
def get_stored_variation(self, experiment, user_profile):
user_id = user_profile.user_id
variation_id = user_profile.get_variation_for_experiment(experiment.id)
if variation_id:
variation = self.config.get_variation_from_id(experiment.key, variation_id)
if variation:
self.logger.info('Found a stored decision. User "%s" is in variation "%s" of experiment "%s".' % (
user_id,
variation.key,
experiment.key
))
return variation
return None
|
Determine if the user has a stored variation available for the given experiment and return that.
Args:
experiment: Object representing the experiment for which user is to be bucketed.
user_profile: UserProfile object representing the user's profile.
Returns:
Variation if available. None otherwise.
|
juraj-google-style
|
def roll50(msg):
d = hex2bin(data(msg))
if d[0] == '0':
return None
sign = int(d[1])
value = bin2int(d[2:11])
if sign:
value = value - 512
angle = value * 45.0 / 256.0
return round(angle, 1)
|
Roll angle, BDS 5,0 message
Args:
msg (String): 28 bytes hexadecimal message (BDS50) string
Returns:
float: angle in degrees,
negative->left wing down, positive->right wing down
|
juraj-google-style
|
def LessThanOrEqualTo(self, value):
self._awql = self._CreateSingleValueCondition(value, '<=')
return self._query_builder
|
Sets the type of the WHERE clause as "less than or equal to".
Args:
value: The value to be used in the WHERE condition.
Returns:
The query builder that this WHERE builder links to.
|
juraj-google-style
|
def delete(self, key):
data = None
if key is not None:
data = self.db.delete(key.strip())
else:
self.tcex.log.warning(u'The key field was None.')
return data
|
Delete method of CRUD operation for all data types.
Args:
key (string): The key of the data to delete from the DB.
Returns:
(string): Result of the DB delete operation.
|
juraj-google-style
|
def parse_uniprot_xml_metadata(sr):
xref_dbs_to_keep = ['GO', 'KEGG', 'PDB', 'PROSITE', 'Pfam', 'RefSeq']
infodict = {}
infodict['alt_uniprots'] = list(set(sr.annotations['accessions']).difference([sr.id]))
infodict['gene_name'] = None
if ('gene_name_primary' in sr.annotations):
infodict['gene_name'] = sr.annotations['gene_name_primary']
infodict['description'] = sr.description
infodict['taxonomy'] = None
if ('organism' in sr.annotations):
infodict['taxonomy'] = sr.annotations['organism']
infodict['seq_version'] = sr.annotations['sequence_version']
infodict['seq_date'] = sr.annotations['sequence_modified']
infodict['entry_version'] = sr.annotations['version']
infodict['entry_date'] = sr.annotations['modified']
tmp = defaultdict(list)
for xref in sr.dbxrefs:
database = xref.split(':', 1)[0]
xrefs = xref.split(':', 1)[(- 1)]
if (database in xref_dbs_to_keep):
if (database == 'PDB'):
tmp['pdbs'].append(xrefs)
else:
tmp[database.lower()].append(xrefs)
infodict.update(tmp)
return infodict
|
Load relevant attributes and dbxrefs from a parsed UniProt XML file in a SeqRecord.
Args:
sr (SeqRecord): SeqRecord parsed from a UniProt XML file.
Returns:
dict: All parsed information
|
codesearchnet
|
def make_acro(past, prefix, s):
def _make_acro(s, t=0):
'Make an acronym of s for trial t'
v = ['a', 'e', 'i', 'o', 'u', 'y']
c = [chr(x) for x in six_xrange(ord('a'), (ord('z') + 1)) if (chr(x) not in v)]
s = re.sub('\\W+', '', s.lower())
vx = [x for x in s if (x in v)]
cx = [x for x in s if (x in c)]
if s.startswith('Mc'):
if (t < 1):
return ('Mc' + v[0])
if (t < 2):
return ('Mc' + c[0])
if (s[0] in v):
if (t < 1):
return ((vx[0] + cx[0]) + cx[1])
if (t < 2):
return ((vx[0] + vx[1]) + cx[0])
if ((s[0] in c) and (s[1] in c)):
if (t < 1):
return ((cx[0] + cx[1]) + vx[0])
if (t < 2):
return ((cx[0] + cx[1]) + cx[2])
if (t < 3):
return ((cx[0] + vx[0]) + cx[1])
if (t < 4):
return ((cx[0] + cx[1]) + cx[2])
if (t < 5):
return ((cx[0] + vx[0]) + vx[1])
if (t < 6):
return ((cx[0] + cx[1]) + cx[(- 1)])
if (t < 7):
return s[0:3]
if (t < 8):
return s[1:4]
if (t < 9):
return s[2:5]
if (t < 10):
return s[3:6]
return None
for t in six_xrange(11):
try:
a = _make_acro(s, t)
if (a is not None):
if prefix:
aps = (prefix + a)
else:
aps = a
if (aps not in past):
past.add(aps)
return a
except IndexError:
pass
raise Exception('Could not get acronym.')
|
Create a three letter acronym from the input string s.
Args:
past: A set object, for storing acronyms that have already been created
prefix: A prefix added to the acronym before storing in the set
s: The string to create the acronym from.
|
codesearchnet
|
def _clean_query_string(q):
q = q.replace('()', '').strip()
if q.endswith('('):
q = q[:(- 1)].strip()
if ((q[(- 3):] == 'AND') or (q[(- 3):] == 'NOT')):
q = q[:(- 3)]
elif (q[(- 2):] == 'OR'):
q = q[:(- 2)]
while (q.count('(') > q.count(')')):
q += ')'
while (q.count(')') > q.count('(')):
q = ('(' + q)
return q.strip()
|
Clean up a query string for searching.
Removes unmatched parentheses and joining operators.
Arguments:
q (str): Query string to be cleaned
Returns:
str: The clean query string.
|
codesearchnet
|
def _non_slot_variables(self):
return self._non_slot_dict.values()
|
Additional variables created by the `Optimizer`.
Returns:
A list or tuple of variables.
|
github-repos
|
def handle_discovery_request(self, path, request, start_response):
if path == self._GET_REST_API:
return self._get_rest_doc(request, start_response)
elif path == self._GET_RPC_API:
error_msg = ('RPC format documents are no longer supported with the '
'Endpoints Framework for Python. Please use the REST '
'format.')
_logger.error('%s', error_msg)
return util.send_wsgi_error_response(error_msg, start_response)
elif path == self._LIST_API:
return self._list(request, start_response)
return False
|
Returns the result of a discovery service request.
This calls start_response and returns the response body.
Args:
path: A string containing the API path (the portion of the path
after /_ah/api/).
request: An ApiRequest, the transformed request sent to the Discovery API.
start_response: A function with semantics defined in PEP-333.
Returns:
The response body. Or returns False if the request wasn't handled by
DiscoveryService.
|
juraj-google-style
|
def generate(data, iterations=1000, force_strength=5.0, dampening=0.01, max_velocity=2.0, max_distance=50, is_3d=True):
edges = [{'source': s, 'target': t} for (s, t) in data]
nodes = force_directed_layout.run(edges, iterations, force_strength, dampening, max_velocity, max_distance, is_3d)
return {'edges': edges, 'nodes': nodes}
|
Runs a force-directed algorithm on a graph, returning a data structure.
Args:
data: An adjacency list of tuples (ie. [(1,2),...])
iterations: (Optional) Number of FDL iterations to run in coordinate
generation
force_strength: (Optional) Strength of Coulomb and Hooke forces
(edit this to scale the distance between nodes)
dampening: (Optional) Multiplier to reduce force applied to nodes
max_velocity: (Optional) Maximum distance a node can move in one step
max_distance: (Optional) The maximum inter-node distance considered
is_3d: (Optional) Generates three-dimensional coordinates
Outputs a json-serializable Python object. To visualize, pass the output to
`jgraph.draw(...)`.
|
codesearchnet
|
def register(cls, name: str, plugin: Type[ConnectionPlugin]) -> None:
existing_plugin = cls.available.get(name)
if existing_plugin is None:
cls.available[name] = plugin
elif existing_plugin != plugin:
raise ConnectionPluginAlreadyRegistered(
f"Connection plugin {plugin.__name__} can't be registered as "
f"{name!r} because plugin {existing_plugin.__name__} "
f"was already registered under this name"
)
|
Registers a connection plugin with a specified name
Args:
name: name of the connection plugin to register
plugin: defined connection plugin class
Raises:
:obj:`nornir.core.exceptions.ConnectionPluginAlreadyRegistered` if
another plugin with the specified name was already registered
|
juraj-google-style
|
def str2dict(str_in):
dict_out = safe_eval(str_in)
if not isinstance(dict_out, dict):
dict_out = None
return dict_out
|
Extracts a dict from a string.
Args:
str_in (string): string that contains a python dict
Returns:
(dict) or None if no valid dict was found
Raises:
-
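Illustrative behaviour, assuming `safe_eval` performs a literal evaluation of the string:
str2dict("{'a': 1, 'b': 2}")   # -> {'a': 1, 'b': 2}
str2dict("[1, 2, 3]")          # -> None (evaluates, but not a dict)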
|
juraj-google-style
|
def stop_stream_capturer(self, address):
address = str(address)
if (address not in self._stream_capturers):
raise ValueError('Capturer address does not match a managed capturer')
stream_cap = self._stream_capturers[address]
self._pool.killone(stream_cap[1])
del self._stream_capturers[address]
|
Stop a capturer that the manager controls.
Args:
address:
An address array of the form ['host', 'port'] or similar
depending on the connection type of the stream capturer being
terminated. The capturer for the address will be terminated
along with all handlers for that capturer if the address is
that of a managed capturer.
Raises:
ValueError:
The provided address doesn't match a capturer that is
currently managed.
|
codesearchnet
|
def run(self, copy_to_current_on_exit=False, site_property=None):
scratch = tempfile.gettempdir()
with ScratchDir(scratch, copy_to_current_on_exit=copy_to_current_on_exit) as scratch_dir:
self._write_input(input_dir=scratch_dir)
packmol_input = open(os.path.join(scratch_dir, self.input_file), 'r')
p = Popen(self.packmol_bin, stdin=packmol_input, stdout=PIPE, stderr=PIPE)
(stdout, stderr) = p.communicate()
output_file = os.path.join(scratch_dir, self.control_params['output'])
if os.path.isfile(output_file):
packed_mol = BabelMolAdaptor.from_file(output_file, self.control_params['filetype'])
packed_mol = packed_mol.pymatgen_mol
print('packed molecule written to {}'.format(self.control_params['output']))
if site_property:
packed_mol = self.restore_site_properties(site_property=site_property, filename=output_file)
return packed_mol
else:
print('Packmol execution failed')
print(stdout, stderr)
return None
|
Write the input file to the scratch directory, run packmol and return
the packed molecule.
Args:
copy_to_current_on_exit (bool): Whether or not to copy the packmol
input/output files from the scratch directory to the current
directory.
site_property (str): if set then the specified site property
for the final packed molecule will be restored.
Returns:
Molecule object
|
codesearchnet
|
def cluster_info(cpu, cfg):
cpus = cpu.cpu_count
pods_per_core = cfg.doc.find("pods-per-core")
pods_per_core_int = int(pods_per_core.value) if pods_per_core else PODS_PER_CORE
cfg_max_pods = cfg.doc.find("max-pods")
cfg_max_pods_int = int(cfg_max_pods.value) if cfg_max_pods else MAX_PODS
calc_max_pods = cpus * pods_per_core_int
return {
"cpu_count": cpus,
"pods_per_core": pods_per_core_int,
"pods_per_core_customized": bool(pods_per_core),
"max_pods": min(cfg_max_pods_int, calc_max_pods),
"max_pods_customized": bool(cfg_max_pods)
}
|
Collects fact for each host
Collects the cpu and node configuration facts to be used by the rule.
Arguments:
cpu (CpuInfo): Parser object for the cpu info.
cfg (NodeConfig): Parser object for the node configuration.
Returns:
dict: Dictionary of fact information including the keys
``cpu_count``, ``pods_per_core``, ``pods_per_core_customized``,
``max_pods``, and ``max_pods_customized``.
|
juraj-google-style
|
def get_boards(board_name_list, *args, **kwargs):
if isinstance(board_name_list, basestring):
board_name_list = board_name_list.split()
return [Board(name, *args, **kwargs) for name in board_name_list]
|
Given a list of boards, return :class:`basc_py4chan.Board` objects.
Args:
board_name_list (list): List of board names to get, eg: ['b', 'tg']
Returns:
list of :class:`basc_py4chan.Board`: Requested boards.
|
juraj-google-style
|
def __field_to_parameter_type(self, field):
variant = field.variant
if variant == messages.Variant.MESSAGE:
raise TypeError('A message variant can\'t be used in a parameter.')
custom_variant_map = {
messages.Variant.SINT32: 'int32',
messages.Variant.SINT64: 'int64',
messages.Variant.BOOL: 'boolean',
messages.Variant.ENUM: 'string',
}
return custom_variant_map.get(variant) or variant.name.lower()
|
Converts the field variant type into a string describing the parameter.
Args:
field: An instance of a subclass of messages.Field.
Returns:
A string corresponding to the variant enum of the field, with a few
exceptions. In the case of signed ints, the 's' is dropped; for the BOOL
variant, 'boolean' is used; and for the ENUM variant, 'string' is used.
Raises:
TypeError: if the field variant is a message variant.
|
juraj-google-style
|
def gaussian_bags_of_words(Y, vocab=vocab1k, sigma=1, bag_size=[25, 50], **kwargs):
def make_distribution(sigma, num_words):
p = abs(np.random.normal(0, sigma, num_words))
return (p / sum(p))
num_words = len(vocab)
word_dists = {y: make_distribution(sigma, num_words) for y in set(Y)}
bag_sizes = np.random.choice(range(min(bag_size), max(bag_size)), len(Y))
X = []
items = []
for (i, (y, length)) in enumerate(zip(Y, bag_sizes)):
x = torch.from_numpy(np.random.choice(num_words, length, p=word_dists[y]))
X.append(x)
items.append(' '.join((vocab[j] for j in x)))
return (X, items)
|
Generate Gaussian bags of words based on label assignments
Args:
Y: np.array of true labels
sigma: (float) the standard deviation of the Gaussian distributions
bag_size: (list) the min and max length of bags of words
Returns:
X: (Tensor) a tensor of indices representing tokens
D: (list) a list of sentences (strings)
The sentences are conditionally independent, given a label.
Note that technically we use a half-normal distribution here because we
take the absolute value of the normal distribution.
Example:
TBD
|
codesearchnet
|
def apply_transformation(self, structure):
sga = SpacegroupAnalyzer(structure, symprec=self.symprec, angle_tolerance=self.angle_tolerance)
return sga.get_conventional_standard_structure(international_monoclinic=self.international_monoclinic)
|
Returns the conventional standard structure for the input structure.
Args:
structure: A structure
Returns:
The same structure in a conventional standard setting
|
codesearchnet
|
def matmul(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:
out = math_ops.matmul(input_tensor, random_tensor_gen_fn((2, 3)))
out = math_ops.matmul(out, random_tensor_gen_fn((3, 4)))
return {'output': out}
|
Performs a matrix multiplication.
Args:
input_tensor: Input tensor to matmul with the filter.
Returns:
A 'output' -> output tensor mapping
|
github-repos
|
def parameter_combinations(test_parameters: Sequence[Mapping[str, Sequence[Any]]]) -> Sequence[Mapping[str, Any]]:
real_parameters = []
for parameters in test_parameters:
keys = parameters.keys()
for curr in itertools.product(*parameters.values()):
real_parameters.append(dict(zip(keys, curr)))
return real_parameters
|
Generate all combinations of test parameters.
Args:
test_parameters: List of dictionaries that maps parameter keys and values.
Returns:
real_parameters: All possible combinations of the parameters as list of
dictionaries.
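A small worked example of the expansion:
parameter_combinations([{'activation': ['relu', 'tanh'], 'bias': [True]}])
# -> [{'activation': 'relu', 'bias': True}, {'activation': 'tanh', 'bias': True}]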
|
github-repos
|
def get_rml_processors(es_defs):
proc_defs = es_defs.get("kds_esRmlProcessor", [])
if proc_defs:
new_defs = []
for proc in proc_defs:
params = proc['kds_rmlProcessorParams'][0]
proc_kwargs = {}
if params.get("kds_rtn_format"):
proc_kwargs["rtn_format"] = params.get("kds_rtn_format")[0]
new_def = dict(name=proc['rdfs_label'][0],
subj=params["kds_subjectKwarg"][0],
proc_kwargs=proc_kwargs,
force=proc.get('kds_forceNested',[False])[0],
processor=CFG.rml.get_processor(\
proc['rdfs_label'][0],
proc['kds_esRmlMapping'],
proc['rdf_type'][0]))
new_defs.append(new_def)
es_defs['kds_esRmlProcessor'] = new_defs
return es_defs
|
Returns the es_defs with the instantiated rml_processor
Args:
-----
es_defs: the rdf_class elasticsearch definitions
|
juraj-google-style
|
def Completions(component, verbose=False):
if inspect.isroutine(component) or inspect.isclass(component):
spec = inspectutils.GetFullArgSpec(component)
return _CompletionsFromArgs(spec.args + spec.kwonlyargs)
if isinstance(component, (tuple, list)):
return [str(index) for index in range(len(component))]
if inspect.isgenerator(component):
return []
return [_FormatForCommand(member_name) for member_name, _ in VisibleMembers(component, verbose=verbose)]
|
Gives possible Fire command completions for the component.
A completion is a string that can be appended to a command to continue that
command. These are used for TAB-completions in Bash for Fire CLIs.
Args:
component: The component whose completions to list.
verbose: Whether to include all completions, even private members.
Returns:
A list of completions for a command that would so far return the component.
|
github-repos
|
def multiple(layer: int, limit: int) -> Set[str]:
return {str(x).zfill(2) for x in [(2 ** x) for x in range(limit)] if ((x % (2 ** (layer - 1))) == 0)}
|
Returns a set of strings to be used as Slots with Pabiana's default Clock.
Args:
layer: The layer in the hierarchy this Area is placed in.
Technically, the number specifies how many of the Clock's signals are relevant to the Area.
Between 1 and limit.
limit: The number of layers of the hierarchy.
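Illustrative calls, traced against the code above:
multiple(1, 4)   # -> {'01', '02', '04', '08'}
multiple(2, 4)   # -> {'02', '04', '08'}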
|
codesearchnet
|
def generate_secret_file(file_path, pattern, service, environment, clients):
changed = False
with open(file_path) as json_file:
data = json.load(json_file, object_pairs_hook=OrderedDict)
try:
for key, value in data["params"][environment].items():
if pattern in key:
if "aws:kms:decrypt" in value:
print("Found match, key {} but value is encrypted already; skipping...".format(key))
else:
print("Found match, encrypting key {}".format(key))
encrypted_password = ef_utils.kms_encrypt(clients['kms'], service, environment, value)
data["params"][environment][key] = format_secret(encrypted_password)
changed = True
except KeyError:
ef_utils.fail("Error env: {} does not exist in parameters file".format(environment))
if changed:
with open(file_path, "w") as encrypted_file:
json.dump(data, encrypted_file, indent=2, separators=(',', ': '))
encrypted_file.write("\n")
|
Generate a parameter files with it's secrets encrypted in KMS
Args:
file_path (string): Path to the parameter file to be encrypted
pattern (string): Pattern to do fuzzy string matching
service (string): Service to use KMS key to encrypt file
environment (string): Environment to encrypt values
clients (dict): KMS AWS client that has been instantiated
Returns:
None
Raises:
IOError: If the file does not exist
|
juraj-google-style
|
def get_without(self, fragments, use_lookup=None):
if (use_lookup is None):
use_lookup = settings['defaults']['use_lookup']
if pd.api.types.is_list_like(fragments):
for fragment in fragments:
try:
index_of_all_fragments |= fragment.index
except NameError:
index_of_all_fragments = fragment.index
else:
index_of_all_fragments = fragments.index
missing_part = self.loc[self.index.difference(index_of_all_fragments)]
missing_part = missing_part.fragmentate(use_lookup=use_lookup)
return sorted(missing_part, key=len, reverse=True)
|
Return self without the specified fragments.
Args:
fragments: Either a list of :class:`~chemcoord.Cartesian` or a
:class:`~chemcoord.Cartesian`.
use_lookup (bool): Use a lookup variable for
:meth:`~chemcoord.Cartesian.get_bonds`. The default is
specified in ``settings['defaults']['use_lookup']``
Returns:
list: List containing :class:`~chemcoord.Cartesian`.
|
codesearchnet
|
def _md5sum(file_path):
md5 = hashlib.md5()
with open(file_path, "rb") as md5_file:
while True:
data = md5_file.read(1024 * 1024 * 4)
if not data:
break
md5.update(data)
return md5.digest()
|
Helper function that builds an md5sum from a file in chunks.
Args:
file_path: The path to the file you want an md5sum for.
Returns:
bytes: The raw md5 digest of the file.
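A quick self-check sketch using a temporary file:
import hashlib
import tempfile
with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b'hello world')
    path = tmp.name
assert _md5sum(path) == hashlib.md5(b'hello world').digest()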
|
juraj-google-style
|
def set_ocha_url(cls, url=None):
if url is None:
url = cls._ochaurl_int
cls._ochaurl = url
|
Set OCHA url from which to retrieve countries data
Args:
url (str): OCHA url from which to retrieve countries data. Defaults to internal value.
Returns:
None
|
juraj-google-style
|
def update(self, current, values=None, finalize=None):
if finalize is None:
if self.target is None:
finalize = False
else:
finalize = current >= self.target
values = values or []
for k, v in values:
if k not in self._values_order:
self._values_order.append(k)
if k not in self.stateful_metrics:
if finalize:
self._values[k] = [v, 1]
else:
value_base = max(current - self._seen_so_far, 1)
if k not in self._values:
self._values[k] = [v * value_base, value_base]
else:
self._values[k][0] += v * value_base
self._values[k][1] += value_base
else:
self._values[k] = [v, 1]
self._seen_so_far = current
message = ''
special_char_len = 0
now = time.time()
time_per_unit = self._estimate_step_duration(current, now)
if self.verbose == 1:
if now - self._last_update < self.interval and (not finalize):
return
if self._dynamic_display:
message += '\x08' * self._prev_total_width
message += '\r'
else:
message += '\n'
if self.target is not None:
numdigits = int(math.log10(self.target)) + 1
bar = ('%' + str(numdigits) + 'd/%d') % (current, self.target)
bar = f'\x1b[1m{bar}\x1b[0m '
special_char_len += 8
prog = float(current) / self.target
prog_width = int(self.width * prog)
if prog_width > 0:
bar += '\x1b[32m' + '━' * prog_width + '\x1b[0m'
special_char_len += 9
bar += '\x1b[37m' + '━' * (self.width - prog_width) + '\x1b[0m'
special_char_len += 9
else:
bar = '%7d/Unknown' % current
message += bar
if self.target is not None and (not finalize):
eta = time_per_unit * (self.target - current)
if eta > 3600:
eta_format = '%d:%02d:%02d' % (eta // 3600, (eta % 3600) // 60, eta % 60)
elif eta > 60:
eta_format = '%d:%02d' % (eta // 60, eta % 60)
else:
eta_format = '%ds' % eta
info = f' \x1b[1m{eta_format}\x1b[0m'
else:
info = f' \x1b[1m{now - self._start:.0f}s\x1b[0m'
special_char_len += 8
info += self._format_time(time_per_unit, self.unit_name)
for k in self._values_order:
info += f' - {k}:'
if isinstance(self._values[k], list):
avg = backend.convert_to_numpy(backend.numpy.mean(self._values[k][0] / max(1, self._values[k][1])))
avg = float(avg)
if abs(avg) > 0.001:
info += f' {avg:.4f}'
else:
info += f' {avg:.4e}'
else:
info += f' {self._values[k]}'
message += info
total_width = len(bar) + len(info) - special_char_len
if self._prev_total_width > total_width:
message += ' ' * (self._prev_total_width - total_width)
if finalize:
message += '\n'
io_utils.print_msg(message, line_break=False)
self._prev_total_width = total_width
message = ''
elif self.verbose == 2:
if finalize:
numdigits = int(math.log10(self.target)) + 1
count = ('%' + str(numdigits) + 'd/%d') % (current, self.target)
info = f'{count} - {now - self._start:.0f}s'
info += ' -' + self._format_time(time_per_unit, self.unit_name)
for k in self._values_order:
info += f' - {k}:'
avg = backend.convert_to_numpy(backend.numpy.mean(self._values[k][0] / max(1, self._values[k][1])))
if avg > 0.001:
info += f' {avg:.4f}'
else:
info += f' {avg:.4e}'
info += '\n'
message += info
io_utils.print_msg(message, line_break=False)
message = ''
self._last_update = now
|
Updates the progress bar.
Args:
current: Index of current step.
values: List of tuples: `(name, value_for_last_step)`. If `name` is
in `stateful_metrics`, `value_for_last_step` will be displayed
as-is. Else, an average of the metric over time will be
displayed.
finalize: Whether this is the last update for the progress bar. If
`None`, defaults to `current >= self.target`.
|
github-repos
|
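A hedged usage sketch, assuming update belongs to a Keras-style Progbar constructed with a target; the loop and metric values are illustrative:

import time
from keras.utils import Progbar  # assumption: the Keras 3 progress bar utility

progbar = Progbar(target=100, unit_name='step')
for step in range(1, 101):
    time.sleep(0.01)                                    # simulate work
    progbar.update(step, values=[('loss', 1.0 / step)])
# The final call with step == target finalizes the bar and prints a newline.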
def draw_line(self, x1, y1, x2, y2, color):
check_int_err(lib.lineRGBA(self._ptr, x1, y1, x2, y2, color[0], color[1], color[2], color[3]))
|
Draw a line.
Args:
x1 (int): The x coordinate of the start of the line.
y1 (int): The y coordinate of the start of the line.
x2 (int): The x coordinate of the end of the line.
y2 (int): The y coordinate of the end of the line.
color (Tuple[int, int, int, int]): The color of the line.
Raises:
SDLError: If an error is encountered.
|
codesearchnet
|
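A usage sketch for draw_line, assuming renderer is an object exposing this SDL_gfx-backed method (the object name and coordinates are illustrative):

# Draw a red diagonal line from (10, 10) to (200, 120); the last tuple element is alpha.
renderer.draw_line(10, 10, 200, 120, (255, 0, 0, 255))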
def _GetNumberOfDaysInCentury(self, year):
if year < 0:
raise ValueError('Year value out of bounds.')
year, _ = divmod(year, 100)
if self._IsLeapYear(year):
return 36525
return 36524
|
Retrieves the number of days in a century.
Args:
year (int): year in the century e.g. 1970.
Returns:
int: number of days in the century.
Raises:
ValueError: if the year value is out of bounds.
|
juraj-google-style
|
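A brief worked example, assuming an instance d of the class defining this helper and the standard Gregorian leap rule in _IsLeapYear:

# divmod(1970, 100) gives century number 19; 19 is not divisible by 4 -> non-leap century.
d._GetNumberOfDaysInCentury(1970)  # -> 36524
# divmod(2000, 100) gives century number 20; 20 is divisible by 4 -> leap century.
d._GetNumberOfDaysInCentury(2000)  # -> 36525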
def get(self):
try:
item = self._queue.get_nowait()
except (Empty, PersistEmpty):
return None
if self._persistence_path:
self._queue.task_done()
return item
|
Gets a single item from the queue and returns it. If the queue is empty, this method will return None.
Returns:
:class:`contracts.Envelope`: a telemetry envelope object, or None if the queue is empty.
|
codesearchnet
|
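A drain-the-queue sketch, assuming this get method lives on a queue wrapper (here called sender_queue) and process is a hypothetical handler:

# Drain all currently buffered envelopes; get() returns None once the queue is empty.
while True:
    envelope = sender_queue.get()
    if envelope is None:
        break
    process(envelope)  # hypothetical handler for a single envelope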
def loop_until_timeout_or_valid(timeout_s, function, validation_fn, sleep_s=1):
if ((timeout_s is None) or (not hasattr(timeout_s, 'has_expired'))):
timeout_s = PolledTimeout(timeout_s)
while True:
result = function()
if (validation_fn(result) or timeout_s.has_expired()):
return result
time.sleep(sleep_s)
|
Loops until the specified function returns valid or a timeout is reached.
Note: The function may return anything which, when passed to validation_fn,
evaluates to implicit True. This function will loop calling the function as
long as the result of validation_fn(function_result) returns something which
evaluates to False. We ensure function is called at least once regardless
of timeout.
Args:
timeout_s: The number of seconds to wait until a timeout condition is
reached. As a convenience, this accepts None to mean never timeout. Can
also be passed a PolledTimeout object instead of an integer.
function: The function to call each iteration.
validation_fn: The validation function called on the function result to
determine whether to keep looping.
sleep_s: The number of seconds to wait after calling the function.
Returns:
Whatever the function returned last.
|
codesearchnet
|
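A sketch of polling until a device reports ready or 30 seconds elapse; check_status and the 'ready' value are illustrative:

result = loop_until_timeout_or_valid(
    timeout_s=30,
    function=check_status,                        # hypothetical status probe
    validation_fn=lambda status: status == 'ready',
    sleep_s=2,
)
# `result` is whatever check_status returned last, whether or not it was valid.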
def fit(self, volumes, energies):
eos_fit = self.model(np.array(volumes), np.array(energies))
eos_fit.fit()
return eos_fit
|
Fit energies as function of volumes.
Args:
volumes (list/np.array)
energies (list/np.array)
Returns:
EOSBase: EOSBase object
|
juraj-google-style
|
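A usage sketch assuming this fit method belongs to pymatgen's EOS wrapper; the volume and energy values are made up:

from pymatgen.analysis.eos import EOS  # assumption: pymatgen-style EOS wrapper

volumes = [12.0, 12.5, 13.0, 13.5, 14.0]         # illustrative volumes (A^3)
energies = [-10.1, -10.4, -10.5, -10.4, -10.2]   # illustrative energies (eV)
eos_fit = EOS(eos_name='birch_murnaghan').fit(volumes, energies)
print(eos_fit.v0, eos_fit.e0, eos_fit.b0_GPa)    # equilibrium volume, energy, bulk modulus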
def get_params_from_sqlalchemy_url(db_url):
result = urlsplit(db_url)
return {'database': result.path[1:], 'host': result.hostname, 'port': result.port, 'username': result.username, 'password': result.password, 'driver': result.scheme}
|
Gets PostgreSQL database connection parameters from SQLAlchemy url
Args:
db_url (str): SQLAlchemy url
Returns:
Dict[str,Any]: Dictionary of database connection parameters
|
codesearchnet
|
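A short sketch with an illustrative connection string:

params = get_params_from_sqlalchemy_url('postgresql://scott:tiger@localhost:5432/mydb')
# -> {'database': 'mydb', 'host': 'localhost', 'port': 5432,
#     'username': 'scott', 'password': 'tiger', 'driver': 'postgresql'}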
def heightmap_get_minmax(hm: np.ndarray) -> Tuple[float, float]:
mi = ffi.new("float *")
ma = ffi.new("float *")
lib.TCOD_heightmap_get_minmax(_heightmap_cdata(hm), mi, ma)
return mi[0], ma[0]
|
Return the min and max values of this heightmap.
Args:
hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
Returns:
Tuple[float, float]: The (min, max) values.
.. deprecated:: 2.0
Use ``hm.min()`` or ``hm.max()`` instead.
|
juraj-google-style
|
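A tiny sketch, assuming a C-contiguous float32 array is an acceptable heightmap; as the deprecation note says, plain NumPy is preferred for new code:

import numpy as np

hm = np.random.random((16, 16)).astype(np.float32)  # heightmaps are float32 arrays
lo, hi = heightmap_get_minmax(hm)                    # equivalent to (hm.min(), hm.max())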
def get_tag_hash(self, tag_name):
tag_object = get_single_item_from_sequence(sequence=self._github_repository.tags(), condition=(lambda tag: (tag.name == tag_name)), no_item_error_message='No tag "{}" exist'.format(tag_name), too_many_item_error_message='Too many tags "{}" found'.format(tag_name))
return tag_object.commit.sha
|
Fetch the commit hash that was tagged with ``tag_name``.
Args:
tag_name (str): the name of the tag
Returns:
str: the commit hash linked by the tag
|
codesearchnet
|
def preprocess_model(self, model: 'PreTrainedModel', **kwargs):
model.is_quantized = True
model.quantization_method = self.quantization_config.quant_method
if self.pre_quantized:
self._convert_model_for_quantization(model)
return self._process_model_before_weight_loading(model, **kwargs)
|
Setting model attributes and/or converting model before weights loading. At this point
the model should be initialized on the meta device so you can freely manipulate the skeleton
of the model in order to replace modules in-place. Make sure to override the abstract method `_process_model_before_weight_loading`.
Args:
model (`~transformers.PreTrainedModel`):
The model to quantize
kwargs (`dict`, *optional*):
The keyword arguments that are passed along `_process_model_before_weight_loading`.
|
github-repos
|
def random_new_from_seed(
seed: Hashable, algo: int = RNG_CMWC
) -> tcod.random.Random:
return tcod.random.Random(algo, seed)
|
Return a new Random instance. Using the given ``seed`` and ``algo``.
Args:
seed (Hashable): The RNG seed. Should be a 32-bit integer, but any
hashable object is accepted.
algo (int): The random number algorithm to use.
Returns:
Random: A new Random instance using the given algorithm.
|
juraj-google-style
|
def _ip_int_from_string(self, ip_str):
octets = ip_str.split('.')
if len(octets) != 4:
raise AddressValueError(ip_str)
packed_ip = 0
for oc in octets:
try:
packed_ip = (packed_ip << 8) | self._parse_octet(oc)
except ValueError:
raise AddressValueError(ip_str)
return packed_ip
|
Turn the given IP string into an integer for comparison.
Args:
ip_str: A string, the IP ip_str.
Returns:
The IP ip_str as an integer.
Raises:
AddressValueError: if ip_str isn't a valid IPv4 Address.
|
juraj-google-style
|
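A worked example of the packing loop for the illustrative address '192.168.1.1': each octet is folded in by shifting the accumulator left 8 bits and OR-ing the parsed value.

packed = 0
for octet in (192, 168, 1, 1):
    packed = (packed << 8) | octet
assert packed == 3232235777  # 192*2**24 + 168*2**16 + 1*2**8 + 1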
def verify(self, obj):
if not isinstance(obj, float):
raise ValidationError("Object is not a float", reason='object is not a float', object=obj)
return obj
|
Verify that the object conforms to this verifier's schema.
Args:
obj (object): A python object to verify
Raises:
ValidationError: If there is a problem verifying the dictionary, a
ValidationError is thrown with at least the reason key set indicating
the reason for the lack of validation.
|
juraj-google-style
|
def put(self, file_path, upload_path = ''):
f = open(file_path, "r")
c = f.read()
file_name = os.path.basename(file_path)
now = datetime.datetime.now().isoformat()
url = nurls['put'] + upload_path + file_name
headers = {'userid': self.user_id,
'useridx': self.useridx,
'MODIFYDATE': now,
'Content-Type': magic.from_file(file_path, mime=True),
'charset': 'UTF-8',
               'Origin': 'http://ndrive.naver.com',  # assumed full Origin URL; truncated in the source
}
r = self.session.put(url = url, data = c, headers = headers)
return self.resultManager(r.text)
|
PUT: upload a local file to Ndrive.
Args:
file_path: Full path for a file you want to upload
upload_path: Ndrive path where you want to upload file
ex) /Picture/
Returns:
True: Upload success
False: Upload failed
|
juraj-google-style
|
def _parse_impute2_line(self, line):
row = line.rstrip("\r\n").split(" ")
prob = np.array(row[5:], dtype=float)
    prob.shape = (prob.shape[0] // 3, 3)
dosage = 2 * prob[:, 2] + prob[:, 1]
if self.prob_t > 0:
dosage[~np.any(prob >= self.prob_t, axis=1)] = np.nan
return Genotypes(
Variant(row[1], CHROM_STR_ENCODE.get(row[0], row[0]), int(row[2]),
[row[3], row[4]]),
dosage,
reference=row[3],
coded=row[4],
multiallelic=False,
)
|
Parses the current IMPUTE2 line (a single variant).
Args:
line (str): An IMPUTE2 line.
Returns:
Genotypes: The genotype in dosage format.
Warning
=======
By default, the genotypes object has multiallelic set to False.
|
juraj-google-style
|
def from_name(cls, name, *, queue=DefaultJobQueueName.Workflow, clear_data_store=True, arguments=None):
new_workflow = cls(queue=queue, clear_data_store=clear_data_store)
new_workflow.load(name, arguments=arguments)
return new_workflow
|
Create a workflow object from a workflow script.
Args:
name (str): The name of the workflow script.
queue (str): Name of the queue the workflow should be scheduled to.
clear_data_store (bool): Remove any documents created during the workflow
run in the data store after the run.
arguments (dict): Dictionary of additional arguments that are ingested
into the data store prior to the execution of the workflow.
Returns:
Workflow: A fully initialised workflow object
|
codesearchnet
|
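A usage sketch for from_name, assuming a workflow script named 'etl_pipeline' is discoverable by the loader (the name, queue, and arguments are illustrative):

workflow = Workflow.from_name(
    'etl_pipeline',
    queue='high_priority',
    clear_data_store=False,
    arguments={'input_path': '/data/in.csv'},
)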
def VerifyStructure(self, parser_mediator, lines):
if self._VERIFICATION_REGEX.match(lines):
return True
return False
|
Verifies whether content corresponds to a Zsh extended_history file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
lines (str): one or more lines from the text file.
Returns:
bool: True if the lines correspond to a Zsh extended_history file.
|
codesearchnet
|
def visit_node(self, node):
raise NotImplementedError('Subclasses must implement this.')
|
Visitor function.
Args:
node: Node
Returns:
bool, whether the node should be revisited; subclasses can visit every
reachable node exactly once by always returning False
|
github-repos
|
def _subscribe_new(tensor, side_effects, control_cache):
update_input = []
for consumer_op in list(tensor.consumers()):
update_input.append((consumer_op, list(consumer_op.inputs).index(tensor)))
update_control_input = control_cache.get_control_outputs(tensor.op)
name_scope = tensor.op.name + '/subscription/'
with ops.name_scope(name_scope):
outs = []
for s in side_effects:
outs += s(tensor)
with ops.control_dependencies(outs):
out = array_ops.identity(tensor)
for consumer_op, index in update_input:
consumer_op._update_input(index, out)
for consumer_op in update_control_input:
new_control_inputs = consumer_op.control_inputs
if tensor.op in new_control_inputs:
new_control_inputs.remove(tensor.op)
new_control_inputs.append(out.op)
consumer_op._remove_all_control_inputs()
consumer_op._add_control_inputs(new_control_inputs)
return out
|
Helper method that subscribes a single tensor to a list of side_effects.
Args:
tensor: `tf.Tensor`
side_effects: List of side_effect functions see subscribe for details.
control_cache: `_ControlOutputCache` helper to get control_outputs faster.
Returns:
The modified replacement to the passed in tensor which triggers the side
effects.
|
github-repos
|
def typed_dict_error(self, stack, obj, name):
if name:
err_msg = f'TypedDict {obj.class_name} does not contain key {name}'
else:
err_msg = f'TypedDict {obj.class_name} requires all keys to be constant strings'
self.error(stack, err_msg)
|
Accessing a nonexistent key in a typed dict.
Args:
stack: the frame stack
obj: the typed dict instance
name: the key name
|
github-repos
|
def _create_deployment_object(self, job_name, job_image,
deployment_name, port=80,
replicas=1,
cmd_string=None,
engine_json_file='~/.ipython/profile_default/security/ipcontroller-engine.json',
engine_dir='.'):
security_context = None
if 'security' in self.config['execution']:
security_context = client.V1SecurityContext(run_as_group=self.group_id,
run_as_user=self.user_id,
run_as_non_root=self.run_as_non_root)
environment_vars = client.V1EnvVar(name="TEST", value="SOME DATA")
launch_args = ["-c", "{0}; /app/deploy.sh;".format(cmd_string)]
print(launch_args)
container = None
if security_context:
container = client.V1Container(
name=job_name,
image=job_image,
ports=[client.V1ContainerPort(container_port=port)],
command=['/bin/bash'],
args=launch_args,
env=[environment_vars],
security_context=security_context)
else:
container = client.V1Container(
name=job_name,
image=job_image,
ports=[client.V1ContainerPort(container_port=port)],
command=['/bin/bash'],
args=launch_args,
env=[environment_vars])
secret = None
if self.secret:
secret = client.V1LocalObjectReference(name=self.secret)
template = client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(labels={"app": job_name}),
spec=client.V1PodSpec(containers=[container], image_pull_secrets=[secret]))
spec = client.ExtensionsV1beta1DeploymentSpec(replicas=replicas,
template=template)
deployment = client.ExtensionsV1beta1Deployment(
api_version="extensions/v1beta1",
kind="Deployment",
metadata=client.V1ObjectMeta(name=deployment_name),
spec=spec)
return deployment
|
Create a kubernetes deployment for the job.
Args:
- job_name (string) : Name of the job and deployment
- job_image (string) : Docker image to launch
KWargs:
- port (integer) : Container port
- replicas : Number of replica containers to maintain
Returns:
- deployment: The deployment object to launch
|
juraj-google-style
|
def get_device(self, addr_or_name):
if addr_or_name in self._devices:
return self._devices[addr_or_name]
for v in self._devices.values():
if v == addr_or_name:
return v
return None
|
Retrieve a device with a given address or name from the results.
Args:
addr_or_name (str): a string containing either a BLE address in xx:xx:xx:xx:xx:xx
format, or a plain device name. The supplied value is checked as an address
first and if that fails to produce a result, it is matched against each
named device in the collection.
Returns:
The first matching :class:`ScanResult` instance, or None.
|
juraj-google-style
|
def validate_default_element(self, value):
if isinstance(value, (six.string_types, six.integer_types)):
if self.__type:
self.__type(value)
return value
return super(EnumField, self).validate_default_element(value)
|
Validate default element of Enum field.
Enum fields allow for delayed resolution of default values
when the type of the field has not been resolved. The default
value of a field may be a string or an integer. If the Enum
type of the field has been resolved, the default value is
validated against that type.
Args:
value: Value to validate.
Raises:
ValidationError if value is not expected message type.
|
codesearchnet
|
def get_components(edges, vertices=None):
if vertices is None:
        vertices = set(chain(edges.iloc[:, 0], edges.iloc[:, 1]))
visited = set()
components = []
for id in vertices:
if id not in visited:
c = follow(id, edges)
visited.update(c)
components.append(c)
return components
|
Return connected components from graph determined by edges matrix
Args:
edges: DataFrame of (undirected) edges.
vertices: set of vertices in graph. Defaults to union of all vertices in edges.
Returns:
list of connected components, each of which is a set of vertices.
|
juraj-google-style
|
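A small sketch with a hypothetical edge list; follow() is assumed to return the set of vertices reachable from a starting vertex:

import pandas as pd

edges = pd.DataFrame([(1, 2), (2, 3), (4, 5)], columns=['src', 'dst'])
components = get_components(edges)
# -> [{1, 2, 3}, {4, 5}] (component order may vary)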
def get_layer(self, name=None, index=None):
if index is not None and name is not None:
raise ValueError(f'Provide only a layer name or a layer index. Received: index={index}, name={name}.')
if index is not None:
if len(self.layers) <= index:
raise ValueError(f'Was asked to retrieve layer at index {index} but model only has {len(self.layers)} layers.')
else:
return self.layers[index]
if name is not None:
for layer in self.layers:
if layer.name == name:
return layer
raise ValueError(f'No such layer: {name}. Existing layers are: {list((layer.name for layer in self.layers))}.')
raise ValueError('Provide either a layer name or layer index at `get_layer`.')
|
Retrieves a layer based on either its name (unique) or index.
If `name` and `index` are both provided, `index` will take precedence.
Indices are based on order of horizontal graph traversal (bottom-up).
Args:
name: String, name of layer.
index: Integer, index of layer.
Returns:
A layer instance.
|
github-repos
|
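A usage sketch for get_layer with a small Sequential model; the layer names are illustrative:

import keras

model = keras.Sequential([
    keras.layers.Dense(8, name='hidden'),
    keras.layers.Dense(1, name='output'),
])
model.build((None, 4))
print(model.get_layer('hidden'))   # lookup by name
print(model.get_layer(index=1))    # lookup by index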