code (string, 20 to 4.93k chars) | docstring (string, 33 to 1.27k chars) | source (3 classes) |
---|---|---|
def _ReportSameIdButNotMerged(self, entity_id, reason):
self.feed_merger.problem_reporter.SameIdButNotMerged(self,
entity_id,
reason)
|
Report that two entities have the same id but could not be merged.
Args:
entity_id: The id of the entities.
reason: A string giving a reason why they could not be merged.
|
juraj-google-style
|
def convert_hardtanh(params, w_name, scope_name, inputs, layers, weights, names):
print('Converting hardtanh (clip) ...')
def target_layer(x, max_val=float(params['max_val']), min_val=float(params['min_val'])):
return tf.minimum(max_val, tf.maximum(min_val, x))
lambda_layer = keras.layers.Lambda(target_layer)
layers[scope_name] = lambda_layer(layers[inputs[0]])
|
Convert hardtanh layer.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
|
codesearchnet
|
def serialize_attrs(self, *args):
cls = type(self)
result = {}
for a in args:
if hasattr(cls, a) and a not in cls.attrs_forbidden_for_serialization():
val = getattr(self, a)
if is_list_like(val):
result[a] = list(val)
else:
result[a] = val
return result
|
Converts an instance to a dictionary with only the specified
attributes as keys
Args:
*args (list): The attributes to serialize
Examples:
>>> customer = Customer.create(name="James Bond", email="007@mi.com",
phone="007", city="London")
>>> customer.serialize_attrs('name', 'email')
{'name': u'James Bond', 'email': u'007@mi.com'}
|
juraj-google-style
|
def find_stable_entry(self, pH, V):
energies_at_conditions = [e.normalized_energy_at_conditions(pH, V)
for e in self.stable_entries]
return self.stable_entries[np.argmin(energies_at_conditions)]
|
Finds the stable entry at a given pH, V condition.
Args:
pH (float): pH at which to find the stable entry
V (float): potential (V) at which to find the stable entry
Returns:
The stable entry with the lowest normalized energy at the given pH and V.
|
juraj-google-style
|
def get_formatted_as_type(self, value, default=None, out_type=str):
if value is None:
value = default
if isinstance(value, SpecialTagDirective):
result = value.get_value(self)
return types.cast_to_type(result, out_type)
if isinstance(value, str):
result = self.get_formatted_string(value)
result_type = type(result)
if out_type is result_type:
return result
elif out_type is bool and result_type is str:
return result.lower() in ['true', '1', '1.0']
else:
return out_type(result)
else:
return out_type(value)
|
Return formatted value for input value, returns as out_type.
Caveat emptor: if out_type is bool and value is a string, the return
will be True only if the string is 'True', '1' or '1.0' (case-insensitive);
it will be False for all other strings.
Args:
value: the value to format
default: if value is None, set to this
out_type: cast return as this type
Returns:
Formatted value of type out_type
|
juraj-google-style
|
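A minimal standalone sketch of the string-to-bool rule described in the caveat above; the helper name `cast_bool_like` is hypothetical and not part of the library:

```python
def cast_bool_like(value: str) -> bool:
    # Mirrors get_formatted_as_type's rule: only 'true', '1' and '1.0'
    # (case-insensitive) map to True; every other string maps to False.
    return value.lower() in ['true', '1', '1.0']

assert cast_bool_like('True') is True
assert cast_bool_like('1.0') is True
assert cast_bool_like('yes') is False      # not in the accepted list
assert cast_bool_like('False') is False
```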
def _AlignUncompressedDataOffset(self, uncompressed_data_offset):
self._file_object.seek(0, os.SEEK_SET)
self._decompressor = self._GetDecompressor()
self._uncompressed_data = b''
compressed_data_offset = 0
compressed_data_size = self._file_object.get_size()
while compressed_data_offset < compressed_data_size:
read_count = self._ReadCompressedData(self._COMPRESSED_DATA_BUFFER_SIZE)
if read_count == 0:
break
compressed_data_offset += read_count
if uncompressed_data_offset < self._uncompressed_data_size:
self._uncompressed_data_offset = uncompressed_data_offset
break
uncompressed_data_offset -= self._uncompressed_data_size
|
Aligns the compressed file with the uncompressed data offset.
Args:
uncompressed_data_offset (int): uncompressed data offset.
|
juraj-google-style
|
def get_effective_ecs(self, strain, order=2):
ec_sum = 0
for (n, ecs) in enumerate(self[(order - 2):]):
ec_sum += (ecs.einsum_sequence(([strain] * n)) / factorial(n))
return ec_sum
|
Returns the effective elastic constants
from the elastic tensor expansion.
Args:
strain (Strain or 3x3 array-like): strain condition
under which to calculate the effective constants
order (int): order of the ecs to be returned
|
codesearchnet
|
def excel_to_dict(excel_filepath, encapsulate_filepath=False, **kwargs):
result = {}
try:
callbacks = {'to_dictlist': excel_todictlist}
callbacks.update(kwargs.get('alt_callbacks', {}))
excel_data = callbacks.get('to_dictlist')(excel_filepath, **kwargs)
for sheet in excel_data.keys():
try:
kwargs['rows'] = excel_data.get(sheet, [])
result[sheet] = csv_to_dict(excel_filepath, **kwargs)
except Exception as ex:
logger.error('Fail to parse sheet {} - {}'.format(sheet, ex))
result[sheet] = []
continue
if encapsulate_filepath:
result = {excel_filepath: result}
except Exception as ex:
msg = 'Fail transform excel to dict - {}'.format(ex)
logger.error(msg, excel_filepath=excel_filepath)
return result
|
Turn an Excel file into a dict of sheets.
Args:
:excel_filepath: path to the Excel file to turn into a dict.
:encapsulate_filepath: if True, wrap the result in a dict keyed by the file path.
|
juraj-google-style
|
def reqTickers(self, *contracts: List[Contract], regulatorySnapshot: bool=False) -> List[Ticker]:
return self._run(self.reqTickersAsync(*contracts, regulatorySnapshot=regulatorySnapshot))
|
Request and return a list of snapshot tickers.
The list is returned when all tickers are ready.
This method is blocking.
Args:
contracts: Contracts to get tickers for.
regulatorySnapshot: Request NBBO snapshots (may incur a fee).
|
codesearchnet
|
def delete(self, file_path, branch, commit_message, **kwargs):
path = ('%s/%s' % (self.path, file_path.replace('/', '%2F')))
data = {'branch': branch, 'commit_message': commit_message}
self.gitlab.http_delete(path, query_data=data, **kwargs)
|
Delete a file on the server.
Args:
file_path (str): Path of the file to remove
branch (str): Branch from which the file will be removed
commit_message (str): Commit message for the deletion
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabDeleteError: If the server cannot perform the request
|
codesearchnet
|
def info(self, **kwargs):
path = self._get_path('info')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
|
Get the system wide configuration info.
Returns:
A dict representation of the JSON returned from the API.
|
codesearchnet
|
def content(self):
if (not self._content_data):
if is_seekable(self.file):
with wpull.util.reset_file_offset(self.file):
self._content_data = self.file.read()
else:
self._content_data = self.file.read()
return self._content_data
|
Return the content of the file.
If this function is invoked, the contents of the entire file are read
and cached.
Returns:
``bytes``: The entire content of the file.
|
codesearchnet
|
def check_spec(self, pos_args, kwargs=None):
if (kwargs is None):
kwargs = {}
if ((self.varargs is not None) or (self.kwargs is not None)):
raise InternalError('check_spec cannot be called on a function that takes *args or **kwargs')
missing = object()
arg_vals = ([missing] * len(self.arg_names))
kw_indices = {name: i for (i, name) in enumerate(self.arg_names)}
for (i, arg) in enumerate(pos_args):
if (i >= len(arg_vals)):
raise ArgumentError(('Too many positional arguments, first excessive argument=%s' % str(arg)))
arg_vals[i] = arg
for (arg, val) in kwargs.items():
index = kw_indices.get(arg)
if (index is None):
raise ArgumentError(('Cannot find argument by name: %s' % arg))
if (arg_vals[index] is not missing):
raise ValidationError(('Argument %s passed twice' % arg))
arg_vals[index] = val
if (len(self.arg_defaults) > 0):
for i in range(0, len(self.arg_defaults)):
neg_index = ((- len(self.arg_defaults)) + i)
if (arg_vals[neg_index] is missing):
arg_vals[neg_index] = self.arg_defaults[i]
if (missing in arg_vals):
index = arg_vals.index(missing)
raise ArgumentError(('Missing a required argument (position: %d, name: %s)' % (index, self.arg_names[index])))
return {name: val for (name, val) in zip(self.arg_names, arg_vals)}
|
Check if there are any missing or duplicate arguments.
Args:
pos_args (list): A list of arguments that will be passed as positional
arguments.
kwargs (dict): A dictionary of the keyword arguments that will be passed.
Returns:
dict: A dictionary of argument name to argument value, pulled from either
the value passed or the default value if no argument is passed.
Raises:
ArgumentError: If a positional or keyword argument does not fit in the spec.
ValidationError: If an argument is passed twice.
|
codesearchnet
|
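As an aside, the missing-value sentinel pattern used by `check_spec` can be illustrated with a minimal standalone sketch (the names below are illustrative, not part of the library):

```python
MISSING = object()  # unique sentinel that cannot collide with any real argument value

def bind_args(arg_names, defaults, pos_args, kwargs):
    values = [MISSING] * len(arg_names)
    for i, arg in enumerate(pos_args):
        values[i] = arg
    for name, val in kwargs.items():
        values[arg_names.index(name)] = val
    # defaults fill the trailing positions, as in check_spec above
    for offset, default in enumerate(defaults, start=len(arg_names) - len(defaults)):
        if values[offset] is MISSING:
            values[offset] = default
    return dict(zip(arg_names, values))

print(bind_args(['x', 'y', 'z'], [10], [1], {'y': 2}))  # {'x': 1, 'y': 2, 'z': 10}
```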
def _is_device_list_single_worker(devices):
specs = []
for d in devices:
name = d.name if isinstance(d, context.LogicalDevice) else d
specs.append(tf_device.DeviceSpec.from_string(name))
num_workers = len({(d.job, d.task, d.replica) for d in specs})
all_local = all((d.job in (None, 'localhost') for d in specs))
any_local = any((d.job in (None, 'localhost') for d in specs))
if any_local and (not all_local):
raise ValueError("Local device should have only 'localhost' in the job field in device string. E.g. 'job:localhost' in /job:localhost/replica:0/task:0/device:CPU:0Devices cannot have mixed list of device strings containing both localhost and other job types such as worker, ps etc. ")
if num_workers == 1 and (not all_local):
if any((d.task is None for d in specs)):
raise ValueError("Remote device string must have task specified.E.g. 'task:0' in /job:worker/replica:0/task:0/device:CPU:0")
return num_workers == 1
|
Checks whether the devices list is for single or multi-worker.
Args:
devices: a list of device strings or tf.config.LogicalDevice objects, for
either local or for remote devices.
Returns:
a boolean indicating whether these device strings are for local or for
remote.
Raises:
ValueError: if device strings are not consistent.
|
github-repos
|
def save_target_classes_for_batch(self,
filename,
image_batches,
batch_id):
images = image_batches.data[batch_id]['images']
with open(filename, 'w') as f:
for image_id, image_val in iteritems(images):
target_class = self.get_target_class(image_val['dataset_image_id'])
f.write('{0}.png,{1}\n'.format(image_id, target_class))
|
Saves file with target class for given dataset batch.
Args:
filename: output filename
image_batches: instance of ImageBatchesBase with dataset batches
batch_id: dataset batch ID
|
juraj-google-style
|
def close(self, file_des):
file_handle = self.filesystem.get_open_file(file_des)
file_handle.close()
|
Close a file descriptor.
Args:
file_des: An integer file descriptor for the file object requested.
Raises:
OSError: bad file descriptor.
TypeError: if file descriptor is not an integer.
|
juraj-google-style
|
def save_exported_model(self, dst_saved_model_path: str, exported_model_serialized: bytes, src_saved_model_path: str, tags: set[str], serialized_signature_def_map: dict[str, bytes]) -> Optional[bool]:
exported_model = exported_model_pb2.ExportedModel.FromString(exported_model_serialized)
signature_def_map = {}
for key, serialized_signature_def in serialized_signature_def_map.items():
signature_def_map[key] = meta_graph_pb2.SignatureDef.FromString(serialized_signature_def)
return _call_and_return_none_on_error(func=functools.partial(_save_model_and_copy_assets, exported_model, src_saved_model_path, dst_saved_model_path, signature_def_map, tags), error_msg=f'Failed to save model "{dst_saved_model_path}", signature_def_map: {signature_def_map}, tags: {tags}.')
|
Saves `ExportedModel` to `dst_saved_model_path` as a SavedModel.
Args:
dst_saved_model_path: Destination path to save the exported model.
exported_model_serialized: Exported model to export as SavedModel.
src_saved_model_path: Path to the source SavedModel. This will be used to
copy the asset files to `dst_saved_model_path`.
tags: Tags to attach to the saved MetaGraphDef.
serialized_signature_def_map: Signature key -> serialized SignatureDef.
Returns:
`True` upon successful execution. `None` when an error is raised
internally.
|
github-repos
|
def ParseCloudEntryRow(self, parser_mediator, query, row, cache=None, database=None, **unused_kwargs):
query_hash = hash(query)
parent_resource_id = self._GetRowValue(query_hash, row, 'parent_resource_id')
filename = self._GetRowValue(query_hash, row, 'filename')
cloud_path = self.GetCloudPath(parent_resource_id, cache, database)
cloud_filename = '{0:s}{1:s}'.format(cloud_path, filename)
event_data = GoogleDriveSnapshotCloudEntryEventData()
event_data.document_type = self._GetRowValue(query_hash, row, 'doc_type')
event_data.path = cloud_filename
event_data.query = query
event_data.shared = bool(self._GetRowValue(query_hash, row, 'shared'))
event_data.size = self._GetRowValue(query_hash, row, 'size')
event_data.url = self._GetRowValue(query_hash, row, 'url')
timestamp = self._GetRowValue(query_hash, row, 'modified')
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
timestamp = self._GetRowValue(query_hash, row, 'created')
if timestamp:
date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses a cloud entry row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
cache (SQLiteCache): cache.
database (SQLiteDatabase): database.
|
codesearchnet
|
def poisson_objective(X, m, w):
clusters, cells = w.shape
genes = X.shape[0]
d = m.dot(w)+eps
return np.sum(d - X*np.log(d))/genes
|
Computes the Poisson objective for M, given W and X.
Args:
X (array): genes x cells
m (array): genes x clusters
w (array): clusters x cells
Returns:
float: the Poisson objective value, averaged over genes.
|
juraj-google-style
|
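A small numeric sketch of how `poisson_objective` might be called, assuming `eps` is a small module-level constant (e.g. 1e-10) as implied by the code:

```python
import numpy as np

genes, cells, clusters = 5, 4, 2
X = np.random.poisson(2.0, size=(genes, cells)).astype(float)  # genes x cells
m = np.random.rand(genes, clusters)                            # genes x clusters
w = np.random.rand(clusters, cells)                            # clusters x cells
print(poisson_objective(X, m, w))  # scalar objective, averaged over genes
```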
def InitializeDebuggeeLabels(self, flags):
self._debuggee_labels = {}
for (label, var_names) in six.iteritems(_DEBUGGEE_LABELS):
for name in var_names:
value = os.environ.get(name)
if value:
if label == labels.Debuggee.MODULE and value == 'default':
break
self._debuggee_labels[label] = value
break
if flags:
self._debuggee_labels.update(
{name: value for (name, value) in six.iteritems(flags)
if name in _DEBUGGEE_LABELS})
self._debuggee_labels['projectid'] = self._project_id
|
Initialize debuggee labels from environment variables and flags.
The caller passes all the flags that the debuglet got. This function
will only use the flags used to label the debuggee. Flags take precedence
over environment variables.
Debuggee description is formatted from available flags.
Args:
flags: dictionary of debuglet command line flags.
|
juraj-google-style
|
def probe_async(self, callback):
topics = MQTTTopicValidator(self.prefix)
self.client.publish(topics.probe, {'type': 'command', 'operation': 'probe', 'client': self.name})
callback(self.id, True, None)
|
Probe for visible devices connected to this DeviceAdapter.
Args:
callback (callable): A callback for when the probe operation has completed.
callback should have signature callback(adapter_id, success, failure_reason) where:
success: bool
failure_reason: None if success is True, otherwise a reason for why we could not probe
|
codesearchnet
|
def for_loop(loop_fn, loop_fn_dtypes, iters, parallel_iterations=None):
flat_loop_fn_dtypes = nest.flatten(loop_fn_dtypes)
is_none_list = []
def while_body(i, *ta_list):
fn_conv = autograph.tf_convert(loop_fn, autograph_ctx.control_status_ctx())
fn_output = nest.flatten(fn_conv(i))
if len(fn_output) != len(flat_loop_fn_dtypes):
raise ValueError(f'Number of expected outputs {len(flat_loop_fn_dtypes)}, does not match the number of actual outputs {len(fn_output)} from loop_fn: {loop_fn} with output {fn_output}.')
outputs = []
del is_none_list[:]
is_none_list.extend((x is None for x in fn_output))
for out, ta in zip(fn_output, ta_list):
if out is not None:
ta = ta.write(i, out)
outputs.append(ta)
return tuple([i + 1] + outputs)
if parallel_iterations is not None:
extra_args = {'parallel_iterations': parallel_iterations}
else:
extra_args = {}
ta_list = while_loop.while_loop(lambda i, *ta: i < iters, while_body, [0] + [tensor_array_ops.TensorArray(dtype.base_dtype, iters) for dtype in flat_loop_fn_dtypes], **extra_args)[1:]
output = [None if is_none else ta.stack() for ta, is_none in zip(ta_list, is_none_list)]
assert len(output) in (0, len(flat_loop_fn_dtypes))
if not output:
loop_var = array_ops.placeholder_with_default(0, shape=[])
try:
loop_fn_out = loop_fn(loop_var)
out_shapes = [[0] + ops.convert_to_tensor(x).shape for x in nest.flatten(loop_fn_out)]
output = [array_ops.zeros(out_shapes[i], dt) for i, dt in enumerate(flat_loop_fn_dtypes)]
except Exception:
output = [array_ops.zeros([0])]
return nest.pack_sequence_as(loop_fn_dtypes, output)
|
Runs `loop_fn` `iters` times and stacks the outputs.
Runs `loop_fn` `iters` times, with input values from 0 to `iters - 1`, and
stacks corresponding outputs of the different runs.
Args:
loop_fn: A function that takes an int32 scalar tf.Tensor object representing
the iteration number, and returns a possibly nested structure of tensor
objects. The shape of these outputs should not depend on the input.
loop_fn_dtypes: dtypes for the outputs of `loop_fn`.
iters: Number of iterations for which to run `loop_fn`.
parallel_iterations: The number of iterations that can be dispatched in
parallel. This knob can be used to control the total memory usage.
Returns:
Returns a nested structure of stacked output tensor objects with the same
nested structure as the output of `loop_fn`.
|
github-repos
|
def write_to_file_by_name(folder, fname, data, mkdir=False):
if (not os.path.isdir(folder)):
if mkdir:
preparedir(folder)
else:
created = preparedir(folder, False)
if (not created):
raise ValueError(('Failed to find %s.' % folder))
file_path = os.path.join(folder, fname)
with open(file_path, writemode) as outf:
try:
outf.write(str(data))
return file_path
except Exception as e:
raise IOError(('Failed to write %s to file:\n\t%s' % (fname, str(e))))
|
Write a string of data to file by filename and folder.
Args:
folder: Target folder (e.g. c:/ladybug).
fname: File name (e.g. testPts.pts).
data: Any data as string.
mkdir: Set to True to create the directory if doesn't exist (Default: False).
|
codesearchnet
|
def _add_string_to_commastring(self, field, string):
if (string in self._get_stringlist_from_commastring(field)):
return False
strings = ('%s,%s' % (self.data.get(field, ''), string))
if (strings[0] == ','):
strings = strings[1:]
self.data[field] = strings
return True
|
Add a string to a comma separated list of strings
Args:
field (str): Field containing comma separated list
string (str): String to add
Returns:
bool: True if string added or False if string already present
|
codesearchnet
|
def update_hash_with_array(hash_value, int_array):
if int_array is not None:
for i in int_array:
hash_value = update_hash_with_primitive_value(hash_value, i)
return hash_value
|
Update the hash value using a TFLite int array.
Args:
hash_value (int): The current hash value.
int_array: A TFLite int array to incorporate into the hash.
Returns:
int: The updated hash value.
|
github-repos
|
def DisplayTree(node, children, level=0):
value = ''
node_type = ''
if ('caseValue' in node):
case_value = node['caseValue']
node_type = case_value['ProductDimension.Type']
if (node_type == 'ProductCanonicalCondition'):
value = (case_value['condition'] if ('condition' in case_value) else 'OTHER')
elif (node_type == 'ProductBiddingCategory'):
value = ('%s(%s)' % (case_value['type'], (case_value['value'] if ('value' in case_value) else 'OTHER')))
else:
value = (case_value['value'] if ('value' in case_value) else 'OTHER')
print(('%sid: %s, node_type: %s, value: %s\n' % ((' ' * level), node['id'], node_type, value)))
for child_node in children[node['id']]:
DisplayTree(child_node, children, (level + 1))
|
Recursively display a node and each of its children.
Args:
node: The node we're displaying the children of.
children: Children of the parent node.
level: How deep in the tree we are.
|
codesearchnet
|
def get_block(self, block_name):
return self.new(self.data.loc[(block_name, slice(None)), :])
|
getblock: fetch a market block/sector; block_name can be a list or a single str
Arguments:
block_name {[type]} -- [description]
Returns:
[type] -- [description]
|
juraj-google-style
|
def year(self, value=None):
if (value is not None):
try:
value = int(value)
except ValueError:
raise ValueError('value {} need to be of type int for field `year`'.format(value))
self._year = value
|
Corresponds to IDD Field `year`
Args:
value (int): value for IDD Field `year`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
codesearchnet
|
def db010(self, value=None):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `db010`'.format(value))
self._db010 = value
|
Corresponds to IDD Field `db010`
Dry-bulb temperature corresponding to 1.0% annual cumulative frequency of occurrence (warm conditions)
Args:
value (float): value for IDD Field `db010`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|
def GetAPIScope(api_name):
try:
return SCOPES[api_name]
except KeyError:
raise googleads.errors.GoogleAdsValueError(
'Invalid API name "%s" provided. Acceptable values are: %s' %
(api_name, SCOPES.keys()))
|
Retrieves the scope for the given API name.
Args:
api_name: A string identifying the name of the API we want to retrieve a
scope for.
Returns:
A string that is the scope for the given API name.
Raises:
GoogleAdsValueError: If the given api_name is invalid; accepted values are
"adwords" and "ad_manager".
|
juraj-google-style
|
def call(self, input_ids=None, position_ids=None, inputs_embeds=None, training=False):
assert not (input_ids is None and inputs_embeds is None)
if input_ids is not None:
check_embeddings_within_bounds(input_ids, self.config.vocab_size)
inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
input_shape = shape_list(inputs_embeds)[:-1]
if position_ids is None:
if input_ids is not None:
position_ids = self.create_position_ids_from_input_ids(input_ids=input_ids)
else:
position_ids = tf.expand_dims(tf.range(start=self.padding_idx + 1, limit=input_shape[-1] + self.padding_idx + 1), axis=0)
position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
final_embeddings = inputs_embeds + position_embeds
final_embeddings = self.LayerNorm(inputs=final_embeddings)
final_embeddings = self.dropout(inputs=final_embeddings, training=training)
return final_embeddings
|
Applies embedding based on inputs tensor.
Returns:
final_embeddings (`tf.Tensor`): output embedding tensor.
|
github-repos
|
def GetAllPluginInformation(cls, show_all=True):
results = []
for plugin_class in iter(cls._plugin_classes.values()):
plugin_object = plugin_class()
if not show_all and not plugin_class.ENABLE_IN_EXTRACTION:
continue
doc_string, _, _ = plugin_class.__doc__.partition('\n')
type_string = cls._PLUGIN_TYPE_STRINGS.get(plugin_object.plugin_type)
information_tuple = (plugin_object.plugin_name, doc_string, type_string)
results.append(information_tuple)
return sorted(results)
|
Retrieves a list of the registered analysis plugins.
Args:
show_all (Optional[bool]): True if all analysis plugin names should
be listed.
Returns:
list[tuple[str, str, str]]: the name, docstring and type string of each
analysis plugin in alphabetical order.
|
juraj-google-style
|
def add_oxidation_state_by_site(self, oxidation_states):
if (len(oxidation_states) != len(self.sites)):
raise ValueError('Oxidation states of all sites must be specified.')
for (site, ox) in zip(self.sites, oxidation_states):
new_sp = {}
for (el, occu) in site.species.items():
sym = el.symbol
new_sp[Specie(sym, ox)] = occu
site.species = new_sp
|
Add oxidation states to a structure by site.
Args:
oxidation_states (list): List of oxidation states.
E.g., [1, 1, 1, 1, 2, 2, 2, 2, 5, 5, 5, 5, -2, -2, -2, -2]
|
codesearchnet
|
def copy_submission_to_destination(self, src_filename, dst_subdir,
submission_id):
extension = [e for e in ALLOWED_EXTENSIONS if src_filename.endswith(e)]
if len(extension) != 1:
logging.error('Invalid submission extension: %s', src_filename)
return
dst_filename = os.path.join(self.target_dir, dst_subdir,
submission_id + extension[0])
cmd = ['gsutil', 'cp', src_filename, dst_filename]
if subprocess.call(cmd) != 0:
logging.error('Can\'t copy submission to destination')
else:
logging.info('Submission copied to: %s', dst_filename)
|
Copies submission to target directory.
Args:
src_filename: source filename of the submission
dst_subdir: subdirectory of the target directory where submission should
be copied to
submission_id: ID of the submission, will be used as a new
submission filename (before extension)
|
juraj-google-style
|
def _create_batch(signer, transactions):
txn_ids = [txn.header_signature for txn in transactions]
batch_header = BatchHeader(
signer_public_key=signer.get_public_key().as_hex(),
transaction_ids=txn_ids).SerializeToString()
return Batch(
header=batch_header,
header_signature=signer.sign(batch_header),
transactions=transactions)
|
Creates a batch from a list of transactions and a public key, and signs
the resulting batch with the given signing key.
Args:
signer (:obj:`Signer`): The cryptographic signer
transactions (list of `Transaction`): The transactions to add to the
batch.
Returns:
`Batch`: The constructed and signed batch.
|
juraj-google-style
|
def plot_spectra_pieces_pdf(ss, aint=10, pdf_filename='pieces.pdf', setup=_default_setup):
import f311.explorer as ex
(xmin, xmax, ymin_, ymax, _, yspan) = calc_max_min(ss)
ymin = (ymin_ if (setup.ymin is None) else setup.ymin)
num_pages = int(math.ceil(((xmax - xmin) / aint)))
a99.format_BLB()
pdf = matplotlib.backends.backend_pdf.PdfPages(pdf_filename)
logger = a99.get_python_logger()
for h in range(num_pages):
fig = plt.figure()
lambda0 = (xmin + (h * aint))
lambda1 = (lambda0 + aint)
logger.info('Printing page {0:d}/{1:d} ([{2:g}, {3:g}])'.format((h + 1), num_pages, lambda0, lambda1))
for (i, s) in enumerate(ss):
s_cut = ex.cut_spectrum(s, lambda0, lambda1)
ax = plt.gca()
ax.plot(s_cut.x, s_cut.y, label=s.title)
if (setup.flag_xlabel and setup.fmt_xlabel):
plt.xlabel('Wavelength (interval: [{0:g}, {1:g}])'.format(lambda0, lambda1))
xspan = (lambda1 - lambda0)
ax.set_xlim([(lambda0 - (xspan * _T)), (lambda1 + (xspan * _T))])
ax.set_ylim([(ymin - (yspan * _T)), (ymax + (yspan * _T))])
if setup.flag_legend:
leg = plt.legend(loc=0)
a99.format_legend(leg)
plt.tight_layout()
pdf.savefig(fig)
plt.close()
pdf.close()
logger.info('File {0!s} successfully created.'.format(pdf_filename))
|
Plots spectra, overlapped, in small wavelength intervals into a PDF file,
one interval per page of the PDF file.
Args:
ss: list of Spectrum objects
aint: wavelength interval for each plot
pdf_filename: name of output file
setup: PlotSpectrumSetup object
**Note** overrides setup.fmt_xlabel; leaves y-label and title blank
|
codesearchnet
|
def remove_token(self, *, payer_id, credit_card_token_id):
payload = {
"language": self.client.language.value,
"command": PaymentCommand.REMOVE_TOKEN.value,
"merchant": {
"apiLogin": self.client.api_login,
"apiKey": self.client.api_key
},
"removeCreditCardToken": {
"payerId": payer_id,
"creditCardTokenId": credit_card_token_id
},
"test": self.client.is_test
}
return self.client._post(self.url, json=payload)
|
This feature allows you to delete a stored (tokenized) credit card record.
Args:
payer_id:
credit_card_token_id:
Returns:
|
juraj-google-style
|
def coord2healpix(coords, frame, nside, nest=True):
if (coords.frame.name != frame):
c = coords.transform_to(frame)
else:
c = coords
if hasattr(c, 'ra'):
phi = c.ra.rad
theta = ((0.5 * np.pi) - c.dec.rad)
return hp.pixelfunc.ang2pix(nside, theta, phi, nest=nest)
elif hasattr(c, 'l'):
phi = c.l.rad
theta = ((0.5 * np.pi) - c.b.rad)
return hp.pixelfunc.ang2pix(nside, theta, phi, nest=nest)
elif hasattr(c, 'x'):
return hp.pixelfunc.vec2pix(nside, c.x.kpc, c.y.kpc, c.z.kpc, nest=nest)
elif hasattr(c, 'w'):
return hp.pixelfunc.vec2pix(nside, c.w.kpc, c.u.kpc, c.v.kpc, nest=nest)
else:
raise dustexceptions.CoordFrameError('No method to transform from coordinate frame "{}" to HEALPix.'.format(frame))
|
Calculate HEALPix indices from an astropy SkyCoord. Assume the HEALPix
system is defined on the coordinate frame ``frame``.
Args:
coords (:obj:`astropy.coordinates.SkyCoord`): The input coordinates.
frame (:obj:`str`): The frame in which the HEALPix system is defined.
nside (:obj:`int`): The HEALPix nside parameter to use. Must be a power of 2.
nest (Optional[:obj:`bool`]): ``True`` (the default) if nested HEALPix ordering
is desired. ``False`` for ring ordering.
Returns:
An array of pixel indices (integers), with the same shape as the input
SkyCoord coordinates (:obj:`coords.shape`).
Raises:
:obj:`dustexceptions.CoordFrameError`: If the specified frame is not supported.
|
codesearchnet
|
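A usage sketch, assuming astropy and healpy (imported as `hp` in the module) are installed:

```python
from astropy.coordinates import SkyCoord
import astropy.units as u

coords = SkyCoord(ra=[10.5, 120.0] * u.deg, dec=[-45.0, 22.5] * u.deg, frame='icrs')
pix = coord2healpix(coords, 'icrs', nside=64, nest=True)
print(pix)  # array of two HEALPix pixel indices, same shape as `coords`
```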
def __init__(self, path):
if isinstance(path, list):
path = os.path.join(*path)
self.path = Path(path).resolve()
if not self.path.is_dir():
log.error("No path exists at {}".format(self.path))
err_msg = "Path '{}' is not a directory.".format(self.path)
raise NotADirectoryError(err_msg)
log.info("%d Serving static files out of %s" % (id(self), self.path))
|
Construct Static method.
Args:
path (str or list): The directory path to search for files.
If this is a list, the paths will be path-joined
automatically.
|
juraj-google-style
|
def get_ticks(self):
tick_distance = []
tick_labels = []
previous_label = self._bs.qpoints[0].label
previous_branch = self._bs.branches[0]['name']
for (i, c) in enumerate(self._bs.qpoints):
if (c.label is not None):
tick_distance.append(self._bs.distance[i])
this_branch = None
for b in self._bs.branches:
if (b['start_index'] <= i <= b['end_index']):
this_branch = b['name']
break
if ((c.label != previous_label) and (previous_branch != this_branch)):
label1 = c.label
if (label1.startswith('\\') or (label1.find('_') != (- 1))):
label1 = (('$' + label1) + '$')
label0 = previous_label
if (label0.startswith('\\') or (label0.find('_') != (- 1))):
label0 = (('$' + label0) + '$')
tick_labels.pop()
tick_distance.pop()
tick_labels.append(((label0 + '$\\mid$') + label1))
elif (c.label.startswith('\\') or (c.label.find('_') != (- 1))):
tick_labels.append((('$' + c.label) + '$'))
else:
tick_labels.append(c.label)
previous_label = c.label
previous_branch = this_branch
return {'distance': tick_distance, 'label': tick_labels}
|
Get all ticks and labels for a band structure plot.
Returns:
A dict with 'distance': a list of distance at which ticks should
be set and 'label': a list of label for each of those ticks.
|
codesearchnet
|
def temp45(msg):
d = hex2bin(data(msg))
sign = int(d[16])
value = bin2int(d[17:26])
if sign:
value = value - 512
temp = value * 0.25
temp = round(temp, 1)
return temp
|
Static air temperature.
Args:
msg (String): 28 bytes hexadecimal message string
Returns:
float: temperature in degrees Celsius
|
juraj-google-style
|
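The decoding arithmetic above (a sign bit followed by a 9-bit magnitude scaled by 0.25 °C) can be sketched standalone, without the library's hex2bin/bin2int helpers:

```python
def decode_temp(sign_bit: int, value_bits: int) -> float:
    # value_bits holds the 9-bit field that follows the sign bit in temp45
    value = value_bits - 512 if sign_bit else value_bits
    return round(value * 0.25, 1)

print(decode_temp(0, 180))  # 45.0 degrees Celsius
print(decode_temp(1, 452))  # -15.0 degrees Celsius (452 - 512 = -60, times 0.25)
```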
def calibrate_and_quantize_single(self, dataset_gen, input_type, output_type, allow_float, op_output_name, resize_input=True):
self._feed_tensors(dataset_gen, resize_input)
return self._calibrator.QuantizeModel(np.dtype(input_type.as_numpy_dtype()).num, np.dtype(output_type.as_numpy_dtype()).num, allow_float, op_output_name)
|
Calibrates the model with specified generator and then quantizes it.
Only the single op with output op_output_name will be quantized.
The input shapes of the calibrator are resized with the calibration data.
Returns:
A quantized model.
Args:
dataset_gen: A generator that generates calibration samples.
input_type: A tf.dtype representing the desired real-value input type.
output_type: A tf.dtype representing the desired real-value output type.
allow_float: A boolean. False if the resulting model cannot perform float
computation, useful when targeting an integer-only backend. If False, an
error will be thrown if an operation cannot be quantized, otherwise the
model will fallback to float ops.
op_output_name: A string, only this op will be quantized.
resize_input: A boolean. True if the shape of the sample data is different
from the input.
|
github-repos
|
def from_saved_model(cls, saved_model_dir, signature_keys=None, tags=None):
TFLiteConverterBase._set_original_model_type(conversion_metadata_fb.ModelType.TF_SAVED_MODEL)
if not context.executing_eagerly():
signature_key = None
if signature_keys:
if len(signature_keys) != 1:
raise ValueError('Only support a single signature key.')
else:
signature_key = signature_keys[0]
logging.warning('Invoking the TF1 implementation of TFLiteConverter because eager is disabled. Consider enabling eager.')
return TFLiteConverter.from_saved_model(saved_model_dir, signature_key=signature_key, tag_set=tags)
if tags is None:
tags = set([_tag_constants.SERVING])
with context.eager_mode():
saved_model = _load(saved_model_dir, tags)
if not signature_keys:
signature_keys = list(saved_model.signatures.keys())
if not signature_keys:
raise ValueError('Only support at least one signature key.')
if len(signature_keys) > 1 and hasattr(saved_model, 'serve') and (not hasattr(saved_model, '_default_save_signature')):
saved_model.serving_default = saved_model.serve
delattr(saved_model, 'serve')
signature_keys = ['serving_default']
funcs = []
for key in signature_keys:
if key not in saved_model.signatures:
raise ValueError("Invalid signature key '{}' found. Valid keys are '{}'.".format(key, ','.join(saved_model.signatures)))
funcs.append(saved_model.signatures[key])
saved_model_converter = TFLiteSavedModelConverterV2(saved_model_dir, tags, signature_keys)
if saved_model_converter.saved_model_dir:
return saved_model_converter
return cls(funcs, saved_model)
|
Creates a TFLiteConverter object from a SavedModel directory.
Args:
saved_model_dir: SavedModel directory to convert.
signature_keys: List of keys identifying SignatureDef containing inputs
and outputs. Elements should not be duplicated. By default the
`signatures` attribute of the MetaGraphdef is used. (default
saved_model.signatures)
tags: Set of tags identifying the MetaGraphDef within the SavedModel to
analyze. All tags in the tag set must be present. (default
{tf.saved_model.SERVING} or {'serve'})
Returns:
TFLiteConverter object.
Raises:
ValueError: If the signature keys are invalid.
|
github-repos
|
def get_tests_dir(append_path=None):
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith('tests'):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
|
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir if the former is provided.
|
github-repos
|
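A hypothetical usage, assuming the calling module lives somewhere under a `tests/` directory (the fixture path below is illustrative):

```python
# Resolve a fixture relative to the enclosing tests/ directory,
# independent of the current working directory.
fixture = get_tests_dir("fixtures/sample_config.json")
tests_root = get_tests_dir()  # just the tests/ directory itself
```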
def unpack(packet):
validate_packet(packet)
version = packet[0]
try:
pyof_lib = PYOF_VERSION_LIBS[version]
except KeyError:
raise UnpackException('Version not supported')
try:
message = pyof_lib.common.utils.unpack_message(packet)
return message
except (UnpackException, ValueError) as exception:
raise UnpackException(exception)
|
Unpack the OpenFlow Packet and returns a message.
Args:
packet: buffer with the openflow packet.
Returns:
GenericMessage: Message unpacked based on openflow packet.
Raises:
UnpackException: if the packet can't be unpacked.
|
juraj-google-style
|
def concatenate_unique(la, lb):
la_set = set(la)
for l in lb:
if l not in la_set:
la.append(l)
la_set.add(l)
return la
|
Add all the elements of `lb` to `la` if they are not there already.
The elements added to `la` maintain ordering with respect to `lb`.
Args:
la: List of Python objects.
lb: List of Python objects.
Returns:
`la`: The list `la` with missing elements from `lb`.
|
github-repos
|
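A quick usage example:

```python
a = [1, 2, 3]
b = [3, 4, 2, 5]
concatenate_unique(a, b)
print(a)  # [1, 2, 3, 4, 5] -- `a` is extended in place and also returned
```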
def IsDeletedOrDefault(clean_lines, linenum):
open_paren = clean_lines.elided[linenum].find('(')
if open_paren < 0:
return False
(close_line, _, close_paren) = CloseExpression(
clean_lines, linenum, open_paren)
if close_paren < 0:
return False
return Match(r'\s*=\s*(?:delete|default)\b', close_line[close_paren:])
|
Check if current constructor or operator is deleted or default.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if this is a deleted or default constructor.
|
juraj-google-style
|
def register_subcommand(parser: ArgumentParser):
dataclass_types = (ChatArguments,)
chat_parser = parser.add_parser('chat', dataclass_types=dataclass_types)
group = chat_parser.add_argument_group('Positional arguments')
group.add_argument('model_name_or_path_positional', type=str, default=None, help='Name of the pre-trained model.')
group.add_argument('generate_flags', type=str, default=None, help="Flags to pass to `generate`, using a space as a separator between flags. Accepts booleans, numbers, and lists of integers, more advanced parameterization should be set through --generation-config. Example: `transformers chat <model_repo> max_new_tokens=100 do_sample=False eos_token_id=[1,2]`. If you're a new user, check this basic flag guide: https:
chat_parser.set_defaults(func=chat_command_factory)
|
Register this command with argparse so it is available to the transformers CLI
Args:
parser: Root parser to register command-specific arguments
|
github-repos
|
def create_backup(name):
if (name in list_backups()):
raise CommandExecutionError('Backup already present: {0}'.format(name))
ps_cmd = ['Backup-WebConfiguration', '-Name', "'{0}'".format(name)]
cmd_ret = _srvmgr(ps_cmd)
if (cmd_ret['retcode'] != 0):
msg = 'Unable to backup web configuration: {0}\nError: {1}'.format(name, cmd_ret['stderr'])
raise CommandExecutionError(msg)
return (name in list_backups())
|
Backup an IIS Configuration on the System.
.. versionadded:: 2017.7.0
.. note::
Backups are stored in the ``$env:Windir\System32\inetsrv\backup``
folder.
Args:
name (str): The name to give the backup
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.create_backup good_config_20170209
|
codesearchnet
|
def filter_logits(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
logits = logits.clone()
top_k = min(top_k, logits.size(-1))
if top_k > 0:
indices_to_remove = logits < torch.topk(logits, top_k, dim=-1)[0][..., -1:]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cumulative_probs > top_p
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
indices_to_remove = torch.zeros_like(logits, dtype=torch.bool).scatter_(dim=-1, index=sorted_indices, src=sorted_indices_to_remove)
logits[indices_to_remove] = filter_value
return logits
|
Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits (`torch.Tensor`):
logits distribution shape (vocabulary size)
top_k (`int`, *optional*, defaults to 0):
When `top_k > 0`, keep only the top k tokens with highest probability (top-k filtering).
top_p (`int`, *optional*, defaults to 0):
When `top_p>0.0` keep the top tokens with cumulative probability >= `top_p` (nucleus filtering).
|
github-repos
|
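A small usage sketch, assuming PyTorch is available and the module-level imports the function relies on (`torch`, `F`) are in place:

```python
import torch

logits = torch.tensor([[2.0, 1.0, 0.5, -1.0, -3.0]])
top3 = filter_logits(logits, top_k=3)       # everything outside the top 3 becomes -inf
nucleus = filter_logits(logits, top_p=0.9)  # keep the smallest set covering 90% probability
probs = torch.softmax(top3, dim=-1)         # renormalise before sampling
```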
def dp996(self, value=None):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `dp996`'.format(value))
self._dp996 = value
|
Corresponds to IDD Field `dp996`
Dew-point temperature corresponding to 99.6% annual cumulative
frequency of occurrence (cold conditions)
Args:
value (float): value for IDD Field `dp996`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|
def exit_actor():
worker = ray.worker.global_worker
if ((worker.mode == ray.WORKER_MODE) and (not worker.actor_id.is_nil())):
worker.raylet_client.disconnect()
ray.disconnect()
ray.global_state.disconnect()
sys.exit(0)
assert False, 'This process should have terminated.'
else:
raise Exception('exit_actor called on a non-actor worker.')
|
Intentionally exit the current actor.
This function is used to disconnect an actor and exit the worker.
Raises:
Exception: An exception is raised if this is a driver or this
worker is not an actor.
|
codesearchnet
|
def freeze_matrix(script, all_layers=False):
filter_xml = ''.join([
' <filter name="Freeze Current Matrix">\n',
' <Param name="allLayers" ',
'value="%s" ' % str(all_layers).lower(),
'description="Apply to all visible Layers" ',
'type="RichBool" ',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
return None
|
Freeze the current transformation matrix into the coordinates of the
vertices of the mesh (and set this matrix to the identity).
In other words it applies in a definitive way the current matrix to the
vertex coordinates.
Args:
script: the FilterScript object or script filename to write
the filter to.
all_layers (bool): If selected the filter will be applied to all
visible mesh layers.
|
juraj-google-style
|
def _RawData(self, data):
if (not isinstance(data, dict)):
return data
result = collections.OrderedDict()
for (k, v) in iteritems(data):
result[k] = self._RawData(v)
return result
|
Convert data to common format.
Configuration options are normally grouped by the functional component which
defines them (e.g. Logging.path is the path parameter for the logging
subsystem). However, sometimes it is more intuitive to write the config as a
flat string (e.g. Logging.path). In this case we group all the flat strings
in their respective sections and create the sections automatically.
Args:
data: A dict of raw data.
Returns:
a dict in common format. Any keys in the raw data which have a "." in them
are separated into their own sections. This allows the config to be
written explicitly in dot notation instead of using a section.
|
codesearchnet
|
def log(level, msg, *args, **kwargs):
if (level > converter.ABSL_DEBUG):
standard_level = (converter.STANDARD_DEBUG - (level - 1))
else:
if (level < converter.ABSL_FATAL):
level = converter.ABSL_FATAL
standard_level = converter.absl_to_standard(level)
_absl_logger.log(standard_level, msg, *args, **kwargs)
|
Logs 'msg % args' at absl logging level 'level'.
If no args are given just print msg, ignoring any interpolation specifiers.
Args:
level: int, the absl logging level at which to log the message
(logging.DEBUG|INFO|WARNING|ERROR|FATAL). While some C++ verbose logging
level constants are also supported, callers should prefer explicit
logging.vlog() calls for such purpose.
msg: str, the message to be logged.
*args: The args to be substituted into the msg.
**kwargs: May contain exc_info to add exception traceback to message.
|
codesearchnet
|
def verified(self, institute_id):
query = {
'verb' : 'validate',
'institute' : institute_id,
}
res = []
validate_events = self.event_collection.find(query)
for validated in list(validate_events):
case_id = validated['case']
var_obj = self.variant(case_id=case_id, document_id=validated['variant_id'])
case_obj = self.case(case_id=case_id)
if not case_obj or not var_obj:
continue
var_obj['case_obj'] = {
'display_name' : case_obj['display_name'],
'individuals' : case_obj['individuals']
}
res.append(var_obj)
return res
|
Return all verified variants for a given institute
Args:
institute_id(str): institute id
Returns:
res(list): a list with validated variants
|
juraj-google-style
|
def on_fire(self, watermark, window, context):
pass
|
Called when a trigger actually fires.
Args:
watermark: (a lower bound on) the watermark of the system
window: the window whose trigger is being fired
context: a context (e.g. a TriggerContext instance) for managing state
and setting timers
Returns:
whether this trigger is finished
|
github-repos
|
def copy_buffer(self, dst, src, size=-1, *, read_offset=0, write_offset=0) -> None:
self.mglo.copy_buffer(dst.mglo, src.mglo, size, read_offset, write_offset)
|
Copy buffer content.
Args:
dst (Buffer): The destination buffer.
src (Buffer): The source buffer.
size (int): The number of bytes to copy.
Keyword Args:
read_offset (int): The read offset.
write_offset (int): The write offset.
|
juraj-google-style
|
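A usage sketch against the moderngl API that this wrapper mirrors, assuming a standalone (headless) context can be created:

```python
import moderngl

ctx = moderngl.create_standalone_context()
src = ctx.buffer(b'hello world!')
dst = ctx.buffer(reserve=src.size)
ctx.copy_buffer(dst, src)   # copy the full content of src into dst
print(dst.read())           # b'hello world!'
```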
def distance_from_point(self, pt):
return np.linalg.norm((np.array(pt) - self.coords))
|
Returns distance between the site and a point in space.
Args:
pt: Cartesian coordinates of point.
Returns:
Distance (float)
|
codesearchnet
|
def make_rsa_keypair(bits):
private_key = rsa.generate_private_key(public_exponent=65537, key_size=bits, backend=default_backend())
private_pem = private_key.private_bytes(encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.TraditionalOpenSSL, encryption_algorithm=serialization.NoEncryption())
public_pem = private_key.public_key().public_bytes(encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.SubjectPublicKeyInfo)
return (private_pem, public_pem)
|
Generate an RSA keypair.
Args:
bits (int): number of bits to use for the key.
Returns:
(private_key, public_key) - both as PEM encoded strings
|
codesearchnet
|
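A usage sketch (the file name below is illustrative):

```python
private_pem, public_pem = make_rsa_keypair(2048)
print(public_pem.decode('ascii'))         # '-----BEGIN PUBLIC KEY-----' block
with open('private_key.pem', 'wb') as fh:
    fh.write(private_pem)
```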
def _compare_constant_tuple_prefix(op, prefix, constant_tuple, reverse):
length = min(len(prefix), len(constant_tuple))
trimmed_prefix = prefix[:length]
trimmed_constant_tuple = constant_tuple[:length]
if trimmed_prefix == trimmed_constant_tuple:
if len(prefix) >= len(constant_tuple):
if reverse:
return op in (slots.LT, slots.LE, slots.NE)
else:
return op in (slots.NE, slots.GE, slots.GT)
return None
if reverse:
return _compare_constants(op, trimmed_constant_tuple, trimmed_prefix)
else:
return _compare_constants(op, trimmed_prefix, trimmed_constant_tuple)
|
Compares a tuple's constant prefix against a constant tuple.
Args:
op: A comparison operator, such as LT (less than).
prefix: A constant prefix of a non-constant tuple (referred to as "left" in
the inline comments). So if left=(3, 2, ...), prefix=(3, 2).
constant_tuple: A constant tuple (referred to as "right").
reverse: Whether left and right should be reversed for the comparison.
Returns:
A bool of the comparison result if it can be determined, None otherwise.
|
github-repos
|
def BSearchFloor(a, x, lo=0, hi=None):
if len(a) == 0: return -1
hi = hi if hi is not None else len(a)
pos = bisect_left(a, x, lo, hi)
return pos - 1 if pos >= hi \
else (pos if x == a[pos] else (pos - 1 if pos > lo else -1))
|
Returns the highest i such that a[i] <= x, or -1 if x < all elements in a
So, if x is in between two elements in a, this function will return the
index of the lower element, hence "Floor".
Arguments:
a -- ordered numeric sequence
x -- element to search within a
lo -- lowest index to consider in search
hi -- highest index to consider in search
|
juraj-google-style
|
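A few worked calls (note the result is an index into `a`, not a value):

```python
a = [1, 3, 5, 7]
print(BSearchFloor(a, 4))  # 1, because a[1] == 3 is the largest element <= 4
print(BSearchFloor(a, 7))  # 3, exact match at the last index
print(BSearchFloor(a, 0))  # -1, because 0 is smaller than every element
```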
def get_model_loader(filename):
assert isinstance(filename, six.string_types), filename
filename = os.path.expanduser(filename)
if filename.endswith('.npy'):
assert tf.gfile.Exists(filename), filename
return DictRestore(np.load(filename, encoding='latin1').item())
elif filename.endswith('.npz'):
assert tf.gfile.Exists(filename), filename
obj = np.load(filename)
return DictRestore(dict(obj))
else:
return SaverRestore(filename)
|
Get a corresponding model loader by looking at the file name.
Returns:
SessInit: either a :class:`DictRestore` (if name ends with 'npy/npz') or
:class:`SaverRestore` (otherwise).
|
codesearchnet
|
def _calculate_page_index(index, data):
if index > data['total_results']:
raise ValueError('index not in paged data')
page_length = len(data['results'])
return (index // page_length + 1, index % page_length)  # pages assumed to be 1-indexed
|
Determine the location of a given index in paged data.
Arguments:
index (:py:class:`int`): The overall index.
data: (:py:class:`dict`) The first page of data.
Returns:
:py:class:`tuple`: The location of that index, in the format
``(page, index_in_page)``.
|
juraj-google-style
|
def get_dimension_type(self, dim):
dim_obj = self.get_dimension(dim)
if dim_obj and dim_obj.type is not None:
return dim_obj.type
return self.interface.dimension_type(self, dim_obj)
|
Get the type of the requested dimension.
Type is determined by Dimension.type attribute or common
type of the dimension values, otherwise None.
Args:
dim: Dimension to look up by name or by index
Returns:
Declared type of values along the dimension
|
juraj-google-style
|
def copy(self, name=None):
cpy = copy.copy(self)
if name:
cpy.name = name
return cpy
|
shallow copy of the instruction.
Args:
name (str): name to be given to the copied circuit,
if None then the name stays the same
Returns:
Instruction: a shallow copy of the current instruction, with the name
updated if it was provided
|
codesearchnet
|
def auto_call_functors(enabled: bool=True) -> ContextManager[None]:
return thread_local.thread_local_value_scope(_TLS_AUTO_CALL_FUNCTORS, enabled, False)
|
Returns a context manager to enable or disable auto call for functors.
`auto_call_functors` is thread-safe and can be nested. For example::
@pg.symbolize
def foo(x, y):
return x + y
with pg.auto_call_functors(True):
a = foo(1, 2)
assert a == 3
with pg.auto_call_functors(False):
b = foo(1, 2)
assert isinstance(b, foo)
Args:
enabled: If True, enable auto call for functors.
Otherwise, auto call will be disabled.
Returns:
A context manager for enabling/disabling auto call for functors.
|
github-repos
|
def validate_args(func: Method, *args: Any, **kwargs: Any) -> Method:
signature(func).bind(*args, **kwargs)
return func
|
Check if the request's arguments match a function's signature.
Raises TypeError exception if arguments cannot be passed to a function.
Args:
func: The function to check.
args: Positional arguments.
kwargs: Keyword arguments.
Raises:
TypeError: If the arguments cannot be passed to the function.
|
juraj-google-style
|
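A brief usage sketch:

```python
def add(a, b):
    return a + b

validate_args(add, 1, 2)     # returns `add` unchanged; the binding succeeds
validate_args(add, 1, b=2)   # keyword arguments are checked as well
try:
    validate_args(add, 1)    # missing `b`
except TypeError as exc:
    print(exc)               # e.g. "missing a required argument: 'b'"
```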
def _clean_options(method, provided_options):
provided_options = (provided_options or {})
default_options = get_minimizer_options(method)
result = {}
for (name, default) in default_options.items():
if (name in provided_options):
result[name] = provided_options[name]
else:
result[name] = default_options[name]
return result
|
Clean the given input options.
This will make sure that all options are present, either with their default values or with the given values,
and that no other options are present than those supported.
Args:
method (str): the method name
provided_options (dict): the given options
Returns:
dict: the resulting options dictionary
|
codesearchnet
|
def minimize(self, minimize):
self._minimize = minimize
self._logger.log('debug', 'Minimize set to {}'.format(minimize))
|
Configures the ABC to minimize fitness function return value or
derived score
Args:
minimize (bool): if True, minimizes fitness function return value;
if False, minimizes derived score
|
codesearchnet
|
def set_metadata(self, **kwargs):
if self._traceme and kwargs:
self._traceme.SetMetadata(**kwargs)
|
Sets metadata in this trace event.
Args:
**kwargs: metadata in key-value pairs.
This method enables setting metadata in a trace event after it is
created.
Example usage:
```python
def call(function):
with tf.profiler.experimental.Trace("call",
function_name=function.name) as tm:
binary, in_cache = jit_compile(function)
tm.set_metadata(in_cache=in_cache)
execute(binary)
```
In this example, we want to trace how much time spent on
calling a function, which includes compilation and execution.
The compilation can be either getting a cached copy of the
binary or actually generating the binary, which is indicated
by the boolean "in_cache" returned by jit_compile(). We need
to use set_metadata() to pass in_cache because we did not know
the in_cache value when the trace was created (and we cannot
create the trace after jit_compile(), because we want
to measure the entire duration of call()).
|
github-repos
|
def formal_cities(reverse=False):
output = {}
fname = pkg_resources.resource_filename(__name__, 'resources/Formal_City_Name_Pairs.csv')
with open(fname, 'rU') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
if (not reverse):
output[row[0]] = row[1]
else:
output[row[1]] = row[0]
return output
|
Get a dictionary that maps all Backpage city names to their presentable, formal names.
Returns:
dictionary of Backpage city names mapped to formal city names
|
codesearchnet
|
def _process_between_filter_directive(filter_operation_info, location, context, parameters):
filtered_field_type = filter_operation_info.field_type
filtered_field_name = filter_operation_info.field_name
argument_inferred_type = strip_non_null_from_type(filtered_field_type)
(arg1_expression, arg1_non_existence) = _represent_argument(location, context, parameters[0], argument_inferred_type)
(arg2_expression, arg2_non_existence) = _represent_argument(location, context, parameters[1], argument_inferred_type)
lower_bound_clause = expressions.BinaryComposition(u'>=', expressions.LocalField(filtered_field_name), arg1_expression)
if (arg1_non_existence is not None):
lower_bound_clause = expressions.BinaryComposition(u'||', arg1_non_existence, lower_bound_clause)
upper_bound_clause = expressions.BinaryComposition(u'<=', expressions.LocalField(filtered_field_name), arg2_expression)
if (arg2_non_existence is not None):
upper_bound_clause = expressions.BinaryComposition(u'||', arg2_non_existence, upper_bound_clause)
filter_predicate = expressions.BinaryComposition(u'&&', lower_bound_clause, upper_bound_clause)
return blocks.Filter(filter_predicate)
|
Return a Filter basic block that checks that a field is between two values, inclusive.
Args:
filter_operation_info: FilterOperationInfo object, containing the directive and field info
of the field where the filter is to be applied.
location: Location where this filter is used.
context: dict, various per-compilation data (e.g. declared tags, whether the current block
is optional, etc.). May be mutated in-place in this function!
parameters: list of 2 elements, specifying the time range in which the data must lie;
if either of the elements is optional and missing,
their side of the check is assumed to be True
Returns:
a Filter basic block that performs the range check
|
codesearchnet
|
def is_seq_of(seq, expected_type, seq_type=None):
if (seq_type is None):
exp_seq_type = collections_abc.Sequence
else:
assert isinstance(seq_type, type)
exp_seq_type = seq_type
if (not isinstance(seq, exp_seq_type)):
return False
for item in seq:
if (not isinstance(item, expected_type)):
return False
return True
|
Check whether it is a sequence of some type.
Args:
seq (Sequence): The sequence to be checked.
expected_type (type): Expected type of sequence items.
seq_type (type, optional): Expected sequence type.
Returns:
bool: Whether the sequence is valid.
|
codesearchnet
|
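A few example calls:

```python
print(is_seq_of([1, 2, 3], int))               # True
print(is_seq_of((1, 'a'), int))                # False -- mixed item types
print(is_seq_of([1, 2], int, seq_type=tuple))  # False -- wrong container type
```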
def item_to_mrc(code, val):
if isinstance(val, basestring):
return [val_to_mrc(code, val)]
if isinstance(val, dict):
val = [val]
return dicts_to_mrc(code, val)
|
Convert `val` to MRC, whether it is dict or string.
Args:
code (str): Code of the field.
val (str or dict): Value of the field.
Returns:
list: MRC lines for output template.
|
juraj-google-style
|
def Parse(self, parser_mediator, file_object):
if (not file_object):
raise errors.UnableToParseFile('Invalid file object')
if (self._INITIAL_FILE_OFFSET is not None):
file_object.seek(self._INITIAL_FILE_OFFSET, os.SEEK_SET)
parser_mediator.AppendToParserChain(self)
try:
self.ParseFileObject(parser_mediator, file_object)
finally:
parser_mediator.PopFromParserChain()
|
Parses a single file-like object.
Args:
parser_mediator (ParserMediator): a parser mediator.
file_object (dfvfs.FileIO): a file-like object to parse.
Raises:
UnableToParseFile: when the file cannot be parsed.
|
codesearchnet
|
def parse_history_node(h_node):
if isinstance(h_node, dict):
return HistoryNode.from_dict(h_node)
else:
if (len(h_node) != 3):
raise ValueError('Invalid History node, should be dict or (name, version, description) tuple: {}'.format(h_node))
return HistoryNode(h_node[0], h_node[1], h_node[2])
|
Parses a History Node object from either a dict or a tuple.
Args:
h_node: A dict with name/url/description fields or a 3-element
tuple.
Returns:
History node.
|
codesearchnet
|
def _validate_config(config):
if (not isinstance(config, list)):
raise TypeError('Config must be a list')
for config_dict in config:
if (not isinstance(config_dict, dict)):
raise TypeError('Config must be a list of dictionaries')
label = list(config_dict.keys())[0]
cfg = config_dict[label]
if (not isinstance(cfg, dict)):
raise TypeError('Config structure is broken')
if ('host' not in cfg):
raise TypeError('Config entries must have a value for host')
if ((not isinstance(cfg['host'], str)) and (not isinstance(cfg['host'], list))):
raise TypeError('Host must be a string or a list.')
if ('port' not in cfg):
raise TypeError('Config entries must have a value for port')
if (not isinstance(cfg['port'], int)):
raise TypeError('Port must be an int')
if ('dbpath' not in cfg):
raise TypeError('Config entries must have a value for dbpath')
if (not isinstance(cfg['dbpath'], str)):
if (not isinstance(cfg['dbpath'], list)):
raise TypeError('Dbpath must be either a string or a list of strings')
for dbpath in cfg['dbpath']:
if (not isinstance(dbpath, str)):
raise TypeError('Dbpath must be either a string or a list of strings')
if (('read_preference' in cfg) and (not isinstance(cfg['read_preference'], str))):
raise TypeError('Read_preference must be a string')
if (('replicaSet' in cfg) and (not isinstance(cfg['replicaSet'], str))):
raise TypeError('replicaSet must be a string')
|
Validate that the provided configuration is valid.
Each dictionary in the configuration list must have the following
mandatory entries:
{label: {host(string|list of strings), port(int), dbpath(string|list of strings)}}
It can also contain optional keys:
{read_preference(string), replicaSet(string)}
Args:
config: the list of configurations provided at instantiation
Raises:
TypeError: a fault in the configurations is found
|
codesearchnet
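A sketch of a configuration list that this validation accepts (label and values are made up for illustration):
# One labelled entry with the mandatory host/port/dbpath keys plus the
# optional read_preference and replicaSet keys.
example_config = [
    {'cluster1': {
        'host': 'localhost',
        'port': 27017,
        'dbpath': ['db_a', 'db_b'],
        'read_preference': 'PRIMARY',
        'replicaSet': 'rs0',
    }},
]
_validate_config(example_config)  # raises TypeError if any entry is malformed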
|
def add_oxidation_state_by_element(self, oxidation_states):
try:
for site in self.sites:
new_sp = {}
for (el, occu) in site.species.items():
sym = el.symbol
new_sp[Specie(sym, oxidation_states[sym])] = occu
site.species = new_sp
except KeyError:
raise ValueError('Oxidation state of all elements must be specified in the dictionary.')
|
Add oxidation states.
Args:
oxidation_states (dict): Dict of oxidation states.
E.g., {"Li":1, "Fe":2, "P":5, "O":-2}
|
codesearchnet
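A short usage sketch, assuming structure is an instance of the structure class this method belongs to:
# Every element present in the structure must have an entry in the dict,
# otherwise a ValueError is raised.
structure.add_oxidation_state_by_element({'Li': 1, 'Fe': 2, 'P': 5, 'O': -2})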
|
def destroy(ads):
for ad in ads:
try:
ad.services.stop_all()
except Exception:
ad.log.exception('Failed to clean up properly.')
|
Cleans up AndroidDevice objects.
Args:
ads: A list of AndroidDevice objects.
|
juraj-google-style
|
def get_percentile(self, percentile):
assert 0 <= percentile <= 100, \
'percentile must be between 0 and 100. Got {}'.format(percentile)
return self._percentile(self._values, percentile)
|
Get the value representing the input percentile of the Data Collection.
Args:
percentile: A float value from 0 to 100 representing the
requested percentile.
Return:
The Data Collection value at the input percentile
|
juraj-google-style
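A hedged usage sketch, assuming collection is an instance of the Data Collection class this method belongs to:
median = collection.get_percentile(50)   # the median of the collected values
p95 = collection.get_percentile(95)      # the 95th percentile
# collection.get_percentile(120)         # would fail the 0-100 assertion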
|
def sub_pi_to_number(self, subpage=1, subitem=1):
    if subitem is None:
        subitem = 0
    if subpage is None:
        return 0
    if subpage > 1:
        return ((subpage - 1) * self.subpage_items) + subitem
    return subitem
|
Convert subpage & subitem to an integer.
* if subpage == 1, then return subitem, since the item count is already the true # of items
* if subpage > 1, then return (subpage - 1) * items_per_page + subitem, since we are
returning the # of items on the preceding full subpages plus the current offset.
Args:
* subpage - subpage number (1-based); None returns 0
* subitem - item offset within the current subpage; None is treated as 0
Returns:
* Integer - Which represents the number of items up to the given subpage & subitem.
|
juraj-google-style
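A worked example of the arithmetic, assuming an instance pager of the containing class with subpage_items = 10:
# subpage=1, subitem=4 -> 4 (first subpage: only the offset counts)
# subpage=3, subitem=2 -> (3 - 1) * 10 + 2 = 22
# subpage=None         -> 0
assert pager.sub_pi_to_number(subpage=3, subitem=2) == 22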
|
def try_serialize_handler(handler):
if (isinstance(handler, types.InstanceType) or
(isinstance(handler, object) and
not inspect.isfunction(handler) and
not inspect.ismethod(handler)) and
hasattr(handler, "__call__")):
return pickle.dumps(handler)
return None
|
Try to serialize map/reduce handler.
Args:
handler: handler function/instance. Handler can be a function or an
instance of a callable class. In the latter case, the handler will
be serialized across slices to allow users to save states.
Returns:
serialized handler string or None.
|
juraj-google-style
|
def port_list(br):
cmd = 'ovs-vsctl list-ports {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
|
Lists all of the ports within bridge.
Args:
br: A string - bridge name.
Returns:
List of bridges (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_list br0
|
juraj-google-style
|
def UpdateFrom(self, src):
if not isinstance(src, PathInfo):
raise TypeError("expected `%s` but got `%s`" % (PathInfo, type(src)))
if self.path_type != src.path_type:
raise ValueError(
"src [%s] does not represent the same path type as self [%s]" %
(src.path_type, self.path_type))
if self.components != src.components:
raise ValueError("src [%s] does not represent the same path as self [%s]"
% (src.components, self.components))
if src.HasField("stat_entry"):
self.stat_entry = src.stat_entry
self.last_stat_entry_timestamp = max(self.last_stat_entry_timestamp,
src.last_stat_entry_timestamp)
self.directory = self.directory or src.directory
|
Merge path info records.
Merges src into self.
Args:
src: An rdfvalues.objects.PathInfo record, will be merged into self.
Raises:
ValueError: If src does not represent the same path.
|
juraj-google-style
|
def __init__(self, message):
super(IllegalOperation, self).__init__(
reason=enums.ResultReason.ILLEGAL_OPERATION,
message=message
)
|
Create an IllegalOperation exception.
Args:
message (string): A string containing information about the error.
|
juraj-google-style
|
def db_stats(self):
data = dict(action='db-stats')
jsondata = self._api_request(params=data)
stats = DBStats(total_clicks=int(jsondata['db-stats']['total_clicks']), total_links=int(jsondata['db-stats']['total_links']))
return stats
|
Get database statistics.
Returns:
DBStats: Total clicks and links statistics.
Raises:
requests.exceptions.HTTPError: Generic HTTP Error
|
codesearchnet
|
def add_word(self, word):
word = word.lower()
if (not (word.isascii() and word.isalpha())):
raise ValueError("Invalid character in word '{}'".format(word))
word = word.encode(encoding='ascii')
result = cgaddag.gdg_add_word(self.gdg, word)
if (result == 1):
raise ValueError("Invalid character in word '{}'".format(word))
elif (result == 2):
raise MemoryError('Out of memory, GADDAG is in an undefined state')
|
Add a word to the GADDAG.
Args:
word: A word to be added to the GADDAG.
|
codesearchnet
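A hedged usage sketch, assuming g is an instance of the GADDAG wrapper class this method belongs to:
g.add_word('Hello')      # stored lower-cased; must contain only ASCII letters
# g.add_word('héllo')    # would raise ValueError: invalid character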
|
def insert_arguments_into_query(compilation_result, arguments):
_ensure_arguments_are_provided(compilation_result.input_metadata, arguments)
if compilation_result.language == MATCH_LANGUAGE:
return insert_arguments_into_match_query(compilation_result, arguments)
elif compilation_result.language == GREMLIN_LANGUAGE:
return insert_arguments_into_gremlin_query(compilation_result, arguments)
elif compilation_result.language == SQL_LANGUAGE:
return insert_arguments_into_sql_query(compilation_result, arguments)
else:
raise AssertionError(u'Unrecognized language in compilation result: '
u'{}'.format(compilation_result))
|
Insert the arguments into the compiled GraphQL query to form a complete query.
Args:
compilation_result: a CompilationResult object derived from the GraphQL compiler
arguments: dict, mapping argument name to its value, for every parameter the query expects.
Returns:
string, a query in the appropriate output language, with inserted argument data
|
juraj-google-style
|
def record_gradient(op_name, inputs, attrs, outputs):
pywrap_tfe.TFE_Py_RecordGradient(op_name, inputs, attrs, outputs, ops.get_name_scope())
|
Explicitly record the gradient for a given op.
Args:
op_name: The op name as listed in the `OpDef` for the op.
inputs: A list of tensor inputs to the op.
attrs: The op attributes as a flattened list of alternating attribute names
and attribute values.
outputs: A list of tensor outputs from the op.
|
github-repos
|
def get_user_groups(self, dn, group_search_dn=None, _connection=None):
connection = _connection
if (not connection):
connection = self._make_connection(bind_user=self.config.get('LDAP_BIND_USER_DN'), bind_password=self.config.get('LDAP_BIND_USER_PASSWORD'))
connection.bind()
safe_dn = ldap3.utils.conv.escape_filter_chars(dn)
search_filter = '(&{group_filter}({members_attr}={user_dn}))'.format(group_filter=self.config.get('LDAP_GROUP_OBJECT_FILTER'), members_attr=self.config.get('LDAP_GROUP_MEMBERS_ATTR'), user_dn=safe_dn)
log.debug("Searching for groups for specific user with filter '{0}' , base '{1}' and scope '{2}'".format(search_filter, (group_search_dn or self.full_group_search_dn), self.config.get('LDAP_GROUP_SEARCH_SCOPE')))
connection.search(search_base=(group_search_dn or self.full_group_search_dn), search_filter=search_filter, attributes=self.config.get('LDAP_GET_GROUP_ATTRIBUTES'), search_scope=getattr(ldap3, self.config.get('LDAP_GROUP_SEARCH_SCOPE')))
results = []
for item in connection.response:
if (('type' not in item) or (item.get('type') != 'searchResEntry')):
continue
group_data = item['attributes']
group_data['dn'] = item['dn']
results.append(group_data)
if (not _connection):
self.destroy_connection(connection)
return results
|
Gets a list of groups a user at dn is a member of
Args:
dn (str): The dn of the user to find memberships for.
_connection (ldap3.Connection): A connection object to use when
searching. If not given, a temporary connection will be
created, and destroyed after use.
group_search_dn (str): The search dn for groups. Defaults to
``'{LDAP_GROUP_DN},{LDAP_BASE_DN}'``.
Returns:
list: A list of LDAP groups the user is a member of.
|
codesearchnet
|
def create_user(self, claims):
username_claim = settings.USERNAME_CLAIM
usermodel = get_user_model()
user, created = usermodel.objects.get_or_create(**{
usermodel.USERNAME_FIELD: claims[username_claim]
})
if created or not user.password:
user.set_unusable_password()
logger.debug("User '{}' has been created.".format(claims[username_claim]))
return user
|
Create the user if it doesn't exist yet
Args:
claims (dict): claims from the access token
Returns:
django.contrib.auth.models.User: A Django user
|
juraj-google-style
|
def __getitem__(self, index):
getter = coordinates.Coordinates.from_string(index)
return getter(self._values)
|
Return the value(s) of the given cell(s).
Args:
index (str): cell/row/col index ('A1', '2', 'B') or slice ('A1':'C3')
Returns:
value (cell), list(col, row), or nested list (two-dimensional slice)
Raises:
TypeError: if ``index`` is not a string or slice of strings
ValueError: if ``index`` cannot be parsed
IndexError: if ``index`` is out of range
|
juraj-google-style
|
def __init__(self, num_embeddings, num_additional_embeddings, embedding_dim, partially_freeze: Optional[bool]=False, device=None, dtype=None, padding_idx=None, **kwargs) -> None:
if padding_idx is not None and padding_idx > num_embeddings:
raise ValueError(f'padding_idx must be within num_embeddings. Got {padding_idx} and {num_embeddings}')
super().__init__(num_embeddings=num_embeddings, embedding_dim=embedding_dim, device=device, dtype=dtype, padding_idx=padding_idx, **kwargs)
self.num_embeddings = num_embeddings
self.padding_idx = padding_idx
self.num_additional_embeddings = num_additional_embeddings
self.partially_freeze = partially_freeze
if partially_freeze:
self.weight.requires_grad_(False)
if self.num_additional_embeddings > 0:
self.additional_embedding = nn.Embedding(num_embeddings=self.num_additional_embeddings, embedding_dim=embedding_dim, device=device, dtype=dtype)
|
Args:
num_embeddings (`int`):
Size of the dictionary of embeddings
num_additional_embeddings (`int`):
Number of additional embeddings. Only useful when `partially_freeze=True` is set.
embedding_dim (`int`):
The size of each embedding vector
partially_freeze: (`bool`, *optional*, defaults to `False`):
If `True`, the regular `weight` will be frozen. `additional_weight` is never frozen.
padding_idx (`int`, *optional*):
The padding index (needs to be less than num_embeddings)
Note: there are a lot of other parameters to initialize a standard `nn.Embedding` such as
`max_norm` or `norm_type`. We are not supporting these.
|
github-repos
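A hedged instantiation sketch; DecoupledEmbedding stands in for whatever the containing class is actually named:
# 32000 frozen pretrained embeddings plus 2 trainable additional embeddings
# (e.g. for newly added special tokens).
embed = DecoupledEmbedding(
    num_embeddings=32000,
    num_additional_embeddings=2,
    embedding_dim=512,
    partially_freeze=True,
)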
|
def Reinit(self, pid, auto_symfile_loading=True):
self.ShutDownGdb()
self.__init__(pid, auto_symfile_loading, architecture=self.arch)
|
Reinitializes the object with a new pid.
Since all modes might need access to this object at any time, this object
needs to be long-lived. To make this clear in the API, this shorthand is
supplied.
Args:
pid: the pid of the target process
auto_symfile_loading: whether the symbol file should automatically be
loaded by gdb.
|
juraj-google-style
|
def _ScanVolumeSystemRoot(self, scan_context, scan_node, base_path_specs):
if ((not scan_node) or (not scan_node.path_spec)):
raise errors.ScannerError('Invalid scan node.')
if (scan_node.type_indicator == definitions.TYPE_INDICATOR_APFS_CONTAINER):
volume_identifiers = self._GetAPFSVolumeIdentifiers(scan_node)
elif (scan_node.type_indicator == definitions.TYPE_INDICATOR_VSHADOW):
volume_identifiers = self._GetVSSStoreIdentifiers(scan_node)
volume_identifiers.reverse()
else:
raise errors.ScannerError('Unsupported volume system type: {0:s}.'.format(scan_node.type_indicator))
for volume_identifier in volume_identifiers:
location = '/{0:s}'.format(volume_identifier)
sub_scan_node = scan_node.GetSubNodeByLocation(location)
if (not sub_scan_node):
raise errors.ScannerError('Scan node missing for volume identifier: {0:s}.'.format(volume_identifier))
self._ScanVolume(scan_context, sub_scan_node, base_path_specs)
|
Scans a volume system root scan node for volume and file systems.
Args:
scan_context (SourceScannerContext): source scanner context.
scan_node (SourceScanNode): volume system root scan node.
base_path_specs (list[PathSpec]): file system base path specifications.
Raises:
ScannerError: if the scan node is invalid, the scan node type is not
supported or if a sub scan node cannot be retrieved.
|
codesearchnet
|
def __init__(self, project_id, instance_id, table_id, flush_count, max_row_bytes):
super().__init__()
self.beam_options = {'project_id': project_id, 'instance_id': instance_id, 'table_id': table_id, 'flush_count': flush_count, 'max_row_bytes': max_row_bytes}
self.table = None
self.batcher = None
self.service_call_metric = None
self.written = Metrics.counter(self.__class__, 'Written Row')
|
Constructor of the Write connector of Bigtable
Args:
project_id(str): GCP Project of to write the Rows
instance_id(str): GCP Instance to write the Rows
table_id(str): GCP Table to write the `DirectRows`
flush_count(int): Max number of rows to flush
max_row_bytes(int): Max size in bytes of row mutations to flush
|
github-repos
|
def parse_case(config):
if 'owner' not in config:
raise ConfigError("A case has to have a owner")
if 'family' not in config:
raise ConfigError("A case has to have a 'family'")
individuals = parse_individuals(config['samples'])
case_data = {
'owner': config['owner'],
'collaborators': [config['owner']],
'case_id': config['family'],
'display_name': config.get('family_name', config['family']),
'genome_build': config.get('human_genome_build'),
'rank_model_version': config.get('rank_model_version'),
'rank_score_threshold': config.get('rank_score_threshold', 0),
'analysis_date': config['analysis_date'],
'individuals': individuals,
'vcf_files': {
'vcf_snv': config.get('vcf_snv'),
'vcf_sv': config.get('vcf_sv'),
'vcf_str': config.get('vcf_str'),
'vcf_cancer': config.get('vcf_cancer'),
'vcf_snv_research': config.get('vcf_snv_research'),
'vcf_sv_research': config.get('vcf_sv_research'),
'vcf_cancer_research': config.get('vcf_cancer_research'),
},
'default_panels': config.get('default_gene_panels', []),
'gene_panels': config.get('gene_panels', []),
'assignee': config.get('assignee'),
'peddy_ped': config.get('peddy_ped'),
'peddy_sex': config.get('peddy_sex'),
'peddy_check': config.get('peddy_check'),
'delivery_report': config.get('delivery_report'),
'multiqc': config.get('multiqc'),
'track': config.get('track', 'rare'),
}
if 'madeline' in config:
mad_path = Path(config['madeline'])
if not mad_path.exists():
raise ValueError("madeline path not found: {}".format(mad_path))
with mad_path.open('r') as in_handle:
case_data['madeline_info'] = in_handle.read()
if (case_data['vcf_files']['vcf_cancer'] or case_data['vcf_files']['vcf_cancer_research']):
case_data['track'] = 'cancer'
return case_data
|
Parse case information from config or PED files.
Args:
config (dict): case config with detailed information
Returns:
dict: parsed case data
|
juraj-google-style
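A minimal hedged example of a config dict this parser accepts; the 'samples' entries must follow whatever format parse_individuals expects:
config = {
    'owner': 'cust000',
    'family': 'F0001',
    'analysis_date': '2019-11-05',
    'samples': samples,        # list of individual dicts (format assumed)
    'vcf_snv': 'clinical.vcf.gz',
}
case_data = parse_case(config)
case_data['case_id']           # -> 'F0001'
case_data['track']             # -> 'rare' (no cancer VCFs given)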
|
def get_topic_triggers(rs, topic, thats, depth=0, inheritance=0, inherited=False):
if (depth > rs._depth):
rs._warn('Deep recursion while scanning topic inheritance')
    rs._say('\tCollecting trigger list for topic {}(depth={}; inheritance={}; inherited={})'.format(topic, depth, inheritance, inherited))
if (not (topic in rs._topics)):
rs._warn("Inherited or included topic {} doesn't exist or has no triggers".format(topic))
return []
triggers = []
inThisTopic = []
if (not thats):
if (topic in rs._topics):
for trigger in rs._topics[topic]:
inThisTopic.append([trigger['trigger'], trigger])
elif (topic in rs._thats.keys()):
for curtrig in rs._thats[topic].keys():
for (previous, pointer) in rs._thats[topic][curtrig].items():
inThisTopic.append([pointer['trigger'], pointer])
if (topic in rs._includes):
for includes in rs._includes[topic]:
rs._say(((('\t\tTopic ' + topic) + ' includes ') + includes))
triggers.extend(get_topic_triggers(rs, includes, thats, (depth + 1), inheritance, True))
if (topic in rs._lineage):
for inherits in rs._lineage[topic]:
rs._say(((('\t\tTopic ' + topic) + ' inherits ') + inherits))
triggers.extend(get_topic_triggers(rs, inherits, thats, (depth + 1), (inheritance + 1), False))
if ((topic in rs._lineage) or inherited):
for trigger in inThisTopic:
rs._say(((('\t\tPrefixing trigger with {inherits=' + str(inheritance)) + '}') + trigger[0]))
triggers.append([((('{inherits=' + str(inheritance)) + '}') + trigger[0]), trigger[1]])
else:
triggers.extend(inThisTopic)
return triggers
|
Recursively scan a topic and return a list of all triggers.
Arguments:
rs (RiveScript): A reference to the parent RiveScript instance.
topic (str): The original topic name.
thats (bool): Are we getting triggers for 'previous' replies?
depth (int): Recursion step counter.
inheritance (int): The inheritance level counter, for topics that
inherit other topics.
inherited (bool): Whether the current topic is inherited by others.
Returns:
[]str: List of all triggers found.
|
codesearchnet
|
def _ParseTokenType(self, file_object, file_offset):
token_type_map = self._GetDataTypeMap('uint8')
(token_type, _) = self._ReadStructureFromFileObject(file_object, file_offset, token_type_map)
return token_type
|
Parses a token type.
Args:
file_object (dfvfs.FileIO): file-like object.
file_offset (int): offset of the token relative to the start of
the file-like object.
Returns:
int: token type
|
codesearchnet
|