code | docstring | source
---|---|---|
def _create_temp_cache(self, num_traced_tensors, num_signatures, graph):
init_value = constant_op.constant(_COMPACT_TRACE_ENTRY_INIT_VALUE, dtype=dtypes.float32, shape=[num_signatures])
self._temp_cache_var[graph] = [init_value for _ in range(num_traced_tensors)]
|
Creates a temporary cache with the given dimensions.
Fills the self._temp_cache_var with num_traced_tensors tf.constant() ops
that have shape of [num_signatures].
Args:
num_traced_tensors: Int, denoting total number of traced tensors.
num_signatures: Int, denoting the number of statistics collected per
tensor.
graph: TensorFlow graph.
|
github-repos
|
def __iter__(self):
raise NotImplementedError('Must be implemented in descendants')
|
Creates an iterator for the `tf.distribute.DistributedDataset`.
The returned iterator implements the Python Iterator protocol.
Example usage:
>>> global_batch_size = 4
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4]).repeat().batch(global_batch_size)
>>> distributed_iterator = iter(strategy.experimental_distribute_dataset(dataset))
>>> print(next(distributed_iterator))
PerReplica:{
0: tf.Tensor([1 2], shape=(2,), dtype=int32),
1: tf.Tensor([3 4], shape=(2,), dtype=int32)
}
Returns:
An `tf.distribute.DistributedIterator` instance for the given
`tf.distribute.DistributedDataset` object to enumerate over the
distributed data.
|
github-repos
|
def sample_poly(self, poly, scalar=None, bias_range=1, poly_range=None, ignored_terms=None, **parameters):
if (ignored_terms is None):
ignored_terms = set()
else:
ignored_terms = {frozenset(term) for term in ignored_terms}
(original, poly) = (poly, poly.copy())
if (scalar is not None):
poly.scale(scalar, ignored_terms=ignored_terms)
else:
poly.normalize(bias_range=bias_range, poly_range=poly_range, ignored_terms=ignored_terms)
try:
v = next((v for (v, bias) in original.items() if (bias and (v not in ignored_terms))))
except StopIteration:
scalar = 1
else:
scalar = (poly[v] / original[v])
sampleset = self.child.sample_poly(poly, **parameters)
if ignored_terms:
sampleset.record.energy = original.energies((sampleset.record.sample, sampleset.variables))
else:
sampleset.record.energy /= scalar
return sampleset
|
Scale and sample from the given binary polynomial.
If scalar is not given, the problem is scaled based on bias and polynomial
ranges. See :meth:`.BinaryPolynomial.scale` and
:meth:`.BinaryPolynomial.normalize`.
Args:
poly (obj:`.BinaryPolynomial`): A binary polynomial.
scalar (number, optional):
Value by which to scale the energy range of the binary polynomial.
bias_range (number/pair, optional, default=1):
Value/range by which to normalize all the biases, or if
`poly_range` is provided, just the linear biases.
poly_range (number/pair, optional):
Value/range by which to normalize the higher order biases.
ignored_terms (iterable, optional):
Biases associated with these terms are not scaled.
**parameters:
Other parameters for the sampling method, specified by
the child sampler.
|
codesearchnet
|
def register_model(cls, model):
rest_name = model.rest_name
resource_name = model.resource_name
if rest_name not in cls._model_rest_name_registry:
cls._model_rest_name_registry[rest_name] = [model]
cls._model_resource_name_registry[resource_name] = [model]
elif model not in cls._model_rest_name_registry[rest_name]:
cls._model_rest_name_registry[rest_name].append(model)
cls._model_resource_name_registry[resource_name].append(model)
|
Register a model class according to its remote name
Args:
model: the model to register
|
juraj-google-style
|
def add_permission_by_name(self, code, save=False):
if (not save):
return [('%s | %s' % (p.name, p.code)) for p in Permission.objects.filter(code__contains=code)]
for p in Permission.objects.filter(code__contains=code):
if (p not in self.Permissions):
self.Permissions(permission=p)
if p:
self.save()
|
Adds a permission with the given code name.
Args:
code (str): Code name of the permission.
save (bool): If False, returns a list of matching permissions ("name | code" strings) without adding them.
|
codesearchnet
|
def generate_output_network(self, json_data=None, hr=True, show_name=False, colorize=True):
if (json_data is None):
json_data = {}
output = generate_output(line='0', short=(HR_RDAP['network']['_short'] if hr else 'network'), name=(HR_RDAP['network']['_name'] if (hr and show_name) else None), is_parent=True, colorize=colorize)
for (key, val) in json_data['network'].items():
if (key in ['links', 'status']):
output += self.generate_output_list(source='network', key=key, val=val, line='1', hr=hr, show_name=show_name, colorize=colorize)
elif (key in ['notices', 'remarks']):
output += self.generate_output_notices(source='network', key=key, val=val, line='1', hr=hr, show_name=show_name, colorize=colorize)
elif (key == 'events'):
output += self.generate_output_events(source='network', key=key, val=val, line='1', hr=hr, show_name=show_name, colorize=colorize)
elif (key not in ['raw']):
output += generate_output(line='1', short=(HR_RDAP['network'][key]['_short'] if hr else key), name=(HR_RDAP['network'][key]['_name'] if (hr and show_name) else None), value=val, colorize=colorize)
return output
|
The function for generating CLI output of RDAP network results.
Args:
json_data (:obj:`dict`): The data to process. Defaults to None.
hr (:obj:`bool`): Enable human readable key translations. Defaults
to True.
show_name (:obj:`bool`): Show human readable name (default is to
only show short). Defaults to False.
colorize (:obj:`bool`): Colorize the console output with ANSI
colors. Defaults to True.
Returns:
str: The generated output.
|
codesearchnet
|
def write_jsonl_file(fname, data):
if not isinstance(data, list):
print('warning: malformed json data for file', fname)
return
with open(fname, 'w') as of:
for row in data:
if row.strip():
of.write('%s\n' % row.strip())
|
Writes a jsonl file.
Args:
fname: path of the output file
data: list of json encoded data
|
juraj-google-style
|
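For illustration, a minimal usage sketch of write_jsonl_file as defined above; the file name and rows are hypothetical, and each row is assumed to be an already JSON-encoded string.

import json

# Hypothetical rows: each element is a pre-encoded JSON string, matching
# the function's expectation that it only strips and writes each row.
rows = [json.dumps({'id': 1, 'text': 'hello'}),
        json.dumps({'id': 2, 'text': 'world'})]

write_jsonl_file('output.jsonl', rows)  # writes one JSON object per line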
def get_volume_details(self, volume_name: str) -> dict:
if (volume_name not in self.volumes):
raise RuntimeError('No such volume found: ', volume_name)
volume = self._client.volumes.get(volume_name)
return volume.attrs
|
Get details of the volume.
Args:
volume_name (str): Name of the volume
Returns:
dict, details of the volume
|
codesearchnet
|
def use_db(path, mode=WorkDB.Mode.create):
database = WorkDB(path, mode)
try:
yield database
finally:
database.close()
|
Open a DB in file `path` in mode `mode` as a context manager.
On exiting the context the DB will be automatically closed.
Args:
path: The path to the DB file.
mode: The mode in which to open the DB. See the `Mode` enum for
details.
Raises:
FileNotFoundError: If `mode` is `Mode.open` and `path` does not
exist.
|
juraj-google-style
|
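A brief usage sketch, assuming use_db is wrapped as a context manager (the decorator is not shown in the snippet) and that WorkDB.Mode.open behaves as described in the docstring; the path is hypothetical.

# 'session.sqlite' is a hypothetical path; use_db and WorkDB are assumed
# to be importable from the surrounding module.
with use_db('session.sqlite', mode=WorkDB.Mode.open) as db:
    ...  # work with the open WorkDB; it is closed automatically on exit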
def converted_self(self):
if self._converted_self is None:
old_name = self.function.signature.name
new_name = self._enclosing_graph.converted_function_names[old_name]
self.converted_enclosing_graph.rename_function(old_name, new_name)
self._converted_self = self.converted_enclosing_graph.functions[new_name]
return self._converted_self
|
The Function copy to be converted.
The copy will be renamed according to the graph's converted_function_name
map, to ensure the name does not match anything currently in TensorFlow's
function cache.
Returns:
The function instance to be converted.
|
github-repos
|
def usufyToXlsxExport(d, fPath):
from pyexcel_xlsx import get_data
try:
oldData = {'OSRFramework': get_data(fPath)}
except Exception:
oldData = {'OSRFramework': []}
tabularData = _generateTabularData(d, oldData)
from pyexcel_xlsx import save_data
save_data(fPath, tabularData)
|
Workaround to export to a .xlsx file.
Args:
-----
d: Data to export.
fPath: File path for the output file.
|
codesearchnet
|
def flatten(value: '_instance_base.SimpleValue', classes: 'list[class_mixin.Class]') -> bool:
if isinstance(value, _abstract.AnnotationClass):
value = value.base_cls
if isinstance(value, _abstract.Class):
classes.append(value)
return False
elif isinstance(value, _abstract.Tuple):
ambiguous = False
for var in value.pyval:
if len(var.bindings) != 1 or flatten(var.bindings[0].data, classes):
ambiguous = True
return ambiguous
elif isinstance(value, _abstract.Union):
ambiguous = False
for val in value.options:
if flatten(val, classes):
ambiguous = True
return ambiguous
else:
return True
|
Flatten the contents of value into classes.
If value is a Class, it is appended to classes.
If value is a PythonConstant of type tuple, then each element of the tuple
that has a single binding is also flattened.
Any other type of value, or tuple elements that have multiple bindings are
ignored.
Args:
value: An abstract value.
classes: A list to be modified.
Returns:
True iff a value was ignored during flattening.
|
github-repos
|
def __init__(self, data, label=None):
if hasattr(data, 'to_matrix'):
data = data.to_matrix()
elif hasattr(data, 'to_operator'):
data = data.to_operator().data
data = numpy.array(data, dtype=complex)
if not is_unitary_matrix(data):
raise ExtensionError("Input matrix is not unitary.")
input_dim, output_dim = data.shape
n_qubits = int(numpy.log2(input_dim))
if input_dim != output_dim or 2**n_qubits != input_dim:
raise ExtensionError(
"Input matrix is not an N-qubit operator.")
super().__init__('unitary', n_qubits, [data], label=label)
|
Create a gate from a numeric unitary matrix.
Args:
data (matrix or Operator): unitary operator.
label (str): unitary name for backend [Default: None].
Raises:
ExtensionError: if input data is not an N-qubit unitary operator.
|
juraj-google-style
|
def monkey_patch(enabled=True):
if enabled:
Image.open = imdirect_open
else:
Image.open = pil_open
|
Monkey patches the PIL.Image.open method.
Args:
enabled (bool): If the monkey patch should be activated or deactivated.
|
juraj-google-style
|
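A short usage sketch; the image path is hypothetical, and the only behaviour assumed is what the function above shows, namely swapping PIL.Image.open for imdirect_open and back.

from PIL import Image

monkey_patch()                 # PIL.Image.open now points at imdirect_open
img = Image.open('photo.jpg')  # hypothetical file path
monkey_patch(enabled=False)    # restore the original PIL opener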
def _fill_shape(x, n):
if ((not isinstance(n, numbers.Integral)) or (n < 1)):
raise TypeError('n must be a positive integer')
if ((isinstance(x, numbers.Integral) or isinstance(x, tf.Dimension)) and (x > 0)):
return ((x,) * n)
try:
if ((len(x) == n) and all(((v > 0) for v in x))):
return tuple(x)
except TypeError:
pass
raise TypeError('x is {}, must be either a positive integer or an iterable of positive integers of size {}'.format(x, n))
|
Converts a dimension to a tuple of dimensions of a given size.
This is used to allow shorthand notation for various configuration parameters.
A user can provide either, for example, `2` or `[2, 2]` as a kernel shape, and
this function returns `(2, 2)` in both cases. Passing `[1, 2]` will return
`(1, 2)`.
Args:
x: An integer, tf.Dimension, or an iterable of them.
n: An integer, the size of the desired output list
Returns:
If `x` is an integer, a tuple of size `n` containing `n` copies of `x`.
If `x` is an iterable of integers or tf.Dimension of size `n`, it returns
`tuple(x)`.
Raises:
TypeError: If n is not a positive integer;
or if x is neither integer nor an iterable of size n.
|
codesearchnet
|
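A few illustrative calls, assuming the private helper is in scope; the expected results follow directly from the docstring above.

assert _fill_shape(2, 2) == (2, 2)          # a scalar is repeated n times
assert _fill_shape([1, 2], 2) == (1, 2)     # an iterable of length n passes through
assert _fill_shape((3, 5), 2) == (3, 5)

try:
    _fill_shape([1, 2, 3], 2)               # wrong length: raises TypeError
except TypeError as err:
    print(err)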
def _create_produce_requests(self, collated):
requests = {}
for node_id, batches in six.iteritems(collated):
requests[node_id] = self._produce_request(
node_id, self.config['acks'],
self.config['request_timeout_ms'], batches)
return requests
|
Transfer the record batches into a list of produce requests on a
per-node basis.
Arguments:
collated: {node_id: [RecordBatch]}
Returns:
dict: {node_id: ProduceRequest} (version depends on api_version)
|
juraj-google-style
|
def get_var(self, var, info_cb=DEFAULT_MESSAGE_CALLBACK):
return self._simple_command('getvar', arg=var, info_cb=info_cb)
|
Returns the given variable's definition.
Args:
var: A variable the bootloader tracks, such as version.
info_cb: See Download. Usually no messages.
Returns:
Value of var according to the current bootloader.
|
juraj-google-style
|
def get_default_connection_info(self, provider_name):
provider = self._provider_client.get_by_name(provider_name)
if provider:
return provider['defaultConnectionInfo']
else:
return {}
|
Gets default connection info for a specific provider.
Args:
provider_name: Name of the provider.
Returns:
dict: Default connection information.
|
juraj-google-style
|
def add_dataset(self, dataset, datasets_to_check=None):
showcase_dataset = self._get_showcase_dataset_dict(dataset)
if datasets_to_check is None:
datasets_to_check = self.get_datasets()
for dataset in datasets_to_check:
if showcase_dataset['package_id'] == dataset['id']:
return False
self._write_to_hdx('associate', showcase_dataset, 'package_id')
return True
|
Add a dataset
Args:
dataset (Union[Dataset,Dict,str]): Either a dataset id or dataset metadata from a Dataset object or a dictionary
datasets_to_check (List[Dataset]): List of datasets against which to check existence of dataset. Defaults to datasets in showcase.
Returns:
bool: True if the dataset was added, False if already present
|
juraj-google-style
|
def set_volume(percentage):
if percentage > 100 or percentage < 0:
raise ValueError('percentage must be an integer between 0 and 100')
if system.get_name() == 'windows':
pass
elif system.get_name() == 'mac':
volume_int = percentage / 10
sp.Popen(['osascript', '-e', 'set Volume %d' % volume_int]).wait()
else:
formatted = str(percentage) + '%'
sp.Popen(['amixer', '--quiet', 'sset', 'Master', formatted]).wait()
|
Set the volume.
Sets the volume to a given percentage (integer between 0 and 100).
Args:
percentage (int): The percentage (as a 0 to 100 integer) to set the volume to.
Raises:
ValueError: if the percentage is >100 or <0.
|
juraj-google-style
|
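A minimal usage sketch, assuming set_volume and its module-level helpers (system, sp) are imported from the surrounding module.

set_volume(50)          # set the master volume to 50%

try:
    set_volume(150)     # out of range: raises ValueError per the docstring
except ValueError as err:
    print(err)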
def _build_command(self, python_executable, lib_dir_fq, proxy_enabled):
exe_command = [
os.path.expanduser(python_executable),
'-m',
'pip',
'install',
'-r',
self.requirements_file,
'--ignore-installed',
'--quiet',
'--target',
lib_dir_fq,
]
if self.args.no_cache_dir:
exe_command.append('--no-cache-dir')
if proxy_enabled:
trusted_hosts = ['pypi.org', 'pypi.python.org', 'files.pythonhosted.org']
for host in trusted_hosts:
exe_command.append('--trusted-host')
exe_command.append(host)
return exe_command
|
Build the pip command for installing dependencies.
Args:
python_executable (str): The fully qualified path of the Python executable.
lib_dir_fq (str): The fully qualified path of the lib directory.
proxy_enabled (bool): If True, append --trusted-host flags for the PyPI hosts.
Returns:
list: The Python pip command with all required args.
|
juraj-google-style
|
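For illustration, the list built for a hypothetical builder instance and paths would look roughly as follows; the exact flags depend on no_cache_dir and proxy_enabled.

# `builder` is a hypothetical instance of the class this method belongs to.
cmd = builder._build_command('/usr/bin/python3', 'lib_3.8', proxy_enabled=False)
# cmd == ['/usr/bin/python3', '-m', 'pip', 'install', '-r',
#         builder.requirements_file, '--ignore-installed', '--quiet',
#         '--target', 'lib_3.8']
# '--no-cache-dir' is appended when builder.args.no_cache_dir is set, and with
# proxy_enabled=True a '--trusted-host' pair is appended for pypi.org,
# pypi.python.org and files.pythonhosted.org.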
def ParseRecord(self, parser_mediator, key, structure):
if key not in self._SUPPORTED_KEYS:
raise errors.ParseError(
'Unable to parse record, unknown structure: {0:s}'.format(key))
if key == 'chromeos_syslog_line':
date_time = dfdatetime_time_elements.TimeElementsInMicroseconds()
try:
date_time.CopyFromStringISO8601(structure.chromeos_date)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0:s}'.format(structure.chromeos_date))
return
else:
month = timelib.MONTH_DICT.get(structure.month.lower(), 0)
if month != 0:
self._UpdateYear(parser_mediator, month)
time_elements_tuple = (
self._year_use, month, structure.day, structure.hour,
structure.minute, structure.second)
try:
date_time = dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
date_time.is_local_time = True
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(time_elements_tuple))
return
plugin = None
if key == 'syslog_comment':
event_data = SyslogCommentEventData()
event_data.body = structure.body
event_data.offset = 0
else:
event_data = SyslogLineEventData()
event_data.body = structure.body
event_data.hostname = structure.hostname or None
event_data.offset = 0
event_data.pid = structure.pid
event_data.reporter = structure.reporter
event_data.severity = structure.severity
plugin = self._plugin_by_reporter.get(structure.reporter, None)
if plugin:
attributes = {
'hostname': structure.hostname,
'severity': structure.severity,
'reporter': structure.reporter,
'pid': structure.pid,
'body': structure.body}
try:
plugin.Process(parser_mediator, date_time, attributes)
except errors.WrongPlugin:
plugin = None
if not plugin:
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses a matching entry.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): name of the parsed structure.
structure (pyparsing.ParseResults): elements parsed from the file.
Raises:
ParseError: when the structure type is unknown.
|
juraj-google-style
|
def gather_initializers(root_trackable):
trackable_objects = list_objects(root_trackable)
return [c.initializer for c in trackable_objects if hasattr(c, 'initializer') and c.initializer is not None]
|
Traverse the object graph and find initialization ops.
Looks for `Trackable` objects which are dependencies of
`root_trackable` and which have an `initializer` property. Includes
initializers for slot variables only if the variable they are slotting for and
the optimizer are dependencies of `root_trackable` (i.e. if they would be
saved with a checkpoint).
Args:
root_trackable: A `Trackable` object to gather initializers for.
Returns:
A list of initialization ops.
|
github-repos
|
def get_service_account_token(request, service_account='default'):
token_json = get(request, 'instance/service-accounts/{0}/token'.format(service_account))
token_expiry = (_helpers.utcnow() + datetime.timedelta(seconds=token_json['expires_in']))
return (token_json['access_token'], token_expiry)
|
Get the OAuth 2.0 access token for a service account.
Args:
request (google.auth.transport.Request): A callable used to make
HTTP requests.
service_account (str): The string 'default' or a service account email
address. This determines which service account to acquire an
access token for.
Returns:
Tuple[str, datetime]: The access token and its expiration.
Raises:
google.auth.exceptions.TransportError: if an error occurred while
retrieving metadata.
|
codesearchnet
|
def GetAttribute(self, identifier):
if (not self._is_parsed):
self._Parse()
self._is_parsed = True
if (identifier not in self._attributes):
return None
return self._attributes[identifier]
|
Retrieves a specific attribute.
Args:
identifier (str): identifier of the attribute within the volume.
Returns:
VolumeAttribute: volume attribute or None if not available.
|
codesearchnet
|
def visit_ImportFrom(self, node):
if not node.module:
self.generic_visit(node)
return
from_import = node.module
from_import_first_component = from_import.split('.')[0]
import_renames = getattr(self._api_change_spec, 'import_renames', {})
import_rename_spec = import_renames.get(from_import_first_component, None)
if not import_rename_spec:
self.generic_visit(node)
return
updated_aliases = []
same_aliases = []
for import_alias in node.names:
full_module_name = '%s.%s' % (from_import, import_alias.name)
if excluded_from_module_rename(full_module_name, import_rename_spec):
same_aliases.append(import_alias)
else:
updated_aliases.append(import_alias)
if not updated_aliases:
self.generic_visit(node)
return
assert self._stack[-1] is node
parent = self._stack[-2]
new_from_import = import_rename_spec.new_name + from_import[len(from_import_first_component):]
updated_node = ast.ImportFrom(new_from_import, updated_aliases, node.level)
ast.copy_location(updated_node, node)
pasta.ast_utils.replace_child(parent, node, updated_node)
additional_import_log = ''
if same_aliases:
same_node = ast.ImportFrom(from_import, same_aliases, node.level, col_offset=node.col_offset, lineno=node.lineno)
ast.copy_location(same_node, node)
parent.body.insert(parent.body.index(updated_node), same_node)
pasta.base.formatting.set(same_node, 'prefix', pasta.base.formatting.get(updated_node, 'prefix'))
additional_import_log = ' and %r' % pasta.dump(same_node)
self.add_log(INFO, node.lineno, node.col_offset, 'Changed import from %r to %r%s.' % (pasta.dump(node), pasta.dump(updated_node), additional_import_log))
self.generic_visit(node)
|
Handle visiting an import-from node in the AST.
Args:
node: Current Node
|
github-repos
|
def simple_balance(self, as_of=None, raw=False, leg_query=None, **kwargs):
legs = self.legs
if as_of:
legs = legs.filter(transaction__date__lte=as_of)
if (leg_query or kwargs):
leg_query = (leg_query or models.Q())
legs = legs.filter(leg_query, **kwargs)
return ((legs.sum_to_balance() * (1 if raw else self.sign)) + self._zero_balance())
|
Get the balance for this account, ignoring all child accounts
Args:
as_of (Date): Only include transactions on or before this date
raw (bool): If true the returned balance should not have its sign
adjusted for display purposes.
leg_query (models.Q): Django Q-expression used to filter the transaction legs;
allows for more complex filtering than that provided by **kwargs.
kwargs (dict): Will be used to filter the transaction legs
Returns:
Balance
|
codesearchnet
|
def from_api_repr(cls, resource):
from google.cloud.bigquery.dataset import DatasetReference
project = resource["projectId"]
dataset_id = resource["datasetId"]
table_id = resource["tableId"]
return cls(DatasetReference(project, dataset_id), table_id)
|
Factory: construct a table reference given its API representation
Args:
resource (Dict[str, object]):
Table reference representation returned from the API
Returns:
google.cloud.bigquery.table.TableReference:
Table reference parsed from ``resource``.
|
juraj-google-style
|
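A usage sketch with a hand-built resource dict; the identifiers are hypothetical, and TableReference is assumed to be google.cloud.bigquery.table.TableReference as the docstring indicates.

from google.cloud.bigquery.table import TableReference

resource = {
    'projectId': 'my-project',   # hypothetical identifiers
    'datasetId': 'my_dataset',
    'tableId': 'my_table',
}
table_ref = TableReference.from_api_repr(resource)
print(table_ref)  # the parsed reference carries the project, dataset and table ids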
def __init__(self, func, lower_control_flow, aggressive_inlining, variable_names_allowlist=None, variable_names_denylist=None):
self._func = func
graph_def = _run_inline_graph_optimization(func, lower_control_flow, aggressive_inlining)
super(_FunctionConverterData, self).__init__(graph_def, variable_names_allowlist=variable_names_allowlist, variable_names_denylist=variable_names_denylist)
self._build_tensor_data()
|
Creates the conversion data for the given function.
Args:
func: ConcreteFunction.
lower_control_flow: Boolean indicating whether or not to lower control
flow ops such as If and While.
aggressive_inlining: Boolean indicating whether or not to do aggressive
function inlining (might be unsafe if function has stateful ops, not
properly connected to control outputs).
variable_names_allowlist: The set of variable names to convert (by
default, all variables are converted).
variable_names_denylist: The set of variable names to omit converting to
constants.
|
github-repos
|
def delete_public_ip(access_token, subscription_id, resource_group, public_ip_name):
endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Network/publicIPAddresses/', public_ip_name, '?api-version=', NETWORK_API])
return do_delete(endpoint, access_token)
|
Delete a public ip addresses associated with a resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
public_ip_name (str): Name of the public ip address resource.
Returns:
HTTP response.
|
codesearchnet
|
def get_available_storage_system(self, **kwargs):
uri = self._helper.build_uri_with_query_string(kwargs, '/available-storage-system')
return self._helper.do_get(uri)
|
Retrieves a specific storage system and its associated volumes available to the server profile based
on the given server hardware type and enclosure group.
Args:
enclosureGroupUri (str):
The URI of the enclosure group associated with the resource.
serverHardwareTypeUri (str):
The URI of the server hardware type associated with the resource.
storageSystemId (str):
The storage system ID associated with the resource.
Returns:
dict: Available storage system.
|
codesearchnet
|
def __init__(self, minimum=None, maximum=None):
super(FloatTypeChecker, self).__init__(base_type=float)
self.minimum = minimum
self.maximum = maximum
|
Initialization method.
Args:
minimum (float): a minimum value (included).
maximum (float): a maximum value (included).
|
juraj-google-style
|
def _update_job_info(cls, job_dir):
meta_file = os.path.join(job_dir, JOB_META_FILE)
meta = parse_json(meta_file)
if meta:
logging.debug(('Update job info for %s' % meta['job_id']))
JobRecord.objects.filter(job_id=meta['job_id']).update(end_time=timestamp2date(meta['end_time']))
|
Update information for given job.
Meta file will be loaded if it exists, and the job information in
the db backend will be updated.
Args:
job_dir (str): Directory path of the job.
Return:
Updated dict of job meta info
|
codesearchnet
|
def load_profile_include(self, include_directory):
include_directory = os.path.join(self.app_path, include_directory)
if not os.path.isdir(include_directory):
msg = 'Provided include directory does not exist ({}).'.format(include_directory)
sys.exit(msg)
for filename in sorted(os.listdir(include_directory)):
if filename.endswith('.json'):
fqfn = os.path.join(include_directory, filename)
self.load_profiles_from_file(fqfn)
|
Load included configuration files.
Args:
include_directory (str): The path of the profile include directory.
|
juraj-google-style
|
def _GetAxisFromLabel(subscripts, label):
splits = subscripts.split(ellipsis)
index = splits[0].find(label)
if index != -1:
return index
if len(splits) < 2:
return None
index = splits[1].find(label)
if index != -1:
return index - len(splits[1])
return None
|
Returns the axis (possibly negative) corresponding to a label.
Returns the axis index of the axis label if it is before an ellipsis (or if
the ellipsis is not present), and the negative index if it occurs after the
ellipsis. E.g. index of `b` in `ab...cd`, is `1`, but that of `c` is `-2`.
For multiple occurrences, returns the leftmost one. If not found, returns
None.
Args:
subscripts: A string denoting the einsum subscript (e.g. `ab...cd`)
label: The single character axis label.
|
github-repos
|
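The docstring example written out as checks; this assumes the module-level ellipsis constant used by the helper is the string '...'.

assert _GetAxisFromLabel('ab...cd', 'b') == 1     # before the ellipsis
assert _GetAxisFromLabel('ab...cd', 'c') == -2    # after the ellipsis
assert _GetAxisFromLabel('ab...cd', 'z') is None  # label not present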
def setup(self, host, flow_id, reason, grr_server_url, grr_username, grr_password, approvers=None, verify=True):
super(GRRFlowCollector, self).setup(reason, grr_server_url, grr_username, grr_password, approvers=approvers, verify=verify)
self.flow_id = flow_id
self.host = host
|
Initializes a GRR flow collector.
Args:
host: hostname of machine.
flow_id: ID of GRR flow to retrieve.
reason: justification for GRR access.
grr_server_url: GRR server URL.
grr_username: GRR username.
grr_password: GRR password.
approvers: list of GRR approval recipients.
verify: boolean, whether to verify the GRR server's x509 certificate.
|
codesearchnet
|
def ContainsAll(self, *values):
self._awql = self._CreateMultipleValuesCondition(values, 'CONTAINS_ALL')
return self._query_builder
|
Sets the type of the WHERE clause as "contains all".
Args:
*values: The values to be used in the WHERE condition.
Returns:
The query builder that this WHERE builder links to.
|
codesearchnet
|
def Write(self, output_writer):
if self._title and len(self._title) > self._MAXIMUM_WIDTH:
raise RuntimeError('Title length out of bounds.')
if self._number_of_columns not in (0, 2):
raise RuntimeError('Unsupported number of columns: {0:d}.'.format(
self._number_of_columns))
if self._column_width < 0 or self._column_width >= self._MAXIMUM_WIDTH:
raise RuntimeError('Column width out of bounds.')
output_writer.Write('\n')
self._WriteHeader(output_writer)
if self._columns:
self._WriteRow(output_writer, self._columns)
self._WriteSeparatorLine(output_writer)
for values in self._rows:
self._WriteRow(output_writer, values)
self._WriteSeparatorLine(output_writer)
|
Writes the table to the output writer.
Args:
output_writer (OutputWriter): output writer.
Raises:
RuntimeError: if the title exceeds the maximum width or
if the table has more than 2 columns or
if the column width is out of bounds.
|
juraj-google-style
|
def convert_unicode(value):
if isinstance(value, dict):
return {convert_unicode(key): convert_unicode(value) for (key, value) in value.iteritems()}
elif isinstance(value, list):
return [convert_unicode(item) for item in value]
elif isinstance(value, unicode):
return value.encode('utf-8')
else:
return value
|
Resolves a Python 2 issue where json loads strings as unicode instead of str
Args:
value (str): Unicode value to be converted
Returns:
(str): converted string
|
codesearchnet
|
def get_average_along_axis(self, ind):
m = self.data["total"]
ng = self.dim
if ind == 0:
total = np.sum(np.sum(m, axis=1), 1)
elif ind == 1:
total = np.sum(np.sum(m, axis=0), 1)
else:
total = np.sum(np.sum(m, axis=0), 0)
return total / ng[(ind + 1) % 3] / ng[(ind + 2) % 3]
|
Get the averaged total of the volumetric data along a certain axis direction.
For example, useful for visualizing Hartree Potentials from a LOCPOT
file.
Args:
ind (int): Index of axis.
Returns:
Average total along axis
|
juraj-google-style
|
def __init__(self, builtins, full_names=True, allow_singletons=False):
super().__init__(allow_singletons)
self._builtins = builtins
self._full_names = full_names
|
Create this visitor.
Args:
builtins: The builtins module.
full_names: Whether to use fully qualified names for lookup.
allow_singletons: Whether to allow singleton types like Ellipsis.
|
github-repos
|
def _aggregate_gradients(self, grads_and_vars):
return self.gradient_aggregator(grads_and_vars)
|
Called in `apply_gradients` to aggregate gradients across devices.
Note that user subclasses may override this, so the interface should not be
changed.
Args:
grads_and_vars: List of (gradient, variable) pairs.
Returns:
A list of (aggregated_gradient, variable) pairs. By default, this calls
`self.gradient_aggregator`.
|
github-repos
|
def format_plugins(plugins):
formatted = []
for plugin_ in plugins:
formatted_plugin = format_plugin(plugin_)
formatted.append(formatted_plugin)
return formatted
|
Serialise multiple plug-ins
Returns:
List of JSON-compatible plug-ins
|
codesearchnet
|
def getFilepaths(self, filename):
return (os.path.join(os.environ['HOME'], filename), os.path.join(self.mackup.mackup_folder, filename))
|
Get home and mackup filepaths for the given file
Args:
filename (str)
Returns:
home_filepath, mackup_filepath (str, str)
|
codesearchnet
|
def check_syntax(self, app_path=None):
app_path = (app_path or '.')
for filename in sorted(os.listdir(app_path)):
error = None
status = True
if filename.endswith('.py'):
try:
with open(filename, 'rb') as f:
ast.parse(f.read(), filename=filename)
except SyntaxError:
status = False
e = []
for line in traceback.format_exc().split('\n')[(- 5):(- 2)]:
e.append(line.strip())
error = ' '.join(e)
elif filename.endswith('.json'):
try:
with open(filename, 'r') as fh:
json.load(fh)
except ValueError as e:
status = False
error = e
else:
continue
if error:
self.validation_data['errors'].append('Syntax validation failed for {} ({}).'.format(filename, error))
self.validation_data['fileSyntax'].append({'filename': filename, 'status': status})
|
Run syntax validation on each ".py" and ".json" file.
Args:
app_path (str, optional): Defaults to None. The path of Python files.
|
codesearchnet
|
def _filter_and_bucket_subtokens(subtoken_counts, min_count):
subtoken_buckets = []
for (subtoken, count) in six.iteritems(subtoken_counts):
if (count < min_count):
continue
while (len(subtoken_buckets) <= len(subtoken)):
subtoken_buckets.append(set())
subtoken_buckets[len(subtoken)].add(subtoken)
return subtoken_buckets
|
Return a bucketed list of subtokens that are filtered by count.
Args:
subtoken_counts: defaultdict mapping subtokens to their counts
min_count: int count used to filter subtokens
Returns:
List of subtoken sets, where subtokens in set i have the same length=i.
|
codesearchnet
|
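A small worked example with counts chosen for illustration; the bucketing follows directly from the loop above.

import collections

counts = collections.defaultdict(int, {'a': 5, 'ab': 1, 'abc': 3})
buckets = _filter_and_bucket_subtokens(counts, min_count=2)
# 'ab' is dropped (count < 2); the surviving subtokens are grouped by length:
# buckets == [set(), {'a'}, set(), {'abc'}]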
def normalize_collaboration(collaboration):
if not collaboration:
return []
collaboration = collaboration.strip()
if collaboration.startswith('(') and collaboration.endswith(')'):
collaboration = collaboration[1:-1]
collaborations = _RE_AND.split(collaboration)
collaborations = (_RE_COLLABORATION_LEADING.sub('', collab)
for collab in collaborations)
collaborations = (_RE_COLLABORATION_TRAILING.sub('', collab)
for collab in collaborations)
return [collab.strip() for collab in collaborations]
|
Normalize collaboration string.
Args:
collaboration: a string containing collaboration(s) or None
Returns:
list: List of extracted and normalized collaborations
Examples:
>>> from inspire_schemas.utils import normalize_collaboration
>>> normalize_collaboration('for the CMS and ATLAS Collaborations')
['CMS', 'ATLAS']
|
juraj-google-style
|
def CreateSourceType(cls, type_indicator, attributes):
if (type_indicator not in cls._source_type_classes):
raise errors.FormatError('Unsupported type indicator: {0:s}.'.format(type_indicator))
return cls._source_type_classes[type_indicator](**attributes)
|
Creates a source type.
Args:
type_indicator (str): source type indicator.
attributes (dict[str, object]): source type attributes.
Returns:
SourceType: a source type.
Raises:
FormatError: if the type indicator is not set or unsupported,
or if required attributes are missing.
|
codesearchnet
|
def get_dimension_index(self, dimension):
if isinstance(dimension, int):
if (dimension < (self.ndims + len(self.vdims)) or
dimension < len(self.dimensions())):
return dimension
else:
return IndexError('Dimension index out of bounds')
dim = dimension_name(dimension)
try:
dimensions = self.kdims+self.vdims
return [i for i, d in enumerate(dimensions) if d == dim][0]
except IndexError:
raise Exception("Dimension %s not found in %s." %
(dim, self.__class__.__name__))
|
Get the index of the requested dimension.
Args:
dimension: Dimension to look up by name or by index
Returns:
Integer index of the requested dimension
|
juraj-google-style
|
def extract_lookups_from_string(value):
lookups = set()
for match in LOOKUP_REGEX.finditer(value):
groupdict = match.groupdict()
raw = match.groups()[0]
lookup_type = groupdict["type"]
lookup_input = groupdict["input"]
lookups.add(Lookup(lookup_type, lookup_input, raw))
return lookups
|
Extract any lookups within a string.
Args:
value (str): string value we're extracting lookups from
Returns:
set: set of :class:`stacker.lookups.Lookup` if any
|
juraj-google-style
|
def verify_ed25519_signature(public_key, contents, signature, message):
try:
public_key.verify(signature, contents)
except InvalidSignature as exc:
raise ScriptWorkerEd25519Error((message % {'exc': str(exc)}))
|
Verify that ``signature`` comes from ``public_key`` and ``contents``.
Args:
public_key (Ed25519PublicKey): the key to verify the signature
contents (bytes): the contents that was signed
signature (bytes): the signature to verify
message (str): the error message to raise.
Raises:
ScriptWorkerEd25519Error: on failure
|
codesearchnet
|
def ch_stop_time(self, *channels: List[Channel]) -> int:
intervals = list(itertools.chain(*(self._table[chan] for chan in channels if (chan in self._table))))
if intervals:
return max((interval.end for interval in intervals))
return 0
|
Return maximum time of timeslots over all channels.
Args:
*channels: Channels over which to obtain stop time.
|
codesearchnet
|
def create_test_record(self, mobly_test_class):
details = self._get_details()
extras = self._get_extras()
tr_record = records.TestResultRecord(t_name=self._get_full_name(), t_class=mobly_test_class)
if self._begin_time:
tr_record.begin_time = self._begin_time
if self._is_failed():
tr_record.test_fail(e=signals.TestFailure(details=details, extras=extras))
elif self._status_code in _InstrumentationStatusCodeCategories.SKIPPED:
tr_record.test_skip(e=signals.TestSkip(details=details, extras=extras))
elif self._status_code in _InstrumentationStatusCodeCategories.PASS:
tr_record.test_pass(e=signals.TestPass(details=details, extras=extras))
elif self._status_code in _InstrumentationStatusCodeCategories.TIMING:
if self._error_message:
tr_record.test_error(e=signals.TestError(details=details, extras=extras))
else:
tr_record = None
else:
tr_record.test_error(e=signals.TestError(details=details, extras=extras))
if self._known_keys[_InstrumentationKnownStatusKeys.STACK]:
tr_record.termination_signal.stacktrace = self._known_keys[_InstrumentationKnownStatusKeys.STACK]
return tr_record
|
Creates a TestResultRecord for the instrumentation block.
Args:
mobly_test_class: string, the name of the Mobly test case
executing the instrumentation run.
Returns:
A TestResultRecord with an appropriate signals exception
representing the instrumentation test method's result status.
|
github-repos
|
def do_keygen(args):
if args.key_name is not None:
key_name = args.key_name
else:
key_name = 'validator'
key_dir = get_key_dir()
if not os.path.exists(key_dir):
raise CliException("Key directory does not exist: {}".format(key_dir))
priv_filename = os.path.join(key_dir, key_name + '.priv')
pub_filename = os.path.join(key_dir, key_name + '.pub')
if not args.force:
file_exists = False
for filename in [priv_filename, pub_filename]:
if os.path.exists(filename):
file_exists = True
print('file exists: {}'.format(filename), file=sys.stderr)
if file_exists:
raise CliException(
'files exist, rerun with --force to overwrite existing files')
context = create_context('secp256k1')
private_key = context.new_random_private_key()
public_key = context.get_public_key(private_key)
try:
priv_exists = os.path.exists(priv_filename)
with open(priv_filename, 'w') as priv_fd:
if not args.quiet:
if priv_exists:
print('overwriting file: {}'.format(priv_filename))
else:
print('writing file: {}'.format(priv_filename))
priv_fd.write(private_key.as_hex())
priv_fd.write('\n')
keydir_info = os.stat(key_dir)
keydir_gid = keydir_info.st_gid
keydir_uid = keydir_info.st_uid
os.chown(priv_filename, keydir_uid, keydir_gid)
os.chmod(priv_filename, 0o640)
pub_exists = os.path.exists(pub_filename)
with open(pub_filename, 'w') as pub_fd:
if not args.quiet:
if pub_exists:
print('overwriting file: {}'.format(pub_filename))
else:
print('writing file: {}'.format(pub_filename))
pub_fd.write(public_key.as_hex())
pub_fd.write('\n')
os.chown(pub_filename, keydir_uid, keydir_gid)
os.chmod(pub_filename, 0o644)
except IOError as ioe:
raise CliException('IOError: {}'.format(str(ioe)))
|
Executes the key generation operation, given the parsed arguments.
Args:
args (:obj:`Namespace`): The parsed args.
|
juraj-google-style
|
def pop_chunk(self, chunk_max_size):
if (self._total_length < chunk_max_size):
res = self._tobytes()
self.clear()
return res
first_iteration = True
while True:
try:
data = self._deque.popleft()
data_length = len(data)
self._total_length -= data_length
if first_iteration:
if (data_length == chunk_max_size):
return data
elif (data_length > chunk_max_size):
view = self._get_pointer_or_memoryview(data, data_length)
self.appendleft(view[chunk_max_size:])
return view[:chunk_max_size]
else:
chunk_write_buffer = WriteBuffer()
elif ((chunk_write_buffer._total_length + data_length) > chunk_max_size):
view = self._get_pointer_or_memoryview(data, data_length)
limit = ((chunk_max_size - chunk_write_buffer._total_length) - data_length)
self.appendleft(view[limit:])
data = view[:limit]
chunk_write_buffer.append(data)
if (chunk_write_buffer._total_length >= chunk_max_size):
break
except IndexError:
self._has_view = False
break
first_iteration = False
return chunk_write_buffer._tobytes()
|
Pops a chunk of the given max size.
Optimized to avoid too many string copies.
Args:
chunk_max_size (int): max size of the returned chunk.
Returns:
string (bytes) with a size <= chunk_max_size.
|
codesearchnet
|
def random(length: int=8, chars: str=(digits + ascii_lowercase)) -> Iterator[str]:
while True:
(yield ''.join([choice(chars) for _ in range(length)]))
|
A random string.
Not unique, but has around 1 in a million chance of collision (with the default 8
character length). e.g. 'fubui5e6'
Args:
length: Length of the random string.
chars: The characters to randomly choose from.
|
codesearchnet
|
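A short usage sketch for the generator above; it assumes the module-level imports of choice, digits and ascii_lowercase shown in the signature.

from itertools import islice

gen = random()                    # default: 8 chars from digits + lowercase letters
token = next(gen)                 # e.g. 'fubui5e6'
five_tokens = list(islice(gen, 5))

hex_gen = random(length=16, chars='0123456789abcdef')
print(next(hex_gen), token, five_tokens)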
def _get_row_partition_type_tensor_pairs_tail(partition):
if partition._has_precomputed_value_rowids():
return ('VALUE_ROWIDS', partition.value_rowids())
else:
return ('ROW_SPLITS', partition.row_splits())
|
Gets a row partition type tensor pair for the tail.
If value_rowids is defined, then it is used. Otherwise, row_splits
are used.
Args:
partition: a RowPartition.
Returns:
A (row_partition_type, row_partition_tensor) pair.
|
github-repos
|
def _check_zero_size(self):
block_zero = ((self.end[0] <= self.start[0]) or (self.end[1] <= self.start[1]))
if block_zero:
self.flag_change(self.flags, 'fatal', worksheet=self.worksheet, message=self.FLAGS['0-size'])
return block_zero
|
Checks for zero height or zero width blocks and flags the occurrence.
Returns:
True if the block is size 0.
|
codesearchnet
|
def add_to_cache(cls, remote_info, container):
if not isinstance(container, cls):
raise TypeError('%r not an instance of %r, could not be added to cache.' %
(container, cls))
if remote_info in cls.__remote_info_cache:
raise KeyError('Cache has collision but should not.')
cls.__remote_info_cache[remote_info] = container
|
Adds a ResourceContainer to a cache tying it to a protorpc method.
Args:
remote_info: Instance of protorpc.remote._RemoteMethodInfo corresponding
to a method.
container: An instance of ResourceContainer.
Raises:
TypeError: if the container is not an instance of cls.
KeyError: if the remote method has been reference by a container before.
This should never occur because a remote method is created only once.
|
juraj-google-style
|
def __init__(self, permissive=True):
self._journal_contents = ''
self._init_journal(permissive=permissive)
|
Initialize the journal maker object.
Appends the first lines in the journal (JrnObj variable and timestamp)
to the _journal_contents.
Args:
permissive (bool): if True most errors in journal will not
cause Revit to stop journal execution.
Some still do.
|
juraj-google-style
|
def transmute(df, *keep_columns, **kwargs):
keep_cols = []
for col in flatten(keep_columns):
try:
keep_cols.append(col.name)
except:
if isinstance(col, str):
keep_cols.append(col)
elif isinstance(col, int):
keep_cols.append(df.columns[col])
df = df.assign(**kwargs)
columns = ([k for k in kwargs.keys()] + list(keep_cols))
return df[columns]
|
Creates columns and then returns those new columns and optionally specified
original columns from the DataFrame.
This works like `mutate`, but designed to discard the original columns used
to create the new ones.
Args:
*keep_columns: Column labels to keep. Can be string, symbolic, or
integer position.
Kwargs:
**kwargs: keys are the names of the new columns, values indicate
what the new column values will be.
Example:
diamonds >> transmute(x_plus_y=X.x + X.y, y_div_z=(X.y / X.z)) >> head(3)
y_div_z x_plus_y
0 1.637860 7.93
1 1.662338 7.73
2 1.761905 8.12
|
codesearchnet
|
def _build_rule_message(self, column: str, rule: str, error: str, value: Any, rule_params: dict={}) -> LogMessage:
return self._base_log.copy() | LogMessage(log_type=LogType.RULE.value, column=column, rule=rule, error=error, value=value, rule_params=json.dumps(rule_params))
|
Adds rule error information to base log message.
Args:
* column: column where the rule is applied
* rule: rule that is violated and raises this message
* error: error message describing the violation
* value: value that violates the rule
* rule_params: optional, parameters set for the rule
Returns:
* log: LogMessage dictionary
|
github-repos
|
def _enter_scope_uncached(self):
if self._auxiliary_name_scope:
current_name_scope = None
else:
name_scope = ops.get_name_scope()
if name_scope:
name_scope += '/'
current_name_scope = ops.name_scope(name_scope, skip_on_eager=False)
else:
current_name_scope = ops.name_scope(name_scope, skip_on_eager=False)
if self._name_or_scope is not None:
if not isinstance(self._name_or_scope, (VariableScope, str)):
raise TypeError('VariableScope: name_or_scope must be a string or VariableScope.')
if isinstance(self._name_or_scope, str):
name_scope = self._name_or_scope
else:
name_scope = self._name_or_scope.name.split('/')[-1]
if name_scope or current_name_scope:
current_name_scope = current_name_scope or ops.name_scope(name_scope, skip_on_eager=False)
try:
current_name_scope_name = current_name_scope.__enter__()
except:
current_name_scope.__exit__(*sys.exc_info())
raise
self._current_name_scope = current_name_scope
if isinstance(self._name_or_scope, str):
old_name_scope = current_name_scope_name
else:
old_name_scope = self._name_or_scope.original_name_scope
pure_variable_scope = _pure_variable_scope(self._name_or_scope, reuse=self._reuse, initializer=self._initializer, regularizer=self._regularizer, caching_device=self._caching_device, partitioner=self._partitioner, custom_getter=self._custom_getter, old_name_scope=old_name_scope, dtype=self._dtype, use_resource=self._use_resource, constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
else:
self._current_name_scope = None
pure_variable_scope = _pure_variable_scope(self._name_or_scope, reuse=self._reuse, initializer=self._initializer, regularizer=self._regularizer, caching_device=self._caching_device, partitioner=self._partitioner, custom_getter=self._custom_getter, dtype=self._dtype, use_resource=self._use_resource, constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
else:
if self._reuse:
raise ValueError('reuse=True cannot be used without a name_or_scope')
current_name_scope = current_name_scope or ops.name_scope(self._default_name, skip_on_eager=False)
try:
current_name_scope_name = current_name_scope.__enter__()
except:
current_name_scope.__exit__(*sys.exc_info())
raise
self._current_name_scope = current_name_scope
unique_default_name = _get_unique_variable_scope(self._default_name)
pure_variable_scope = _pure_variable_scope(unique_default_name, initializer=self._initializer, regularizer=self._regularizer, caching_device=self._caching_device, partitioner=self._partitioner, custom_getter=self._custom_getter, old_name_scope=current_name_scope_name, dtype=self._dtype, use_resource=self._use_resource, constraint=self._constraint)
try:
entered_pure_variable_scope = pure_variable_scope.__enter__()
except:
pure_variable_scope.__exit__(*sys.exc_info())
raise
self._cached_pure_variable_scope = pure_variable_scope
return entered_pure_variable_scope
|
Enters the context manager when there is no cached scope yet.
Returns:
The entered variable scope.
Raises:
TypeError: A wrong type is passed as `scope` at __init__().
ValueError: `reuse` is incorrectly set at __init__().
|
github-repos
|
def evaluate(estimator, eval_args):
values = {}
checkpoint_path = estimator.latest_checkpoint()
if (not checkpoint_path):
return values
tf.logging.info('Starting evaluation on checkpoint %s', checkpoint_path)
for eval_name in eval_args:
(input_fn, eval_steps) = eval_args[eval_name]
metric_values = estimator.evaluate(input_fn, steps=eval_steps, name=eval_name, checkpoint_path=checkpoint_path)
for (key, val) in metric_values.iteritems():
values[((eval_name + '/') + key)] = val
tf.logging.info(values)
return values
|
Runs evaluation on the latest model checkpoint & logs to tensorboard.
Args:
estimator: A tf.Estimator object.
eval_args: Dictionary of {eval_name: (input_fn, eval_steps)} where eval_name
is the name of the evaluation set, e.g. "train" or "val", input_fn is an
input function returning a tuple (features, labels), and eval_steps is the
number of steps for which to evaluate the model. If None, evaluates until
input_fn raises an end-of-input exception.
Returns:
A dict of metric values from the evaluation. May be empty, e.g. if the
training job has not yet saved a checkpoint or the checkpoint is deleted by
the time the TPU worker initializes.
|
codesearchnet
|
def train_fn(data_dir=None, output_dir=None, model_class=gin.REQUIRED, dataset=gin.REQUIRED, input_names=None, target_names=None, train_steps=1000, eval_steps=1, eval_frequency=100):
(train_data, eval_data, features_info, keys) = train_and_eval_dataset(dataset, data_dir)
if (input_names is None):
input_names = keys[0]
if (target_names is None):
target_names = keys[1]
model = model_class(features_info=features_info, input_names=input_names, target_names=target_names)
optimize_fn(model)
train_batches = shuffle_and_batch_data(train_data, target_names, features_info, training=True)
eval_batches = shuffle_and_batch_data(eval_data, target_names, features_info, training=False)
model.fit(train_batches, epochs=1, steps_per_epoch=1)
callbacks = []
callbacks.append(tf.keras.callbacks.History())
callbacks.append(tf.keras.callbacks.BaseLogger())
last_epoch = 0
if (output_dir is not None):
callbacks.append(tf.keras.callbacks.TensorBoard(log_dir=output_dir))
output_format = os.path.join(output_dir, 'model-{epoch:05d}')
callbacks.append(tf.keras.callbacks.ModelCheckpoint(filepath=output_format, save_weights_only=True))
checkpoints = tf.gfile.Glob(os.path.join(output_dir, 'model-*'))
checkpoints = [os.path.basename(ckpt)[6:] for ckpt in checkpoints]
epoch_numbers = [int(ckpt[:5]) for ckpt in checkpoints if (len(ckpt) > 4)]
epoch_numbers.sort()
if epoch_numbers:
last_epoch = epoch_numbers[(- 1)]
saved_path = os.path.join(output_dir, ('model-%05d' % last_epoch))
model.load_weights(saved_path)
model.fit(train_batches, epochs=(train_steps
|
Train the given model on the given dataset.
Args:
data_dir: Directory where the data is located.
output_dir: Directory where to put the logs and checkpoints.
model_class: The model class to train.
dataset: The name of the dataset to train on.
input_names: List of strings with the names of the features on input.
target_names: List of strings with the names of the target features.
train_steps: for how many steps to train.
eval_steps: for how many steps to do evaluation.
eval_frequency: how often (every this many steps) to run evaluation.
|
codesearchnet
|
def internal_link_sets(self):
if (not self.__internal_link_sets):
self.__internal_link_sets = InternalLinkSets(self.__connection)
return self.__internal_link_sets
|
Gets the InternalLinkSets API client.
Returns:
InternalLinkSets:
|
codesearchnet
|
def parse_section_links(self, section_title):
soup = BeautifulSoup(self.html, "html.parser")
headlines = soup.find_all("span", {"class": "mw-headline"})
tmp_soup = BeautifulSoup(section_title, "html.parser")
tmp_sec_title = tmp_soup.get_text().lower()
id_tag = None
for headline in headlines:
tmp_id = headline.text
if tmp_id.lower() == tmp_sec_title:
id_tag = headline.get("id")
break
if id_tag is not None:
return self._parse_section_links(id_tag)
return None
|
Parse all links within a section
Args:
section_title (str): Name of the section to pull
Returns:
list: List of (title, url) tuples
Note:
Returns **None** if section title is not found
Note:
Side effect is to also pull the html which can be slow
Note:
This is a parsing operation and not part of the standard API
|
juraj-google-style
|
def init_variable(v, init, name='init'):
with ops.name_scope(None, v.op.name + '/', [v, init]):
with ops.name_scope(name) as scope:
with ops.colocate_with(v):
if callable(init):
assert v.get_shape().is_fully_defined(), 'Variable shape unknown.'
value = init(v.get_shape().as_list(), v.dtype.base_dtype)
value = ops.convert_to_tensor(value, name='value')
return gen_state_ops.assign(v, value, name=scope)
else:
init = ops.convert_to_tensor(init, name='init')
return gen_state_ops.assign(v, init, name=scope)
|
Initializes variable with "init".
This op does the following:
if init is a Tensor, v = init
if callable(init): v = init(VariableShape(v), v.dtype)
Args:
v: Variable to initialize
init: Tensor to assign to v,
Or an object convertible to Tensor e.g. nparray,
Or an Initializer that generates a tensor given the shape and type of v.
An "Initializer" is a callable that returns a tensor that "v" should be
set to. It will be called as init(shape, dtype).
name: Optional name for the op.
Returns:
The operation that initializes v.
|
github-repos
|
def rouge_l_fscore(predictions, labels):
outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),
tf.float32)
return rouge_l_f_score, tf.constant(1.0)
|
ROUGE scores computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
predictions: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge_l_fscore: approx rouge-l f1 score.
|
juraj-google-style
|
def blend(self, other, percent=0.5):
dest = 1.0 - percent
rgb = tuple(((u * percent) + (v * dest) for u, v in zip(self.__rgb, other.__rgb)))
a = (self.__a * percent) + (other.__a * dest)
return Color(rgb, 'rgb', a, self.__wref)
|
blend this color with the other one.
Args:
:other:
the grapefruit.Color to blend with this one.
Returns:
A grapefruit.Color instance which is the result of blending
this color on the other one.
>>> c1 = Color.from_rgb(1, 0.5, 0, 0.2)
>>> c2 = Color.from_rgb(1, 1, 1, 0.6)
>>> c3 = c1.blend(c2)
>>> c3
Color(1.0, 0.75, 0.5, 0.4)
|
juraj-google-style
|
def _add_to_quick_menu(self, key, wf):
if key in settings.QUICK_MENU:
self.output['quick_menu'].append(wf)
|
Appends menu entries to dashboard quickmenu according
to :attr:`zengine.settings.QUICK_MENU`
Args:
key: workflow name
wf: workflow menu entry
|
juraj-google-style
|
def hr_dp004(self, value=None):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `hr_dp004`'.format(value))
self._hr_dp004 = value
|
Corresponds to IDD Field `hr_dp004`
humidity ratio corresponding to
Dew-point temperature corresponding to 0.4% annual cumulative frequency of occurrence
Args:
value (float): value for IDD Field `hr_dp004`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|
def variable_created_in_scope(self, v):
return v._distribute_strategy == self._container_strategy_weakref()
|
Tests whether `v` was created while this strategy scope was active.
Variables created inside the strategy scope are "owned" by it:
>>> strategy = tf.distribute.MirroredStrategy()
>>> with strategy.scope():
... v = tf.Variable(1.)
>>> strategy.extended.variable_created_in_scope(v)
True
Variables created outside the strategy are not owned by it:
>>> strategy = tf.distribute.MirroredStrategy()
>>> v = tf.Variable(1.)
>>> strategy.extended.variable_created_in_scope(v)
False
Args:
v: A `tf.Variable` instance.
Returns:
True if `v` was created inside the scope, False if not.
|
github-repos
|
def register_rml(self, filepath, **kwargs):
name = os.path.split(filepath)[(- 1)]
if ((name in self.rml_maps) and (self.rml_maps[name] != filepath)):
raise Exception('RML name already registered. Filenames must be unique.', (self.rml_maps[name], filepath))
self.rml_maps[name] = filepath
|
Registers the filepath for an rml mapping
Args:
-----
filepath: the path to the rml file
|
codesearchnet
|
def config_conf_section():
config_dict = OrderedDict((('create', ConfOpt(None, True, None, {'action': 'store_true'}, False, 'create most global config file')), ('create_local', ConfOpt(None, True, None, {'action': 'store_true'}, False, 'create most local config file')), ('update', ConfOpt(None, True, None, {'action': 'store_true'}, False, 'add missing entries to config file')), ('edit', ConfOpt(None, True, None, {'action': 'store_true'}, False, 'open config file in a text editor')), ('editor', ConfOpt('vim', False, None, {}, True, 'text editor'))))
return config_dict
|
Define a configuration section handling config file.
Returns:
dict of ConfOpt: it defines the 'create', 'update', 'edit' and 'editor'
configuration options.
|
codesearchnet
|
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: bool=False) -> torch.Tensor:
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, attn_weights, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if hidden_states.dtype == torch.float16:
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
|
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
|
github-repos
|
def build_transcript(transcript_info, build='37'):
try:
transcript_id = transcript_info['ensembl_transcript_id']
except KeyError:
raise KeyError("Transcript has to have ensembl id")
is_primary = transcript_info.get('is_primary', False)
refseq_id = transcript_info.get('refseq_id')
refseq_identifiers = transcript_info.get('refseq_identifiers')
try:
chrom = transcript_info['chrom']
except KeyError:
raise KeyError("Transcript has to have a chromosome")
try:
start = int(transcript_info['transcript_start'])
except KeyError:
raise KeyError("Transcript has to have start")
except TypeError:
raise TypeError("Transcript start has to be integer")
try:
end = int(transcript_info['transcript_end'])
except KeyError:
raise KeyError("Transcript has to have end")
except TypeError:
raise TypeError("Transcript end has to be integer")
try:
hgnc_id = int(transcript_info['hgnc_id'])
except KeyError:
raise KeyError("Transcript has to have a hgnc id")
except TypeError:
raise TypeError("hgnc id has to be integer")
transcript_obj = HgncTranscript(
transcript_id=transcript_id,
hgnc_id=hgnc_id,
chrom=chrom,
start=start,
end=end,
is_primary=is_primary,
refseq_id=refseq_id,
refseq_identifiers=refseq_identifiers,
build=build
)
for key in list(transcript_obj):
if transcript_obj[key] is None:
transcript_obj.pop(key)
return transcript_obj
|
Build an hgnc_transcript object
Args:
transcript_info(dict): Transcript information
Returns:
transcript_obj(HgncTranscript)
{
transcript_id: str, required
hgnc_id: int, required
build: str, required
refseq_id: str,
chrom: str, required
start: int, required
end: int, required
is_primary: bool
}
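A minimal input sketch (the identifiers and coordinates are invented, and it assumes HgncTranscript behaves like a dict, as the cleanup loop in the code implies):
>>> info = {'ensembl_transcript_id': 'ENST00000000001', 'hgnc_id': 1234,
...         'chrom': '1', 'transcript_start': 1000, 'transcript_end': 2000}
>>> tx = build_transcript(info, build='37')
>>> tx['chrom'], tx['start'], tx['end'], tx['is_primary']
('1', 1000, 2000, False)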
|
juraj-google-style
|
def Or(exprs):
return simplify_exprs(exprs, _Or, TRUE, FALSE)
|
Create a disjunction or its simplified equivalent.
This will ensure that, when an _Or is returned, none of its immediate
subterms is TRUE, FALSE, or another disjunction.
Args:
exprs: An iterable. The subterms.
Returns:
A BooleanTerm.
|
github-repos
|
def build_exon(exon_info, build='37'):
try:
chrom = exon_info['chrom']
except KeyError:
raise KeyError("Exons has to have a chromosome")
try:
start = int(exon_info['start'])
except KeyError:
raise KeyError("Exon has to have a start")
except TypeError:
raise TypeError("Exon start has to be integer")
try:
end = int(exon_info['end'])
except KeyError:
raise KeyError("Exon has to have a end")
except TypeError:
raise TypeError("Exon end has to be integer")
try:
rank = int(exon_info['rank'])
except KeyError:
raise KeyError("Exon has to have a rank")
except TypeError:
raise TypeError("Exon rank has to be integer")
try:
exon_id = exon_info['exon_id']
except KeyError:
raise KeyError("Exons has to have a id")
try:
transcript = exon_info['transcript']
except KeyError:
raise KeyError("Exons has to have a transcript")
try:
hgnc_id = int(exon_info['hgnc_id'])
except KeyError:
raise KeyError("Exons has to have a hgnc_id")
except TypeError:
raise TypeError("hgnc_id has to be integer")
exon_obj = Exon(
exon_id = exon_id,
chrom = chrom,
start = start,
end = end,
rank = rank,
transcript = transcript,
hgnc_id = hgnc_id,
build = build,
)
return exon_obj
|
Build an Exon object
Args:
exon_info(dict): Exon information
Returns:
exon_obj(Exon)
"exon_id": str, # str(chrom-start-end)
"chrom": str,
"start": int,
"end": int,
"transcript": str, # ENST ID
"hgnc_id": int, # HGNC_id
"rank": int, # Order of exon in transcript
"build": str, # Genome build
|
juraj-google-style
|
def ignore_errors(self, log_warning=False, name=None) -> 'DatasetV2':
from tensorflow.python.data.ops import ignore_errors_op
return ignore_errors_op._ignore_errors(self, log_warning, name)
|
Drops elements that cause errors.
>>> dataset = tf.data.Dataset.from_tensor_slices([1., 2., 0., 4.])
>>> dataset = dataset.map(lambda x: tf.debugging.check_numerics(1. / x, ""))
>>> list(dataset.as_numpy_iterator())
Traceback (most recent call last):
...
InvalidArgumentError: ... Tensor had Inf values
>>> dataset = dataset.ignore_errors()
>>> list(dataset.as_numpy_iterator())
[1.0, 0.5, 0.25]
Args:
log_warning: (Optional.) A bool indicating whether or not ignored errors
should be logged to stderr. Defaults to `False`.
name: (Optional.) A string indicating a name for the `tf.data` operation.
Returns:
A new `Dataset` with the transformation applied as described above.
|
github-repos
|
def unique(seen, *iterables):
_add = seen.add
return (i for i in chain(*iterables) if i not in seen and not _add(i))
|
Get the unique items from the given iterables while preserving order. Note that
the provided seen set is only mutated as the returned generator is consumed.
Args:
seen (set): either an empty set, or the set of things already seen
*iterables: one or more iterables to chain together
Returns:
generator: the items not in seen, in first-encountered order.
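A short example, exercised directly from the code above:
>>> seen = set()
>>> list(unique(seen, [1, 2, 2, 3], [3, 4]))
[1, 2, 3, 4]
>>> sorted(seen)
[1, 2, 3, 4]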
|
juraj-google-style
|
def add_op_consumer(self, src_op_name, src_slot, dst_op_name, dst_slot):
self._op_consumers[src_op_name].append((src_slot, dst_op_name, dst_slot))
|
Add a consuming op for this op.
Args:
src_op_name: Name of the op of which the output tensor is being consumed.
src_slot: 0-based output slot of the op being consumed.
dst_op_name: Name of the consuming op (e.g., "Conv2D_3/BiasAdd")
dst_slot: 0-based input slot of the consuming op that receives the tensor
from this op.
|
github-repos
|
async def join(self, *, remote_addrs: Iterable[str], listen_addr: str='0.0.0.0:2377', join_token: str, advertise_addr: str=None, data_path_addr: str=None) -> bool:
data = {'RemoteAddrs': list(remote_addrs), 'JoinToken': join_token, 'ListenAddr': listen_addr, 'AdvertiseAddr': advertise_addr, 'DataPathAddr': data_path_addr}
await self.docker._query('swarm/join', method='POST', data=clean_map(data))
return True
|
Join a swarm.
Args:
listen_addr
Used for inter-manager communication
advertise_addr
Externally reachable address advertised to other nodes.
data_path_addr
Address or interface to use for data path traffic.
remote_addrs
Addresses of manager nodes already participating in the swarm.
join_token
Secret token for joining this swarm.
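A hedged usage sketch (addresses and token are placeholders; `docker` is a hypothetical client whose swarm API exposes this coroutine):
>>> await docker.swarm.join(
...     remote_addrs=['10.0.0.1:2377'],
...     join_token='SWMTKN-1-<placeholder>')
True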
|
codesearchnet
|
def _make_patterns(patterns):
field_registry = display_fields.FieldRegistry()
pattern_list = display_pattern.ScreenPatternList(field_registry=field_registry)
for pattern in patterns:
pattern_list.add(pattern.split('\n'))
return pattern_list
|
Create a ScreenPatternList from the given pattern texts.
Args:
patterns (list of str): the pattern texts; each entry is split on newlines before being added to the list.
Returns:
mpdlcd.display_pattern.ScreenPatternList: a list of patterns from the
given entries.
|
codesearchnet
|
def deserialize(json, cls=None):
LOGGER.debug('deserialize(%s)', json)
out = simplejson.loads(json)
if isinstance(out, dict) and cls is not None:
return cls(**out)
return out
|
Deserialize a JSON string into a Python object.
Args:
json (str): the JSON string.
cls (:py:class:`object`):
if the ``json`` is deserialized into a ``dict`` and
this argument is set,
the ``dict`` keys are passed as keyword arguments to the
given ``cls`` initializer.
Returns:
Python object representation of the given JSON string.
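Two small examples (the `Point` class is invented for illustration):
>>> deserialize('[1, 2, 3]')
[1, 2, 3]
>>> class Point:
...     def __init__(self, x, y):
...         self.x, self.y = x, y
>>> p = deserialize('{"x": 1, "y": 2}', cls=Point)
>>> (p.x, p.y)
(1, 2)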
|
codesearchnet
|
def remove_user_from_template(self, template_id, account_id=None, email_address=None):
return self._add_remove_user_template(self.TEMPLATE_REMOVE_USER_URL, template_id, account_id, email_address)
|
Removes the specified Account's access to the specified Template
Args:
template_id (str): The id of the template to remove the account's access from.
account_id (str): The id of the account to remove access from the template. The account id prevails if both account_id and email_address are provided.
email_address (str): The email address of the account to remove access from.
Returns:
A Template object
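A hedged usage sketch (the client instance, template id and address are invented):
>>> client.remove_user_from_template('tpl_abc123', email_address='user@example.com')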
|
juraj-google-style
|
def key_exists(hive, key, use_32bit_registry=False):
    return __utils__['reg.key_exists'](hive=hive,
                                       key=key,
                                       use_32bit_registry=use_32bit_registry)
|
Check that the key is found in the registry. This refers to keys and not
value/data pairs.
Args:
hive (str): The hive to connect to
key (str): The key to check
use_32bit_registry (bool): Look in the 32bit portion of the registry
Returns:
bool: True if exists, otherwise False
CLI Example:
.. code-block:: bash
salt '*' reg.key_exists HKLM SOFTWARE\Microsoft
|
juraj-google-style
|
def log_device_compatibility_check(policy_name):
global _logged_compatibility_check
if _logged_compatibility_check:
return
_logged_compatibility_check = True
gpus = config.list_physical_devices('GPU')
gpu_details_list = [config.get_device_details(g) for g in gpus]
_log_device_compatibility_check(policy_name, gpu_details_list)
|
Logs a compatibility check if the devices support the policy.
Currently only logs for the policy mixed_float16. A log is shown only the
first time this function is called.
Args:
policy_name: The name of the dtype policy.
|
github-repos
|
class CLIPSegImageSegmentationOutput(ModelOutput):
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
conditional_embeddings: Optional[torch.FloatTensor] = None
pooled_output: Optional[torch.FloatTensor] = None
vision_model_output: BaseModelOutputWithPooling = None
decoder_output: CLIPSegDecoderOutput = None
def to_tuple(self) -> Tuple[Any]:
return tuple((self[k] if k not in ['vision_model_output', 'decoder_output'] else getattr(self, k).to_tuple() for k in self.keys()))
|
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
Contrastive loss for image-text similarity.
...
vision_model_output (`BaseModelOutputWithPooling`):
The output of the [`CLIPSegVisionModel`].
|
github-repos
|
def send_notifications(self, notification_type, *args):
if (notification_type in self.notifications):
for (notification_id, callback) in self.notifications[notification_type]:
try:
callback(*args)
except:
self.logger.exception('Problem calling notify callback!')
|
Fires off the notification for the specific event. Uses varargs to pass an
arbitrary list of parameters according to which notification type was fired.
Args:
notification_type: Type of notification to fire (String from .helpers.enums.NotificationTypes)
args: variable list of arguments to the callback.
|
codesearchnet
|
def _TensorArrayWriteGrad(op: ops.Operation, flow):
handle = op.inputs[0]
index = op.inputs[1]
dtype = op.get_attr('T')
grad_source = _GetGradSource(flow)
flow_out = array_ops.identity(op.outputs[0], 'flow_out')
with ops.control_dependencies([flow_out]):
flow = array_ops.identity(flow, 'write_barrier')
g = tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow, colocate_with_first_write_call=False).grad(source=grad_source, flow=flow)
grad = g.read(index)
return [None, None, grad, flow]
|
Gradient for TensorArrayWrite.
Args:
op: Forward TensorArrayWrite op.
flow: Gradient `Tensor` flow to TensorArrayWrite.
Returns:
A grad `Tensor`, the gradient created in an upstream ReadGrad or PackGrad.
|
github-repos
|
def ints_to_string(ints):
if not isinstance(ints, list):
return six.u(str(ints))
return '|'.join(six.u(str(l)) for l in ints)
|
Convert a list of integers to a *|* separated string.
Args:
ints (list[int]|int): List of integer items to convert or single
integer to convert.
Returns:
str: Formatted string
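Two quick examples, following directly from the code above:
>>> ints_to_string([1, 2, 3])
'1|2|3'
>>> ints_to_string(7)
'7'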
|
juraj-google-style
|
def hexstr(text):
text = text.strip().lower()
if text.startswith(('0x', '0X')):
text = text[2:]
if not text:
raise s_exc.BadTypeValu(valu=text, name='hexstr',
mesg='No string left after stripping')
try:
s_common.uhex(text)
except (binascii.Error, ValueError) as e:
raise s_exc.BadTypeValu(valu=text, name='hexstr', mesg=str(e))
return text
|
Ensure a string is valid hex.
Args:
text (str): String to normalize.
Examples:
Norm a few strings:
hexstr('0xff00')
hexstr('ff00')
Notes:
Will accept strings prefixed by '0x' or '0X' and remove them.
Returns:
str: Normalized hex string.
|
juraj-google-style
|
def predecesors_pattern(element, root):
def is_root_container(el):
return el.parent.parent.getTagName() == ""
if not element.parent or not element.parent.parent or \
is_root_container(element):
return []
trail = [
[
element.parent.parent.getTagName(),
_params_or_none(element.parent.parent.params)
],
[
element.parent.getTagName(),
_params_or_none(element.parent.params)
],
[element.getTagName(), _params_or_none(element.params)],
]
match = root.match(*trail)
if element in match:
return [
PathCall("match", match.index(element), trail)
]
|
Look for `element` by its predecessors.
Args:
element (obj): HTMLElement instance of the object you are looking for.
root (obj): Root of the `DOM`.
Returns:
list: ``[PathCall()]`` - list with one :class:`PathCall` object (to \
allow use with ``.extend(predecesors_pattern())``).
|
juraj-google-style
|
def AddFiles(self, hash_id_metadatas):
for (hash_id, metadata) in iteritems(hash_id_metadatas):
self.AddFile(hash_id, metadata)
|
Adds multiple files to the file store.
Args:
hash_id_metadatas: A dictionary mapping hash ids to file metadata (a tuple
of hash client path and blob references).
|
codesearchnet
|
def Dump(self, output):
data = {
'current_content_length': self._current_content_length,
'is_last': self._is_last,
'server': self._request_builder.GetServer(),
'upload_url': self._upload_url,
'version': self._request_builder.GetVersion()
}
try:
yaml.dump(data, output)
except yaml.YAMLError as e:
raise googleads.errors.GoogleAdsError(
'Error dumping IncrementalUploadHelper to file: %s' % str(e))
|
Serialize the IncrementalUploadHelper and store in file-like object.
Args:
output: a file-like object where the status of the IncrementalUploadHelper
will be written.
Raises:
GoogleAdsError: If a YAMLError occurs while writing to the file.
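A hedged usage sketch (assumes `helper` is an existing IncrementalUploadHelper instance):
>>> import io
>>> buf = io.StringIO()
>>> helper.Dump(buf)
>>> 'upload_url' in buf.getvalue()
True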
|
juraj-google-style
|
def add_subscription(self, channel, callback_function):
if channel not in CHANNELS:
CHANNELS.append(channel)
SUBSCRIPTIONS[channel] = [callback_function]
else:
SUBSCRIPTIONS[channel].append(callback_function)
if self._subscribed:
_LOGGER.info("New channel added after main subscribe call.")
self._pubnub.subscribe().channels(channel).execute()
|
Add a channel to subscribe to and a callback function to
run when the channel receives an update.
If channel already exists, create a new "subscription"
and append another callback function.
Args:
channel (str): The channel to add a subscription to.
callback_function (func): The function to run on an
update to the passed in channel.
|
juraj-google-style
|
def swapaxes(x, axis1, axis2):
if any_symbolic_tensors((x,)):
return Swapaxes(axis1, axis2).symbolic_call(x)
return backend.numpy.swapaxes(x, axis1=axis1, axis2=axis2)
|
Interchange two axes of a tensor.
Args:
x: Input tensor.
axis1: First axis.
axis2: Second axis.
Returns:
A tensor with the axes swapped.
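For example (assuming this is exposed through the usual `keras.ops` namespace):
>>> x = keras.ops.ones((2, 3, 4))
>>> keras.ops.swapaxes(x, 0, 2).shape   # -> (4, 3, 2)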
|
github-repos
|
def floatx():
return _FLOATX
|
Return the default float type, as a string.
E.g. `'bfloat16'`, `'float16'`, `'float32'`, `'float64'`.
Returns:
String, the current default float type.
Example:
>>> keras.config.floatx()
'float32'
|
github-repos
|
def sparse_grid(func, order, dim=None, skew=None):
if (not isinstance(order, int)):
orders = numpy.array(order).flatten()
dim = orders.size
m_order = int(numpy.min(orders))
skew = [(order - m_order) for order in orders]
return sparse_grid(func, m_order, dim, skew)
(abscissas, weights) = ([], [])
bindex = chaospy.bertran.bindex(((order - dim) + 1), order, dim)
if (skew is None):
skew = numpy.zeros(dim, dtype=int)
else:
skew = numpy.array(skew, dtype=int)
assert (len(skew) == dim)
for idx in range((chaospy.bertran.terms(order, dim) - chaospy.bertran.terms((order - dim), dim))):
idb = bindex[idx]
(abscissa, weight) = func((skew + idb))
weight *= (((- 1) ** (order - sum(idb))) * comb((dim - 1), (order - sum(idb))))
abscissas.append(abscissa)
weights.append(weight)
abscissas = numpy.concatenate(abscissas, 1)
weights = numpy.concatenate(weights, 0)
abscissas = numpy.around(abscissas, 15)
order = numpy.lexsort(tuple(abscissas))
abscissas = abscissas.T[order].T
weights = weights[order]
diff = numpy.diff(abscissas.T, axis=0)
unique = numpy.ones(len(abscissas.T), bool)
unique[1:] = (diff != 0).any(axis=1)
length = len(weights)
idx = 1
while (idx < length):
while ((idx < length) and unique[idx]):
idx += 1
idy = (idx + 1)
while ((idy < length) and (not unique[idy])):
idy += 1
if ((idy - idx) > 1):
weights[(idx - 1)] = numpy.sum(weights[(idx - 1):idy])
idx = (idy + 1)
abscissas = abscissas[:, unique]
weights = weights[unique]
return (abscissas, weights)
|
Smolyak sparse grid constructor.
Args:
func (:py:data:typing.Callable):
Function that takes a single argument ``order`` of type
``numpy.ndarray`` and with ``order.shape = (dim,)``
order (int, numpy.ndarray):
The order of the grid. If ``numpy.ndarray``, it overrides both
``dim`` and ``skew``.
dim (int):
Number of dimension.
skew (list):
Order skewness.
Returns:
Tuple of two numpy.ndarray: the abscissas and weights of the assembled sparse grid rule.
|
codesearchnet
|