code | docstring | source
---|---|---|
def _load_schema_for_record(data, schema=None):
if schema is None:
if '$schema' not in data:
raise SchemaKeyNotFound(data=data)
schema = data['$schema']
if isinstance(schema, six.string_types):
schema = load_schema(schema_name=schema)
return schema
|
Load the schema from a given record.
Args:
data (dict): record data.
schema (Union[dict, str]): schema to validate against.
Returns:
dict: the loaded schema.
Raises:
SchemaNotFound: if the given schema was not found.
SchemaKeyNotFound: if ``schema`` is ``None`` and no ``$schema`` key was
found in ``data``.
jsonschema.SchemaError: if the schema is invalid.
|
juraj-google-style
|
def model_from_config(config, custom_objects=None):
if isinstance(config, list):
raise TypeError('`model_from_config` expects a dictionary, not a list. Maybe you meant to use `Sequential.from_config(config)`?')
from tensorflow.python.keras.layers import deserialize
return deserialize(config, custom_objects=custom_objects)
|
Instantiates a Keras model from its config.
Usage:
```
# for a Functional API model
tf.keras.Model().from_config(model.get_config())
# for a Sequential model
tf.keras.Sequential().from_config(model.get_config())
```
Args:
config: Configuration dictionary.
custom_objects: Optional dictionary mapping names
(strings) to custom classes or functions to be
considered during deserialization.
Returns:
A Keras model instance (uncompiled).
Raises:
TypeError: if `config` is not a dictionary.
|
github-repos
|
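A minimal round-trip sketch of the usage shown in the docstring, assuming a TF 2.x install. `from_config` is a classmethod and restores only the architecture (weights and compile state are not carried over), while `model_from_config` itself expects a dictionary, as the TypeError above enforces.
```
import tensorflow as tf

# Build a small model, capture its config, and rebuild it from that config.
model = tf.keras.Sequential([tf.keras.layers.Dense(4), tf.keras.layers.Dense(1)])
config = model.get_config()
clone = tf.keras.Sequential.from_config(config)  # same architecture, fresh weights
```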
def _get_showcase_dataset_dict(self, dataset):
if (isinstance(dataset, hdx.data.dataset.Dataset) or isinstance(dataset, dict)):
if ('id' not in dataset):
dataset = hdx.data.dataset.Dataset.read_from_hdx(dataset['name'])
dataset = dataset['id']
elif (not isinstance(dataset, str)):
raise hdx.data.hdxobject.HDXError(('Type %s cannot be added as a dataset!' % type(dataset).__name__))
if (is_valid_uuid(dataset) is False):
raise hdx.data.hdxobject.HDXError(('%s is not a valid dataset id!' % dataset))
return {'showcase_id': self.data['id'], 'package_id': dataset}
|
Get showcase dataset dict
Args:
dataset (Union[Dataset,Dict,str]): Either a dataset id or dataset metadata from a Dataset object or dictionary
Returns:
Dict: showcase dataset dict
|
codesearchnet
|
def __init__(self, class_to_mock):
MockAnything.__dict__['__init__'](self)
self._known_methods = set()
self._known_vars = set()
self._class_to_mock = class_to_mock
for method in dir(class_to_mock):
if callable(getattr(class_to_mock, method)):
self._known_methods.add(method)
else:
self._known_vars.add(method)
|
Initialize a mock object.
This determines the methods and properties of the class and stores them.
Args:
class_to_mock: the class to be mocked.
|
juraj-google-style
|
def forge_relationship(self, left_id, left_type, right_id, right_type, rel_type='Related To', rel_date=None, rel_confidence='high', rel_reason=''):
if (not rel_date):
rel_date = datetime.datetime.now()
type_trans = self._type_translation(left_type)
submit_url = '{}/{}/{}/'.format(self.url, type_trans, left_id)
params = {'api_key': self.api_key, 'username': self.username}
data = {'action': 'forge_relationship', 'right_type': right_type, 'right_id': right_id, 'rel_type': rel_type, 'rel_date': rel_date, 'rel_confidence': rel_confidence, 'rel_reason': rel_reason}
r = requests.patch(submit_url, params=params, data=data, proxies=self.proxies, verify=self.verify)
if (r.status_code == 200):
log.debug('Relationship built successfully: {0} <-> {1}'.format(left_id, right_id))
return True
else:
log.error('Error with status code {0} and message {1} between these indicators: {2} <-> {3}'.format(r.status_code, r.text, left_id, right_id))
return False
|
Forges a relationship between two TLOs.
Args:
left_id: The CRITs ID of the first indicator
left_type: The CRITs TLO type of the first indicator
right_id: The CRITs ID of the second indicator
right_type: The CRITs TLO type of the second indicator
rel_type: The relationship type ("Related To", etc.)
rel_date: datetime.datetime object for the date of the
relationship. If left blank, it will be datetime.datetime.now()
rel_confidence: The relationship confidence (high, medium, low)
rel_reason: Reason for the relationship.
Returns:
True if the relationship was created. False otherwise.
|
codesearchnet
|
def _OpenFileObject(self, path_spec):
if not path_spec.HasParent():
raise errors.PathSpecError(
'Unsupported path specification without parent.')
file_object = resolver.Resolver.OpenFileObject(
path_spec.parent, resolver_context=self._resolver_context)
vhdi_file = pyvhdi.file()
vhdi_file.open_file_object(file_object)
if vhdi_file.parent_identifier:
file_system = resolver.Resolver.OpenFileSystem(
path_spec.parent, resolver_context=self._resolver_context)
try:
self._OpenParentFile(file_system, path_spec.parent, vhdi_file)
finally:
file_system.Close()
self._sub_file_objects.append(file_object)
self._parent_vhdi_files.reverse()
self._sub_file_objects.reverse()
return vhdi_file
|
Opens the file-like object defined by path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
pyvhdi.file: a file-like object.
Raises:
PathSpecError: if the path specification is incorrect.
|
juraj-google-style
|
def extract_cluster(self, target_sites, **kwargs):
cluster = list(target_sites)
others = [site for site in self if (site not in cluster)]
size = 0
while (len(cluster) > size):
size = len(cluster)
new_others = []
for site in others:
for site2 in cluster:
if CovalentBond.is_bonded(site, site2, **kwargs):
cluster.append(site)
break
else:
new_others.append(site)
others = new_others
return cluster
|
Extracts a cluster of atoms based on bond lengths
Args:
target_sites ([Site]): List of initial sites to nucleate cluster.
**kwargs: kwargs passed through to CovalentBond.is_bonded.
Returns:
[Site/PeriodicSite] Cluster of atoms.
|
codesearchnet
|
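The same greedy cluster growth can be illustrated without pymatgen by standing in a plain distance cutoff for `CovalentBond.is_bonded`; the 1-D "sites" and cutoff below are purely illustrative.
```
def grow_cluster(seed, sites, bonded):
    # Repeatedly sweep the remaining sites, pulling in any site bonded
    # to something already in the cluster, until no new site joins.
    cluster = list(seed)
    others = [s for s in sites if s not in cluster]
    size = 0
    while len(cluster) > size:
        size = len(cluster)
        remaining = []
        for s in others:
            if any(bonded(s, c) for c in cluster):
                cluster.append(s)
            else:
                remaining.append(s)
        others = remaining
    return cluster

positions = [0.0, 1.0, 2.1, 8.0, 9.0]
print(grow_cluster([0.0], positions, lambda a, b: abs(a - b) < 1.5))
# [0.0, 1.0, 2.1] -- 8.0 and 9.0 would form a separate cluster
```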
def match_pattern(self, pat, word):
segs = self.word_fts(word)
if (len(pat) != len(segs)):
return None
elif all([(set(p) <= s) for (p, s) in zip(pat, segs)]):
return segs
|
Implements fixed-width pattern matching.
Matches just in case pattern is the same length (in segments) as the
word and each of the segments in the pattern is a featural subset of the
corresponding segment in the word. Matches return the corresponding list
of feature sets; failed matches return None.
Args:
pat (list): pattern consisting of a sequence of sets of (value,
feature) tuples
word (unicode): a Unicode IPA string consisting of zero or more
segments
Returns:
list: corresponding list of feature sets or, if there is no match,
None
|
codesearchnet
|
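A self-contained sketch of the subset test the docstring describes, using plain Python sets; the feature tuples here are made up for illustration.
```
def match_fixed_width(pattern, segments):
    # A match requires equal length and each pattern set being a
    # subset of the corresponding segment's feature set.
    if len(pattern) != len(segments):
        return None
    if all(set(p) <= set(s) for p, s in zip(pattern, segments)):
        return segments
    return None

word_features = [{('+', 'syl'), ('-', 'cons')}, {('-', 'syl'), ('+', 'cons')}]
print(match_fixed_width([{('+', 'syl')}, {('+', 'cons')}], word_features))  # returns the segments
print(match_fixed_width([{('-', 'syl')}], word_features))                   # None (length mismatch)
```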
def search(self, scope, search, **kwargs):
data = {'scope': scope, 'search': search}
return self.http_list('/search', query_data=data, **kwargs)
|
Search GitLab resources matching the provided string.
Args:
scope (str): Scope of the search
search (str): Search string
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabSearchError: If the server failed to perform the request
Returns:
GitlabList: A list of dicts describing the resources found.
|
juraj-google-style
|
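A hedged usage sketch with python-gitlab; the URL and token are placeholders, and extra keyword arguments are forwarded to the server as the docstring notes.
```
import gitlab

gl = gitlab.Gitlab('https://gitlab.example.com', private_token='<token>')
projects = gl.search('projects', 'flask')             # scope, search string
issues = gl.search('issues', 'deadlock', per_page=20)
```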
def dumps(ms, single=False, pretty_print=False, **kwargs):
if single:
ms = [ms]
return serialize(ms, pretty_print=pretty_print, **kwargs)
|
Serialize an Xmrs object to the Prolog representation
Args:
ms: an iterator of Xmrs objects to serialize (unless the
*single* option is `True`)
single: if `True`, treat *ms* as a single Xmrs object instead
of as an iterator
pretty_print: if `True`, add newlines and indentation
Returns:
the Prolog string representation of a corpus of Xmrs
|
juraj-google-style
|
def unbind(self, devices_to_unbind):
if (self.entity_api_key == ''):
return {'status': 'failure', 'response': 'No API key found in request'}
url = (self.base_url + 'api/0.1.0/subscribe/unbind')
headers = {'apikey': self.entity_api_key}
data = {'exchange': 'amq.topic', 'keys': devices_to_unbind, 'queue': self.entity_id}
with self.no_ssl_verification():
r = requests.delete(url, json=data, headers=headers)
print(r)
response = dict()
if ('No API key' in str(r.content.decode('utf-8'))):
response['status'] = 'failure'
r = json.loads(r.content.decode('utf-8'))['message']
elif ('unbind' in str(r.content.decode('utf-8'))):
response['status'] = 'success'
r = r.content.decode('utf-8')
else:
response['status'] = 'failure'
r = r.content.decode('utf-8')
response['response'] = str(r)
return response
|
This function allows an entity to unbind devices that are already bound.
Args:
devices_to_unbind (list): an array of devices that are to be unbound (stop listening)
Example: unbind(["test10", "testDemo105"])
|
codesearchnet
|
def main_op():
init = variables.global_variables_initializer()
init_local = variables.local_variables_initializer()
init_tables = lookup_ops.tables_initializer()
return control_flow_ops.group(init, init_local, init_tables)
|
Returns a main op to init variables and tables.
Returns the main op including the group of ops that initializes all
variables, initializes local variables and initializes all tables.
Returns:
The set of ops to be run as part of the main op upon the load operation.
|
github-repos
|
def _get_label_encoder_and_max(self, x):
label_count = x.fillna(NAN_INT).value_counts()
n_uniq = label_count.shape[0]
label_count = label_count[label_count >= self.min_obs]
n_uniq_new = label_count.shape[0]
offset = 0 if n_uniq == n_uniq_new else 1
label_encoder = pd.Series(np.arange(n_uniq_new) + offset, index=label_count.index)
max_label = label_encoder.max()
label_encoder = label_encoder.to_dict()
return label_encoder, max_label
|
Return a mapping from the values of a column to integer labels, along with the maximum label.
Args:
x (pandas.Series): a categorical column to encode.
Returns:
label_encoder (dict): mapping from values of features to integers
max_label (int): maximum label
|
juraj-google-style
|
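A standalone pandas sketch mirroring the same logic (frequency count, rare-category cutoff, consecutive integer labels); `min_obs` and the NaN sentinel are illustrative values, not the library's defaults.
```
import numpy as np
import pandas as pd

def label_encode(x, min_obs=2, nan_int=-999):
    # Count category frequencies (NaN mapped to a sentinel), drop rare
    # categories, and map the survivors to consecutive integer labels.
    counts = x.fillna(nan_int).value_counts()
    kept = counts[counts >= min_obs]
    offset = 0 if len(kept) == len(counts) else 1  # reserve 0 when values were dropped
    encoder = pd.Series(np.arange(len(kept)) + offset, index=kept.index)
    return encoder.to_dict(), int(encoder.max())

mapping, max_label = label_encode(pd.Series(['a', 'a', 'b', 'b', 'c']))
print(mapping, max_label)  # e.g. {'a': 1, 'b': 2}, 2 -- 'c' is too rare and maps to the fallback 0
```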
def get_actual_replica(self, service_id: str) -> str:
if (not self._manager):
raise RuntimeError('Only the Swarm manager node can retrieve replication level of the service')
service_details = self.get_service_details(service_id)
actual_replica = service_details['Spec']['Mode']['Replicated']['Replicas']
return actual_replica
|
Get the actual replica level of a service.
Args:
service_id (str): docker swarm service id
Returns:
str: replication level of the service
|
codesearchnet
|
def paginate_resources(cls, request, resources, on_fail_status):
if (not resources):
return (resources, client_list_control_pb2.ClientPagingResponse())
paging = request.paging
limit = (min(paging.limit, MAX_PAGE_SIZE) or DEFAULT_PAGE_SIZE)
try:
if paging.start:
start_index = cls.index_by_id(paging.start, resources)
else:
start_index = 0
if ((start_index < 0) or (start_index >= len(resources))):
raise AssertionError
except AssertionError:
raise _ResponseFailed(on_fail_status)
paged_resources = resources[start_index:(start_index + limit)]
if ((start_index + limit) < len(resources)):
paging_response = client_list_control_pb2.ClientPagingResponse(next=cls.id_by_index((start_index + limit), resources), start=cls.id_by_index(start_index, resources), limit=limit)
else:
paging_response = client_list_control_pb2.ClientPagingResponse(start=cls.id_by_index(start_index, resources), limit=limit)
return (paged_resources, paging_response)
|
Truncates a list of resources based on ClientPagingControls
Args:
request (object): The parsed protobuf request object
resources (list of objects): The resources to be paginated
on_fail_status (enum): Status to send back if the paging request is invalid
Returns:
list: The paginated list of resources
object: The ClientPagingResponse to be sent back to the client
|
codesearchnet
|
def add(self, other):
if not isinstance(other, SuperOp):
other = SuperOp(other)
if self.dim != other.dim:
raise QiskitError("other QuantumChannel dimensions are not equal")
return SuperOp(self._data + other.data, self.input_dims(),
self.output_dims())
|
Return the QuantumChannel self + other.
Args:
other (QuantumChannel): a quantum channel.
Returns:
SuperOp: the linear addition self + other as a SuperOp object.
Raises:
QiskitError: if other cannot be converted to a channel or
has incompatible dimensions.
|
juraj-google-style
|
def graph_structure(self, x, standalone=True):
if standalone:
x = tf.concat(tf.split(x, 2, axis=0), axis=1)
with argscope([tf.layers.conv2d], activation=lambda x: tf.nn.leaky_relu(x, 0.1),
padding='valid', strides=2, kernel_size=3,
data_format='channels_first'), \
argscope([tf.layers.conv2d_transpose], padding='same', activation=tf.identity,
data_format='channels_first', strides=2, kernel_size=4):
x = tf.layers.conv2d(pad(x, 3), 64, kernel_size=7, name='conv1')
conv2 = tf.layers.conv2d(pad(x, 2), 128, kernel_size=5, name='conv2')
x = tf.layers.conv2d(pad(conv2, 2), 256, kernel_size=5, name='conv3')
conv3 = tf.layers.conv2d(pad(x, 1), 256, name='conv3_1', strides=1)
x = tf.layers.conv2d(pad(conv3, 1), 512, name='conv4')
conv4 = tf.layers.conv2d(pad(x, 1), 512, name='conv4_1', strides=1)
x = tf.layers.conv2d(pad(conv4, 1), 512, name='conv5')
conv5 = tf.layers.conv2d(pad(x, 1), 512, name='conv5_1', strides=1)
x = tf.layers.conv2d(pad(conv5, 1), 1024, name='conv6')
conv6 = tf.layers.conv2d(pad(x, 1), 1024, name='conv6_1', strides=1)
flow6 = tf.layers.conv2d(pad(conv6, 1), 2, name='predict_flow6', strides=1, activation=tf.identity)
flow6_up = tf.layers.conv2d_transpose(flow6, 2, name='upsampled_flow6_to_5', use_bias=False)
x = tf.layers.conv2d_transpose(conv6, 512, name='deconv5', activation=lambda x: tf.nn.leaky_relu(x, 0.1))
concat5 = tf.concat([conv5, x, flow6_up], axis=1, name='concat5')
flow5 = tf.layers.conv2d(pad(concat5, 1), 2, name='predict_flow5', strides=1, activation=tf.identity)
flow5_up = tf.layers.conv2d_transpose(flow5, 2, name='upsampled_flow5_to_4', use_bias=False)
x = tf.layers.conv2d_transpose(concat5, 256, name='deconv4', activation=lambda x: tf.nn.leaky_relu(x, 0.1))
concat4 = tf.concat([conv4, x, flow5_up], axis=1, name='concat4')
flow4 = tf.layers.conv2d(pad(concat4, 1), 2, name='predict_flow4', strides=1, activation=tf.identity)
flow4_up = tf.layers.conv2d_transpose(flow4, 2, name='upsampled_flow4_to_3', use_bias=False)
x = tf.layers.conv2d_transpose(concat4, 128, name='deconv3', activation=lambda x: tf.nn.leaky_relu(x, 0.1))
concat3 = tf.concat([conv3, x, flow4_up], axis=1, name='concat3')
flow3 = tf.layers.conv2d(pad(concat3, 1), 2, name='predict_flow3', strides=1, activation=tf.identity)
flow3_up = tf.layers.conv2d_transpose(flow3, 2, name='upsampled_flow3_to_2', use_bias=False)
x = tf.layers.conv2d_transpose(concat3, 64, name='deconv2', activation=lambda x: tf.nn.leaky_relu(x, 0.1))
concat2 = tf.concat([conv2, x, flow3_up], axis=1, name='concat2')
flow2 = tf.layers.conv2d(pad(concat2, 1), 2, name='predict_flow2', strides=1, activation=tf.identity)
return tf.identity(flow2, name='flow2')
|
Architecture of FlowNetSimple in Figure 2 of FlowNet 1.0.
Args:
x: 2CHW if standalone==True, else NCHW where C=12 is a concatenation
of 5 tensors of [3, 3, 3, 2, 1] channels.
standalone: If True, this model is used to predict flow from two inputs.
If False, this model is used as part of the FlowNet2.
|
juraj-google-style
|
def get_densities(self, spin=None):
if (self.densities is None):
result = None
elif (spin is None):
if (Spin.down in self.densities):
result = (self.densities[Spin.up] + self.densities[Spin.down])
else:
result = self.densities[Spin.up]
else:
result = self.densities[spin]
return result
|
Returns the density of states for a particular spin.
Args:
spin: Spin
Returns:
Returns the density of states for a particular spin. If Spin is
None, the sum of all spins is returned.
|
codesearchnet
|
def unpack(self, buff=None, offset=0):
length = UBInt16()
length.unpack(buff, offset)
length.unpack(buff, offset=offset+MeterStats.meter_id.get_size())
super().unpack(buff[:offset+length.value], offset=offset)
|
Unpack *buff* into this object.
This method will convert binary data into a readable value according
to the attribute format.
Args:
buff (bytes): Binary buffer.
offset (int): Where to begin unpacking.
Raises:
:exc:`~.exceptions.UnpackException`: If unpack fails.
|
juraj-google-style
|
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
format_version = match.get('WebHistoryFileVersion', None)
if format_version != 1:
parser_mediator.ProduceExtractionWarning(
'unsupported Safari history version: {0!s}'.format(format_version))
return
if 'WebHistoryDates' not in match:
return
for history_entry in match.get('WebHistoryDates', {}):
last_visited_date = history_entry.get('lastVisitedDate', None)
if last_visited_date is None:
parser_mediator.ProduceExtractionWarning('missing last visited date')
continue
try:
timestamp = float(last_visited_date)
except (TypeError, ValueError):
parser_mediator.ProduceExtractionWarning(
'unable to convert last visited date {0:s}'.format(
last_visited_date))
continue
display_title = history_entry.get('displayTitle', None)
event_data = SafariHistoryEventData()
if display_title != event_data.title:
event_data.display_title = display_title
event_data.title = history_entry.get('title', None)
event_data.url = history_entry.get('', None)
event_data.visit_count = history_entry.get('visitCount', None)
event_data.was_http_non_get = history_entry.get(
'lastVisitWasHTTPNonGet', None)
timestamp = int(timestamp)
date_time = dfdatetime_cocoa_time.CocoaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Extracts Safari history items.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
|
juraj-google-style
|
def add_comments(self, comments):
for comment in comments:
if ((comment not in self.comments) and (len(comment) > 0)):
self.comments.append(comment)
if (len(self.comments[0]) == 0):
self.comments.pop(0)
|
Add comments to the localization entry
Args:
comments (list of str): The comments to be added to the localization entry.
|
codesearchnet
|
def list(self, *args, **kwargs):
return [
self.prepare_model(n)
for n in self.client.api.nodes(*args, **kwargs)
]
|
List swarm nodes.
Args:
filters (dict): Filters to process on the nodes list. Valid
filters: ``id``, ``name``, ``membership`` and ``role``.
Default: ``None``
Returns:
A list of :py:class:`Node` objects.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> client.nodes.list(filters={'role': 'manager'})
|
juraj-google-style
|
def eigvals(self, name='eigvals'):
if not self.is_self_adjoint:
raise NotImplementedError('Only self-adjoint matrices are supported.')
with self._name_scope(name):
return self._eigvals()
|
Returns the eigenvalues of this linear operator.
If the operator is marked as self-adjoint (via `is_self_adjoint`)
this computation can be more efficient.
Note: This currently only supports self-adjoint operators.
Args:
name: A name for this `Op`.
Returns:
Shape `[B1,...,Bb, N]` `Tensor` of same `dtype` as `self`.
|
github-repos
|
def edit_distance_1(self, word):
word = word.lower()
if (self._check_if_should_check(word) is False):
return {word}
letters = self._word_frequency.letters
splits = [(word[:i], word[i:]) for i in range((len(word) + 1))]
deletes = [(L + R[1:]) for (L, R) in splits if R]
transposes = [(((L + R[1]) + R[0]) + R[2:]) for (L, R) in splits if (len(R) > 1)]
replaces = [((L + c) + R[1:]) for (L, R) in splits if R for c in letters]
inserts = [((L + c) + R) for (L, R) in splits for c in letters]
return set((((deletes + transposes) + replaces) + inserts))
|
Compute all strings that are one edit away from `word` using only
the letters in the corpus
Args:
word (str): The word for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance one from the provided word
|
codesearchnet
|
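The candidate generation itself is the classic single-edit expansion; a self-contained version (without the frequency-based `_check_if_should_check` guard) looks like this.
```
def edits1(word, letters='abcdefghijklmnopqrstuvwxyz'):
    # All deletions, adjacent transpositions, single-letter replacements
    # and single-letter insertions, deduplicated via a set.
    word = word.lower()
    splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    deletes = [L + R[1:] for L, R in splits if R]
    transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
    replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
    inserts = [L + c + R for L, R in splits for c in letters]
    return set(deletes + transposes + replaces + inserts)

print(len(edits1('cat')))  # number of unique strings one edit away from 'cat'
```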
def delete(self, dry=False, meta=None, index_fields=None):
from datetime import datetime
if (not dry):
self.pre_delete()
(results, errors) = self._delete_relations(dry)
if (not (dry or errors)):
self.deleted = True
self.deleted_at = datetime.now()
self.save(internal=True, meta=meta, index_fields=index_fields)
self.post_delete()
if settings.ENABLE_CACHING:
cache.delete(self.key)
return (results, errors)
|
Sets the object's "deleted" field to True and its "deleted_at" field to the
current time, then saves it to the DB.
Args:
dry (bool): False. Do not execute the actual deletion.
Just list what will be deleted as a result of relations.
meta (dict): JSON serializable meta data for logging of save operation.
{'lorem': 'ipsum', 'dolar': 5}
index_fields (list): Tuple list for secondary indexing keys in riak (with 'bin' or 'int').
bin is used for string fields, int is used for integer fields.
[('lorem','bin'),('dolar','int')]
Returns:
Tuple. (results [], errors [])
|
codesearchnet
|
def _del_conversation(self, conversation_key: str) -> None:
if (conversation_key in self.conversations.keys()):
del self.conversations[conversation_key]
log.info(f'Deleted conversation, key: {conversation_key}')
|
Deletes Conversation instance.
Args:
conversation_key: Conversation key.
|
codesearchnet
|
def _get_parameter_conversion_entry(parameter_config):
entry = _PARAM_CONVERSION_MAP.get(parameter_config.get('type'))
if entry is None and 'enum' in parameter_config:
entry = _PARAM_CONVERSION_MAP['enum']
return entry
|
Get information needed to convert the given parameter to its API type.
Args:
parameter_config: The dictionary containing information specific to the
parameter in question. This is retrieved from request.parameters in the
method config.
Returns:
The entry from _PARAM_CONVERSION_MAP with functions/information needed to
validate and convert the given parameter from a string to the type expected
by the API.
|
juraj-google-style
|
def most_specific_common_supertype(self, others):
try:
for other in others:
self.sanity_check_type(other)
nest.assert_same_structure(self._element_spec, other._element_spec)
except (TypeError, ValueError):
return None
self_elements = nest.flatten(self._element_spec)
others_elements = [nest.flatten(other._element_spec) for other in others]
common_elements = [None] * len(self_elements)
for i, self_element in enumerate(self_elements):
common_elements[i] = self_element.most_specific_common_supertype([other_elements[i] for other_elements in others_elements])
if common_elements[i] is None:
return None
common_element_spec = nest.pack_sequence_as(self._element_spec, common_elements)
return type(self)(self._input_workers, common_element_spec, self._strategy, self._options, cardinality=self._cardinality, enable_get_next_as_optional=self._enable_get_next_as_optional)
|
Returns the most specific supertype of `self` and `others`.
Args:
others: A Sequence of `TypeSpec`.
Returns:
The most specific supertype, or `None` if a supertype does not exist.
|
github-repos
|
def load_from_file(self, filename=None, *, strict=True):
self.set_to_default()
if filename:
self._update_from_file(filename)
elif (LIGHTFLOW_CONFIG_ENV not in os.environ):
if os.path.isfile(os.path.join(os.getcwd(), LIGHTFLOW_CONFIG_NAME)):
self._update_from_file(os.path.join(os.getcwd(), LIGHTFLOW_CONFIG_NAME))
elif os.path.isfile(expand_env_var('~/{}'.format(LIGHTFLOW_CONFIG_NAME))):
self._update_from_file(expand_env_var('~/{}'.format(LIGHTFLOW_CONFIG_NAME)))
elif strict:
raise ConfigLoadError('Could not find the configuration file.')
else:
self._update_from_file(expand_env_var(os.environ[LIGHTFLOW_CONFIG_ENV]))
self._update_python_paths()
|
Load the configuration from a file.
The location of the configuration file can either be specified directly in the
parameter filename or is searched for in the following order:
1. In the environment variable given by LIGHTFLOW_CONFIG_ENV
2. In the current execution directory
3. In the user's home directory
Args:
filename (str): The location and name of the configuration file.
strict (bool): If True, raises a ConfigLoadError when the configuration
cannot be found.
Raises:
ConfigLoadError: If the configuration cannot be found.
|
codesearchnet
|
def execute(self, action):
if self.env.game_over():
return self.env.getScreenRGB(), True, 0
action_space = self.env.getActionSet()
reward = self.env.act(action_space[action])
new_state = self.env.getScreenRGB()
done = self.env.game_over()
return new_state, done, reward
|
Executes action, observes next state and reward.
Args:
action: Action to execute.
Returns:
(Dict of) next state(s), boolean indicating terminal, and reward signal.
|
juraj-google-style
|
def _load_data(self, data, from_db=False):
self._data = data[:]
self.setattrs(
values=[],
node_stack=[],
node_dict={},
)
self._from_db = from_db
|
Stores the data in self._data; actual object creation is done in _generate_instances().
Args:
data (list): List of dicts.
from_db (bool): Default False. Is this data coming from DB or not.
|
juraj-google-style
|
def concurrent_exec(func, param_list, max_workers=30, raise_on_exception=False):
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
future_to_params = {executor.submit(func, *p): p for p in param_list}
return_vals = []
exceptions = []
for future in concurrent.futures.as_completed(future_to_params):
params = future_to_params[future]
try:
return_vals.append(future.result())
except Exception as exc:
logging.exception('%s generated an exception: %s', params, traceback.format_exc())
return_vals.append(exc)
exceptions.append(exc)
if raise_on_exception and exceptions:
error_messages = []
for exception in exceptions:
error_messages.append(''.join(traceback.format_exception(exception.__class__, exception, exception.__traceback__)))
raise RuntimeError('\n\n'.join(error_messages))
return return_vals
|
Executes a function with different parameters pseudo-concurrently.
This is basically a map function. Each element (should be an iterable) in
the param_list is unpacked and passed into the function. Due to Python's
GIL, there's no true concurrency. This is suited for IO-bound tasks.
Args:
func: The function that performs a task.
param_list: A list of iterables, each being a set of params to be
passed into the function.
max_workers: int, the number of workers to use for parallelizing the
tasks. By default, this is 30 workers.
raise_on_exception: bool, raises all of the task failures if any of the
tasks failed if `True`. By default, this is `False`.
Returns:
A list of return values from each function execution. If an execution
caused an exception, the exception object will be the corresponding
result.
Raises:
RuntimeError: If executing any of the tasks failed and
`raise_on_exception` is True.
|
github-repos
|
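A hypothetical usage sketch, assuming `concurrent_exec` above is in scope; each tuple in the param list is unpacked into the function.
```
def add(a, b):
    return a + b

params = [(1, 2), (3, 4), (5, 6)]
results = concurrent_exec(add, params, max_workers=3)
print(results)  # e.g. [3, 7, 11] -- results arrive in completion order, not input order
```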
def set_error_message(self, error_message):
self._empty = False
self.error_message = error_message
|
Sets an error message on an instrumentation block.
This method is used exclusively to indicate that a test method failed
to complete, which is usually caused by a crash of some sort such that
the test method is marked as error instead of ignored.
Args:
error_message: string, an error message to be added to the
TestResultRecord to explain that something wrong happened.
|
github-repos
|
def parse_cgmlst_alleles(cgmlst_fasta):
out = defaultdict(list)
for header, seq in parse_fasta(cgmlst_fasta):
if not '|' in header:
raise Exception('Unexpected format for cgMLST fasta file header. No "|" (pipe) delimiter present! Header="{}"'.format(header))
marker_name, allele_name = header.split('|')
out[marker_name].append(seq)
return out
|
Parse cgMLST alleles from fasta file
cgMLST FASTA file must have a header format of ">{marker name}|{allele name}"
Args:
cgmlst_fasta (str): cgMLST fasta file path
Returns:
dict of list: Marker name to list of allele sequences
|
juraj-google-style
|
def on(self, evnt, func, base=None):
funcs = self._syn_funcs[evnt]
if (func in funcs):
return
funcs.append(func)
if (base is not None):
def fini():
self.off(evnt, func)
base.onfini(fini)
|
Add a base function callback for a specific event with optional filtering. If the function returns a
coroutine, it will be awaited.
Args:
evnt (str): An event name
func (function): A callback function to receive event tufo
Examples:
Add a callback function and fire it:
async def baz(event):
x = event[1].get('x')
y = event[1].get('y')
return x + y
d.on('foo', baz)
# this fire triggers baz...
await d.fire('foo', x=10, y=20)
Returns:
None:
|
codesearchnet
|
def bbox_rot90(bbox, factor, rows, cols):
if factor < 0 or factor > 3:
raise ValueError('Parameter n must be in range [0;3]')
x_min, y_min, x_max, y_max = bbox
if factor == 1:
bbox = [y_min, 1 - x_max, y_max, 1 - x_min]
if factor == 2:
bbox = [1 - x_max, 1 - y_max, 1 - x_min, 1 - y_min]
if factor == 3:
bbox = [1 - y_max, x_min, 1 - y_min, x_max]
return bbox
|
Rotates a bounding box by 90 degrees CCW (see np.rot90)
Args:
bbox (tuple): A tuple (x_min, y_min, x_max, y_max).
factor (int): Number of CCW rotations. Must be in range [0;3] See np.rot90.
rows (int): Image rows.
cols (int): Image cols.
|
juraj-google-style
|
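A usage sketch assuming the function above is in scope; coordinates are normalized to [0, 1], and `rows`/`cols` are accepted but unused by the arithmetic.
```
box = (0.1, 0.2, 0.4, 0.5)  # (x_min, y_min, x_max, y_max), normalized
print(bbox_rot90(box, factor=1, rows=100, cols=100))
# [0.2, 0.6, 0.5, 0.9] -> [y_min, 1 - x_max, y_max, 1 - x_min]
```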
def fetch(url: str, **kwargs) -> Selector:
kwargs.setdefault('headers', DEFAULT_HEADERS)
try:
res = requests.get(url, **kwargs)
res.raise_for_status()
except requests.RequestException as e:
print(e)
else:
html = res.text
tree = Selector(text=html)
return tree
|
Send HTTP request and parse it as a DOM tree.
Args:
url (str): The url of the site.
Returns:
Selector: allows you to select parts of HTML text using CSS or XPath expressions.
|
juraj-google-style
|
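A hedged usage sketch assuming the `fetch` helper above, the parsel library, and network access; note that `fetch` implicitly returns None when the request fails.
```
tree = fetch('https://example.org')
if tree is not None:
    print(tree.css('title::text').get())    # first <title> text, if any
    print(tree.xpath('//h1/text()').get())  # first <h1> text, if any
```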
def CreateStorageWriterForFile(cls, session, path):
if sqlite_file.SQLiteStorageFile.CheckSupportedFormat(path):
return sqlite_writer.SQLiteStorageFileWriter(session, path)
return None
|
Creates a storage writer based on the file.
Args:
session (Session): session the storage changes are part of.
path (str): path to the storage file.
Returns:
StorageWriter: a storage writer or None if the storage file cannot be
opened or the storage format is not supported.
|
juraj-google-style
|
def save(self, filename, image_format="eps", width=8, height=6):
self.get_plot(width, height).savefig(filename, format=image_format)
|
Save the plot to an image file.
Args:
filename: Filename to save to.
image_format: Format to save to. Defaults to eps.
width: Width of the plot. Defaults to 8.
height: Height of the plot. Defaults to 6.
|
juraj-google-style
|
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1 is not None:
output += token_ids_1 + [self.sep_token_id]
return output
|
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An ELECTRA sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
|
github-repos
|
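The assembly logic can be shown independently of any tokenizer with plain integer ids; the id values here are made up.
```
CLS, SEP = 101, 102  # hypothetical special-token ids

def with_special_tokens(ids_a, ids_b=None):
    out = [CLS] + ids_a + [SEP]
    if ids_b is not None:
        out += ids_b + [SEP]
    return out

print(with_special_tokens([7, 8, 9]))       # [101, 7, 8, 9, 102]
print(with_special_tokens([7, 8], [5, 6]))  # [101, 7, 8, 102, 5, 6, 102]
```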
def setValue(self, value):
if value >= self.minimum() and value <= self.maximum():
self._lineEdit.setText(str(value))
elif value < self.minimum():
self._lineEdit.setText(str(self.minimum()))
elif value > self.maximum():
self._lineEdit.setText(str(self.maximum()))
return True
|
Setter function for _lineEdit.text. Sets the minimum/maximum as the new value if the value is out of bounds.
Args:
value (int/long): new value to set.
Returns:
True if all went fine.
|
juraj-google-style
|
def read_vocab(args, column_name):
vocab_path = os.path.join(args.analysis,
feature_transforms.VOCAB_ANALYSIS_FILE % column_name)
if not file_io.file_exists(vocab_path):
return []
vocab, _ = feature_transforms.read_vocab_file(vocab_path)
return vocab
|
Reads a vocab file if it exists.
Args:
args: command line flags
column_name: name of the column that has a vocab file.
Returns:
List of vocab words or [] if the vocab file is not found.
|
juraj-google-style
|
def get_connectable_volume_templates(self, start=0, count=(- 1), filter='', query='', sort=''):
uri = (self.URI + '/connectable-volume-templates')
get_uri = self._client.build_query_uri(start=start, count=count, filter=filter, query=query, sort=sort, uri=uri)
return self._client.get(get_uri)
|
Gets the storage volume templates that are available on the specified networks based on the storage system
port's expected network connectivity. If there are no storage volume templates that meet the specified
connectivity criteria, an empty collection will be returned.
Args:
start: The first item to return, using 0-based indexing. If not specified, the default is 0.
count: The number of resources to return. A count of -1 requests all items.
filter: A general filter/query string to narrow the list of items returned.
query: A general query string to narrow the list of resources returned.
sort: The sort order of the returned data set.
Returns:
list: Storage volume templates.
|
codesearchnet
|
def get_changes_since(self, timestamp: str) -> Dict[str, List]:
rg = []
cg = []
ra = []
ca = []
layers = []
if self.last_modified() > timestamp:
if self.row_graphs.last_modified() > timestamp:
for name in self.row_graphs.keys():
if self.row_graphs.last_modified(name) > timestamp:
rg.append(name)
if self.col_graphs.last_modified() > timestamp:
for name in self.col_graphs.keys():
if self.col_graphs.last_modified(name) > timestamp:
cg.append(name)
if self.ra.last_modified() > timestamp:
for name in self.ra.keys():
if self.ra.last_modified(name) > timestamp:
ra.append(name)
if self.ca.last_modified() > timestamp:
for name in self.ca.keys():
if self.ca.last_modified(name) > timestamp:
ca.append(name)
if self.layers.last_modified() > timestamp:
for name in self.layers.keys():
if self.layers.last_modified(name) > timestamp:
layers.append(name)
return {"row_graphs": rg, "col_graphs": cg, "row_attrs": ra, "col_attrs": ca, "layers": layers}
|
Get a summary of the parts of the file that changed since the given time
Args:
timestamp: ISO8601 timestamp
Returns:
dict: Dictionary like ``{"row_graphs": rg, "col_graphs": cg, "row_attrs": ra, "col_attrs": ca, "layers": layers}`` listing the names of objects that were modified since the given time
|
juraj-google-style
|
def MakeSuiteFromHist(hist, name=None):
if name is None:
name = hist.name
d = dict(hist.GetDict())
return MakeSuiteFromDict(d, name)
|
Makes a normalized suite from a Hist object.
Args:
hist: Hist object
name: string name
Returns:
Suite object
|
juraj-google-style
|
def installed(name, source):
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
if (not name):
raise SaltInvocationError('Must specify a KB "name"')
if (not source):
raise SaltInvocationError('Must specify a "source" file to install')
if __salt__['wusa.is_installed'](name):
ret['result'] = True
ret['comment'] = '{0} already installed'.format(name)
return ret
if (__opts__['test'] is True):
ret['result'] = None
ret['comment'] = '{0} would be installed'.format(name)
return ret
cached_source_path = __salt__['cp.cache_file'](path=source, saltenv=__env__)
if (not cached_source_path):
msg = 'Unable to cache {0} from saltenv "{1}"'.format(salt.utils.url.redact_http_basic_auth(source), __env__)
ret['comment'] = msg
return ret
__salt__['wusa.install'](cached_source_path)
if __salt__['wusa.is_installed'](name):
ret['comment'] = '{0} was installed'.format(name)
ret['changes'] = {'old': False, 'new': True}
ret['result'] = True
else:
ret['comment'] = '{0} failed to install'.format(name)
return ret
|
Ensure an update is installed on the minion
Args:
name(str):
Name of the Windows KB ("KB123456")
source (str):
Source of .msu file corresponding to the KB
Example:
.. code-block:: yaml
KB123456:
wusa.installed:
- source: salt://kb123456.msu
|
codesearchnet
|
def _split_ir_into_match_steps(pruned_ir_blocks):
output = []
current_tuple = None
for block in pruned_ir_blocks:
if isinstance(block, OutputSource):
continue
elif isinstance(block, root_block_types):
if current_tuple is not None:
output.append(current_tuple)
current_tuple = (block,)
elif isinstance(block, (CoerceType, Filter, MarkLocation)):
current_tuple += (block,)
else:
raise AssertionError(u'Unexpected block type when converting to MATCH query: '
u'{} {}'.format(block, pruned_ir_blocks))
if current_tuple is None:
raise AssertionError(u'current_tuple was unexpectedly None: {}'.format(pruned_ir_blocks))
output.append(current_tuple)
return [_per_location_tuple_to_step(x) for x in output]
|
Split a list of IR blocks into per-location MATCH steps.
Args:
pruned_ir_blocks: list of IR basic block objects that have gone through a lowering step.
Returns:
list of MatchStep namedtuples, each of which contains all basic blocks that correspond
to a single MATCH step.
|
juraj-google-style
|
def _token_to_subtoken_ids(self, token):
cache_location = hash(token) % self._cache_size
cache_key, cache_value = self._cache[cache_location]
if cache_key == token:
return cache_value
ret = self._escaped_token_to_subtoken_ids(
_escape_token(token, self._alphabet))
self._cache[cache_location] = (token, ret)
return ret
|
Converts token to a list of subtoken ids.
Args:
token: a string.
Returns:
a list of integers in the range [0, vocab_size)
|
juraj-google-style
|
def add_defaults_to_kwargs(defaults, **kwargs):
defaults = dict(defaults)
defaults.update(kwargs)
return defaults
|
Updates a copy of `defaults` with `kwargs`; values in `kwargs` take precedence.
Args:
defaults: A dictionary of keys and values
**kwargs: The kwargs to update.
Returns:
The updated kwargs.
|
codesearchnet
|
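A usage sketch assuming the function above is in scope; values passed as keyword arguments win over the defaults.
```
defaults = {'timeout': 30, 'retries': 3}
merged = add_defaults_to_kwargs(defaults, retries=5, verbose=True)
print(merged)  # {'timeout': 30, 'retries': 5, 'verbose': True}
```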
def _create_uninitialized_mirrored_tpu_variables(**kwargs):
if kwargs.get('initial_value', None) is None:
return _create_mirrored_tpu_variables(**kwargs)
value_list = []
initial_value = None
for i, d in enumerate(devices):
with ops.device(d):
if i == 0:
initial_value = kwargs.get('initial_value', None)
with maybe_init_scope():
if initial_value is not None:
if callable(initial_value):
initial_value = initial_value()
initial_value = ops.convert_to_tensor(initial_value, dtype=kwargs.get('dtype', None))
if i > 0:
var0name = value_list[0].name.split(':')[0]
kwargs['name'] = '%s/replica_%d/' % (var0name, i)
kwargs['initial_value'] = initial_value
if kwargs.get('dtype', None) is None:
kwargs['dtype'] = kwargs['initial_value'].dtype
if kwargs.get('shape', None) is None:
kwargs['shape'] = kwargs['initial_value'].shape
with context.device_policy(context.DEVICE_PLACEMENT_SILENT):
v = uninitialized_variable_creator(**kwargs)
assert not isinstance(v, tpu_values.TPUMirroredVariable)
value_list.append(v)
return value_list
|
Returns a list of `tf.Variable`s.
The list contains `number_replicas` `tf.Variable`s and can be used to
initialize a `TPUMirroredVariable`.
Args:
**kwargs: the keyword arguments for creating a variable
|
github-repos
|
def _run_dnb_normalization(self, dnb_data, sza_data):
dnb_data = xr.DataArray(dnb_data, dims=('y', 'x'))
sza_data = xr.DataArray(sza_data, dims=('y', 'x'))
good_mask = ~(dnb_data.isnull() | sza_data.isnull())
output_dataset = dnb_data.where(good_mask)
output_dataset = output_dataset.values.copy()
dnb_data = dnb_data.values
sza_data = sza_data.values
day_mask, mixed_mask, night_mask = make_day_night_masks(
sza_data,
good_mask.values,
self.high_angle_cutoff,
self.low_angle_cutoff,
stepsDegrees=self.mixed_degree_step)
did_equalize = False
if day_mask.any():
LOG.debug("Histogram equalizing DNB day data...")
histogram_equalization(dnb_data, day_mask, out=output_dataset)
did_equalize = True
if mixed_mask:
for mask in mixed_mask:
if mask.any():
LOG.debug("Histogram equalizing DNB mixed data...")
histogram_equalization(dnb_data, mask, out=output_dataset)
did_equalize = True
if night_mask.any():
LOG.debug("Histogram equalizing DNB night data...")
histogram_equalization(dnb_data, night_mask, out=output_dataset)
did_equalize = True
if not did_equalize:
raise RuntimeError("No valid data found to histogram equalize")
return output_dataset
|
Scale the DNB data using a histogram equalization method.
Args:
dnb_data (ndarray): Day/Night Band data array
sza_data (ndarray): Solar Zenith Angle data array
|
juraj-google-style
|
async def build_task_dependencies(chain, task, name, my_task_id):
log.info("build_task_dependencies {} {}".format(name, my_task_id))
if name.count(':') > chain.context.config['max_chain_length']:
raise CoTError("Too deep recursion!\n{}".format(name))
sorted_dependencies = find_sorted_task_dependencies(task, name, my_task_id)
for task_name, task_id in sorted_dependencies:
if task_id not in chain.dependent_task_ids():
link = LinkOfTrust(chain.context, task_name, task_id)
json_path = link.get_artifact_full_path('task.json')
try:
task_defn = await chain.context.queue.task(task_id)
link.task = task_defn
chain.links.append(link)
makedirs(os.path.dirname(json_path))
with open(json_path, 'w') as fh:
fh.write(format_json(task_defn))
await build_task_dependencies(chain, task_defn, task_name, task_id)
except TaskclusterFailure as exc:
raise CoTError(str(exc))
|
Recursively build the task dependencies of a task.
Args:
chain (ChainOfTrust): the chain of trust to add to.
task (dict): the task definition to operate on.
name (str): the name of the task to operate on.
my_task_id (str): the taskId of the task to operate on.
Raises:
CoTError: on failure.
|
juraj-google-style
|
def guess_peb_size(path):
file_offset = 0
offsets = []
f = open(path, 'rb')
f.seek(0,2)
file_size = f.tell()+1
f.seek(0)
for _ in range(0, file_size, FILE_CHUNK_SZ):
buf = f.read(FILE_CHUNK_SZ)
for m in re.finditer(UBI_EC_HDR_MAGIC, buf):
start = m.start()
if not file_offset:
file_offset = start
idx = start
else:
idx = start+file_offset
offsets.append(idx)
file_offset += FILE_CHUNK_SZ
f.close()
occurances = {}
for i in range(0, len(offsets)):
try:
diff = offsets[i] - offsets[i-1]
except:
diff = offsets[i]
if diff not in occurances:
occurances[diff] = 0
occurances[diff] += 1
most_frequent = 0
block_size = None
for offset in occurances:
if occurances[offset] > most_frequent:
most_frequent = occurances[offset]
block_size = offset
return block_size
|
Determine the most likely PEB (erase block) size.
Searches the file for the UBI erase-counter header magic number and picks
the most common distance between occurrences.
Arguments:
Str:path -- Path to file.
Returns:
Int -- PEB size.
|
juraj-google-style
|
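The "most common distance between magic-number hits" idea reduces to a mode over consecutive offset gaps; a compact sketch with synthetic offsets.
```
from collections import Counter

def most_common_gap(offsets):
    # The most frequent gap between consecutive header offsets is the
    # best guess for the erase-block (PEB) size.
    gaps = [b - a for a, b in zip(offsets, offsets[1:])]
    return Counter(gaps).most_common(1)[0][0] if gaps else None

offsets = [0, 131072, 262144, 393216, 524290]  # one noisy hit at the end
print(most_common_gap(offsets))  # 131072 (128 KiB)
```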
def union_update(self, *others):
_elements = self._elements
_total = self._total
for other in map(self._as_mapping, others):
for (element, multiplicity) in other.items():
old_multiplicity = _elements.get(element, 0)
if (multiplicity > old_multiplicity):
_elements[element] = multiplicity
_total += (multiplicity - old_multiplicity)
self._total = _total
|
r"""Update the multiset, adding elements from all others using the maximum multiplicity.
>>> ms = Multiset('aab')
>>> ms.union_update('bc')
>>> sorted(ms)
['a', 'a', 'b', 'c']
You can also use the ``|=`` operator for the same effect. However, the operator version
will only accept a set as other operator, not any iterable, to avoid errors.
>>> ms = Multiset('aab')
>>> ms |= Multiset('bccd')
>>> sorted(ms)
['a', 'a', 'b', 'c', 'c', 'd']
For a variant of the operation which does not modify the multiset, but returns a new
multiset instead see :meth:`union`.
Args:
others: The other sets to union this multiset with. Can also be any :class:`~typing.Iterable`\[~T]
or :class:`~typing.Mapping`\[~T, :class:`int`] which are then converted to :class:`Multiset`\[~T].
|
codesearchnet
|
def tflite_convert(fn, input_templates):
fn = def_function.function(fn)
concrete_func = fn.get_concrete_function(*input_templates)
converter = lite.TFLiteConverterV2([concrete_func])
return converter.convert()
|
Converts the provided fn to tf.lite model.
Args:
fn: A callable that expects a list of inputs like input_templates that
returns a tensor or structure of tensors.
input_templates: A list of Tensors, ndarrays or TensorSpecs describing the
inputs that fn expects. The actual values of the Tensors or ndarrays are
unused.
Returns:
The serialized tf.lite model.
|
github-repos
|
def __eq__(self, other):
if self._begin == other._begin and self._end == other._end:
return True
return False
|
Two intervals are the same if they have the same begin and end.
Args:
other (Interval): other Interval
Returns:
bool: are self and other equal.
|
juraj-google-style
|
def GetAPFSVolumeByPathSpec(self, path_spec):
volume_index = apfs_helper.APFSContainerPathSpecGetVolumeIndex(path_spec)
if (volume_index is None):
return None
return self._fsapfs_container.get_volume(volume_index)
|
Retrieves an APFS volume for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
pyfsapfs.volume: an APFS volume or None if not available.
|
codesearchnet
|
def _GetDisplayPath(self, path_spec, full_path, data_stream_name):
display_path = ''
if path_spec.HasParent():
parent_path_spec = path_spec.parent
if parent_path_spec and parent_path_spec.type_indicator == (
dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION):
display_path = ''.join([display_path, parent_path_spec.location])
display_path = ''.join([display_path, full_path])
if data_stream_name:
display_path = ':'.join([display_path, data_stream_name])
return display_path
|
Retrieves a path to display.
Args:
path_spec (dfvfs.PathSpec): path specification of the file entry.
full_path (str): full path of the file entry.
data_stream_name (str): name of the data stream.
Returns:
str: path to display.
|
juraj-google-style
|
def get_scan_plot(self, coords=None):
from pymatgen.util.plotting import pretty_plot
plt = pretty_plot(12, 8)
d = self.read_scan()
if (coords and (coords in d['coords'])):
x = d['coords'][coords]
plt.xlabel(coords)
else:
x = range(len(d['energies']))
plt.xlabel('points')
plt.ylabel('Energy (eV)')
e_min = min(d['energies'])
y = [((e - e_min) * Ha_to_eV) for e in d['energies']]
plt.plot(x, y, 'ro--')
return plt
|
Get a matplotlib plot of the potential energy surface.
Args:
coords: internal coordinate name to use as abscissa.
|
codesearchnet
|
def generate_cot_body(context):
try:
cot = {'artifacts': get_cot_artifacts(context), 'chainOfTrustVersion': 1, 'runId': context.claim_task['runId'], 'task': context.task, 'taskId': context.claim_task['status']['taskId'], 'workerGroup': context.claim_task['workerGroup'], 'workerId': context.config['worker_id'], 'workerType': context.config['worker_type'], 'environment': get_cot_environment(context)}
except (KeyError,) as exc:
raise ScriptWorkerException("Can't generate chain of trust! {}".format(str(exc)))
return cot
|
Generate the chain of trust dictionary.
This is the unsigned and unformatted chain of trust artifact contents.
Args:
context (scriptworker.context.Context): the scriptworker context.
Returns:
dict: the unsigned and unformatted chain of trust artifact contents.
Raises:
ScriptWorkerException: on error.
|
codesearchnet
|
def _read_from_seg(self, n):
result = self._seg.read(size=n)
if result == "":
return result
offset = self._seg.tell()
if offset > self._seg_valid_length:
extra = offset - self._seg_valid_length
result = result[:-1*extra]
self._offset += len(result)
return result
|
Read from current seg.
Args:
n: max number of bytes to read.
Returns:
valid bytes from the current seg. "" if no more is left.
|
juraj-google-style
|
def DataIsInteger(self):
return (self.data_type in (definitions.REG_DWORD, definitions.REG_DWORD_BIG_ENDIAN, definitions.REG_QWORD))
|
Determines, based on the data type, if the data is an integer.
The data types considered integers are: REG_DWORD (REG_DWORD_LITTLE_ENDIAN),
REG_DWORD_BIG_ENDIAN and REG_QWORD.
Returns:
bool: True if the data is an integer, False otherwise.
|
codesearchnet
|
def browse(self, path=None):
params = None
if path:
assert isinstance(path, string_types)
params = {'current': path}
return self.get('browse', params=params)
|
Returns a list of directories matching the path given.
Args:
path (str): glob pattern.
Returns:
List[str]
|
codesearchnet
|
def reset_logical_devices(device_type, count):
reset_context()
devices = tf_config.list_physical_devices(device_type)
if device_type.upper() not in ('CPU', 'GPU'):
raise ValueError('resetting logical device for non-supported device type : %s' % device_type)
if count < len(devices):
devices = devices[:count]
tf_config.set_visible_devices(devices, device_type=device_type.upper())
for i, device in enumerate(devices):
n = (i + 1) * count
assert n > 0
configs = []
for ordinal in range(n):
if device_type.upper() == 'GPU':
dev_config = context.LogicalDeviceConfiguration(memory_limit=_DEFAULT_GPU_MEMORY_LIMIT, experimental_device_ordinal=ordinal)
else:
dev_config = context.LogicalDeviceConfiguration()
configs.append(dev_config)
tf_config.set_logical_device_configuration(device, configs)
|
Resets logical devices for CPU/GPU.
Logical devices can only be instantiated once on a particular context. For
now, context re-use is triggering some function duplication errors, so we
reset the context on each call.
Args:
device_type: The device_type to reset.
count: number of virtual devices to reset to.
|
github-repos
|
def refill_main_wallet(self, from_address, to_address, nfees, ntokens, password, min_confirmations=6, sync=False):
(path, from_address) = from_address
unsigned_tx = self._t.simple_transaction(from_address, (([(to_address, self.fee)] * nfees) + ([(to_address, self.token)] * ntokens)), min_confirmations=min_confirmations)
signed_tx = self._t.sign_transaction(unsigned_tx, password)
txid = self._t.push(signed_tx)
return txid
|
Refill the Federation wallet with tokens and fees. This keeps the federation wallet clean.
Dealing with exact values simplifies the transactions. No need to calculate change. Easier to keep track of the
unspents and prevent double spends that would result in transactions being rejected by the bitcoin network.
Args:
from_address (Tuple[str]): Refill wallet address. Refills the federation wallet with tokens and fees
to_address (str): Federation wallet address
nfees (int): Number of fees to transfer. Each fee is 10000 satoshi. Used to pay for the transactions
ntokens (int): Number of tokens to transfer. Each token is 600 satoshi. Used to register hashes in the blockchain
password (str): Password for the Refill wallet. Used to sign the transaction
min_confirmations (int): Number of confirmations when choosing the inputs of the transaction. Defaults to 6
sync (bool): Perform the transaction in synchronous mode, the call to the function will block until there is at
least one confirmation on the blockchain. Defaults to False
Returns:
str: transaction id
|
codesearchnet
|
def snyder_opt(self, structure):
nsites = structure.num_sites
volume = structure.volume
num_density = 1e30 * nsites / volume
return 1.66914e-23 * \
(self.long_v(structure) + 2.*self.trans_v(structure))/3. \
/ num_density ** (-2./3.) * (1 - nsites ** (-1./3.))
|
Calculates Snyder's optical sound velocity (in SI units)
Args:
structure: pymatgen structure object
Returns: Snyder's optical sound velocity (in SI units)
|
juraj-google-style
|
def ToDebugString(self, indentation_level=1):
indentation = (' ' * indentation_level)
text_parts = ['{0:s}path segment index: {1:d}\n'.format(indentation, self.path_segment_index)]
for (path_segment, scan_object) in self._path_segments.items():
text_parts.append('{0:s}path segment: {1:s}\n'.format(indentation, path_segment))
if isinstance(scan_object, PathFilterScanTreeNode):
text_parts.append('{0:s}scan tree node:\n'.format(indentation))
text_parts.append(scan_object.ToDebugString((indentation_level + 1)))
elif isinstance(scan_object, py2to3.STRING_TYPES):
text_parts.append('{0:s}path: {1:s}\n'.format(indentation, scan_object))
text_parts.append('{0:s}default value:\n'.format(indentation))
if isinstance(self.default_value, PathFilterScanTreeNode):
text_parts.append('{0:s}scan tree node:\n'.format(indentation))
text_parts.append(self.default_value.ToDebugString((indentation_level + 1)))
elif isinstance(self.default_value, py2to3.STRING_TYPES):
text_parts.append('{0:s}pattern: {1:s}\n'.format(indentation, self.default_value))
text_parts.append('\n')
return ''.join(text_parts)
|
Converts the path filter scan tree node into a debug string.
Args:
indentation_level: an integer containing the text indentation level.
Returns:
A string containing a debug representation of the path filter scan
tree node.
|
codesearchnet
|
def execute(self, triple_map, output, **kwargs):
subjects = []
logical_src_iterator = str(triple_map.logicalSource.iterator)
json_object = kwargs.get('obj', self.source)
if logical_src_iterator == ".":
results = [None,]
else:
json_path_exp = jsonpath_ng.parse(logical_src_iterator)
results = [r.value for r in json_path_exp.find(json_object)][0]
for row in results:
subject = self.generate_term(term_map=triple_map.subjectMap,
**kwargs)
for pred_obj_map in triple_map.predicateObjectMap:
predicate = pred_obj_map.predicate
if pred_obj_map.template is not None:
output.add((
subject,
predicate,
self.generate_term(term_map=pred_obj_map, **kwargs)))
if pred_obj_map.parentTriplesMap is not None:
self.__handle_parents__(
output,
parent_map=pred_obj_map.parentTriplesMap,
subject=subject,
predicate=predicate,
obj=row,
**kwargs)
if pred_obj_map.reference is not None:
ref_exp = jsonpath_ng.parse(str(pred_obj_map.reference))
found_objects = [r.value for r in ref_exp.find(row)]
for obj in found_objects:
if rdflib.term._is_valid_uri(obj):
rdf_obj = rdflib.URIRef(str(obj))
else:
rdf_obj = rdflib.Literal(str(obj))
output.add((subject, predicate, rdf_obj))
if pred_obj_map.constant is not None:
output.add((subject,
predicate,
pred_obj_map.constant))
subjects.append(subject)
return subjects
|
Method executes the mapping between a JSON source and
the output RDF graph.
Args:
-----
triple_map: SimpleNamespace, the triples map to execute
output: graph that generated RDF triples are added to
|
juraj-google-style
|
def needs_summary(self, value: Any, *, name: Optional[str]=None, parent: Any=None, title: Union[str, Html, None]=None, enable_summary: Optional[bool]=None, enable_summary_for_str: bool=True, max_summary_len_for_str: int=80) -> bool:
del parent
if isinstance(enable_summary, bool):
return enable_summary
assert enable_summary is None
if not enable_summary_for_str and isinstance(value, str):
return False
if name is None and title is None and (isinstance(value, (int, float, bool, type(None))) or (isinstance(value, str) and len(value) <= max_summary_len_for_str)):
return False
return True
|
Returns True if the object needs a summary.
Args:
value: The value to render.
name: The referred field name of the value.
parent: The parent of the value.
title: The title of the summary.
enable_summary: Whether to enable the summary. If None, summary will
be enabled for complex types or when string exceeds
`max_summary_len_for_str`.
enable_summary_for_str: Whether to enable the summary for strings.
max_summary_len_for_str: The maximum length of the string to display.
Returns:
True if the object needs a summary.
|
github-repos
|
def _CreateTaskStorageWriter(self, path, task):
return SQLiteStorageFileWriter(self._session, path, storage_type=definitions.STORAGE_TYPE_TASK, task=task)
|
Creates a task storage writer.
Args:
path (str): path to the storage file.
task (Task): task.
Returns:
SQLiteStorageFileWriter: storage writer.
|
codesearchnet
|
def add_vlan_int(self, vlan_id):
config = ET.Element('config')
vlinterface = ET.SubElement(config, 'interface-vlan',
xmlns=("urn:brocade.com:mgmt:"
"brocade-interface"))
interface = ET.SubElement(vlinterface, 'interface')
vlan = ET.SubElement(interface, 'vlan')
name = ET.SubElement(vlan, 'name')
name.text = vlan_id
try:
self._callback(config)
return True
except Exception as error:
logging.error(error)
return False
|
Add VLAN Interface. VLAN interfaces are required for VLANs even when
not wanting to use the interface for any L3 features.
Args:
vlan_id: ID for the VLAN interface being created. Value of 2-4096.
Returns:
True if command completes successfully or False if not.
Raises:
None
|
juraj-google-style
|
def prefixsearch(self, prefix, results=10):
self._check_query(prefix, 'Prefix must be specified')
query_params = {'list': 'prefixsearch', 'pssearch': prefix, 'pslimit': ('max' if (results > 500) else results), 'psnamespace': 0, 'psoffset': 0}
raw_results = self.wiki_request(query_params)
self._check_error_response(raw_results, prefix)
return [rec['title'] for rec in raw_results['query']['prefixsearch']]
|
Perform a prefix search using the provided prefix string
Args:
prefix (str): Prefix string to use for search
results (int): Number of pages with the prefix to return
Returns:
list: List of page titles
Note:
**Per the documentation:** "The purpose of this module is \
similar to action=opensearch: to take user input and provide \
the best-matching titles. Depending on the search engine \
backend, this might include typo correction, redirect \
avoidance, or other heuristics."
|
codesearchnet
|
def create_sequence_pretty_tensor(sequence_input, shape=None, save_state=True):
inputs = prettytensor.wrap_sequence(sequence_input.inputs, tensor_shape=shape)
targets = prettytensor.wrap_sequence(sequence_input.targets)
if save_state:
bookkeeper.set_recurrent_state_saver(sequence_input)
return (inputs, targets)
|
Creates a PrettyTensor object for the given sequence.
The first dimension is treated as a time-dimension * batch and a default is
set for `unroll` and `state_saver`.
TODO(eiderman): Remove shape.
Args:
sequence_input: A SequenceInput or StateSavingSequenceInput
shape: The shape of each item in the sequence (including batch).
save_state: If true, use the sequence_input's state and save_state methods.
Returns:
2 Layers: inputs, targets
|
codesearchnet
|
def pauseProducing(self):
if (not self._running):
return
self._running = False
for consumer in self._consumers.values():
(yield consumer.channel.basic_cancel(consumer_tag=consumer.tag))
_legacy_twisted_log.msg('Paused retrieval of messages for the server queue')
|
Pause the reception of messages by canceling all existing consumers.
This does not disconnect from the server.
Message reception can be resumed with :meth:`resumeProducing`.
Returns:
Deferred: fired when the production is paused.
|
codesearchnet
|
def extract_class(jar, name):
with jar.open(name) as entry:
return LinkableClass(javatools.unpack_class(entry))
|
Extracts a LinkableClass from a jar.
Args:
jar: An open ZipFile instance.
name: A string containing the binary name of a class.
Raises:
KeyError: The class does not exist in the jar.
|
codesearchnet
|
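A usage sketch for extract_class above; the jar path and class name are placeholders, and python-javatools is assumed to be installed:
import zipfile

with zipfile.ZipFile('example.jar') as jar:                  # placeholder jar
    linkable = extract_class(jar, 'com/example/Foo.class')   # placeholder class name
    # KeyError is raised if the entry is missing from the jar.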
def get_logging_dir(appname='default'):
from utool._internal import meta_util_cache
from utool._internal import meta_util_cplat
from utool import util_cache
if ((appname is None) or (appname == 'default')):
appname = util_cache.get_default_appname()
resource_dpath = meta_util_cplat.get_resource_dir()
default = join(resource_dpath, appname, 'logs')
log_dir = meta_util_cache.global_cache_read(logdir_cacheid, appname=appname, default=default)
log_dir_realpath = realpath(log_dir)
return log_dir_realpath
|
The default log dir is in the system resource directory,
but the utool global cache allows the user to override
where the logs for a specific app should be stored.
Returns:
log_dir_realpath (str): real path to logging directory
|
codesearchnet
|
def alexa(self) -> dict:
response = {'response': {'shouldEndSession': False, 'outputSpeech': {'type': 'PlainText', 'text': self.content}, 'card': {'type': 'Simple', 'content': self.content}}}
return response
|
Returns the Amazon Alexa compatible state of the PlainText instance.
Creates an Amazon Alexa response blank with populated "outputSpeech" and
"card" sections.
Returns:
response: Amazon Alexa representation of PlainText state.
|
codesearchnet
|
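An illustration of the response shape produced above for an instance whose content is 'Hello!'; only the dict literal is shown, no Alexa API is involved:
expected = {
    'response': {
        'shouldEndSession': False,
        'outputSpeech': {'type': 'PlainText', 'text': 'Hello!'},
        'card': {'type': 'Simple', 'content': 'Hello!'},
    }
}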
def interpret_obj(self, obj, v_level_indexes, h_level_indexes, v_level_visibility, h_level_visibility, v_level_sort_keys, h_level_sort_keys, v_level_titles, h_level_titles):
if (not isinstance(obj, NonStringIterable)):
raise self.error('Cannot make a table from object {!r}'.format(obj))
rectangular_rows = tabulate(obj, v_level_indexes=v_level_indexes, h_level_indexes=h_level_indexes, v_level_visibility=v_level_visibility, h_level_visibility=h_level_visibility, v_level_sort_keys=v_level_sort_keys, h_level_sort_keys=h_level_sort_keys, v_level_titles=v_level_titles, h_level_titles=h_level_titles)
assert is_rectangular(rectangular_rows)
(num_rows, num_cols) = size(rectangular_rows)
return (rectangular_rows, num_cols)
|
Interpret the given Python object as a table.
Args:
obj: A sequence (later a mapping, too)
Returns:
A list of lists representing rows of cells.
Raises:
TypeError: If the type couldn't be interpreted as a table.
|
codesearchnet
|
def create_member(self, member_json):
return trolly.member.Member(trello_client=self, member_id=member_json['id'], name=member_json['fullName'], data=member_json)
|
Create a Member object from JSON object
Returns:
Member: The member from the given `member_json`.
|
codesearchnet
|
def topological_sort(self):
graph = self.graph
in_degree = {}
for u in graph:
in_degree[u] = 0
for u in graph:
for v in graph[u]:
in_degree[v] += 1
queue = deque()
for u in in_degree:
if (in_degree[u] == 0):
queue.appendleft(u)
sorted_graph = []
while queue:
u = queue.pop()
sorted_graph.append(u)
for v in sorted(graph[u]):
in_degree[v] -= 1
if (in_degree[v] == 0):
queue.appendleft(v)
if (len(sorted_graph) == len(graph)):
return sorted_graph
else:
raise ValueError('graph is not acyclic')
|
Returns a topological ordering of the DAG.
Returns:
list: A list of topologically sorted nodes in the graph.
Raises:
ValueError: Raised if the graph is not acyclic.
|
codesearchnet
|
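A minimal standalone sketch of the same Kahn-style algorithm on a plain dict (node -> successors), independent of the surrounding class:
from collections import deque

graph = {'a': ['b', 'c'], 'b': ['d'], 'c': ['d'], 'd': []}
in_degree = {u: 0 for u in graph}
for u in graph:
    for v in graph[u]:
        in_degree[v] += 1
queue = deque(u for u in in_degree if in_degree[u] == 0)
order = []
while queue:
    u = queue.popleft()
    order.append(u)
    for v in sorted(graph[u]):
        in_degree[v] -= 1
        if in_degree[v] == 0:
            queue.append(v)
print(order)  # ['a', 'b', 'c', 'd']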
def institute(self, institute_id):
LOG.debug("Fetch institute {}".format(institute_id))
institute_obj = self.institute_collection.find_one({
'_id': institute_id
})
if institute_obj is None:
LOG.debug("Could not find institute {0}".format(institute_id))
return institute_obj
|
Fetch a single institute from the backend
Args:
institute_id(str)
Returns:
Institute object
|
juraj-google-style
|
def _MaybePurgeOrphanedData(self, event):
if not self.purge_orphaned_data:
return
if self.file_version and self.file_version >= 2:
self._CheckForRestartAndMaybePurge(event)
else:
self._CheckForOutOfOrderStepAndMaybePurge(event)
|
Maybe purge orphaned data due to a TensorFlow crash.
When TensorFlow crashes at step T+O and restarts at step T, any events
written after step T are now "orphaned" and will be at best misleading if
they are included in TensorBoard.
This logic attempts to determine if there is orphaned data, and purge it
if it is found.
Args:
event: The event to use as a reference, to determine if a purge is needed.
|
juraj-google-style
|
def name_from_class(cls, measurement_class):
if (not getattr(cls, '_measurements_initialized', False)):
cls._measurement_map = dict(((m.name, m) for m in all_measurements()))
cls._measurements_initialized = True
try:
name = getattr(measurement_class, 'name')
except AttributeError:
raise UnrecognizedMeasurementError(("No 'name' attribute in %s" % measurement_class))
else:
cls._measurement_map[name] = measurement_class
return name
|
For a given measurement class, return its generic name.
The given class is expected to have a ``name`` attribute, otherwise this
function will raise an exception. The point of using this method instead
of just trying to grab that attribute in the application is to cache
measurement name to class mappings for future use.
Returns:
the generic OpenXC name for a measurement class.
Raises:
UnrecognizedMeasurementError: if the class does not have a valid
generic name
|
codesearchnet
|
def add_snmp_host(self, **kwargs):
host_info = kwargs.pop('host_info')
community = kwargs.pop('community')
callback = kwargs.pop('callback', self._callback)
config = ET.Element('config')
snmp_server = ET.SubElement(config, 'snmp-server', xmlns='urn:brocade.com:mgmt:brocade-snmp')
host = ET.SubElement(snmp_server, 'host')
ip_addr = ET.SubElement(host, 'ip')
ip_addr.text = host_info[0]
com = ET.SubElement(host, 'community')
com.text = community
udp_port = ET.SubElement(host, 'udp-port')
udp_port.text = host_info[1]
return callback(config)
|
Add SNMP host to NOS device.
Args:
host_info (tuple(str, str)): Tuple of host IP and port.
community (str): Community string to be added to device.
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `host_info` or `community` is not defined.
|
codesearchnet
|
def _create_in_hdx(self, object_type, id_field_name, name_field_name,
file_to_upload=None):
self.check_required_fields()
if id_field_name in self.data and self._load_from_hdx(object_type, self.data[id_field_name]):
logger.warning('%s exists. Updating %s' % (object_type, self.data[id_field_name]))
self._merge_hdx_update(object_type, id_field_name, file_to_upload)
else:
self._save_to_hdx('create', name_field_name, file_to_upload)
|
Helper method to check if the resource exists in HDX and, if so, update it; otherwise create it
Args:
object_type (str): Description of HDX object type (for messages)
id_field_name (str): Name of field containing HDX object identifier
name_field_name (str): Name of field containing HDX object name
file_to_upload (Optional[str]): File to upload to HDX (if url not supplied)
Returns:
None
|
juraj-google-style
|
async def get(self, public_key):
if settings.SIGNATURE_VERIFICATION:
super().verify()
compiler = re.compile('\\((.*?)\\)')
match = compiler.search(self.request.headers.get('User-Agent'))
try:
source = match.group(1)
except:
source = None
(await self.account.logsource(public_key=public_key, source=source))
logging.debug('\n\n [+] -- Get account data.')
response = (await self.account.getaccountdata(public_key=public_key))
logging.debug('\n')
logging.debug(response)
logging.debug('\n')
if ('error' in response.keys()):
self.set_status(response['error'])
self.write(response)
raise tornado.web.Finish
wallets = (await self.account.balance.get_wallets(uid=response['id']))
if isinstance(wallets, dict):
if ('error' in wallets.keys()):
self.set_status(wallets['error'])
self.write(wallets)
raise tornado.web.Finish
response.update({'wallets': json.dumps([i for i in wallets['wallets'] if (i.get('coinid') not in ['BTC', 'LTC', 'ETH'])])})
self.write(response)
|
Receive account data
Accepts:
Query string:
- "public_key" - str
Query string params:
- message ( signed dictionary ):
- "timestamp" - str
Returns:
- "device_id" - str
- "phone" - str
- "public_key" - str
- "count" - int ( wallets amount )
- "level" - int (2 by default)
- "news_count" - int (0 by default)
- "email" - str
- "wallets" - list
Verified: True
|
codesearchnet
|
def forward(self, hidden_features):
hidden_features = self.dropout_layer(hidden_features)
forecast = self.base_pt_block(hidden_features)
return forecast
|
Args:
hidden_features (`torch.Tensor` of shape `(batch_size x num_patch x d_model)` in `flatten` mode
or `(batch_size x n_vars x num_patch x d_model)` in `common_channel`/`mix_channel` mode.): Input hidden
features.
Returns:
`torch.Tensor` of shape `(batch_size x n_vars x num_patch x patch_length)`.
|
github-repos
|
def check_regularizers(regularizers, keys):
if (regularizers is None):
return {}
_assert_is_dictlike(regularizers, valid_keys=keys)
keys = set(keys)
if (not (set(regularizers) <= keys)):
extra_keys = (set(regularizers) - keys)
raise KeyError('Invalid regularizer keys {}, regularizers can only be provided for {}'.format(', '.join(("'{}'".format(key) for key in extra_keys)), ', '.join(("'{}'".format(key) for key in keys))))
_check_nested_callables(regularizers, 'Regularizer')
return dict(regularizers)
|
Checks the given regularizers.
This checks that `regularizers` is a dictionary that only contains keys in
`keys`, and furthermore the entries in `regularizers` are functions or
further dictionaries (the latter used, for example, in passing regularizers
to modules inside modules) that must satisfy the same constraints.
Args:
regularizers: Dictionary of regularizers (allowing nested dictionaries) or
None.
keys: Iterable of valid keys for `regularizers`.
Returns:
Copy of checked dictionary of regularizers. If `regularizers=None`, an empty
dictionary will be returned.
Raises:
KeyError: If a regularizer is provided for a key not in `keys`.
TypeError: If a provided regularizer is not a callable function, or
`regularizers` is not a Mapping.
|
codesearchnet
|
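A hedged usage sketch for check_regularizers above; a plain lambda stands in for a real regularizer, which is enough to satisfy the callable check:
regs = check_regularizers({'w': lambda t: 0.0}, keys=['w', 'b'])
assert set(regs) == {'w'}
assert check_regularizers(None, keys=['w', 'b']) == {}
# check_regularizers({'gamma': lambda t: 0.0}, keys=['w', 'b'])  # raises KeyError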
def ssa(scatterer, h_pol=True):
ext_xs = ext_xsect(scatterer, h_pol=h_pol)
return ((sca_xsect(scatterer, h_pol=h_pol) / ext_xs) if (ext_xs > 0.0) else 0.0)
|
Single-scattering albedo for the current setup, with polarization.
Args:
scatterer: a Scatterer instance.
h_pol: If True (default), use horizontal polarization.
If False, use vertical polarization.
Returns:
The single-scattering albedo.
|
codesearchnet
|
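A hedged usage sketch, assuming the function above is pytmatrix's scatter.ssa and that Scatterer accepts these keyword arguments; the numbers are arbitrary placeholders:
from pytmatrix import tmatrix  # assumed dependency

scatterer = tmatrix.Scatterer(radius=2.0, wavelength=6.5,
                              m=complex(1.5, 0.5), axis_ratio=1.0 / 0.6)
albedo = ssa(scatterer, h_pol=True)
print(0.0 <= albedo <= 1.0)  # single-scattering albedo lies in [0, 1]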
def _decorator(func):
func.__doc__ = '\n Assert the condition `x {sym} y` holds element-wise.\n\n This Op checks that `x[i] {sym} y[i]` holds for every pair of (possibly\n broadcast) elements of `x` and `y`. If both `x` and `y` are empty, this is\n trivially satisfied.\n\n If `x` {sym} `y` does not hold, `message`, as well as the first `summarize`\n entries of `x` and `y` are printed, and `InvalidArgumentError` is raised.\n\n When using inside `tf.function`, this API takes effects during execution.\n It\'s recommended to use this API with `tf.control_dependencies` to\n ensure the correct execution order.\n\n In the following example, without `tf.control_dependencies`, errors may\n not be raised at all.\n Check `tf.control_dependencies` for more details.\n\n >>> def check_size(x):\n ... with tf.control_dependencies([\n ... tf.debugging.{opname}(tf.size(x), {test_var},\n ... message=\'Bad tensor size\')]):\n ... return x\n\n >>> check_size(tf.ones([2, 3], tf.float32))\n Traceback (most recent call last):\n ...\n InvalidArgumentError: ...\n\n Args:\n x: Numeric `Tensor`.\n y: Numeric `Tensor`, same dtype as and broadcastable to `x`.\n message: A string to prefix to the default message. (optional)\n summarize: Print this many entries of each tensor. (optional)\n name: A name for this operation (optional). Defaults to "{opname}".\n\n Returns:\n Op that raises `InvalidArgumentError` if `x {sym} y` is False. This can\n be used with `tf.control_dependencies` inside of `tf.function`s to\n block followup computation until the check has executed.\n @compatibility(eager)\n returns None\n @end_compatibility\n\n Raises:\n InvalidArgumentError: if the check can be performed immediately and\n `x == y` is False. The check can be performed immediately during eager\n execution or if `x` and `y` are statically known.\n '.format(sym=sym, opname=opname, test_var=test_var)
return func
|
Decorator that adds docstring to the function for symbol `sym`.
Args:
func: Function for a TensorFlow op
Returns:
A version of `func` with documentation attached.
|
github-repos
|
def InterpolatePath(path, knowledge_base, users=None, path_args=None, depth=0):
sys_formatters = {
"systemroot": "c:\\Windows"
}
if path_args:
sys_formatters.update(path_args)
if users:
results = []
for user in users:
user = GetUserInfo(knowledge_base, user)
if user:
formatters = dict((x.name, y) for x, y in user.ListSetFields())
formatters.update(sys_formatters)
try:
results.append(path.format(**formatters))
except KeyError:
pass
return results
else:
try:
path = path.format(**sys_formatters)
except KeyError:
logging.warning("Failed path interpolation on %s", path)
return ""
if "{" in path and depth < 10:
path = InterpolatePath(
path,
knowledge_base=knowledge_base,
users=users,
path_args=path_args,
depth=depth + 1)
return path
|
Take a string as a path on a client and interpolate with client data.
Args:
path: A single string/unicode to be interpolated.
knowledge_base: An rdf_client.KnowledgeBase object.
users: A list of string usernames, or None.
path_args: A dict of additional args to use in interpolation. These take
precedence over any system provided variables.
depth: A counter for recursion depth.
Returns:
A single string if users is None, otherwise a list of strings.
|
juraj-google-style
|
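A sketch of the no-users branch above: only the built-in systemroot formatter is applied, and knowledge_base is never touched on that path, so None can stand in for it:
path = InterpolatePath('{systemroot}\\System32\\drivers\\etc\\hosts',
                       knowledge_base=None)
print(path)  # c:\Windows\System32\drivers\etc\hosts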
def rr_history(self, ips):
api_name = 'opendns-rr_history'
fmt_url_path = u'dnsdb/ip/a/{0}.json'
return self._multi_get(api_name, fmt_url_path, ips)
|
Get the domains related to input ips.
Args:
ips: an enumerable of strings as ips
Returns:
An enumerable of resource records and features
|
juraj-google-style
|
def run_tag_processor(self, tag_proc_name):
tag_processor = self.tag_procs[tag_proc_name]
for tag in tag_processor.find(self.soup):
self.process_tag(tag_proc_name, tag)
|
Run a tag processor.
Args:
tag_proc_name: A string key that maps to the TagProcessor to run.
|
juraj-google-style
|
def run_bottleneck_on_image(sess, image_data, image_data_tensor,
decoded_image_tensor, resized_input_tensor,
bottleneck_tensor):
resized_input_values = sess.run(decoded_image_tensor,
{image_data_tensor: image_data})
bottleneck_values = sess.run(bottleneck_tensor,
{resized_input_tensor: resized_input_values})
bottleneck_values = np.squeeze(bottleneck_values)
return bottleneck_values
|
Runs inference on an image to extract the 'bottleneck' summary layer.
Args:
sess: Current active TensorFlow Session.
image_data: String of raw JPEG data.
image_data_tensor: Input data layer in the graph.
decoded_image_tensor: Output of initial image resizing and preprocessing.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: Layer before the final softmax.
Returns:
Numpy array of bottleneck values.
|
juraj-google-style
|
def __init__(self, num_shards):
self._num_shards = num_shards
|
Creates a new `FixedShardsPartitioner`.
Args:
num_shards: `int`, number of shards to partition.
|
github-repos
|
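A hedged usage sketch, assuming this is tf.distribute.experimental.partitioners.FixedShardsPartitioner: calling the partitioner returns the number of partitions per axis.
import tensorflow as tf

partitioner = tf.distribute.experimental.partitioners.FixedShardsPartitioner(num_shards=2)
partitions = partitioner(tf.TensorShape([10, 3]), tf.float32)
print(partitions)  # [2, 1] -- the first axis is split into two shards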
def _parse_package(cls, package_string):
(pkg, arch) = rsplit(package_string, cls._arch_sep(package_string))
if (arch not in KNOWN_ARCHITECTURES):
(pkg, arch) = (package_string, None)
(pkg, release) = rsplit(pkg, '-')
(name, version) = rsplit(pkg, '-')
(epoch, version) = (version.split(':', 1) if (':' in version) else ['0', version])
if (name.startswith('oracleasm') and name.endswith('.el5')):
(name, version2) = name.split('-', 1)
version = ((version2 + '-') + version)
return {'name': name, 'version': version, 'release': release, 'arch': arch, 'epoch': epoch}
|
Helper method for parsing package string.
Args:
package_string (str): dash-separated package string such as 'bash-4.2.39-3.el7'
Returns:
dict: dictionary containing 'name', 'version', 'release' and 'arch' keys
|
codesearchnet
|
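A standalone walk-through of the same parse for a full NEVRA string, using str.rsplit in place of the module's rsplit helper:
package_string = 'bash-4.2.39-3.el7.x86_64'
pkg, arch = package_string.rsplit('.', 1)   # arch separator assumed to be '.'
pkg, release = pkg.rsplit('-', 1)
name, version = pkg.rsplit('-', 1)
epoch, version = version.split(':', 1) if ':' in version else ['0', version]
print({'name': name, 'version': version, 'release': release,
       'arch': arch, 'epoch': epoch})
# {'name': 'bash', 'version': '4.2.39', 'release': '3.el7', 'arch': 'x86_64', 'epoch': '0'}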
def _check_put_dtypes(self, vals, indices=None):
if isinstance(vals, dict):
if not self._names:
raise ValueError('Staging areas must have names to enqueue a dictionary')
if not set(vals.keys()).issubset(self._names):
raise ValueError(f'Keys in dictionary to put do not match names of staging area. Dictionary: {sorted(vals.keys())}Queue: {sorted(self._names)}')
vals, indices, _ = zip(*[(vals[k], i, k) for i, k in enumerate(self._names) if k in vals])
else:
if self._names:
raise ValueError('You must enqueue a dictionary in a staging area with names')
if indices is None:
raise ValueError('Indices must be supplied when inserting a list of tensors')
if len(indices) != len(vals):
raise ValueError(f"Number of indices {len(indices)} doesn't match number of values {len(vals)}")
if not isinstance(vals, (list, tuple)):
vals = [vals]
indices = [0]
if not len(vals) <= len(self._dtypes):
raise ValueError(f'Unexpected number of inputs {len(vals)} vs {len(self._dtypes)}')
tensors = []
for val, i in zip(vals, indices):
dtype, shape = (self._dtypes[i], self._shapes[i])
if val.dtype != dtype:
raise ValueError(f'Datatypes do not match. Received val.dtype {str(val.dtype)} and dtype {str(dtype)}')
val.get_shape().assert_is_compatible_with(shape)
tensors.append(ops.convert_to_tensor(val, dtype=dtype, name='component_%d' % i))
return (tensors, indices)
|
Validate and convert `vals` to a list of `Tensor`s.
The `vals` argument can be a Tensor, a list or tuple of tensors, or a
dictionary with tensor values.
If `vals` is a list, then the appropriate indices associated with the
values must be provided.
If it is a dictionary, the staging area must have been constructed with a
`names` attribute and the dictionary keys must match the staging area names.
`indices` will be inferred from the dictionary keys.
If the staging area was constructed with a `names` attribute, `vals` must
be a dictionary.
Checks that the dtype and shape of each value matches that
of the staging area.
Args:
vals: A tensor, a list or tuple of tensors, or a dictionary.
Returns:
A (tensors, indices) tuple where `tensors` is a list of `Tensor` objects
and `indices` is a list of indices associated with the tensors.
Raises:
ValueError: If `vals` or `indices` is invalid.
|
github-repos
|
def add_root(self, model, setter=None):
if (model in self._roots):
return
self._push_all_models_freeze()
try:
self._roots.append(model)
finally:
self._pop_all_models_freeze()
self._trigger_on_change(RootAddedEvent(self, model, setter))
|
Add a model as a root of this Document.
Any changes to this model (including to other models referred to
by it) will trigger ``on_change`` callbacks registered on this
document.
Args:
model (Model) :
The model to add as a root of this document.
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
In the context of a Bokeh server application, incoming updates
to properties will be annotated with the session that is
doing the updating. This value is propagated through any
subsequent change notifications that the update triggers.
The session can compare the event setter to itself, and
suppress any updates that originate from itself.
|
codesearchnet
|
def minimize_best_n(Members):
return list(reversed(sorted(Members, key=lambda member: member.fitness_score)))
|
Orders population members from highest fitness score to lowest
Args:
Members (list): list of PyGenetics Member objects
Returns:
list: ordered list of Members, from highest fitness to lowest fitness
|
juraj-google-style
|
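A minimal sketch with a stand-in Member type; reversed(sorted(...)) puts the highest fitness_score first:
from collections import namedtuple

Member = namedtuple('Member', ['name', 'fitness_score'])
members = [Member('a', 0.3), Member('b', 0.9), Member('c', 0.1)]
ordered = minimize_best_n(members)
print([m.name for m in ordered])  # ['b', 'a', 'c']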
def initialize():
dst_path = get_user_config_path()
copied = False
if (not os.path.exists(dst_path)):
src_path = os.path.join(os.path.dirname(__file__), 'defaultconfig.py')
shutil.copyfile(src_path, dst_path)
copied = True
return (copied, dst_path)
|
Initialize a default config file if it doesn't exist yet.
Returns:
tuple: A tuple of (copied, dst_path). `copied` is a bool indicating if
this function created the default config file. `dst_path` is the
path of the user config file.
|
codesearchnet
|
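A usage sketch for initialize above: create the default user config on first run and report where it lives.
copied, path = initialize()
print('created' if copied else 'already present', path)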