code | docstring | source
---|---|---|
def __init__(self, issues = None):
self._issues = []
self._config = {}
self._project = None
self.issues = issues
|
Class constructor.
Args:
issues (list): List of `Issue` instances
|
juraj-google-style
|
def compute_delta(deps: List[str], imports: List[str], rule_dir: str, source_to_rules: SourceToRule, rule_name: str) -> Optional[DepsDelta]:
issues = []
adds = set()
subs = set()
expanded_deps = set([expand_dep(dep, rule_dir) for dep in deps])
used_deps = set()
for imp in imports:
imp_items = tuple(imp.split('.'))
if imp_items[0] in BUILT_IN_MODULES or imp in BUILT_IN_MODULES:
continue
possible_srcs = list_possible_source_of_import(imp_items)
matching_possible_src = None
for possible_src in possible_srcs:
if possible_src in source_to_rules:
matching_possible_src = possible_src
break
if matching_possible_src is None:
issues.append(f'Cannot infer dependency for "{imp}". Possible source files: {possible_srcs}.')
continue
possible_deps = source_to_rules[matching_possible_src]
if len(possible_deps) > 1:
issues.append(f'Multiple possible rules for "{imp}"')
if possible_deps[0] == expand_dep(':' + rule_name, rule_dir):
continue
if possible_deps[0] not in expanded_deps:
adds.add(possible_deps[0])
else:
used_deps.add(possible_deps[0])
for dep in expanded_deps:
if dep in used_deps:
continue
subs.add(dep)
if adds or subs or issues:
return DepsDelta(adds=list(adds), subs=list(subs), issues=issues)
else:
return None
|
Computes the operation on the deps to support all the imports.
Args:
deps: Dependencies of the rule.
imports: Imports of the rule.
rule_dir: Path of the rule relative to the repo root.
source_to_rules: Mapping from all available source files to rules.
rule_name: Name of the rule whose deps are being computed (used to skip self-references).
|
github-repos
|
def add(name, **kwargs):
if not info(name):
comp_obj = _get_computer_object()
try:
new_group = comp_obj.Create('group', name)
new_group.SetInfo()
log.info('Successfully created group %s', name)
except pywintypes.com_error as exc:
msg = 'Failed to create group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
else:
log.warning('The group %s already exists.', name)
return False
return True
|
Add the specified group
Args:
name (str):
The name of the group to add
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.add foo
|
juraj-google-style
|
def get_class_attributes(cls):
for (name, value) in cls.__dict__.items():
if GenericStruct._is_pyof_attribute(value):
(yield (name, value))
|
Return a generator for class attributes' names and value.
This method strictly relies on PEP 520 (Preserving Class Attribute
Definition Order), implemented in Python 3.6. So, if this behaviour
changes, this whole lib can lose its functionality (since the
attribute order is a strong requirement). For the same reason, this
lib will not work on Python versions earlier than 3.6.
.. code-block:: python3
for name, value in self.get_class_attributes():
print("attribute name: {}".format(name))
print("attribute type: {}".format(value))
Returns:
generator: tuples with attribute name and value.
|
codesearchnet
|
def inference(self, observed_arr):
if observed_arr.ndim < 4:
observed_arr = np.expand_dims(observed_arr, axis=1)
self.__add_channel_flag = True
else:
self.__add_channel_flag = False
return super().inference(observed_arr)
|
Draws samples from the `true` distribution.
Args:
observed_arr: `np.ndarray` of observed data points.
Returns:
`np.ndarray` of inferenced data points.
|
juraj-google-style
|
def stop_dag(self, name=None):
return self._client.send(Request(action='stop_dag', payload={'name': (name if (name is not None) else self._dag_name)})).success
|
Send a stop signal to the specified dag or the dag that hosts this task.
Args:
name (str): The name of the dag that should be stopped. If no name is given, the
dag that hosts this task is stopped.
Upon receiving the stop signal, the dag will not queue any new tasks and wait
for running tasks to terminate.
Returns:
bool: True if the signal was sent successfully.
|
codesearchnet
|
def PrintResponse(batch_job_helper, response_xml):
response = batch_job_helper.ParseResponse(response_xml)
if 'rval' in response['mutateResponse']:
for data in response['mutateResponse']['rval']:
if 'errorList' in data:
        print('Operation %s - FAILURE:' % data['index'])
        print('\terrorType=%s' % data['errorList']['errors']['ApiError.Type'])
        print('\ttrigger=%s' % data['errorList']['errors']['trigger'])
        print('\terrorString=%s' % data['errorList']['errors']['errorString'])
        print('\tfieldPath=%s' % data['errorList']['errors']['fieldPath'])
        print('\treason=%s' % data['errorList']['errors']['reason'])
if 'result' in data:
        print('Operation %s - SUCCESS.' % data['index'])
|
Prints the BatchJobService response.
Args:
batch_job_helper: a BatchJobHelper instance.
response_xml: a string containing a response from the BatchJobService.
|
juraj-google-style
|
def compute_match(mapping, weight_dict):
if veryVerbose:
print("Computing match for mapping", file=DEBUG_LOG)
print(mapping, file=DEBUG_LOG)
if tuple(mapping) in match_triple_dict:
if veryVerbose:
print("saved value", match_triple_dict[tuple(mapping)], file=DEBUG_LOG)
return match_triple_dict[tuple(mapping)]
match_num = 0
for i, m in enumerate(mapping):
if m == -1:
continue
current_node_pair = (i, m)
if current_node_pair not in weight_dict:
continue
if veryVerbose:
print("node_pair", current_node_pair, file=DEBUG_LOG)
for key in weight_dict[current_node_pair]:
if key == -1:
match_num += weight_dict[current_node_pair][key]
if veryVerbose:
print("instance/attribute match", weight_dict[current_node_pair][key], file=DEBUG_LOG)
elif key[0] < i:
continue
elif mapping[key[0]] == key[1]:
match_num += weight_dict[current_node_pair][key]
if veryVerbose:
print("relation match with", key, weight_dict[current_node_pair][key], file=DEBUG_LOG)
if veryVerbose:
print("match computing complete, result:", match_num, file=DEBUG_LOG)
match_triple_dict[tuple(mapping)] = match_num
return match_num
|
Given a node mapping, compute match number based on weight_dict.
Args:
mapping: a list of node indices in AMR 2. The ith element (value j) means node i in AMR 1 maps to node j in AMR 2.
weight_dict: a dictionary of match weights keyed by candidate node pairs.
Returns:
matching triple number
Complexity: O(m*n) , m is the node number of AMR 1, n is the node number of AMR 2
|
juraj-google-style
|
def get_attributes(self, uid=None, attribute_names=None):
if (uid is not None):
if (not isinstance(uid, six.string_types)):
raise TypeError('uid must be a string')
if (attribute_names is not None):
if (not isinstance(attribute_names, list)):
raise TypeError('attribute_names must be a list of strings')
else:
for attribute_name in attribute_names:
if (not isinstance(attribute_name, six.string_types)):
raise TypeError('attribute_names must be a list of strings')
result = self.proxy.get_attributes(uid, attribute_names)
status = result.result_status.value
if (status == enums.ResultStatus.SUCCESS):
return (result.uuid, result.attributes)
else:
reason = result.result_reason.value
message = result.result_message.value
raise exceptions.KmipOperationFailure(status, reason, message)
|
Get the attributes associated with a managed object.
If the uid is not specified, the appliance will use the ID placeholder
by default.
If the attribute_names list is not specified, the appliance will
return all viable attributes for the managed object.
Args:
uid (string): The unique ID of the managed object with which the
retrieved attributes should be associated. Optional, defaults
to None.
attribute_names (list): A list of string attribute names
indicating which attributes should be retrieved. Optional,
defaults to None.
|
codesearchnet
|
def _truncate(self, processed_features: Union[dict[str, np.ndarray], BatchFeature], max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, truncation: Optional[bool]=None):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.')
required_input = processed_features[self.model_input_names[0]]
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
        max_length = (max_length // pad_to_multiple_of + 1) * pad_to_multiple_of
needs_to_be_truncated = len(required_input) > max_length
if needs_to_be_truncated:
processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
if 'attention_mask' in processed_features:
processed_features['attention_mask'] = processed_features['attention_mask'][:max_length]
return processed_features
|
Truncate inputs to predefined length or max length in the batch
Args:
processed_features(`Union[Dict[str, np.ndarray], BatchFeature]`):
Dictionary of input values (`np.ndarray[float]`) / input vectors (`List[np.ndarray[float]]`) or batch
of inputs values (`List[np.ndarray[int]]`) / input vectors (`List[np.ndarray[int]]`)
max_length (`int`, *optional*):
maximum length of the returned list and optionally padding length (see below)
pad_to_multiple_of (`int`, *optional*) :
Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to
enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs
which benefit from having sequence lengths be a multiple of 128.
truncation (`bool`, *optional*):
Activates truncation to cut input sequences longer than `max_length` to `max_length`.
|
github-repos
|
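As a quick standalone illustration of the rounding that `_truncate` above applies to `max_length`, here is a minimal sketch (the values 100 and 32 are only illustrative):

```python
# When truncation and pad_to_multiple_of are combined, the cut-off is
# rounded up to the next multiple of pad_to_multiple_of.
max_length, pad_to_multiple_of = 100, 32
if max_length % pad_to_multiple_of != 0:
    max_length = (max_length // pad_to_multiple_of + 1) * pad_to_multiple_of
assert max_length == 128
```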
def decode_base64(data):
data = bytes(data, encoding="ascii")
missing_padding = len(data) % 4
if missing_padding != 0:
data += b'=' * (4 - missing_padding)
return base64.b64decode(data)
|
Decodes a base64 string, with padding being optional
Args:
data: A base64 encoded string
Returns:
bytes: The decoded bytes
|
juraj-google-style
|
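The padding trick used by `decode_base64` above can be checked in isolation; `"aGVsbG8"` is simply `b"hello"` encoded with its trailing `=` stripped:

```python
import base64

raw = "aGVsbG8"                       # base64 of b"hello" without its "=" padding
padded = raw + "=" * (-len(raw) % 4)  # restore padding to a multiple of 4
assert base64.b64decode(padded) == b"hello"
```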
def formatted(self, func):
other = EscapedString.__new__(EscapedString)
other.strings = []
for is_literal, value in self.strings:
if not is_literal:
value = func(value)
other.strings.append((is_literal, value))
return other
|
Return the string with non-literal parts formatted.
Args:
func (callable): Callable that translates a string into a
formatted string.
Returns:
`EscapedString` object.
|
juraj-google-style
|
def get_idx_types(rng_def, ranges):
idx_types = rng_def.get('kds_esIndexType', []).copy()
if not idx_types:
nested = False
for rng in ranges:
if range_is_obj(rng, __MODULE__.rdfclass):
nested = True
if nested:
idx_types.append('es_Nested')
return idx_types
|
Returns the elasticsearch index types for the obj
args:
rng_def: the range definition dictionary
ranges: rdfproperty ranges
|
juraj-google-style
|
def __generate_reference__(self, triple_map, **kwargs):
raw_value = self.source.get(str(triple_map.reference))
if raw_value is None or len(raw_value) < 1:
return
if hasattr(triple_map, "datatype"):
if triple_map.datatype == NS_MGR.xsd.anyURI.rdflib:
output = rdflib.URIRef(raw_value)
else:
output = rdflib.Literal(
raw_value,
datatype=triple_map.datatype)
else:
output = rdflib.Literal(raw_value)
return output
|
Generates an RDF entity based on the triple map.
Args:
triple_map(SimpleNamespace): Triple Map
|
juraj-google-style
|
def check_tx(self, raw_transaction):
self.abort_if_abci_chain_is_not_synced()
logger.debug('check_tx: %s', raw_transaction)
transaction = decode_transaction(raw_transaction)
if self.bigchaindb.is_valid_transaction(transaction):
logger.debug('check_tx: VALID')
return ResponseCheckTx(code=CodeTypeOk)
else:
logger.debug('check_tx: INVALID')
return ResponseCheckTx(code=CodeTypeError)
|
Validate the transaction before entry into
the mempool.
Args:
raw_transaction: a raw transaction string (in bytes).
|
codesearchnet
|
def spliceext(filepath, s):
root, ext = os.path.splitext(safepath(filepath))
return root + s + ext
|
Add s into filepath before the extension
Args:
filepath (str, path): file path
s (str): string to splice
Returns:
str
|
juraj-google-style
|
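A standalone sketch of the splitext-based splice performed by `spliceext` above (the filename here is made up):

```python
import os

root, ext = os.path.splitext("report.csv")
assert root + "_v2" + ext == "report_v2.csv"
```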
def create_unit(self, name, unit):
self._single_request('Units.Set', unitName=name, body={'desiredState': unit.desiredState, 'options': unit.options})
return self.get_unit(name)
|
Create a new Unit in the cluster
Create and modify Unit entities to communicate to fleet the desired state of the cluster.
This simply declares what should be happening; the backend system still has to react to
the changes in this desired state. The actual state of the system is communicated with
UnitState entities.
Args:
name (str): The name of the unit to create
unit (Unit): The unit to submit to fleet
Returns:
Unit: The unit that was created
Raises:
fleet.v1.errors.APIError: Fleet returned a response code >= 400
|
codesearchnet
|
def remove(self, future):
if self._loop.get_debug():
logger.debug('Removing %s from the linked list.', future)
if (future.prev is None):
assert (future is self.head)
self.head = future.next
if (self.head is None):
self.tail = None
if (not self.cancelled()):
self.set_result(None)
else:
self.head.prev = None
elif (future.next is None):
assert (future is self.tail)
self.tail = future.prev
if (self.tail is None):
self.head = None
if (not self.cancelled()):
self.set_result(None)
else:
            self.tail.next = None  # detach the removed node from the new tail
|
Remove an object from the linked list.
Args:
future (PlasmaObjectFuture): A PlasmaObjectFuture instance.
|
codesearchnet
|
def UnlockScanNode(self, path_spec):
if (not self.HasScanNode(path_spec)):
raise KeyError('Scan node does not exist.')
if (path_spec not in self._locked_scan_nodes):
raise KeyError('Scan node is not locked.')
del self._locked_scan_nodes[path_spec]
self._scan_nodes[path_spec].scanned = False
|
Marks a scan node as unlocked.
Args:
path_spec (PathSpec): path specification.
Raises:
KeyError: if the scan node does not exist or is not locked.
|
codesearchnet
|
def user_avatar_url(username, size=64, default="retro"):
openid = "http:
return libravatar_url(openid=openid, size=size, default=default)
|
Get the avatar URL of the provided Fedora username.
The URL is returned from the Libravatar service.
Args:
username (str): The username to get the avatar of.
size (int): Size of the avatar in pixels (it's a square).
default (str): Default avatar to return if not found.
Returns:
str: The URL to the avatar image.
|
juraj-google-style
|
def __lt__(self, other):
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value < other.value
|
Returns True if `self` is known to be less than `other`.
Dimensions are compared as follows:
```python
(tf.compat.v1.Dimension(m) < tf.compat.v1.Dimension(n)) == (m < n)
(tf.compat.v1.Dimension(m) < tf.compat.v1.Dimension(None)) == None
(tf.compat.v1.Dimension(None) < tf.compat.v1.Dimension(n)) == None
(tf.compat.v1.Dimension(None) < tf.compat.v1.Dimension(None)) == None
```
Args:
other: Another Dimension.
Returns:
The value of `self.value < other.value` if both are known, otherwise
None.
|
github-repos
|
@dataclass
class LabelSmoother:
epsilon: float = 0.1
ignore_index: int = -100
def __call__(self, model_output, labels, shift_labels=False):
logits = model_output['logits'] if isinstance(model_output, dict) else model_output[0]
if shift_labels:
logits = logits[..., :-1, :].contiguous()
labels = labels[..., 1:].contiguous()
log_probs = -nn.functional.log_softmax(logits, dim=-1)
if labels.dim() == log_probs.dim() - 1:
labels = labels.unsqueeze(-1)
padding_mask = labels.eq(self.ignore_index)
labels = torch.clamp(labels, min=0)
nll_loss = log_probs.gather(dim=-1, index=labels)
smoothed_loss = log_probs.sum(dim=-1, keepdim=True, dtype=torch.float32)
nll_loss.masked_fill_(padding_mask, 0.0)
smoothed_loss.masked_fill_(padding_mask, 0.0)
num_active_elements = padding_mask.numel() - padding_mask.long().sum()
nll_loss = nll_loss.sum() / num_active_elements
smoothed_loss = smoothed_loss.sum() / (num_active_elements * log_probs.shape[-1])
return (1 - self.epsilon) * nll_loss + self.epsilon * smoothed_loss
|
Adds label-smoothing on a pre-computed output from a Transformers model.
Args:
epsilon (`float`, *optional*, defaults to 0.1):
The label smoothing factor.
ignore_index (`int`, *optional*, defaults to -100):
The index in the labels to ignore when computing the loss.
|
github-repos
|
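A minimal usage sketch for the `LabelSmoother` above, assuming the class (and its `torch`/`nn` dependencies) is importable in the current scope; shapes and values are made up:

```python
import torch

smoother = LabelSmoother()             # epsilon=0.1, ignore_index=-100
logits = torch.randn(2, 5, 10)         # (batch, seq_len, vocab)
labels = torch.randint(0, 10, (2, 5))  # (batch, seq_len)
loss = smoother({"logits": logits}, labels)
assert loss.dim() == 0                 # scalar training loss
```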
def url(self, suffix=""):
return super(neuroRemote,
self).url('{}/'.format(self._ext) + suffix)
|
Return a constructed URL, appending an optional suffix (uri path).
Arguments:
suffix (str : ""): The suffix to append to the end of the URL
Returns:
str: The complete URL
|
juraj-google-style
|
def shape(self):
return self._shape
|
The statically known shape of the RaggedTensor.
Examples:
>>> rt = tf.ragged.constant([[0], [1, 2]])
>>> tf.type_spec_from_value(rt).shape
TensorShape([2, None])
>>> rt = tf.ragged.constant([[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1)
>>> tf.type_spec_from_value(rt).shape
TensorShape([2, None, 2])
Returns:
A `tf.TensorShape` containing the statically known shape of the
RaggedTensor. Ragged dimensions have a size of `None`.
|
github-repos
|
def disconnect_container_from_network(self, container, net_id,
force=False):
data = {"Container": container}
if force:
if version_lt(self._version, '1.22'):
raise InvalidVersion(
'Forced disconnect was introduced in API 1.22'
)
data['Force'] = force
url = self._url("/networks/{0}/disconnect", net_id)
res = self._post_json(url, data=data)
self._raise_for_status(res)
|
Disconnect a container from a network.
Args:
container (str): container ID or name to be disconnected from the
network
net_id (str): network ID
force (bool): Force the container to disconnect from a network.
Default: ``False``
|
juraj-google-style
|
def py_hash(key, num_buckets):
b, j = -1, 0
if num_buckets < 1:
raise ValueError('num_buckets must be a positive number')
while j < num_buckets:
b = int(j)
        key = ((key * 2862933555777941757) + 1) & 0xffffffffffffffff
j = float(b + 1) * (float(1 << 31) / float((key >> 33) + 1))
return int(b)
|
Generate a number in the range [0, num_buckets).
Args:
key (int): The key to hash.
num_buckets (int): Number of buckets to use.
Returns:
The bucket number `key` computes to.
Raises:
ValueError: If `num_buckets` is not a positive number.
|
juraj-google-style
|
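A small usage sketch for `py_hash` above (jump consistent hashing), assuming the function is in scope; it illustrates that adding one bucket only moves a small fraction of keys:

```python
keys = range(10000)
before = [py_hash(k, 10) for k in keys]
after = [py_hash(k, 11) for k in keys]
moved = sum(a != b for a, b in zip(before, after))
# roughly 1/11 of the keys are expected to land in the new bucket
print(moved)
```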
def ParseLeakFilesTable(
self, parser_mediator, database=None, table=None, **unused_kwargs):
if database is None:
raise ValueError('Missing database value.')
if table is None:
raise ValueError('Missing table value.')
for esedb_record in table.records:
if parser_mediator.abort:
break
record_values = self._GetRecordValues(
parser_mediator, table.name, esedb_record)
event_data = MsieWebCacheLeakFilesEventData()
event_data.cached_filename = record_values.get('Filename', None)
event_data.leak_identifier = record_values.get('LeakId', None)
timestamp = record_values.get('CreationTime', None)
if timestamp:
date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses the LeakFiles table.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
database (Optional[pyesedb.file]): ESE database.
table (Optional[pyesedb.table]): table.
Raises:
ValueError: if the database or table value is missing.
|
juraj-google-style
|
def print_headers(head, outfile=None, silent=False):
for header_line in head.print_header():
if outfile:
outfile.write(header_line+'\n')
else:
if not silent:
print(header_line)
return
|
Print the vcf headers.
If a result file is provided headers will be printed here, otherwise
they are printed to stdout.
Args:
head (HeaderParser): A vcf header object
outfile (FileHandle): A file handle
silent (Bool): If nothing should be printed.
|
juraj-google-style
|
def entityLabel(rdfGraph, anEntity, language=DEFAULT_LANGUAGE, getall=True):
if getall:
temp = []
for o in rdfGraph.objects(anEntity, RDFS.label):
temp += [o]
return temp
else:
for o in rdfGraph.objects(anEntity, RDFS.label):
if getattr(o, 'language') and getattr(o, 'language') == language:
return o
return ""
|
Returns the rdfs.label value of an entity (class or property), if existing.
Defaults to DEFAULT_LANGUAGE. Returns the RDF.Literal resource
Args:
language: 'en', 'it' etc..
getall: returns a list of all labels rather than a string
|
juraj-google-style
|
def update(self, node_spec):
return self.client.api.update_node(self.id, self.version, node_spec)
|
Update the node's configuration.
Args:
node_spec (dict): Configuration settings to update. Any values
not provided will be removed. Default: ``None``
Returns:
`True` if the request went through.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> node_spec = {'Availability': 'active',
'Name': 'node-name',
'Role': 'manager',
'Labels': {'foo': 'bar'}
}
>>> node.update(node_spec)
|
codesearchnet
|
def get_tz(tz) -> str:
from xbbg.const import exch_info
if tz is None: return DEFAULT_TZ
to_tz = tz
if isinstance(tz, str):
if hasattr(TimeZone, tz):
to_tz = getattr(TimeZone, tz)
else:
exch = exch_info(ticker=tz)
if 'tz' in exch.index:
to_tz = exch.tz
return to_tz
|
Convert tz from ticker / shorthands to timezone
Args:
tz: ticker or timezone shorthands
Returns:
str: Python timezone
Examples:
>>> get_tz('NY')
'America/New_York'
>>> get_tz(TimeZone.NY)
'America/New_York'
>>> get_tz('BHP AU Equity')
'Australia/Sydney'
|
juraj-google-style
|
def normalize_list_like_lines(generation):
lines = generation.split('\n')
output_lines = []
for line_no, line in enumerate(lines):
match = re.search('. ([-*]) ', line)
if not match or line[0] not in ('-', '*'):
output_lines.append(line)
continue
delim = match.group(1) + ' '
splits = line.split(delim)[1:]
replacement = ''
delim1 = line[0] + ' '
for i, item in enumerate(splits):
level = 0
potential_numeral, _, rest = item.strip().partition(' ')
if not rest:
continue
if re.match('^[\\dixv]+((?:\\.[\\dixv])?)+$', potential_numeral, flags=re.I | re.M):
level = potential_numeral.count('.')
replacement += ('\n' if i > 0 else '') + '\t' * level + (delim if i > 0 or line_no == 0 else delim1) + item.strip()
if line_no == len(lines) - 1:
replacement += '\n'
output_lines.append(replacement)
return '\n'.join(output_lines)
|
Normalize lines in the given text that resemble list items. The function looks for lines that start optionally with
'-' or '*', possibly followed by Roman numerals or digits indicating nesting levels. The function reformats such
lines to make them more structured.
Args:
generation (str): The input text containing lines that need to be normalized.
Returns:
str: The input text with the list-like lines normalized.
Note:
The function uses regular expressions to identify and reformat the list-like lines. The patterns capture
optional bullet points, nesting levels indicated by numerals, and the actual list item content. The
normalization adjusts the bullet point style and nesting levels based on the captured patterns.
|
github-repos
|
def find_bind_module(name, verbose=False):
bindnames = get_bind_modules(verbose=verbose)
bindfile = bindnames.get(name)
if bindfile:
return bindfile
if not verbose:
return None
fuzzy_matches = get_close_pkgs(name, bindnames.keys())
if fuzzy_matches:
rows = [(x[0], bindnames[x[0]]) for x in fuzzy_matches]
print "'%s' not found. Close matches:" % name
print '\n'.join(columnise(rows))
else:
print "No matches."
return None
|
Find the bind module matching the given name.
Args:
name (str): Name of package to find bind module for.
verbose (bool): If True, print extra output.
Returns:
str: Filepath to bind module .py file, or None if not found.
|
juraj-google-style
|
def forward(self, hidden: torch.Tensor):
if self.mode == 'mix_channel':
hidden = self.channel_feature_mixer(hidden)
hidden = self.patch_mixer(hidden)
hidden = self.feature_mixer(hidden)
return hidden
|
Args:
hidden (`torch.Tensor` of shape `(batch_size, num_patches, d_model)`):
Input tensor to the layer.
Returns:
`torch.Tensor`: Transformed tensor.
|
github-repos
|
def connected_emulators(self, host=enums.JLinkHost.USB):
res = self._dll.JLINKARM_EMU_GetList(host, 0, 0)
if (res < 0):
raise errors.JLinkException(res)
num_devices = res
info = (structs.JLinkConnectInfo * num_devices)()
num_found = self._dll.JLINKARM_EMU_GetList(host, info, num_devices)
if (num_found < 0):
raise errors.JLinkException(num_found)
return list(info)[:num_found]
|
Returns a list of all the connected emulators.
Args:
self (JLink): the ``JLink`` instance
host (int): host type to search (default: ``JLinkHost.USB``)
Returns:
List of ``JLinkConnectInfo`` specifying the connected emulators.
Raises:
JLinkException: if fails to enumerate devices.
|
codesearchnet
|
def get_config_parameter_boolean(config: ConfigParser, section: str, param: str, default: bool) -> bool:
try:
value = config.getboolean(section, param)
except (TypeError, ValueError, NoOptionError):
log.warning('Configuration variable {} not found or improper in section [{}]; using default of {!r}', param, section, default)
value = default
return value
|
Get Boolean parameter from ``configparser`` ``.INI`` file.
Args:
config: :class:`ConfigParser` object
section: section name within config file
param: name of parameter within section
default: default value
Returns:
parameter value, or default
|
codesearchnet
|
def create_transformation(self, rotation=None, translation=None):
mat = None
if (rotation is not None):
mat = Matrix44.from_eulers(Vector3(rotation))
if (translation is not None):
trans = matrix44.create_from_translation(Vector3(translation))
if (mat is None):
mat = trans
else:
mat = matrix44.multiply(mat, trans)
return mat
|
Creates a transformation matrix with rotation and translation.
Args:
rotation: 3 component vector as a list, tuple, or :py:class:`pyrr.Vector3`
translation: 3 component vector as a list, tuple, or :py:class:`pyrr.Vector3`
Returns:
A 4x4 matrix as a :py:class:`numpy.array`
|
codesearchnet
|
def get_name(cls):
global _registry_loaded
if (not _registry_loaded):
load_message_classes()
try:
return _class_to_schema_name[cls]
except KeyError:
raise TypeError('The class {} is not in the message registry, which indicates it is not in the current list of entry points for "fedora_messaging". Please check that the class has been added to your package\'s entry points.'.format(repr(cls)))
|
Retrieve the schema name associated with a message class.
Returns:
str: The schema name.
Raises:
TypeError: If the message class isn't registered. Check your entry point
for correctness.
|
codesearchnet
|
def GetNotificationsForAllShards(self, queue):
notifications_by_session_id = {}
for queue_shard in self.GetAllNotificationShards(queue):
self._GetUnsortedNotifications(
queue_shard, notifications_by_session_id=notifications_by_session_id)
return notifications_by_session_id.values()
|
Returns notifications for all shards of a queue at once.
Used by worker_test_lib.MockWorker to cover all shards with a single worker.
Args:
queue: usually rdfvalue.RDFURN("aff4:/W")
Returns:
List of rdf_flows.GrrNotification objects
|
juraj-google-style
|
def get_image_features(self, pixel_values: torch.FloatTensor, vision_feature_layers: Optional[Union[int, List[int]]]=None):
vision_feature_layers = vision_feature_layers if vision_feature_layers is not None else self.config.vision_feature_layers
image_outputs = self.vision_tower(pixel_values, output_hidden_states=True)
if isinstance(vision_feature_layers, int):
image_features = image_outputs.hidden_states[vision_feature_layers][:, 1:]
else:
image_features = [image_outputs.hidden_states[index][:, 1:] for index in vision_feature_layers]
image_features = torch.cat(image_features, dim=-1)
image_features = self.multi_modal_projector(image_features)
return image_features
|
Obtains image last hidden states from the vision tower and apply multimodal projection.
Args:
pixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`)
The tensors corresponding to the input images.
vision_feature_layers (`Union[int, List[int]]`):
The vision feature layer, or the list of indexes of the layers to select
the vision feature.
Returns:
image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`).
|
github-repos
|
def get_reverse_dns(ip_address, cache=None, nameservers=None, timeout=2.0):
hostname = None
try:
address = dns.reversename.from_address(ip_address)
hostname = query_dns(address, 'PTR', cache=cache, nameservers=nameservers, timeout=timeout)[0]
except dns.exception.DNSException:
pass
return hostname
|
Resolves an IP address to a hostname using a reverse DNS query
Args:
ip_address (str): The IP address to resolve
cache (ExpiringDict): Cache storage
nameservers (list): A list of one or more nameservers to use
(Cloudflare's public DNS resolvers by default)
timeout (float): Sets the DNS query timeout in seconds
Returns:
str: The reverse DNS hostname (if any)
|
codesearchnet
|
def _aggregation_op(cls, op: Callable[[tf.Tensor, Optional[Sequence[int]]], tf.Tensor], x: 'TensorFluent', vars_list: List[str]) -> 'TensorFluent':
axis = cls._varslist2axis(x, vars_list)
t = op(x.tensor, axis)
scope = []
for var in x.scope.as_list():
if (var not in vars_list):
scope.append(var)
batch = x.batch
return TensorFluent(t, scope, batch=batch)
|
Returns a TensorFluent for the aggregation `op` applied to fluent `x`.
Args:
op: The aggregation operation.
x: The input fluent.
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the aggregation operator's output.
|
codesearchnet
|
def longest_existing_path(_path):
existing_path = _path
while True:
_path_new = os.path.dirname(existing_path)
if exists(_path_new):
existing_path = _path_new
break
if (_path_new == existing_path):
print('!!! [utool] This is a very illformated path indeed.')
existing_path = ''
break
existing_path = _path_new
return existing_path
|
r"""
Returns the longest root of _path that exists
Args:
_path (str): path string
Returns:
str: _path - path string
CommandLine:
python -m utool.util_path --exec-longest_existing_path
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> target = dirname(ut.__file__)
>>> _path = join(target, 'nonexist/foobar')
>>> existing_path = longest_existing_path(_path)
>>> result = ('existing_path = %s' % (str(existing_path),))
>>> print(result)
>>> assert existing_path == target
|
codesearchnet
|
def from_bulk_and_miller(cls, structure, miller_index, min_slab_size=8.0, min_vacuum_size=10.0, max_normal_search=None, center_slab=True, selective_dynamics=False, undercoord_threshold=0.09):
vnn_bulk = VoronoiNN(tol=0.05)
bulk_coords = [len(vnn_bulk.get_nn(structure, n)) for n in range(len(structure))]
struct = structure.copy(site_properties={'bulk_coordinations': bulk_coords})
slabs = generate_all_slabs(struct, max_index=max(miller_index), min_slab_size=min_slab_size, min_vacuum_size=min_vacuum_size, max_normal_search=max_normal_search, center_slab=center_slab)
slab_dict = {slab.miller_index: slab for slab in slabs}
if (miller_index not in slab_dict):
raise ValueError('Miller index not in slab dict')
this_slab = slab_dict[miller_index]
vnn_surface = VoronoiNN(tol=0.05, allow_pathological=True)
(surf_props, undercoords) = ([], [])
this_mi_vec = get_mi_vec(this_slab)
mi_mags = [np.dot(this_mi_vec, site.coords) for site in this_slab]
average_mi_mag = np.average(mi_mags)
for (n, site) in enumerate(this_slab):
bulk_coord = this_slab.site_properties['bulk_coordinations'][n]
slab_coord = len(vnn_surface.get_nn(this_slab, n))
mi_mag = np.dot(this_mi_vec, site.coords)
undercoord = ((bulk_coord - slab_coord) / bulk_coord)
undercoords += [undercoord]
if ((undercoord > undercoord_threshold) and (mi_mag > average_mi_mag)):
surf_props += ['surface']
else:
surf_props += ['subsurface']
new_site_properties = {'surface_properties': surf_props, 'undercoords': undercoords}
new_slab = this_slab.copy(site_properties=new_site_properties)
return cls(new_slab, selective_dynamics)
|
This method constructs the adsorbate site finder from a bulk
structure and a miller index, which allows the surface sites
to be determined from the difference in bulk and slab coordination,
as opposed to the height threshold.
Args:
structure (Structure): structure from which slab
input to the ASF is constructed
miller_index (3-tuple or list): miller index to be used
min_slab_size (float): min slab size for slab generation
min_vacuum_size (float): min vacuum size for slab generation
max_normal_search (int): max normal search for slab generation
center_slab (bool): whether to center slab in slab generation
selective_dynamics (bool): whether to assign surface sites
to selective dynamics
undercoord_threshold (float): threshold of "undercoordination"
to use for the assignment of surface sites. Default is
0.09, for which surface sites will be designated if they
are 9% less coordinated than their bulk counterpart
|
codesearchnet
|
def byte_swap_tensor_content(tensor, from_endiness, to_endiness):
if tensor.dtype in byte_swappable:
tshape = tensor.tensor_shape.dim
tensor_bytes = tensor.tensor_content
if tensor_bytes:
tensor_size = 1
for sz in tshape:
if sz.size != 0:
tensor_size *= sz.size
      chunksize = len(tensor_bytes) // tensor_size
to_swap = [tensor_bytes[i:i + chunksize] for i in range(0, len(tensor_bytes), chunksize)]
tensor.tensor_content = b''.join([int.from_bytes(byteswap, from_endiness).to_bytes(chunksize, to_endiness) for byteswap in to_swap])
|
Byte-swaps the serialized content of a tensor proto from one endianness to the other.
Args:
tensor: Target tensor whose content should change endianness.
from_endiness: The original endianness format. "big" or "little"
to_endiness: The target endianness format. "big" or "little"
|
github-repos
|
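The per-element byte swap performed by `byte_swap_tensor_content` above can be illustrated standalone with two 4-byte integers:

```python
# two int32 values packed little-endian, then re-packed big-endian
buf = (1).to_bytes(4, "little") + (2).to_bytes(4, "little")
chunksize = 4
chunks = [buf[i:i + chunksize] for i in range(0, len(buf), chunksize)]
swapped = b"".join(
    int.from_bytes(c, "little").to_bytes(chunksize, "big") for c in chunks)
assert swapped == (1).to_bytes(4, "big") + (2).to_bytes(4, "big")
```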
def slice(filename, number_tiles=None, col=None, row=None, save=True):
im = Image.open(filename)
im_w, im_h = im.size
columns = 0
rows = 0
if not number_tiles is None:
validate_image(im, number_tiles)
columns, rows = calc_columns_rows(number_tiles)
extras = (columns * rows) - number_tiles
else:
validate_image_col_row(im, col, row)
columns = col
rows = row
        extras = 0  # explicit col/row counts leave no leftover tiles
tile_w, tile_h = int(floor(im_w / columns)), int(floor(im_h / rows))
tiles = []
number = 1
for pos_y in range(0, im_h - rows, tile_h):
for pos_x in range(0, im_w - columns, tile_w):
area = (pos_x, pos_y, pos_x + tile_w, pos_y + tile_h)
image = im.crop(area)
position = (int(floor(pos_x / tile_w)) + 1,
int(floor(pos_y / tile_h)) + 1)
coords = (pos_x, pos_y)
tile = Tile(image, number, position, coords)
tiles.append(tile)
number += 1
if save:
save_tiles(tiles,
prefix=get_basename(filename),
directory=os.path.dirname(filename))
return tuple(tiles)
|
Split an image into a specified number of tiles.
Args:
filename (str): The filename of the image to split.
number_tiles (int): The number of tiles required.
Kwargs:
col (int): Number of columns to split into (used when number_tiles is None).
row (int): Number of rows to split into (used when number_tiles is None).
save (bool): Whether or not to save tiles to disk.
Returns:
Tuple of :class:`Tile` instances.
|
juraj-google-style
|
def inflate_plugins(self, plugins_definition, inflate_method):
if isinstance(plugins_definition, list):
return self.inflate_plugin_list(plugins_definition, inflate_method)
elif isinstance(plugins_definition, dict):
return self.inflate_plugin_dict(plugins_definition, inflate_method)
else:
raise ValueError('%s type is not supported for a plugin list, '
'use list or dict' % type(plugins_definition))
|
Inflate multiple plugins based on a list/dict definition.
Args:
plugins_definition (list/dict): the plugins definitions.
inflate_method (method): the method to inflate each plugin.
Returns:
list: a list of plugin instances.
Raises:
ValueError: when the definition type is not list or dict.
|
juraj-google-style
|
def read(cls, data):
if isinstance(data, pd.DataFrame):
output = OrderedDict({})
output['version'] = '2.0'
output['class'] = 'dimension'
[label] = [x for x in list(data.columns.values) if
x not in ['id', 'index']]
output['label'] = label
output['category'] = OrderedDict({})
output['category']['index'] = data.id.tolist()
output['category']['label'] = OrderedDict(
zip(data.id.values, data[label].values))
return cls(output)
elif isinstance(data, OrderedDict):
return cls(data)
elif isinstance(data, basestring) and data.startswith(("http:
"https:
"ftp:
"ftps:
return cls(request(data))
elif isinstance(data,basestring):
try:
json_dict = json.loads(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
else:
try:
json_dict = json.load(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
|
Reads data from URL, Dataframe, JSON string, JSON file
or OrderedDict.
Args:
data: can be a Pandas Dataframe, a JSON string, a JSON file,
an OrderedDict or a URL pointing to a JSONstat file.
Returns:
An object of class Dimension populated with data.
|
juraj-google-style
|
def _get_new_finished_state(self, state, new_seq, new_log_probs):
i = state[_StateKeys.CUR_INDEX]
finished_seq = state[_StateKeys.FINISHED_SEQ]
finished_scores = state[_StateKeys.FINISHED_SCORES]
finished_flags = state[_StateKeys.FINISHED_FLAGS]
finished_seq = tf.concat([finished_seq, tf.zeros([self.batch_size, self.beam_size, 1], tf.int32)], axis=2)
length_norm = _length_normalization(self.alpha, (i + 1))
new_scores = (new_log_probs / length_norm)
    new_finished_flags = tf.equal(new_seq[:, :, -1], self.eos_id)
new_scores += ((1.0 - tf.to_float(new_finished_flags)) * (- INF))
finished_seq = tf.concat([finished_seq, new_seq], axis=1)
finished_scores = tf.concat([finished_scores, new_scores], axis=1)
finished_flags = tf.concat([finished_flags, new_finished_flags], axis=1)
(top_finished_seq, top_finished_scores, top_finished_flags) = _gather_topk_beams([finished_seq, finished_scores, finished_flags], finished_scores, self.batch_size, self.beam_size)
return {_StateKeys.FINISHED_SEQ: top_finished_seq, _StateKeys.FINISHED_SCORES: top_finished_scores, _StateKeys.FINISHED_FLAGS: top_finished_flags}
|
Combine new and old finished sequences, and gather the top k sequences.
Args:
state: A dictionary with the current loop state.
new_seq: New sequences generated by growing the current alive sequences
int32 tensor with shape [batch_size, beam_size, i + 1]
new_log_probs: Log probabilities of new sequences
float32 tensor with shape [batch_size, beam_size]
Returns:
Dictionary with finished keys from _StateKeys:
{Top beam_size finished sequences based on score,
Scores of finished sequences,
Finished flags of finished sequences}
|
codesearchnet
|
def load_and_use(path):
example_cond, example_a, example_b = _get_example_tensors()
restored = tf.saved_model.load(path)
return restored.use_multiplex(example_cond, example_a, example_b)
|
Load and used a model that was previously created by `save()`.
Args:
path: Directory to load model from, typically the same directory that was
used by save().
Returns:
A tensor that is the result of using the multiplex op that is
tf.constant([1, 20, 3, 40, 5], dtype=tf.int64).
|
github-repos
|
def rename_document(self, did, name):
payload = {'name': name}
return self._api.request('post', ('/api/documents/' + did), body=payload)
|
Renames the specified document.
Args:
- did (str): Document ID
- name (str): New document name
Returns:
- requests.Response: Onshape response data
|
codesearchnet
|
def is_link(path):
if sys.getwindowsversion().major < 6:
raise SaltInvocationError('Symlinks are only supported on Windows Vista or later.')
try:
return salt.utils.path.islink(path)
except Exception as exc:
raise CommandExecutionError(exc)
|
Check if the path is a symlink
This is only supported on Windows Vista or later.
Inline with Unix behavior, this function will raise an error if the path
is not a symlink, however, the error raised will be a SaltInvocationError,
not an OSError.
Args:
path (str): The path to a file or directory
Returns:
bool: True if path is a symlink, otherwise False
CLI Example:
.. code-block:: bash
salt '*' file.is_link /path/to/link
|
juraj-google-style
|
def _identify_eds_ing(first, second):
A = set([first.L, first.R])
A.update(first.D)
B = set([second.L, second.R])
B.update(second.D)
depend_set = A & B
left, right = sorted(list(A ^ B))
return left, right, depend_set
|
Find nodes connecting adjacent edges.
Args:
first(Edge): Edge object representing the first edge.
second(Edge): Edge object representing the second edge.
Returns:
tuple[int, int, set[int]]: The first two values represent left and right node
indices of the new edge. The third value is the new dependence set.
|
juraj-google-style
|
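The set arithmetic used by `_identify_eds_ing` above can be checked with plain integers (the node indices here are made up):

```python
A = {1, 2, 5}            # nodes touched by the first edge
B = {2, 3, 5}            # nodes touched by the second edge
depend_set = A & B       # shared nodes: {2, 5}
left, right = sorted(A ^ B)
assert (left, right, depend_set) == (1, 3, {2, 5})
```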
def wait_for_js(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
if (len(args) < 1):
return function(*args, **kwargs)
else:
self = args[0]
if hasattr(self, 'wait_for_js'):
self.wait_for_js()
return function(*args, **kwargs)
return wrapper
|
Method decorator that waits for JavaScript dependencies before executing `function`.
If the function is not a method, the decorator has no effect.
Args:
function (callable): Method to decorate.
Returns:
Decorated method
|
codesearchnet
|
def _calc_block_mean_variance(image, mask, blocksize):
I = image.copy()
I_f = (I.astype(np.float32) / 255.0)
    result = np.zeros((image.shape[0] // blocksize, image.shape[1] // blocksize), dtype=np.float32)
    for i in range(0, image.shape[0] - blocksize, blocksize):
        for j in range(0, image.shape[1] - blocksize, blocksize):
            patch = I_f[i:i + blocksize + 1, j:j + blocksize + 1]
            mask_patch = mask[i:i + blocksize + 1, j:j + blocksize + 1]
            tmp1 = np.zeros((blocksize, blocksize))
            tmp2 = np.zeros((blocksize, blocksize))
            (mean, std_dev) = cv2.meanStdDev(patch, tmp1, tmp2, mask_patch)
            value = 0
            if std_dev[0][0] > MEAN_VARIANCE_THRESHOLD:
                value = mean[0][0]
            result[i // blocksize, j // blocksize] = value
    small_image = cv2.resize(I, (image.shape[1] // blocksize, image.shape[0] // blocksize))
(res, inpaintmask) = cv2.threshold(result, 0.02, 1, cv2.THRESH_BINARY)
inpainted = cv2.inpaint(small_image, inpaintmask.astype(np.uint8), 5, cv2.INPAINT_TELEA)
res = cv2.resize(inpainted, (image.shape[1], image.shape[0]))
return res
|
Adaptively determines image background.
Args:
image: input image, converted to a 1-channel image.
mask: 1-channel mask, same size as image.
blocksize: adaptive algorithm parameter.
Returns:
image of same size as input with foreground inpainted with background.
|
codesearchnet
|
def train(self, debug=True, force=False, single_thread=False, timeout=20):
if ((not self.must_train) and (not force)):
return
self.padaos.compile()
self.train_thread = Thread(target=self._train, kwargs=dict(debug=debug, single_thread=single_thread, timeout=timeout), daemon=True)
self.train_thread.start()
self.train_thread.join(timeout)
self.must_train = False
return (not self.train_thread.is_alive())
|
Trains all the loaded intents that need to be updated
If a cache file exists with the same hash as the intent file,
the intent will not be trained and just loaded from file
Args:
debug (bool): Whether to print a message to stdout each time a new intent is trained
force (bool): Whether to force training if already finished
single_thread (bool): Whether to force running in a single thread
timeout (float): Seconds before cancelling training
Returns:
bool: True if training succeeded without timeout
|
codesearchnet
|
def model_fn(features, labels, mode, params, config):
del labels, config
if params["analytic_kl"] and params["mixture_components"] != 1:
raise NotImplementedError(
"Using `analytic_kl` is only supported when `mixture_components = 1` "
"since there's no closed form otherwise.")
encoder = make_encoder(params["activation"],
params["latent_size"],
params["base_depth"])
decoder = make_decoder(params["activation"],
params["latent_size"],
IMAGE_SHAPE,
params["base_depth"])
latent_prior = make_mixture_prior(params["latent_size"],
params["mixture_components"])
image_tile_summary(
"input", tf.cast(features, dtype=tf.float32), rows=1, cols=16)
approx_posterior = encoder(features)
approx_posterior_sample = approx_posterior.sample(params["n_samples"])
decoder_likelihood = decoder(approx_posterior_sample)
image_tile_summary(
"recon/sample",
tf.cast(decoder_likelihood.sample()[:3, :16], dtype=tf.float32),
rows=3,
cols=16)
image_tile_summary(
"recon/mean",
decoder_likelihood.mean()[:3, :16],
rows=3,
cols=16)
distortion = -decoder_likelihood.log_prob(features)
avg_distortion = tf.reduce_mean(input_tensor=distortion)
tf.compat.v1.summary.scalar("distortion", avg_distortion)
if params["analytic_kl"]:
rate = tfd.kl_divergence(approx_posterior, latent_prior)
else:
rate = (approx_posterior.log_prob(approx_posterior_sample)
- latent_prior.log_prob(approx_posterior_sample))
avg_rate = tf.reduce_mean(input_tensor=rate)
tf.compat.v1.summary.scalar("rate", avg_rate)
elbo_local = -(rate + distortion)
elbo = tf.reduce_mean(input_tensor=elbo_local)
loss = -elbo
tf.compat.v1.summary.scalar("elbo", elbo)
importance_weighted_elbo = tf.reduce_mean(
input_tensor=tf.reduce_logsumexp(input_tensor=elbo_local, axis=0) -
tf.math.log(tf.cast(params["n_samples"], dtype=tf.float32)))
tf.compat.v1.summary.scalar("elbo/importance_weighted",
importance_weighted_elbo)
random_image = decoder(latent_prior.sample(16))
image_tile_summary(
"random/sample",
tf.cast(random_image.sample(), dtype=tf.float32),
rows=4,
cols=4)
image_tile_summary("random/mean", random_image.mean(), rows=4, cols=4)
global_step = tf.compat.v1.train.get_or_create_global_step()
learning_rate = tf.compat.v1.train.cosine_decay(
params["learning_rate"], global_step, params["max_steps"])
tf.compat.v1.summary.scalar("learning_rate", learning_rate)
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(loss, global_step=global_step)
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
eval_metric_ops={
"elbo":
tf.compat.v1.metrics.mean(elbo),
"elbo/importance_weighted":
tf.compat.v1.metrics.mean(importance_weighted_elbo),
"rate":
tf.compat.v1.metrics.mean(avg_rate),
"distortion":
tf.compat.v1.metrics.mean(avg_distortion),
},
)
|
Builds the model function for use in an estimator.
Arguments:
features: The input features for the estimator.
labels: The labels, unused here.
mode: Signifies whether it is train or test or predict.
params: Some hyperparameters as a dictionary.
config: The RunConfig, unused here.
Returns:
EstimatorSpec: A tf.estimator.EstimatorSpec instance.
|
juraj-google-style
|
def num_connected_components(self, unitary_only=False):
reg_offset = 0
reg_map = {}
if unitary_only:
regs = self.qregs
else:
regs = self.qregs+self.cregs
for reg in regs:
reg_map[reg.name] = reg_offset
reg_offset += reg.size
sub_graphs = [[bit] for bit in range(reg_offset)]
num_sub_graphs = len(sub_graphs)
for instr, qargs, cargs in self.data:
if unitary_only:
args = qargs
num_qargs = len(args)
else:
args = qargs+cargs
num_qargs = len(args) + (1 if instr.control else 0)
if num_qargs >= 2 and instr.name not in ['barrier', 'snapshot']:
graphs_touched = []
num_touched = 0
if instr.control and not unitary_only:
creg = instr.control[0]
creg_int = reg_map[creg.name]
for coff in range(creg.size):
temp_int = creg_int+coff
for k in range(num_sub_graphs):
if temp_int in sub_graphs[k]:
graphs_touched.append(k)
num_touched += 1
break
for item in args:
reg_int = reg_map[item[0].name]+item[1]
for k in range(num_sub_graphs):
if reg_int in sub_graphs[k]:
if k not in graphs_touched:
graphs_touched.append(k)
num_touched += 1
break
if num_touched > 1:
connections = []
for idx in graphs_touched:
connections.extend(sub_graphs[idx])
_sub_graphs = []
for idx in range(num_sub_graphs):
if idx not in graphs_touched:
_sub_graphs.append(sub_graphs[idx])
_sub_graphs.append(connections)
sub_graphs = _sub_graphs
num_sub_graphs -= (num_touched-1)
if num_sub_graphs == 1:
break
return num_sub_graphs
|
How many non-entangled subcircuits can the circuit be factored to.
Args:
unitary_only (bool): Compute only unitary part of graph.
Returns:
int: Number of connected components in circuit.
|
juraj-google-style
|
def __init__(self, event_type: str):
if not isinstance(event_type, str) or event_type == "":
raise TypeError("Invalid event type: {}".format(event_type))
self._event_type: str = event_type
self._target: EventDispatcherBase = None
|
Constructor.
Args:
event_type (str): The type - string identifier - of the event.
Must not be `None` or empty string.
|
juraj-google-style
|
def init_c_overturn(step):
(rbot, rtop) = misc.get_rbounds(step)
xieut = step.sdat.par['tracersin']['fe_eut']
k_fe = step.sdat.par['tracersin']['k_fe']
xi0l = step.sdat.par['tracersin']['fe_cont']
xi0s = (k_fe * xi0l)
xired = (xi0l / xieut)
rsup = (((rtop ** 3) - ((xired ** (1 / (1 - k_fe))) * ((rtop ** 3) - (rbot ** 3)))) ** (1 / 3))
def initprof(rpos):
'Theoretical initial profile.'
if (rpos < rsup):
return (xi0s * ((((rtop ** 3) - (rbot ** 3)) / ((rtop ** 3) - (rpos ** 3))) ** (1 - k_fe)))
return xieut
rad = np.linspace(rbot, rtop, 500)
initprof = np.vectorize(initprof)
return (initprof(rad), rad)
|
Initial concentration.
This compute the resulting composition profile if fractional
crystallization of a SMO is assumed.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of :class:`numpy.array`: the composition and the radial position
at which it is evaluated.
|
codesearchnet
|
def get_first_content(el_list, alt=None, strip=True):
if not el_list:
return alt
content = el_list[0].getContent()
if strip:
content = content.strip()
if not content:
return alt
return content
|
Return content of the first element in `el_list` or `alt`. Also return `alt`
if the content string of first element is blank.
Args:
el_list (list): List of HTMLElement objects.
alt (default None): Value returned when the list or content is blank.
strip (bool, default True): Call .strip() to content.
Returns:
str or alt: String representation of the content of the first element \
or `alt` if not found.
|
juraj-google-style
|
def forward(self, hidden_states):
hidden_states = hidden_states.transpose(-1, 1)
hidden_states = self.conv1(hidden_states)
hidden_states = torch.relu(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.conv2(hidden_states)
hidden_states = hidden_states.transpose(-1, 1)
return hidden_states
|
Calculate forward propagation.
Args:
hidden_states (torch.Tensor): Batch of input tensors (batch_size, time, input_channels).
Returns:
torch.Tensor: Batch of output tensors (batch_size, time, hidden_channels).
|
github-repos
|
def parse_data_types_and_routes_from_doc_ref(api, doc, namespace_context, ignore_missing_entries=False):
assert (doc is not None)
data_types = set()
routes = defaultdict(set)
for match in doc_ref_re.finditer(doc):
try:
tag = match.group('tag')
val = match.group('val')
supplied_namespace = api.namespaces[namespace_context]
if (tag == 'field'):
if ('.' in val):
(type_name, __) = val.split('.', 1)
doc_type = supplied_namespace.data_type_by_name[type_name]
data_types.add(doc_type)
else:
pass
elif (tag == 'route'):
if ('.' in val):
(namespace_name, val) = val.split('.', 1)
namespace = api.namespaces[namespace_name]
else:
namespace = supplied_namespace
try:
(route_name, version) = parse_route_name_and_version(val)
except ValueError as ex:
raise KeyError(str(ex))
route = namespace.routes_by_name[route_name].at_version[version]
routes[namespace.name].add(route)
elif (tag == 'type'):
if ('.' in val):
(namespace_name, val) = val.split('.', 1)
doc_type = api.namespaces[namespace_name].data_type_by_name[val]
data_types.add(doc_type)
else:
doc_type = supplied_namespace.data_type_by_name[val]
data_types.add(doc_type)
except KeyError:
if (not ignore_missing_entries):
raise
return (data_types, routes)
|
Given a documentation string, parse it and return all references to other
data types and routes.
Args:
- api: The API containing this doc ref.
- doc: The documentation string to parse.
- namespace_context: The namespace name relative to this documentation.
- ignore_missing_entries: If set, this will skip references to nonexistent data types instead
of raising an exception.
Returns:
- a tuple of referenced data types and routes
|
codesearchnet
|
def get_or_create(self, defaults=None, **kwargs):
try:
return (self.get(**kwargs), False)
except ObjectDoesNotExist:
pass
data = (defaults or {})
data.update(kwargs)
return (self._model_class(**data).blocking_save(), True)
|
Looks up an object with the given kwargs, creating a new one if necessary.
Args:
defaults (dict): Used when we create a new object. Must map to fields
of the model.
\*\*kwargs: Used both for filtering and new object creation.
Returns:
A tuple of (object, created), where created is a boolean variable
specifies whether the object was newly created or not.
Example:
In the following example, *code* and *name* fields are used to query the DB.
.. code-block:: python
obj, is_new = Permission.objects.get_or_create({'description': desc},
code=code, name=name)
{description: desc} dict is just for new creations. If we can't find any
records by filtering on *code* and *name*, then we create a new object by
using all of the inputs.
|
codesearchnet
|
def __init__(self, retriever):
self._page_token = None
self._first_page = True
self._retriever = retriever
self._count = 0
|
Initializes an instance of an Iterator.
Args:
retriever: a function that can retrieve the next page of items.
|
juraj-google-style
|
def impad_to_multiple(img, divisor, pad_val=0):
pad_h = int(np.ceil(img.shape[0] / divisor)) * divisor
pad_w = int(np.ceil(img.shape[1] / divisor)) * divisor
return impad(img, (pad_h, pad_w), pad_val)
|
Pad an image to ensure each edge to be multiple to some number.
Args:
img (ndarray): Image to be padded.
divisor (int): Padded image edges will be multiple to divisor.
pad_val (number or sequence): Same as :func:`impad`.
Returns:
ndarray: The padded image.
|
juraj-google-style
|
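The edge rounding performed by `impad_to_multiple` above, shown standalone (the image size and divisor are illustrative):

```python
import numpy as np

h, w, divisor = 100, 130, 32
pad_h = int(np.ceil(h / divisor)) * divisor
pad_w = int(np.ceil(w / divisor)) * divisor
assert (pad_h, pad_w) == (128, 160)
```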
def getShareInfo(item):
key = f'_syn_sharinfo_{item.__class__.__module__}_{item.__class__.__qualname__}'
info = getattr(item, key, None)
if (info is not None):
return info
meths = {}
info = {'meths': meths}
for name in dir(item):
if name.startswith('_'):
continue
attr = getattr(item, name, None)
if (not callable(attr)):
continue
wrapped = getattr(attr, '__syn_wrapped__', None)
if (wrapped in unwraps):
real = inspect.unwrap(attr)
if inspect.isasyncgenfunction(real):
meths[name] = {'genr': True}
continue
if inspect.isasyncgenfunction(attr):
meths[name] = {'genr': True}
try:
setattr(item, key, info)
except Exception as e:
logger.exception(f'Failed to set magic on {item}')
try:
setattr(item.__class__, key, info)
except Exception as e:
logger.exception(f'Failed to set magic on {item.__class__}')
return info
|
Get a dictionary of special annotations for a Telepath Proxy.
Args:
item: Item to inspect.
Notes:
This will set a ``_syn_sharinfo_<module>_<qualname>`` attribute on the item
and the item's class, so this data is only computed once.
Returns:
dict: A dictionary of methods requiring special handling by the proxy.
|
codesearchnet
|
class XGBoostModelHandlerDatatable(XGBoostModelHandler[datatable.Frame, PredictionResult, Union[xgboost.Booster, xgboost.XGBModel]]):
def run_inference(self, batch: Sequence[datatable.Frame], model: Union[xgboost.Booster, xgboost.XGBModel], inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:
return self._inference_fn(batch, model, inference_args)
def get_num_bytes(self, batch: Sequence[datatable.Frame]) -> int:
return sum((sys.getsizeof(element) for element in batch))
|
Implementation of the ModelHandler interface for XGBoost
using datatable dataframes as input.
Example Usage::
pcoll | RunInference(
XGBoostModelHandlerDatatable(
model_class="XGBoost Model Class",
model_state="my_model_state.json")))
Args:
model_class: class of the XGBoost model that defines the model
structure.
model_state: path to a json file that contains the model's
configuration.
inference_fn: the inference function to use during RunInference.
default=default_xgboost_inference_fn
|
github-repos
|
def update(self, friendly_name=None, description=None, query=None):
self._table._load_info()
if (query is not None):
if isinstance(query, _query.Query):
query = query.sql
self._table._info['view'] = {'query': query}
self._table.update(friendly_name=friendly_name, description=description)
|
Selectively updates View information.
Any parameters that are None (the default) are not applied in the update.
Args:
friendly_name: if not None, the new friendly name.
description: if not None, the new description.
query: if not None, a new query string for the View.
|
codesearchnet
|
def default_peek(python_type, exposes):
with_args = False
make = python_type
try:
make()
except (SystemExit, KeyboardInterrupt):
raise
except:
make = lambda: python_type.__new__(python_type)
try:
make()
except (SystemExit, KeyboardInterrupt):
raise
except:
make = lambda args: python_type.__new__(python_type, *args)
with_args = True
def missing(attr):
return AttributeError("can't set attribute '{}' ({})".format(attr, python_type))
if with_args:
def peek(store, container, _stack=None):
state = []
for attr in exposes:
if attr in container:
state.append(store.peek(attr, container, _stack=_stack))
else:
state.append(None)
return make(state)
elif '__dict__' in exposes:
def peek(store, container, _stack=None):
obj = make()
for attr in container:
val = store.peek(attr, container, _stack=_stack)
try:
setattr(obj, attr, val)
except AttributeError:
raise missing(attr)
return obj
else:
def peek(store, container, _stack=None):
obj = make()
for attr in exposes:
if attr in container:
val = store.peek(attr, container, _stack=_stack)
else:
val = None
try:
setattr(obj, attr, val)
except AttributeError:
raise missing(attr)
return obj
return peek
|
Autoserializer factory.
Works best in Python 3.
Arguments:
python_type (type): type constructor.
exposes (iterable): sequence of attributes.
Returns:
callable: deserializer (`peek` routine).
|
juraj-google-style
|
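A minimal, self-contained sketch of how the factory above can be used. The ``Point`` class and the ``DictStore`` stand-in (whose ``peek`` simply indexes a plain dict) are assumptions made for illustration, not part of the original API:

```python
class Point(object):
    __slots__ = ('x', 'y')   # attributes exposed without a __dict__

class DictStore(object):
    """Toy store: containers are plain dicts and peek() just indexes them."""
    def peek(self, attr, container, _stack=None):
        return container[attr]

peek_point = default_peek(Point, ('x', 'y'))
p = peek_point(DictStore(), {'x': 1.0, 'y': 2.0})
print(p.x, p.y)   # 1.0 2.0
```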
def _unify_call_signature(i, dist_fn):
if distribution_util.is_distribution_instance(dist_fn):
return ((lambda *_: dist_fn), None)
if (not callable(dist_fn)):
raise TypeError('{} must be either `tfd.Distribution`-like or `callable`.'.format(dist_fn))
args = _get_required_args(dist_fn)
if (not args):
return ((lambda *_: dist_fn()), ())
@functools.wraps(dist_fn)
def dist_fn_wrapped(*xs):
'Calls `dist_fn` with reversed and truncated args.'
if (i != len(xs)):
raise ValueError('Internal Error: Unexpected number of inputs provided to {}-th distribution maker (dist_fn: {}, expected: {}, saw: {}).'.format(i, dist_fn, i, len(xs)))
if (len(xs) < len(args)):
raise ValueError('Internal Error: Too few inputs provided to {}-th distribution maker (dist_fn: {}, expected: {}, saw: {}).'.format(i, dist_fn, len(args), len(xs)))
return dist_fn(*reversed(xs[(- len(args)):]))
return (dist_fn_wrapped, args)
|
Creates `dist_fn_wrapped` which calls `dist_fn` with all prev nodes.
Args:
i: Python `int` corresponding to position in topologically sorted DAG.
dist_fn: Python `callable` which takes a subset of previously constructed
distributions (in reverse order) and produces a new distribution instance.
Returns:
dist_fn_wrapped: Python `callable` which takes all previous distributions
(in non-reverse order) and produces a new distribution instance.
args: `tuple` of `str` representing the arg names of `dist_fn` (and in
non-wrapped, "natural" order). `None` is returned only if the input is not a
`callable`.
|
codesearchnet
|
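To make the reversed-and-truncated calling convention concrete, here is a toy stand-alone replay of the slicing done inside ``dist_fn_wrapped``; the names ``maker`` and ``nodes`` are invented and no TensorFlow Probability objects are involved:

```python
def maker(prev1, prev0):
    # a maker at position i=3 that only needs the two most recent nodes
    return ('node3', prev1, prev0)

nodes = ('node0', 'node1', 'node2')   # everything built so far, oldest first
n_args = 2                            # what _get_required_args(maker) would report
result = maker(*reversed(nodes[-n_args:]))
print(result)   # ('node3', 'node2', 'node1')
```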
class Permute(Layer):
def __init__(self, dims, **kwargs):
super(Permute, self).__init__(**kwargs)
self.dims = tuple(dims)
if sorted(dims) != list(range(1, len(dims) + 1)):
raise ValueError('Invalid permutation `dims` for Permute Layer: %s. The set of indices in `dims` must be consecutive and start from 1.' % (dims,))
self.input_spec = InputSpec(ndim=len(self.dims) + 1)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape = copy.copy(input_shape)
for i, dim in enumerate(self.dims):
target_dim = input_shape[dim]
output_shape[i + 1] = target_dim
return tensor_shape.TensorShape(output_shape)
def call(self, inputs):
return array_ops.transpose(inputs, perm=(0,) + self.dims)
def get_config(self):
config = {'dims': self.dims}
base_config = super(Permute, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
|
Permutes the dimensions of the input according to a given pattern.
Useful e.g. for connecting RNNs and convnets.
Example:
```python
model = Sequential()
model.add(Permute((2, 1), input_shape=(10, 64)))
# now: model.output_shape == (None, 64, 10)
# note: `None` is the batch dimension
```
Args:
dims: Tuple of integers. Permutation pattern does not include the
samples dimension. Indexing starts at 1.
For instance, `(2, 1)` permutes the first and second dimensions
of the input.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same as the input shape, but with the dimensions re-ordered according
to the specified pattern.
|
github-repos
|
def to_representation(self, value):
if not value:
return None
image = get_thumbnail(value, self.geometry_string, **self.options)
try:
request = self.context.get('request', None)
return request.build_absolute_uri(image.url)
except:
try:
return super(HyperlinkedSorlImageField, self).to_representation(image)
except AttributeError:
return super(HyperlinkedSorlImageField, self).to_native(image.url)
|
Perform the actual serialization.
Args:
value: the image to transform
Returns:
a url pointing at a scaled and cached image
|
juraj-google-style
|
def highlight(text: str, color_code: int, bold: bool=False) -> str:
return '{}\x1b[{}m{}\x1b[0m'.format(('\x1b[1m' if bold else ''), color_code, text)
|
Wraps the given string with terminal color codes.
Args:
text: The content to highlight.
color_code: The color to highlight with, e.g. 'shelltools.RED'.
bold: Whether to bold the content in addition to coloring.
Returns:
The highlighted string.
|
codesearchnet
|
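A small usage sketch; ANSI code 31 is the standard foreground code for red, and the ``shelltools.RED`` constant mentioned in the docstring is assumed to resolve to such a value:

```python
RED = 31   # standard ANSI foreground code for red

print(highlight('error: build failed', RED, bold=True))
# prints '\x1b[1m\x1b[31merror: build failed\x1b[0m' (bold red on a terminal)
```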
def List(self, request, global_params=None):
config = self.GetMethodConfig('List')
return self._RunMethod(config, request, global_params=global_params)
|
List all GitHubEnterpriseConfigs for a given project.
Args:
request: (CloudbuildProjectsGithubEnterpriseConfigsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListGithubEnterpriseConfigsResponse) The response message.
|
github-repos
|
def _EnforceProcessMemoryLimit(self, memory_limit):
if resource:
if (memory_limit is None):
memory_limit = (((4 * 1024) * 1024) * 1024)
elif (memory_limit == 0):
memory_limit = resource.RLIM_INFINITY
resource.setrlimit(resource.RLIMIT_DATA, (memory_limit, memory_limit))
|
Enforces a process memory limit.
Args:
memory_limit (int): maximum number of bytes the process is allowed
to allocate, where 0 represents no limit and None a default of
4 GiB.
|
codesearchnet
|
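A stand-alone illustration of the same ``resource`` call, useful for checking which limit actually took effect; ``resource`` is Unix-only and lowering RLIMIT_DATA may not be permitted in every environment, so treat this as a sketch:

```python
import resource

limit = 4 * 1024 * 1024 * 1024   # 4 GiB, the default used above
resource.setrlimit(resource.RLIMIT_DATA, (limit, limit))
print(resource.getrlimit(resource.RLIMIT_DATA))   # (4294967296, 4294967296)
```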
def run(self, dag):
self.layout = self.layout or self.property_set['layout']
if self.layout is None:
raise TranspilerError("EnlargeWithAncilla requires property_set[\"layout\"] or"
" \"layout\" parameter to run")
layout_virtual_qubits = self.layout.get_virtual_bits().keys()
new_qregs = set(virtual_qubit[0] for virtual_qubit in layout_virtual_qubits
if virtual_qubit not in dag.wires)
for qreg in new_qregs:
dag.add_qreg(qreg)
return dag
|
Extends dag with virtual qubits that are in layout but not in the circuit yet.
Args:
dag (DAGCircuit): DAG to extend.
Returns:
DAGCircuit: An extended DAG.
Raises:
TranspilerError: If there is no layout in the property set and none was provided at init time.
|
juraj-google-style
|
def disable_control_flow_v2(unused_msg: str) -> Callable[[_F], _F]:
def wrapper(func: _F) -> _F:
func._disable_control_flow_v2 = True
return func
return wrapper
|
Decorator for a function in a with_control_flow_v2 enabled test class.
Blocks the function from being run with v2 control flow ops.
Args:
unused_msg: Reason for disabling.
Returns:
The wrapped function with _disable_control_flow_v2 attr set to True.
|
github-repos
|
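A usage sketch of the decorator; the test name and the bug reference in the message are placeholders, and the surrounding ``with_control_flow_v2`` machinery that reads the attribute is assumed from the docstring:

```python
@disable_control_flow_v2('b/000000: placeholder reason, not a real bug id')
def testLegacyWhileLoop(self):
    pass   # would only run with v1 control flow ops

print(testLegacyWhileLoop._disable_control_flow_v2)   # True
```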
def xml(self):
self.pendingvalidation()
E = ElementMaker(namespace='http:
attribs = {}
attribs['{http:
attribs['version'] = FOLIAVERSION
attribs['generator'] = ('pynlpl.formats.folia-v' + LIBVERSION)
metadataattribs = {}
metadataattribs[(('{' + NSFOLIA) + '}type')] = self.metadatatype
if isinstance(self.metadata, ExternalMetaData):
metadataattribs[(('{' + NSFOLIA) + '}src')] = self.metadata.url
e = E.FoLiA(E.metadata(E.annotations(*self.xmldeclarations()), *self.xmlmetadata(), **metadataattribs), **attribs)
for text in self.data:
e.append(text.xml())
return e
|
Serialise the document to XML.
Returns:
lxml.etree.Element
See also:
:meth:`Document.xmlstring`
|
codesearchnet
|
def traverse_nodes(self, node_set, depth=0):
tab = ' '
result = list()
for n in node_set:
repr = (n if (self.nodes[n]['type'] == 'variable') else f"{n}{inspect.signature(self.nodes[n]['lambda_fn'])}")
result.append(f'{(tab * depth)}{repr}')
result.extend(self.traverse_nodes(self.successors(n), depth=(depth + 1)))
return result
|
Recursive depth-first traversal of nodes that returns the traversal as a list of tabbed name strings.
Args:
node_set: Set of input nodes to begin traversal.
depth: Current traversal depth for child node viewing.
Returns:
type: String containing tabbed traversal view.
|
codesearchnet
|
def CheckTaskToMerge(self, task):
with self._lock:
is_abandoned = task.identifier in self._tasks_abandoned
is_processing = task.identifier in self._tasks_processing
is_queued = task.identifier in self._tasks_queued
if not is_queued and not is_processing and not is_abandoned:
raise KeyError('Status of task {0:s} is unknown.'.format(
task.identifier))
return is_queued or is_processing or (is_abandoned and not task.has_retry)
|
Checks if the task should be merged.
Args:
task (Task): task.
Returns:
bool: True if the task should be merged.
Raises:
KeyError: if the task was not queued, processing or abandoned.
|
juraj-google-style
|
def put_many(self, type: Type[T], items: Iterable[T]) -> None:
LOGGER.info("Getting SinkHandlers for \"{type}\"".format(type=type.__name__))
try:
handlers = self._put_types[type]
except KeyError:
try:
LOGGER.info("Building new SinkHandlers for \"{type}\"".format(type=type.__name__))
handlers = self._put_handlers(type)
except NoConversionError:
handlers = None
self._put_types[type] = handlers  # cache the sink handlers for this type
LOGGER.info("Creating new PipelineContext")
context = self._new_context()
LOGGER.info("Sending items \"{items}\" to SinkHandlers".format(items=items))
if handlers is not None:
items = list(items)
for handler in handlers:
handler.put_many(items, context)
|
Puts multiple objects of the same type into the data sink. The objects may be transformed into a new type for insertion if necessary.
Args:
type: The type of the objects being inserted.
items: An iterable (e.g. list) of objects to be inserted into the data pipeline.
|
juraj-google-style
|
def CreateAdsWithCustomizations(client, adgroup_ids, feed_name):
adgroup_ad_service = client.GetService('AdGroupAdService', 'v201809')
expanded_text_ad = {'xsi_type': 'ExpandedTextAd', 'headlinePart1': ('Luxury Cruise to {=%s.Name}' % feed_name), 'headlinePart2': ('Only {=%s.Price}' % feed_name), 'description': ('Offer ends in {=countdown(%s.Date)}!' % feed_name), 'finalUrls': ['http:
operations = [{'operator': 'ADD', 'operand': {'adGroupId': adgroup, 'ad': expanded_text_ad}} for adgroup in adgroup_ids]
response = adgroup_ad_service.mutate(operations)
if (response and ('value' in response)):
for ad in response['value']:
print(('Created an ad with ID "%s", type "%s", and status "%s".' % (ad['ad']['id'], ad['ad']['Ad.Type'], ad['status'])))
else:
raise errors.GoogleAdsError('No ads were added.')
|
Creates ExpandedTextAds that use ad customizations for specified AdGroups.
Args:
client: an AdWordsClient instance.
adgroup_ids: a list containing the AdGroup ids to add ExpandedTextAds to.
feed_name: the name of the feed used to apply customizations.
Raises:
GoogleAdsError: if no ExpandedTextAds were added.
|
codesearchnet
|
def structure_np_to_list(data):
if isinstance(data, np.ndarray):
return data.tolist()
if isinstance(data, dict):
return {key: structure_np_to_list(value) for key, value in data.items()}
if isinstance(data, list):
return [structure_np_to_list(item) for item in data]
if isinstance(data, (int, float, str, bytes)):
return data
raise ValueError(f'Non supported type {type(data)}')
|
Recursively convert numpy arrays in a structure of dicts and lists to plain Python lists.
Args:
data: The structure to convert.
Returns:
The same structure with numpy arrays converted to lists.
|
github-repos
|
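A quick sketch of what the conversion produces on a small nested structure:

```python
import numpy as np

data = {'weights': np.arange(3), 'meta': ['ok', 0.5]}
print(structure_np_to_list(data))
# {'weights': [0, 1, 2], 'meta': ['ok', 0.5]}
```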
def _AddHeader(self, fp):
text = textwrap.wrap(textwrap.dedent(self.config_header), break_on_hyphens=False)
fp.write('\n'.join(['# ' + line for line in text]))
fp.write('\n\n')
|
Create a file header in the config.
Args:
fp: file object, used for writing the header.
|
codesearchnet
|
def rot90(array, k=1, axes=(0, 1)):
array = convert_to_tensor(array)
if array.ndim < 2:
raise ValueError(f'Input array must have at least 2 dimensions. Received: array.ndim={array.ndim}')
if len(axes) != 2 or axes[0] == axes[1]:
raise ValueError(f'Invalid axes: {axes}. Axes must be a tuple of two different dimensions.')
axes = tuple((axis if axis >= 0 else array.ndim + axis for axis in axes))
if not builtins.all((0 <= axis < array.ndim for axis in axes)):
raise ValueError(f'Invalid axes {axes} for tensor with {array.ndim} dimensions')
rotated = torch.rot90(array, k=k, dims=axes)
if isinstance(array, np.ndarray):
rotated = rotated.cpu().numpy()
return rotated
|
Rotate an array by 90 degrees in the specified plane using PyTorch.
Args:
array: Input tensor
k: Number of 90-degree rotations (default=1)
axes: Tuple of two axes that define the
plane of rotation (defaults to `(0, 1)`).
Returns:
Rotated tensor
|
github-repos
|
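The heavy lifting above is done by ``torch.rot90``; a brief stand-alone check of the axis convention, calling PyTorch directly rather than the wrapper:

```python
import torch

x = torch.tensor([[1, 2],
                  [3, 4]])
print(torch.rot90(x, k=1, dims=(0, 1)))
# tensor([[2, 4],
#         [1, 3]])
```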
def heightmap_clamp(hm: np.ndarray, mi: float, ma: float) -> None:
hm.clip(mi, ma, out=hm)  # clip in place; without out= the result would be discarded
|
Clamp all values on this heightmap between ``mi`` and ``ma``
Args:
hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
mi (float): The lower bound to clamp to.
ma (float): The upper bound to clamp to.
.. deprecated:: 2.0
Use ``hm.clip(mi, ma, out=hm)`` instead.
|
codesearchnet
|
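As the deprecation note suggests, plain NumPy performs the clamp directly; a quick stand-alone sketch of an in-place clip:

```python
import numpy as np

hm = np.array([[-1.0, 0.5], [2.0, 0.25]])
hm.clip(0.0, 1.0, out=hm)   # clamp in place
print(hm)                   # all values now lie in [0, 1]
```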
def dprintx(passeditem, special=False):
if DEBUGALL:
if special:
from pprint import pprint
pprint(passeditem)
else:
print(('%s%s%s' % (C_TI, passeditem, C_NORM)))
|
Print Text if DEBUGALL set, optionally with PrettyPrint.
Args:
passeditem (str): item to print
special (bool): determines if item prints with PrettyPrint
or regular print.
|
codesearchnet
|
def get_parameters(params=None, path='', grad_only=True):
global current_scope
if (params is None):
params = OrderedDict()
for (k, v) in iteritems(current_scope):
if isinstance(v, dict):
with parameter_scope(k):
params = get_parameters(params, ('/'.join([path, k]) if path else k), grad_only=grad_only)
else:
assert isinstance(v, nn.Variable)
if ((not grad_only) or v.need_grad):
params[('/'.join([path, k]) if path else k)] = v
return params
|
Get parameter Variables under the current parameter scope.
Args:
params (dict): Internal use. User doesn't set it manually.
path (str): Internal use. User doesn't set it manually.
grad_only (bool): Retrieve all parameters under the current scope if
False, while only parameters with need_grad=True are retrieved
if True.
Returns:
dict: {:obj:`str` : :obj:`~nnabla.Variable`}
|
codesearchnet
|
def depth_april_average_ground_temperature(self, value=None):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError(
'value {} need to be of type float '
'for field `depth_april_average_ground_temperature`'.format(value))
self._depth_april_average_ground_temperature = value
|
Corresponds to IDD Field `depth_april_average_ground_temperature`
Args:
value (float): value for IDD Field `depth_april_average_ground_temperature`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|
def __init__(self, component=None, action=None, target=None, args=None, filename=None, lineno=None, error=None, capacity=None):
self.component = component
self._action = action
self._target = target
self.args = args
self._filename = filename
self._lineno = lineno
self._error = error
self._separator = False
self._capacity = capacity
|
Instantiates a FireTraceElement.
Args:
component: The result of this element of the trace.
action: The type of action (e.g. instantiating a class) taking place.
target: (string) The name of the component being acted upon.
args: The args consumed by the represented action.
filename: The file in which the action is defined, or None if N/A.
lineno: The line number on which the action is defined, or None if N/A.
error: The error represented by the action, or None if N/A.
capacity: (bool) Whether the action could have accepted additional args.
|
github-repos
|
def input_fn(is_training, data_dir, batch_size, num_epochs=1, num_gpus=None, dtype=tf.float32):
mlperf_log.resnet_print(key=mlperf_log.INPUT_ORDER)
filenames = get_filenames(is_training, data_dir)
dataset = tf.data.Dataset.from_tensor_slices(filenames)
if is_training:
dataset = dataset.shuffle(buffer_size=_NUM_TRAIN_FILES)
dataset = dataset.flat_map(tf.data.TFRecordDataset)
return resnet_run_loop.process_record_dataset(dataset=dataset, is_training=is_training, batch_size=batch_size, shuffle_buffer=_SHUFFLE_BUFFER, parse_record_fn=parse_record, num_epochs=num_epochs, num_gpus=num_gpus, examples_per_epoch=(_NUM_IMAGES['train'] if is_training else None), dtype=dtype)
|
Input function which provides batches for train or eval.
Args:
is_training: A boolean denoting whether the input is for training.
data_dir: The directory containing the input data.
batch_size: The number of samples per batch.
num_epochs: The number of epochs to repeat the dataset.
num_gpus: The number of gpus used for training.
dtype: Data type to use for images/features
Returns:
A dataset that can be used for iteration.
|
codesearchnet
|
def is_chief(cluster_spec=None, task_type=None, task_id=None):
if has_worker_context():
return dc_context.get_current_worker_context().is_chief
_validate_cluster_spec(cluster_spec, task_type, task_id)
cluster_spec = normalize_cluster_spec(cluster_spec).as_dict()
if task_type == 'chief' or task_type == 'evaluator':
return True
if 'chief' not in cluster_spec and task_type == 'worker' and (task_id == 0):
return True
return False
|
Returns whether the given task is chief in the cluster.
Since there is at most one evaluator and the evaluator itself should be
independent of the training cluster, the evaluator job is also a chief job on
its own.
If this is currently running under a `_WorkerContext` of distribute
coordinator, the arguments can be omitted as the result is already available.
Args:
cluster_spec: a dict, `ClusterDef` or `ClusterSpec` object specifying the
cluster configurations.
task_type: the task type in the cluster.
task_id: the task id in the cluster.
Returns:
a boolean indicating whether the given task is chief.
Raises:
ValueError: if `task_type` is not in the `cluster_spec` or `task_id` exceeds
the maximum id of the `task_type`.
|
github-repos
|
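A sketch of how the helper behaves for a small cluster; the host:port strings are placeholders and the expected results (shown as comments) follow directly from the logic above:

```python
cluster_spec = {
    'chief': ['host0:2222'],
    'worker': ['host1:2222', 'host2:2222'],
}

# is_chief(cluster_spec, 'chief', 0)                 -> True   (the chief job)
# is_chief(cluster_spec, 'worker', 0)                -> False  (a dedicated chief exists)
# is_chief({'worker': ['host1:2222']}, 'worker', 0)  -> True   (worker 0 acts as chief)
```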
def ToScriptHash(self, address):
if len(address) == 34:
if address[0] == 'A':
data = b58decode(address)
if data[0] != self.AddressVersion:
raise ValueError('Not correct Coin Version')
checksum = Crypto.Default().Hash256(data[:21])[:4]
if checksum != data[21:]:
raise Exception('Address format error')
return UInt160(data=data[1:21])
else:
raise Exception('Address format error')
else:
raise ValueError('Not correct Address, wrong length.')
|
Retrieve the script_hash based from an address.
Args:
address (str): a base58 encoded address.
Raises:
ValueError: if an invalid address is supplied or the coin version is incorrect
Exception: if the address string does not start with 'A' or the checksum fails
Returns:
UInt160: script hash.
|
juraj-google-style
|
def set_metadata(self, entity_type, entity_id, metadata):
if (not is_valid_uuid(entity_id)):
raise StorageArgumentException('Invalid UUID for entity_id: {0}'.format(entity_id))
if (not isinstance(metadata, dict)):
raise StorageArgumentException('The metadata was not provided as a dictionary')
return self._authenticated_request.to_endpoint('{}/{}/metadata/'.format(entity_type, entity_id)).with_json_body(metadata).return_body().post()
|
Set metadata for an entity.
Args:
entity_type (str): Type of the entity. Admitted values: ['project',
'folder', 'file'].
entity_id (str): The UUID of the entity to be modified.
metadata (dict): A dictionary of key/value pairs to be written as
metadata.
Warning:
It will replace all existing metadata with the provided dictionary.
Returns:
A dictionary of the updated metadata::
{
u'bar': u'200',
u'foo': u'100'
}
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes
|
codesearchnet
|
def get_file_list(self):
if os.path.isdir(self.root_path):
return [os.path.join(self.root_path, f) for f in os.listdir(self.root_path) if os.path.isfile(os.path.join(self.root_path, f))]
else:
return [self.root_path]
|
Retrieve the list of absolute paths to all the files in this data source.
Returns:
List[str] List of absolute paths.
|
codesearchnet
|
def allzeros(msg):
d = hex2bin(data(msg))
if bin2int(d) > 0:
return False
else:
return True
|
Check if the data bits are all zeros.
Args:
msg (String): 28-character hexadecimal message string
Returns:
bool: True or False
|
juraj-google-style
|
def run_census(flags_obj, ctx):
train_file = os.path.join(flags_obj.data_dir, census_dataset.TRAINING_FILE)
test_file = os.path.join(flags_obj.data_dir, census_dataset.EVAL_FILE)
def train_input_fn():
return census_dataset.input_fn(
train_file, flags_obj.epochs_between_evals, True, flags_obj.batch_size)
def eval_input_fn():
return census_dataset.input_fn(test_file, 1, False, flags_obj.batch_size)
tensors_to_log = {
'average_loss': '{loss_prefix}head/truediv',
'loss': '{loss_prefix}head/weighted_loss/Sum'
}
model_helpers.apply_clean(flags.FLAGS)
model = build_estimator(
model_dir=flags_obj.model_dir, model_type=flags_obj.model_type,
model_column_fn=census_dataset.build_model_columns,
inter_op=flags_obj.inter_op_parallelism_threads,
intra_op=flags_obj.intra_op_parallelism_threads,
ctx=ctx)
loss_prefix = LOSS_PREFIX.get(flags_obj.model_type, '')
tensors_to_log = {k: v.format(loss_prefix=loss_prefix)
for k, v in tensors_to_log.items()}
train_hooks = hooks_helper.get_train_hooks(
flags_obj.hooks, model_dir=flags_obj.model_dir,
batch_size=flags_obj.batch_size, tensors_to_log=tensors_to_log)
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, hooks=train_hooks)
eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn)
tf.estimator.train_and_evaluate(model, train_spec, eval_spec)
|
Construct the input functions and estimator, then run training and evaluation.
Args:
flags_obj: Object containing user specified flags.
ctx: Context object forwarded to build_estimator.
juraj-google-style
|
def check_file(self, fs, info):
if ((self.exclude is not None) and fs.match(self.exclude, info.name)):
return False
return fs.match(self.filter, info.name)
|
Check if a filename should be included.
Override to exclude files from the walk.
Arguments:
fs (FS): A filesystem instance.
info (Info): A resource info object.
Returns:
bool: `True` if the file should be included.
|
codesearchnet
|
def expo(base=2, factor=1, max_value=None):
n = 0
while True:
a = (factor * (base ** n))
if ((max_value is None) or (a < max_value)):
(yield a)
n += 1
else:
(yield max_value)
|
Generator for exponential decay.
Args:
base: The mathematical base of the exponentiation operation
factor: Factor to multiply the exponentiation by.
max_value: The maximum value to yield. Once the value in the
true exponential sequence exceeds this, the value
of max_value will forever after be yielded.
|
codesearchnet
|
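A short usage sketch of the generator above, capping the value at 10:

```python
from itertools import islice

delays = expo(base=2, factor=1, max_value=10)
print(list(islice(delays, 6)))   # [1, 2, 4, 8, 10, 10]
```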